···
+when:
+  - event: ["push", "pull_request"]
+    branch: ["master"]
+
+engine: nixery
+
+dependencies:
+  nixpkgs:
+    - go
+    - gcc
+
+steps:
+  - name: patch static dir
+    command: |
+      mkdir -p appview/pages/static; touch appview/pages/static/x
+
+  - name: run all tests
+    environment:
+      CGO_ENABLED: 1
+    command: |
+      go test -v ./...
.zed/settings.json (-16)
···
-// Folder-specific settings
-//
-// For a full list of overridable settings, and general information on folder-specific settings,
-// see the documentation: https://zed.dev/docs/configuring-zed#settings-files
-{
-  "languages": {
-    "HTML": {
-      "prettier": {
-        "format_on_save": false,
-        "allowed": true,
-        "parser": "go-template",
-        "plugins": ["prettier-plugin-go-template"]
-      }
-    }
-  }
-}
api/tangled/cbor_gen.go (+1481 -1498)
···
 
 	return nil
 }
+func (t *FeedReaction) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{164}); err != nil {
+		return err
+	}
+
+	// t.LexiconTypeID (string) (string)
+	if len("$type") > 1000000 {
+		return xerrors.Errorf("Value in field \"$type\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("$type")); err != nil {
+		return err
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.feed.reaction"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("sh.tangled.feed.reaction")); err != nil {
+		return err
+	}
+
+	// t.Subject (string) (string)
+	if len("subject") > 1000000 {
+		return xerrors.Errorf("Value in field \"subject\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("subject")); err != nil {
+		return err
+	}
+
+	if len(t.Subject) > 1000000 {
+		return xerrors.Errorf("Value in field t.Subject was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Subject))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.Subject)); err != nil {
+		return err
+	}
+
+	// t.Reaction (string) (string)
+	if len("reaction") > 1000000 {
+		return xerrors.Errorf("Value in field \"reaction\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("reaction"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("reaction")); err != nil {
+		return err
+	}
+
+	if len(t.Reaction) > 1000000 {
+		return xerrors.Errorf("Value in field t.Reaction was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Reaction))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.Reaction)); err != nil {
+		return err
+	}
+
+	// t.CreatedAt (string) (string)
+	if len("createdAt") > 1000000 {
+		return xerrors.Errorf("Value in field \"createdAt\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("createdAt")); err != nil {
+		return err
+	}
+
+	if len(t.CreatedAt) > 1000000 {
+		return xerrors.Errorf("Value in field t.CreatedAt was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *FeedReaction) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = FeedReaction{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajMap {
+		return fmt.Errorf("cbor input should be of type map")
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("FeedReaction: map struct too large (%d)", extra)
+	}
+
+	n := extra
+
+	nameBuf := make([]byte, 9)
+	for i := uint64(0); i < n; i++ {
+		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
+		if err != nil {
+			return err
+		}
+
+		if !ok {
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch string(nameBuf[:nameLen]) {
+		// t.LexiconTypeID (string) (string)
+		case "$type":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.LexiconTypeID = string(sval)
+			}
+		// t.Subject (string) (string)
+		case "subject":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.Subject = string(sval)
+			}
+		// t.Reaction (string) (string)
+		case "reaction":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.Reaction = string(sval)
+			}
+		// t.CreatedAt (string) (string)
+		case "createdAt":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.CreatedAt = string(sval)
+			}
+
+		default:
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
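The `{164}` written first is the CBOR header byte for a map of exactly four entries (0xa4 = major type 5, length 4), matching the four fields that follow. A minimal round-trip sketch against these generated methods; the import path and field values are illustrative assumptions, not taken from this change:

```go
package main

import (
	"bytes"
	"fmt"

	// hypothetical import path for the generated api/tangled package
	"tangled.sh/tangled.sh/core/api/tangled"
)

func main() {
	in := tangled.FeedReaction{
		LexiconTypeID: "sh.tangled.feed.reaction",
		Subject:       "at://did:plc:example/sh.tangled.feed.star/3abc", // made-up AT-URI
		Reaction:      "👍",
		CreatedAt:     "2025-01-01T00:00:00Z",
	}

	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil { // writes the 0xa4 header, then each field
		panic(err)
	}

	var out tangled.FeedReaction
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.Reaction == in.Reaction) // true
}
```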
 func (t *FeedStar) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
···
 
 	return nil
 }
-func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
+func (t *GitRefUpdate_CommitCountBreakdown) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+	fieldCount := 1
+
+	if t.ByEmail == nil {
+		fieldCount--
+	}
+
+	if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
+		return err
+	}
+
+	// t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
+	if t.ByEmail != nil {
+
+		if len("byEmail") > 1000000 {
+			return xerrors.Errorf("Value in field \"byEmail\" was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("byEmail"))); err != nil {
+			return err
+		}
+		if _, err := cw.WriteString(string("byEmail")); err != nil {
+			return err
+		}
+
+		if len(t.ByEmail) > 8192 {
+			return xerrors.Errorf("Slice value in field t.ByEmail was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.ByEmail))); err != nil {
+			return err
+		}
+		for _, v := range t.ByEmail {
+			if err := v.MarshalCBOR(cw); err != nil {
+				return err
+			}
+
+		}
+	}
+	return nil
+}
+
+func (t *GitRefUpdate_CommitCountBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = GitRefUpdate_CommitCountBreakdown{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajMap {
+		return fmt.Errorf("cbor input should be of type map")
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("GitRefUpdate_CommitCountBreakdown: map struct too large (%d)", extra)
+	}
+
+	n := extra
+
+	nameBuf := make([]byte, 7)
+	for i := uint64(0); i < n; i++ {
+		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
+		if err != nil {
+			return err
+		}
+
+		if !ok {
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch string(nameBuf[:nameLen]) {
+		// t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
+		case "byEmail":
+
+			maj, extra, err = cr.ReadHeader()
+			if err != nil {
+				return err
+			}
+
+			if extra > 8192 {
+				return fmt.Errorf("t.ByEmail: array too large (%d)", extra)
+			}
+
+			if maj != cbg.MajArray {
+				return fmt.Errorf("expected cbor array")
+			}
+
+			if extra > 0 {
+				t.ByEmail = make([]*GitRefUpdate_IndividualEmailCommitCount, extra)
+			}
+
+			for i := 0; i < int(extra); i++ {
+				{
+					var maj byte
+					var extra uint64
+					var err error
+					_ = maj
+					_ = extra
+					_ = err
+
+					{
+
+						b, err := cr.ReadByte()
+						if err != nil {
+							return err
+						}
+						if b != cbg.CborNull[0] {
+							if err := cr.UnreadByte(); err != nil {
+								return err
+							}
+							t.ByEmail[i] = new(GitRefUpdate_IndividualEmailCommitCount)
+							if err := t.ByEmail[i].UnmarshalCBOR(cr); err != nil {
+								return xerrors.Errorf("unmarshaling t.ByEmail[i] pointer: %w", err)
+							}
+						}
+
+					}
+
+				}
+			}
+
+		default:
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
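Unlike FeedReaction, this type does not hard-code its map header: the optional `byEmail` key is omitted entirely when nil, so `fieldCount` is computed first and `cbg.CborEncodeMajorType(cbg.MajMap, fieldCount)` emits a header for either zero or one entry. For small maps that header is a single byte, `0xa0 | n` (RFC 8949, major type 5); a sketch independent of cbor-gen:

```go
// smallMapHeader returns the one-byte CBOR header for a map of n entries,
// valid only for n < 24 (larger sizes need additional length bytes).
func smallMapHeader(n int) byte {
	if n < 0 || n >= 24 {
		panic("single-byte form only covers 0..23")
	}
	return 0xa0 | byte(n) // e.g. n=4 -> 0xa4 == 164, the literal FeedReaction writes above
}
```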
+func (t *GitRefUpdate_IndividualEmailCommitCount) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
 		return err
···
 		return err
 	}
 
-	// t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
-	if len("commitCount") > 1000000 {
-		return xerrors.Errorf("Value in field \"commitCount\" was too long")
+	// t.Count (int64) (int64)
+	if len("count") > 1000000 {
+		return xerrors.Errorf("Value in field \"count\" was too long")
 	}
 
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("count"))); err != nil {
 		return err
 	}
-	if _, err := cw.WriteString(string("commitCount")); err != nil {
+	if _, err := cw.WriteString(string("count")); err != nil {
 		return err
 	}
 
-	if err := t.CommitCount.MarshalCBOR(cw); err != nil {
-		return err
+	if t.Count >= 0 {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Count)); err != nil {
+			return err
+		}
+	} else {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Count-1)); err != nil {
+			return err
+		}
 	}
 
-	// t.IsDefaultRef (bool) (bool)
-	if len("isDefaultRef") > 1000000 {
-		return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
+	// t.Email (string) (string)
+	if len("email") > 1000000 {
+		return xerrors.Errorf("Value in field \"email\" was too long")
 	}
 
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("email"))); err != nil {
 		return err
 	}
-	if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
+	if _, err := cw.WriteString(string("email")); err != nil {
 		return err
 	}
 
-	if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
+	if len(t.Email) > 1000000 {
+		return xerrors.Errorf("Value in field t.Email was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Email))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.Email)); err != nil {
 		return err
 	}
 	return nil
 }
 
-func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
-	*t = GitRefUpdate_Meta{}
+func (t *GitRefUpdate_IndividualEmailCommitCount) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = GitRefUpdate_IndividualEmailCommitCount{}
 
 	cr := cbg.NewCborReader(r)
 
···
 	}
 
 	if extra > cbg.MaxLength {
-		return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
+		return fmt.Errorf("GitRefUpdate_IndividualEmailCommitCount: map struct too large (%d)", extra)
 	}
 
 	n := extra
 
-	nameBuf := make([]byte, 12)
+	nameBuf := make([]byte, 5)
 	for i := uint64(0); i < n; i++ {
 		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
 		if err != nil {
···
 		}
 
 		switch string(nameBuf[:nameLen]) {
-		// t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
-		case "commitCount":
-
+		// t.Count (int64) (int64)
+		case "count":
 			{
-
-				b, err := cr.ReadByte()
+				maj, extra, err := cr.ReadHeader()
 				if err != nil {
 					return err
 				}
-				if b != cbg.CborNull[0] {
-					if err := cr.UnreadByte(); err != nil {
-						return err
+				var extraI int64
+				switch maj {
+				case cbg.MajUnsignedInt:
+					extraI = int64(extra)
+					if extraI < 0 {
+						return fmt.Errorf("int64 positive overflow")
 					}
-					t.CommitCount = new(GitRefUpdate_Meta_CommitCount)
-					if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
-						return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
+				case cbg.MajNegativeInt:
+					extraI = int64(extra)
+					if extraI < 0 {
+						return fmt.Errorf("int64 negative overflow")
 					}
+					extraI = -1 - extraI
+				default:
+					return fmt.Errorf("wrong type for int64 field: %d", maj)
 				}
 
+				t.Count = int64(extraI)
 			}
-		// t.IsDefaultRef (bool) (bool)
-		case "isDefaultRef":
+		// t.Email (string) (string)
+		case "email":
 
-			maj, extra, err = cr.ReadHeader()
-			if err != nil {
-				return err
-			}
-			if maj != cbg.MajOther {
-				return fmt.Errorf("booleans must be major type 7")
-			}
-			switch extra {
-			case 20:
-				t.IsDefaultRef = false
-			case 21:
-				t.IsDefaultRef = true
-			default:
-				return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.Email = string(sval)
 			}
 
 		default:
···
 
 	return nil
 }
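The integer branches mirror CBOR's two integer major types: values >= 0 are emitted as major type 0 with the value itself as the argument, and negative values as major type 1 carrying `-1 - v`, which the decoder inverts with `extraI = -1 - extraI`. A self-contained sketch of both directions (standard RFC 8949 behavior, not specific to this codebase):

```go
// encodeInt64 maps an int64 onto CBOR's (major type, argument) pair;
// decodeInt64 inverts it, matching the generated branches above.
func encodeInt64(v int64) (maj byte, arg uint64) {
	if v >= 0 {
		return 0, uint64(v) // major type 0: unsigned integer
	}
	return 1, uint64(-v - 1) // major type 1: negative integer, argument stores -1-v
}

func decodeInt64(maj byte, arg uint64) int64 {
	if maj == 0 {
		return int64(arg)
	}
	return -1 - int64(arg) // undo the -1-v mapping
}
```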
-func (t *GitRefUpdate_Meta_CommitCount) MarshalCBOR(w io.Writer) error {
+func (t *GitRefUpdate_LangBreakdown) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
 		return err
···
 	cw := cbg.NewCborWriter(w)
 	fieldCount := 1
 
-	if t.ByEmail == nil {
+	if t.Inputs == nil {
 		fieldCount--
 	}
 
···
 		return err
 	}
 
-	// t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
-	if t.ByEmail != nil {
+	// t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
+	if t.Inputs != nil {
 
-		if len("byEmail") > 1000000 {
-			return xerrors.Errorf("Value in field \"byEmail\" was too long")
+		if len("inputs") > 1000000 {
+			return xerrors.Errorf("Value in field \"inputs\" was too long")
 		}
 
-		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("byEmail"))); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("inputs"))); err != nil {
 			return err
 		}
-		if _, err := cw.WriteString(string("byEmail")); err != nil {
+		if _, err := cw.WriteString(string("inputs")); err != nil {
 			return err
 		}
 
-		if len(t.ByEmail) > 8192 {
-			return xerrors.Errorf("Slice value in field t.ByEmail was too long")
+		if len(t.Inputs) > 8192 {
+			return xerrors.Errorf("Slice value in field t.Inputs was too long")
 		}
 
-		if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.ByEmail))); err != nil {
+		if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Inputs))); err != nil {
 			return err
 		}
-		for _, v := range t.ByEmail {
+		for _, v := range t.Inputs {
 			if err := v.MarshalCBOR(cw); err != nil {
 				return err
 			}
···
 	return nil
 }
 
-func (t *GitRefUpdate_Meta_CommitCount) UnmarshalCBOR(r io.Reader) (err error) {
-	*t = GitRefUpdate_Meta_CommitCount{}
+func (t *GitRefUpdate_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = GitRefUpdate_LangBreakdown{}
 
 	cr := cbg.NewCborReader(r)
 
···
 	}
 
 	if extra > cbg.MaxLength {
-		return fmt.Errorf("GitRefUpdate_Meta_CommitCount: map struct too large (%d)", extra)
+		return fmt.Errorf("GitRefUpdate_LangBreakdown: map struct too large (%d)", extra)
 	}
 
 	n := extra
 
-	nameBuf := make([]byte, 7)
+	nameBuf := make([]byte, 6)
 	for i := uint64(0); i < n; i++ {
 		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
 		if err != nil {
···
 		}
 
 		switch string(nameBuf[:nameLen]) {
-		// t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
-		case "byEmail":
+		// t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
+		case "inputs":
 
 			maj, extra, err = cr.ReadHeader()
 			if err != nil {
···
 			}
 
 			if extra > 8192 {
-				return fmt.Errorf("t.ByEmail: array too large (%d)", extra)
+				return fmt.Errorf("t.Inputs: array too large (%d)", extra)
 			}
 
 			if maj != cbg.MajArray {
···
 			}
 
 			if extra > 0 {
-				t.ByEmail = make([]*GitRefUpdate_Meta_CommitCount_ByEmail_Elem, extra)
+				t.Inputs = make([]*GitRefUpdate_IndividualLanguageSize, extra)
 			}
 
 			for i := 0; i < int(extra); i++ {
···
 						if err := cr.UnreadByte(); err != nil {
 							return err
 						}
-						t.ByEmail[i] = new(GitRefUpdate_Meta_CommitCount_ByEmail_Elem)
-						if err := t.ByEmail[i].UnmarshalCBOR(cr); err != nil {
-							return xerrors.Errorf("unmarshaling t.ByEmail[i] pointer: %w", err)
+						t.Inputs[i] = new(GitRefUpdate_IndividualLanguageSize)
+						if err := t.Inputs[i].UnmarshalCBOR(cr); err != nil {
+							return xerrors.Errorf("unmarshaling t.Inputs[i] pointer: %w", err)
 						}
 					}
 
···
 
 	return nil
 }
-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) MarshalCBOR(w io.Writer) error {
+func (t *GitRefUpdate_IndividualLanguageSize) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
 		return err
···
 		return err
 	}
 
-	// t.Count (int64) (int64)
-	if len("count") > 1000000 {
-		return xerrors.Errorf("Value in field \"count\" was too long")
+	// t.Lang (string) (string)
+	if len("lang") > 1000000 {
+		return xerrors.Errorf("Value in field \"lang\" was too long")
 	}
 
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("count"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("lang"))); err != nil {
 		return err
 	}
-	if _, err := cw.WriteString(string("count")); err != nil {
+	if _, err := cw.WriteString(string("lang")); err != nil {
 		return err
 	}
 
-	if t.Count >= 0 {
-		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Count)); err != nil {
-			return err
-		}
-	} else {
-		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Count-1)); err != nil {
-			return err
-		}
+	if len(t.Lang) > 1000000 {
+		return xerrors.Errorf("Value in field t.Lang was too long")
 	}
 
-	// t.Email (string) (string)
-	if len("email") > 1000000 {
-		return xerrors.Errorf("Value in field \"email\" was too long")
-	}
-
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("email"))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Lang))); err != nil {
 		return err
 	}
-	if _, err := cw.WriteString(string("email")); err != nil {
+	if _, err := cw.WriteString(string(t.Lang)); err != nil {
 		return err
 	}
 
-	if len(t.Email) > 1000000 {
-		return xerrors.Errorf("Value in field t.Email was too long")
+	// t.Size (int64) (int64)
+	if len("size") > 1000000 {
+		return xerrors.Errorf("Value in field \"size\" was too long")
 	}
 
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Email))); err != nil {
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("size"))); err != nil {
 		return err
 	}
-	if _, err := cw.WriteString(string(t.Email)); err != nil {
+	if _, err := cw.WriteString(string("size")); err != nil {
 		return err
 	}
+
+	if t.Size >= 0 {
+		if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
+			return err
+		}
+	} else {
+		if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Size-1)); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) UnmarshalCBOR(r io.Reader) (err error) {
-	*t = GitRefUpdate_Meta_CommitCount_ByEmail_Elem{}
+func (t *GitRefUpdate_IndividualLanguageSize) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = GitRefUpdate_IndividualLanguageSize{}
 
 	cr := cbg.NewCborReader(r)
 
···
 	}
 
 	if extra > cbg.MaxLength {
-		return fmt.Errorf("GitRefUpdate_Meta_CommitCount_ByEmail_Elem: map struct too large (%d)", extra)
+		return fmt.Errorf("GitRefUpdate_IndividualLanguageSize: map struct too large (%d)", extra)
 	}
 
 	n := extra
 
-	nameBuf := make([]byte, 5)
+	nameBuf := make([]byte, 4)
 	for i := uint64(0); i < n; i++ {
 		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
 		if err != nil {
···
 		}
 
 		switch string(nameBuf[:nameLen]) {
-		// t.Count (int64) (int64)
-		case "count":
+		// t.Lang (string) (string)
+		case "lang":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.Lang = string(sval)
+			}
+		// t.Size (int64) (int64)
+		case "size":
 			{
 				maj, extra, err := cr.ReadHeader()
 				if err != nil {
···
 					return fmt.Errorf("wrong type for int64 field: %d", maj)
 				}
 
-				t.Count = int64(extraI)
+				t.Size = int64(extraI)
 			}
-		// t.Email (string) (string)
-		case "email":
+
+		default:
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+	fieldCount := 3
+
+	if t.LangBreakdown == nil {
+		fieldCount--
+	}
+
+	if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
+		return err
+	}
+
+	// t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
+	if len("commitCount") > 1000000 {
+		return xerrors.Errorf("Value in field \"commitCount\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("commitCount")); err != nil {
+		return err
+	}
+
+	if err := t.CommitCount.MarshalCBOR(cw); err != nil {
+		return err
+	}
+
+	// t.IsDefaultRef (bool) (bool)
+	if len("isDefaultRef") > 1000000 {
+		return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
+		return err
+	}
+
+	if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
+		return err
+	}
+
+	// t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
+	if t.LangBreakdown != nil {
+
+		if len("langBreakdown") > 1000000 {
+			return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
+			return err
+		}
+		if _, err := cw.WriteString(string("langBreakdown")); err != nil {
+			return err
+		}
+
+		if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = GitRefUpdate_Meta{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajMap {
+		return fmt.Errorf("cbor input should be of type map")
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
+	}
+
+	n := extra
+
+	nameBuf := make([]byte, 13)
+	for i := uint64(0); i < n; i++ {
+		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
+		if err != nil {
+			return err
+		}
+
+		if !ok {
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch string(nameBuf[:nameLen]) {
+		// t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
+		case "commitCount":
 
 			{
-				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+
+				b, err := cr.ReadByte()
 				if err != nil {
 					return err
 				}
+				if b != cbg.CborNull[0] {
+					if err := cr.UnreadByte(); err != nil {
+						return err
+					}
+					t.CommitCount = new(GitRefUpdate_CommitCountBreakdown)
+					if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
+						return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
+					}
+				}
 
-				t.Email = string(sval)
+			}
+		// t.IsDefaultRef (bool) (bool)
+		case "isDefaultRef":
+
+			maj, extra, err = cr.ReadHeader()
+			if err != nil {
+				return err
+			}
+			if maj != cbg.MajOther {
+				return fmt.Errorf("booleans must be major type 7")
+			}
+			switch extra {
+			case 20:
+				t.IsDefaultRef = false
+			case 21:
+				t.IsDefaultRef = true
+			default:
+				return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+			}
+		// t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
+		case "langBreakdown":
+
+			{
+
+				b, err := cr.ReadByte()
+				if err != nil {
+					return err
+				}
+				if b != cbg.CborNull[0] {
+					if err := cr.UnreadByte(); err != nil {
+						return err
+					}
+					t.LangBreakdown = new(GitRefUpdate_LangBreakdown)
+					if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
+						return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
+					}
+				}
+
 			}
 
 		default:
···
 				}
 
 				t.Subject = string(sval)
+			}
+		// t.CreatedAt (string) (string)
+		case "createdAt":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.CreatedAt = string(sval)
+			}
+
+		default:
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+func (t *Knot) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+
+	if _, err := cw.Write([]byte{162}); err != nil {
+		return err
+	}
+
+	// t.LexiconTypeID (string) (string)
+	if len("$type") > 1000000 {
+		return xerrors.Errorf("Value in field \"$type\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("$type")); err != nil {
+		return err
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.knot"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("sh.tangled.knot")); err != nil {
+		return err
+	}
+
+	// t.CreatedAt (string) (string)
+	if len("createdAt") > 1000000 {
+		return xerrors.Errorf("Value in field \"createdAt\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("createdAt")); err != nil {
+		return err
+	}
+
+	if len(t.CreatedAt) > 1000000 {
+		return xerrors.Errorf("Value in field t.CreatedAt was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *Knot) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = Knot{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajMap {
+		return fmt.Errorf("cbor input should be of type map")
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("Knot: map struct too large (%d)", extra)
+	}
+
+	n := extra
+
+	nameBuf := make([]byte, 9)
+	for i := uint64(0); i < n; i++ {
+		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
+		if err != nil {
+			return err
+		}
+
+		if !ok {
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch string(nameBuf[:nameLen]) {
+		// t.LexiconTypeID (string) (string)
+		case "$type":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.LexiconTypeID = string(sval)
 			}
 		// t.CreatedAt (string) (string)
 		case "createdAt":
···
 
 	return nil
 }
-func (t *Pipeline_Dependencies_Elem) MarshalCBOR(w io.Writer) error {
-	if t == nil {
-		_, err := w.Write(cbg.CborNull)
-		return err
-	}
-
-	cw := cbg.NewCborWriter(w)
-
-	if _, err := cw.Write([]byte{162}); err != nil {
-		return err
-	}
-
-	// t.Packages ([]string) (slice)
-	if len("packages") > 1000000 {
-		return xerrors.Errorf("Value in field \"packages\" was too long")
-	}
-
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("packages"))); err != nil {
-		return err
-	}
-	if _, err := cw.WriteString(string("packages")); err != nil {
-		return err
-	}
-
-	if len(t.Packages) > 8192 {
-		return xerrors.Errorf("Slice value in field t.Packages was too long")
-	}
-
-	if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Packages))); err != nil {
-		return err
-	}
-	for _, v := range t.Packages {
-		if len(v) > 1000000 {
-			return xerrors.Errorf("Value in field v was too long")
-		}
-
-		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
-			return err
-		}
-		if _, err := cw.WriteString(string(v)); err != nil {
-			return err
-		}
-
-	}
-
-	// t.Registry (string) (string)
-	if len("registry") > 1000000 {
-		return xerrors.Errorf("Value in field \"registry\" was too long")
-	}
-
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("registry"))); err != nil {
-		return err
-	}
-	if _, err := cw.WriteString(string("registry")); err != nil {
-		return err
-	}
-
-	if len(t.Registry) > 1000000 {
-		return xerrors.Errorf("Value in field t.Registry was too long")
-	}
-
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Registry))); err != nil {
-		return err
-	}
-	if _, err := cw.WriteString(string(t.Registry)); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (t *Pipeline_Dependencies_Elem) UnmarshalCBOR(r io.Reader) (err error) {
-	*t = Pipeline_Dependencies_Elem{}
-
-	cr := cbg.NewCborReader(r)
-
-	maj, extra, err := cr.ReadHeader()
-	if err != nil {
-		return err
-	}
-	defer func() {
-		if err == io.EOF {
-			err = io.ErrUnexpectedEOF
-		}
-	}()
-
-	if maj != cbg.MajMap {
-		return fmt.Errorf("cbor input should be of type map")
-	}
-
-	if extra > cbg.MaxLength {
-		return fmt.Errorf("Pipeline_Dependencies_Elem: map struct too large (%d)", extra)
-	}
-
-	n := extra
-
-	nameBuf := make([]byte, 8)
-	for i := uint64(0); i < n; i++ {
-		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
-		if err != nil {
-			return err
-		}
-
-		if !ok {
-			// Field doesn't exist on this type, so ignore it
-			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
-				return err
-			}
-			continue
-		}
-
-		switch string(nameBuf[:nameLen]) {
-		// t.Packages ([]string) (slice)
-		case "packages":
-
-			maj, extra, err = cr.ReadHeader()
-			if err != nil {
-				return err
-			}
-
-			if extra > 8192 {
-				return fmt.Errorf("t.Packages: array too large (%d)", extra)
-			}
-
-			if maj != cbg.MajArray {
-				return fmt.Errorf("expected cbor array")
-			}
-
-			if extra > 0 {
-				t.Packages = make([]string, extra)
-			}
-
-			for i := 0; i < int(extra); i++ {
-				{
-					var maj byte
-					var extra uint64
-					var err error
-					_ = maj
-					_ = extra
-					_ = err
-
-					{
-						sval, err := cbg.ReadStringWithMax(cr, 1000000)
-						if err != nil {
-							return err
-						}
-
-						t.Packages[i] = string(sval)
-					}
-
-				}
-			}
-		// t.Registry (string) (string)
-		case "registry":
-
-			{
-				sval, err := cbg.ReadStringWithMax(cr, 1000000)
-				if err != nil {
-					return err
-				}
-
-				t.Registry = string(sval)
-			}
-
-		default:
-			// Field doesn't exist on this type, so ignore it
-			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
 func (t *Pipeline_ManualTriggerData) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
···
 		return err
 	}
 
-	// t.Inputs ([]*tangled.Pipeline_ManualTriggerData_Inputs_Elem) (slice)
+	// t.Inputs ([]*tangled.Pipeline_Pair) (slice)
 	if t.Inputs != nil {
 
 		if len("inputs") > 1000000 {
···
 		}
 
 		switch string(nameBuf[:nameLen]) {
-		// t.Inputs ([]*tangled.Pipeline_ManualTriggerData_Inputs_Elem) (slice)
+		// t.Inputs ([]*tangled.Pipeline_Pair) (slice)
 		case "inputs":
 
 			maj, extra, err = cr.ReadHeader()
···
 			}
 
 			if extra > 0 {
-				t.Inputs = make([]*Pipeline_ManualTriggerData_Inputs_Elem, extra)
+				t.Inputs = make([]*Pipeline_Pair, extra)
 			}
 
 			for i := 0; i < int(extra); i++ {
···
 						if err := cr.UnreadByte(); err != nil {
 							return err
 						}
-						t.Inputs[i] = new(Pipeline_ManualTriggerData_Inputs_Elem)
+						t.Inputs[i] = new(Pipeline_Pair)
 						if err := t.Inputs[i].UnmarshalCBOR(cr); err != nil {
 							return xerrors.Errorf("unmarshaling t.Inputs[i] pointer: %w", err)
 						}
···
 
 	return nil
 }
-func (t *Pipeline_ManualTriggerData_Inputs_Elem) MarshalCBOR(w io.Writer) error {
+func (t *Pipeline_Pair) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
 		return err
···
 	return nil
 }
 
-func (t *Pipeline_ManualTriggerData_Inputs_Elem) UnmarshalCBOR(r io.Reader) (err error) {
-	*t = Pipeline_ManualTriggerData_Inputs_Elem{}
+func (t *Pipeline_Pair) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = Pipeline_Pair{}
 
 	cr := cbg.NewCborReader(r)
 
···
 	}
 
 	if extra > cbg.MaxLength {
-		return fmt.Errorf("Pipeline_ManualTriggerData_Inputs_Elem: map struct too large (%d)", extra)
+		return fmt.Errorf("Pipeline_Pair: map struct too large (%d)", extra)
 	}
 
 	n := extra
···
 
 	return nil
 }
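`Pipeline_ManualTriggerData_Inputs_Elem` is renamed to the more general `Pipeline_Pair`; its body is collapsed in this diff, so the following construction sketch assumes it keeps the key/value shape of the `Pipeline_Step_Environment_Elem` type removed below. The import path and `Key`/`Value` field names are assumptions, not shown by this change:

```go
package main

import (
	"bytes"
	"log"

	// hypothetical import path for the generated api/tangled package
	"tangled.sh/tangled.sh/core/api/tangled"
)

func main() {
	// Key/Value are assumed field names on Pipeline_Pair.
	data := tangled.Pipeline_ManualTriggerData{
		Inputs: []*tangled.Pipeline_Pair{
			{Key: "ref", Value: "refs/heads/master"},
		},
	}

	var buf bytes.Buffer
	if err := data.MarshalCBOR(&buf); err != nil {
		log.Fatal(err)
	}
	log.Printf("encoded %d bytes", buf.Len())
}
```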
30173017-30183018-func (t *Pipeline_Step_Environment_Elem) MarshalCBOR(w io.Writer) error {
30193019- if t == nil {
30203020- _, err := w.Write(cbg.CborNull)
30213021- return err
30223022- }
30233023-30243024- cw := cbg.NewCborWriter(w)
30253025-30263026- if _, err := cw.Write([]byte{162}); err != nil {
30273027- return err
30283028- }
30293029-30303030- // t.Key (string) (string)
30313031- if len("key") > 1000000 {
30323032- return xerrors.Errorf("Value in field \"key\" was too long")
30333033- }
30343034-30353035- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("key"))); err != nil {
30363036- return err
30373037- }
30383038- if _, err := cw.WriteString(string("key")); err != nil {
30393039- return err
30403040- }
30413041-30423042- if len(t.Key) > 1000000 {
30433043- return xerrors.Errorf("Value in field t.Key was too long")
30443044- }
30453045-30463046- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil {
30473047- return err
30483048- }
30493049- if _, err := cw.WriteString(string(t.Key)); err != nil {
30503050- return err
30513051- }
30523052-30533053- // t.Value (string) (string)
30543054- if len("value") > 1000000 {
30553055- return xerrors.Errorf("Value in field \"value\" was too long")
30563056- }
30573057-30583058- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("value"))); err != nil {
30593059- return err
30603060- }
30613061- if _, err := cw.WriteString(string("value")); err != nil {
30623062- return err
30633063- }
30643064-30653065- if len(t.Value) > 1000000 {
30663066- return xerrors.Errorf("Value in field t.Value was too long")
30673067- }
30683068-30693069- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil {
30703070- return err
30713071- }
30723072- if _, err := cw.WriteString(string(t.Value)); err != nil {
30733073- return err
30743074- }
30753075- return nil
30763076-}
30773077-30783078-func (t *Pipeline_Step_Environment_Elem) UnmarshalCBOR(r io.Reader) (err error) {
30793079- *t = Pipeline_Step_Environment_Elem{}
30803080-30813081- cr := cbg.NewCborReader(r)
30823082-30833083- maj, extra, err := cr.ReadHeader()
30843084- if err != nil {
30853085- return err
30863086- }
30873087- defer func() {
30883088- if err == io.EOF {
30893089- err = io.ErrUnexpectedEOF
30903090- }
30913091- }()
30923092-30933093- if maj != cbg.MajMap {
30943094- return fmt.Errorf("cbor input should be of type map")
30953095- }
30963096-30973097- if extra > cbg.MaxLength {
30983098- return fmt.Errorf("Pipeline_Step_Environment_Elem: map struct too large (%d)", extra)
30993099- }
31003100-31013101- n := extra
31023102-31033103- nameBuf := make([]byte, 5)
31043104- for i := uint64(0); i < n; i++ {
31053105- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
31063106- if err != nil {
31073107- return err
31083108- }
31093109-31103110- if !ok {
31113111- // Field doesn't exist on this type, so ignore it
31123112- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
31133113- return err
31143114- }
31153115- continue
31163116- }
31173117-31183118- switch string(nameBuf[:nameLen]) {
31193119- // t.Key (string) (string)
31203120- case "key":
31213121-31223122- {
31233123- sval, err := cbg.ReadStringWithMax(cr, 1000000)
31243124- if err != nil {
31253125- return err
31263126- }
31273127-31283128- t.Key = string(sval)
31293129- }
31303130- // t.Value (string) (string)
31313131- case "value":
31323132-31333133- {
31343134- sval, err := cbg.ReadStringWithMax(cr, 1000000)
31353135- if err != nil {
31363136- return err
31373137- }
31383138-31393139- t.Value = string(sval)
31403140- }
31413141-31423142- default:
31433143- // Field doesn't exist on this type, so ignore it
31443144- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
31453145- return err
31463146- }
31473147- }
31483148- }
31493149-31503150- return nil
31513151-}
31523514func (t *PipelineStatus) MarshalCBOR(w io.Writer) error {
31533515 if t == nil {
31543516 _, err := w.Write(cbg.CborNull)
···3511387335123874 return nil
35133875}
35143514-35153515-func (t *Pipeline_Step) MarshalCBOR(w io.Writer) error {
35163516- if t == nil {
35173517- _, err := w.Write(cbg.CborNull)
35183518- return err
35193519- }
35203520-35213521- cw := cbg.NewCborWriter(w)
35223522- fieldCount := 3
35233523-35243524- if t.Environment == nil {
35253525- fieldCount--
35263526- }
35273527-35283528- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
35293529- return err
35303530- }
35313531-35323532- // t.Name (string) (string)
35333533- if len("name") > 1000000 {
35343534- return xerrors.Errorf("Value in field \"name\" was too long")
35353535- }
35363536-35373537- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("name"))); err != nil {
35383538- return err
35393539- }
35403540- if _, err := cw.WriteString(string("name")); err != nil {
35413541- return err
35423542- }
35433543-35443544- if len(t.Name) > 1000000 {
35453545- return xerrors.Errorf("Value in field t.Name was too long")
35463546- }
35473547-35483548- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
35493549- return err
35503550- }
35513551- if _, err := cw.WriteString(string(t.Name)); err != nil {
35523552- return err
35533553- }
35543554-35553555- // t.Command (string) (string)
35563556- if len("command") > 1000000 {
35573557- return xerrors.Errorf("Value in field \"command\" was too long")
35583558- }
35593559-35603560- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("command"))); err != nil {
35613561- return err
35623562- }
35633563- if _, err := cw.WriteString(string("command")); err != nil {
35643564- return err
35653565- }
35663566-35673567- if len(t.Command) > 1000000 {
35683568- return xerrors.Errorf("Value in field t.Command was too long")
35693569- }
35703570-35713571- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Command))); err != nil {
35723572- return err
35733573- }
35743574- if _, err := cw.WriteString(string(t.Command)); err != nil {
35753575- return err
35763576- }
35773577-35783578- // t.Environment ([]*tangled.Pipeline_Step_Environment_Elem) (slice)
35793579- if t.Environment != nil {
35803580-35813581- if len("environment") > 1000000 {
35823582- return xerrors.Errorf("Value in field \"environment\" was too long")
35833583- }
35843584-35853585- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
35863586- return err
35873587- }
35883588- if _, err := cw.WriteString(string("environment")); err != nil {
35893589- return err
35903590- }
35913591-35923592- if len(t.Environment) > 8192 {
35933593- return xerrors.Errorf("Slice value in field t.Environment was too long")
35943594- }
35953595-35963596- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
35973597- return err
35983598- }
35993599- for _, v := range t.Environment {
36003600- if err := v.MarshalCBOR(cw); err != nil {
36013601- return err
36023602- }
36033603-36043604- }
36053605- }
36063606- return nil
36073607-}
36083608-36093609-func (t *Pipeline_Step) UnmarshalCBOR(r io.Reader) (err error) {
36103610- *t = Pipeline_Step{}
36113611-36123612- cr := cbg.NewCborReader(r)
36133613-36143614- maj, extra, err := cr.ReadHeader()
36153615- if err != nil {
36163616- return err
36173617- }
36183618- defer func() {
36193619- if err == io.EOF {
36203620- err = io.ErrUnexpectedEOF
36213621- }
36223622- }()
36233623-36243624- if maj != cbg.MajMap {
36253625- return fmt.Errorf("cbor input should be of type map")
36263626- }
36273627-36283628- if extra > cbg.MaxLength {
36293629- return fmt.Errorf("Pipeline_Step: map struct too large (%d)", extra)
36303630- }
36313631-36323632- n := extra
36333633-36343634- nameBuf := make([]byte, 11)
36353635- for i := uint64(0); i < n; i++ {
36363636- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
36373637- if err != nil {
36383638- return err
36393639- }
36403640-36413641- if !ok {
36423642- // Field doesn't exist on this type, so ignore it
36433643- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
36443644- return err
36453645- }
36463646- continue
36473647- }
36483648-36493649- switch string(nameBuf[:nameLen]) {
36503650- // t.Name (string) (string)
36513651- case "name":
36523652-36533653- {
36543654- sval, err := cbg.ReadStringWithMax(cr, 1000000)
36553655- if err != nil {
36563656- return err
36573657- }
36583658-36593659- t.Name = string(sval)
36603660- }
36613661- // t.Command (string) (string)
36623662- case "command":
36633663-36643664- {
36653665- sval, err := cbg.ReadStringWithMax(cr, 1000000)
36663666- if err != nil {
36673667- return err
36683668- }
36693669-36703670- t.Command = string(sval)
36713671- }
36723672- // t.Environment ([]*tangled.Pipeline_Step_Environment_Elem) (slice)
36733673- case "environment":
36743674-36753675- maj, extra, err = cr.ReadHeader()
36763676- if err != nil {
36773677- return err
36783678- }
36793679-36803680- if extra > 8192 {
36813681- return fmt.Errorf("t.Environment: array too large (%d)", extra)
36823682- }
36833683-36843684- if maj != cbg.MajArray {
36853685- return fmt.Errorf("expected cbor array")
36863686- }
36873687-36883688- if extra > 0 {
36893689- t.Environment = make([]*Pipeline_Step_Environment_Elem, extra)
36903690- }
36913691-36923692- for i := 0; i < int(extra); i++ {
36933693- {
36943694- var maj byte
36953695- var extra uint64
36963696- var err error
36973697- _ = maj
36983698- _ = extra
36993699- _ = err
37003700-37013701- {
37023702-37033703- b, err := cr.ReadByte()
37043704- if err != nil {
37053705- return err
37063706- }
37073707- if b != cbg.CborNull[0] {
37083708- if err := cr.UnreadByte(); err != nil {
37093709- return err
37103710- }
37113711- t.Environment[i] = new(Pipeline_Step_Environment_Elem)
37123712- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
37133713- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
37143714- }
37153715- }
37163716-37173717- }
37183718-37193719- }
37203720- }
37213721-37223722- default:
37233723- // Field doesn't exist on this type, so ignore it
37243724- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
37253725- return err
37263726- }
37273727- }
37283728- }
37293729-37303730- return nil
37313731-}
37323876func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
37333877 if t == nil {
37343878 _, err := w.Write(cbg.CborNull)
···4205434942064350 cw := cbg.NewCborWriter(w)
4207435142084208- if _, err := cw.Write([]byte{165}); err != nil {
43524352+ if _, err := cw.Write([]byte{164}); err != nil {
43534353+ return err
43544354+ }
43554355+43564356+ // t.Raw (string) (string)
43574357+ if len("raw") > 1000000 {
43584358+ return xerrors.Errorf("Value in field \"raw\" was too long")
43594359+ }
43604360+43614361+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("raw"))); err != nil {
43624362+ return err
43634363+ }
43644364+ if _, err := cw.WriteString(string("raw")); err != nil {
43654365+ return err
43664366+ }
43674367+43684368+ if len(t.Raw) > 1000000 {
43694369+ return xerrors.Errorf("Value in field t.Raw was too long")
43704370+ }
43714371+43724372+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Raw))); err != nil {
43734373+ return err
43744374+ }
43754375+ if _, err := cw.WriteString(string(t.Raw)); err != nil {
42094376 return err
42104377 }
42114378···42484415 return err
42494416 }
42504417
42514251- // t.Steps ([]*tangled.Pipeline_Step) (slice)
42524252- if len("steps") > 1000000 {
42534253- return xerrors.Errorf("Value in field \"steps\" was too long")
44184418+ // t.Engine (string) (string)
44194419+ if len("engine") > 1000000 {
44204420+ return xerrors.Errorf("Value in field \"engine\" was too long")
42544421 }
42554422
42564256- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("steps"))); err != nil {
42574257- return err
42584258- }
42594259- if _, err := cw.WriteString(string("steps")); err != nil {
44234423+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("engine"))); err != nil {
42604424 return err
42614425 }
42624262-
42634263- if len(t.Steps) > 8192 {
42644264- return xerrors.Errorf("Slice value in field t.Steps was too long")
42654265- }
42664266-
42674267- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Steps))); err != nil {
44264426+ if _, err := cw.WriteString(string("engine")); err != nil {
42684427 return err
42694428 }
42704270- for _, v := range t.Steps {
42714271- if err := v.MarshalCBOR(cw); err != nil {
42724272- return err
42734273- }
42744429
44304430+ if len(t.Engine) > 1000000 {
44314431+ return xerrors.Errorf("Value in field t.Engine was too long")
42754432 }
42764433
42774277- // t.Environment ([]*tangled.Pipeline_Workflow_Environment_Elem) (slice)
42784278- if len("environment") > 1000000 {
42794279- return xerrors.Errorf("Value in field \"environment\" was too long")
42804280- }
42814281-
42824282- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
44344434+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Engine))); err != nil {
42834435 return err
42844436 }
42854285- if _, err := cw.WriteString(string("environment")); err != nil {
44374437+ if _, err := cw.WriteString(string(t.Engine)); err != nil {
42864438 return err
42874439 }
42884288-
42894289- if len(t.Environment) > 8192 {
42904290- return xerrors.Errorf("Slice value in field t.Environment was too long")
42914291- }
42924292-
42934293- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
42944294- return err
42954295- }
42964296- for _, v := range t.Environment {
42974297- if err := v.MarshalCBOR(cw); err != nil {
42984298- return err
42994299- }
43004300-
43014301- }
43024302-
43034303- // t.Dependencies ([]tangled.Pipeline_Dependencies_Elem) (slice)
43044304- if len("dependencies") > 1000000 {
43054305- return xerrors.Errorf("Value in field \"dependencies\" was too long")
43064306- }
43074307-
43084308- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependencies"))); err != nil {
43094309- return err
43104310- }
43114311- if _, err := cw.WriteString(string("dependencies")); err != nil {
43124312- return err
43134313- }
43144314-
43154315- if len(t.Dependencies) > 8192 {
43164316- return xerrors.Errorf("Slice value in field t.Dependencies was too long")
43174317- }
43184318-
43194319- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Dependencies))); err != nil {
43204320- return err
43214321- }
43224322- for _, v := range t.Dependencies {
43234323- if err := v.MarshalCBOR(cw); err != nil {
43244324- return err
43254325- }
43264326-
43274327- }
43284440 return nil
43294441}
43304442
···
43534465
43544466 n := extra
43554467
43564356- nameBuf := make([]byte, 12)
44684468+ nameBuf := make([]byte, 6)
43574469 for i := uint64(0); i < n; i++ {
43584470 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
43594471 if err != nil {
···
43694481 }
43704482
43714483 switch string(nameBuf[:nameLen]) {
43724372- // t.Name (string) (string)
44844484+ // t.Raw (string) (string)
44854485+ case "raw":
44864486+
44874487+ {
44884488+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
44894489+ if err != nil {
44904490+ return err
44914491+ }
44924492+
44934493+ t.Raw = string(sval)
44944494+ }
44954495+ // t.Name (string) (string)
43734496 case "name":
43744497
43754498 {
···
44004523 }
44014524
44024525 }
44034403- // t.Steps ([]*tangled.Pipeline_Step) (slice)
44044404- case "steps":
44054405-44064406- maj, extra, err = cr.ReadHeader()
44074407- if err != nil {
44084408- return err
44094409- }
44104410-44114411- if extra > 8192 {
44124412- return fmt.Errorf("t.Steps: array too large (%d)", extra)
44134413- }
44144414-44154415- if maj != cbg.MajArray {
44164416- return fmt.Errorf("expected cbor array")
44174417- }
44184418-44194419- if extra > 0 {
44204420- t.Steps = make([]*Pipeline_Step, extra)
44214421- }
44224422-44234423- for i := 0; i < int(extra); i++ {
44244424- {
44254425- var maj byte
44264426- var extra uint64
44274427- var err error
44284428- _ = maj
44294429- _ = extra
44304430- _ = err
44314431-44324432- {
44334433-44344434- b, err := cr.ReadByte()
44354435- if err != nil {
44364436- return err
44374437- }
44384438- if b != cbg.CborNull[0] {
44394439- if err := cr.UnreadByte(); err != nil {
44404440- return err
44414441- }
44424442- t.Steps[i] = new(Pipeline_Step)
44434443- if err := t.Steps[i].UnmarshalCBOR(cr); err != nil {
44444444- return xerrors.Errorf("unmarshaling t.Steps[i] pointer: %w", err)
44454445- }
44464446- }
44474447-44484448- }
44494449-44504450- }
44514451- }
44524452- // t.Environment ([]*tangled.Pipeline_Workflow_Environment_Elem) (slice)
44534453- case "environment":
44544454-44554455- maj, extra, err = cr.ReadHeader()
44564456- if err != nil {
44574457- return err
44584458- }
44594459-44604460- if extra > 8192 {
44614461- return fmt.Errorf("t.Environment: array too large (%d)", extra)
44624462- }
44634463-44644464- if maj != cbg.MajArray {
44654465- return fmt.Errorf("expected cbor array")
44664466- }
44674467-44684468- if extra > 0 {
44694469- t.Environment = make([]*Pipeline_Workflow_Environment_Elem, extra)
44704470- }
44714471-44724472- for i := 0; i < int(extra); i++ {
44734473- {
44744474- var maj byte
44754475- var extra uint64
44764476- var err error
44774477- _ = maj
44784478- _ = extra
44794479- _ = err
44804480-44814481- {
44824482-44834483- b, err := cr.ReadByte()
44844484- if err != nil {
44854485- return err
44864486- }
44874487- if b != cbg.CborNull[0] {
44884488- if err := cr.UnreadByte(); err != nil {
44894489- return err
44904490- }
44914491- t.Environment[i] = new(Pipeline_Workflow_Environment_Elem)
44924492- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
44934493- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
44944494- }
44954495- }
44964496-44974497- }
44984498-44994499- }
45004500- }
45014501- // t.Dependencies ([]tangled.Pipeline_Dependencies_Elem) (slice)
45024502- case "dependencies":
45034503-45044504- maj, extra, err = cr.ReadHeader()
45054505- if err != nil {
45064506- return err
45074507- }
45084508-45094509- if extra > 8192 {
45104510- return fmt.Errorf("t.Dependencies: array too large (%d)", extra)
45114511- }
45124512-45134513- if maj != cbg.MajArray {
45144514- return fmt.Errorf("expected cbor array")
45154515- }
45164516-45174517- if extra > 0 {
45184518- t.Dependencies = make([]Pipeline_Dependencies_Elem, extra)
45194519- }
45204520-45214521- for i := 0; i < int(extra); i++ {
45224522- {
45234523- var maj byte
45244524- var extra uint64
45254525- var err error
45264526- _ = maj
45274527- _ = extra
45284528- _ = err
45294529-45304530- {
45314531-45324532- if err := t.Dependencies[i].UnmarshalCBOR(cr); err != nil {
45334533- return xerrors.Errorf("unmarshaling t.Dependencies[i]: %w", err)
45344534- }
45354535-45364536- }
45374537-45384538- }
45394539- }
45404540-45414541- default:
45424542- // Field doesn't exist on this type, so ignore it
45434543- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
45444544- return err
45454545- }
45464546- }
45474547- }
45484548-45494549- return nil
45504550-}
45514551-func (t *Pipeline_Workflow_Environment_Elem) MarshalCBOR(w io.Writer) error {
45524552- if t == nil {
45534553- _, err := w.Write(cbg.CborNull)
45544554- return err
45554555- }
45564556-45574557- cw := cbg.NewCborWriter(w)
45584558-45594559- if _, err := cw.Write([]byte{162}); err != nil {
45604560- return err
45614561- }
45624562-45634563- // t.Key (string) (string)
45644564- if len("key") > 1000000 {
45654565- return xerrors.Errorf("Value in field \"key\" was too long")
45664566- }
45674567-45684568- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("key"))); err != nil {
45694569- return err
45704570- }
45714571- if _, err := cw.WriteString(string("key")); err != nil {
45724572- return err
45734573- }
45744574-45754575- if len(t.Key) > 1000000 {
45764576- return xerrors.Errorf("Value in field t.Key was too long")
45774577- }
45784578-45794579- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil {
45804580- return err
45814581- }
45824582- if _, err := cw.WriteString(string(t.Key)); err != nil {
45834583- return err
45844584- }
45854585-45864586- // t.Value (string) (string)
45874587- if len("value") > 1000000 {
45884588- return xerrors.Errorf("Value in field \"value\" was too long")
45894589- }
45904590-45914591- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("value"))); err != nil {
45924592- return err
45934593- }
45944594- if _, err := cw.WriteString(string("value")); err != nil {
45954595- return err
45964596- }
45974597-45984598- if len(t.Value) > 1000000 {
45994599- return xerrors.Errorf("Value in field t.Value was too long")
46004600- }
46014601-46024602- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil {
46034603- return err
46044604- }
46054605- if _, err := cw.WriteString(string(t.Value)); err != nil {
46064606- return err
46074607- }
46084608- return nil
46094609-}
46104610-46114611-func (t *Pipeline_Workflow_Environment_Elem) UnmarshalCBOR(r io.Reader) (err error) {
46124612- *t = Pipeline_Workflow_Environment_Elem{}
46134613-46144614- cr := cbg.NewCborReader(r)
46154615-46164616- maj, extra, err := cr.ReadHeader()
46174617- if err != nil {
46184618- return err
46194619- }
46204620- defer func() {
46214621- if err == io.EOF {
46224622- err = io.ErrUnexpectedEOF
46234623- }
46244624- }()
46254625-46264626- if maj != cbg.MajMap {
46274627- return fmt.Errorf("cbor input should be of type map")
46284628- }
46294629-46304630- if extra > cbg.MaxLength {
46314631- return fmt.Errorf("Pipeline_Workflow_Environment_Elem: map struct too large (%d)", extra)
46324632- }
46334633-46344634- n := extra
46354635-46364636- nameBuf := make([]byte, 5)
46374637- for i := uint64(0); i < n; i++ {
46384638- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
46394639- if err != nil {
46404640- return err
46414641- }
46424642-46434643- if !ok {
46444644- // Field doesn't exist on this type, so ignore it
46454645- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
46464646- return err
46474647- }
46484648- continue
46494649- }
46504650-46514651- switch string(nameBuf[:nameLen]) {
46524652- // t.Key (string) (string)
46534653- case "key":
45264526+ // t.Engine (string) (string)
45274527+ case "engine":
46544528
46554529 {
46564530 sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
46584532 return err
46594533 }
46604534
46614661- t.Key = string(sval)
46624662- }
46634663- // t.Value (string) (string)
46644664- case "value":
46654665-
46664666- {
46674667- sval, err := cbg.ReadStringWithMax(cr, 1000000)
46684668- if err != nil {
46694669- return err
46704670- }
46714671-
46724672- t.Value = string(sval)
45354535+ t.Engine = string(sval)
46734536 }
46744537
46754538 default:
···
55745437
55755438 return nil
55765439}
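cbor-gen sizes the nameBuf scratch buffer to the longest map key the decoder must match, which is why it drops from 12 bytes ("dependencies") to 6 ("engine") now that this record carries only name, raw, and engine; the steps, environment, and dependencies slices are gone from the marshal path above, with the workflow body carried in the raw string instead. Keys longer than the buffer fail the ok result of ReadFullStringIntoBuf and fall through to ScanForLinks, so unknown fields in older records are skipped rather than rejected.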
54405440+func (t *RepoCollaborator) MarshalCBOR(w io.Writer) error {
54415441+ if t == nil {
54425442+ _, err := w.Write(cbg.CborNull)
54435443+ return err
54445444+ }
54455445+54465446+ cw := cbg.NewCborWriter(w)
54475447+54485448+ if _, err := cw.Write([]byte{164}); err != nil {
54495449+ return err
54505450+ }
54515451+54525452+ // t.Repo (string) (string)
54535453+ if len("repo") > 1000000 {
54545454+ return xerrors.Errorf("Value in field \"repo\" was too long")
54555455+ }
54565456+54575457+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
54585458+ return err
54595459+ }
54605460+ if _, err := cw.WriteString(string("repo")); err != nil {
54615461+ return err
54625462+ }
54635463+54645464+ if len(t.Repo) > 1000000 {
54655465+ return xerrors.Errorf("Value in field t.Repo was too long")
54665466+ }
54675467+54685468+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
54695469+ return err
54705470+ }
54715471+ if _, err := cw.WriteString(string(t.Repo)); err != nil {
54725472+ return err
54735473+ }
54745474+54755475+ // t.LexiconTypeID (string) (string)
54765476+ if len("$type") > 1000000 {
54775477+ return xerrors.Errorf("Value in field \"$type\" was too long")
54785478+ }
54795479+54805480+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
54815481+ return err
54825482+ }
54835483+ if _, err := cw.WriteString(string("$type")); err != nil {
54845484+ return err
54855485+ }
54865486+54875487+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.repo.collaborator"))); err != nil {
54885488+ return err
54895489+ }
54905490+ if _, err := cw.WriteString(string("sh.tangled.repo.collaborator")); err != nil {
54915491+ return err
54925492+ }
54935493+54945494+ // t.Subject (string) (string)
54955495+ if len("subject") > 1000000 {
54965496+ return xerrors.Errorf("Value in field \"subject\" was too long")
54975497+ }
54985498+54995499+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil {
55005500+ return err
55015501+ }
55025502+ if _, err := cw.WriteString(string("subject")); err != nil {
55035503+ return err
55045504+ }
55055505+55065506+ if len(t.Subject) > 1000000 {
55075507+ return xerrors.Errorf("Value in field t.Subject was too long")
55085508+ }
55095509+55105510+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Subject))); err != nil {
55115511+ return err
55125512+ }
55135513+ if _, err := cw.WriteString(string(t.Subject)); err != nil {
55145514+ return err
55155515+ }
55165516+55175517+ // t.CreatedAt (string) (string)
55185518+ if len("createdAt") > 1000000 {
55195519+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
55205520+ }
55215521+55225522+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
55235523+ return err
55245524+ }
55255525+ if _, err := cw.WriteString(string("createdAt")); err != nil {
55265526+ return err
55275527+ }
55285528+55295529+ if len(t.CreatedAt) > 1000000 {
55305530+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
55315531+ }
55325532+55335533+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
55345534+ return err
55355535+ }
55365536+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
55375537+ return err
55385538+ }
55395539+ return nil
55405540+}
55415541+55425542+func (t *RepoCollaborator) UnmarshalCBOR(r io.Reader) (err error) {
55435543+ *t = RepoCollaborator{}
55445544+55455545+ cr := cbg.NewCborReader(r)
55465546+55475547+ maj, extra, err := cr.ReadHeader()
55485548+ if err != nil {
55495549+ return err
55505550+ }
55515551+ defer func() {
55525552+ if err == io.EOF {
55535553+ err = io.ErrUnexpectedEOF
55545554+ }
55555555+ }()
55565556+55575557+ if maj != cbg.MajMap {
55585558+ return fmt.Errorf("cbor input should be of type map")
55595559+ }
55605560+55615561+ if extra > cbg.MaxLength {
55625562+ return fmt.Errorf("RepoCollaborator: map struct too large (%d)", extra)
55635563+ }
55645564+55655565+ n := extra
55665566+55675567+ nameBuf := make([]byte, 9)
55685568+ for i := uint64(0); i < n; i++ {
55695569+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
55705570+ if err != nil {
55715571+ return err
55725572+ }
55735573+55745574+ if !ok {
55755575+ // Field doesn't exist on this type, so ignore it
55765576+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
55775577+ return err
55785578+ }
55795579+ continue
55805580+ }
55815581+55825582+ switch string(nameBuf[:nameLen]) {
55835583+ // t.Repo (string) (string)
55845584+ case "repo":
55855585+55865586+ {
55875587+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
55885588+ if err != nil {
55895589+ return err
55905590+ }
55915591+55925592+ t.Repo = string(sval)
55935593+ }
55945594+ // t.LexiconTypeID (string) (string)
55955595+ case "$type":
55965596+55975597+ {
55985598+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
55995599+ if err != nil {
56005600+ return err
56015601+ }
56025602+56035603+ t.LexiconTypeID = string(sval)
56045604+ }
56055605+ // t.Subject (string) (string)
56065606+ case "subject":
56075607+56085608+ {
56095609+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
56105610+ if err != nil {
56115611+ return err
56125612+ }
56135613+56145614+ t.Subject = string(sval)
56155615+ }
56165616+ // t.CreatedAt (string) (string)
56175617+ case "createdAt":
56185618+56195619+ {
56205620+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
56215621+ if err != nil {
56225622+ return err
56235623+ }
56245624+56255625+ t.CreatedAt = string(sval)
56265626+ }
56275627+56285628+ default:
56295629+ // Field doesn't exist on this type, so ignore it
56305630+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
56315631+ return err
56325632+ }
56335633+ }
56345634+ }
56355635+56365636+ return nil
56375637+}
55775638func (t *RepoIssue) MarshalCBOR(w io.Writer) error {
55785639 if t == nil {
55795640 _, err := w.Write(cbg.CborNull)
···55815642 }
5582564355835644 cw := cbg.NewCborWriter(w)
55845584- fieldCount := 7
56455645+ fieldCount := 5
55855646
55865647 if t.Body == nil {
55875648 fieldCount--
···56655726 return err
56665727 }
5667572856685668- // t.Owner (string) (string)
56695669- if len("owner") > 1000000 {
56705670- return xerrors.Errorf("Value in field \"owner\" was too long")
56715671- }
56725672-56735673- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
56745674- return err
56755675- }
56765676- if _, err := cw.WriteString(string("owner")); err != nil {
56775677- return err
56785678- }
56795679-56805680- if len(t.Owner) > 1000000 {
56815681- return xerrors.Errorf("Value in field t.Owner was too long")
56825682- }
56835683-56845684- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Owner))); err != nil {
56855685- return err
56865686- }
56875687- if _, err := cw.WriteString(string(t.Owner)); err != nil {
56885688- return err
56895689- }
56905690-56915729 // t.Title (string) (string)
56925730 if len("title") > 1000000 {
56935731 return xerrors.Errorf("Value in field \"title\" was too long")
···57095747 }
57105748 if _, err := cw.WriteString(string(t.Title)); err != nil {
57115749 return err
57125712- }
57135713-57145714- // t.IssueId (int64) (int64)
57155715- if len("issueId") > 1000000 {
57165716- return xerrors.Errorf("Value in field \"issueId\" was too long")
57175717- }
57185718-57195719- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("issueId"))); err != nil {
57205720- return err
57215721- }
57225722- if _, err := cw.WriteString(string("issueId")); err != nil {
57235723- return err
57245724- }
57255725-57265726- if t.IssueId >= 0 {
57275727- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.IssueId)); err != nil {
57285728- return err
57295729- }
57305730- } else {
57315731- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.IssueId-1)); err != nil {
57325732- return err
57335733- }
57345750 }
5735575157365752 // t.CreatedAt (string) (string)
···5842585858435859 t.LexiconTypeID = string(sval)
58445860 }
58455845- // t.Owner (string) (string)
58465846- case "owner":
58475847-58485848- {
58495849- sval, err := cbg.ReadStringWithMax(cr, 1000000)
58505850- if err != nil {
58515851- return err
58525852- }
58535853-58545854- t.Owner = string(sval)
58555855- }
58565861 // t.Title (string) (string)
58575862 case "title":
58585863···5864586958655870 t.Title = string(sval)
58665871 }
58675867- // t.IssueId (int64) (int64)
58685868- case "issueId":
58695869- {
58705870- maj, extra, err := cr.ReadHeader()
58715871- if err != nil {
58725872- return err
58735873- }
58745874- var extraI int64
58755875- switch maj {
58765876- case cbg.MajUnsignedInt:
58775877- extraI = int64(extra)
58785878- if extraI < 0 {
58795879- return fmt.Errorf("int64 positive overflow")
58805880- }
58815881- case cbg.MajNegativeInt:
58825882- extraI = int64(extra)
58835883- if extraI < 0 {
58845884- return fmt.Errorf("int64 negative overflow")
58855885- }
58865886- extraI = -1 - extraI
58875887- default:
58885888- return fmt.Errorf("wrong type for int64 field: %d", maj)
58895889- }
58905890-58915891- t.IssueId = int64(extraI)
58925892- }
58935872 // t.CreatedAt (string) (string)
58945873 case "createdAt":
58955874···59195898 }
5920589959215900 cw := cbg.NewCborWriter(w)
59225922- fieldCount := 7
59015901+ fieldCount := 5
5923590259245924- if t.CommentId == nil {
59255925- fieldCount--
59265926- }
59275927-59285928- if t.Owner == nil {
59295929- fieldCount--
59305930- }
59315931-59325932- if t.Repo == nil {
59035903+ if t.ReplyTo == nil {
59335904 fieldCount--
59345905 }
59355906···59605931 return err
59615932 }
5962593359635963- // t.Repo (string) (string)
59645964- if t.Repo != nil {
59655965-59665966- if len("repo") > 1000000 {
59675967- return xerrors.Errorf("Value in field \"repo\" was too long")
59685968- }
59695969-59705970- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
59715971- return err
59725972- }
59735973- if _, err := cw.WriteString(string("repo")); err != nil {
59745974- return err
59755975- }
59765976-59775977- if t.Repo == nil {
59785978- if _, err := cw.Write(cbg.CborNull); err != nil {
59795979- return err
59805980- }
59815981- } else {
59825982- if len(*t.Repo) > 1000000 {
59835983- return xerrors.Errorf("Value in field t.Repo was too long")
59845984- }
59855985-59865986- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
59875987- return err
59885988- }
59895989- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
59905990- return err
59915991- }
59925992- }
59935993- }
59945994-59955934 // t.LexiconTypeID (string) (string)
59965935 if len("$type") > 1000000 {
59975936 return xerrors.Errorf("Value in field \"$type\" was too long")
···60345973 return err
60355974 }
6036597560376037- // t.Owner (string) (string)
60386038- if t.Owner != nil {
59765976+ // t.ReplyTo (string) (string)
59775977+ if t.ReplyTo != nil {
6039597860406040- if len("owner") > 1000000 {
60416041- return xerrors.Errorf("Value in field \"owner\" was too long")
59795979+ if len("replyTo") > 1000000 {
59805980+ return xerrors.Errorf("Value in field \"replyTo\" was too long")
60425981 }
6043598260446044- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
59835983+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("replyTo"))); err != nil {
60455984 return err
60465985 }
60476047- if _, err := cw.WriteString(string("owner")); err != nil {
59865986+ if _, err := cw.WriteString(string("replyTo")); err != nil {
60485987 return err
60495988 }
6050598960516051- if t.Owner == nil {
59905990+ if t.ReplyTo == nil {
60525991 if _, err := cw.Write(cbg.CborNull); err != nil {
60535992 return err
60545993 }
60555994 } else {
60566056- if len(*t.Owner) > 1000000 {
60576057- return xerrors.Errorf("Value in field t.Owner was too long")
59955995+ if len(*t.ReplyTo) > 1000000 {
59965996+ return xerrors.Errorf("Value in field t.ReplyTo was too long")
60585997 }
6059599860606060- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
59995999+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.ReplyTo))); err != nil {
60616000 return err
60626001 }
60636063- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
60026002+ if _, err := cw.WriteString(string(*t.ReplyTo)); err != nil {
60646003 return err
60656004 }
60666005 }
60676067- }
60686068-60696069- // t.CommentId (int64) (int64)
60706070- if t.CommentId != nil {
60716071-60726072- if len("commentId") > 1000000 {
60736073- return xerrors.Errorf("Value in field \"commentId\" was too long")
60746074- }
60756075-60766076- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
60776077- return err
60786078- }
60796079- if _, err := cw.WriteString(string("commentId")); err != nil {
60806080- return err
60816081- }
60826082-60836083- if t.CommentId == nil {
60846084- if _, err := cw.Write(cbg.CborNull); err != nil {
60856085- return err
60866086- }
60876087- } else {
60886088- if *t.CommentId >= 0 {
60896089- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
60906090- return err
60916091- }
60926092- } else {
60936093- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
60946094- return err
60956095- }
60966096- }
60976097- }
60986098-60996006 }
6100600761016008 // t.CreatedAt (string) (string)
···6175608261766083 t.Body = string(sval)
61776084 }
61786178- // t.Repo (string) (string)
61796179- case "repo":
61806180-61816181- {
61826182- b, err := cr.ReadByte()
61836183- if err != nil {
61846184- return err
61856185- }
61866186- if b != cbg.CborNull[0] {
61876187- if err := cr.UnreadByte(); err != nil {
61886188- return err
61896189- }
61906190-61916191- sval, err := cbg.ReadStringWithMax(cr, 1000000)
61926192- if err != nil {
61936193- return err
61946194- }
61956195-61966196- t.Repo = (*string)(&sval)
61976197- }
61986198- }
61996085 // t.LexiconTypeID (string) (string)
62006086 case "$type":
62016087···6218610462196105 t.Issue = string(sval)
62206106 }
62216221- // t.Owner (string) (string)
62226222- case "owner":
61076107+ // t.ReplyTo (string) (string)
61086108+ case "replyTo":
6223610962246110 {
62256111 b, err := cr.ReadByte()
···62366122 return err
62376123 }
6238612462396239- t.Owner = (*string)(&sval)
62406240- }
62416241- }
62426242- // t.CommentId (int64) (int64)
62436243- case "commentId":
62446244- {
62456245-62466246- b, err := cr.ReadByte()
62476247- if err != nil {
62486248- return err
62496249- }
62506250- if b != cbg.CborNull[0] {
62516251- if err := cr.UnreadByte(); err != nil {
62526252- return err
62536253- }
62546254- maj, extra, err := cr.ReadHeader()
62556255- if err != nil {
62566256- return err
62576257- }
62586258- var extraI int64
62596259- switch maj {
62606260- case cbg.MajUnsignedInt:
62616261- extraI = int64(extra)
62626262- if extraI < 0 {
62636263- return fmt.Errorf("int64 positive overflow")
62646264- }
62656265- case cbg.MajNegativeInt:
62666266- extraI = int64(extra)
62676267- if extraI < 0 {
62686268- return fmt.Errorf("int64 negative overflow")
62696269- }
62706270- extraI = -1 - extraI
62716271- default:
62726272- return fmt.Errorf("wrong type for int64 field: %d", maj)
62736273- }
62746274-62756275- t.CommentId = (*int64)(&extraI)
61256125+ t.ReplyTo = (*string)(&sval)
62766126 }
62776127 }
62786128 // t.CreatedAt (string) (string)
···64686318 }
6469631964706320 cw := cbg.NewCborWriter(w)
64716471- fieldCount := 9
63216321+ fieldCount := 7
6472632264736323 if t.Body == nil {
64746324 fieldCount--
···65796429 return err
65806430 }
6581643165826582- // t.PullId (int64) (int64)
65836583- if len("pullId") > 1000000 {
65846584- return xerrors.Errorf("Value in field \"pullId\" was too long")
65856585- }
65866586-65876587- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pullId"))); err != nil {
65886588- return err
65896589- }
65906590- if _, err := cw.WriteString(string("pullId")); err != nil {
65916591- return err
65926592- }
65936593-65946594- if t.PullId >= 0 {
65956595- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PullId)); err != nil {
65966596- return err
65976597- }
65986598- } else {
65996599- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.PullId-1)); err != nil {
66006600- return err
66016601- }
66026602- }
66036603-66046432 // t.Source (tangled.RepoPull_Source) (struct)
66056433 if t.Source != nil {
66066434···66206448 }
66216449 }
6622645066236623- // t.CreatedAt (string) (string)
66246624- if len("createdAt") > 1000000 {
66256625- return xerrors.Errorf("Value in field \"createdAt\" was too long")
64516451+ // t.Target (tangled.RepoPull_Target) (struct)
64526452+ if len("target") > 1000000 {
64536453+ return xerrors.Errorf("Value in field \"target\" was too long")
66266454 }
6627645566286628- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
64566456+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("target"))); err != nil {
66296457 return err
66306458 }
66316631- if _, err := cw.WriteString(string("createdAt")); err != nil {
64596459+ if _, err := cw.WriteString(string("target")); err != nil {
66326460 return err
66336461 }
6634646266356635- if len(t.CreatedAt) > 1000000 {
66366636- return xerrors.Errorf("Value in field t.CreatedAt was too long")
66376637- }
66386638-66396639- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
66406640- return err
66416641- }
66426642- if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
64636463+ if err := t.Target.MarshalCBOR(cw); err != nil {
66436464 return err
66446465 }
6645646666466646- // t.TargetRepo (string) (string)
66476647- if len("targetRepo") > 1000000 {
66486648- return xerrors.Errorf("Value in field \"targetRepo\" was too long")
66496649- }
66506650-66516651- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetRepo"))); err != nil {
66526652- return err
66536653- }
66546654- if _, err := cw.WriteString(string("targetRepo")); err != nil {
66556655- return err
66566656- }
66576657-66586658- if len(t.TargetRepo) > 1000000 {
66596659- return xerrors.Errorf("Value in field t.TargetRepo was too long")
66606660- }
66616661-66626662- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetRepo))); err != nil {
66636663- return err
66646664- }
66656665- if _, err := cw.WriteString(string(t.TargetRepo)); err != nil {
66666666- return err
64676467+ // t.CreatedAt (string) (string)
64686468+ if len("createdAt") > 1000000 {
64696469+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
66676470 }
6668647166696669- // t.TargetBranch (string) (string)
66706670- if len("targetBranch") > 1000000 {
66716671- return xerrors.Errorf("Value in field \"targetBranch\" was too long")
66726672- }
66736673-66746674- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetBranch"))); err != nil {
64726472+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
66756473 return err
66766474 }
66776677- if _, err := cw.WriteString(string("targetBranch")); err != nil {
64756475+ if _, err := cw.WriteString(string("createdAt")); err != nil {
66786476 return err
66796477 }
6680647866816681- if len(t.TargetBranch) > 1000000 {
66826682- return xerrors.Errorf("Value in field t.TargetBranch was too long")
64796479+ if len(t.CreatedAt) > 1000000 {
64806480+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
66836481 }
6684648266856685- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetBranch))); err != nil {
64836483+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
66866484 return err
66876485 }
66886688- if _, err := cw.WriteString(string(t.TargetBranch)); err != nil {
64866486+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
66896487 return err
66906488 }
66916489 return nil
···6716651467176515 n := extra
6718651667196719- nameBuf := make([]byte, 12)
65176517+ nameBuf := make([]byte, 9)
67206518 for i := uint64(0); i < n; i++ {
67216519 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
67226520 if err != nil {
···6786658467876585 t.Title = string(sval)
67886586 }
67896789- // t.PullId (int64) (int64)
67906790- case "pullId":
67916791- {
67926792- maj, extra, err := cr.ReadHeader()
67936793- if err != nil {
67946794- return err
67956795- }
67966796- var extraI int64
67976797- switch maj {
67986798- case cbg.MajUnsignedInt:
67996799- extraI = int64(extra)
68006800- if extraI < 0 {
68016801- return fmt.Errorf("int64 positive overflow")
68026802- }
68036803- case cbg.MajNegativeInt:
68046804- extraI = int64(extra)
68056805- if extraI < 0 {
68066806- return fmt.Errorf("int64 negative overflow")
68076807- }
68086808- extraI = -1 - extraI
68096809- default:
68106810- return fmt.Errorf("wrong type for int64 field: %d", maj)
68116811- }
68126812-68136813- t.PullId = int64(extraI)
68146814- }
68156587 // t.Source (tangled.RepoPull_Source) (struct)
68166588 case "source":
68176589···68326604 }
6833660568346606 }
68356835- // t.CreatedAt (string) (string)
68366836- case "createdAt":
66076607+ // t.Target (tangled.RepoPull_Target) (struct)
66086608+ case "target":
6837660968386610 {
68396839- sval, err := cbg.ReadStringWithMax(cr, 1000000)
66116611+66126612+ b, err := cr.ReadByte()
68406613 if err != nil {
68416614 return err
68426615 }
68436843-68446844- t.CreatedAt = string(sval)
68456845- }
68466846- // t.TargetRepo (string) (string)
68476847- case "targetRepo":
68486848-68496849- {
68506850- sval, err := cbg.ReadStringWithMax(cr, 1000000)
68516851- if err != nil {
68526852- return err
66166616+ if b != cbg.CborNull[0] {
66176617+ if err := cr.UnreadByte(); err != nil {
66186618+ return err
66196619+ }
66206620+ t.Target = new(RepoPull_Target)
66216621+ if err := t.Target.UnmarshalCBOR(cr); err != nil {
66226622+ return xerrors.Errorf("unmarshaling t.Target pointer: %w", err)
66236623+ }
68536624 }
6854662568556855- t.TargetRepo = string(sval)
68566626 }
68576857- // t.TargetBranch (string) (string)
68586858- case "targetBranch":
66276627+ // t.CreatedAt (string) (string)
66286628+ case "createdAt":
6859662968606630 {
68616631 sval, err := cbg.ReadStringWithMax(cr, 1000000)
···68636633 return err
68646634 }
6865663568666866- t.TargetBranch = string(sval)
66366636+ t.CreatedAt = string(sval)
68676637 }
6868663868696639 default:
···68836653 }
6884665468856655 cw := cbg.NewCborWriter(w)
68866886- fieldCount := 7
6887665668886888- if t.CommentId == nil {
68896889- fieldCount--
68906890- }
68916891-68926892- if t.Owner == nil {
68936893- fieldCount--
68946894- }
68956895-68966896- if t.Repo == nil {
68976897- fieldCount--
68986898- }
68996899-69006900- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
66576657+ if _, err := cw.Write([]byte{164}); err != nil {
69016658 return err
69026659 }
69036660···69476704 return err
69486705 }
6949670669506950- // t.Repo (string) (string)
69516951- if t.Repo != nil {
69526952-69536953- if len("repo") > 1000000 {
69546954- return xerrors.Errorf("Value in field \"repo\" was too long")
69556955- }
69566956-69576957- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
69586958- return err
69596959- }
69606960- if _, err := cw.WriteString(string("repo")); err != nil {
69616961- return err
69626962- }
69636963-69646964- if t.Repo == nil {
69656965- if _, err := cw.Write(cbg.CborNull); err != nil {
69666966- return err
69676967- }
69686968- } else {
69696969- if len(*t.Repo) > 1000000 {
69706970- return xerrors.Errorf("Value in field t.Repo was too long")
69716971- }
69726972-69736973- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
69746974- return err
69756975- }
69766976- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
69776977- return err
69786978- }
69796979- }
69806980- }
69816981-69826707 // t.LexiconTypeID (string) (string)
69836708 if len("$type") > 1000000 {
69846709 return xerrors.Errorf("Value in field \"$type\" was too long")
···69986723 return err
69996724 }
7000672570017001- // t.Owner (string) (string)
70027002- if t.Owner != nil {
70037003-70047004- if len("owner") > 1000000 {
70057005- return xerrors.Errorf("Value in field \"owner\" was too long")
70067006- }
70077007-70087008- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
70097009- return err
70107010- }
70117011- if _, err := cw.WriteString(string("owner")); err != nil {
70127012- return err
70137013- }
70147014-70157015- if t.Owner == nil {
70167016- if _, err := cw.Write(cbg.CborNull); err != nil {
70177017- return err
70187018- }
70197019- } else {
70207020- if len(*t.Owner) > 1000000 {
70217021- return xerrors.Errorf("Value in field t.Owner was too long")
70227022- }
70237023-70247024- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
70257025- return err
70267026- }
70277027- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
70287028- return err
70297029- }
70307030- }
70317031- }
70327032-70337033- // t.CommentId (int64) (int64)
70347034- if t.CommentId != nil {
70357035-70367036- if len("commentId") > 1000000 {
70377037- return xerrors.Errorf("Value in field \"commentId\" was too long")
70387038- }
70397039-70407040- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
70417041- return err
70427042- }
70437043- if _, err := cw.WriteString(string("commentId")); err != nil {
70447044- return err
70457045- }
70467046-70477047- if t.CommentId == nil {
70487048- if _, err := cw.Write(cbg.CborNull); err != nil {
70497049- return err
70507050- }
70517051- } else {
70527052- if *t.CommentId >= 0 {
70537053- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
70547054- return err
70557055- }
70567056- } else {
70577057- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
70587058- return err
70597059- }
70607060- }
70617061- }
70627062-70637063- }
70647064-70656726 // t.CreatedAt (string) (string)
70666727 if len("createdAt") > 1000000 {
70676728 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···7150681171516812 t.Pull = string(sval)
71526813 }
71537153- // t.Repo (string) (string)
71547154- case "repo":
71557155-71567156- {
71577157- b, err := cr.ReadByte()
71587158- if err != nil {
71597159- return err
71607160- }
71617161- if b != cbg.CborNull[0] {
71627162- if err := cr.UnreadByte(); err != nil {
71637163- return err
71647164- }
71657165-71667166- sval, err := cbg.ReadStringWithMax(cr, 1000000)
71677167- if err != nil {
71687168- return err
71697169- }
71707170-71717171- t.Repo = (*string)(&sval)
71727172- }
71737173- }
71746814 // t.LexiconTypeID (string) (string)
71756815 case "$type":
71766816···7182682271836823 t.LexiconTypeID = string(sval)
71846824 }
71857185- // t.Owner (string) (string)
71867186- case "owner":
71877187-71887188- {
71897189- b, err := cr.ReadByte()
71907190- if err != nil {
71917191- return err
71927192- }
71937193- if b != cbg.CborNull[0] {
71947194- if err := cr.UnreadByte(); err != nil {
71957195- return err
71967196- }
71977197-71987198- sval, err := cbg.ReadStringWithMax(cr, 1000000)
71997199- if err != nil {
72007200- return err
72017201- }
72027202-72037203- t.Owner = (*string)(&sval)
72047204- }
72057205- }
72067206- // t.CommentId (int64) (int64)
72077207- case "commentId":
72087208- {
72097209-72107210- b, err := cr.ReadByte()
72117211- if err != nil {
72127212- return err
72137213- }
72147214- if b != cbg.CborNull[0] {
72157215- if err := cr.UnreadByte(); err != nil {
72167216- return err
72177217- }
72187218- maj, extra, err := cr.ReadHeader()
72197219- if err != nil {
72207220- return err
72217221- }
72227222- var extraI int64
72237223- switch maj {
72247224- case cbg.MajUnsignedInt:
72257225- extraI = int64(extra)
72267226- if extraI < 0 {
72277227- return fmt.Errorf("int64 positive overflow")
72287228- }
72297229- case cbg.MajNegativeInt:
72307230- extraI = int64(extra)
72317231- if extraI < 0 {
72327232- return fmt.Errorf("int64 negative overflow")
72337233- }
72347234- extraI = -1 - extraI
72357235- default:
72367236- return fmt.Errorf("wrong type for int64 field: %d", maj)
72377237- }
72387238-72397239- t.CommentId = (*int64)(&extraI)
72407240- }
72417241- }
72426825 // t.CreatedAt (string) (string)
72436826 case "createdAt":
72446827
···
72686851 }
72696852
72706853 cw := cbg.NewCborWriter(w)
72717271- fieldCount := 2
68546854+ fieldCount := 3
72726855
72736856 if t.Repo == nil {
72746857 fieldCount--
72756858 }
72766859
72776860 if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
68616861+ return err
68626862+ }
68636863+
68646864+ // t.Sha (string) (string)
68656865+ if len("sha") > 1000000 {
68666866+ return xerrors.Errorf("Value in field \"sha\" was too long")
68676867+ }
68686868+
68696869+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sha"))); err != nil {
68706870+ return err
68716871+ }
68726872+ if _, err := cw.WriteString(string("sha")); err != nil {
68736873+ return err
68746874+ }
68756875+
68766876+ if len(t.Sha) > 1000000 {
68776877+ return xerrors.Errorf("Value in field t.Sha was too long")
68786878+ }
68796879+
68806880+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Sha))); err != nil {
68816881+ return err
68826882+ }
68836883+ if _, err := cw.WriteString(string(t.Sha)); err != nil {
72786884 return err
72796885 }
72806886···73766982 }
7377698373786984 switch string(nameBuf[:nameLen]) {
73797379- // t.Repo (string) (string)
69856985+ // t.Sha (string) (string)
69866986+ case "sha":
69876987+69886988+ {
69896989+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
69906990+ if err != nil {
69916991+ return err
69926992+ }
69936993+69946994+ t.Sha = string(sval)
69956995+ }
69966996+ // t.Repo (string) (string)
73806997 case "repo":
7381699873826999 {
···7583720075847201 return nil
75857202}
72037203+func (t *RepoPull_Target) MarshalCBOR(w io.Writer) error {
72047204+ if t == nil {
72057205+ _, err := w.Write(cbg.CborNull)
72067206+ return err
72077207+ }
72087208+72097209+ cw := cbg.NewCborWriter(w)
72107210+72117211+ if _, err := cw.Write([]byte{162}); err != nil {
72127212+ return err
72137213+ }
72147214+72157215+ // t.Repo (string) (string)
72167216+ if len("repo") > 1000000 {
72177217+ return xerrors.Errorf("Value in field \"repo\" was too long")
72187218+ }
72197219+72207220+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
72217221+ return err
72227222+ }
72237223+ if _, err := cw.WriteString(string("repo")); err != nil {
72247224+ return err
72257225+ }
72267226+72277227+ if len(t.Repo) > 1000000 {
72287228+ return xerrors.Errorf("Value in field t.Repo was too long")
72297229+ }
72307230+72317231+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
72327232+ return err
72337233+ }
72347234+ if _, err := cw.WriteString(string(t.Repo)); err != nil {
72357235+ return err
72367236+ }
72377237+72387238+ // t.Branch (string) (string)
72397239+ if len("branch") > 1000000 {
72407240+ return xerrors.Errorf("Value in field \"branch\" was too long")
72417241+ }
72427242+72437243+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("branch"))); err != nil {
72447244+ return err
72457245+ }
72467246+ if _, err := cw.WriteString(string("branch")); err != nil {
72477247+ return err
72487248+ }
72497249+72507250+ if len(t.Branch) > 1000000 {
72517251+ return xerrors.Errorf("Value in field t.Branch was too long")
72527252+ }
72537253+72547254+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Branch))); err != nil {
72557255+ return err
72567256+ }
72577257+ if _, err := cw.WriteString(string(t.Branch)); err != nil {
72587258+ return err
72597259+ }
72607260+ return nil
72617261+}
72627262+72637263+func (t *RepoPull_Target) UnmarshalCBOR(r io.Reader) (err error) {
72647264+ *t = RepoPull_Target{}
72657265+72667266+ cr := cbg.NewCborReader(r)
72677267+72687268+ maj, extra, err := cr.ReadHeader()
72697269+ if err != nil {
72707270+ return err
72717271+ }
72727272+ defer func() {
72737273+ if err == io.EOF {
72747274+ err = io.ErrUnexpectedEOF
72757275+ }
72767276+ }()
72777277+72787278+ if maj != cbg.MajMap {
72797279+ return fmt.Errorf("cbor input should be of type map")
72807280+ }
72817281+72827282+ if extra > cbg.MaxLength {
72837283+ return fmt.Errorf("RepoPull_Target: map struct too large (%d)", extra)
72847284+ }
72857285+72867286+ n := extra
72877287+72887288+ nameBuf := make([]byte, 6)
72897289+ for i := uint64(0); i < n; i++ {
72907290+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
72917291+ if err != nil {
72927292+ return err
72937293+ }
72947294+72957295+ if !ok {
72967296+ // Field doesn't exist on this type, so ignore it
72977297+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
72987298+ return err
72997299+ }
73007300+ continue
73017301+ }
73027302+73037303+ switch string(nameBuf[:nameLen]) {
73047304+ // t.Repo (string) (string)
73057305+ case "repo":
73067306+73077307+ {
73087308+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
73097309+ if err != nil {
73107310+ return err
73117311+ }
73127312+73137313+ t.Repo = string(sval)
73147314+ }
73157315+ // t.Branch (string) (string)
73167316+ case "branch":
73177317+73187318+ {
73197319+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
73207320+ if err != nil {
73217321+ return err
73227322+ }
73237323+73247324+ t.Branch = string(sval)
73257325+ }
73267326+73277327+ default:
73287328+ // Field doesn't exist on this type, so ignore it
73297329+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
73307330+ return err
73317331+ }
73327332+ }
73337333+ }
73347334+
73357335+ return nil
73367336+}
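With this, the flat TargetRepo and TargetBranch strings on RepoPull are folded into a nested target object that mirrors RepoPull_Source, and the source side gains a sha pin; both ends of a pull are now described by the same repo/branch shape.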
75867337func (t *Spindle) MarshalCBOR(w io.Writer) error {
75877338 if t == nil {
75887339 _, err := w.Write(cbg.CborNull)
···7911766279127663 return nil
79137664}
76657665+func (t *String) MarshalCBOR(w io.Writer) error {
76667666+ if t == nil {
76677667+ _, err := w.Write(cbg.CborNull)
76687668+ return err
76697669+ }
76707670+76717671+ cw := cbg.NewCborWriter(w)
76727672+76737673+ if _, err := cw.Write([]byte{165}); err != nil {
76747674+ return err
76757675+ }
76767676+76777677+ // t.LexiconTypeID (string) (string)
76787678+ if len("$type") > 1000000 {
76797679+ return xerrors.Errorf("Value in field \"$type\" was too long")
76807680+ }
76817681+76827682+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
76837683+ return err
76847684+ }
76857685+ if _, err := cw.WriteString(string("$type")); err != nil {
76867686+ return err
76877687+ }
76887688+76897689+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.string"))); err != nil {
76907690+ return err
76917691+ }
76927692+ if _, err := cw.WriteString(string("sh.tangled.string")); err != nil {
76937693+ return err
76947694+ }
76957695+76967696+ // t.Contents (string) (string)
76977697+ if len("contents") > 1000000 {
76987698+ return xerrors.Errorf("Value in field \"contents\" was too long")
76997699+ }
77007700+77017701+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("contents"))); err != nil {
77027702+ return err
77037703+ }
77047704+ if _, err := cw.WriteString(string("contents")); err != nil {
77057705+ return err
77067706+ }
77077707+77087708+ if len(t.Contents) > 1000000 {
77097709+ return xerrors.Errorf("Value in field t.Contents was too long")
77107710+ }
77117711+77127712+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Contents))); err != nil {
77137713+ return err
77147714+ }
77157715+ if _, err := cw.WriteString(string(t.Contents)); err != nil {
77167716+ return err
77177717+ }
77187718+77197719+ // t.Filename (string) (string)
77207720+ if len("filename") > 1000000 {
77217721+ return xerrors.Errorf("Value in field \"filename\" was too long")
77227722+ }
77237723+77247724+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("filename"))); err != nil {
77257725+ return err
77267726+ }
77277727+ if _, err := cw.WriteString(string("filename")); err != nil {
77287728+ return err
77297729+ }
77307730+77317731+ if len(t.Filename) > 1000000 {
77327732+ return xerrors.Errorf("Value in field t.Filename was too long")
77337733+ }
77347734+77357735+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Filename))); err != nil {
77367736+ return err
77377737+ }
77387738+ if _, err := cw.WriteString(string(t.Filename)); err != nil {
77397739+ return err
77407740+ }
77417741+77427742+ // t.CreatedAt (string) (string)
77437743+ if len("createdAt") > 1000000 {
77447744+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
77457745+ }
77467746+77477747+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
77487748+ return err
77497749+ }
77507750+ if _, err := cw.WriteString(string("createdAt")); err != nil {
77517751+ return err
77527752+ }
77537753+77547754+ if len(t.CreatedAt) > 1000000 {
77557755+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
77567756+ }
77577757+77587758+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
77597759+ return err
77607760+ }
77617761+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
77627762+ return err
77637763+ }
77647764+77657765+ // t.Description (string) (string)
77667766+ if len("description") > 1000000 {
77677767+ return xerrors.Errorf("Value in field \"description\" was too long")
77687768+ }
77697769+77707770+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("description"))); err != nil {
77717771+ return err
77727772+ }
77737773+ if _, err := cw.WriteString(string("description")); err != nil {
77747774+ return err
77757775+ }
77767776+77777777+ if len(t.Description) > 1000000 {
77787778+ return xerrors.Errorf("Value in field t.Description was too long")
77797779+ }
77807780+77817781+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Description))); err != nil {
77827782+ return err
77837783+ }
77847784+ if _, err := cw.WriteString(string(t.Description)); err != nil {
77857785+ return err
77867786+ }
77877787+ return nil
77887788+}
77897789+77907790+func (t *String) UnmarshalCBOR(r io.Reader) (err error) {
77917791+ *t = String{}
77927792+77937793+ cr := cbg.NewCborReader(r)
77947794+77957795+ maj, extra, err := cr.ReadHeader()
77967796+ if err != nil {
77977797+ return err
77987798+ }
77997799+ defer func() {
78007800+ if err == io.EOF {
78017801+ err = io.ErrUnexpectedEOF
78027802+ }
78037803+ }()
78047804+78057805+ if maj != cbg.MajMap {
78067806+ return fmt.Errorf("cbor input should be of type map")
78077807+ }
78087808+78097809+ if extra > cbg.MaxLength {
78107810+ return fmt.Errorf("String: map struct too large (%d)", extra)
78117811+ }
78127812+78137813+ n := extra
78147814+78157815+ nameBuf := make([]byte, 11)
78167816+ for i := uint64(0); i < n; i++ {
78177817+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
78187818+ if err != nil {
78197819+ return err
78207820+ }
78217821+78227822+ if !ok {
78237823+ // Field doesn't exist on this type, so ignore it
78247824+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
78257825+ return err
78267826+ }
78277827+ continue
78287828+ }
78297829+78307830+ switch string(nameBuf[:nameLen]) {
78317831+ // t.LexiconTypeID (string) (string)
78327832+ case "$type":
78337833+78347834+ {
78357835+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78367836+ if err != nil {
78377837+ return err
78387838+ }
78397839+78407840+ t.LexiconTypeID = string(sval)
78417841+ }
78427842+ // t.Contents (string) (string)
78437843+ case "contents":
78447844+78457845+ {
78467846+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78477847+ if err != nil {
78487848+ return err
78497849+ }
78507850+78517851+ t.Contents = string(sval)
78527852+ }
78537853+ // t.Filename (string) (string)
78547854+ case "filename":
78557855+78567856+ {
78577857+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78587858+ if err != nil {
78597859+ return err
78607860+ }
78617861+78627862+ t.Filename = string(sval)
78637863+ }
78647864+ // t.CreatedAt (string) (string)
78657865+ case "createdAt":
78667866+78677867+ {
78687868+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78697869+ if err != nil {
78707870+ return err
78717871+ }
78727872+78737873+ t.CreatedAt = string(sval)
78747874+ }
78757875+ // t.Description (string) (string)
78767876+ case "description":
78777877+78787878+ {
78797879+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78807880+ if err != nil {
78817881+ return err
78827882+ }
78837883+78847884+ t.Description = string(sval)
78857885+ }
78867886+78877887+ default:
78887888+ // Field doesn't exist on this type, so ignore it
78897889+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
78907890+ return err
78917891+ }
78927892+ }
78937893+ }
78947894+
78957895+ return nil
78967896+}
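Everything in cbor_gen.go above is generator output; the usual workflow is to re-run the generator over the lexicon types rather than edit these methods by hand. A minimal sketch with whyrusleeping/cbor-gen, whose map-encoder output matches the style of this file; the type list and file layout below are illustrative assumptions, not the repo's actual gen entrypoint:

	//go:build ignore

	// Regenerate map-keyed CBOR encoders: go run gen.go
	package main

	import (
		"log"

		cbg "github.com/whyrusleeping/cbor-gen"
	)

	// Illustrative stand-in for an api/tangled type; the cborgen tags mirror
	// the "$type" const and camelCase keys visible in the generated output.
	type FeedReaction struct {
		LexiconTypeID string `cborgen:"$type,const=sh.tangled.feed.reaction"`
		Subject       string `cborgen:"subject"`
		Reaction      string `cborgen:"reaction"`
		CreatedAt     string `cborgen:"createdAt"`
	}

	func main() {
		// WriteMapEncodersToFile emits MarshalCBOR/UnmarshalCBOR methods that
		// encode structs as CBOR maps keyed by field name, as in this file.
		if err := cbg.WriteMapEncodersToFile("cbor_gen.go", "tangled", FeedReaction{}); err != nil {
			log.Fatal(err)
		}
	}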
···
11+package db
22+
33+import (
44+ "fmt"
55+ "strings"
66+ "time"
77+88+ "github.com/bluesky-social/indigo/atproto/syntax"
99+)
1010+
1111+type Collaborator struct {
1212+ // identifiers for the record
1313+ Id int64
1414+ Did syntax.DID
1515+ Rkey string
1616+
1717+ // content
1818+ SubjectDid syntax.DID
1919+ RepoAt syntax.ATURI
2020+
2121+ // meta
2222+ Created time.Time
2323+}
2424+
2525+func AddCollaborator(e Execer, c Collaborator) error {
2626+ _, err := e.Exec(
2727+ `insert into collaborators (did, rkey, subject_did, repo_at) values (?, ?, ?, ?);`,
2828+ c.Did, c.Rkey, c.SubjectDid, c.RepoAt,
2929+ )
3030+ return err
3131+}
3232+
3333+func DeleteCollaborator(e Execer, filters ...filter) error {
3434+ var conditions []string
3535+ var args []any
3636+ for _, filter := range filters {
3737+ conditions = append(conditions, filter.Condition())
3838+ args = append(args, filter.Arg()...)
3939+ }
4040+4141+ whereClause := ""
4242+ if conditions != nil {
4343+ whereClause = " where " + strings.Join(conditions, " and ")
4444+ }
4545+
4646+ query := fmt.Sprintf(`delete from collaborators %s`, whereClause)
4747+
4848+ _, err := e.Exec(query, args...)
4949+ return err
5050+}
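One sharp edge in DeleteCollaborator: with no filters the where clause stays empty and the statement deletes every row in collaborators, so callers should always scope the call. A hedged sketch; FilterIn is the package helper used below in CollaboratingIn, while e, repoAt, and memberDid stand in for caller state:

	// Scope the delete to one repo/member pair rather than the whole table.
	err := DeleteCollaborator(e,
		FilterIn("repo_at", []string{string(repoAt)}),
		FilterIn("subject_did", []string{string(memberDid)}),
	)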
5151+
5252+func CollaboratingIn(e Execer, collaborator string) ([]Repo, error) {
5353+ rows, err := e.Query(`select repo_at from collaborators where subject_did = ?`, collaborator)
5454+ if err != nil {
5555+ return nil, err
5656+ }
5757+ defer rows.Close()
5858+
5959+ var repoAts []string
6060+ for rows.Next() {
6161+ var aturi string
6262+ err := rows.Scan(&aturi)
6363+ if err != nil {
6464+ return nil, err
6565+ }
6666+ repoAts = append(repoAts, aturi)
6767+ }
6868+ if err := rows.Err(); err != nil {
6969+ return nil, err
7070+ }
7171+ if repoAts == nil {
7272+ return nil, nil
7373+ }
7474+
7575+ return GetRepos(e, 0, FilterIn("at_uri", repoAts))
7676+}
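Taken together, a minimal package-internal sketch of how these helpers compose, assuming Execer is satisfied by an *sql.DB-style Exec/Query pair and using placeholder identifiers throughout:

	// addAndList is illustrative, not part of the diff: record a collaborator,
	// then resolve the repos that member now collaborates in.
	func addAndList(e Execer) ([]Repo, error) {
		c := Collaborator{
			Did:        syntax.DID("did:plc:owner"),  // record author
			Rkey:       "3l5examplerkey",             // placeholder record key
			SubjectDid: syntax.DID("did:plc:member"), // who is being added
			RepoAt:     syntax.ATURI("at://did:plc:owner/sh.tangled.repo/3kexample"),
		}
		if err := AddCollaborator(e, c); err != nil {
			return nil, err
		}
		// CollaboratingIn turns the stored repo_at rows back into Repo values.
		return CollaboratingIn(e, c.SubjectDid.String())
	}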
+363-27
appview/db/db.go
···
2727 }
2828
2929 func Make(dbPath string) (*DB, error) {
3030- db, err := sql.Open("sqlite3", dbPath)
3030+ // https://github.com/mattn/go-sqlite3#connection-string
3131+ opts := []string{
3232+ "_foreign_keys=1",
3333+ "_journal_mode=WAL",
3434+ "_synchronous=NORMAL",
3535+ "_auto_vacuum=incremental",
3636+ }
3737+
3838+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
3939+ if err != nil {
4040+ return nil, err
4141+ }
4242+
4343+ ctx := context.Background()
4444+
4545+ conn, err := db.Conn(ctx)
3146 if err != nil {
3247 return nil, err
3348 }
3434- _, err = db.Exec(`
3535- pragma journal_mode = WAL;
3636- pragma synchronous = normal;
3737- pragma foreign_keys = on;
3838- pragma temp_store = memory;
3939- pragma mmap_size = 30000000000;
4040- pragma page_size = 32768;
4141- pragma auto_vacuum = incremental;
4242- pragma busy_timeout = 5000;
4949+ defer conn.Close()
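The DSN options above do more than relocate the dropped pragma block: with database/sql, a bare db.Exec("pragma ...") only configures whichever pooled connection happens to serve it, while go-sqlite3's _foreign_keys, _journal_mode, _synchronous, and _auto_vacuum parameters apply to every connection the pool opens. Pinning one *sql.Conn here additionally keeps the schema setup, migrations, and the non-transactional foreign_keys toggling later in this function on a single connection. A quick hedged check that the mode took:

	// Illustrative sanity check: this connection should report WAL.
	var mode string
	if err := conn.QueryRowContext(ctx, "pragma journal_mode").Scan(&mode); err != nil {
		return nil, err
	}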
4350
5151+ _, err = conn.ExecContext(ctx, `
4452 create table if not exists registrations (
4553 id integer primary key autoincrement,
4654 domain text not null unique,
···
199207 unique(starred_by_did, repo_at)
200208 );
201209
210210+ create table if not exists reactions (
211211+ id integer primary key autoincrement,
212212+ reacted_by_did text not null,
213213+ thread_at text not null,
214214+ kind text not null,
215215+ rkey text not null,
216216+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
217217+ unique(reacted_by_did, thread_at, kind)
218218+ );
219219+202220 create table if not exists emails (
203221 id integer primary key autoincrement,
204222 did text not null,
···330348 verified text, -- time of verification
331349 created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
332350333333- unique(instance)
351351+ unique(owner, instance)
352352+ );
353353+354354+ create table if not exists spindle_members (
355355+ -- identifiers for the record
356356+ id integer primary key autoincrement,
357357+ did text not null,
358358+ rkey text not null,
359359+
360360+ -- data
361361+ instance text not null,
362362+ subject text not null,
363363+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
364364+
365365+ -- constraints
366366+ unique (did, instance, subject)
334367 );
335368
336369 create table if not exists pipelines (
···
395428 on delete cascade
396429 );
397430
431431+ create table if not exists repo_languages (
432432+ -- identifiers
433433+ id integer primary key autoincrement,
434434+
435435+ -- repo identifiers
436436+ repo_at text not null,
437437+ ref text not null,
438438+ is_default_ref integer not null default 0,
439439+
440440+ -- language breakdown
441441+ language text not null,
442442+ bytes integer not null check (bytes >= 0),
443443+
444444+ unique(repo_at, ref, language)
445445+ );
446446+
447447+ create table if not exists signups_inflight (
448448+ id integer primary key autoincrement,
449449+ email text not null unique,
450450+ invite_code text not null,
451451+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
452452+ );
453453+
454454+ create table if not exists strings (
455455+ -- identifiers
456456+ did text not null,
457457+ rkey text not null,
458458+
459459+ -- content
460460+ filename text not null,
461461+ description text,
462462+ content text not null,
463463+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
464464+ edited text,
465465+
466466+ primary key (did, rkey)
467467+ );
468468+
398469 create table if not exists migrations (
399470 id integer primary key autoincrement,
400471 name text unique
401472 );
473473+
474474+ -- indexes for better star query performance
475475+ create index if not exists idx_stars_created on stars(created);
476476+ create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
402477 `)
403478 if err != nil {
404479 return nil, err
405480 }
406481
407482 // run migrations
408408- runMigration(db, "add-description-to-repos", func(tx *sql.Tx) error {
483483+ runMigration(conn, "add-description-to-repos", func(tx *sql.Tx) error {
409484 tx.Exec(`
410485 alter table repos add column description text check (length(description) <= 200);
411486 `)
412487 return nil
413488 })
414489415415- runMigration(db, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
490490+ runMigration(conn, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
416491 // add unconstrained column
417492 _, err := tx.Exec(`
418493 alter table public_keys
···
435510 return nil
436511 })
437512438438- runMigration(db, "add-rkey-to-comments", func(tx *sql.Tx) error {
513513+ runMigration(conn, "add-rkey-to-comments", func(tx *sql.Tx) error {
439514 _, err := tx.Exec(`
440515 alter table comments drop column comment_at;
441516 alter table comments add column rkey text;
···
443518 return err
444519 })
445520446446- runMigration(db, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
521521+ runMigration(conn, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
447522 _, err := tx.Exec(`
448523 alter table comments add column deleted text; -- timestamp
449524 alter table comments add column edited text; -- timestamp
···451526 return err
452527 })
453528454454- runMigration(db, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
529529+ runMigration(conn, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
455530 _, err := tx.Exec(`
456531 alter table pulls add column source_branch text;
457532 alter table pulls add column source_repo_at text;
···460535 return err
461536 })
462537463463- runMigration(db, "add-source-to-repos", func(tx *sql.Tx) error {
538538+ runMigration(conn, "add-source-to-repos", func(tx *sql.Tx) error {
464539 _, err := tx.Exec(`
465540 alter table repos add column source text;
466541 `)
···471546 // NOTE: this cannot be done in a transaction, so it is run outside [0]
472547 //
473548 // [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
474474- db.Exec("pragma foreign_keys = off;")
475475- runMigration(db, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
549549+ conn.ExecContext(ctx, "pragma foreign_keys = off;")
550550+ runMigration(conn, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
476551 _, err := tx.Exec(`
477552 create table pulls_new (
478553 -- identifiers
···527602 `)
528603 return err
529604 })
530530- db.Exec("pragma foreign_keys = on;")
605605+ conn.ExecContext(ctx, "pragma foreign_keys = on;")
531606532607 // run migrations
533533- runMigration(db, "add-spindle-to-repos", func(tx *sql.Tx) error {
608608+ runMigration(conn, "add-spindle-to-repos", func(tx *sql.Tx) error {
534609 tx.Exec(`
535610 alter table repos add column spindle text;
536611 `)
537612 return nil
538613 })
539614615615+ // drop all knot secrets, add unique constraint to knots
616616+ //
617617+ // knots will henceforth use service auth for signed requests
618618+ runMigration(conn, "no-more-secrets", func(tx *sql.Tx) error {
619619+ _, err := tx.Exec(`
620620+ create table registrations_new (
621621+ id integer primary key autoincrement,
622622+ domain text not null,
623623+ did text not null,
624624+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
625625+ registered text,
626626+ read_only integer not null default 0,
627627+ unique(domain, did)
628628+ );
629629+630630+ insert into registrations_new (id, domain, did, created, registered, read_only)
631631+ select id, domain, did, created, registered, 1 from registrations
632632+ where registered is not null;
633633+634634+ drop table registrations;
635635+ alter table registrations_new rename to registrations;
636636+ `)
637637+ return err
638638+ })
639639+640640+ // recreate and add rkey + created columns with default constraint
641641+ runMigration(conn, "rework-collaborators-table", func(tx *sql.Tx) error {
642642+ // create new table
643643+ // - repo_at instead of repo integer
644644+ // - rkey field
645645+ // - created field
646646+ _, err := tx.Exec(`
647647+ create table collaborators_new (
648648+ -- identifiers for the record
649649+ id integer primary key autoincrement,
650650+ did text not null,
651651+ rkey text,
652652+653653+ -- content
654654+ subject_did text not null,
655655+ repo_at text not null,
656656+657657+ -- meta
658658+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
659659+660660+ -- constraints
661661+ foreign key (repo_at) references repos(at_uri) on delete cascade
662662+ )
663663+ `)
664664+ if err != nil {
665665+ return err
666666+ }
667667+668668+ // copy data
669669+ _, err = tx.Exec(`
670670+ insert into collaborators_new (id, did, rkey, subject_did, repo_at)
671671+ select
672672+ c.id,
673673+ r.did,
674674+ '',
675675+ c.did,
676676+ r.at_uri
677677+ from collaborators c
678678+ join repos r on c.repo = r.id
679679+ `)
680680+ if err != nil {
681681+ return err
682682+ }
683683+684684+ // drop old table
685685+ _, err = tx.Exec(`drop table collaborators`)
686686+ if err != nil {
687687+ return err
688688+ }
689689+690690+ // rename new table
691691+ _, err = tx.Exec(`alter table collaborators_new rename to collaborators`)
692692+ return err
693693+ })
694694+695695+ runMigration(conn, "add-rkey-to-issues", func(tx *sql.Tx) error {
696696+ _, err := tx.Exec(`
697697+ alter table issues add column rkey text not null default '';
698698+699699+ -- get last url section from issue_at and save to rkey column
700700+ update issues
701701+ set rkey = replace(issue_at, rtrim(issue_at, replace(issue_at, '/', '')), '');
702702+ `)
703703+ return err
704704+ })
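The nested `replace`/`rtrim` is a standard SQLite idiom for extracting the last path segment. Worked through with a hypothetical at-uri:

```sql
-- issue_at = 'at://did:plc:alice/sh.tangled.repo.issue/3kabc'
--
-- replace(issue_at, '/', '')      strips the slashes; the result doubles as a
--                                 character set containing every non-'/' char
-- rtrim(issue_at, <that set>)     trims those chars from the right, stopping at
--                                 the last '/': 'at://did:plc:alice/sh.tangled.repo.issue/'
-- replace(issue_at, <prefix>, '') removes that prefix, leaving the rkey: '3kabc'
```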
705705+706706+ // repurpose the read-only column to "needs-upgrade"
707707+ runMigration(conn, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
708708+ _, err := tx.Exec(`
709709+ alter table registrations rename column read_only to needs_upgrade;
710710+ `)
711711+ return err
712712+ })
713713+714714+ // require all knots to upgrade after the release of total xrpc
715715+ runMigration(conn, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
716716+ _, err := tx.Exec(`
717717+ update registrations set needs_upgrade = 1;
718718+ `)
719719+ return err
720720+ })
721721+ // require all spindles to upgrade after the release of total xrpc
723723+ runMigration(conn, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
724724+ _, err := tx.Exec(`
725725+ alter table spindles add column needs_upgrade integer not null default 0;
726726+ `)
727727+ if err != nil {
728728+ return err
729729+ }
730730+731731+ _, err = tx.Exec(`
732732+ update spindles set needs_upgrade = 1;
733733+ `)
734734+ return err
735735+ })
736736+737737+ // remove issue_at from issues and replace with generated column
738738+ //
739739+ // this requires a full table recreation because stored columns
740740+ // cannot be added via alter
741741+ //
742742+ // a couple of other changes:
743743+ // - columns renamed to be more consistent
744744+ // - adds edited and deleted fields
745745+ //
746746+ // disable foreign-keys for the next migration
747747+ conn.ExecContext(ctx, "pragma foreign_keys = off;")
748748+ runMigration(conn, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
749749+ _, err := tx.Exec(`
750750+ create table if not exists issues_new (
751751+ -- identifiers
752752+ id integer primary key autoincrement,
753753+ did text not null,
754754+ rkey text not null,
755755+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.issue' || '/' || rkey) stored,
756756+757757+ -- at identifiers
758758+ repo_at text not null,
759759+760760+ -- content
761761+ issue_id integer not null,
762762+ title text not null,
763763+ body text not null,
764764+ open integer not null default 1,
765765+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
766766+ edited text, -- timestamp
767767+ deleted text, -- timestamp
768768+769769+ unique(did, rkey),
770770+ unique(repo_at, issue_id),
771771+ unique(at_uri),
772772+ foreign key (repo_at) references repos(at_uri) on delete cascade
773773+ );
774774+ `)
775775+ if err != nil {
776776+ return err
777777+ }
778778+779779+ // transfer data
780780+ _, err = tx.Exec(`
781781+ insert into issues_new (id, did, rkey, repo_at, issue_id, title, body, open, created)
782782+ select
783783+ i.id,
784784+ i.owner_did,
785785+ i.rkey,
786786+ i.repo_at,
787787+ i.issue_id,
788788+ i.title,
789789+ i.body,
790790+ i.open,
791791+ i.created
792792+ from issues i;
793793+ `)
794794+ if err != nil {
795795+ return err
796796+ }
797797+798798+ // drop old table
799799+ _, err = tx.Exec(`drop table issues`)
800800+ if err != nil {
801801+ return err
802802+ }
803803+804804+ // rename new table
805805+ _, err = tx.Exec(`alter table issues_new rename to issues`)
806806+ return err
807807+ })
808808+ conn.ExecContext(ctx, "pragma foreign_keys = on;")
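The stored generated column derives each issue's AT-URI from `did` and `rkey` once, at write time. A sketch with hypothetical values:

```sql
-- did = 'did:plc:alice', rkey = '3kabc'
-- at_uri is computed and persisted as:
--   'at://did:plc:alice/sh.tangled.repo.issue/3kabc'
select at_uri from issues where did = 'did:plc:alice' and rkey = '3kabc';
```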
809809+810810+ // - renames the comments table to 'issue_comments'
811811+ // - reworks issue comment constraints:
812812+ // * unique(did, rkey)
813813+ // * removes comment-id and just uses the global ID
814814+ // * foreign key (issue_at) references issues(at_uri)
815815+ // - adds new columns:
816816+ // * "reply_to", the at-uri of a parent comment (threading)
817817+ // * "at_uri", a generated column
818818+ runMigration(conn, "rework-issue-comments", func(tx *sql.Tx) error {
819819+ _, err := tx.Exec(`
820820+ create table if not exists issue_comments (
821821+ -- identifiers
822822+ id integer primary key autoincrement,
823823+ did text not null,
824824+ rkey text,
825825+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.issue.comment' || '/' || rkey) stored,
826826+827827+ -- at identifiers
828828+ issue_at text not null,
829829+ reply_to text, -- at_uri of parent comment
830830+831831+ -- content
832832+ body text not null,
833833+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
834834+ edited text,
835835+ deleted text,
836836+837837+ -- constraints
838838+ unique(did, rkey),
839839+ unique(at_uri),
840840+ foreign key (issue_at) references issues(at_uri) on delete cascade
841841+ );
842842+ `)
843843+ if err != nil {
844844+ return err
845845+ }
846846+847847+ // transfer data
848848+ _, err = tx.Exec(`
849849+ insert into issue_comments (id, did, rkey, issue_at, body, created, edited, deleted)
850850+ select
851851+ c.id,
852852+ c.owner_did,
853853+ c.rkey,
854854+ i.at_uri, -- get at_uri from issues table
855855+ c.body,
856856+ c.created,
857857+ c.edited,
858858+ c.deleted
859859+ from comments c
860860+ join issues i on c.repo_at = i.repo_at and c.issue_id = i.issue_id;
861861+ `)
862862+ if err != nil {
863863+ return err
864864+ }
865865+866866+ // drop old table
867867+ _, err = tx.Exec(`drop table comments`)
868868+ return err
869869+ })
870870+540871 return &DB{db}, nil
541872}
542873543874type migrationFn = func(*sql.Tx) error
544875545545-func runMigration(d *sql.DB, name string, migrationFn migrationFn) error {
546546- tx, err := d.Begin()
876876+func runMigration(c *sql.Conn, name string, migrationFn migrationFn) error {
877877+ tx, err := c.BeginTx(context.Background(), nil)
547878 if err != nil {
548879 return err
549880 }
···583914 return nil
584915}
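The body of `runMigration` is elided in this hunk; a plausible shape, inferred from the `migrations` table (`name` is unique) and the new `*sql.Conn` signature -- a sketch, not the verbatim implementation:

```go
func runMigration(c *sql.Conn, name string, fn migrationFn) error {
	tx, err := c.BeginTx(context.Background(), nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// already applied? then this run is a no-op
	var n int
	if err := tx.QueryRow(`select count(1) from migrations where name = ?`, name).Scan(&n); err != nil {
		return err
	}
	if n > 0 {
		return nil
	}

	if err := fn(tx); err != nil {
		return err
	}

	// record the migration so it never runs twice
	if _, err := tx.Exec(`insert into migrations (name) values (?)`, name); err != nil {
		return err
	}
	return tx.Commit()
}
```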
585916917917+func (d *DB) Close() error {
918918+ return d.DB.Close()
919919+}
920920+586921type filter struct {
587922 key string
588923 arg any
···610945 kind := rv.Kind()
611946612947 // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
613613- if kind == reflect.Slice || kind == reflect.Array {
948948+ if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
614949 if rv.Len() == 0 {
615615- panic(fmt.Sprintf("empty slice passed to %q filter on %s", f.cmp, f.key))
950950+ // always false
951951+ return "1 = 0"
616952 }
617953618954 placeholders := make([]string, rv.Len())
···629965func (f filter) Arg() []any {
630966 rv := reflect.ValueOf(f.arg)
631967 kind := rv.Kind()
632632- if kind == reflect.Slice || kind == reflect.Array {
968968+ if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
633969 if rv.Len() == 0 {
634634- panic(fmt.Sprintf("empty slice passed to %q filter on %s", f.cmp, f.key))
970970+ return nil
635971 }
636972637973 out := make([]any, rv.Len())
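The `[]byte` carve-out in both `Condition` and `Arg` matters because byte slices are blobs: they should bind as a single parameter rather than be expanded element-wise. Empty slices now compile to a clause that matches nothing instead of panicking. Approximate behavior (hypothetical filters; exact spacing of the compiled SQL depends on `Condition`):

```go
db.FilterIn("did", []string{"did:plc:a", "did:plc:b"}) // compiles to roughly: did in (?, ?)
db.FilterIn("did", []string{})                         // compiles to: 1 = 0 (matches nothing)
```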
+16-2
appview/db/email.go
···103103 query := `
104104 select email, did
105105 from emails
106106- where
107107- verified = ?
106106+ where
107107+ verified = ?
108108 and email in (` + strings.Join(placeholders, ",") + `)
109109 `
110110···153153 `
154154 var count int
155155 err := e.QueryRow(query, did, email).Scan(&count)
156156+ if err != nil {
157157+ return false, err
158158+ }
159159+ return count > 0, nil
160160+}
161161+162162+func CheckEmailExistsAtAll(e Execer, email string) (bool, error) {
163163+ query := `
164164+ select count(*)
165165+ from emails
166166+ where email = ?
167167+ `
168168+ var count int
169169+ err := e.QueryRow(query, email).Scan(&count)
156170 if err != nil {
157171 return false, err
158172 }
+147-44
appview/db/follow.go
···11package db
2233import (
44+ "fmt"
45 "log"
66+ "strings"
57 "time"
68)
79···1214 Rkey string
1315}
14161515-func AddFollow(e Execer, userDid, subjectDid, rkey string) error {
1717+func AddFollow(e Execer, follow *Follow) error {
1618 query := `insert or ignore into follows (user_did, subject_did, rkey) values (?, ?, ?)`
1717- _, err := e.Exec(query, userDid, subjectDid, rkey)
1919+ _, err := e.Exec(query, follow.UserDid, follow.SubjectDid, follow.Rkey)
1820 return err
1921}
2022···5355 return err
5456}
55575656-func GetFollowerFollowing(e Execer, did string) (int, int, error) {
5757- followers, following := 0, 0
5858+type FollowStats struct {
5959+ Followers int64
6060+ Following int64
6161+}
6262+6363+func GetFollowerFollowingCount(e Execer, did string) (FollowStats, error) {
6464+ var followers, following int64
5865 err := e.QueryRow(
5959- `SELECT
6666+ `SELECT
6067 COUNT(CASE WHEN subject_did = ? THEN 1 END) AS followers,
6168 COUNT(CASE WHEN user_did = ? THEN 1 END) AS following
6269 FROM follows;`, did, did).Scan(&followers, &following)
6370 if err != nil {
6464- return 0, 0, err
7171+ return FollowStats{}, err
6572 }
6666- return followers, following, nil
7373+ return FollowStats{
7474+ Followers: followers,
7575+ Following: following,
7676+ }, nil
6777}
68786969-type FollowStatus int
7979+func GetFollowerFollowingCounts(e Execer, dids []string) (map[string]FollowStats, error) {
8080+ if len(dids) == 0 {
8181+ return nil, nil
8282+ }
70837171-const (
7272- IsNotFollowing FollowStatus = iota
7373- IsFollowing
7474- IsSelf
7575-)
8484+ placeholders := make([]string, len(dids))
8585+ for i := range placeholders {
8686+ placeholders[i] = "?"
8787+ }
8888+ placeholderStr := strings.Join(placeholders, ",")
76897777-func (s FollowStatus) String() string {
7878- switch s {
7979- case IsNotFollowing:
8080- return "IsNotFollowing"
8181- case IsFollowing:
8282- return "IsFollowing"
8383- case IsSelf:
8484- return "IsSelf"
8585- default:
8686- return "IsNotFollowing"
9090+ args := make([]any, len(dids)*2)
9191+ for i, did := range dids {
9292+ args[i] = did
9393+ args[i+len(dids)] = did
8794 }
8888-}
89959090-func GetFollowStatus(e Execer, userDid, subjectDid string) FollowStatus {
9191- if userDid == subjectDid {
9292- return IsSelf
9393- } else if _, err := GetFollow(e, userDid, subjectDid); err != nil {
9494- return IsNotFollowing
9595- } else {
9696- return IsFollowing
9696+ query := fmt.Sprintf(`
9797+ select
9898+ coalesce(f.did, g.did) as did,
9999+ coalesce(f.followers, 0) as followers,
100100+ coalesce(g.following, 0) as following
101101+ from (
102102+ select subject_did as did, count(*) as followers
103103+ from follows
104104+ where subject_did in (%s)
105105+ group by subject_did
106106+ ) f
107107+ full outer join (
108108+ select user_did as did, count(*) as following
109109+ from follows
110110+ where user_did in (%s)
111111+ group by user_did
112112+ ) g on f.did = g.did`,
113113+ placeholderStr, placeholderStr)
114114+115115+ result := make(map[string]FollowStats)
116116+117117+ rows, err := e.Query(query, args...)
118118+ if err != nil {
119119+ return nil, err
120120+ }
121121+ defer rows.Close()
122122+123123+ for rows.Next() {
124124+ var did string
125125+ var followers, following int64
126126+ if err := rows.Scan(&did, &followers, &following); err != nil {
127127+ return nil, err
128128+ }
129129+ result[did] = FollowStats{
130130+ Followers: followers,
131131+ Following: following,
132132+ }
133133+ }
134134+135135+ for _, did := range dids {
136136+ if _, exists := result[did]; !exists {
137137+ result[did] = FollowStats{
138138+ Followers: 0,
139139+ Following: 0,
140140+ }
141141+ }
97142 }
143143+144144+ return result, nil
98145}
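The args slice lays the DID list out twice because the same `in (...)` placeholder group appears in both subqueries. A caller-side sketch with hypothetical DIDs (`execer` assumed):

```go
dids := []string{"did:plc:a", "did:plc:b"}
// placeholderStr -> "?,?"
// args           -> [a, b, a, b]: the first pair binds the followers
// subquery, the second pair binds the following subquery
stats, err := db.GetFollowerFollowingCounts(execer, dids)
```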
99146100100-func GetAllFollows(e Execer, limit int) ([]Follow, error) {
147147+func GetFollows(e Execer, limit int, filters ...filter) ([]Follow, error) {
101148 var follows []Follow
102149103103- rows, err := e.Query(`
104104- select user_did, subject_did, followed_at, rkey
150150+ var conditions []string
151151+ var args []any
152152+ for _, filter := range filters {
153153+ conditions = append(conditions, filter.Condition())
154154+ args = append(args, filter.Arg()...)
155155+ }
156156+157157+ whereClause := ""
158158+ if conditions != nil {
159159+ whereClause = " where " + strings.Join(conditions, " and ")
160160+ }
161161+ limitClause := ""
162162+ if limit > 0 {
163163+ limitClause = " limit ?"
164164+ args = append(args, limit)
165165+ }
166166+167167+ query := fmt.Sprintf(
168168+ `select user_did, subject_did, followed_at, rkey
105169 from follows
170170+ %s
106171 order by followed_at desc
107107- limit ?`, limit,
108108- )
172172+ %s
173173+ `, whereClause, limitClause)
174174+175175+ rows, err := e.Query(query, args...)
109176 if err != nil {
110177 return nil, err
111178 }
112178 defer rows.Close()
113113-114179 for rows.Next() {
115180 var follow Follow
116181 var followedAt string
117117- if err := rows.Scan(&follow.UserDid, &follow.SubjectDid, &followedAt, &follow.Rkey); err != nil {
182182+ err := rows.Scan(
183183+ &follow.UserDid,
184184+ &follow.SubjectDid,
185185+ &followedAt,
186186+ &follow.Rkey,
187187+ )
188188+ if err != nil {
118189 return nil, err
119190 }
120120-121191 followedAtTime, err := time.Parse(time.RFC3339, followedAt)
122192 if err != nil {
123193 log.Println("unable to determine followed at time")
···125195 } else {
126196 follow.FollowedAt = followedAtTime
127197 }
128128-129198 follows = append(follows, follow)
130199 }
200200+ return follows, nil
201201+}
131202132132- if err := rows.Err(); err != nil {
133133- return nil, err
203203+func GetFollowers(e Execer, did string) ([]Follow, error) {
204204+ return GetFollows(e, 0, FilterEq("subject_did", did))
205205+}
206206+207207+func GetFollowing(e Execer, did string) ([]Follow, error) {
208208+ return GetFollows(e, 0, FilterEq("user_did", did))
209209+}
210210+211211+type FollowStatus int
212212+213213+const (
214214+ IsNotFollowing FollowStatus = iota
215215+ IsFollowing
216216+ IsSelf
217217+)
218218+219219+func (s FollowStatus) String() string {
220220+ switch s {
221221+ case IsNotFollowing:
222222+ return "IsNotFollowing"
223223+ case IsFollowing:
224224+ return "IsFollowing"
225225+ case IsSelf:
226226+ return "IsSelf"
227227+ default:
228228+ return "IsNotFollowing"
134229 }
230230+}
135231136136- return follows, nil
232232+func GetFollowStatus(e Execer, userDid, subjectDid string) FollowStatus {
233233+ if userDid == subjectDid {
234234+ return IsSelf
235235+ } else if _, err := GetFollow(e, userDid, subjectDid); err != nil {
236236+ return IsNotFollowing
237237+ } else {
238238+ return IsFollowing
239239+ }
137240}
+459-306
appview/db/issues.go
···2233import (
44 "database/sql"
55+ "fmt"
66+ "maps"
77+ "slices"
88+ "sort"
99+ "strings"
510 "time"
611712 "github.com/bluesky-social/indigo/atproto/syntax"
1313+ "tangled.sh/tangled.sh/core/api/tangled"
814 "tangled.sh/tangled.sh/core/appview/pagination"
915)
10161117type Issue struct {
1212- RepoAt syntax.ATURI
1313- OwnerDid string
1414- IssueId int
1515- IssueAt string
1616- Created time.Time
1717- Title string
1818- Body string
1919- Open bool
1818+ Id int64
1919+ Did string
2020+ Rkey string
2121+ RepoAt syntax.ATURI
2222+ IssueId int
2323+ Created time.Time
2424+ Edited *time.Time
2525+ Deleted *time.Time
2626+ Title string
2727+ Body string
2828+ Open bool
20292130 // optionally, populate this when querying for reverse mappings
2231 // like comment counts, parent repo etc.
2323- Metadata *IssueMetadata
3232+ Comments []IssueComment
3333+ Repo *Repo
2434}
25352626-type IssueMetadata struct {
2727- CommentCount int
2828- Repo *Repo
2929- // labels, assignee etc.
3636+func (i *Issue) AtUri() syntax.ATURI {
3737+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueNSID, i.Rkey))
3038}
31393232-type Comment struct {
3333- OwnerDid string
3434- RepoAt syntax.ATURI
3535- Rkey string
3636- Issue int
3737- CommentId int
3838- Body string
3939- Created *time.Time
4040- Deleted *time.Time
4141- Edited *time.Time
4040+func (i *Issue) AsRecord() tangled.RepoIssue {
4141+ return tangled.RepoIssue{
4242+ Repo: i.RepoAt.String(),
4343+ Title: i.Title,
4444+ Body: &i.Body,
4545+ CreatedAt: i.Created.Format(time.RFC3339),
4646+ }
4247}
43484444-func NewIssue(tx *sql.Tx, issue *Issue) error {
4545- defer tx.Rollback()
4949+func (i *Issue) State() string {
5050+ if i.Open {
5151+ return "open"
5252+ }
5353+ return "closed"
5454+}
46554747- _, err := tx.Exec(`
4848- insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
4949- values (?, 1)
5050- `, issue.RepoAt)
5151- if err != nil {
5252- return err
5656+type CommentListItem struct {
5757+ Self *IssueComment
5858+ Replies []*IssueComment
5959+}
6060+6161+func (i *Issue) CommentList() []CommentListItem {
6262+ // create a map to quickly find top-level comments by their at-uri
6363+ toplevel := make(map[string]*CommentListItem)
6464+ var replies []*IssueComment
6565+6666+ // collect top level comments into the map
6767+ for _, comment := range i.Comments {
6868+ if comment.IsTopLevel() {
6969+ toplevel[comment.AtUri().String()] = &CommentListItem{
7070+ Self: &comment,
7171+ }
7272+ } else {
7373+ replies = append(replies, &comment)
7474+ }
5375 }
54765555- var nextId int
5656- err = tx.QueryRow(`
5757- update repo_issue_seqs
5858- set next_issue_id = next_issue_id + 1
5959- where repo_at = ?
6060- returning next_issue_id - 1
6161- `, issue.RepoAt).Scan(&nextId)
6262- if err != nil {
6363- return err
7777+ for _, r := range replies {
7878+ parentAt := *r.ReplyTo
7979+ if parent, exists := toplevel[parentAt]; exists {
8080+ parent.Replies = append(parent.Replies, r)
8181+ }
6482 }
65836666- issue.IssueId = nextId
8484+ var listing []CommentListItem
8585+ for _, v := range toplevel {
8686+ listing = append(listing, *v)
8787+ }
67886868- _, err = tx.Exec(`
6969- insert into issues (repo_at, owner_did, issue_id, title, body)
7070- values (?, ?, ?, ?, ?)
7171- `, issue.RepoAt, issue.OwnerDid, issue.IssueId, issue.Title, issue.Body)
8989+ // sort everything
9090+ sortFunc := func(a, b *IssueComment) bool {
9191+ return a.Created.Before(b.Created)
9292+ }
9393+ sort.Slice(listing, func(i, j int) bool {
9494+ return sortFunc(listing[i].Self, listing[j].Self)
9595+ })
9696+ for _, r := range listing {
9797+ sort.Slice(r.Replies, func(i, j int) bool {
9898+ return sortFunc(r.Replies[i], r.Replies[j])
9999+ })
100100+ }
101101+102102+ return listing
103103+}
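`CommentList` produces a two-level thread: top-level comments keyed by at-uri, replies attached to their parent, everything sorted oldest-first; replies whose parent is missing (or is itself a reply) are silently dropped. Note the `&comment` captures rely on Go 1.22+ per-iteration loop variables. A minimal sketch with hypothetical values:

```go
parent := db.IssueComment{Did: "did:plc:alice", Rkey: "3ka", Body: "top-level"}
parentAt := parent.AtUri().String()
reply := db.IssueComment{Did: "did:plc:bob", Rkey: "3kb", Body: "a reply", ReplyTo: &parentAt}

issue := db.Issue{Comments: []db.IssueComment{parent, reply}}
for _, item := range issue.CommentList() {
	fmt.Println(item.Self.Body, len(item.Replies)) // "top-level 1"
}
```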
104104+105105+func IssueFromRecord(did, rkey string, record tangled.RepoIssue) Issue {
106106+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
72107 if err != nil {
7373- return err
108108+ created = time.Now()
74109 }
751107676- if err := tx.Commit(); err != nil {
7777- return err
111111+ body := ""
112112+ if record.Body != nil {
113113+ body = *record.Body
78114 }
791158080- return nil
116116+ return Issue{
117117+ RepoAt: syntax.ATURI(record.Repo),
118118+ Did: did,
119119+ Rkey: rkey,
120120+ Created: created,
121121+ Title: record.Title,
122122+ Body: body,
123123+ Open: true, // new issues are open by default
124124+ }
81125}
821268383-func SetIssueAt(e Execer, repoAt syntax.ATURI, issueId int, issueAt string) error {
8484- _, err := e.Exec(`update issues set issue_at = ? where repo_at = ? and issue_id = ?`, issueAt, repoAt, issueId)
8585- return err
127127+type IssueComment struct {
128128+ Id int64
129129+ Did string
130130+ Rkey string
131131+ IssueAt string
132132+ ReplyTo *string
133133+ Body string
134134+ Created time.Time
135135+ Edited *time.Time
136136+ Deleted *time.Time
86137}
871388888-func GetIssueAt(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
8989- var issueAt string
9090- err := e.QueryRow(`select issue_at from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&issueAt)
9191- return issueAt, err
139139+func (i *IssueComment) AtUri() syntax.ATURI {
140140+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueCommentNSID, i.Rkey))
92141}
931429494-func GetIssueId(e Execer, repoAt syntax.ATURI) (int, error) {
9595- var issueId int
9696- err := e.QueryRow(`select next_issue_id from repo_issue_seqs where repo_at = ?`, repoAt).Scan(&issueId)
9797- return issueId - 1, err
143143+func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
144144+ return tangled.RepoIssueComment{
145145+ Body: i.Body,
146146+ Issue: i.IssueAt,
147147+ CreatedAt: i.Created.Format(time.RFC3339),
148148+ ReplyTo: i.ReplyTo,
149149+ }
98150}
99151100100-func GetIssueOwnerDid(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
101101- var ownerDid string
102102- err := e.QueryRow(`select owner_did from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&ownerDid)
103103- return ownerDid, err
152152+func (i *IssueComment) IsTopLevel() bool {
153153+ return i.ReplyTo == nil
104154}
105155106106-func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
107107- var issues []Issue
108108- openValue := 0
109109- if isOpen {
110110- openValue = 1
156156+func IssueCommentFromRecord(e Execer, did, rkey string, record tangled.RepoIssueComment) (*IssueComment, error) {
157157+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
158158+ if err != nil {
159159+ created = time.Now()
111160 }
112161113113- rows, err := e.Query(
114114- `
115115- with numbered_issue as (
116116- select
117117- i.owner_did,
118118- i.issue_id,
119119- i.created,
120120- i.title,
121121- i.body,
122122- i.open,
123123- count(c.id) as comment_count,
124124- row_number() over (order by i.created desc) as row_num
125125- from
126126- issues i
127127- left join
128128- comments c on i.repo_at = c.repo_at and i.issue_id = c.issue_id
129129- where
130130- i.repo_at = ? and i.open = ?
131131- group by
132132- i.id, i.owner_did, i.issue_id, i.created, i.title, i.body, i.open
133133- )
134134- select
135135- owner_did,
136136- issue_id,
137137- created,
138138- title,
139139- body,
140140- open,
141141- comment_count
142142- from
143143- numbered_issue
144144- where
145145- row_num between ? and ?`,
146146- repoAt, openValue, page.Offset+1, page.Offset+page.Limit)
147147- if err != nil {
162162+ ownerDid := did
163163+164164+ if _, err = syntax.ParseATURI(record.Issue); err != nil {
148165 return nil, err
149166 }
150150- defer rows.Close()
151167152152- for rows.Next() {
153153- var issue Issue
154154- var createdAt string
155155- var metadata IssueMetadata
156156- err := rows.Scan(&issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
157157- if err != nil {
158158- return nil, err
159159- }
168168+ comment := IssueComment{
169169+ Did: ownerDid,
170170+ Rkey: rkey,
171171+ Body: record.Body,
172172+ IssueAt: record.Issue,
173173+ ReplyTo: record.ReplyTo,
174174+ Created: created,
175175+ }
160176161161- createdTime, err := time.Parse(time.RFC3339, createdAt)
162162- if err != nil {
163163- return nil, err
177177+ return &comment, nil
178178+}
179179+180180+func PutIssue(tx *sql.Tx, issue *Issue) error {
181181+ // ensure sequence exists
182182+ _, err := tx.Exec(`
183183+ insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
184184+ values (?, 1)
185185+ `, issue.RepoAt)
186186+ if err != nil {
187187+ return err
188188+ }
189189+190190+ issues, err := GetIssues(
191191+ tx,
192192+ FilterEq("did", issue.Did),
193193+ FilterEq("rkey", issue.Rkey),
194194+ )
195195+ switch {
196196+ case err != nil:
197197+ return err
198198+ case len(issues) == 0:
199199+ return createNewIssue(tx, issue)
200200+ case len(issues) != 1: // should be unreachable
201201+ return fmt.Errorf("invalid number of issues returned: %d", len(issues))
202202+ default:
203203+ // if content is identical, do not edit
204204+ existingIssue := issues[0]
205205+ if existingIssue.Title == issue.Title && existingIssue.Body == issue.Body {
206206+ return nil
164207 }
165165- issue.Created = createdTime
166166- issue.Metadata = &metadata
167208168168- issues = append(issues, issue)
209209+ issue.Id = existingIssue.Id
210210+ issue.IssueId = existingIssue.IssueId
211211+ return updateIssue(tx, issue)
169212 }
213213+}
170214171171- if err := rows.Err(); err != nil {
172172- return nil, err
215215+func createNewIssue(tx *sql.Tx, issue *Issue) error {
216216+ // get next issue_id
217217+ var newIssueId int
218218+ err := tx.QueryRow(`
219219+ update repo_issue_seqs
220220+ set next_issue_id = next_issue_id + 1
221221+ where repo_at = ?
222222+ returning next_issue_id - 1
223223+ `, issue.RepoAt).Scan(&newIssueId)
224224+ if err != nil {
225225+ return err
173226 }
174227175175- return issues, nil
228228+ // insert new issue
229229+ row := tx.QueryRow(`
230230+ insert into issues (repo_at, did, rkey, issue_id, title, body)
231231+ values (?, ?, ?, ?, ?, ?)
232232+ returning rowid, issue_id
233233+ `, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body)
234234+235235+ return row.Scan(&issue.Id, &issue.IssueId)
176236}
177237178178-// timeframe here is directly passed into the sql query filter, and any
179179-// timeframe in the past should be negative; e.g.: "-3 months"
180180-func GetIssuesByOwnerDid(e Execer, ownerDid string, timeframe string) ([]Issue, error) {
181181- var issues []Issue
238238+func updateIssue(tx *sql.Tx, issue *Issue) error {
239239+ // update existing issue
240240+ _, err := tx.Exec(`
241241+ update issues
242242+ set title = ?, body = ?, edited = ?
243243+ where did = ? and rkey = ?
244244+ `, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey)
245245+ return err
246246+}
247247+248248+func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]Issue, error) {
249249+ issueMap := make(map[string]*Issue) // at-uri -> issue
250250+251251+ var conditions []string
252252+ var args []any
253253+254254+ for _, filter := range filters {
255255+ conditions = append(conditions, filter.Condition())
256256+ args = append(args, filter.Arg()...)
257257+ }
258258+259259+ whereClause := ""
260260+ if conditions != nil {
261261+ whereClause = " where " + strings.Join(conditions, " and ")
262262+ }
263263+264264+ pLower := FilterGte("row_num", page.Offset+1)
265265+ pUpper := FilterLte("row_num", page.Offset+page.Limit)
266266+267267+ args = append(args, pLower.Arg()...)
268268+ args = append(args, pUpper.Arg()...)
269269+ paginationClause := " where " + pLower.Condition() + " and " + pUpper.Condition()
270270+271271+ query := fmt.Sprintf(
272272+ `
273273+ select * from (
274274+ select
275275+ id,
276276+ did,
277277+ rkey,
278278+ repo_at,
279279+ issue_id,
280280+ title,
281281+ body,
282282+ open,
283283+ created,
284284+ edited,
285285+ deleted,
286286+ row_number() over (order by created desc) as row_num
287287+ from
288288+ issues
289289+ %s
290290+ ) ranked_issues
291291+ %s
292292+ `,
293293+ whereClause,
294294+ paginationClause,
295295+ )
182296183183- rows, err := e.Query(
184184- `select
185185- i.owner_did,
186186- i.repo_at,
187187- i.issue_id,
188188- i.created,
189189- i.title,
190190- i.body,
191191- i.open,
192192- r.did,
193193- r.name,
194194- r.knot,
195195- r.rkey,
196196- r.created
197197- from
198198- issues i
199199- join
200200- repos r on i.repo_at = r.at_uri
201201- where
202202- i.owner_did = ? and i.created >= date ('now', ?)
203203- order by
204204- i.created desc`,
205205- ownerDid, timeframe)
297297+ rows, err := e.Query(query, args...)
206298 if err != nil {
207207- return nil, err
299299+ return nil, fmt.Errorf("failed to query issues table: %w", err)
208300 }
209301 defer rows.Close()
210302211303 for rows.Next() {
212304 var issue Issue
213213- var issueCreatedAt, repoCreatedAt string
214214- var repo Repo
305305+ var createdAt string
306306+ var editedAt, deletedAt sql.Null[string]
307307+ var rowNum int64
215308 err := rows.Scan(
216216- &issue.OwnerDid,
309309+ &issue.Id,
310310+ &issue.Did,
311311+ &issue.Rkey,
217312 &issue.RepoAt,
218313 &issue.IssueId,
219219- &issueCreatedAt,
220314 &issue.Title,
221315 &issue.Body,
222316 &issue.Open,
223223- &repo.Did,
224224- &repo.Name,
225225- &repo.Knot,
226226- &repo.Rkey,
227227- &repoCreatedAt,
317317+ &createdAt,
318318+ &editedAt,
319319+ &deletedAt,
320320+ &rowNum,
228321 )
229322 if err != nil {
230230- return nil, err
323323+ return nil, fmt.Errorf("failed to scan issue: %w", err)
231324 }
232325233233- issueCreatedTime, err := time.Parse(time.RFC3339, issueCreatedAt)
234234- if err != nil {
235235- return nil, err
326326+ if t, err := time.Parse(time.RFC3339, createdAt); err == nil {
327327+ issue.Created = t
236328 }
237237- issue.Created = issueCreatedTime
238329239239- repoCreatedTime, err := time.Parse(time.RFC3339, repoCreatedAt)
240240- if err != nil {
241241- return nil, err
330330+ if editedAt.Valid {
331331+ if t, err := time.Parse(time.RFC3339, editedAt.V); err == nil {
332332+ issue.Edited = &t
333333+ }
242334 }
243243- repo.Created = repoCreatedTime
244335245245- issue.Metadata = &IssueMetadata{
246246- Repo: &repo,
336336+ if deletedAt.Valid {
337337+ if t, err := time.Parse(time.RFC3339, deletedAt.V); err == nil {
338338+ issue.Deleted = &t
339339+ }
247340 }
248341249249- issues = append(issues, issue)
342342+ atUri := issue.AtUri().String()
343343+ issueMap[atUri] = &issue
250344 }
251345252252- if err := rows.Err(); err != nil {
253253- return nil, err
346346+ // collect reverse repos
347347+ repoAts := make([]string, 0, len(issueMap))
348348+ for _, issue := range issueMap {
349349+ repoAts = append(repoAts, string(issue.RepoAt))
350350+ }
351351+352352+ repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
353353+ if err != nil {
354354+ return nil, fmt.Errorf("failed to build repo mappings: %w", err)
355355+ }
356356+357357+ repoMap := make(map[string]*Repo)
358358+ for i := range repos {
359359+ repoMap[string(repos[i].RepoAt())] = &repos[i]
360360+ }
361361+362362+ for issueAt, i := range issueMap {
363363+ if r, ok := repoMap[string(i.RepoAt)]; ok {
364364+ i.Repo = r
365365+ } else {
366366+ // drop the issue from the listing if its repo is deleted
367367+ // TODO: foreign key where?
368368+ delete(issueMap, issueAt)
369369+ }
370370+ }
371371+372372+ // collect comments
373373+ issueAts := slices.Collect(maps.Keys(issueMap))
374374+ comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
375375+ if err != nil {
376376+ return nil, fmt.Errorf("failed to query comments: %w", err)
377377+ }
378378+379379+ for i := range comments {
380380+ issueAt := comments[i].IssueAt
381381+ if issue, ok := issueMap[issueAt]; ok {
382382+ issue.Comments = append(issue.Comments, comments[i])
383383+ }
384384+ }
385385+386386+ var issues []Issue
387387+ for _, i := range issueMap {
388388+ issues = append(issues, *i)
254389 }
390390+391391+ sort.Slice(issues, func(i, j int) bool {
392392+ return issues[i].Created.After(issues[j].Created)
393393+ })
255394256395 return issues, nil
257396}
258397398398+func GetIssues(e Execer, filters ...filter) ([]Issue, error) {
399399+ return GetIssuesPaginated(e, pagination.FirstPage(), filters...)
400400+}
401401+259402func GetIssue(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, error) {
260260- query := `select owner_did, created, title, body, open from issues where repo_at = ? and issue_id = ?`
403403+ query := `select id, did, rkey, created, title, body, open from issues where repo_at = ? and issue_id = ?`
261404 row := e.QueryRow(query, repoAt, issueId)
262405263406 var issue Issue
264407 var createdAt string
265265- err := row.Scan(&issue.OwnerDid, &createdAt, &issue.Title, &issue.Body, &issue.Open)
408408+ err := row.Scan(&issue.Id, &issue.Did, &issue.Rkey, &createdAt, &issue.Title, &issue.Body, &issue.Open)
266409 if err != nil {
267410 return nil, err
268411 }
···276419 return &issue, nil
277420}
278421279279-func GetIssueWithComments(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, []Comment, error) {
280280- query := `select owner_did, issue_id, created, title, body, open from issues where repo_at = ? and issue_id = ?`
281281- row := e.QueryRow(query, repoAt, issueId)
422422+func AddIssueComment(e Execer, c IssueComment) (int64, error) {
423423+ result, err := e.Exec(
424424+ `insert into issue_comments (
425425+ did,
426426+ rkey,
427427+ issue_at,
428428+ body,
429429+ reply_to,
430430+ created,
431431+ edited
432432+ )
433433+ values (?, ?, ?, ?, ?, ?, null)
434434+ on conflict(did, rkey) do update set
435435+ issue_at = excluded.issue_at,
436436+ body = excluded.body,
437437+ edited = case
438438+ when
439439+ issue_comments.issue_at != excluded.issue_at
440440+ or issue_comments.body != excluded.body
441441+ or issue_comments.reply_to != excluded.reply_to
442442+ then ?
443443+ else issue_comments.edited
444444+ end`,
445445+ c.Did,
446446+ c.Rkey,
447447+ c.IssueAt,
448448+ c.Body,
449449+ c.ReplyTo,
450450+ c.Created.Format(time.RFC3339),
451451+ time.Now().Format(time.RFC3339),
452452+ )
453453+ if err != nil {
454454+ return 0, err
455455+ }
282456283283- var issue Issue
284284- var createdAt string
285285- err := row.Scan(&issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open)
457457+ id, err := result.LastInsertId()
286458 if err != nil {
287287- return nil, nil, err
459459+ return 0, err
288460 }
289461290290- createdTime, err := time.Parse(time.RFC3339, createdAt)
291291- if err != nil {
292292- return nil, nil, err
462462+ return id, nil
463463+}
464464+465465+func DeleteIssueComments(e Execer, filters ...filter) error {
466466+ var conditions []string
467467+ var args []any
468468+ for _, filter := range filters {
469469+ conditions = append(conditions, filter.Condition())
470470+ args = append(args, filter.Arg()...)
293471 }
294294- issue.Created = createdTime
295472296296- comments, err := GetComments(e, repoAt, issueId)
297297- if err != nil {
298298- return nil, nil, err
473473+ whereClause := ""
474474+ if conditions != nil {
475475+ whereClause = " where " + strings.Join(conditions, " and ")
299476 }
300477301301- return &issue, comments, nil
302302-}
478478+ query := fmt.Sprintf(`update issue_comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
303479304304-func NewIssueComment(e Execer, comment *Comment) error {
305305- query := `insert into comments (owner_did, repo_at, rkey, issue_id, comment_id, body) values (?, ?, ?, ?, ?, ?)`
306306- _, err := e.Exec(
307307- query,
308308- comment.OwnerDid,
309309- comment.RepoAt,
310310- comment.Rkey,
311311- comment.Issue,
312312- comment.CommentId,
313313- comment.Body,
314314- )
480480+ _, err := e.Exec(query, args...)
315481 return err
316482}
317483318318-func GetComments(e Execer, repoAt syntax.ATURI, issueId int) ([]Comment, error) {
319319- var comments []Comment
484484+func GetIssueComments(e Execer, filters ...filter) ([]IssueComment, error) {
485485+ var comments []IssueComment
320486321321- rows, err := e.Query(`
487487+ var conditions []string
488488+ var args []any
489489+ for _, filter := range filters {
490490+ conditions = append(conditions, filter.Condition())
491491+ args = append(args, filter.Arg()...)
492492+ }
493493+494494+ whereClause := ""
495495+ if conditions != nil {
496496+ whereClause = " where " + strings.Join(conditions, " and ")
497497+ }
498498+499499+ query := fmt.Sprintf(`
322500 select
323323- owner_did,
324324- issue_id,
325325- comment_id,
501501+ id,
502502+ did,
326503 rkey,
504504+ issue_at,
505505+ reply_to,
327506 body,
328507 created,
329508 edited,
330509 deleted
331510 from
332332- comments
333333- where
334334- repo_at = ? and issue_id = ?
335335- order by
336336- created asc`,
337337- repoAt,
338338- issueId,
339339- )
340340- if err == sql.ErrNoRows {
341341- return []Comment{}, nil
342342- }
511511+ issue_comments
512512+ %s
513513+ `, whereClause)
514514+515515+ rows, err := e.Query(query, args...)
343516 if err != nil {
344517 return nil, err
345518 }
346346- defer rows.Close()
347519348520 for rows.Next() {
349349- var comment Comment
350350- var createdAt string
351351- var deletedAt, editedAt, rkey sql.NullString
352352- err := rows.Scan(&comment.OwnerDid, &comment.Issue, &comment.CommentId, &rkey, &comment.Body, &createdAt, &editedAt, &deletedAt)
521521+ var comment IssueComment
522522+ var created string
523523+ var rkey, edited, deleted, replyTo sql.Null[string]
524524+ err := rows.Scan(
525525+ &comment.Id,
526526+ &comment.Did,
527527+ &rkey,
528528+ &comment.IssueAt,
529529+ &replyTo,
530530+ &comment.Body,
531531+ &created,
532532+ &edited,
533533+ &deleted,
534534+ )
353535 if err != nil {
354536 return nil, err
355537 }
356538357357- createdAtTime, err := time.Parse(time.RFC3339, createdAt)
358358- if err != nil {
359359- return nil, err
539539+ // rkey can be null on rows that predate the rkey migration; newer comments always have one
540540+ if rkey.Valid {
541541+ comment.Rkey = rkey.V
360542 }
361361- comment.Created = &createdAtTime
362543363363- if deletedAt.Valid {
364364- deletedTime, err := time.Parse(time.RFC3339, deletedAt.String)
365365- if err != nil {
366366- return nil, err
544544+ if t, err := time.Parse(time.RFC3339, created); err == nil {
545545+ comment.Created = t
546546+ }
547547+548548+ if edited.Valid {
549549+ if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
550550+ comment.Edited = &t
367551 }
368368- comment.Deleted = &deletedTime
369552 }
370553371371- if editedAt.Valid {
372372- editedTime, err := time.Parse(time.RFC3339, editedAt.String)
373373- if err != nil {
374374- return nil, err
554554+ if deleted.Valid {
555555+ if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
556556+ comment.Deleted = &t
375557 }
376376- comment.Edited = &editedTime
377558 }
378559379379- if rkey.Valid {
380380- comment.Rkey = rkey.String
560560+ if replyTo.Valid {
561561+ comment.ReplyTo = &replyTo.V
381562 }
382563383564 comments = append(comments, comment)
384565 }
385566386386- if err := rows.Err(); err != nil {
567567+ if err = rows.Err(); err != nil {
387568 return nil, err
388569 }
389570390571 return comments, nil
391572}
392573393393-func GetComment(e Execer, repoAt syntax.ATURI, issueId, commentId int) (*Comment, error) {
394394- query := `
395395- select
396396- owner_did, body, rkey, created, deleted, edited
397397- from
398398- comments where repo_at = ? and issue_id = ? and comment_id = ?
399399- `
400400- row := e.QueryRow(query, repoAt, issueId, commentId)
401401-402402- var comment Comment
403403- var createdAt string
404404- var deletedAt, editedAt, rkey sql.NullString
405405- err := row.Scan(&comment.OwnerDid, &comment.Body, &rkey, &createdAt, &deletedAt, &editedAt)
406406- if err != nil {
407407- return nil, err
574574+func DeleteIssues(e Execer, filters ...filter) error {
575575+ var conditions []string
576576+ var args []any
577577+ for _, filter := range filters {
578578+ conditions = append(conditions, filter.Condition())
579579+ args = append(args, filter.Arg()...)
408580 }
409581410410- createdTime, err := time.Parse(time.RFC3339, createdAt)
411411- if err != nil {
412412- return nil, err
582582+ whereClause := ""
583583+ if conditions != nil {
584584+ whereClause = " where " + strings.Join(conditions, " and ")
413585 }
414414- comment.Created = &createdTime
415586416416- if deletedAt.Valid {
417417- deletedTime, err := time.Parse(time.RFC3339, deletedAt.String)
418418- if err != nil {
419419- return nil, err
420420- }
421421- comment.Deleted = &deletedTime
422422- }
587587+ query := fmt.Sprintf(`delete from issues %s`, whereClause)
588588+ _, err := e.Exec(query, args...)
589589+ return err
590590+}
423591424424- if editedAt.Valid {
425425- editedTime, err := time.Parse(time.RFC3339, editedAt.String)
426426- if err != nil {
427427- return nil, err
428428- }
429429- comment.Edited = &editedTime
592592+func CloseIssues(e Execer, filters ...filter) error {
593593+ var conditions []string
594594+ var args []any
595595+ for _, filter := range filters {
596596+ conditions = append(conditions, filter.Condition())
597597+ args = append(args, filter.Arg()...)
430598 }
431599432432- if rkey.Valid {
433433- comment.Rkey = rkey.String
600600+ whereClause := ""
601601+ if conditions != nil {
602602+ whereClause = " where " + strings.Join(conditions, " and ")
434603 }
435604436436- comment.RepoAt = repoAt
437437- comment.Issue = issueId
438438- comment.CommentId = commentId
439439-440440- return &comment, nil
441441-}
442442-443443-func EditComment(e Execer, repoAt syntax.ATURI, issueId, commentId int, newBody string) error {
444444- _, err := e.Exec(
445445- `
446446- update comments
447447- set body = ?,
448448- edited = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
449449- where repo_at = ? and issue_id = ? and comment_id = ?
450450- `, newBody, repoAt, issueId, commentId)
605605+ query := fmt.Sprintf(`update issues set open = 0 %s`, whereClause)
606606+ _, err := e.Exec(query, args...)
451607 return err
452608}
453609454454-func DeleteComment(e Execer, repoAt syntax.ATURI, issueId, commentId int) error {
455455- _, err := e.Exec(
456456- `
457457- update comments
458458- set body = "",
459459- deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
460460- where repo_at = ? and issue_id = ? and comment_id = ?
461461- `, repoAt, issueId, commentId)
462462- return err
463463-}
610610+func ReopenIssues(e Execer, filters ...filter) error {
611611+ var conditions []string
612612+ var args []any
613613+ for _, filter := range filters {
614614+ conditions = append(conditions, filter.Condition())
615615+ args = append(args, filter.Arg()...)
616616+ }
464617465465-func CloseIssue(e Execer, repoAt syntax.ATURI, issueId int) error {
466466- _, err := e.Exec(`update issues set open = 0 where repo_at = ? and issue_id = ?`, repoAt, issueId)
467467- return err
468468-}
618618+ whereClause := ""
619619+ if conditions != nil {
620620+ whereClause = " where " + strings.Join(conditions, " and ")
621621+ }
469622470470-func ReopenIssue(e Execer, repoAt syntax.ATURI, issueId int) error {
471471- _, err := e.Exec(`update issues set open = 1 where repo_at = ? and issue_id = ?`, repoAt, issueId)
623623+ query := fmt.Sprintf(`update issues set open = 1 %s`, whereClause)
624624+ _, err := e.Exec(query, args...)
472625 return err
473626}
474627
+93
appview/db/language.go
···11+package db
22+33+import (
44+ "fmt"
55+ "strings"
66+77+ "github.com/bluesky-social/indigo/atproto/syntax"
88+)
99+1010+type RepoLanguage struct {
1111+ Id int64
1212+ RepoAt syntax.ATURI
1313+ Ref string
1414+ IsDefaultRef bool
1515+ Language string
1616+ Bytes int64
1717+}
1818+1919+func GetRepoLanguages(e Execer, filters ...filter) ([]RepoLanguage, error) {
2020+ var conditions []string
2121+ var args []any
2222+ for _, filter := range filters {
2323+ conditions = append(conditions, filter.Condition())
2424+ args = append(args, filter.Arg()...)
2525+ }
2626+2727+ whereClause := ""
2828+ if conditions != nil {
2929+ whereClause = " where " + strings.Join(conditions, " and ")
3030+ }
3131+3232+ query := fmt.Sprintf(
3333+ `select id, repo_at, ref, is_default_ref, language, bytes from repo_languages %s`,
3434+ whereClause,
3535+ )
3636+ rows, err := e.Query(query, args...)
3737+3838+ if err != nil {
3939+ return nil, fmt.Errorf("failed to execute query: %w ", err)
4040+ }
4141+4242+ var langs []RepoLanguage
4343+ for rows.Next() {
4444+ var rl RepoLanguage
4545+ var isDefaultRef int
4646+4747+ err := rows.Scan(
4848+ &rl.Id,
4949+ &rl.RepoAt,
5050+ &rl.Ref,
5151+ &isDefaultRef,
5252+ &rl.Language,
5353+ &rl.Bytes,
5454+ )
5555+ if err != nil {
5656+ return nil, fmt.Errorf("failed to scan: %w ", err)
5757+ }
5858+5959+ if isDefaultRef != 0 {
6060+ rl.IsDefaultRef = true
6161+ }
6262+6363+ langs = append(langs, rl)
6464+ }
6565+ if err = rows.Err(); err != nil {
6666+ return nil, fmt.Errorf("failed to scan rows: %w ", err)
6767+ }
6868+6969+ return langs, nil
7070+}
7171+7272+func InsertRepoLanguages(e Execer, langs []RepoLanguage) error {
7373+ stmt, err := e.Prepare(
7474+ "insert or replace into repo_languages (repo_at, ref, is_default_ref, language, bytes) values (?, ?, ?, ?, ?)",
7575+ )
7676+ if err != nil {
7777+ return err
7878+ }
7979+8080+ for _, l := range langs {
8181+ isDefaultRef := 0
8282+ if l.IsDefaultRef {
8383+ isDefaultRef = 1
8484+ }
8585+8686+ _, err := stmt.Exec(l.RepoAt, l.Ref, isDefaultRef, l.Language, l.Bytes)
8787+ if err != nil {
8888+ return err
8989+ }
9090+ }
9191+9292+ return nil
9393+}
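A hypothetical ingestion pass: `insert or replace` keyed on `unique(repo_at, ref, language)` keeps re-runs idempotent, so a language scanner can simply overwrite its previous breakdown (`repoAt`, `execer` assumed):

```go
langs := []db.RepoLanguage{
	{RepoAt: repoAt, Ref: "main", IsDefaultRef: true, Language: "Go", Bytes: 120000},
	{RepoAt: repoAt, Ref: "main", IsDefaultRef: true, Language: "HTML", Bytes: 4200},
}
if err := db.InsertRepoLanguages(execer, langs); err != nil {
	log.Fatal(err)
}
```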
-62
appview/db/migrations/20250305_113405.sql
···11--- Simplified SQLite Database Migration Script for Issues and Comments
22-33--- Migration for issues table
44-CREATE TABLE issues_new (
55- id integer primary key autoincrement,
66- owner_did text not null,
77- repo_at text not null,
88- issue_id integer not null,
99- title text not null,
1010- body text not null,
1111- open integer not null default 1,
1212- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
1313- issue_at text,
1414- unique(repo_at, issue_id),
1515- foreign key (repo_at) references repos(at_uri) on delete cascade
1616-);
1717-1818--- Migrate data to new issues table
1919-INSERT INTO issues_new (
2020- id, owner_did, repo_at, issue_id,
2121- title, body, open, created, issue_at
2222-)
2323-SELECT
2424- id, owner_did, repo_at, issue_id,
2525- title, body, open, created, issue_at
2626-FROM issues;
2727-2828--- Drop old issues table
2929-DROP TABLE issues;
3030-3131--- Rename new issues table
3232-ALTER TABLE issues_new RENAME TO issues;
3333-3434--- Migration for comments table
3535-CREATE TABLE comments_new (
3636- id integer primary key autoincrement,
3737- owner_did text not null,
3838- issue_id integer not null,
3939- repo_at text not null,
4040- comment_id integer not null,
4141- comment_at text not null,
4242- body text not null,
4343- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
4444- unique(issue_id, comment_id),
4545- foreign key (repo_at, issue_id) references issues(repo_at, issue_id) on delete cascade
4646-);
4747-4848--- Migrate data to new comments table
4949-INSERT INTO comments_new (
5050- id, owner_did, issue_id, repo_at,
5151- comment_id, comment_at, body, created
5252-)
5353-SELECT
5454- id, owner_did, issue_id, repo_at,
5555- comment_id, comment_at, body, created
5656-FROM comments;
5757-5858--- Drop old comments table
5959-DROP TABLE comments;
6060-6161--- Rename new comments table
6262-ALTER TABLE comments_new RENAME TO comments;
-66
appview/db/migrations/validate.sql
···11--- Validation Queries for Database Migration
22-33--- 1. Verify Issues Table Structure
44-PRAGMA table_info(issues);
55-66--- 2. Verify Comments Table Structure
77-PRAGMA table_info(comments);
88-99--- 3. Check Total Row Count Consistency
1010-SELECT
1111- 'Issues Row Count' AS check_type,
1212- (SELECT COUNT(*) FROM issues) AS row_count
1313-UNION ALL
1414-SELECT
1515- 'Comments Row Count' AS check_type,
1616- (SELECT COUNT(*) FROM comments) AS row_count;
1717-1818--- 4. Verify Unique Constraint on Issues
1919-SELECT
2020- repo_at,
2121- issue_id,
2222- COUNT(*) as duplicate_count
2323-FROM issues
2424-GROUP BY repo_at, issue_id
2525-HAVING duplicate_count > 1;
2626-2727--- 5. Verify Foreign Key Integrity for Comments
2828-SELECT
2929- 'Orphaned Comments' AS check_type,
3030- COUNT(*) AS orphaned_count
3131-FROM comments c
3232-LEFT JOIN issues i ON c.repo_at = i.repo_at AND c.issue_id = i.issue_id
3333-WHERE i.id IS NULL;
3434-3535--- 6. Check Foreign Key Constraint
3636-PRAGMA foreign_key_list(comments);
3737-3838--- 7. Sample Data Integrity Check
3939-SELECT
4040- 'Sample Issues' AS check_type,
4141- repo_at,
4242- issue_id,
4343- title,
4444- created
4545-FROM issues
4646-LIMIT 5;
4747-4848--- 8. Sample Comments Data Integrity Check
4949-SELECT
5050- 'Sample Comments' AS check_type,
5151- repo_at,
5252- issue_id,
5353- comment_id,
5454- body,
5555- created
5656-FROM comments
5757-LIMIT 5;
5858-5959--- 9. Verify Constraint on Comments (Issue ID and Comment ID Uniqueness)
6060-SELECT
6161- issue_id,
6262- comment_id,
6363- COUNT(*) as duplicate_count
6464-FROM comments
6565-GROUP BY issue_id, comment_id
6666-HAVING duplicate_count > 1;
+27-12
appview/db/pipeline.go
···99 "github.com/bluesky-social/indigo/atproto/syntax"
1010 "github.com/go-git/go-git/v5/plumbing"
1111 spindle "tangled.sh/tangled.sh/core/spindle/models"
1212+ "tangled.sh/tangled.sh/core/workflow"
1213)
13141415type Pipeline struct {
···2728}
28292930type WorkflowStatus struct {
3030- data []PipelineStatus
3131+ Data []PipelineStatus
3132}
32333334func (w WorkflowStatus) Latest() PipelineStatus {
3434- return w.data[len(w.data)-1]
3535+ return w.Data[len(w.Data)-1]
3536}
36373738// time taken by this workflow to reach an "end state"
3839func (w WorkflowStatus) TimeTaken() time.Duration {
3940 var start, end *time.Time
4040- for _, s := range w.data {
4141+ for _, s := range w.Data {
4142 if s.Status.IsStart() {
4243 start = &s.Created
4344 }
···7879 return ws
7980}
80818282+// if we know that a spindle has picked up this pipeline, then it is Responding
8383+func (p Pipeline) IsResponding() bool {
8484+ return len(p.Statuses) != 0
8585+}
8686+8187type Trigger struct {
8288 Id int
8383- Kind string
8989+ Kind workflow.TriggerKind
84908591 // push trigger fields
8692 PushRef *string
···95101}
9610297103func (t *Trigger) IsPush() bool {
9898- return t != nil && t.Kind == "push"
104104+ return t != nil && t.Kind == workflow.TriggerKindPush
99105}
100106101107func (t *Trigger) IsPullRequest() bool {
102102- return t != nil && t.Kind == "pull_request"
108108+ return t != nil && t.Kind == workflow.TriggerKindPullRequest
103109}
104110105111func (t *Trigger) TargetRef() string {
···256262 status.Status,
257263 status.Error,
258264 status.ExitCode,
265265+ status.Created.Format(time.RFC3339),
259266 }
260267261268 placeholders := make([]string, len(args))
···272279 workflow,
273280 status,
274281 error,
275275- exit_code
282282+ exit_code,
283283+ created
276284 ) values (%s)
277285 `, strings.Join(placeholders, ","))
278286···355363 return nil, err
356364 }
357365358358- // Parse created time manually
359366 p.Created, err = time.Parse(time.RFC3339, created)
360367 if err != nil {
361368 return nil, fmt.Errorf("invalid pipeline created timestamp %q: %w", created, err)
362369 }
363370364364- // Link trigger to pipeline
365371 t.Id = p.TriggerId
366372 p.Trigger = &t
367373 p.Statuses = make(map[string]WorkflowStatus)
···440446 }
441447442448 // append
443443- statuses.data = append(statuses.data, ps)
449449+ statuses.Data = append(statuses.Data, ps)
444450445451 // reassign
446452 pipeline.Statuses[ps.Workflow] = statuses
···450456 var all []Pipeline
451457 for _, p := range pipelines {
452458 for _, s := range p.Statuses {
453453- slices.SortFunc(s.data, func(a, b PipelineStatus) int {
459459+ slices.SortFunc(s.Data, func(a, b PipelineStatus) int {
454460 if a.Created.After(b.Created) {
455461 return 1
456462 }
457457- return -1
463463+ if a.Created.Before(b.Created) {
464464+ return -1
465465+ }
466466+ if a.ID > b.ID {
467467+ return 1
468468+ }
469469+ if a.ID < b.ID {
470470+ return -1
471471+ }
472472+ return 0
458473 })
459474 }
460475 all = append(all, p)
+126-5
appview/db/profile.go
···2222 ByMonth []ByMonth
2323}
24242525+func (p *ProfileTimeline) IsEmpty() bool {
2626+ if p == nil {
2727+ return true
2828+ }
2929+3030+ for _, m := range p.ByMonth {
3131+ if !m.IsEmpty() {
3232+ return false
3333+ }
3434+ }
3535+3636+ return true
3737+}
3838+2539type ByMonth struct {
2640 RepoEvents []RepoEvent
2741 IssueEvents IssueEvents
···118132 *items = append(*items, &pull)
119133 }
120134121121- issues, err := GetIssuesByOwnerDid(e, forDid, timeframe)
135135+ issues, err := GetIssues(
136136+ e,
137137+ FilterEq("did", forDid),
138138+ FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
139139+ )
122140 if err != nil {
123141 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
124142 }
···137155 *items = append(*items, &issue)
138156 }
139157140140- repos, err := GetAllReposByDid(e, forDid)
158158+ repos, err := GetRepos(e, 0, FilterEq("did", forDid))
141159 if err != nil {
142160 return nil, fmt.Errorf("error getting all repos by did: %w", err)
143161 }
···348366 return tx.Commit()
349367}
350368369369+func GetProfiles(e Execer, filters ...filter) (map[string]*Profile, error) {
370370+ var conditions []string
371371+ var args []any
372372+ for _, filter := range filters {
373373+ conditions = append(conditions, filter.Condition())
374374+ args = append(args, filter.Arg()...)
375375+ }
376376+377377+ whereClause := ""
378378+ if len(conditions) > 0 {
379379+ whereClause = " where " + strings.Join(conditions, " and ")
380380+ }
381381+382382+ profilesQuery := fmt.Sprintf(
383383+ `select
384384+ id,
385385+ did,
386386+ description,
387387+ include_bluesky,
388388+ location
389389+ from
390390+ profile
391391+ %s`,
392392+ whereClause,
393393+ )
394394+ rows, err := e.Query(profilesQuery, args...)
395395+ if err != nil {
396396+ return nil, err
397397+ }
398398+399399+ profileMap := make(map[string]*Profile)
400400+ for rows.Next() {
401401+ var profile Profile
402402+ var includeBluesky int
403403+404404+ err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location)
405405+ if err != nil {
406406+ return nil, err
407407+ }
408408+409409+ if includeBluesky != 0 {
410410+ profile.IncludeBluesky = true
411411+ }
412412+413413+ profileMap[profile.Did] = &profile
414414+ }
415415+ if err = rows.Err(); err != nil {
416416+ return nil, err
417417+ }
418418+419419+ // populate profile links
420420+ inClause := strings.TrimSuffix(strings.Repeat("?, ", len(profileMap)), ", ")
421421+ args = make([]any, len(profileMap))
422422+ i := 0
423423+ for did := range profileMap {
424424+ args[i] = did
425425+ i++
426426+ }
427427+428428+ linksQuery := fmt.Sprintf("select link, did from profile_links where did in (%s)", inClause)
429429+ rows, err = e.Query(linksQuery, args...)
430430+ if err != nil {
431431+ return nil, err
432432+ }
433433+ idxs := make(map[string]int)
434434+ for did := range profileMap {
435435+ idxs[did] = 0
436436+ }
437437+ for rows.Next() {
438438+ var link, did string
439439+ if err = rows.Scan(&link, &did); err != nil {
440440+ return nil, err
441441+ }
442442+443443+ idx := idxs[did]
444444+ profileMap[did].Links[idx] = link
445445+ idxs[did] = idx + 1
446446+ }
447447+448448+ pinsQuery := fmt.Sprintf("select at_uri, did from profile_pinned_repositories where did in (%s)", inClause)
449449+ rows, err = e.Query(pinsQuery, args...)
450450+ if err != nil {
451451+ return nil, err
452452+ }
453453+ idxs = make(map[string]int)
454454+ for did := range profileMap {
455455+ idxs[did] = 0
456456+ }
457457+ for rows.Next() {
458458+ var link syntax.ATURI
459459+ var did string
460460+ if err = rows.Scan(&link, &did); err != nil {
461461+ return nil, err
462462+ }
463463+464464+ idx := idxs[did]
465465+ profileMap[did].PinnedRepos[idx] = link
466466+ idxs[did] = idx + 1
467467+ }
468468+469469+ return profileMap, nil
470470+}
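A minimal usage sketch for the new batch helper (hypothetical caller; `e` and `dids` are assumed context — note the follow-up link/pin queries above assume at least one matching profile row):

```go
// Fetch several profiles in one query instead of calling GetProfile per DID.
profiles, err := db.GetProfiles(e, db.FilterIn("did", dids))
if err != nil {
	return err
}
for did, p := range profiles {
	log.Printf("%s: %s", did, p.Description)
}
```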
471471+351472func GetProfile(e Execer, did string) (*Profile, error) {
352473 var profile Profile
353474 profile.Did = did
···432553 query = `select count(id) from pulls where owner_did = ? and state = ?`
433554 args = append(args, did, PullOpen)
434555 case VanityStatOpenIssueCount:
435435- query = `select count(id) from issues where owner_did = ? and open = 1`
556556+ query = `select count(id) from issues where did = ? and open = 1`
436557 args = append(args, did)
437558 case VanityStatClosedIssueCount:
438438- query = `select count(id) from issues where owner_did = ? and open = 0`
559559+ query = `select count(id) from issues where did = ? and open = 0`
439560 args = append(args, did)
440561 case VanityStatRepositoryCount:
441562 query = `select count(id) from repos where did = ?`
···469590 }
470591471592 // ensure all pinned repos are either own repos or collaborating repos
472472- repos, err := GetAllReposByDid(e, profile.Did)
593593+ repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
473594 if err != nil {
474595 log.Printf("getting repos for %s: %s", profile.Did, err)
475596 }
···1313 FormatMarkdown: []string{".md", ".markdown", ".mdown", ".mkdn", ".mkd"},
1414}
15151616+// ReadmeFilenames contains the list of common README filenames to search for,
1717+// in order of preference. Only includes well-supported formats.
1818+var ReadmeFilenames = []string{
1919+ "README.md", "readme.md",
2020+ "README",
2121+ "readme",
2222+ "README.markdown",
2323+ "readme.markdown",
2424+ "README.txt",
2525+ "readme.txt",
2626+}
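Since `ReadmeFilenames` is ordered by preference, a lookup can simply take the first hit; a minimal sketch, assuming a hypothetical `files` set of names present in the tree:

```go
// findReadme returns the preferred README filename present in the tree.
func findReadme(files map[string]bool) (string, bool) {
	for _, name := range ReadmeFilenames {
		if files[name] {
			return name, true
		}
	}
	return "", false
}
```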
2727+1628func GetFormat(filename string) Format {
1729 for format, extensions := range FileTypes {
1830 for _, extension := range extensions {
···55 "encoding/json"
66 "errors"
77 "fmt"
88- "io"
98 "log"
109 "net/http"
1110 "sort"
···1413 "time"
15141615 "tangled.sh/tangled.sh/core/api/tangled"
1717- "tangled.sh/tangled.sh/core/appview"
1816 "tangled.sh/tangled.sh/core/appview/config"
1917 "tangled.sh/tangled.sh/core/appview/db"
2020- "tangled.sh/tangled.sh/core/appview/idresolver"
1818+ "tangled.sh/tangled.sh/core/appview/notify"
2119 "tangled.sh/tangled.sh/core/appview/oauth"
2220 "tangled.sh/tangled.sh/core/appview/pages"
2121+ "tangled.sh/tangled.sh/core/appview/pages/markup"
2322 "tangled.sh/tangled.sh/core/appview/reporesolver"
2424- "tangled.sh/tangled.sh/core/knotclient"
2323+ "tangled.sh/tangled.sh/core/appview/xrpcclient"
2424+ "tangled.sh/tangled.sh/core/idresolver"
2525 "tangled.sh/tangled.sh/core/patchutil"
2626+ "tangled.sh/tangled.sh/core/tid"
2627 "tangled.sh/tangled.sh/core/types"
27282829 "github.com/bluekeyes/go-gitdiff/gitdiff"
2930 comatproto "github.com/bluesky-social/indigo/api/atproto"
3030- "github.com/bluesky-social/indigo/atproto/syntax"
3131 lexutil "github.com/bluesky-social/indigo/lex/util"
3232+ indigoxrpc "github.com/bluesky-social/indigo/xrpc"
3233 "github.com/go-chi/chi/v5"
3334 "github.com/google/uuid"
3434- "github.com/posthog/posthog-go"
3535)
36363737type Pulls struct {
···4141 idResolver *idresolver.Resolver
4242 db *db.DB
4343 config *config.Config
4444- posthog posthog.Client
4444+ notifier notify.Notifier
4545}
46464747func New(
···5151 resolver *idresolver.Resolver,
5252 db *db.DB,
5353 config *config.Config,
5454- posthog posthog.Client,
5454+ notifier notify.Notifier,
5555) *Pulls {
5656 return &Pulls{
5757 oauth: oauth,
···6060 idResolver: resolver,
6161 db: db,
6262 config: config,
6363- posthog: posthog,
6363+ notifier: notifier,
6464 }
6565}
6666···9696 return
9797 }
98989999- mergeCheckResponse := s.mergeCheck(f, pull, stack)
9999+ mergeCheckResponse := s.mergeCheck(r, f, pull, stack)
100100 resubmitResult := pages.Unknown
101101 if user.Did == pull.OwnerDid {
102102- resubmitResult = s.resubmitCheck(f, pull, stack)
102102+ resubmitResult = s.resubmitCheck(r, f, pull, stack)
103103 }
104104105105 s.pages.PullActionsFragment(w, pages.PullActionsParams{
···151151 }
152152 }
153153154154- resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
155155- didHandleMap := make(map[string]string)
156156- for _, identity := range resolvedIds {
157157- if !identity.Handle.IsInvalidHandle() {
158158- didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
159159- } else {
160160- didHandleMap[identity.DID.String()] = identity.DID.String()
161161- }
162162- }
163163-164164- mergeCheckResponse := s.mergeCheck(f, pull, stack)
154154+ mergeCheckResponse := s.mergeCheck(r, f, pull, stack)
165155 resubmitResult := pages.Unknown
166156 if user != nil && user.Did == pull.OwnerDid {
167167- resubmitResult = s.resubmitCheck(f, pull, stack)
157157+ resubmitResult = s.resubmitCheck(r, f, pull, stack)
158158+ }
159159+160160+ repoInfo := f.RepoInfo(user)
161161+162162+ m := make(map[string]db.Pipeline)
163163+164164+ var shas []string
165165+ for _, s := range pull.Submissions {
166166+ shas = append(shas, s.SourceRev)
167167+ }
168168+ for _, p := range stack {
169169+ shas = append(shas, p.LatestSha())
170170+ }
171171+ for _, p := range abandonedPulls {
172172+ shas = append(shas, p.LatestSha())
173173+ }
174174+175175+ ps, err := db.GetPipelineStatuses(
176176+ s.db,
177177+ db.FilterEq("repo_owner", repoInfo.OwnerDid),
178178+ db.FilterEq("repo_name", repoInfo.Name),
179179+ db.FilterEq("knot", repoInfo.Knot),
180180+ db.FilterIn("sha", shas),
181181+ )
182182+ if err != nil {
183183+ log.Printf("failed to fetch pipeline statuses: %s", err)
184184+ // non-fatal
185185+ }
186186+187187+ for _, p := range ps {
188188+ m[p.Sha] = p
189189+ }
190190+191191+ reactionCountMap, err := db.GetReactionCountMap(s.db, pull.PullAt())
192192+ if err != nil {
193193+ log.Println("failed to get pull reactions", err)
194194+ s.pages.Notice(w, "pulls", "Failed to load pull. Try again later.")
195195+ }
196196+197197+ userReactions := map[db.ReactionKind]bool{}
198198+ if user != nil {
199199+ userReactions = db.GetReactionStatusMap(s.db, user.Did, pull.PullAt())
168200 }
169201170202 s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{
171203 LoggedInUser: user,
172172- RepoInfo: f.RepoInfo(user),
173173- DidHandleMap: didHandleMap,
204204+ RepoInfo: repoInfo,
174205 Pull: pull,
175206 Stack: stack,
176207 AbandonedPulls: abandonedPulls,
177208 MergeCheck: mergeCheckResponse,
178209 ResubmitCheck: resubmitResult,
210210+ Pipelines: m,
211211+212212+ OrderedReactionKinds: db.OrderedReactionKinds,
213213+ Reactions: reactionCountMap,
214214+ UserReacted: userReactions,
179215 })
180216}
181217182182-func (s *Pulls) mergeCheck(f *reporesolver.ResolvedRepo, pull *db.Pull, stack db.Stack) types.MergeCheckResponse {
218218+func (s *Pulls) mergeCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *db.Pull, stack db.Stack) types.MergeCheckResponse {
183219 if pull.State == db.PullMerged {
184220 return types.MergeCheckResponse{}
185221 }
186222187187- secret, err := db.GetRegistrationKey(s.db, f.Knot)
188188- if err != nil {
189189- log.Printf("failed to get registration key: %v", err)
190190- return types.MergeCheckResponse{
191191- Error: "failed to check merge status: this knot is unregistered",
192192- }
223223+ scheme := "https"
224224+ if s.config.Core.Dev {
225225+ scheme = "http"
193226 }
227227+ host := fmt.Sprintf("%s://%s", scheme, f.Knot)
194228195195- ksClient, err := knotclient.NewSignedClient(f.Knot, secret, s.config.Core.Dev)
196196- if err != nil {
197197- log.Printf("failed to setup signed client for %s; ignoring: %v", f.Knot, err)
198198- return types.MergeCheckResponse{
199199- Error: "failed to check merge status",
200200- }
229229+ xrpcc := indigoxrpc.Client{
230230+ Host: host,
201231 }
202232203233 patch := pull.LatestPatch()
···210240 patch = mergeable.CombinedPatch()
211241 }
212242213213- resp, err := ksClient.MergeCheck([]byte(patch), f.OwnerDid(), f.RepoName, pull.TargetBranch)
214214- if err != nil {
215215- log.Println("failed to check for mergeability:", err)
243243+ resp, xe := tangled.RepoMergeCheck(
244244+ r.Context(),
245245+ &xrpcc,
246246+ &tangled.RepoMergeCheck_Input{
247247+ Did: f.OwnerDid(),
248248+ Name: f.Name,
249249+ Branch: pull.TargetBranch,
250250+ Patch: patch,
251251+ },
252252+ )
253253+ if err := xrpcclient.HandleXrpcErr(xe); err != nil {
254254+ log.Println("failed to check for mergeability", "err", err)
216255 return types.MergeCheckResponse{
217217- Error: "failed to check merge status",
256256+ Error: fmt.Sprintf("failed to check merge status: %s", err.Error()),
218257 }
219258 }
220220- switch resp.StatusCode {
221221- case 404:
222222- return types.MergeCheckResponse{
223223- Error: "failed to check merge status: this knot does not support PRs",
224224- }
225225- case 400:
226226- return types.MergeCheckResponse{
227227- Error: "failed to check merge status: does this knot support PRs?",
259259+260260+ // convert xrpc response to internal types
261261+ conflicts := make([]types.ConflictInfo, len(resp.Conflicts))
262262+ for i, conflict := range resp.Conflicts {
263263+ conflicts[i] = types.ConflictInfo{
264264+ Filename: conflict.Filename,
265265+ Reason: conflict.Reason,
228266 }
229267 }
230268231231- respBody, err := io.ReadAll(resp.Body)
232232- if err != nil {
233233- log.Println("failed to read merge check response body")
234234- return types.MergeCheckResponse{
235235- Error: "failed to check merge status: knot is not speaking the right language",
236236- }
269269+ result := types.MergeCheckResponse{
270270+ IsConflicted: resp.Is_conflicted,
271271+ Conflicts: conflicts,
272272+ }
273273+274274+ if resp.Message != nil {
275275+ result.Message = *resp.Message
237276 }
238238- defer resp.Body.Close()
239277240240- var mergeCheckResponse types.MergeCheckResponse
241241- err = json.Unmarshal(respBody, &mergeCheckResponse)
242242- if err != nil {
243243- log.Println("failed to unmarshal merge check response", err)
244244- return types.MergeCheckResponse{
245245- Error: "failed to check merge status: knot is not speaking the right language",
246246- }
278278+ if resp.Error != nil {
279279+ result.Error = *resp.Error
247280 }
248281249249- return mergeCheckResponse
282282+ return result
250283}
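The scheme/host/client construction above recurs throughout this file; a hypothetical helper (not part of this change) could consolidate it:

```go
// knotClient builds an unauthenticated XRPC client for a knot,
// using http in dev mode and https otherwise.
func knotClient(knot string, dev bool) *indigoxrpc.Client {
	scheme := "https"
	if dev {
		scheme = "http"
	}
	return &indigoxrpc.Client{Host: fmt.Sprintf("%s://%s", scheme, knot)}
}
```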
251284252252-func (s *Pulls) resubmitCheck(f *reporesolver.ResolvedRepo, pull *db.Pull, stack db.Stack) pages.ResubmitResult {
285285+func (s *Pulls) resubmitCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *db.Pull, stack db.Stack) pages.ResubmitResult {
253286 if pull.State == db.PullMerged || pull.State == db.PullDeleted || pull.PullSource == nil {
254287 return pages.Unknown
255288 }
···271304 // pulls within the same repo
272305 knot = f.Knot
273306 ownerDid = f.OwnerDid()
274274- repoName = f.RepoName
307307+ repoName = f.Name
275308 }
276309277277- us, err := knotclient.NewUnsignedClient(knot, s.config.Core.Dev)
278278- if err != nil {
279279- log.Printf("failed to setup client for %s; ignoring: %v", knot, err)
280280- return pages.Unknown
310310+ scheme := "http"
311311+ if !s.config.Core.Dev {
312312+ scheme = "https"
313313+ }
314314+ host := fmt.Sprintf("%s://%s", scheme, knot)
315315+ xrpcc := &indigoxrpc.Client{
316316+ Host: host,
281317 }
282318283283- result, err := us.Branch(ownerDid, repoName, pull.PullSource.Branch)
319319+ repo := fmt.Sprintf("%s/%s", ownerDid, repoName)
320320+ branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, repo)
284321 if err != nil {
322322+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
323323+ log.Println("failed to call XRPC repo.branches", xrpcerr)
324324+ return pages.Unknown
325325+ }
285326 log.Println("failed to reach knotserver", err)
286327 return pages.Unknown
287328 }
288329330330+ targetBranch := branchResp
331331+289332 latestSourceRev := pull.Submissions[pull.LastRoundNumber()].SourceRev
290333291334 if pull.IsStacked() && stack != nil {
···293336 latestSourceRev = top.Submissions[top.LastRoundNumber()].SourceRev
294337 }
295338296296- if latestSourceRev != result.Branch.Hash {
339339+ if latestSourceRev != targetBranch.Hash {
297340 return pages.ShouldResubmit
298341 }
299342···308351 return
309352 }
310353354354+ var diffOpts types.DiffOpts
355355+ if d := r.URL.Query().Get("diff"); d == "split" {
356356+ diffOpts.Split = true
357357+ }
358358+311359 pull, ok := r.Context().Value("pull").(*db.Pull)
312360 if !ok {
313361 log.Println("failed to get pull")
···325373 return
326374 }
327375328328- identsToResolve := []string{pull.OwnerDid}
329329- resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
330330- didHandleMap := make(map[string]string)
331331- for _, identity := range resolvedIds {
332332- if !identity.Handle.IsInvalidHandle() {
333333- didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
334334- } else {
335335- didHandleMap[identity.DID.String()] = identity.DID.String()
336336- }
337337- }
338338-339376 patch := pull.Submissions[roundIdInt].Patch
340377 diff := patchutil.AsNiceDiff(patch, pull.TargetBranch)
341378342379 s.pages.RepoPullPatchPage(w, pages.RepoPullPatchParams{
343380 LoggedInUser: user,
344344- DidHandleMap: didHandleMap,
345381 RepoInfo: f.RepoInfo(user),
346382 Pull: pull,
347383 Stack: stack,
348384 Round: roundIdInt,
349385 Submission: pull.Submissions[roundIdInt],
350386 Diff: &diff,
387387+ DiffOpts: diffOpts,
351388 })
352389353390}
···361398 return
362399 }
363400401401+ var diffOpts types.DiffOpts
402402+ if d := r.URL.Query().Get("diff"); d == "split" {
403403+ diffOpts.Split = true
404404+ }
405405+364406 pull, ok := r.Context().Value("pull").(*db.Pull)
365407 if !ok {
366408 log.Println("failed to get pull")
···382424 return
383425 }
384426385385- identsToResolve := []string{pull.OwnerDid}
386386- resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
387387- didHandleMap := make(map[string]string)
388388- for _, identity := range resolvedIds {
389389- if !identity.Handle.IsInvalidHandle() {
390390- didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
391391- } else {
392392- didHandleMap[identity.DID.String()] = identity.DID.String()
393393- }
394394- }
395395-396427 currentPatch, err := patchutil.AsDiff(pull.Submissions[roundIdInt].Patch)
397428 if err != nil {
398429 log.Println("failed to interdiff; current patch malformed")
···414445 RepoInfo: f.RepoInfo(user),
415446 Pull: pull,
416447 Round: roundIdInt,
417417- DidHandleMap: didHandleMap,
418448 Interdiff: interdiff,
449449+ DiffOpts: diffOpts,
419450 })
420420- return
421451}
422452423453func (s *Pulls) RepoPullPatchRaw(w http.ResponseWriter, r *http.Request) {
···436466 return
437467 }
438468439439- identsToResolve := []string{pull.OwnerDid}
440440- resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
441441- didHandleMap := make(map[string]string)
442442- for _, identity := range resolvedIds {
443443- if !identity.Handle.IsInvalidHandle() {
444444- didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
445445- } else {
446446- didHandleMap[identity.DID.String()] = identity.DID.String()
447447- }
448448- }
449449-450450- w.Header().Set("Content-Type", "text/plain")
469469+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
451470 w.Write([]byte(pull.Submissions[roundIdInt].Patch))
452471}
453472···471490472491 pulls, err := db.GetPulls(
473492 s.db,
474474- db.FilterEq("repo_at", f.RepoAt),
493493+ db.FilterEq("repo_at", f.RepoAt()),
475494 db.FilterEq("state", state),
476495 )
477496 if err != nil {
···497516498517 // we want to group all stacked PRs into just one list
499518 stacks := make(map[string]db.Stack)
519519+ var shas []string
500520 n := 0
501521 for _, p := range pulls {
522522+ // store the sha for later
523523+ shas = append(shas, p.LatestSha())
502524 // this PR is stacked
503525 if p.StackId != "" {
504526 // we have already seen this PR stack
···517539 }
518540 pulls = pulls[:n]
519541520520- identsToResolve := make([]string, len(pulls))
521521- for i, pull := range pulls {
522522- identsToResolve[i] = pull.OwnerDid
542542+ repoInfo := f.RepoInfo(user)
543543+ ps, err := db.GetPipelineStatuses(
544544+ s.db,
545545+ db.FilterEq("repo_owner", repoInfo.OwnerDid),
546546+ db.FilterEq("repo_name", repoInfo.Name),
547547+ db.FilterEq("knot", repoInfo.Knot),
548548+ db.FilterIn("sha", shas),
549549+ )
550550+ if err != nil {
551551+ log.Printf("failed to fetch pipeline statuses: %s", err)
552552+ // non-fatal
523553 }
524524- resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
525525- didHandleMap := make(map[string]string)
526526- for _, identity := range resolvedIds {
527527- if !identity.Handle.IsInvalidHandle() {
528528- didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
529529- } else {
530530- didHandleMap[identity.DID.String()] = identity.DID.String()
531531- }
554554+ m := make(map[string]db.Pipeline)
555555+ for _, p := range ps {
556556+ m[p.Sha] = p
532557 }
533558534559 s.pages.RepoPulls(w, pages.RepoPullsParams{
535560 LoggedInUser: s.oauth.GetUser(r),
536561 RepoInfo: f.RepoInfo(user),
537562 Pulls: pulls,
538538- DidHandleMap: didHandleMap,
539563 FilteringBy: state,
540564 Stacks: stacks,
565565+ Pipelines: m,
541566 })
542542- return
543567}
544568545569func (s *Pulls) PullComment(w http.ResponseWriter, r *http.Request) {
···591615 defer tx.Rollback()
592616593617 createdAt := time.Now().Format(time.RFC3339)
594594- ownerDid := user.Did
595618596596- pullAt, err := db.GetPullAt(s.db, f.RepoAt, pull.PullId)
619619+ pullAt, err := db.GetPullAt(s.db, f.RepoAt(), pull.PullId)
597620 if err != nil {
598621 log.Println("failed to get pull at", err)
599622 s.pages.Notice(w, "pull-comment", "Failed to create comment.")
600623 return
601624 }
602625603603- atUri := f.RepoAt.String()
604626 client, err := s.oauth.AuthorizedClient(r)
605627 if err != nil {
606628 log.Println("failed to get authorized client", err)
···610632 atResp, err := client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
611633 Collection: tangled.RepoPullCommentNSID,
612634 Repo: user.Did,
613613- Rkey: appview.TID(),
635635+ Rkey: tid.TID(),
614636 Record: &lexutil.LexiconTypeDecoder{
615637 Val: &tangled.RepoPullComment{
616616- Repo: &atUri,
617638 Pull: string(pullAt),
618618- Owner: &ownerDid,
619639 Body: body,
620640 CreatedAt: createdAt,
621641 },
···627647 return
628648 }
629649630630- // Create the pull comment in the database with the commentAt field
631631- commentId, err := db.NewPullComment(tx, &db.PullComment{
650650+ comment := &db.PullComment{
632651 OwnerDid: user.Did,
633633- RepoAt: f.RepoAt.String(),
652652+ RepoAt: f.RepoAt().String(),
634653 PullId: pull.PullId,
635654 Body: body,
636655 CommentAt: atResp.Uri,
637656 SubmissionId: pull.Submissions[roundNumber].ID,
638638- })
657657+ }
658658+659659+ // Create the pull comment in the database with the commentAt field
660660+ commentId, err := db.NewPullComment(tx, comment)
639661 if err != nil {
640662 log.Println("failed to create pull comment", err)
641663 s.pages.Notice(w, "pull-comment", "Failed to create comment.")
···649671 return
650672 }
651673652652- if !s.config.Core.Dev {
653653- err = s.posthog.Enqueue(posthog.Capture{
654654- DistinctId: user.Did,
655655- Event: "new_pull_comment",
656656- Properties: posthog.Properties{"repo_at": f.RepoAt.String(), "pull_id": pull.PullId},
657657- })
658658- if err != nil {
659659- log.Println("failed to enqueue posthog event:", err)
660660- }
661661- }
674674+ s.notifier.NewPullComment(r.Context(), comment)
662675663676 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", f.OwnerSlashRepo(), pull.PullId, commentId))
664677 return
···675688676689 switch r.Method {
677690 case http.MethodGet:
678678- us, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
691691+ scheme := "http"
692692+ if !s.config.Core.Dev {
693693+ scheme = "https"
694694+ }
695695+ host := fmt.Sprintf("%s://%s", scheme, f.Knot)
696696+ xrpcc := &indigoxrpc.Client{
697697+ Host: host,
698698+ }
699699+700700+ repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
701701+ xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
679702 if err != nil {
680680- log.Printf("failed to create unsigned client for %s", f.Knot)
681681- s.pages.Error503(w)
703703+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
704704+ log.Println("failed to call XRPC repo.branches", xrpcerr)
705705+ s.pages.Error503(w)
706706+ return
707707+ }
708708+ log.Println("failed to fetch branches", err)
682709 return
683710 }
684711685685- result, err := us.Branches(f.OwnerDid(), f.RepoName)
686686- if err != nil {
687687- log.Println("failed to fetch branches", err)
712712+ var result types.RepoBranchesResponse
713713+ if err := json.Unmarshal(xrpcBytes, &result); err != nil {
714714+ log.Println("failed to decode XRPC response", err)
715715+ s.pages.Error503(w)
688716 return
689717 }
690718···730758 s.pages.Notice(w, "pull", "Title is required for git-diff patches.")
731759 return
732760 }
761761+ sanitizer := markup.NewSanitizer()
762762+ if st := strings.TrimSpace(sanitizer.SanitizeDescription(title)); st == "" {
763763+ s.pages.Notice(w, "pull", "Title is empty after HTML sanitization")
764764+ return
765765+ }
733766 }
734767735768 // Validate we have at least one valid PR creation method
···744777 return
745778 }
746779747747- us, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
748748- if err != nil {
749749- log.Printf("failed to create unsigned client to %s: %v", f.Knot, err)
750750- s.pages.Notice(w, "pull", "Failed to create a pull request. Try again later.")
751751- return
780780+ // us, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
781781+ // if err != nil {
782782+ // log.Printf("failed to create unsigned client to %s: %v", f.Knot, err)
783783+ // s.pages.Notice(w, "pull", "Failed to create a pull request. Try again later.")
784784+ // return
785785+ // }
786786+787787+ // TODO: make capabilities an xrpc call
788788+ caps := struct {
789789+ PullRequests struct {
790790+ FormatPatch bool
791791+ BranchSubmissions bool
792792+ ForkSubmissions bool
793793+ PatchSubmissions bool
794794+ }
795795+ }{
796796+ PullRequests: struct {
797797+ FormatPatch bool
798798+ BranchSubmissions bool
799799+ ForkSubmissions bool
800800+ PatchSubmissions bool
801801+ }{
802802+ FormatPatch: true,
803803+ BranchSubmissions: true,
804804+ ForkSubmissions: true,
805805+ PatchSubmissions: true,
806806+ },
752807 }
753808754754- caps, err := us.Capabilities()
755755- if err != nil {
756756- log.Println("error fetching knot caps", f.Knot, err)
757757- s.pages.Notice(w, "pull", "Failed to create a pull request. Try again later.")
758758- return
759759- }
809809+ // caps, err := us.Capabilities()
810810+ // if err != nil {
811811+ // log.Println("error fetching knot caps", f.Knot, err)
812812+ // s.pages.Notice(w, "pull", "Failed to create a pull request. Try again later.")
813813+ // return
814814+ // }
760815761816 if !caps.PullRequests.FormatPatch {
762817 s.pages.Notice(w, "pull", "This knot doesn't support format-patch. Unfortunately, there is no fallback for now.")
···798853 sourceBranch string,
799854 isStacked bool,
800855) {
801801- pullSource := &db.PullSource{
802802- Branch: sourceBranch,
856856+ scheme := "http"
857857+ if !s.config.Core.Dev {
858858+ scheme = "https"
803859 }
804804- recordPullSource := &tangled.RepoPull_Source{
805805- Branch: sourceBranch,
860860+ host := fmt.Sprintf("%s://%s", scheme, f.Knot)
861861+ xrpcc := &indigoxrpc.Client{
862862+ Host: host,
806863 }
807864808808- // Generate a patch using /compare
809809- ksClient, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
865865+ repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
866866+ xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, targetBranch, sourceBranch)
810867 if err != nil {
811811- log.Printf("failed to create signed client for %s: %s", f.Knot, err)
812812- s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
868868+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
869869+ log.Println("failed to call XRPC repo.compare", xrpcerr)
870870+ s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
871871+ return
872872+ }
873873+ log.Println("failed to compare", err)
874874+ s.pages.Notice(w, "pull", err.Error())
813875 return
814876 }
815877816816- comparison, err := ksClient.Compare(f.OwnerDid(), f.RepoName, targetBranch, sourceBranch)
817817- if err != nil {
818818- log.Println("failed to compare", err)
819819- s.pages.Notice(w, "pull", err.Error())
878878+ var comparison types.RepoFormatPatchResponse
879879+ if err := json.Unmarshal(xrpcBytes, &comparison); err != nil {
880880+ log.Println("failed to decode XRPC compare response", err)
881881+ s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
820882 return
821883 }
822884···828890 return
829891 }
830892893893+ pullSource := &db.PullSource{
894894+ Branch: sourceBranch,
895895+ }
896896+ recordPullSource := &tangled.RepoPull_Source{
897897+ Branch: sourceBranch,
898898+ Sha: comparison.Rev2,
899899+ }
900900+831901 s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, sourceRev, pullSource, recordPullSource, isStacked)
832902}
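The fetch-bytes-then-`json.Unmarshal` pattern also repeats for each `RepoBranches`/`RepoCompare` call; a hypothetical generic helper (not in this change) would shrink those blocks:

```go
// decodeXrpc decodes a raw XRPC JSON payload into a typed response.
func decodeXrpc[T any](raw []byte) (*T, error) {
	var v T
	if err := json.Unmarshal(raw, &v); err != nil {
		return nil, err
	}
	return &v, nil
}
```

Usage would then read `result, err := decodeXrpc[types.RepoBranchesResponse](xrpcBytes)`.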
833903···841911}
842912843913func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) {
844844- fork, err := db.GetForkByDid(s.db, user.Did, forkRepo)
914914+ repoString := strings.SplitN(forkRepo, "/", 2)
915915+ forkOwnerDid := repoString[0]
916916+ repoName := repoString[1]
917917+ fork, err := db.GetForkByDid(s.db, forkOwnerDid, repoName)
845918 if errors.Is(err, sql.ErrNoRows) {
846919 s.pages.Notice(w, "pull", "No such fork.")
847920 return
···851924 return
852925 }
853926854854- secret, err := db.GetRegistrationKey(s.db, fork.Knot)
855855- if err != nil {
856856- log.Println("failed to fetch registration key:", err)
857857- s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
858858- return
859859- }
860860-861861- sc, err := knotclient.NewSignedClient(fork.Knot, secret, s.config.Core.Dev)
862862- if err != nil {
863863- log.Println("failed to create signed client:", err)
864864- s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
865865- return
866866- }
867867-868868- us, err := knotclient.NewUnsignedClient(fork.Knot, s.config.Core.Dev)
869869- if err != nil {
870870- log.Println("failed to create unsigned client:", err)
871871- s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
872872- return
873873- }
927927+ client, err := s.oauth.ServiceClient(
928928+ r,
929929+ oauth.WithService(fork.Knot),
930930+ oauth.WithLxm(tangled.RepoHiddenRefNSID),
931931+ oauth.WithDev(s.config.Core.Dev),
932932+ )
 if err != nil {
 log.Printf("failed to connect to knot server: %v", err)
 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
 return
 }
874933875875- resp, err := sc.NewHiddenRef(user.Did, fork.Name, sourceBranch, targetBranch)
876876- if err != nil {
877877- log.Println("failed to create hidden ref:", err, resp.StatusCode)
878878- s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
934934+ resp, err := tangled.RepoHiddenRef(
935935+ r.Context(),
936936+ client,
937937+ &tangled.RepoHiddenRef_Input{
938938+ ForkRef: sourceBranch,
939939+ RemoteRef: targetBranch,
940940+ Repo: fork.RepoAt().String(),
941941+ },
942942+ )
943943+ if err := xrpcclient.HandleXrpcErr(err); err != nil {
944944+ s.pages.Notice(w, "pull", err.Error())
879945 return
880946 }
881947882882- switch resp.StatusCode {
883883- case 404:
884884- case 400:
885885- s.pages.Notice(w, "pull", "Branch based pull requests are not supported on this knot.")
948948+ if !resp.Success {
949949+ errorMsg := "Failed to create pull request"
950950+ if resp.Error != nil {
951951+ errorMsg = fmt.Sprintf("Failed to create pull request: %s", *resp.Error)
952952+ }
953953+ s.pages.Notice(w, "pull", errorMsg)
886954 return
887955 }
888956···892960 // hiddenRef: hidden/feature-1/main (on repo-fork)
893961 // targetBranch: main (on repo-1)
894962 // sourceBranch: feature-1 (on repo-fork)
895895- comparison, err := us.Compare(user.Did, fork.Name, hiddenRef, sourceBranch)
963963+ forkScheme := "http"
964964+ if !s.config.Core.Dev {
965965+ forkScheme = "https"
966966+ }
967967+ forkHost := fmt.Sprintf("%s://%s", forkScheme, fork.Knot)
968968+ forkXrpcc := &indigoxrpc.Client{
969969+ Host: forkHost,
970970+ }
971971+972972+ forkRepoId := fmt.Sprintf("%s/%s", fork.Did, fork.Name)
973973+ forkXrpcBytes, err := tangled.RepoCompare(r.Context(), forkXrpcc, forkRepoId, hiddenRef, sourceBranch)
896974 if err != nil {
975975+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
976976+ log.Println("failed to call XRPC repo.compare for fork", xrpcerr)
977977+ s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
978978+ return
979979+ }
897980 log.Println("failed to compare across branches", err)
898981 s.pages.Notice(w, "pull", err.Error())
899982 return
900983 }
901984985985+ var comparison types.RepoFormatPatchResponse
986986+ if err := json.Unmarshal(forkXrpcBytes, &comparison); err != nil {
987987+ log.Println("failed to decode XRPC compare response for fork", err)
988988+ s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
989989+ return
990990+ }
991991+902992 sourceRev := comparison.Rev2
903993 patch := comparison.Patch
904994···907997 return
908998 }
909999910910- forkAtUri, err := syntax.ParseATURI(fork.AtUri)
911911- if err != nil {
912912- log.Println("failed to parse fork AT URI", err)
913913- s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
914914- return
915915- }
10001000+ forkAtUri := fork.RepoAt()
10011001+ forkAtUriStr := forkAtUri.String()
9161002917917- s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, sourceRev, &db.PullSource{
10031003+ pullSource := &db.PullSource{
9181004 Branch: sourceBranch,
9191005 RepoAt: &forkAtUri,
920920- }, &tangled.RepoPull_Source{Branch: sourceBranch, Repo: &fork.AtUri}, isStacked)
10061006+ }
10071007+ recordPullSource := &tangled.RepoPull_Source{
10081008+ Branch: sourceBranch,
10091009+ Repo: &forkAtUriStr,
10101010+ Sha: sourceRev,
10111011+ }
10121012+10131013+ s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, sourceRev, pullSource, recordPullSource, isStacked)
9211014}
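For reference, the hidden tracking ref mentioned in the comments above follows the `hidden/<source>/<target>` layout (this mirrors the format string previously built inline; a sketch, with `sourceBranch` and `targetBranch` assumed):

```go
// For source branch "feature-1" targeting "main", the fork keeps a
// tracking ref named "hidden/feature-1/main".
hiddenRef := fmt.Sprintf("hidden/%s/%s", sourceBranch, targetBranch)
```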
92210159231016func (s *Pulls) createPullRequest(
···9341027) {
9351028 if isStacked {
9361029 // creates a series of PRs, each linking to the previous, identified by jj's change-id
937937- s.createStackedPulLRequest(
10301030+ s.createStackedPullRequest(
9381031 w,
9391032 r,
9401033 f,
···9791072 body = formatPatches[0].Body
9801073 }
9811074982982- rkey := appview.TID()
10751075+ rkey := tid.TID()
9831076 initialSubmission := db.PullSubmission{
9841077 Patch: patch,
9851078 SourceRev: sourceRev,
9861079 }
987987- err = db.NewPull(tx, &db.Pull{
10801080+ pull := &db.Pull{
9881081 Title: title,
9891082 Body: body,
9901083 TargetBranch: targetBranch,
9911084 OwnerDid: user.Did,
992992- RepoAt: f.RepoAt,
10851085+ RepoAt: f.RepoAt(),
9931086 Rkey: rkey,
9941087 Submissions: []*db.PullSubmission{
9951088 &initialSubmission,
9961089 },
9971090 PullSource: pullSource,
998998- })
10911091+ }
10921092+ err = db.NewPull(tx, pull)
9991093 if err != nil {
10001094 log.Println("failed to create pull request", err)
10011095 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
10021096 return
10031097 }
10041004- pullId, err := db.NextPullId(tx, f.RepoAt)
10981098+ pullId, err := db.NextPullId(tx, f.RepoAt())
10051099 if err != nil {
10061100 log.Println("failed to get pull id", err)
10071101 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
···10141108 Rkey: rkey,
10151109 Record: &lexutil.LexiconTypeDecoder{
10161110 Val: &tangled.RepoPull{
10171017- Title: title,
10181018- PullId: int64(pullId),
10191019- TargetRepo: string(f.RepoAt),
10201020- TargetBranch: targetBranch,
10211021- Patch: patch,
10221022- Source: recordPullSource,
11111111+ Title: title,
11121112+ Target: &tangled.RepoPull_Target{
11131113+ Repo: string(f.RepoAt()),
11141114+ Branch: targetBranch,
11151115+ },
11161116+ Patch: patch,
11171117+ Source: recordPullSource,
10231118 },
10241119 },
10251120 })
···10351130 return
10361131 }
1037113210381038- if !s.config.Core.Dev {
10391039- err = s.posthog.Enqueue(posthog.Capture{
10401040- DistinctId: user.Did,
10411041- Event: "new_pull",
10421042- Properties: posthog.Properties{"repo_at": f.RepoAt.String(), "pull_id": pullId},
10431043- })
10441044- if err != nil {
10451045- log.Println("failed to enqueue posthog event:", err)
10461046- }
10471047- }
11331133+ s.notifier.NewPull(r.Context(), pull)
1048113410491135 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pullId))
10501136}
1051113710521052-func (s *Pulls) createStackedPulLRequest(
11381138+func (s *Pulls) createStackedPullRequest(
10531139 w http.ResponseWriter,
10541140 r *http.Request,
10551141 f *reporesolver.ResolvedRepo,
···11961282 return
11971283 }
1198128411991199- us, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
12851285+ scheme := "http"
12861286+ if !s.config.Core.Dev {
12871287+ scheme = "https"
12881288+ }
12891289+ host := fmt.Sprintf("%s://%s", scheme, f.Knot)
12901290+ xrpcc := &indigoxrpc.Client{
12911291+ Host: host,
12921292+ }
12931293+12941294+ repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
12951295+ xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
12001296 if err != nil {
12011201- log.Printf("failed to create unsigned client for %s", f.Knot)
12021202- s.pages.Error503(w)
12971297+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
12981298+ log.Println("failed to call XRPC repo.branches", xrpcerr)
12991299+ s.pages.Error503(w)
13001300+ return
13011301+ }
13021302+ log.Println("failed to fetch branches", err)
12031303 return
12041304 }
1205130512061206- result, err := us.Branches(f.OwnerDid(), f.RepoName)
12071207- if err != nil {
12081208- log.Println("failed to reach knotserver", err)
13061306+ var result types.RepoBranchesResponse
13071307+ if err := json.Unmarshal(xrpcBytes, &result); err != nil {
13081308+ log.Println("failed to decode XRPC response", err)
13091309+ s.pages.Error503(w)
12091310 return
12101311 }
12111312···12591360 }
1260136112611362 forkVal := r.URL.Query().Get("fork")
12621262-13631363+ repoString := strings.SplitN(forkVal, "/", 2)
13641364+ forkOwnerDid := repoString[0]
13651365+ forkName := repoString[1]
12631366 // fork repo
12641264- repo, err := db.GetRepo(s.db, user.Did, forkVal)
13671367+ repo, err := db.GetRepo(s.db, forkOwnerDid, forkName)
12651368 if err != nil {
12661369 log.Println("failed to get repo", user.Did, forkVal)
12671370 return
12681371 }
1269137212701270- sourceBranchesClient, err := knotclient.NewUnsignedClient(repo.Knot, s.config.Core.Dev)
12711271- if err != nil {
12721272- log.Printf("failed to create unsigned client for %s", repo.Knot)
12731273- s.pages.Error503(w)
12741274- return
13731373+ sourceScheme := "http"
13741374+ if !s.config.Core.Dev {
13751375+ sourceScheme = "https"
13761376+ }
13771377+ sourceHost := fmt.Sprintf("%s://%s", sourceScheme, repo.Knot)
13781378+ sourceXrpcc := &indigoxrpc.Client{
13791379+ Host: sourceHost,
12751380 }
1276138112771277- sourceResult, err := sourceBranchesClient.Branches(user.Did, repo.Name)
13821382+ sourceRepo := fmt.Sprintf("%s/%s", forkOwnerDid, repo.Name)
13831383+ sourceXrpcBytes, err := tangled.RepoBranches(r.Context(), sourceXrpcc, "", 0, sourceRepo)
12781384 if err != nil {
12791279- log.Println("failed to reach knotserver for source branches", err)
13851385+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
13861386+ log.Println("failed to call XRPC repo.branches for source", xrpcerr)
13871387+ s.pages.Error503(w)
13881388+ return
13891389+ }
13901390+ log.Println("failed to fetch source branches", err)
12801391 return
12811392 }
1282139312831283- targetBranchesClient, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
12841284- if err != nil {
12851285- log.Printf("failed to create unsigned client for target knot %s", f.Knot)
13941394+ // Decode source branches
13951395+ var sourceBranches types.RepoBranchesResponse
13961396+ if err := json.Unmarshal(sourceXrpcBytes, &sourceBranches); err != nil {
13971397+ log.Println("failed to decode source branches XRPC response", err)
12861398 s.pages.Error503(w)
12871399 return
12881400 }
1289140112901290- targetResult, err := targetBranchesClient.Branches(f.OwnerDid(), f.RepoName)
14021402+ targetScheme := "http"
14031403+ if !s.config.Core.Dev {
14041404+ targetScheme = "https"
14051405+ }
14061406+ targetHost := fmt.Sprintf("%s://%s", targetScheme, f.Knot)
14071407+ targetXrpcc := &indigoxrpc.Client{
14081408+ Host: targetHost,
14091409+ }
14101410+14111411+ targetRepo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
14121412+ targetXrpcBytes, err := tangled.RepoBranches(r.Context(), targetXrpcc, "", 0, targetRepo)
12911413 if err != nil {
12921292- log.Println("failed to reach knotserver for target branches", err)
14141414+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
14151415+ log.Println("failed to call XRPC repo.branches for target", xrpcerr)
14161416+ s.pages.Error503(w)
14171417+ return
14181418+ }
14191419+ log.Println("failed to fetch target branches", err)
14201420+ return
14211421+ }
14221422+14231423+ // Decode target branches
14241424+ var targetBranches types.RepoBranchesResponse
14251425+ if err := json.Unmarshal(targetXrpcBytes, &targetBranches); err != nil {
14261426+ log.Println("failed to decode target branches XRPC response", err)
14271427+ s.pages.Error503(w)
12931428 return
12941429 }
1295143012961296- sourceBranches := sourceResult.Branches
12971297- sort.Slice(sourceBranches, func(i int, j int) bool {
12981298- return sourceBranches[i].Commit.Committer.When.After(sourceBranches[j].Commit.Committer.When)
14311431+ sort.Slice(sourceBranches.Branches, func(i int, j int) bool {
14321432+ return sourceBranches.Branches[i].Commit.Committer.When.After(sourceBranches.Branches[j].Commit.Committer.When)
12991433 })
1300143413011435 s.pages.PullCompareForkBranchesFragment(w, pages.PullCompareForkBranchesParams{
13021436 RepoInfo: f.RepoInfo(user),
13031303- SourceBranches: sourceBranches,
13041304- TargetBranches: targetResult.Branches,
14371437+ SourceBranches: sourceBranches.Branches,
14381438+ TargetBranches: targetBranches.Branches,
13051439 })
13061440}
13071441···13961530 return
13971531 }
1398153213991399- ksClient, err := knotclient.NewUnsignedClient(f.Knot, s.config.Core.Dev)
14001400- if err != nil {
14011401- log.Printf("failed to create client for %s: %s", f.Knot, err)
14021402- s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
14031403- return
15331533+ scheme := "http"
15341534+ if !s.config.Core.Dev {
15351535+ scheme = "https"
15361536+ }
15371537+ host := fmt.Sprintf("%s://%s", scheme, f.Knot)
15381538+ xrpcc := &indigoxrpc.Client{
15391539+ Host: host,
14041540 }
1405154114061406- comparison, err := ksClient.Compare(f.OwnerDid(), f.RepoName, pull.TargetBranch, pull.PullSource.Branch)
15421542+ repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
15431543+ xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, pull.TargetBranch, pull.PullSource.Branch)
14071544 if err != nil {
15451545+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
15461546+ log.Println("failed to call XRPC repo.compare", xrpcerr)
15471547+ s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
15481548+ return
15491549+ }
14081550 log.Printf("compare request failed: %s", err)
14091551 s.pages.Notice(w, "resubmit-error", err.Error())
14101552 return
14111553 }
1412155415551555+ var comparison types.RepoFormatPatchResponse
15561556+ if err := json.Unmarshal(xrpcBytes, &comparison); err != nil {
15571557+ log.Println("failed to decode XRPC compare response", err)
15581558+ s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
15591559+ return
15601560+ }
15611561+14131562 sourceRev := comparison.Rev2
14141563 patch := comparison.Patch
14151564···14461595 }
1447159614481597 // extract patch by performing compare
14491449- ksClient, err := knotclient.NewUnsignedClient(forkRepo.Knot, s.config.Core.Dev)
15981598+ forkScheme := "http"
15991599+ if !s.config.Core.Dev {
16001600+ forkScheme = "https"
16011601+ }
16021602+ forkHost := fmt.Sprintf("%s://%s", forkScheme, forkRepo.Knot)
16031603+ forkRepoId := fmt.Sprintf("%s/%s", forkRepo.Did, forkRepo.Name)
16041604+ forkXrpcBytes, err := tangled.RepoCompare(r.Context(), &indigoxrpc.Client{Host: forkHost}, forkRepoId, pull.TargetBranch, pull.PullSource.Branch)
14501605 if err != nil {
14511451- log.Printf("failed to create client for %s: %s", forkRepo.Knot, err)
16061606+ if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
16071607+ log.Println("failed to call XRPC repo.compare for fork", xrpcerr)
16081608+ s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
16091609+ return
16101610+ }
16111611+ log.Printf("failed to compare branches: %s", err)
14521612 s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
14531613 return
14541614 }
1455161514561456- secret, err := db.GetRegistrationKey(s.db, forkRepo.Knot)
14571457- if err != nil {
14581458- log.Printf("failed to get registration key for %s: %s", forkRepo.Knot, err)
16161616+ var forkComparison types.RepoFormatPatchResponse
16171617+ if err := json.Unmarshal(forkXrpcBytes, &forkComparison); err != nil {
16181618+ log.Println("failed to decode XRPC compare response for fork", err)
14591619 s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
14601620 return
14611621 }
1462162214631623 // update the hidden tracking branch to latest
14641464- signedClient, err := knotclient.NewSignedClient(forkRepo.Knot, secret, s.config.Core.Dev)
16241624+ client, err := s.oauth.ServiceClient(
16251625+ r,
16261626+ oauth.WithService(forkRepo.Knot),
16271627+ oauth.WithLxm(tangled.RepoHiddenRefNSID),
16281628+ oauth.WithDev(s.config.Core.Dev),
16291629+ )
14651630 if err != nil {
14661466- log.Printf("failed to create signed client for %s: %s", forkRepo.Knot, err)
14671467- s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
16311631+ log.Printf("failed to connect to knot server: %v", err)
 s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
14681632 return
14691633 }
1470163414711471- resp, err := signedClient.NewHiddenRef(forkRepo.Did, forkRepo.Name, pull.PullSource.Branch, pull.TargetBranch)
14721472- if err != nil || resp.StatusCode != http.StatusNoContent {
14731473- log.Printf("failed to update tracking branch: %s", err)
14741474- s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
16351635+ resp, err := tangled.RepoHiddenRef(
16361636+ r.Context(),
16371637+ client,
16381638+ &tangled.RepoHiddenRef_Input{
16391639+ ForkRef: pull.PullSource.Branch,
16401640+ RemoteRef: pull.TargetBranch,
16411641+ Repo: forkRepo.RepoAt().String(),
16421642+ },
16431643+ )
16441644+ if err := xrpcclient.HandleXrpcErr(err); err != nil {
16451645+ s.pages.Notice(w, "resubmit-error", err.Error())
14751646 return
14761647 }
14771477-14781478- hiddenRef := fmt.Sprintf("hidden/%s/%s", pull.PullSource.Branch, pull.TargetBranch)
14791479- comparison, err := ksClient.Compare(forkRepo.Did, forkRepo.Name, hiddenRef, pull.PullSource.Branch)
14801480- if err != nil {
14811481- log.Printf("failed to compare branches: %s", err)
14821482- s.pages.Notice(w, "resubmit-error", err.Error())
16481648+ if !resp.Success {
16491649+ log.Println("Failed to update tracking ref.", "err", resp.Error)
16501650+ s.pages.Notice(w, "resubmit-error", "Failed to update tracking ref.")
14831651 return
14841652 }
16531653+16541654+ // Use the fork comparison we already made
16551655+ comparison := forkComparison
1485165614861657 sourceRev := comparison.Rev2
14871658 patch := comparison.Patch
···15661737 if pull.IsBranchBased() {
15671738 recordPullSource = &tangled.RepoPull_Source{
15681739 Branch: pull.PullSource.Branch,
17401740+ Sha: sourceRev,
15691741 }
15701742 }
15711743 if pull.IsForkBased() {
···15731745 recordPullSource = &tangled.RepoPull_Source{
15741746 Branch: pull.PullSource.Branch,
15751747 Repo: &repoAt,
17481748+ Sha: sourceRev,
15761749 }
15771750 }
15781751···15831756 SwapRecord: ex.Cid,
15841757 Record: &lexutil.LexiconTypeDecoder{
15851758 Val: &tangled.RepoPull{
15861586- Title: pull.Title,
15871587- PullId: int64(pull.PullId),
15881588- TargetRepo: string(f.RepoAt),
15891589- TargetBranch: pull.TargetBranch,
15901590- Patch: patch, // new patch
15911591- Source: recordPullSource,
17591759+ Title: pull.Title,
17601760+ Target: &tangled.RepoPull_Target{
17611761+ Repo: string(f.RepoAt()),
17621762+ Branch: pull.TargetBranch,
17631763+ },
17641764+ Patch: patch, // new patch
17651765+ Source: recordPullSource,
15921766 },
15931767 },
15941768 })
···16051779 }
1606178016071781 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
16081608- return
16091782}
1610178316111784func (s *Pulls) resubmitStackedPullHelper(
···1702187517031876 // deleted pulls are marked as deleted in the DB
17041877 for _, p := range deletions {
18781878+ // do not delete already merged PRs
18791879+ if p.State == db.PullMerged {
18801880+ continue
18811881+ }
18821882+17051883 err := db.DeletePull(tx, p.RepoAt, p.PullId)
17061884 if err != nil {
17071885 log.Println("failed to delete pull", err, p.PullId)
···17421920 op, _ := origById[id]
17431921 np, _ := newById[id]
1744192219231923+ // do not update already merged PRs
19241924+ if op.State == db.PullMerged {
19251925+ continue
19261926+ }
19271927+17451928 submission := np.Submissions[np.LastRoundNumber()]
1746192917471930 // resubmit the old pull
···18492032 }
1850203318512034 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
18521852- return
18532035}
1854203618552037func (s *Pulls) MergePull(w http.ResponseWriter, r *http.Request) {
···1887206918882070 patch := pullsToMerge.CombinedPatch()
1889207118901890- secret, err := db.GetRegistrationKey(s.db, f.Knot)
18911891- if err != nil {
18921892- log.Printf("no registration key found for domain %s: %s\n", f.Knot, err)
18931893- s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
18941894- return
18951895- }
18961896-18972072 ident, err := s.idResolver.ResolveIdent(r.Context(), pull.OwnerDid)
18982073 if err != nil {
18992074 log.Printf("resolving identity: %s", err)
···19062081 log.Printf("failed to get primary email: %s", err)
19072082 }
1908208319091909- ksClient, err := knotclient.NewSignedClient(f.Knot, secret, s.config.Core.Dev)
19101910- if err != nil {
19111911- log.Printf("failed to create signed client for %s: %s", f.Knot, err)
19121912- s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
19131913- return
20842084+ authorName := ident.Handle.String()
20852085+ mergeInput := &tangled.RepoMerge_Input{
20862086+ Did: f.OwnerDid(),
20872087+ Name: f.Name,
20882088+ Branch: pull.TargetBranch,
20892089+ Patch: patch,
20902090+ CommitMessage: &pull.Title,
20912091+ AuthorName: &authorName,
20922092+ }
20932093+20942094+ if pull.Body != "" {
20952095+ mergeInput.CommitBody = &pull.Body
19142096 }
1915209719161916- // Merge the pull request
19171917- resp, err := ksClient.Merge([]byte(patch), f.OwnerDid(), f.RepoName, pull.TargetBranch, pull.Title, pull.Body, ident.Handle.String(), email.Address)
20982098+ if email.Address != "" {
20992099+ mergeInput.AuthorEmail = &email.Address
21002100+ }
21012101+21022102+ client, err := s.oauth.ServiceClient(
21032103+ r,
21042104+ oauth.WithService(f.Knot),
21052105+ oauth.WithLxm(tangled.RepoMergeNSID),
21062106+ oauth.WithDev(s.config.Core.Dev),
21072107+ )
19182108 if err != nil {
19191919- log.Printf("failed to merge pull request: %s", err)
21092109+ log.Printf("failed to connect to knot server: %v", err)
19202110 s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
19212111 return
19222112 }
1923211319241924- if resp.StatusCode != http.StatusOK {
19251925- log.Printf("knotserver returned non-OK status code for merge: %d", resp.StatusCode)
19261926- s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
21142114+ err = tangled.RepoMerge(r.Context(), client, mergeInput)
21152115+ if err := xrpcclient.HandleXrpcErr(err); err != nil {
21162116+ s.pages.Notice(w, "pull-merge-error", err.Error())
19272117 return
19282118 }
19292119···19362126 defer tx.Rollback()
1937212719382128 for _, p := range pullsToMerge {
19391939- err := db.MergePull(tx, f.RepoAt, p.PullId)
21292129+ err := db.MergePull(tx, f.RepoAt(), p.PullId)
19402130 if err != nil {
19412131 log.Printf("failed to update pull request status in database: %s", err)
19422132 s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···19522142 return
19532143 }
1954214419551955- s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.RepoName, pull.PullId))
21452145+ s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId))
19562146}
1957214719582148func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) {
···1973216319742164 // auth filter: only owner or collaborators can close
19752165 roles := f.RolesInRepo(user)
21662166+ isOwner := roles.IsOwner()
19762167 isCollaborator := roles.IsCollaborator()
19772168 isPullAuthor := user.Did == pull.OwnerDid
19781978- isCloseAllowed := isCollaborator || isPullAuthor
21692169+ isCloseAllowed := isOwner || isCollaborator || isPullAuthor
19792170 if !isCloseAllowed {
19802171 log.Println("failed to close pull")
19812172 s.pages.Notice(w, "pull-close", "You are unauthorized to close this pull.")
···2003219420042195 for _, p := range pullsToClose {
20052196 // Close the pull in the database
20062006- err = db.ClosePull(tx, f.RepoAt, p.PullId)
21972197+ err = db.ClosePull(tx, f.RepoAt(), p.PullId)
20072198 if err != nil {
20082199 log.Println("failed to close pull", err)
20092200 s.pages.Notice(w, "pull-close", "Failed to close pull.")
···20192210 }
2020221120212212 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
20222022- return
20232213}
2024221420252215func (s *Pulls) ReopenPull(w http.ResponseWriter, r *http.Request) {
···2041223120422232 // auth filter: only owner or collaborators can close
20432233 roles := f.RolesInRepo(user)
22342234+ isOwner := roles.IsOwner()
20442235 isCollaborator := roles.IsCollaborator()
20452236 isPullAuthor := user.Did == pull.OwnerDid
20462046- isCloseAllowed := isCollaborator || isPullAuthor
22372237+ isCloseAllowed := isOwner || isCollaborator || isPullAuthor
20472238 if !isCloseAllowed {
20482239 log.Println("failed to close pull")
20492240 s.pages.Notice(w, "pull-close", "You are unauthorized to close this pull.")
···2071226220722263 for _, p := range pullsToReopen {
20732264 // Close the pull in the database
20742074- err = db.ReopenPull(tx, f.RepoAt, p.PullId)
22652265+ err = db.ReopenPull(tx, f.RepoAt(), p.PullId)
20752266 if err != nil {
20762267 log.Println("failed to close pull", err)
20772268 s.pages.Notice(w, "pull-close", "Failed to close pull.")
···20872278 }
2088227920892280 s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
20902090- return
20912281}
2092228220932283func newStack(f *reporesolver.ResolvedRepo, user *oauth.User, targetBranch, patch string, pullSource *db.PullSource, stackId string) (db.Stack, error) {
···2113230321142304 title := fp.Title
21152305 body := fp.Body
21162116- rkey := appview.TID()
23062306+ rkey := tid.TID()
2117230721182308 initialSubmission := db.PullSubmission{
21192309 Patch: fp.Raw,
···21242314 Body: body,
21252315 TargetBranch: targetBranch,
21262316 OwnerDid: user.Did,
21272127- RepoAt: f.RepoAt,
23172317+ RepoAt: f.RepoAt(),
21282318 Rkey: rkey,
21292319 Submissions: []*db.PullSubmission{
21302320 &initialSubmission,
+2
appview/pulls/router.go
···4444 r.Get("/", s.ResubmitPull)
4545 r.Post("/", s.ResubmitPull)
4646 })
4747+ // permissions here require us to know the pull author,
4848+ // so they are handled within the route handler
4749 r.Post("/close", s.ClosePull)
4850 r.Post("/reopen", s.ReopenPull)
4951 // collaborators only
···1111### message format
12121313```
1414-<service/top-level directory>: <affected package/directory>: <short summary of change>
1414+<service/top-level directory>/<affected package/directory>: <short summary of change>
151516161717Optional longer description can go here, if necessary. Explain what the
···2323Here are some examples:
24242525```
2626-appview: state: fix token expiry check in middleware
2626+appview/state: fix token expiry check in middleware
27272828The previous check did not account for clock drift, leading to premature
2929token invalidation.
3030```
31313232```
3333-knotserver: git/service: improve error checking in upload-pack
3333+knotserver/git/service: improve error checking in upload-pack
3434```
35353636···5454- Don't include unrelated changes in the same commit.
5555- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
5656before submitting if necessary.
5757+5858+## code formatting
5959+6060+We use a variety of tools to format our code, and multiplex them with
6161+[`treefmt`](https://treefmt.com): all you need to do to format your changes
6262+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
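For example, from the repo root:

```shell
# format the tree via the flake app
nix run .#fmt
# or, if you're already in the devshell
treefmt
```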
57635864## proposals for bigger changes
5965···115121If you're submitting a PR with multiple commits, make sure each one is
116122signed.
117123118118-For [jj](https://jj-vcs.github.io/jj/latest/) users, you can add this to
119119-your jj config:
124124+For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following commands
125125+to make jj sign off commits in the tangled repo:
120126121121-```
122122-ui.should-sign-off = true
123123-```
124124-125125-and to your `templates.draft_commit_description`, add the following `if`
126126-block:
127127-128128-```
129129- if(
130130- config("ui.should-sign-off").as_boolean() && !description.contains("Signed-off-by: " ++ author.name()),
131131- "\nSigned-off-by: " ++ author.name() ++ " <" ++ author.email() ++ ">",
132132- ),
127127+```shell
128128+# Safety check, should say "No matching config key..."
129129+jj config list templates.commit_trailers
130130+# The command below may need to be adjusted if the command above returned something.
131131+jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
133132```
134133135134Refer to the [jj
136136-documentation](https://jj-vcs.github.io/jj/latest/config/#default-description)
135135+documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
137136for more information.
+101-11
docs/hacking.md
···3232nix run .#watch-tailwind
3333```
34343535-## running a knot
3535+To authenticate with the appview, you will need redis and
3636+OAuth JWKs set up:
3737+3838+```
3939+# oauth jwks should already be set up by the nix devshell:
4040+echo $TANGLED_OAUTH_JWKS
4141+{"crv":"P-256","d":"tELKHYH-Dko6qo4ozYcVPE1ah6LvXHFV2wpcWpi8ab4","kid":"1753352226","kty":"EC","x":"mRzYpLzAGq74kJez9UbgGfV040DxgsXpMbaVsdy8RZs","y":"azqqXzUYywMlLb2Uc5AVG18nuLXyPnXr4kI4T39eeIc"}
4242+4343+# if not, you can set it up yourself:
4444+go build -o genjwks.out ./cmd/genjwks
4545+export TANGLED_OAUTH_JWKS="$(./genjwks.out)"
4747+# run redis in a new shell to store oauth sessions
4848+redis-server
4949+```
5050+5151+## running knots and spindles
36523753An end-to-end knot setup requires setting up a machine with
3854`sshd`, `AuthorizedKeysCommand`, and a git user, which is
3955quite cumbersome. So the nix flake provides a
4056`nixosConfiguration` to do so.
41574242-To begin, head to `http://localhost:3000` in the browser and
4343-generate a knot secret. Replace the existing secret in
4444-`flake.nix` with the newly generated secret.
5858+<details>
5959+  <summary><strong>macOS users will have to set up a Nix builder first</strong></summary>
45604646-You can now start a lightweight NixOS VM using
4747-`nixos-shell` like so:
6161+ In order to build Tangled's dev VM on macOS, you will
6262+ first need to set up a Linux Nix builder. The recommended
6363+ way to do so is to run a [`darwin.linux-builder`
6464+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
6565+ and to register it in `nix.conf` as a builder for Linux
6666+  with the same architecture as your Mac (`aarch64-linux` if
6767+  you are using Apple Silicon).
6868+6969+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
7070+ > the tangled repo so that it doesn't conflict with the other VM. For example,
7171+ > you can do
7272+ >
7373+ > ```shell
7474+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
7575+ > ```
7676+ >
7777+ > to store the builder VM in a temporary dir.
7878+ >
7979+  > You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
8080+  > to avoid subtle problems.
8181+8282+ Alternatively, you can use any other method to set up a
8383+ Linux machine with `nix` installed that you can `sudo ssh`
8484+ into (in other words, root user on your Mac has to be able
8585+ to ssh into the Linux machine without entering a password)
8686+ and that has the same architecture as your Mac. See
8787+ [remote builder
8888+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
8989+ for how to register such a builder in `nix.conf`.
9090+9191+ > WARNING: If you'd like to use
9292+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
9393+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
9494+ > ssh` works can be tricky. It seems to be [possible with
9595+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
9696+9797+</details>
9898+9999+To begin, grab your DID from http://localhost:3000/settings.
100100+Then, set `TANGLED_VM_KNOT_OWNER` and
101101+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
102102+lightweight NixOS VM like so:
4810349104```bash
5050-QEMU_NET_OPTS="hostfwd=tcp::6000-:6000,hostfwd=tcp::2222-:22" nixos-shell --flake .#knotVM
105105+nix run --impure .#vm
511065252-# hit Ctrl-a + c + q to exit the VM
107107+# type `poweroff` at the shell to exit the VM
53108```
541095555-This starts a knot on port 6000 with `ssh` exposed on port
5656-2222. You can push repositories to this VM with this ssh
5757-config block on your main machine:
110110+This starts a knot on port 6000 and a spindle on port 6555,
111111+with `ssh` exposed on port 2222.
112112+113113+Once the services are running, head to
114114+http://localhost:3000/knots and hit verify. It should
115115+verify the ownership of the services instantly if everything
116116+went smoothly.
117117+118118+You can push repositories to this VM with this ssh config
119119+block on your main machine:
5812059121```bash
60122Host nixos-shell
···70132git remote add local-dev git@nixos-shell:user/repo
71133git push local-dev main
72134```
135135+136136+### running a spindle
137137+138138+The above VM should already be running a spindle on
139139+`localhost:6555`. Head to http://localhost:3000/spindles and
140140+hit verify. You can then configure each repository to use
141141+this spindle and run CI jobs.
142142+143143+Of interest when debugging spindles:
144144+145145+```
146146+# service logs from journald:
147147+journalctl -xeu spindle
148148+149149+# CI job logs from disk:
150150+ls /var/log/spindle
151151+152152+# debugging spindle db:
153153+sqlite3 /var/lib/spindle/spindle.db
154154+155155+# litecli has a nicer REPL interface:
156156+litecli /var/lib/spindle/spindle.db
157157+```
158158+159159+If for any reason you wish to disable either one of the
160160+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161161+`services.tangled-spindle.enable` (or
162162+`services.tangled-knot.enable`) to `false`.
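
For example, a one-line sketch of the spindle toggle inside the module
config in [nix/vm.nix](/nix/vm.nix):

```nix
# turn off the spindle service in the dev VM
services.tangled-spindle.enable = false;
```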
+59-6
docs/knot-hosting.md
···2233So you want to run your own knot server? Great! Here are a few prerequisites:
4455-1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux of some kind.
55+1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
662. A (sub)domain name. People generally use `knot.example.com`.
773. A valid SSL certificate for your domain.
88···5959EOF
6060```
61616262+Then, reload `sshd`:
6363+6464+```
6565+sudo systemctl reload ssh
6666+```
6767+6268Next, create the `git` user. We'll use the `git` user's home directory
6369to store repositories:
6470···6773```
68746975Create `/home/git/.knot.env` with the following, updating the values as
7070-necessary. The `KNOT_SERVER_SECRET` can be obtaind from the
7171-[/knots](/knots) page on Tangled.
7676+necessary. The `KNOT_SERVER_OWNER` should be set to your
7777+DID; you can find your DID on the [Settings](https://tangled.sh/settings) page.
72787379```
7480KNOT_REPO_SCAN_PATH=/home/git
7581KNOT_SERVER_HOSTNAME=knot.example.com
7682APPVIEW_ENDPOINT=https://tangled.sh
7777-KNOT_SERVER_SECRET=secret
8383+KNOT_SERVER_OWNER=did:plc:foobar
7884KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
7985KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
8086```
···8995systemctl start knotserver
9096```
91979292-You should now have a running knot server! You can finalize your registration by hitting the
9393-`initialize` button on the [/knots](/knots) page.
9898+The last step is to configure a reverse proxy like Nginx or Caddy to front your
9999+knot. Here's an example configuration for Nginx:
100100+101101+```
102102+server {
103103+ listen 80;
104104+ listen [::]:80;
105105+ server_name knot.example.com;
106106+107107+ location / {
108108+ proxy_pass http://localhost:5555;
109109+ proxy_set_header Host $host;
110110+ proxy_set_header X-Real-IP $remote_addr;
111111+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
112112+ proxy_set_header X-Forwarded-Proto $scheme;
113113+ }
114114+115115+ # wss endpoint for git events
116116+ location /events {
117117+ proxy_set_header X-Forwarded-For $remote_addr;
118118+ proxy_set_header Host $http_host;
119119+ proxy_set_header Upgrade websocket;
120120+ proxy_set_header Connection Upgrade;
121121+ proxy_pass http://localhost:5555;
122122+ }
123123+    # additional config for SSL/TLS goes here.
124124+}
125125+126126+```
127127+128128+Remember to use Let's Encrypt or similar to procure a certificate for your
129129+knot domain.
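
If you'd rather not manage certificates and websocket headers by hand,
Caddy does both automatically; a minimal sketch of an equivalent
Caddyfile:

```
knot.example.com {
	reverse_proxy localhost:5555
}
```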
130130+131131+You should now have a running knot server! You can finalize
132132+your registration by hitting the `verify` button on the
133133+[/knots](https://tangled.sh/knots) page. This simply creates
134134+a record on your PDS to announce the existence of the knot.
9413595136### custom paths
96137···158199```
159200160201Make sure to restart your SSH server!
202202+203203+#### MOTD (message of the day)
204204+205205+To configure the MOTD used ("Welcome to this knot!" by default), edit the
206206+`/home/git/motd` file:
207207+208208+```
209209+printf "Hi from this knot!\n" > /home/git/motd
210210+```
211211+212212+Note that you should add a newline at the end if setting a non-empty message
213213+since the knot won't do this for you.
+60
docs/migrations.md
···11+# Migrations
22+33+This document is laid out in reverse-chronological order.
44+Newer migration guides are listed first, and older guides
55+are further down the page.
66+77+## Upgrading from v1.8.x
88+99+After v1.8.2, the HTTP API for knots and spindles has been
1010+deprecated and replaced with XRPC. Repositories on outdated
1111+knots will not be viewable from the appview. Upgrading is
1212+straightforward, however.
1313+1414+For knots:
1616+- Upgrade to the latest tag (v1.9.0 or above)
1717+- Head to the [knot dashboard](https://tangled.sh/knots) and
1818+ hit the "retry" button to verify your knot
1919+2020+For spindles:
2222+- Upgrade to the latest tag (v1.9.0 or above)
2323+- Head to the [spindle
2424+ dashboard](https://tangled.sh/spindles) and hit the
2525+ "retry" button to verify your spindle
2626+2727+## Upgrading from v1.7.x
2828+2929+After v1.7.0, knot secrets have been deprecated. You no
3030+longer need a secret from the appview to run a knot. All
3131+authorized commands to knots are managed via [Inter-Service
3232+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
3333+Knots will be read-only until upgraded.
3434+3535+Upgrading is quite easy, in essence:
3636+3737+- `KNOT_SERVER_SECRET` is no more; you can remove this
3838+  environment variable entirely
3939+- `KNOT_SERVER_OWNER` is now required on boot; set this to
4040+ your DID. You can find your DID in the
4141+ [settings](https://tangled.sh/settings) page.
4242+- Restart your knot once you have replaced the environment
4343+ variable
4444+- Head to the [knot dashboard](https://tangled.sh/knots) and
4545+ hit the "retry" button to verify your knot. This simply
4646+ writes a `sh.tangled.knot` record to your PDS.
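
In terms of the `/home/git/.knot.env` file from the [knot hosting
guide](/docs/knot-hosting.md), the change amounts to:

```diff
-KNOT_SERVER_SECRET=secret
+KNOT_SERVER_OWNER=did:plc:foobar
```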
4747+4848+If you use the nix module, simply bump the flake to the
4949+latest revision, and change your config block like so:
5050+5151+```diff
5252+ services.tangled-knot = {
5353+ enable = true;
5454+ server = {
5555+- secretFile = /path/to/secret;
5656++ owner = "did:plc:foo";
5757+ };
5858+ };
5959+```
6060+
+25
docs/spindle/architecture.md
···11+# spindle architecture
22+33+Spindle is a small CI runner service. Here's a high level overview of how it operates:
44+55+* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
66+[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
77+* when a new repo record comes through (typically when you add a spindle to a
88+repo from the settings), spindle then resolves the underlying knot and
99+subscribes to repo events (see:
1010+[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
1111+* the spindle engine then handles execution of the pipeline, with results and
1212+logs beamed on the spindle event stream over wss
1313+1414+### the engine
1515+1616+At present, the only supported backend is Docker (and Podman, if Docker
1717+compatibility is enabled, so that `/run/docker.sock` is created). Spindle
1818+executes each step in the pipeline in a fresh container, with state persisted
1919+across steps within the `/tangled/workspace` directory.
2020+2121+The base image for the container is constructed on the fly using
2222+[Nixery](https://nixery.dev), which is handy for caching layers for frequently
2323+used packages.
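
For illustration, Nixery encodes the requested package set directly in
the image name, so a pipeline depending on `go` and `gcc` ends up
pulling an image along these lines (the exact reference spindle
constructs is an implementation detail):

```
docker pull nixery.tangled.sh/shell/go/gcc
```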
2424+2525+The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
+52
docs/spindle/hosting.md
···11+# spindle self-hosting guide
22+33+## prerequisites
44+55+* Go
66+* Docker (the only supported backend currently)
77+88+## configuration
99+1010+Spindle is configured using environment variables. The following environment variables are available:
1111+1212+* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
1313+* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
1414+* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
1515+* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
1616+* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
1717+* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
1818+* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
1919+* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
2020+* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
2121+2222+## running spindle
2323+2424+1. **Set the environment variables.** For example:
2525+2626+ ```shell
2727+ export SPINDLE_SERVER_HOSTNAME="your-hostname"
2828+ export SPINDLE_SERVER_OWNER="your-did"
2929+ ```
3030+3131+2. **Build the Spindle binary.**
3232+3333+ ```shell
3434+ cd core
3535+ go mod download
3636+ go build -o cmd/spindle/spindle cmd/spindle/main.go
3737+ ```
3838+3939+3. **Create the log directory.**
4040+4141+ ```shell
4242+ sudo mkdir -p /var/log/spindle
4343+ sudo chown $USER:$USER -R /var/log/spindle
4444+ ```
4545+4646+4. **Run the Spindle binary.**
4747+4848+ ```shell
4949+ ./cmd/spindle/spindle
5050+ ```
5151+5252+Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
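
For a long-lived deployment you'll probably want a process supervisor.
A minimal systemd unit sketch, assuming the binary was installed to
`/usr/local/bin/spindle` (the unit and paths are illustrative, not
shipped with spindle):

```ini
# /etc/systemd/system/spindle.service (hypothetical)
[Unit]
Description=spindle CI runner
After=network.target docker.service

[Service]
Environment=SPINDLE_SERVER_HOSTNAME=your-hostname
Environment=SPINDLE_SERVER_OWNER=your-did
ExecStart=/usr/local/bin/spindle
Restart=on-failure

[Install]
WantedBy=multi-user.target
```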
+285
docs/spindle/openbao.md
···11+# spindle secrets with openbao
22+33+This document covers setting up Spindle to use OpenBao for secrets
44+management via OpenBao Proxy instead of the default SQLite backend.
55+66+## overview
77+88+Spindle now uses OpenBao Proxy for secrets management. The proxy handles
99+authentication automatically using AppRole credentials, while Spindle
1010+connects to the local proxy instead of directly to the OpenBao server.
1111+1212+This approach provides better security, automatic token renewal, and
1313+simplified application code.
1414+1515+## installation
1616+1717+Install OpenBao from nixpkgs:
1818+1919+```bash
2020+nix shell nixpkgs#openbao # for a local server
2121+```
2222+2323+## setup
2424+2525+The setup process is documented for both local development and production.
2626+2727+### local development
2828+2929+Start OpenBao in dev mode:
3030+3131+```bash
3232+bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
3333+```
3434+3535+This starts OpenBao on `http://localhost:8200` with a root token.
3636+3737+Set up environment for bao CLI:
3838+3939+```bash
4040+export BAO_ADDR=http://localhost:8200
4141+export BAO_TOKEN=root
4242+```
4343+4444+### production
4545+4646+You would typically use a systemd service with a configuration file. Refer to
4747+[@tangled.sh/infra](https://tangled.sh/@tangled.sh/infra) for how this can be
4848+achieved using Nix.
4949+5050+Then, initialize the bao server:
5151+```bash
5252+bao operator init -key-shares=1 -key-threshold=1
5353+```
5454+5555+This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
5656+```bash
5757+bao operator unseal <unseal_key>
5858+```
5959+6060+All steps below remain the same across both dev and production setups.
6161+6262+### configure openbao server
6363+6464+Create the spindle KV mount:
6565+6666+```bash
6767+bao secrets enable -path=spindle -version=2 kv
6868+```
6969+7070+Set up AppRole authentication and policy:
7171+7272+Create a policy file `spindle-policy.hcl`:
7373+7474+```hcl
7575+# Full access to spindle KV v2 data
7676+path "spindle/data/*" {
7777+ capabilities = ["create", "read", "update", "delete"]
7878+}
7979+8080+# Access to metadata for listing and management
8181+path "spindle/metadata/*" {
8282+ capabilities = ["list", "read", "delete", "update"]
8383+}
8484+8585+# Allow listing at root level
8686+path "spindle/" {
8787+ capabilities = ["list"]
8888+}
8989+9090+# Required for connection testing and health checks
9191+path "auth/token/lookup-self" {
9292+ capabilities = ["read"]
9393+}
9494+```
9595+9696+Apply the policy and create an AppRole:
9797+9898+```bash
9999+bao policy write spindle-policy spindle-policy.hcl
100100+bao auth enable approle
101101+bao write auth/approle/role/spindle \
102102+ token_policies="spindle-policy" \
103103+ token_ttl=1h \
104104+ token_max_ttl=4h \
105105+ bind_secret_id=true \
106106+ secret_id_ttl=0 \
107107+ secret_id_num_uses=0
108108+```
109109+110110+Get the credentials:
111111+112112+```bash
113113+# Get role ID (static)
114114+ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115115+116116+# Generate secret ID
117117+SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118118+119119+echo "Role ID: $ROLE_ID"
120120+echo "Secret ID: $SECRET_ID"
121121+```
122122+123123+### create proxy configuration
124124+125125+Create the credential files:
126126+127127+```bash
128128+# Create directory for OpenBao files
129129+mkdir -p /tmp/openbao
130130+131131+# Save credentials
132132+echo "$ROLE_ID" > /tmp/openbao/role-id
133133+echo "$SECRET_ID" > /tmp/openbao/secret-id
134134+chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135135+```
136136+137137+Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138138+139139+```hcl
140140+# OpenBao server connection
141141+vault {
142142+ address = "http://localhost:8200"
143143+}
144144+145145+# Auto-Auth using AppRole
146146+auto_auth {
147147+ method "approle" {
148148+ mount_path = "auth/approle"
149149+ config = {
150150+ role_id_file_path = "/tmp/openbao/role-id"
151151+ secret_id_file_path = "/tmp/openbao/secret-id"
152152+ }
153153+ }
154154+155155+ # Optional: write token to file for debugging
156156+ sink "file" {
157157+ config = {
158158+ path = "/tmp/openbao/token"
159159+ mode = 0640
160160+ }
161161+ }
162162+}
163163+164164+# Proxy listener for Spindle
165165+listener "tcp" {
166166+ address = "127.0.0.1:8201"
167167+ tls_disable = true
168168+}
169169+170170+# Enable API proxy with auto-auth token
171171+api_proxy {
172172+ use_auto_auth_token = true
173173+}
174174+175175+# Enable response caching
176176+cache {
177177+ use_auto_auth_token = true
178178+}
179179+180180+# Logging
181181+log_level = "info"
182182+```
183183+184184+### start the proxy
185185+186186+Start OpenBao Proxy:
187187+188188+```bash
189189+bao proxy -config=/tmp/openbao/proxy.hcl
190190+```
191191+192192+The proxy will authenticate with OpenBao and start listening on
193193+`127.0.0.1:8201`.
194194+195195+### configure spindle
196196+197197+Set these environment variables for Spindle:
198198+199199+```bash
200200+export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201201+export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202202+export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203203+```
204204+205205+Start Spindle as usual (see the [hosting guide](/docs/spindle/hosting.md)).
206206+207207+Spindle will now connect to the local proxy, which handles all
208208+authentication automatically.
209209+210210+## production setup for proxy
211211+212212+For production, you'll want to run the proxy as a service.
213213+214214+Place your production configuration in `/etc/openbao/proxy.hcl` with
215215+proper TLS settings for the vault connection.
216216+217217+## verifying setup
218218+219219+Test the proxy directly:
220220+221221+```bash
222222+# Check proxy health
223223+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224224+225225+# Test token lookup through proxy
226226+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227227+```
228228+229229+Test OpenBao operations through the server:
230230+231231+```bash
232232+# List all secrets
233233+bao kv list spindle/
234234+235235+# Add a test secret via Spindle API, then check it exists
236236+bao kv list spindle/repos/
237237+238238+# Get a specific secret
239239+bao kv get spindle/repos/your_repo_path/SECRET_NAME
240240+```
241241+242242+## how it works
243243+244244+- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245245+- The proxy authenticates with OpenBao using AppRole credentials
246246+- All Spindle requests go through the proxy, which injects authentication tokens
247247+- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248248+- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249249+- The proxy handles all token renewal automatically
250250+- Spindle no longer manages tokens or authentication directly
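
For example, a secret stored for `did:plc:alice/myrepo` under the key
`MY_SECRET` (a hypothetical name) would be readable through the bao CLI
at the sanitized path:

```bash
bao kv get spindle/repos/did_plc_alice_myrepo/MY_SECRET
```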
251251+252252+## troubleshooting
253253+254254+**Connection refused**: Check that the OpenBao Proxy is running and
255255+listening on the configured address.
256256+257257+**403 errors**: Verify the AppRole credentials are correct and the policy
258258+has the necessary permissions.
259259+260260+**404 route errors**: The spindle KV mount probably doesn't exist - run
261261+the mount creation step again.
262262+263263+**Proxy authentication failures**: Check the proxy logs and verify the
264264+role-id and secret-id files are readable and contain valid credentials.
265265+266266+**Secret not found after writing**: This can indicate policy permission
267267+issues. Verify the policy includes both `spindle/data/*` and
268268+`spindle/metadata/*` paths with appropriate capabilities.
269269+270270+Check proxy logs:
271271+272272+```bash
273273+# If running as systemd service
274274+journalctl -u openbao-proxy -f
275275+276276+# If running directly, check the console output
277277+```
278278+279279+Test AppRole authentication manually:
280280+281281+```bash
282282+bao write auth/approle/login \
283283+ role_id="$(cat /tmp/openbao/role-id)" \
284284+ secret_id="$(cat /tmp/openbao/secret-id)"
285285+```
+165
docs/spindle/pipeline.md
···11+# spindle pipelines
22+33+Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
44+55+The fields are:
66+77+- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
88+- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
99+- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
1010+- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
1111+- [Environment](#environment): An **optional** field that allows you to define environment variables.
1212+- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
1313+1414+## Trigger
1515+1616+The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
1717+1818+- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
1919+ - `push`: The workflow should run every time a commit is pushed to the repository.
2020+ - `pull_request`: The workflow should run every time a pull request is made or updated.
2121+ - `manual`: The workflow can be triggered manually.
2222+- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
2323+2424+For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
2525+2626+```yaml
2727+when:
2828+ - event: ["push", "manual"]
2929+ branch: ["main", "develop"]
3030+ - event: ["pull_request"]
3131+ branch: ["main"]
3232+```
3333+3434+## Engine
3535+3636+Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
3737+3838+- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
3939+4040+Example:
4141+4242+```yaml
4343+engine: "nixery"
4444+```
4545+4646+## Clone options
4747+4848+When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
4949+5050+- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
5151+- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
5252+- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
5353+5454+The default settings are:
5555+5656+```yaml
5757+clone:
5858+ skip: false
5959+ depth: 1
6060+ submodules: false
6161+```
6262+6363+## Dependencies
6464+6565+Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
6666+6767+Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` from your own registry at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
6868+6969+```yaml
7070+dependencies:
7171+ # nixpkgs
7272+ nixpkgs:
7373+ - nodejs
7474+ - go
7575+ # custom registry
7676+ git+https://tangled.sh/@example.com/my_pkg:
7777+ - my_pkg
7878+```
7979+8080+Now these dependencies are available to use in your workflow!
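
For example, a minimal sketch of a later [step](#steps) invoking them:

```yaml
steps:
  - name: "Check toolchain"
    command: "node --version && go version"
```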
8181+8282+## Environment
8383+8484+The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
8585+8686+Example:
8787+8888+```yaml
8989+environment:
9090+ GOOS: "linux"
9191+ GOARCH: "arm64"
9292+ NODE_ENV: "production"
9393+ MY_ENV_VAR: "MY_ENV_VALUE"
9494+```
9595+9696+## Steps
9797+9898+The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
9999+100100+- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
101101+- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
102102+- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103103+104104+Example:
105105+106106+```yaml
107107+steps:
108108+ - name: "Build backend"
109109+ command: "go build"
110110+ environment:
111111+ GOOS: "darwin"
112112+ GOARCH: "arm64"
113113+ - name: "Build frontend"
114114+ command: "npm run build"
115115+ environment:
116116+ NODE_ENV: "production"
117117+```
118118+119119+## Complete workflow
120120+121121+```yaml
122122+# .tangled/workflows/build.yml
123123+124124+when:
125125+ - event: ["push", "manual"]
126126+ branch: ["main", "develop"]
127127+ - event: ["pull_request"]
128128+ branch: ["main"]
129129+130130+engine: "nixery"
131131+132132+# using the default values
133133+clone:
134134+ skip: false
135135+ depth: 1
136136+ submodules: false
137137+138138+dependencies:
139139+ # nixpkgs
140140+ nixpkgs:
141141+ - nodejs
142142+ - go
143143+ # custom registry
144144+ git+https://tangled.sh/@example.com/my_pkg:
145145+ - my_pkg
146146+147147+environment:
148148+ GOOS: "linux"
149149+ GOARCH: "arm64"
150150+ NODE_ENV: "production"
151151+ MY_ENV_VAR: "MY_ENV_VALUE"
152152+153153+steps:
154154+ - name: "Build backend"
155155+ command: "go build"
156156+ environment:
157157+ GOOS: "darwin"
158158+ GOARCH: "arm64"
159159+ - name: "Build frontend"
160160+ command: "npm run build"
161161+ environment:
162162+ NODE_ENV: "production"
163163+```
164164+165165+If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
···11+package xrpc
22+33+import (
44+ "fmt"
55+ "net/http"
66+ "runtime/debug"
77+88+ "tangled.sh/tangled.sh/core/api/tangled"
99+)
1010+1111+// version is set at build time.
1212+var version string
1313+1414+func (x *Xrpc) Version(w http.ResponseWriter, r *http.Request) {
1515+ if version == "" {
1616+ info, ok := debug.ReadBuildInfo()
1717+ if !ok {
1818+ http.Error(w, "failed to read build info", http.StatusInternalServerError)
1919+ return
2020+ }
2121+2222+ var modVer string
2323+ var sha string
2424+ var modified bool
2525+2626+ for _, mod := range info.Deps {
2727+ if mod.Path == "tangled.sh/tangled.sh/knotserver/xrpc" {
2828+ modVer = mod.Version
2929+ break
3030+ }
3131+ }
3232+3333+ for _, setting := range info.Settings {
3434+ switch setting.Key {
3535+ case "vcs.revision":
3636+ sha = setting.Value
3737+ case "vcs.modified":
3838+ modified = setting.Value == "true"
3939+ }
4040+ }
4141+4242+ if modVer == "" {
4343+ modVer = "unknown"
4444+ }
4545+4646+ if sha == "" {
4747+ version = modVer
4848+ } else if modified {
4949+ version = fmt.Sprintf("%s (%s with modifications)", modVer, sha)
5050+ } else {
5151+ version = fmt.Sprintf("%s (%s)", modVer, sha)
5252+ }
5353+ }
5454+5555+ response := tangled.KnotVersion_Output{
5656+ Version: version,
5757+ }
5858+5959+ writeJson(w, response)
6060+}
+127
knotserver/xrpc/xrpc.go
···11+package xrpc
22+33+import (
44+ "encoding/json"
55+ "log/slog"
66+ "net/http"
77+ "strings"
88+99+ securejoin "github.com/cyphar/filepath-securejoin"
1010+ "tangled.sh/tangled.sh/core/api/tangled"
1111+ "tangled.sh/tangled.sh/core/idresolver"
1212+ "tangled.sh/tangled.sh/core/jetstream"
1313+ "tangled.sh/tangled.sh/core/knotserver/config"
1414+ "tangled.sh/tangled.sh/core/knotserver/db"
1515+ "tangled.sh/tangled.sh/core/notifier"
1616+ "tangled.sh/tangled.sh/core/rbac"
1717+ xrpcerr "tangled.sh/tangled.sh/core/xrpc/errors"
1818+ "tangled.sh/tangled.sh/core/xrpc/serviceauth"
1919+2020+ "github.com/go-chi/chi/v5"
2121+)
2222+2323+type Xrpc struct {
2424+ Config *config.Config
2525+ Db *db.DB
2626+ Ingester *jetstream.JetstreamClient
2727+ Enforcer *rbac.Enforcer
2828+ Logger *slog.Logger
2929+ Notifier *notifier.Notifier
3030+ Resolver *idresolver.Resolver
3131+ ServiceAuth *serviceauth.ServiceAuth
3232+}
3333+3434+func (x *Xrpc) Router() http.Handler {
3535+ r := chi.NewRouter()
3636+3737+ r.Group(func(r chi.Router) {
3838+ r.Use(x.ServiceAuth.VerifyServiceAuth)
3939+4040+ r.Post("/"+tangled.RepoSetDefaultBranchNSID, x.SetDefaultBranch)
4141+ r.Post("/"+tangled.RepoCreateNSID, x.CreateRepo)
4242+ r.Post("/"+tangled.RepoDeleteNSID, x.DeleteRepo)
4343+ r.Post("/"+tangled.RepoForkStatusNSID, x.ForkStatus)
4444+ r.Post("/"+tangled.RepoForkSyncNSID, x.ForkSync)
4545+ r.Post("/"+tangled.RepoHiddenRefNSID, x.HiddenRef)
4646+ r.Post("/"+tangled.RepoMergeNSID, x.Merge)
4747+ })
4848+4949+ // merge check is an open endpoint
5050+ //
5151+ // TODO: should we constrain this more?
5252+ // - we can calculate on PR submit/resubmit/gitRefUpdate etc.
5353+ // - use ETags on clients to keep requests to a minimum
5454+ r.Post("/"+tangled.RepoMergeCheckNSID, x.MergeCheck)
5555+5656+ // repo query endpoints (no auth required)
5757+ r.Get("/"+tangled.RepoTreeNSID, x.RepoTree)
5858+ r.Get("/"+tangled.RepoLogNSID, x.RepoLog)
5959+ r.Get("/"+tangled.RepoBranchesNSID, x.RepoBranches)
6060+ r.Get("/"+tangled.RepoTagsNSID, x.RepoTags)
6161+ r.Get("/"+tangled.RepoBlobNSID, x.RepoBlob)
6262+ r.Get("/"+tangled.RepoDiffNSID, x.RepoDiff)
6363+ r.Get("/"+tangled.RepoCompareNSID, x.RepoCompare)
6464+ r.Get("/"+tangled.RepoGetDefaultBranchNSID, x.RepoGetDefaultBranch)
6565+ r.Get("/"+tangled.RepoBranchNSID, x.RepoBranch)
6666+ r.Get("/"+tangled.RepoArchiveNSID, x.RepoArchive)
6767+ r.Get("/"+tangled.RepoLanguagesNSID, x.RepoLanguages)
6868+6969+ // knot query endpoints (no auth required)
7070+ r.Get("/"+tangled.KnotListKeysNSID, x.ListKeys)
7171+ r.Get("/"+tangled.KnotVersionNSID, x.Version)
7272+7373+ // service query endpoints (no auth required)
7474+ r.Get("/"+tangled.OwnerNSID, x.Owner)
7575+7676+ return r
7777+}
7878+7979+// parseRepoParam parses a repo parameter in 'did/repoName' format and returns
8080+// the full repository path on disk
8181+func (x *Xrpc) parseRepoParam(repo string) (string, error) {
8282+ if repo == "" {
8383+ return "", xrpcerr.NewXrpcError(
8484+ xrpcerr.WithTag("InvalidRequest"),
8585+ xrpcerr.WithMessage("missing repo parameter"),
8686+ )
8787+ }
8888+8989+ // Parse repo string (did/repoName format)
9090+ parts := strings.SplitN(repo, "/", 2)
9191+ if len(parts) != 2 {
9292+ return "", xrpcerr.NewXrpcError(
9393+ xrpcerr.WithTag("InvalidRequest"),
9494+ xrpcerr.WithMessage("invalid repo format, expected 'did/repoName'"),
9595+ )
9696+ }
9797+9898+ did := parts[0]
9999+ repoName := parts[1]
100100+101101+ // Construct repository path using the same logic as didPath
102102+ didRepoPath, err := securejoin.SecureJoin(did, repoName)
103103+ if err != nil {
104104+ return "", xrpcerr.RepoNotFoundError
105105+ }
106106+107107+ repoPath, err := securejoin.SecureJoin(x.Config.Repo.ScanPath, didRepoPath)
108108+ if err != nil {
109109+ return "", xrpcerr.RepoNotFoundError
110110+ }
111111+112112+ return repoPath, nil
113113+}
114114+115115+func writeError(w http.ResponseWriter, e xrpcerr.XrpcError, status int) {
116116+ w.Header().Set("Content-Type", "application/json")
117117+ w.WriteHeader(status)
118118+ json.NewEncoder(w).Encode(e)
119119+}
120120+121121+func writeJson(w http.ResponseWriter, response any) {
122122+ w.Header().Set("Content-Type", "application/json")
123123+ if err := json.NewEncoder(w).Encode(response); err != nil {
124124+ writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
125125+ return
126126+ }
127127+}
+158
legal/privacy.md
···11+# Privacy Policy
22+33+**Last updated:** January 15, 2025
44+55+This Privacy Policy describes how Tangled ("we," "us," or "our")
66+collects, uses, and shares your personal information when you use our
77+platform and services (the "Service").
88+99+## 1. Information We Collect
1010+1111+### Account Information
1212+1313+When you create an account, we collect:
1414+1515+- Your chosen username
1616+- Email address
1717+- Profile information you choose to provide
1818+- Authentication data
1919+2020+### Content and Activity
2121+2222+We store:
2323+2424+- Code repositories and associated metadata
2525+- Issues, pull requests, and comments
2626+- Activity logs and usage patterns
2727+- Public keys for authentication
2828+2929+## 2. Data Location and Hosting
3030+3131+### EU Data Hosting
3232+3333+**All Tangled service data is hosted within the European Union.**
3434+Specifically:
3535+3636+- **Personal Data Servers (PDS):** Accounts hosted on Tangled PDS
3737+ (*.tngl.sh) are located in Finland
3838+- **Application Data:** All other service data is stored on EU-based
3939+ servers
4040+- **Data Processing:** All data processing occurs within EU
4141+ jurisdiction
4242+4343+### External PDS Notice
4444+4545+**Important:** If your account is hosted on Bluesky's PDS or other
4646+self-hosted Personal Data Servers (not *.tngl.sh), we do not control
4747+that data. The data protection, storage location, and privacy
4848+practices for such accounts are governed by the respective PDS
4949+provider's policies, not this Privacy Policy. We only control data
5050+processing within our own services and infrastructure.
5151+5252+## 3. Third-Party Data Processors
5353+5454+We only share your data with the following third-party processors:
5555+5656+### Resend (Email Services)
5757+5858+- **Purpose:** Sending transactional emails (account verification,
5959+ notifications)
6060+- **Data Shared:** Email address and necessary message content
6161+6262+### Cloudflare (Image Caching)
6363+6464+- **Purpose:** Caching and optimizing image delivery
6565+- **Data Shared:** Public images and associated metadata for caching
6666+ purposes
6767+6868+### Posthog (Usage Metrics Tracking)
6969+7070+- **Purpose:** Tracking usage and platform metrics
7171+- **Data Shared:** Anonymous usage data, IP addresses, DIDs, and browser
7272+ information
7373+7474+## 4. How We Use Your Information
7575+7676+We use your information to:
7777+7878+- Provide and maintain the Service
7979+- Process your transactions and requests
8080+- Send you technical notices and support messages
8181+- Improve and develop new features
8282+- Ensure security and prevent fraud
8383+- Comply with legal obligations
8484+8585+## 5. Data Sharing and Disclosure
8686+8787+We do not sell, trade, or rent your personal information. We may share
8888+your information only in the following circumstances:
8989+9090+- With the third-party processors listed above
9191+- When required by law or legal process
9292+- To protect our rights, property, or safety, or that of our users
9393+- In connection with a merger, acquisition, or sale of assets (with
9494+ appropriate protections)
9595+9696+## 6. Data Security
9797+9898+We implement appropriate technical and organizational measures to
9999+protect your personal information against unauthorized access,
100100+alteration, disclosure, or destruction. However, no method of
101101+transmission over the Internet is 100% secure.
102102+103103+## 7. Data Retention
104104+105105+We retain your personal information for as long as necessary to provide
106106+the Service and fulfill the purposes outlined in this Privacy Policy,
107107+unless a longer retention period is required by law.
108108+109109+## 8. Your Rights
110110+111111+Under applicable data protection laws, you have the right to:
112112+113113+- Access your personal information
114114+- Correct inaccurate information
115115+- Request deletion of your information
116116+- Object to processing of your information
117117+- Data portability
118118+- Withdraw consent (where applicable)
119119+120120+## 9. Cookies and Tracking
121121+122122+We use cookies and similar technologies to:
123123+124124+- Maintain your login session
125125+- Remember your preferences
126126+- Analyze usage patterns to improve the Service
127127+128128+You can control cookie settings through your browser preferences.
129129+130130+## 10. Children's Privacy
131131+132132+The Service is not intended for children under 16 years of age. We do
133133+not knowingly collect personal information from children under 16. If
134134+we become aware that we have collected such information, we will take
135135+steps to delete it.
136136+137137+## 11. International Data Transfers
138138+139139+While all our primary data processing occurs within the EU, some of our
140140+third-party processors may process data outside the EU. When this
141141+occurs, we ensure appropriate safeguards are in place, such as Standard
142142+Contractual Clauses or adequacy decisions.
143143+144144+## 12. Changes to This Privacy Policy
145145+146146+We may update this Privacy Policy from time to time. We will notify you
147147+of any changes by posting the new Privacy Policy on this page and
148148+updating the "Last updated" date.
149149+150150+## 13. Contact Information
151151+152152+If you have any questions about this Privacy Policy or wish to exercise
153153+your rights, please contact us through our platform or via email.
154154+155155+---
156156+157157+This Privacy Policy complies with the EU General Data Protection
158158+Regulation (GDPR) and other applicable data protection laws.
+109
legal/terms.md
···11+# Terms of Service
22+33+**Last updated:** January 15, 2025
44+55+Welcome to Tangled. These Terms of Service ("Terms") govern your access
66+to and use of the Tangled platform and services (the "Service")
77+operated by us ("Tangled," "we," "us," or "our").
88+99+## 1. Acceptance of Terms
1010+1111+By accessing or using our Service, you agree to be bound by these Terms.
1212+If you disagree with any part of these terms, then you may not access
1313+the Service.
1414+1515+## 2. Account Registration
1616+1717+To use certain features of the Service, you must register for an
1818+account. You agree to provide accurate, current, and complete
1919+information during the registration process and to update such
2020+information to keep it accurate, current, and complete.
2121+2222+## 3. Account Termination
2323+2424+> **Important Notice**
2525+>
2626+> **We reserve the right to terminate, suspend, or restrict access to
2727+> your account at any time, for any reason, or for no reason at all, at
2828+> our sole discretion.** This includes, but is not limited to,
2929+> termination for violation of these Terms, inappropriate conduct, spam,
3030+> abuse, or any other behavior we deem harmful to the Service or other
3131+> users.
3232+>
3333+> Account termination may result in the loss of access to your
3434+> repositories, data, and other content associated with your account. We
3535+> are not obligated to provide advance notice of termination, though we
3636+> may do so in our discretion.
3737+3838+## 4. Acceptable Use
3939+4040+You agree not to use the Service to:
4141+4242+- Violate any applicable laws or regulations
4343+- Infringe upon the rights of others
4444+- Upload, store, or share content that is illegal, harmful, threatening,
4545+ abusive, harassing, defamatory, vulgar, obscene, or otherwise
4646+ objectionable
4747+- Engage in spam, phishing, or other deceptive practices
4848+- Attempt to gain unauthorized access to the Service or other users'
4949+ accounts
5050+- Interfere with or disrupt the Service or servers connected to the
5151+ Service
5252+5353+## 5. Content and Intellectual Property
5454+5555+You retain ownership of the content you upload to the Service. By
5656+uploading content, you grant us a non-exclusive, worldwide, royalty-free
5757+license to use, reproduce, modify, and distribute your content as
5858+necessary to provide the Service.
5959+6060+## 6. Privacy
6161+6262+Your privacy is important to us. Please review our [Privacy
6363+Policy](/privacy), which also governs your use of the Service.
6464+6565+## 7. Disclaimers
6666+6767+The Service is provided on an "AS IS" and "AS AVAILABLE" basis. We make
6868+no warranties, expressed or implied, and hereby disclaim and negate all
6969+other warranties including without limitation, implied warranties or
7070+conditions of merchantability, fitness for a particular purpose, or
7171+non-infringement of intellectual property or other violation of rights.
7272+7373+## 8. Limitation of Liability
7474+7575+In no event shall Tangled, nor its directors, employees, partners,
7676+agents, suppliers, or affiliates, be liable for any indirect,
7777+incidental, special, consequential, or punitive damages, including
7878+without limitation, loss of profits, data, use, goodwill, or other
7979+intangible losses, resulting from your use of the Service.
8080+8181+## 9. Indemnification
8282+8383+You agree to defend, indemnify, and hold harmless Tangled and its
8484+affiliates, officers, directors, employees, and agents from and against
8585+any and all claims, damages, obligations, losses, liabilities, costs,
8686+or debt, and expenses (including attorney's fees).
8787+8888+## 10. Governing Law
8989+9090+These Terms shall be interpreted and governed by the laws of Finland,
9191+without regard to its conflict of law provisions.
9292+9393+## 11. Changes to Terms
9494+9595+We reserve the right to modify or replace these Terms at any time. If a
9696+revision is material, we will try to provide at least 30 days notice
9797+prior to any new terms taking effect.
9898+9999+## 12. Contact Information
100100+101101+If you have any questions about these Terms of Service, please contact
102102+us through our platform or via email.
103103+104104+---
105105+106106+These terms are effective as of the last updated date shown above and
107107+will remain in effect except with respect to any changes in their
108108+provisions in the future, which will be in effect immediately after
109109+being posted on this page.
-52
lexicons/artifact.json
···11-{
22- "lexicon": 1,
33- "id": "sh.tangled.repo.artifact",
44- "needsCbor": true,
55- "needsType": true,
66- "defs": {
77- "main": {
88- "type": "record",
99- "key": "tid",
1010- "record": {
1111- "type": "object",
1212- "required": [
1313- "name",
1414- "repo",
1515- "tag",
1616- "createdAt",
1717- "artifact"
1818- ],
1919- "properties": {
2020- "name": {
2121- "type": "string",
2222- "description": "name of the artifact"
2323- },
2424- "repo": {
2525- "type": "string",
2626- "format": "at-uri",
2727- "description": "repo that this artifact is being uploaded to"
2828- },
2929- "tag": {
3030- "type": "bytes",
3131- "description": "hash of the tag object that this artifact is attached to (only annotated tags are supported)",
3232- "minLength": 20,
3333- "maxLength": 20
3434- },
3535- "createdAt": {
3636- "type": "string",
3737- "format": "datetime",
3838- "description": "time of creation of this artifact"
3939- },
4040- "artifact": {
4141- "type": "blob",
4242- "description": "the artifact",
4343- "accept": [
4444- "*/*"
4545- ],
4646- "maxSize": 52428800
4747- }
4848- }
4949- }
5050- }
5151- }
5252-}
···99// NewHandler sets up a new slog.Handler with the service name
1010// as an attribute
1111func NewHandler(name string) slog.Handler {
1212- handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{})
1212+ handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
1313+ Level: slog.LevelDebug,
1414+ })
13151416 var attrs []slog.Attr
1517 attrs = append(attrs, slog.Attr{Key: "service", Value: slog.StringValue(name)})
···11{
22 nixpkgs,
33+ system,
44+ hostSystem,
35 self,
44-}:
55-nixpkgs.lib.nixosSystem {
66- system = "x86_64-linux";
77- modules = [
88- self.nixosModules.knot
99- ({
1010- config,
1111- pkgs,
1212- ...
1313- }: {
1414- virtualisation.memorySize = 2048;
1515- virtualisation.diskSize = 10 * 1024;
1616- virtualisation.cores = 2;
1717- services.getty.autologinUser = "root";
1818- environment.systemPackages = with pkgs; [curl vim git];
1919- systemd.tmpfiles.rules = let
2020- u = config.services.tangled-knot.gitUser;
2121- g = config.services.tangled-knot.gitUser;
2222- in [
2323- "d /var/lib/knot 0770 ${u} ${g} - -" # Create the directory first
2424- "f+ /var/lib/knot/secret 0660 ${u} ${g} - KNOT_SERVER_SECRET=7387221d57e64499b179a9dff19c5f1abf436470e2976d3585badddad5282970"
2525- ];
2626- services.tangled-knot = {
2727- enable = true;
2828- server = {
2929- secretFile = "/var/lib/knot/secret";
3030- hostname = "localhost:6000";
3131- listenAddr = "0.0.0.0:6000";
66+}: let
77+ envVar = name: let
88+ var = builtins.getEnv name;
99+ in
1010+ if var == ""
1111+ then throw "\$${name} must be defined, see docs/hacking.md for more details"
1212+ else var;
1313+in
1414+ nixpkgs.lib.nixosSystem {
1515+ inherit system;
1616+ modules = [
1717+ self.nixosModules.knot
1818+ self.nixosModules.spindle
1919+ ({
2020+ lib,
2121+ config,
2222+ pkgs,
2323+ ...
2424+ }: {
2525+ virtualisation.vmVariant.virtualisation = {
2626+ host.pkgs = import nixpkgs {system = hostSystem;};
2727+2828+ graphics = false;
2929+ memorySize = 2048;
3030+ diskSize = 10 * 1024;
3131+ cores = 2;
3232+ forwardPorts = [
3333+ # ssh
3434+ {
3535+ from = "host";
3636+ host.port = 2222;
3737+ guest.port = 22;
3838+ }
3939+ # knot
4040+ {
4141+ from = "host";
4242+ host.port = 6000;
4343+ guest.port = 6000;
4444+ }
4545+ # spindle
4646+ {
4747+ from = "host";
4848+ host.port = 6555;
4949+ guest.port = 6555;
5050+ }
5151+ ];
5252+ sharedDirectories = {
5353+ # We can't use the 9p mounts directly for most of these
5454+ # as SQLite is incompatible with them. So instead we
5555+ # mount the shared directories to a different location
5656+ # and copy the contents around on service start/stop.
5757+ knotData = {
5858+ source = "$TANGLED_VM_DATA_DIR/knot";
5959+ target = "/mnt/knot-data";
6060+ };
6161+ spindleData = {
6262+ source = "$TANGLED_VM_DATA_DIR/spindle";
6363+ target = "/mnt/spindle-data";
6464+ };
6565+ spindleLogs = {
6666+ source = "$TANGLED_VM_DATA_DIR/spindle-logs";
6767+ target = "/var/log/spindle";
6868+ };
6969+ };
3270 };
3333- };
3434- })
3535- ];
3636-}
7171+        # This is fine because any and all ports that are forwarded to host are explicitly marked above; we don't need a separate guest firewall
7272+ networking.firewall.enable = false;
7373+ time.timeZone = "Europe/London";
7474+ services.getty.autologinUser = "root";
7575+ environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
7676+ services.tangled-knot = {
7777+ enable = true;
7878+ motd = "Welcome to the development knot!\n";
7979+ server = {
8080+ owner = envVar "TANGLED_VM_KNOT_OWNER";
8181+ hostname = "localhost:6000";
8282+ listenAddr = "0.0.0.0:6000";
8383+ };
8484+ };
8585+ services.tangled-spindle = {
8686+ enable = true;
8787+ server = {
8888+ owner = envVar "TANGLED_VM_SPINDLE_OWNER";
8989+ hostname = "localhost:6555";
9090+ listenAddr = "0.0.0.0:6555";
9191+ dev = true;
9292+ queueSize = 100;
9393+ maxJobCount = 2;
9494+ secrets = {
9595+ provider = "sqlite";
9696+ };
9797+ };
9898+ };
9999+ users = {
100100+ # So we don't have to deal with permission clashing between
101101+ # blank disk VMs and existing state
102102+ users.${config.services.tangled-knot.gitUser}.uid = 666;
103103+ groups.${config.services.tangled-knot.gitUser}.gid = 666;
104104+105105+ # TODO: separate spindle user
106106+ };
107107+ systemd.services = let
108108+ mkDataSyncScripts = source: target: {
109109+ enableStrictShellChecks = true;
110110+111111+ preStart = lib.mkBefore ''
112112+ mkdir -p ${target}
113113+ ${lib.getExe pkgs.rsync} -a ${source}/ ${target}
114114+ '';
115115+116116+ postStop = lib.mkAfter ''
117117+ ${lib.getExe pkgs.rsync} -a ${target}/ ${source}
118118+ '';
119119+120120+ serviceConfig.PermissionsStartOnly = true;
121121+ };
122122+ in {
123123+ knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
124124+ spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
125125+ };
126126+ })
127127+ ];
128128+ }
+1-1
patchutil/combinediff.go
···119119 // we have f1 and f2, combine them
120120 combined, err := combineFiles(f1, f2)
121121 if err != nil {
122122- fmt.Println(err)
122122+ // fmt.Println(err)
123123 }
124124125125		// combined can be nil if commit 2 reverted all changes from commit 1
+25
patchutil/interdiff.go
···55 "strings"
6677 "github.com/bluekeyes/go-gitdiff/gitdiff"
88+ "tangled.sh/tangled.sh/core/types"
89)
9101011type InterdiffResult struct {
···3334 *gitdiff.File
3435 Name string
3536 Status InterdiffFileStatus
3737+}
3838+3939+func (s *InterdiffFile) Split() *types.SplitDiff {
4040+ fragments := make([]types.SplitFragment, len(s.TextFragments))
4141+4242+ for i, fragment := range s.TextFragments {
4343+ leftLines, rightLines := types.SeparateLines(fragment)
4444+4545+ fragments[i] = types.SplitFragment{
4646+ Header: fragment.Header(),
4747+ LeftLines: leftLines,
4848+ RightLines: rightLines,
4949+ }
5050+ }
5151+5252+ return &types.SplitDiff{
5353+ Name: s.Id(),
5454+ TextFragments: fragments,
5555+ }
5656+}
5757+5858+// used by html elements as a unique ID for hrefs
5959+func (s *InterdiffFile) Id() string {
6060+ return s.Name
3661}
37623863func (s *InterdiffFile) String() string {