···11-// Folder-specific settings
22-//
33-// For a full list of overridable settings, and general information on folder-specific settings,
44-// see the documentation: https://zed.dev/docs/configuring-zed#settings-files
55-{
66- "languages": {
77- "HTML": {
88- "prettier": {
99- "format_on_save": false,
1010- "allowed": true,
1111- "parser": "go-template",
1212- "plugins": ["prettier-plugin-go-template"]
1313- }
1414- }
1515- }
1616-}
+1001-1332
api/tangled/cbor_gen.go
···1202120212031203 return nil
12041204}
12051205-func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
12061206- if t == nil {
12071207- _, err := w.Write(cbg.CborNull)
12081208- return err
12091209- }
12101210-12111211- cw := cbg.NewCborWriter(w)
12121212- fieldCount := 3
12131213-12141214- if t.LangBreakdown == nil {
12151215- fieldCount--
12161216- }
12171217-12181218- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
12191219- return err
12201220- }
12211221-12221222- // t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
12231223- if len("commitCount") > 1000000 {
12241224- return xerrors.Errorf("Value in field \"commitCount\" was too long")
12251225- }
12261226-12271227- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
12281228- return err
12291229- }
12301230- if _, err := cw.WriteString(string("commitCount")); err != nil {
12311231- return err
12321232- }
12331233-12341234- if err := t.CommitCount.MarshalCBOR(cw); err != nil {
12351235- return err
12361236- }
12371237-12381238- // t.IsDefaultRef (bool) (bool)
12391239- if len("isDefaultRef") > 1000000 {
12401240- return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
12411241- }
12421242-12431243- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
12441244- return err
12451245- }
12461246- if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
12471247- return err
12481248- }
12491249-12501250- if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
12511251- return err
12521252- }
12531253-12541254- // t.LangBreakdown (tangled.GitRefUpdate_Meta_LangBreakdown) (struct)
12551255- if t.LangBreakdown != nil {
12561256-12571257- if len("langBreakdown") > 1000000 {
12581258- return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
12591259- }
12601260-12611261- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
12621262- return err
12631263- }
12641264- if _, err := cw.WriteString(string("langBreakdown")); err != nil {
12651265- return err
12661266- }
12671267-12681268- if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
12691269- return err
12701270- }
12711271- }
12721272- return nil
12731273-}
12741274-12751275-func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
12761276- *t = GitRefUpdate_Meta{}
12771277-12781278- cr := cbg.NewCborReader(r)
12791279-12801280- maj, extra, err := cr.ReadHeader()
12811281- if err != nil {
12821282- return err
12831283- }
12841284- defer func() {
12851285- if err == io.EOF {
12861286- err = io.ErrUnexpectedEOF
12871287- }
12881288- }()
12891289-12901290- if maj != cbg.MajMap {
12911291- return fmt.Errorf("cbor input should be of type map")
12921292- }
12931293-12941294- if extra > cbg.MaxLength {
12951295- return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
12961296- }
12971297-12981298- n := extra
12991299-13001300- nameBuf := make([]byte, 13)
13011301- for i := uint64(0); i < n; i++ {
13021302- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
13031303- if err != nil {
13041304- return err
13051305- }
13061306-13071307- if !ok {
13081308- // Field doesn't exist on this type, so ignore it
13091309- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
13101310- return err
13111311- }
13121312- continue
13131313- }
13141314-13151315- switch string(nameBuf[:nameLen]) {
13161316- // t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
13171317- case "commitCount":
13181318-13191319- {
13201320-13211321- b, err := cr.ReadByte()
13221322- if err != nil {
13231323- return err
13241324- }
13251325- if b != cbg.CborNull[0] {
13261326- if err := cr.UnreadByte(); err != nil {
13271327- return err
13281328- }
13291329- t.CommitCount = new(GitRefUpdate_Meta_CommitCount)
13301330- if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
13311331- return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
13321332- }
13331333- }
13341334-13351335- }
13361336- // t.IsDefaultRef (bool) (bool)
13371337- case "isDefaultRef":
13381338-13391339- maj, extra, err = cr.ReadHeader()
13401340- if err != nil {
13411341- return err
13421342- }
13431343- if maj != cbg.MajOther {
13441344- return fmt.Errorf("booleans must be major type 7")
13451345- }
13461346- switch extra {
13471347- case 20:
13481348- t.IsDefaultRef = false
13491349- case 21:
13501350- t.IsDefaultRef = true
13511351- default:
13521352- return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
13531353- }
13541354- // t.LangBreakdown (tangled.GitRefUpdate_Meta_LangBreakdown) (struct)
13551355- case "langBreakdown":
13561356-13571357- {
13581358-13591359- b, err := cr.ReadByte()
13601360- if err != nil {
13611361- return err
13621362- }
13631363- if b != cbg.CborNull[0] {
13641364- if err := cr.UnreadByte(); err != nil {
13651365- return err
13661366- }
13671367- t.LangBreakdown = new(GitRefUpdate_Meta_LangBreakdown)
13681368- if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
13691369- return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
13701370- }
13711371- }
13721372-13731373- }
13741374-13751375- default:
13761376- // Field doesn't exist on this type, so ignore it
13771377- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
13781378- return err
13791379- }
13801380- }
13811381- }
13821382-13831383- return nil
13841384-}
13851385-func (t *GitRefUpdate_Meta_CommitCount) MarshalCBOR(w io.Writer) error {
12051205+func (t *GitRefUpdate_CommitCountBreakdown) MarshalCBOR(w io.Writer) error {
13861206 if t == nil {
13871207 _, err := w.Write(cbg.CborNull)
13881208 return err
···13991219 return err
14001220 }
1401122114021402- // t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
12221222+ // t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
14031223 if t.ByEmail != nil {
1404122414051225 if len("byEmail") > 1000000 {
···14301250 return nil
14311251}
1432125214331433-func (t *GitRefUpdate_Meta_CommitCount) UnmarshalCBOR(r io.Reader) (err error) {
14341434- *t = GitRefUpdate_Meta_CommitCount{}
12531253+func (t *GitRefUpdate_CommitCountBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
12541254+ *t = GitRefUpdate_CommitCountBreakdown{}
1435125514361256 cr := cbg.NewCborReader(r)
14371257···14501270 }
1451127114521272 if extra > cbg.MaxLength {
14531453- return fmt.Errorf("GitRefUpdate_Meta_CommitCount: map struct too large (%d)", extra)
12731273+ return fmt.Errorf("GitRefUpdate_CommitCountBreakdown: map struct too large (%d)", extra)
14541274 }
1455127514561276 n := extra
···14711291 }
1472129214731293 switch string(nameBuf[:nameLen]) {
14741474- // t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
12941294+ // t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
14751295 case "byEmail":
1476129614771297 maj, extra, err = cr.ReadHeader()
···14881308 }
1489130914901310 if extra > 0 {
14911491- t.ByEmail = make([]*GitRefUpdate_Meta_CommitCount_ByEmail_Elem, extra)
13111311+ t.ByEmail = make([]*GitRefUpdate_IndividualEmailCommitCount, extra)
14921312 }
1493131314941314 for i := 0; i < int(extra); i++ {
···15101330 if err := cr.UnreadByte(); err != nil {
15111331 return err
15121332 }
15131513- t.ByEmail[i] = new(GitRefUpdate_Meta_CommitCount_ByEmail_Elem)
13331333+ t.ByEmail[i] = new(GitRefUpdate_IndividualEmailCommitCount)
15141334 if err := t.ByEmail[i].UnmarshalCBOR(cr); err != nil {
15151335 return xerrors.Errorf("unmarshaling t.ByEmail[i] pointer: %w", err)
15161336 }
···1531135115321352 return nil
15331353}
15341534-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) MarshalCBOR(w io.Writer) error {
13541354+func (t *GitRefUpdate_IndividualEmailCommitCount) MarshalCBOR(w io.Writer) error {
15351355 if t == nil {
15361356 _, err := w.Write(cbg.CborNull)
15371357 return err
···15901410 return nil
15911411}
1592141215931593-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) UnmarshalCBOR(r io.Reader) (err error) {
15941594- *t = GitRefUpdate_Meta_CommitCount_ByEmail_Elem{}
14131413+func (t *GitRefUpdate_IndividualEmailCommitCount) UnmarshalCBOR(r io.Reader) (err error) {
14141414+ *t = GitRefUpdate_IndividualEmailCommitCount{}
1595141515961416 cr := cbg.NewCborReader(r)
15971417···16101430 }
1611143116121432 if extra > cbg.MaxLength {
16131613- return fmt.Errorf("GitRefUpdate_Meta_CommitCount_ByEmail_Elem: map struct too large (%d)", extra)
14331433+ return fmt.Errorf("GitRefUpdate_IndividualEmailCommitCount: map struct too large (%d)", extra)
16141434 }
1615143516161436 n := extra
···1679149916801500 return nil
16811501}
16821682-func (t *GitRefUpdate_Meta_LangBreakdown) MarshalCBOR(w io.Writer) error {
15021502+func (t *GitRefUpdate_LangBreakdown) MarshalCBOR(w io.Writer) error {
16831503 if t == nil {
16841504 _, err := w.Write(cbg.CborNull)
16851505 return err
···16961516 return err
16971517 }
1698151816991699- // t.Inputs ([]*tangled.GitRefUpdate_Pair) (slice)
15191519+ // t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
17001520 if t.Inputs != nil {
1701152117021522 if len("inputs") > 1000000 {
···17271547 return nil
17281548}
1729154917301730-func (t *GitRefUpdate_Meta_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
17311731- *t = GitRefUpdate_Meta_LangBreakdown{}
15501550+func (t *GitRefUpdate_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
15511551+ *t = GitRefUpdate_LangBreakdown{}
1732155217331553 cr := cbg.NewCborReader(r)
17341554···17471567 }
1748156817491569 if extra > cbg.MaxLength {
17501750- return fmt.Errorf("GitRefUpdate_Meta_LangBreakdown: map struct too large (%d)", extra)
15701570+ return fmt.Errorf("GitRefUpdate_LangBreakdown: map struct too large (%d)", extra)
17511571 }
1752157217531573 n := extra
···17681588 }
1769158917701590 switch string(nameBuf[:nameLen]) {
17711771- // t.Inputs ([]*tangled.GitRefUpdate_Pair) (slice)
15911591+ // t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
17721592 case "inputs":
1773159317741594 maj, extra, err = cr.ReadHeader()
···17851605 }
1786160617871607 if extra > 0 {
17881788- t.Inputs = make([]*GitRefUpdate_Pair, extra)
16081608+ t.Inputs = make([]*GitRefUpdate_IndividualLanguageSize, extra)
17891609 }
1790161017911611 for i := 0; i < int(extra); i++ {
···18071627 if err := cr.UnreadByte(); err != nil {
18081628 return err
18091629 }
18101810- t.Inputs[i] = new(GitRefUpdate_Pair)
16301630+ t.Inputs[i] = new(GitRefUpdate_IndividualLanguageSize)
18111631 if err := t.Inputs[i].UnmarshalCBOR(cr); err != nil {
18121632 return xerrors.Errorf("unmarshaling t.Inputs[i] pointer: %w", err)
18131633 }
···1828164818291649 return nil
18301650}
18311831-func (t *GitRefUpdate_Pair) MarshalCBOR(w io.Writer) error {
16511651+func (t *GitRefUpdate_IndividualLanguageSize) MarshalCBOR(w io.Writer) error {
18321652 if t == nil {
18331653 _, err := w.Write(cbg.CborNull)
18341654 return err
···18881708 return nil
18891709}
1890171018911891-func (t *GitRefUpdate_Pair) UnmarshalCBOR(r io.Reader) (err error) {
18921892- *t = GitRefUpdate_Pair{}
17111711+func (t *GitRefUpdate_IndividualLanguageSize) UnmarshalCBOR(r io.Reader) (err error) {
17121712+ *t = GitRefUpdate_IndividualLanguageSize{}
1893171318941714 cr := cbg.NewCborReader(r)
18951715···19081728 }
1909172919101730 if extra > cbg.MaxLength {
19111911- return fmt.Errorf("GitRefUpdate_Pair: map struct too large (%d)", extra)
17311731+ return fmt.Errorf("GitRefUpdate_IndividualLanguageSize: map struct too large (%d)", extra)
19121732 }
1913173319141734 n := extra
···1977179719781798 return nil
19791799}
18001800+func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
18011801+ if t == nil {
18021802+ _, err := w.Write(cbg.CborNull)
18031803+ return err
18041804+ }
18051805+18061806+ cw := cbg.NewCborWriter(w)
18071807+ fieldCount := 3
18081808+18091809+ if t.LangBreakdown == nil {
18101810+ fieldCount--
18111811+ }
18121812+18131813+ if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
18141814+ return err
18151815+ }
18161816+18171817+ // t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
18181818+ if len("commitCount") > 1000000 {
18191819+ return xerrors.Errorf("Value in field \"commitCount\" was too long")
18201820+ }
18211821+18221822+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
18231823+ return err
18241824+ }
18251825+ if _, err := cw.WriteString(string("commitCount")); err != nil {
18261826+ return err
18271827+ }
18281828+18291829+ if err := t.CommitCount.MarshalCBOR(cw); err != nil {
18301830+ return err
18311831+ }
18321832+18331833+ // t.IsDefaultRef (bool) (bool)
18341834+ if len("isDefaultRef") > 1000000 {
18351835+ return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
18361836+ }
18371837+18381838+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
18391839+ return err
18401840+ }
18411841+ if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
18421842+ return err
18431843+ }
18441844+18451845+ if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
18461846+ return err
18471847+ }
18481848+18491849+ // t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
18501850+ if t.LangBreakdown != nil {
18511851+18521852+ if len("langBreakdown") > 1000000 {
18531853+ return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
18541854+ }
18551855+18561856+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
18571857+ return err
18581858+ }
18591859+ if _, err := cw.WriteString(string("langBreakdown")); err != nil {
18601860+ return err
18611861+ }
18621862+18631863+ if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
18641864+ return err
18651865+ }
18661866+ }
18671867+ return nil
18681868+}
18691869+18701870+func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
18711871+ *t = GitRefUpdate_Meta{}
18721872+18731873+ cr := cbg.NewCborReader(r)
18741874+18751875+ maj, extra, err := cr.ReadHeader()
18761876+ if err != nil {
18771877+ return err
18781878+ }
18791879+ defer func() {
18801880+ if err == io.EOF {
18811881+ err = io.ErrUnexpectedEOF
18821882+ }
18831883+ }()
18841884+18851885+ if maj != cbg.MajMap {
18861886+ return fmt.Errorf("cbor input should be of type map")
18871887+ }
18881888+18891889+ if extra > cbg.MaxLength {
18901890+ return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
18911891+ }
18921892+18931893+ n := extra
18941894+18951895+ nameBuf := make([]byte, 13)
18961896+ for i := uint64(0); i < n; i++ {
18971897+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
18981898+ if err != nil {
18991899+ return err
19001900+ }
19011901+19021902+ if !ok {
19031903+ // Field doesn't exist on this type, so ignore it
19041904+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
19051905+ return err
19061906+ }
19071907+ continue
19081908+ }
19091909+19101910+ switch string(nameBuf[:nameLen]) {
19111911+ // t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
19121912+ case "commitCount":
19131913+19141914+ {
19151915+19161916+ b, err := cr.ReadByte()
19171917+ if err != nil {
19181918+ return err
19191919+ }
19201920+ if b != cbg.CborNull[0] {
19211921+ if err := cr.UnreadByte(); err != nil {
19221922+ return err
19231923+ }
19241924+ t.CommitCount = new(GitRefUpdate_CommitCountBreakdown)
19251925+ if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
19261926+ return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
19271927+ }
19281928+ }
19291929+19301930+ }
19311931+ // t.IsDefaultRef (bool) (bool)
19321932+ case "isDefaultRef":
19331933+19341934+ maj, extra, err = cr.ReadHeader()
19351935+ if err != nil {
19361936+ return err
19371937+ }
19381938+ if maj != cbg.MajOther {
19391939+ return fmt.Errorf("booleans must be major type 7")
19401940+ }
19411941+ switch extra {
19421942+ case 20:
19431943+ t.IsDefaultRef = false
19441944+ case 21:
19451945+ t.IsDefaultRef = true
19461946+ default:
19471947+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
19481948+ }
19491949+ // t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
19501950+ case "langBreakdown":
19511951+19521952+ {
19531953+19541954+ b, err := cr.ReadByte()
19551955+ if err != nil {
19561956+ return err
19571957+ }
19581958+ if b != cbg.CborNull[0] {
19591959+ if err := cr.UnreadByte(); err != nil {
19601960+ return err
19611961+ }
19621962+ t.LangBreakdown = new(GitRefUpdate_LangBreakdown)
19631963+ if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
19641964+ return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
19651965+ }
19661966+ }
19671967+19681968+ }
19691969+19701970+ default:
19711971+ // Field doesn't exist on this type, so ignore it
19721972+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
19731973+ return err
19741974+ }
19751975+ }
19761976+ }
19771977+19781978+ return nil
19791979+}
19801980func (t *GraphFollow) MarshalCBOR(w io.Writer) error {
19811981 if t == nil {
19821982 _, err := w.Write(cbg.CborNull)
···21182118 }
2119211921202120 t.Subject = string(sval)
21212121+ }
21222122+ // t.CreatedAt (string) (string)
21232123+ case "createdAt":
21242124+21252125+ {
21262126+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
21272127+ if err != nil {
21282128+ return err
21292129+ }
21302130+21312131+ t.CreatedAt = string(sval)
21322132+ }
21332133+21342134+ default:
21352135+ // Field doesn't exist on this type, so ignore it
21362136+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
21372137+ return err
21382138+ }
21392139+ }
21402140+ }
21412141+21422142+ return nil
21432143+}
21442144+func (t *Knot) MarshalCBOR(w io.Writer) error {
21452145+ if t == nil {
21462146+ _, err := w.Write(cbg.CborNull)
21472147+ return err
21482148+ }
21492149+21502150+ cw := cbg.NewCborWriter(w)
21512151+21522152+ if _, err := cw.Write([]byte{162}); err != nil {
21532153+ return err
21542154+ }
21552155+21562156+ // t.LexiconTypeID (string) (string)
21572157+ if len("$type") > 1000000 {
21582158+ return xerrors.Errorf("Value in field \"$type\" was too long")
21592159+ }
21602160+21612161+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
21622162+ return err
21632163+ }
21642164+ if _, err := cw.WriteString(string("$type")); err != nil {
21652165+ return err
21662166+ }
21672167+21682168+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.knot"))); err != nil {
21692169+ return err
21702170+ }
21712171+ if _, err := cw.WriteString(string("sh.tangled.knot")); err != nil {
21722172+ return err
21732173+ }
21742174+21752175+ // t.CreatedAt (string) (string)
21762176+ if len("createdAt") > 1000000 {
21772177+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
21782178+ }
21792179+21802180+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
21812181+ return err
21822182+ }
21832183+ if _, err := cw.WriteString(string("createdAt")); err != nil {
21842184+ return err
21852185+ }
21862186+21872187+ if len(t.CreatedAt) > 1000000 {
21882188+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
21892189+ }
21902190+21912191+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
21922192+ return err
21932193+ }
21942194+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
21952195+ return err
21962196+ }
21972197+ return nil
21982198+}
21992199+22002200+func (t *Knot) UnmarshalCBOR(r io.Reader) (err error) {
22012201+ *t = Knot{}
22022202+22032203+ cr := cbg.NewCborReader(r)
22042204+22052205+ maj, extra, err := cr.ReadHeader()
22062206+ if err != nil {
22072207+ return err
22082208+ }
22092209+ defer func() {
22102210+ if err == io.EOF {
22112211+ err = io.ErrUnexpectedEOF
22122212+ }
22132213+ }()
22142214+22152215+ if maj != cbg.MajMap {
22162216+ return fmt.Errorf("cbor input should be of type map")
22172217+ }
22182218+22192219+ if extra > cbg.MaxLength {
22202220+ return fmt.Errorf("Knot: map struct too large (%d)", extra)
22212221+ }
22222222+22232223+ n := extra
22242224+22252225+ nameBuf := make([]byte, 9)
22262226+ for i := uint64(0); i < n; i++ {
22272227+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
22282228+ if err != nil {
22292229+ return err
22302230+ }
22312231+22322232+ if !ok {
22332233+ // Field doesn't exist on this type, so ignore it
22342234+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
22352235+ return err
22362236+ }
22372237+ continue
22382238+ }
22392239+22402240+ switch string(nameBuf[:nameLen]) {
22412241+ // t.LexiconTypeID (string) (string)
22422242+ case "$type":
22432243+22442244+ {
22452245+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
22462246+ if err != nil {
22472247+ return err
22482248+ }
22492249+22502250+ t.LexiconTypeID = string(sval)
21212251 }
21222252 // t.CreatedAt (string) (string)
21232253 case "createdAt":
···2728285827292859 return nil
27302860}
27312731-func (t *Pipeline_Dependency) MarshalCBOR(w io.Writer) error {
27322732- if t == nil {
27332733- _, err := w.Write(cbg.CborNull)
27342734- return err
27352735- }
27362736-27372737- cw := cbg.NewCborWriter(w)
27382738-27392739- if _, err := cw.Write([]byte{162}); err != nil {
27402740- return err
27412741- }
27422742-27432743- // t.Packages ([]string) (slice)
27442744- if len("packages") > 1000000 {
27452745- return xerrors.Errorf("Value in field \"packages\" was too long")
27462746- }
27472747-27482748- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("packages"))); err != nil {
27492749- return err
27502750- }
27512751- if _, err := cw.WriteString(string("packages")); err != nil {
27522752- return err
27532753- }
27542754-27552755- if len(t.Packages) > 8192 {
27562756- return xerrors.Errorf("Slice value in field t.Packages was too long")
27572757- }
27582758-27592759- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Packages))); err != nil {
27602760- return err
27612761- }
27622762- for _, v := range t.Packages {
27632763- if len(v) > 1000000 {
27642764- return xerrors.Errorf("Value in field v was too long")
27652765- }
27662766-27672767- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
27682768- return err
27692769- }
27702770- if _, err := cw.WriteString(string(v)); err != nil {
27712771- return err
27722772- }
27732773-27742774- }
27752775-27762776- // t.Registry (string) (string)
27772777- if len("registry") > 1000000 {
27782778- return xerrors.Errorf("Value in field \"registry\" was too long")
27792779- }
27802780-27812781- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("registry"))); err != nil {
27822782- return err
27832783- }
27842784- if _, err := cw.WriteString(string("registry")); err != nil {
27852785- return err
27862786- }
27872787-27882788- if len(t.Registry) > 1000000 {
27892789- return xerrors.Errorf("Value in field t.Registry was too long")
27902790- }
27912791-27922792- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Registry))); err != nil {
27932793- return err
27942794- }
27952795- if _, err := cw.WriteString(string(t.Registry)); err != nil {
27962796- return err
27972797- }
27982798- return nil
27992799-}
28002800-28012801-func (t *Pipeline_Dependency) UnmarshalCBOR(r io.Reader) (err error) {
28022802- *t = Pipeline_Dependency{}
28032803-28042804- cr := cbg.NewCborReader(r)
28052805-28062806- maj, extra, err := cr.ReadHeader()
28072807- if err != nil {
28082808- return err
28092809- }
28102810- defer func() {
28112811- if err == io.EOF {
28122812- err = io.ErrUnexpectedEOF
28132813- }
28142814- }()
28152815-28162816- if maj != cbg.MajMap {
28172817- return fmt.Errorf("cbor input should be of type map")
28182818- }
28192819-28202820- if extra > cbg.MaxLength {
28212821- return fmt.Errorf("Pipeline_Dependency: map struct too large (%d)", extra)
28222822- }
28232823-28242824- n := extra
28252825-28262826- nameBuf := make([]byte, 8)
28272827- for i := uint64(0); i < n; i++ {
28282828- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
28292829- if err != nil {
28302830- return err
28312831- }
28322832-28332833- if !ok {
28342834- // Field doesn't exist on this type, so ignore it
28352835- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
28362836- return err
28372837- }
28382838- continue
28392839- }
28402840-28412841- switch string(nameBuf[:nameLen]) {
28422842- // t.Packages ([]string) (slice)
28432843- case "packages":
28442844-28452845- maj, extra, err = cr.ReadHeader()
28462846- if err != nil {
28472847- return err
28482848- }
28492849-28502850- if extra > 8192 {
28512851- return fmt.Errorf("t.Packages: array too large (%d)", extra)
28522852- }
28532853-28542854- if maj != cbg.MajArray {
28552855- return fmt.Errorf("expected cbor array")
28562856- }
28572857-28582858- if extra > 0 {
28592859- t.Packages = make([]string, extra)
28602860- }
28612861-28622862- for i := 0; i < int(extra); i++ {
28632863- {
28642864- var maj byte
28652865- var extra uint64
28662866- var err error
28672867- _ = maj
28682868- _ = extra
28692869- _ = err
28702870-28712871- {
28722872- sval, err := cbg.ReadStringWithMax(cr, 1000000)
28732873- if err != nil {
28742874- return err
28752875- }
28762876-28772877- t.Packages[i] = string(sval)
28782878- }
28792879-28802880- }
28812881- }
28822882- // t.Registry (string) (string)
28832883- case "registry":
28842884-28852885- {
28862886- sval, err := cbg.ReadStringWithMax(cr, 1000000)
28872887- if err != nil {
28882888- return err
28892889- }
28902890-28912891- t.Registry = string(sval)
28922892- }
28932893-28942894- default:
28952895- // Field doesn't exist on this type, so ignore it
28962896- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
28972897- return err
28982898- }
28992899- }
29002900- }
29012901-29022902- return nil
29032903-}
29042861func (t *Pipeline_ManualTriggerData) MarshalCBOR(w io.Writer) error {
29052862 if t == nil {
29062863 _, err := w.Write(cbg.CborNull)
···3916387339173874 return nil
39183875}
39193919-func (t *Pipeline_Step) MarshalCBOR(w io.Writer) error {
39203920- if t == nil {
39213921- _, err := w.Write(cbg.CborNull)
39223922- return err
39233923- }
39243924-39253925- cw := cbg.NewCborWriter(w)
39263926- fieldCount := 3
39273927-39283928- if t.Environment == nil {
39293929- fieldCount--
39303930- }
39313931-39323932- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
39333933- return err
39343934- }
39353935-39363936- // t.Name (string) (string)
39373937- if len("name") > 1000000 {
39383938- return xerrors.Errorf("Value in field \"name\" was too long")
39393939- }
39403940-39413941- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("name"))); err != nil {
39423942- return err
39433943- }
39443944- if _, err := cw.WriteString(string("name")); err != nil {
39453945- return err
39463946- }
39473947-39483948- if len(t.Name) > 1000000 {
39493949- return xerrors.Errorf("Value in field t.Name was too long")
39503950- }
39513951-39523952- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
39533953- return err
39543954- }
39553955- if _, err := cw.WriteString(string(t.Name)); err != nil {
39563956- return err
39573957- }
39583958-39593959- // t.Command (string) (string)
39603960- if len("command") > 1000000 {
39613961- return xerrors.Errorf("Value in field \"command\" was too long")
39623962- }
39633963-39643964- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("command"))); err != nil {
39653965- return err
39663966- }
39673967- if _, err := cw.WriteString(string("command")); err != nil {
39683968- return err
39693969- }
39703970-39713971- if len(t.Command) > 1000000 {
39723972- return xerrors.Errorf("Value in field t.Command was too long")
39733973- }
39743974-39753975- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Command))); err != nil {
39763976- return err
39773977- }
39783978- if _, err := cw.WriteString(string(t.Command)); err != nil {
39793979- return err
39803980- }
39813981-39823982- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
39833983- if t.Environment != nil {
39843984-39853985- if len("environment") > 1000000 {
39863986- return xerrors.Errorf("Value in field \"environment\" was too long")
39873987- }
39883988-39893989- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
39903990- return err
39913991- }
39923992- if _, err := cw.WriteString(string("environment")); err != nil {
39933993- return err
39943994- }
39953995-39963996- if len(t.Environment) > 8192 {
39973997- return xerrors.Errorf("Slice value in field t.Environment was too long")
39983998- }
39993999-40004000- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
40014001- return err
40024002- }
40034003- for _, v := range t.Environment {
40044004- if err := v.MarshalCBOR(cw); err != nil {
40054005- return err
40064006- }
40074007-40084008- }
40094009- }
40104010- return nil
40114011-}
// UnmarshalCBOR decodes a Pipeline_Step from the CBOR map produced by
// MarshalCBOR. The receiver is reset to the zero value first, so fields
// absent from the input are left zeroed. Unknown map keys are skipped.
// (Generated by cbor-gen; edit the lexicon/generator, not this code.)
func (t *Pipeline_Step) UnmarshalCBOR(r io.Reader) (err error) {
	*t = Pipeline_Step{}

	cr := cbg.NewCborReader(r)

	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	// A clean EOF mid-struct means the input was truncated; report it
	// as such to callers (named return lets the defer rewrite err).
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}

	if extra > cbg.MaxLength {
		return fmt.Errorf("Pipeline_Step: map struct too large (%d)", extra)
	}

	n := extra

	// Buffer sized to the longest known key, "environment" (11 bytes).
	// ReadFullStringIntoBuf reports ok=false for any longer key.
	nameBuf := make([]byte, 11)
	for i := uint64(0); i < n; i++ {
		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
		if err != nil {
			return err
		}

		if !ok {
			// Field doesn't exist on this type, so ignore it
			// (consume and discard the value to stay aligned).
			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
				return err
			}
			continue
		}

		switch string(nameBuf[:nameLen]) {
		// t.Name (string) (string)
		case "name":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Name = string(sval)
			}
		// t.Command (string) (string)
		case "command":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Command = string(sval)
			}
		// t.Environment ([]*tangled.Pipeline_Pair) (slice)
		case "environment":

			maj, extra, err = cr.ReadHeader()
			if err != nil {
				return err
			}

			// Generator-imposed cap on slice fields.
			if extra > 8192 {
				return fmt.Errorf("t.Environment: array too large (%d)", extra)
			}

			if maj != cbg.MajArray {
				return fmt.Errorf("expected cbor array")
			}

			if extra > 0 {
				t.Environment = make([]*Pipeline_Pair, extra)
			}

			for i := 0; i < int(extra); i++ {
				{
					var maj byte
					var extra uint64
					var err error
					_ = maj
					_ = extra
					_ = err

					{

						// Peek one byte: CBOR null means a nil
						// element; otherwise unread and decode.
						b, err := cr.ReadByte()
						if err != nil {
							return err
						}
						if b != cbg.CborNull[0] {
							if err := cr.UnreadByte(); err != nil {
								return err
							}
							t.Environment[i] = new(Pipeline_Pair)
							if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
								return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
							}
						}

					}

				}
			}

		default:
			// Field doesn't exist on this type, so ignore it.
			// NOTE(review): this branch scans the raw reader r while the
			// !ok branch above scans cr — this matches cbor-gen's output,
			// but confirm both stay aligned if the generator is updated.
			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
				return err
			}
		}
	}

	return nil
}
41363876func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
41373877 if t == nil {
41383878 _, err := w.Write(cbg.CborNull)
···4609434946104350 cw := cbg.NewCborWriter(w)
4611435146124612- if _, err := cw.Write([]byte{165}); err != nil {
43524352+ if _, err := cw.Write([]byte{164}); err != nil {
43534353+ return err
43544354+ }
43554355+43564356+ // t.Raw (string) (string)
43574357+ if len("raw") > 1000000 {
43584358+ return xerrors.Errorf("Value in field \"raw\" was too long")
43594359+ }
43604360+43614361+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("raw"))); err != nil {
43624362+ return err
43634363+ }
43644364+ if _, err := cw.WriteString(string("raw")); err != nil {
43654365+ return err
43664366+ }
43674367+43684368+ if len(t.Raw) > 1000000 {
43694369+ return xerrors.Errorf("Value in field t.Raw was too long")
43704370+ }
43714371+43724372+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Raw))); err != nil {
43734373+ return err
43744374+ }
43754375+ if _, err := cw.WriteString(string(t.Raw)); err != nil {
46134376 return err
46144377 }
46154378···46524415 return err
46534416 }
4654441746554655- // t.Steps ([]*tangled.Pipeline_Step) (slice)
46564656- if len("steps") > 1000000 {
46574657- return xerrors.Errorf("Value in field \"steps\" was too long")
46584658- }
46594659-46604660- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("steps"))); err != nil {
46614661- return err
46624662- }
46634663- if _, err := cw.WriteString(string("steps")); err != nil {
46644664- return err
44184418+ // t.Engine (string) (string)
44194419+ if len("engine") > 1000000 {
44204420+ return xerrors.Errorf("Value in field \"engine\" was too long")
46654421 }
4666442246674667- if len(t.Steps) > 8192 {
46684668- return xerrors.Errorf("Slice value in field t.Steps was too long")
46694669- }
46704670-46714671- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Steps))); err != nil {
46724672- return err
46734673- }
46744674- for _, v := range t.Steps {
46754675- if err := v.MarshalCBOR(cw); err != nil {
46764676- return err
46774677- }
46784678-46794679- }
46804680-46814681- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
46824682- if len("environment") > 1000000 {
46834683- return xerrors.Errorf("Value in field \"environment\" was too long")
46844684- }
46854685-46864686- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
46874687- return err
46884688- }
46894689- if _, err := cw.WriteString(string("environment")); err != nil {
44234423+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("engine"))); err != nil {
46904424 return err
46914425 }
46924692-46934693- if len(t.Environment) > 8192 {
46944694- return xerrors.Errorf("Slice value in field t.Environment was too long")
46954695- }
46964696-46974697- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
44264426+ if _, err := cw.WriteString(string("engine")); err != nil {
46984427 return err
46994699- }
47004700- for _, v := range t.Environment {
47014701- if err := v.MarshalCBOR(cw); err != nil {
47024702- return err
47034703- }
47044704-47054428 }
4706442947074707- // t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
47084708- if len("dependencies") > 1000000 {
47094709- return xerrors.Errorf("Value in field \"dependencies\" was too long")
44304430+ if len(t.Engine) > 1000000 {
44314431+ return xerrors.Errorf("Value in field t.Engine was too long")
47104432 }
4711443347124712- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependencies"))); err != nil {
44344434+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Engine))); err != nil {
47134435 return err
47144436 }
47154715- if _, err := cw.WriteString(string("dependencies")); err != nil {
47164716- return err
47174717- }
47184718-47194719- if len(t.Dependencies) > 8192 {
47204720- return xerrors.Errorf("Slice value in field t.Dependencies was too long")
47214721- }
47224722-47234723- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Dependencies))); err != nil {
44374437+ if _, err := cw.WriteString(string(t.Engine)); err != nil {
47244438 return err
47254725- }
47264726- for _, v := range t.Dependencies {
47274727- if err := v.MarshalCBOR(cw); err != nil {
47284728- return err
47294729- }
47304730-47314439 }
47324440 return nil
47334441}
···4757446547584466 n := extra
4759446747604760- nameBuf := make([]byte, 12)
44684468+ nameBuf := make([]byte, 6)
47614469 for i := uint64(0); i < n; i++ {
47624470 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
47634471 if err != nil {
···47734481 }
4774448247754483 switch string(nameBuf[:nameLen]) {
47764776- // t.Name (string) (string)
44844484+ // t.Raw (string) (string)
44854485+ case "raw":
44864486+44874487+ {
44884488+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
44894489+ if err != nil {
44904490+ return err
44914491+ }
44924492+44934493+ t.Raw = string(sval)
44944494+ }
44954495+ // t.Name (string) (string)
47774496 case "name":
4778449747794498 {
···48044523 }
4805452448064525 }
48074807- // t.Steps ([]*tangled.Pipeline_Step) (slice)
48084808- case "steps":
45264526+ // t.Engine (string) (string)
45274527+ case "engine":
4809452848104810- maj, extra, err = cr.ReadHeader()
48114811- if err != nil {
48124812- return err
48134813- }
48144814-48154815- if extra > 8192 {
48164816- return fmt.Errorf("t.Steps: array too large (%d)", extra)
48174817- }
48184818-48194819- if maj != cbg.MajArray {
48204820- return fmt.Errorf("expected cbor array")
48214821- }
48224822-48234823- if extra > 0 {
48244824- t.Steps = make([]*Pipeline_Step, extra)
48254825- }
48264826-48274827- for i := 0; i < int(extra); i++ {
48284828- {
48294829- var maj byte
48304830- var extra uint64
48314831- var err error
48324832- _ = maj
48334833- _ = extra
48344834- _ = err
48354835-48364836- {
48374837-48384838- b, err := cr.ReadByte()
48394839- if err != nil {
48404840- return err
48414841- }
48424842- if b != cbg.CborNull[0] {
48434843- if err := cr.UnreadByte(); err != nil {
48444844- return err
48454845- }
48464846- t.Steps[i] = new(Pipeline_Step)
48474847- if err := t.Steps[i].UnmarshalCBOR(cr); err != nil {
48484848- return xerrors.Errorf("unmarshaling t.Steps[i] pointer: %w", err)
48494849- }
48504850- }
48514851-48524852- }
48534853-45294529+ {
45304530+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
45314531+ if err != nil {
45324532+ return err
48544533 }
48554855- }
48564856- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
48574857- case "environment":
4858453448594859- maj, extra, err = cr.ReadHeader()
48604860- if err != nil {
48614861- return err
48624862- }
48634863-48644864- if extra > 8192 {
48654865- return fmt.Errorf("t.Environment: array too large (%d)", extra)
48664866- }
48674867-48684868- if maj != cbg.MajArray {
48694869- return fmt.Errorf("expected cbor array")
48704870- }
48714871-48724872- if extra > 0 {
48734873- t.Environment = make([]*Pipeline_Pair, extra)
48744874- }
48754875-48764876- for i := 0; i < int(extra); i++ {
48774877- {
48784878- var maj byte
48794879- var extra uint64
48804880- var err error
48814881- _ = maj
48824882- _ = extra
48834883- _ = err
48844884-48854885- {
48864886-48874887- b, err := cr.ReadByte()
48884888- if err != nil {
48894889- return err
48904890- }
48914891- if b != cbg.CborNull[0] {
48924892- if err := cr.UnreadByte(); err != nil {
48934893- return err
48944894- }
48954895- t.Environment[i] = new(Pipeline_Pair)
48964896- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
48974897- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
48984898- }
48994899- }
49004900-49014901- }
49024902-49034903- }
49044904- }
49054905- // t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
49064906- case "dependencies":
49074907-49084908- maj, extra, err = cr.ReadHeader()
49094909- if err != nil {
49104910- return err
49114911- }
49124912-49134913- if extra > 8192 {
49144914- return fmt.Errorf("t.Dependencies: array too large (%d)", extra)
49154915- }
49164916-49174917- if maj != cbg.MajArray {
49184918- return fmt.Errorf("expected cbor array")
49194919- }
49204920-49214921- if extra > 0 {
49224922- t.Dependencies = make([]*Pipeline_Dependency, extra)
49234923- }
49244924-49254925- for i := 0; i < int(extra); i++ {
49264926- {
49274927- var maj byte
49284928- var extra uint64
49294929- var err error
49304930- _ = maj
49314931- _ = extra
49324932- _ = err
49334933-49344934- {
49354935-49364936- b, err := cr.ReadByte()
49374937- if err != nil {
49384938- return err
49394939- }
49404940- if b != cbg.CborNull[0] {
49414941- if err := cr.UnreadByte(); err != nil {
49424942- return err
49434943- }
49444944- t.Dependencies[i] = new(Pipeline_Dependency)
49454945- if err := t.Dependencies[i].UnmarshalCBOR(cr); err != nil {
49464946- return xerrors.Errorf("unmarshaling t.Dependencies[i] pointer: %w", err)
49474947- }
49484948- }
49494949-49504950- }
49514951-49524952- }
45354535+ t.Engine = string(sval)
49534536 }
4954453749554538 default:
···5854543758555438 return nil
58565439}
54405440+func (t *RepoCollaborator) MarshalCBOR(w io.Writer) error {
54415441+ if t == nil {
54425442+ _, err := w.Write(cbg.CborNull)
54435443+ return err
54445444+ }
54455445+54465446+ cw := cbg.NewCborWriter(w)
54475447+54485448+ if _, err := cw.Write([]byte{164}); err != nil {
54495449+ return err
54505450+ }
54515451+54525452+ // t.Repo (string) (string)
54535453+ if len("repo") > 1000000 {
54545454+ return xerrors.Errorf("Value in field \"repo\" was too long")
54555455+ }
54565456+54575457+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
54585458+ return err
54595459+ }
54605460+ if _, err := cw.WriteString(string("repo")); err != nil {
54615461+ return err
54625462+ }
54635463+54645464+ if len(t.Repo) > 1000000 {
54655465+ return xerrors.Errorf("Value in field t.Repo was too long")
54665466+ }
54675467+54685468+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
54695469+ return err
54705470+ }
54715471+ if _, err := cw.WriteString(string(t.Repo)); err != nil {
54725472+ return err
54735473+ }
54745474+54755475+ // t.LexiconTypeID (string) (string)
54765476+ if len("$type") > 1000000 {
54775477+ return xerrors.Errorf("Value in field \"$type\" was too long")
54785478+ }
54795479+54805480+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
54815481+ return err
54825482+ }
54835483+ if _, err := cw.WriteString(string("$type")); err != nil {
54845484+ return err
54855485+ }
54865486+54875487+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.repo.collaborator"))); err != nil {
54885488+ return err
54895489+ }
54905490+ if _, err := cw.WriteString(string("sh.tangled.repo.collaborator")); err != nil {
54915491+ return err
54925492+ }
54935493+54945494+ // t.Subject (string) (string)
54955495+ if len("subject") > 1000000 {
54965496+ return xerrors.Errorf("Value in field \"subject\" was too long")
54975497+ }
54985498+54995499+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil {
55005500+ return err
55015501+ }
55025502+ if _, err := cw.WriteString(string("subject")); err != nil {
55035503+ return err
55045504+ }
55055505+55065506+ if len(t.Subject) > 1000000 {
55075507+ return xerrors.Errorf("Value in field t.Subject was too long")
55085508+ }
55095509+55105510+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Subject))); err != nil {
55115511+ return err
55125512+ }
55135513+ if _, err := cw.WriteString(string(t.Subject)); err != nil {
55145514+ return err
55155515+ }
55165516+55175517+ // t.CreatedAt (string) (string)
55185518+ if len("createdAt") > 1000000 {
55195519+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
55205520+ }
55215521+55225522+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
55235523+ return err
55245524+ }
55255525+ if _, err := cw.WriteString(string("createdAt")); err != nil {
55265526+ return err
55275527+ }
55285528+55295529+ if len(t.CreatedAt) > 1000000 {
55305530+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
55315531+ }
55325532+55335533+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
55345534+ return err
55355535+ }
55365536+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
55375537+ return err
55385538+ }
55395539+ return nil
55405540+}
55415541+55425542+func (t *RepoCollaborator) UnmarshalCBOR(r io.Reader) (err error) {
55435543+ *t = RepoCollaborator{}
55445544+55455545+ cr := cbg.NewCborReader(r)
55465546+55475547+ maj, extra, err := cr.ReadHeader()
55485548+ if err != nil {
55495549+ return err
55505550+ }
55515551+ defer func() {
55525552+ if err == io.EOF {
55535553+ err = io.ErrUnexpectedEOF
55545554+ }
55555555+ }()
55565556+55575557+ if maj != cbg.MajMap {
55585558+ return fmt.Errorf("cbor input should be of type map")
55595559+ }
55605560+55615561+ if extra > cbg.MaxLength {
55625562+ return fmt.Errorf("RepoCollaborator: map struct too large (%d)", extra)
55635563+ }
55645564+55655565+ n := extra
55665566+55675567+ nameBuf := make([]byte, 9)
55685568+ for i := uint64(0); i < n; i++ {
55695569+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
55705570+ if err != nil {
55715571+ return err
55725572+ }
55735573+55745574+ if !ok {
55755575+ // Field doesn't exist on this type, so ignore it
55765576+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
55775577+ return err
55785578+ }
55795579+ continue
55805580+ }
55815581+55825582+ switch string(nameBuf[:nameLen]) {
55835583+ // t.Repo (string) (string)
55845584+ case "repo":
55855585+55865586+ {
55875587+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
55885588+ if err != nil {
55895589+ return err
55905590+ }
55915591+55925592+ t.Repo = string(sval)
55935593+ }
55945594+ // t.LexiconTypeID (string) (string)
55955595+ case "$type":
55965596+55975597+ {
55985598+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
55995599+ if err != nil {
56005600+ return err
56015601+ }
56025602+56035603+ t.LexiconTypeID = string(sval)
56045604+ }
56055605+ // t.Subject (string) (string)
56065606+ case "subject":
56075607+56085608+ {
56095609+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
56105610+ if err != nil {
56115611+ return err
56125612+ }
56135613+56145614+ t.Subject = string(sval)
56155615+ }
56165616+ // t.CreatedAt (string) (string)
56175617+ case "createdAt":
56185618+56195619+ {
56205620+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
56215621+ if err != nil {
56225622+ return err
56235623+ }
56245624+56255625+ t.CreatedAt = string(sval)
56265626+ }
56275627+56285628+ default:
56295629+ // Field doesn't exist on this type, so ignore it
56305630+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
56315631+ return err
56325632+ }
56335633+ }
56345634+ }
56355635+56365636+ return nil
56375637+}
58575638func (t *RepoIssue) MarshalCBOR(w io.Writer) error {
58585639 if t == nil {
58595640 _, err := w.Write(cbg.CborNull)
···58615642 }
5862564358635644 cw := cbg.NewCborWriter(w)
58645864- fieldCount := 7
56455645+ fieldCount := 5
5865564658665647 if t.Body == nil {
58675648 fieldCount--
···59455726 return err
59465727 }
5947572859485948- // t.Owner (string) (string)
59495949- if len("owner") > 1000000 {
59505950- return xerrors.Errorf("Value in field \"owner\" was too long")
59515951- }
59525952-59535953- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
59545954- return err
59555955- }
59565956- if _, err := cw.WriteString(string("owner")); err != nil {
59575957- return err
59585958- }
59595959-59605960- if len(t.Owner) > 1000000 {
59615961- return xerrors.Errorf("Value in field t.Owner was too long")
59625962- }
59635963-59645964- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Owner))); err != nil {
59655965- return err
59665966- }
59675967- if _, err := cw.WriteString(string(t.Owner)); err != nil {
59685968- return err
59695969- }
59705970-59715729 // t.Title (string) (string)
59725730 if len("title") > 1000000 {
59735731 return xerrors.Errorf("Value in field \"title\" was too long")
···59895747 }
59905748 if _, err := cw.WriteString(string(t.Title)); err != nil {
59915749 return err
59925992- }
59935993-59945994- // t.IssueId (int64) (int64)
59955995- if len("issueId") > 1000000 {
59965996- return xerrors.Errorf("Value in field \"issueId\" was too long")
59975997- }
59985998-59995999- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("issueId"))); err != nil {
60006000- return err
60016001- }
60026002- if _, err := cw.WriteString(string("issueId")); err != nil {
60036003- return err
60046004- }
60056005-60066006- if t.IssueId >= 0 {
60076007- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.IssueId)); err != nil {
60086008- return err
60096009- }
60106010- } else {
60116011- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.IssueId-1)); err != nil {
60126012- return err
60136013- }
60145750 }
6015575160165752 // t.CreatedAt (string) (string)
···6122585861235859 t.LexiconTypeID = string(sval)
61245860 }
61256125- // t.Owner (string) (string)
61266126- case "owner":
61276127-61286128- {
61296129- sval, err := cbg.ReadStringWithMax(cr, 1000000)
61306130- if err != nil {
61316131- return err
61326132- }
61336133-61346134- t.Owner = string(sval)
61356135- }
61365861 // t.Title (string) (string)
61375862 case "title":
61385863···6144586961455870 t.Title = string(sval)
61465871 }
61476147- // t.IssueId (int64) (int64)
61486148- case "issueId":
61496149- {
61506150- maj, extra, err := cr.ReadHeader()
61516151- if err != nil {
61526152- return err
61536153- }
61546154- var extraI int64
61556155- switch maj {
61566156- case cbg.MajUnsignedInt:
61576157- extraI = int64(extra)
61586158- if extraI < 0 {
61596159- return fmt.Errorf("int64 positive overflow")
61606160- }
61616161- case cbg.MajNegativeInt:
61626162- extraI = int64(extra)
61636163- if extraI < 0 {
61646164- return fmt.Errorf("int64 negative overflow")
61656165- }
61666166- extraI = -1 - extraI
61676167- default:
61686168- return fmt.Errorf("wrong type for int64 field: %d", maj)
61696169- }
61706170-61716171- t.IssueId = int64(extraI)
61726172- }
61735872 // t.CreatedAt (string) (string)
61745873 case "createdAt":
61755874···61995898 }
6200589962015900 cw := cbg.NewCborWriter(w)
62026202- fieldCount := 7
59015901+ fieldCount := 5
6203590262046204- if t.CommentId == nil {
62056205- fieldCount--
62066206- }
62076207-62086208- if t.Owner == nil {
62096209- fieldCount--
62106210- }
62116211-62126212- if t.Repo == nil {
59035903+ if t.ReplyTo == nil {
62135904 fieldCount--
62145905 }
62155906···62405931 return err
62415932 }
6242593362436243- // t.Repo (string) (string)
62446244- if t.Repo != nil {
62456245-62466246- if len("repo") > 1000000 {
62476247- return xerrors.Errorf("Value in field \"repo\" was too long")
62486248- }
62496249-62506250- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
62516251- return err
62526252- }
62536253- if _, err := cw.WriteString(string("repo")); err != nil {
62546254- return err
62556255- }
62566256-62576257- if t.Repo == nil {
62586258- if _, err := cw.Write(cbg.CborNull); err != nil {
62596259- return err
62606260- }
62616261- } else {
62626262- if len(*t.Repo) > 1000000 {
62636263- return xerrors.Errorf("Value in field t.Repo was too long")
62646264- }
62656265-62666266- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
62676267- return err
62686268- }
62696269- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
62706270- return err
62716271- }
62726272- }
62736273- }
62746274-62755934 // t.LexiconTypeID (string) (string)
62765935 if len("$type") > 1000000 {
62775936 return xerrors.Errorf("Value in field \"$type\" was too long")
···63145973 return err
63155974 }
6316597563176317- // t.Owner (string) (string)
63186318- if t.Owner != nil {
59765976+ // t.ReplyTo (string) (string)
59775977+ if t.ReplyTo != nil {
6319597863206320- if len("owner") > 1000000 {
63216321- return xerrors.Errorf("Value in field \"owner\" was too long")
59795979+ if len("replyTo") > 1000000 {
59805980+ return xerrors.Errorf("Value in field \"replyTo\" was too long")
63225981 }
6323598263246324- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
59835983+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("replyTo"))); err != nil {
63255984 return err
63265985 }
63276327- if _, err := cw.WriteString(string("owner")); err != nil {
59865986+ if _, err := cw.WriteString(string("replyTo")); err != nil {
63285987 return err
63295988 }
6330598963316331- if t.Owner == nil {
59905990+ if t.ReplyTo == nil {
63325991 if _, err := cw.Write(cbg.CborNull); err != nil {
63335992 return err
63345993 }
63355994 } else {
63366336- if len(*t.Owner) > 1000000 {
63376337- return xerrors.Errorf("Value in field t.Owner was too long")
59955995+ if len(*t.ReplyTo) > 1000000 {
59965996+ return xerrors.Errorf("Value in field t.ReplyTo was too long")
63385997 }
6339599863406340- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
59995999+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.ReplyTo))); err != nil {
63416000 return err
63426001 }
63436343- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
60026002+ if _, err := cw.WriteString(string(*t.ReplyTo)); err != nil {
63446003 return err
63456004 }
63466005 }
63476006 }
6348600763496349- // t.CommentId (int64) (int64)
63506350- if t.CommentId != nil {
63516351-63526352- if len("commentId") > 1000000 {
63536353- return xerrors.Errorf("Value in field \"commentId\" was too long")
63546354- }
63556355-63566356- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
63576357- return err
63586358- }
63596359- if _, err := cw.WriteString(string("commentId")); err != nil {
63606360- return err
63616361- }
63626362-63636363- if t.CommentId == nil {
63646364- if _, err := cw.Write(cbg.CborNull); err != nil {
63656365- return err
63666366- }
63676367- } else {
63686368- if *t.CommentId >= 0 {
63696369- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
63706370- return err
63716371- }
63726372- } else {
63736373- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
63746374- return err
63756375- }
63766376- }
63776377- }
63786378-63796379- }
63806380-63816008 // t.CreatedAt (string) (string)
63826009 if len("createdAt") > 1000000 {
63836010 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···6455608264566083 t.Body = string(sval)
64576084 }
64586458- // t.Repo (string) (string)
64596459- case "repo":
64606460-64616461- {
64626462- b, err := cr.ReadByte()
64636463- if err != nil {
64646464- return err
64656465- }
64666466- if b != cbg.CborNull[0] {
64676467- if err := cr.UnreadByte(); err != nil {
64686468- return err
64696469- }
64706470-64716471- sval, err := cbg.ReadStringWithMax(cr, 1000000)
64726472- if err != nil {
64736473- return err
64746474- }
64756475-64766476- t.Repo = (*string)(&sval)
64776477- }
64786478- }
64796085 // t.LexiconTypeID (string) (string)
64806086 case "$type":
64816087···6498610464996105 t.Issue = string(sval)
65006106 }
65016501- // t.Owner (string) (string)
65026502- case "owner":
61076107+ // t.ReplyTo (string) (string)
61086108+ case "replyTo":
6503610965046110 {
65056111 b, err := cr.ReadByte()
···65166122 return err
65176123 }
6518612465196519- t.Owner = (*string)(&sval)
65206520- }
65216521- }
65226522- // t.CommentId (int64) (int64)
65236523- case "commentId":
65246524- {
65256525-65266526- b, err := cr.ReadByte()
65276527- if err != nil {
65286528- return err
65296529- }
65306530- if b != cbg.CborNull[0] {
65316531- if err := cr.UnreadByte(); err != nil {
65326532- return err
65336533- }
65346534- maj, extra, err := cr.ReadHeader()
65356535- if err != nil {
65366536- return err
65376537- }
65386538- var extraI int64
65396539- switch maj {
65406540- case cbg.MajUnsignedInt:
65416541- extraI = int64(extra)
65426542- if extraI < 0 {
65436543- return fmt.Errorf("int64 positive overflow")
65446544- }
65456545- case cbg.MajNegativeInt:
65466546- extraI = int64(extra)
65476547- if extraI < 0 {
65486548- return fmt.Errorf("int64 negative overflow")
65496549- }
65506550- extraI = -1 - extraI
65516551- default:
65526552- return fmt.Errorf("wrong type for int64 field: %d", maj)
65536553- }
65546554-65556555- t.CommentId = (*int64)(&extraI)
61256125+ t.ReplyTo = (*string)(&sval)
65566126 }
65576127 }
65586128 // t.CreatedAt (string) (string)
···67486318 }
6749631967506320 cw := cbg.NewCborWriter(w)
67516751- fieldCount := 9
63216321+ fieldCount := 7
6752632267536323 if t.Body == nil {
67546324 fieldCount--
···68596429 return err
68606430 }
6861643168626862- // t.PullId (int64) (int64)
68636863- if len("pullId") > 1000000 {
68646864- return xerrors.Errorf("Value in field \"pullId\" was too long")
68656865- }
68666866-68676867- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pullId"))); err != nil {
68686868- return err
68696869- }
68706870- if _, err := cw.WriteString(string("pullId")); err != nil {
68716871- return err
68726872- }
68736873-68746874- if t.PullId >= 0 {
68756875- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PullId)); err != nil {
68766876- return err
68776877- }
68786878- } else {
68796879- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.PullId-1)); err != nil {
68806880- return err
68816881- }
68826882- }
68836883-68846432 // t.Source (tangled.RepoPull_Source) (struct)
68856433 if t.Source != nil {
68866434···69006448 }
69016449 }
6902645069036903- // t.CreatedAt (string) (string)
69046904- if len("createdAt") > 1000000 {
69056905- return xerrors.Errorf("Value in field \"createdAt\" was too long")
64516451+ // t.Target (tangled.RepoPull_Target) (struct)
64526452+ if len("target") > 1000000 {
64536453+ return xerrors.Errorf("Value in field \"target\" was too long")
69066454 }
6907645569086908- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
64566456+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("target"))); err != nil {
69096457 return err
69106458 }
69116911- if _, err := cw.WriteString(string("createdAt")); err != nil {
64596459+ if _, err := cw.WriteString(string("target")); err != nil {
69126460 return err
69136461 }
6914646269156915- if len(t.CreatedAt) > 1000000 {
69166916- return xerrors.Errorf("Value in field t.CreatedAt was too long")
69176917- }
69186918-69196919- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
69206920- return err
69216921- }
69226922- if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
64636463+ if err := t.Target.MarshalCBOR(cw); err != nil {
69236464 return err
69246465 }
6925646669266926- // t.TargetRepo (string) (string)
69276927- if len("targetRepo") > 1000000 {
69286928- return xerrors.Errorf("Value in field \"targetRepo\" was too long")
69296929- }
69306930-69316931- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetRepo"))); err != nil {
69326932- return err
69336933- }
69346934- if _, err := cw.WriteString(string("targetRepo")); err != nil {
69356935- return err
69366936- }
69376937-69386938- if len(t.TargetRepo) > 1000000 {
69396939- return xerrors.Errorf("Value in field t.TargetRepo was too long")
69406940- }
69416941-69426942- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetRepo))); err != nil {
69436943- return err
69446944- }
69456945- if _, err := cw.WriteString(string(t.TargetRepo)); err != nil {
69466946- return err
69476947- }
69486948-69496949- // t.TargetBranch (string) (string)
69506950- if len("targetBranch") > 1000000 {
69516951- return xerrors.Errorf("Value in field \"targetBranch\" was too long")
64676467+ // t.CreatedAt (string) (string)
64686468+ if len("createdAt") > 1000000 {
64696469+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
69526470 }
6953647169546954- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetBranch"))); err != nil {
64726472+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
69556473 return err
69566474 }
69576957- if _, err := cw.WriteString(string("targetBranch")); err != nil {
64756475+ if _, err := cw.WriteString(string("createdAt")); err != nil {
69586476 return err
69596477 }
6960647869616961- if len(t.TargetBranch) > 1000000 {
69626962- return xerrors.Errorf("Value in field t.TargetBranch was too long")
64796479+ if len(t.CreatedAt) > 1000000 {
64806480+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
69636481 }
6964648269656965- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetBranch))); err != nil {
64836483+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
69666484 return err
69676485 }
69686968- if _, err := cw.WriteString(string(t.TargetBranch)); err != nil {
64866486+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
69696487 return err
69706488 }
69716489 return nil
···6996651469976515 n := extra
6998651669996999- nameBuf := make([]byte, 12)
65176517+ nameBuf := make([]byte, 9)
70006518 for i := uint64(0); i < n; i++ {
70016519 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
70026520 if err != nil {
···7066658470676585 t.Title = string(sval)
70686586 }
70697069- // t.PullId (int64) (int64)
70707070- case "pullId":
70717071- {
70727072- maj, extra, err := cr.ReadHeader()
70737073- if err != nil {
70747074- return err
70757075- }
70767076- var extraI int64
70777077- switch maj {
70787078- case cbg.MajUnsignedInt:
70797079- extraI = int64(extra)
70807080- if extraI < 0 {
70817081- return fmt.Errorf("int64 positive overflow")
70827082- }
70837083- case cbg.MajNegativeInt:
70847084- extraI = int64(extra)
70857085- if extraI < 0 {
70867086- return fmt.Errorf("int64 negative overflow")
70877087- }
70887088- extraI = -1 - extraI
70897089- default:
70907090- return fmt.Errorf("wrong type for int64 field: %d", maj)
70917091- }
70927092-70937093- t.PullId = int64(extraI)
70947094- }
70956587 // t.Source (tangled.RepoPull_Source) (struct)
70966588 case "source":
70976589···71126604 }
7113660571146606 }
71157115- // t.CreatedAt (string) (string)
71167116- case "createdAt":
66076607+ // t.Target (tangled.RepoPull_Target) (struct)
66086608+ case "target":
7117660971186610 {
71197119- sval, err := cbg.ReadStringWithMax(cr, 1000000)
66116611+66126612+ b, err := cr.ReadByte()
71206613 if err != nil {
71216614 return err
71226615 }
71237123-71247124- t.CreatedAt = string(sval)
71257125- }
71267126- // t.TargetRepo (string) (string)
71277127- case "targetRepo":
71287128-71297129- {
71307130- sval, err := cbg.ReadStringWithMax(cr, 1000000)
71317131- if err != nil {
71327132- return err
66166616+ if b != cbg.CborNull[0] {
66176617+ if err := cr.UnreadByte(); err != nil {
66186618+ return err
66196619+ }
66206620+ t.Target = new(RepoPull_Target)
66216621+ if err := t.Target.UnmarshalCBOR(cr); err != nil {
66226622+ return xerrors.Errorf("unmarshaling t.Target pointer: %w", err)
66236623+ }
71336624 }
7134662571357135- t.TargetRepo = string(sval)
71366626 }
71377137- // t.TargetBranch (string) (string)
71387138- case "targetBranch":
66276627+ // t.CreatedAt (string) (string)
66286628+ case "createdAt":
7139662971406630 {
71416631 sval, err := cbg.ReadStringWithMax(cr, 1000000)
···71436633 return err
71446634 }
7145663571467146- t.TargetBranch = string(sval)
66366636+ t.CreatedAt = string(sval)
71476637 }
7148663871496639 default:
···71636653 }
7164665471656655 cw := cbg.NewCborWriter(w)
71667166- fieldCount := 7
7167665671687168- if t.CommentId == nil {
71697169- fieldCount--
71707170- }
71717171-71727172- if t.Owner == nil {
71737173- fieldCount--
71747174- }
71757175-71767176- if t.Repo == nil {
71777177- fieldCount--
71787178- }
71797179-71807180- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
66576657+ if _, err := cw.Write([]byte{164}); err != nil {
71816658 return err
71826659 }
71836660···72276704 return err
72286705 }
7229670672307230- // t.Repo (string) (string)
72317231- if t.Repo != nil {
72327232-72337233- if len("repo") > 1000000 {
72347234- return xerrors.Errorf("Value in field \"repo\" was too long")
72357235- }
72367236-72377237- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
72387238- return err
72397239- }
72407240- if _, err := cw.WriteString(string("repo")); err != nil {
72417241- return err
72427242- }
72437243-72447244- if t.Repo == nil {
72457245- if _, err := cw.Write(cbg.CborNull); err != nil {
72467246- return err
72477247- }
72487248- } else {
72497249- if len(*t.Repo) > 1000000 {
72507250- return xerrors.Errorf("Value in field t.Repo was too long")
72517251- }
72527252-72537253- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
72547254- return err
72557255- }
72567256- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
72577257- return err
72587258- }
72597259- }
72607260- }
72617261-72626707 // t.LexiconTypeID (string) (string)
72636708 if len("$type") > 1000000 {
72646709 return xerrors.Errorf("Value in field \"$type\" was too long")
···72766721 }
72776722 if _, err := cw.WriteString(string("sh.tangled.repo.pull.comment")); err != nil {
72786723 return err
72797279- }
72807280-72817281- // t.Owner (string) (string)
72827282- if t.Owner != nil {
72837283-72847284- if len("owner") > 1000000 {
72857285- return xerrors.Errorf("Value in field \"owner\" was too long")
72867286- }
72877287-72887288- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
72897289- return err
72907290- }
72917291- if _, err := cw.WriteString(string("owner")); err != nil {
72927292- return err
72937293- }
72947294-72957295- if t.Owner == nil {
72967296- if _, err := cw.Write(cbg.CborNull); err != nil {
72977297- return err
72987298- }
72997299- } else {
73007300- if len(*t.Owner) > 1000000 {
73017301- return xerrors.Errorf("Value in field t.Owner was too long")
73027302- }
73037303-73047304- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
73057305- return err
73067306- }
73077307- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
73087308- return err
73097309- }
73107310- }
73117311- }
73127312-73137313- // t.CommentId (int64) (int64)
73147314- if t.CommentId != nil {
73157315-73167316- if len("commentId") > 1000000 {
73177317- return xerrors.Errorf("Value in field \"commentId\" was too long")
73187318- }
73197319-73207320- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
73217321- return err
73227322- }
73237323- if _, err := cw.WriteString(string("commentId")); err != nil {
73247324- return err
73257325- }
73267326-73277327- if t.CommentId == nil {
73287328- if _, err := cw.Write(cbg.CborNull); err != nil {
73297329- return err
73307330- }
73317331- } else {
73327332- if *t.CommentId >= 0 {
73337333- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
73347334- return err
73357335- }
73367336- } else {
73377337- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
73387338- return err
73397339- }
73407340- }
73417341- }
73427342-73436724 }
7344672573456726 // t.CreatedAt (string) (string)
···7430681174316812 t.Pull = string(sval)
74326813 }
74337433- // t.Repo (string) (string)
74347434- case "repo":
74357435-74367436- {
74377437- b, err := cr.ReadByte()
74387438- if err != nil {
74397439- return err
74407440- }
74417441- if b != cbg.CborNull[0] {
74427442- if err := cr.UnreadByte(); err != nil {
74437443- return err
74447444- }
74457445-74467446- sval, err := cbg.ReadStringWithMax(cr, 1000000)
74477447- if err != nil {
74487448- return err
74497449- }
74507450-74517451- t.Repo = (*string)(&sval)
74527452- }
74537453- }
74546814 // t.LexiconTypeID (string) (string)
74556815 case "$type":
74566816···74616821 }
7462682274636823 t.LexiconTypeID = string(sval)
74647464- }
74657465- // t.Owner (string) (string)
74667466- case "owner":
74677467-74687468- {
74697469- b, err := cr.ReadByte()
74707470- if err != nil {
74717471- return err
74727472- }
74737473- if b != cbg.CborNull[0] {
74747474- if err := cr.UnreadByte(); err != nil {
74757475- return err
74767476- }
74777477-74787478- sval, err := cbg.ReadStringWithMax(cr, 1000000)
74797479- if err != nil {
74807480- return err
74817481- }
74827482-74837483- t.Owner = (*string)(&sval)
74847484- }
74857485- }
74867486- // t.CommentId (int64) (int64)
74877487- case "commentId":
74887488- {
74897489-74907490- b, err := cr.ReadByte()
74917491- if err != nil {
74927492- return err
74937493- }
74947494- if b != cbg.CborNull[0] {
74957495- if err := cr.UnreadByte(); err != nil {
74967496- return err
74977497- }
74987498- maj, extra, err := cr.ReadHeader()
74997499- if err != nil {
75007500- return err
75017501- }
75027502- var extraI int64
75037503- switch maj {
75047504- case cbg.MajUnsignedInt:
75057505- extraI = int64(extra)
75067506- if extraI < 0 {
75077507- return fmt.Errorf("int64 positive overflow")
75087508- }
75097509- case cbg.MajNegativeInt:
75107510- extraI = int64(extra)
75117511- if extraI < 0 {
75127512- return fmt.Errorf("int64 negative overflow")
75137513- }
75147514- extraI = -1 - extraI
75157515- default:
75167516- return fmt.Errorf("wrong type for int64 field: %d", maj)
75177517- }
75187518-75197519- t.CommentId = (*int64)(&extraI)
75207520- }
75216824 }
75226825 // t.CreatedAt (string) (string)
75236826 case "createdAt":
···7897720078987201 return nil
78997202}
72037203+func (t *RepoPull_Target) MarshalCBOR(w io.Writer) error {
72047204+ if t == nil {
72057205+ _, err := w.Write(cbg.CborNull)
72067206+ return err
72077207+ }
72087208+72097209+ cw := cbg.NewCborWriter(w)
72107210+72117211+ if _, err := cw.Write([]byte{162}); err != nil {
72127212+ return err
72137213+ }
72147214+72157215+ // t.Repo (string) (string)
72167216+ if len("repo") > 1000000 {
72177217+ return xerrors.Errorf("Value in field \"repo\" was too long")
72187218+ }
72197219+72207220+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
72217221+ return err
72227222+ }
72237223+ if _, err := cw.WriteString(string("repo")); err != nil {
72247224+ return err
72257225+ }
72267226+72277227+ if len(t.Repo) > 1000000 {
72287228+ return xerrors.Errorf("Value in field t.Repo was too long")
72297229+ }
72307230+72317231+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
72327232+ return err
72337233+ }
72347234+ if _, err := cw.WriteString(string(t.Repo)); err != nil {
72357235+ return err
72367236+ }
72377237+72387238+ // t.Branch (string) (string)
72397239+ if len("branch") > 1000000 {
72407240+ return xerrors.Errorf("Value in field \"branch\" was too long")
72417241+ }
72427242+72437243+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("branch"))); err != nil {
72447244+ return err
72457245+ }
72467246+ if _, err := cw.WriteString(string("branch")); err != nil {
72477247+ return err
72487248+ }
72497249+72507250+ if len(t.Branch) > 1000000 {
72517251+ return xerrors.Errorf("Value in field t.Branch was too long")
72527252+ }
72537253+72547254+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Branch))); err != nil {
72557255+ return err
72567256+ }
72577257+ if _, err := cw.WriteString(string(t.Branch)); err != nil {
72587258+ return err
72597259+ }
72607260+ return nil
72617261+}
72627262+72637263+func (t *RepoPull_Target) UnmarshalCBOR(r io.Reader) (err error) {
72647264+ *t = RepoPull_Target{}
72657265+72667266+ cr := cbg.NewCborReader(r)
72677267+72687268+ maj, extra, err := cr.ReadHeader()
72697269+ if err != nil {
72707270+ return err
72717271+ }
72727272+ defer func() {
72737273+ if err == io.EOF {
72747274+ err = io.ErrUnexpectedEOF
72757275+ }
72767276+ }()
72777277+72787278+ if maj != cbg.MajMap {
72797279+ return fmt.Errorf("cbor input should be of type map")
72807280+ }
72817281+72827282+ if extra > cbg.MaxLength {
72837283+ return fmt.Errorf("RepoPull_Target: map struct too large (%d)", extra)
72847284+ }
72857285+72867286+ n := extra
72877287+72887288+ nameBuf := make([]byte, 6)
72897289+ for i := uint64(0); i < n; i++ {
72907290+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
72917291+ if err != nil {
72927292+ return err
72937293+ }
72947294+72957295+ if !ok {
72967296+ // Field doesn't exist on this type, so ignore it
72977297+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
72987298+ return err
72997299+ }
73007300+ continue
73017301+ }
73027302+73037303+ switch string(nameBuf[:nameLen]) {
73047304+ // t.Repo (string) (string)
73057305+ case "repo":
73067306+73077307+ {
73087308+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
73097309+ if err != nil {
73107310+ return err
73117311+ }
73127312+73137313+ t.Repo = string(sval)
73147314+ }
73157315+ // t.Branch (string) (string)
73167316+ case "branch":
73177317+73187318+ {
73197319+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
73207320+ if err != nil {
73217321+ return err
73227322+ }
73237323+73247324+ t.Branch = string(sval)
73257325+ }
73267326+73277327+ default:
73287328+ // Field doesn't exist on this type, so ignore it
73297329+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
73307330+ return err
73317331+ }
73327332+ }
73337333+ }
73347334+73357335+ return nil
73367336+}
79007337func (t *Spindle) MarshalCBOR(w io.Writer) error {
79017338 if t == nil {
79027339 _, err := w.Write(cbg.CborNull)
···8225766282267663 return nil
82277664}
76657665+func (t *String) MarshalCBOR(w io.Writer) error {
76667666+ if t == nil {
76677667+ _, err := w.Write(cbg.CborNull)
76687668+ return err
76697669+ }
76707670+76717671+ cw := cbg.NewCborWriter(w)
76727672+76737673+ if _, err := cw.Write([]byte{165}); err != nil {
76747674+ return err
76757675+ }
76767676+76777677+ // t.LexiconTypeID (string) (string)
76787678+ if len("$type") > 1000000 {
76797679+ return xerrors.Errorf("Value in field \"$type\" was too long")
76807680+ }
76817681+76827682+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
76837683+ return err
76847684+ }
76857685+ if _, err := cw.WriteString(string("$type")); err != nil {
76867686+ return err
76877687+ }
76887688+76897689+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.string"))); err != nil {
76907690+ return err
76917691+ }
76927692+ if _, err := cw.WriteString(string("sh.tangled.string")); err != nil {
76937693+ return err
76947694+ }
76957695+76967696+ // t.Contents (string) (string)
76977697+ if len("contents") > 1000000 {
76987698+ return xerrors.Errorf("Value in field \"contents\" was too long")
76997699+ }
77007700+77017701+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("contents"))); err != nil {
77027702+ return err
77037703+ }
77047704+ if _, err := cw.WriteString(string("contents")); err != nil {
77057705+ return err
77067706+ }
77077707+77087708+ if len(t.Contents) > 1000000 {
77097709+ return xerrors.Errorf("Value in field t.Contents was too long")
77107710+ }
77117711+77127712+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Contents))); err != nil {
77137713+ return err
77147714+ }
77157715+ if _, err := cw.WriteString(string(t.Contents)); err != nil {
77167716+ return err
77177717+ }
77187718+77197719+ // t.Filename (string) (string)
77207720+ if len("filename") > 1000000 {
77217721+ return xerrors.Errorf("Value in field \"filename\" was too long")
77227722+ }
77237723+77247724+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("filename"))); err != nil {
77257725+ return err
77267726+ }
77277727+ if _, err := cw.WriteString(string("filename")); err != nil {
77287728+ return err
77297729+ }
77307730+77317731+ if len(t.Filename) > 1000000 {
77327732+ return xerrors.Errorf("Value in field t.Filename was too long")
77337733+ }
77347734+77357735+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Filename))); err != nil {
77367736+ return err
77377737+ }
77387738+ if _, err := cw.WriteString(string(t.Filename)); err != nil {
77397739+ return err
77407740+ }
77417741+77427742+ // t.CreatedAt (string) (string)
77437743+ if len("createdAt") > 1000000 {
77447744+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
77457745+ }
77467746+77477747+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
77487748+ return err
77497749+ }
77507750+ if _, err := cw.WriteString(string("createdAt")); err != nil {
77517751+ return err
77527752+ }
77537753+77547754+ if len(t.CreatedAt) > 1000000 {
77557755+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
77567756+ }
77577757+77587758+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
77597759+ return err
77607760+ }
77617761+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
77627762+ return err
77637763+ }
77647764+77657765+ // t.Description (string) (string)
77667766+ if len("description") > 1000000 {
77677767+ return xerrors.Errorf("Value in field \"description\" was too long")
77687768+ }
77697769+77707770+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("description"))); err != nil {
77717771+ return err
77727772+ }
77737773+ if _, err := cw.WriteString(string("description")); err != nil {
77747774+ return err
77757775+ }
77767776+77777777+ if len(t.Description) > 1000000 {
77787778+ return xerrors.Errorf("Value in field t.Description was too long")
77797779+ }
77807780+77817781+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Description))); err != nil {
77827782+ return err
77837783+ }
77847784+ if _, err := cw.WriteString(string(t.Description)); err != nil {
77857785+ return err
77867786+ }
77877787+ return nil
77887788+}
77897789+77907790+func (t *String) UnmarshalCBOR(r io.Reader) (err error) {
77917791+ *t = String{}
77927792+77937793+ cr := cbg.NewCborReader(r)
77947794+77957795+ maj, extra, err := cr.ReadHeader()
77967796+ if err != nil {
77977797+ return err
77987798+ }
77997799+ defer func() {
78007800+ if err == io.EOF {
78017801+ err = io.ErrUnexpectedEOF
78027802+ }
78037803+ }()
78047804+78057805+ if maj != cbg.MajMap {
78067806+ return fmt.Errorf("cbor input should be of type map")
78077807+ }
78087808+78097809+ if extra > cbg.MaxLength {
78107810+ return fmt.Errorf("String: map struct too large (%d)", extra)
78117811+ }
78127812+78137813+ n := extra
78147814+78157815+ nameBuf := make([]byte, 11)
78167816+ for i := uint64(0); i < n; i++ {
78177817+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
78187818+ if err != nil {
78197819+ return err
78207820+ }
78217821+78227822+ if !ok {
78237823+ // Field doesn't exist on this type, so ignore it
78247824+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
78257825+ return err
78267826+ }
78277827+ continue
78287828+ }
78297829+78307830+ switch string(nameBuf[:nameLen]) {
78317831+ // t.LexiconTypeID (string) (string)
78327832+ case "$type":
78337833+78347834+ {
78357835+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78367836+ if err != nil {
78377837+ return err
78387838+ }
78397839+78407840+ t.LexiconTypeID = string(sval)
78417841+ }
78427842+ // t.Contents (string) (string)
78437843+ case "contents":
78447844+78457845+ {
78467846+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78477847+ if err != nil {
78487848+ return err
78497849+ }
78507850+78517851+ t.Contents = string(sval)
78527852+ }
78537853+ // t.Filename (string) (string)
78547854+ case "filename":
78557855+78567856+ {
78577857+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78587858+ if err != nil {
78597859+ return err
78607860+ }
78617861+78627862+ t.Filename = string(sval)
78637863+ }
78647864+ // t.CreatedAt (string) (string)
78657865+ case "createdAt":
78667866+78677867+ {
78687868+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78697869+ if err != nil {
78707870+ return err
78717871+ }
78727872+78737873+ t.CreatedAt = string(sval)
78747874+ }
78757875+ // t.Description (string) (string)
78767876+ case "description":
78777877+78787878+ {
78797879+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
78807880+ if err != nil {
78817881+ return err
78827882+ }
78837883+78847884+ t.Description = string(sval)
78857885+ }
78867886+78877887+ default:
78887888+ // Field doesn't exist on this type, so ignore it
78897889+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
78907890+ return err
78917891+ }
78927892+ }
78937893+ }
78947894+78957895+ return nil
78967896+}
+19-15
api/tangled/gitrefUpdate.go
···3333 RepoName string `json:"repoName" cborgen:"repoName"`
3434}
35353636-type GitRefUpdate_Meta struct {
3737- CommitCount *GitRefUpdate_Meta_CommitCount `json:"commitCount" cborgen:"commitCount"`
3838- IsDefaultRef bool `json:"isDefaultRef" cborgen:"isDefaultRef"`
3939- LangBreakdown *GitRefUpdate_Meta_LangBreakdown `json:"langBreakdown,omitempty" cborgen:"langBreakdown,omitempty"`
3636+// GitRefUpdate_CommitCountBreakdown is a "commitCountBreakdown" in the sh.tangled.git.refUpdate schema.
3737+type GitRefUpdate_CommitCountBreakdown struct {
3838+ ByEmail []*GitRefUpdate_IndividualEmailCommitCount `json:"byEmail,omitempty" cborgen:"byEmail,omitempty"`
4039}
41404242-type GitRefUpdate_Meta_CommitCount struct {
4343- ByEmail []*GitRefUpdate_Meta_CommitCount_ByEmail_Elem `json:"byEmail,omitempty" cborgen:"byEmail,omitempty"`
4444-}
4545-4646-type GitRefUpdate_Meta_CommitCount_ByEmail_Elem struct {
4141+// GitRefUpdate_IndividualEmailCommitCount is a "individualEmailCommitCount" in the sh.tangled.git.refUpdate schema.
4242+type GitRefUpdate_IndividualEmailCommitCount struct {
4743 Count int64 `json:"count" cborgen:"count"`
4844 Email string `json:"email" cborgen:"email"`
4945}
50465151-type GitRefUpdate_Meta_LangBreakdown struct {
5252- Inputs []*GitRefUpdate_Pair `json:"inputs,omitempty" cborgen:"inputs,omitempty"`
5353-}
5454-5555-// GitRefUpdate_Pair is a "pair" in the sh.tangled.git.refUpdate schema.
5656-type GitRefUpdate_Pair struct {
4747+// GitRefUpdate_IndividualLanguageSize is a "individualLanguageSize" in the sh.tangled.git.refUpdate schema.
4848+type GitRefUpdate_IndividualLanguageSize struct {
5749 Lang string `json:"lang" cborgen:"lang"`
5850 Size int64 `json:"size" cborgen:"size"`
5951}
5252+5353+// GitRefUpdate_LangBreakdown is a "langBreakdown" in the sh.tangled.git.refUpdate schema.
5454+type GitRefUpdate_LangBreakdown struct {
5555+ Inputs []*GitRefUpdate_IndividualLanguageSize `json:"inputs,omitempty" cborgen:"inputs,omitempty"`
5656+}
5757+5858+// GitRefUpdate_Meta is a "meta" in the sh.tangled.git.refUpdate schema.
5959+type GitRefUpdate_Meta struct {
6060+ CommitCount *GitRefUpdate_CommitCountBreakdown `json:"commitCount" cborgen:"commitCount"`
6161+ IsDefaultRef bool `json:"isDefaultRef" cborgen:"isDefaultRef"`
6262+ LangBreakdown *GitRefUpdate_LangBreakdown `json:"langBreakdown,omitempty" cborgen:"langBreakdown,omitempty"`
6363+}
···11+package db
22+33+import (
44+ "fmt"
55+ "strings"
66+ "time"
77+88+ "github.com/bluesky-social/indigo/atproto/syntax"
99+)
1010+1111+type Collaborator struct {
1212+ // identifiers for the record
1313+ Id int64
1414+ Did syntax.DID
1515+ Rkey string
1616+1717+ // content
1818+ SubjectDid syntax.DID
1919+ RepoAt syntax.ATURI
2020+2121+ // meta
2222+ Created time.Time
2323+}
2424+2525+func AddCollaborator(e Execer, c Collaborator) error {
2626+ _, err := e.Exec(
2727+ `insert into collaborators (did, rkey, subject_did, repo_at) values (?, ?, ?, ?);`,
2828+ c.Did, c.Rkey, c.SubjectDid, c.RepoAt,
2929+ )
3030+ return err
3131+}
3232+3333+func DeleteCollaborator(e Execer, filters ...filter) error {
3434+ var conditions []string
3535+ var args []any
3636+ for _, filter := range filters {
3737+ conditions = append(conditions, filter.Condition())
3838+ args = append(args, filter.Arg()...)
3939+ }
4040+4141+ whereClause := ""
4242+ if conditions != nil {
4343+ whereClause = " where " + strings.Join(conditions, " and ")
4444+ }
4545+4646+ query := fmt.Sprintf(`delete from collaborators %s`, whereClause)
4747+4848+ _, err := e.Exec(query, args...)
4949+ return err
5050+}
5151+5252+func CollaboratingIn(e Execer, collaborator string) ([]Repo, error) {
5353+ rows, err := e.Query(`select repo_at from collaborators where subject_did = ?`, collaborator)
5454+ if err != nil {
5555+ return nil, err
5656+ }
5757+ defer rows.Close()
5858+5959+ var repoAts []string
6060+ for rows.Next() {
6161+ var aturi string
6262+ err := rows.Scan(&aturi)
6363+ if err != nil {
6464+ return nil, err
6565+ }
6666+ repoAts = append(repoAts, aturi)
6767+ }
6868+ if err := rows.Err(); err != nil {
6969+ return nil, err
7070+ }
7171+ if repoAts == nil {
7272+ return nil, nil
7373+ }
7474+7575+ return GetRepos(e, 0, FilterIn("at_uri", repoAts))
7676+}
+318-24
appview/db/db.go
···2727}
28282929func Make(dbPath string) (*DB, error) {
3030- db, err := sql.Open("sqlite3", dbPath)
3030+ // https://github.com/mattn/go-sqlite3#connection-string
3131+ opts := []string{
3232+ "_foreign_keys=1",
3333+ "_journal_mode=WAL",
3434+ "_synchronous=NORMAL",
3535+ "_auto_vacuum=incremental",
3636+ }
3737+3838+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
3139 if err != nil {
3240 return nil, err
3341 }
3434- _, err = db.Exec(`
3535- pragma journal_mode = WAL;
3636- pragma synchronous = normal;
3737- pragma foreign_keys = on;
3838- pragma temp_store = memory;
3939- pragma mmap_size = 30000000000;
4040- pragma page_size = 32768;
4141- pragma auto_vacuum = incremental;
4242- pragma busy_timeout = 5000;
4242+4343+ ctx := context.Background()
43444545+ conn, err := db.Conn(ctx)
4646+ if err != nil {
4747+ return nil, err
4848+ }
4949+ defer conn.Close()
5050+5151+ _, err = conn.ExecContext(ctx, `
4452 create table if not exists registrations (
4553 id integer primary key autoincrement,
4654 domain text not null unique,
···436444 unique(repo_at, ref, language)
437445 );
438446447447+ create table if not exists signups_inflight (
448448+ id integer primary key autoincrement,
449449+ email text not null unique,
450450+ invite_code text not null,
451451+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
452452+ );
453453+454454+ create table if not exists strings (
455455+ -- identifiers
456456+ did text not null,
457457+ rkey text not null,
458458+459459+ -- content
460460+ filename text not null,
461461+ description text,
462462+ content text not null,
463463+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
464464+ edited text,
465465+466466+ primary key (did, rkey)
467467+ );
468468+439469 create table if not exists migrations (
440470 id integer primary key autoincrement,
441471 name text unique
442472 );
473473+474474+ -- indexes for better star query performance
475475+ create index if not exists idx_stars_created on stars(created);
476476+ create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
443477 `)
444478 if err != nil {
445479 return nil, err
446480 }
447481448482 // run migrations
449449- runMigration(db, "add-description-to-repos", func(tx *sql.Tx) error {
483483+ runMigration(conn, "add-description-to-repos", func(tx *sql.Tx) error {
450484 tx.Exec(`
451485 alter table repos add column description text check (length(description) <= 200);
452486 `)
453487 return nil
454488 })
455489456456- runMigration(db, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
490490+ runMigration(conn, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
457491 // add unconstrained column
458492 _, err := tx.Exec(`
459493 alter table public_keys
···476510 return nil
477511 })
478512479479- runMigration(db, "add-rkey-to-comments", func(tx *sql.Tx) error {
513513+ runMigration(conn, "add-rkey-to-comments", func(tx *sql.Tx) error {
480514 _, err := tx.Exec(`
481515 alter table comments drop column comment_at;
482516 alter table comments add column rkey text;
···484518 return err
485519 })
486520487487- runMigration(db, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
521521+ runMigration(conn, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
488522 _, err := tx.Exec(`
489523 alter table comments add column deleted text; -- timestamp
490524 alter table comments add column edited text; -- timestamp
···492526 return err
493527 })
494528495495- runMigration(db, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
529529+ runMigration(conn, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
496530 _, err := tx.Exec(`
497531 alter table pulls add column source_branch text;
498532 alter table pulls add column source_repo_at text;
···501535 return err
502536 })
503537504504- runMigration(db, "add-source-to-repos", func(tx *sql.Tx) error {
538538+ runMigration(conn, "add-source-to-repos", func(tx *sql.Tx) error {
505539 _, err := tx.Exec(`
506540 alter table repos add column source text;
507541 `)
···512546 // NOTE: this cannot be done in a transaction, so it is run outside [0]
513547 //
514548 // [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
515515- db.Exec("pragma foreign_keys = off;")
516516- runMigration(db, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
549549+ conn.ExecContext(ctx, "pragma foreign_keys = off;")
550550+ runMigration(conn, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
517551 _, err := tx.Exec(`
518552 create table pulls_new (
519553 -- identifiers
···568602 `)
569603 return err
570604 })
571571- db.Exec("pragma foreign_keys = on;")
605605+ conn.ExecContext(ctx, "pragma foreign_keys = on;")
572606573607 // run migrations
574574- runMigration(db, "add-spindle-to-repos", func(tx *sql.Tx) error {
608608+ runMigration(conn, "add-spindle-to-repos", func(tx *sql.Tx) error {
575609 tx.Exec(`
576610 alter table repos add column spindle text;
577611 `)
578612 return nil
579613 })
580614615615+ // drop all knot secrets, add unique constraint to knots
616616+ //
617617+ // knots will henceforth use service auth for signed requests
618618+ runMigration(conn, "no-more-secrets", func(tx *sql.Tx) error {
619619+ _, err := tx.Exec(`
620620+ create table registrations_new (
621621+ id integer primary key autoincrement,
622622+ domain text not null,
623623+ did text not null,
624624+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
625625+ registered text,
626626+ read_only integer not null default 0,
627627+ unique(domain, did)
628628+ );
629629+630630+ insert into registrations_new (id, domain, did, created, registered, read_only)
631631+ select id, domain, did, created, registered, 1 from registrations
632632+ where registered is not null;
633633+634634+ drop table registrations;
635635+ alter table registrations_new rename to registrations;
636636+ `)
637637+ return err
638638+ })
639639+640640+ // recreate and add rkey + created columns with default constraint
641641+ runMigration(conn, "rework-collaborators-table", func(tx *sql.Tx) error {
642642+ // create new table
643643+ // - repo_at instead of repo integer
644644+ // - rkey field
645645+ // - created field
646646+ _, err := tx.Exec(`
647647+ create table collaborators_new (
648648+ -- identifiers for the record
649649+ id integer primary key autoincrement,
650650+ did text not null,
651651+ rkey text,
652652+653653+ -- content
654654+ subject_did text not null,
655655+ repo_at text not null,
656656+657657+ -- meta
658658+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
659659+660660+ -- constraints
661661+ foreign key (repo_at) references repos(at_uri) on delete cascade
662662+ )
663663+ `)
664664+ if err != nil {
665665+ return err
666666+ }
667667+668668+ // copy data
669669+ _, err = tx.Exec(`
670670+ insert into collaborators_new (id, did, rkey, subject_did, repo_at)
671671+ select
672672+ c.id,
673673+ r.did,
674674+ '',
675675+ c.did,
676676+ r.at_uri
677677+ from collaborators c
678678+ join repos r on c.repo = r.id
679679+ `)
680680+ if err != nil {
681681+ return err
682682+ }
683683+684684+ // drop old table
685685+ _, err = tx.Exec(`drop table collaborators`)
686686+ if err != nil {
687687+ return err
688688+ }
689689+690690+ // rename new table
691691+ _, err = tx.Exec(`alter table collaborators_new rename to collaborators`)
692692+ return err
693693+ })
694694+695695+ runMigration(conn, "add-rkey-to-issues", func(tx *sql.Tx) error {
696696+ _, err := tx.Exec(`
697697+ alter table issues add column rkey text not null default '';
698698+699699+ -- get last url section from issue_at and save to rkey column
700700+ update issues
701701+ set rkey = replace(issue_at, rtrim(issue_at, replace(issue_at, '/', '')), '');
702702+ `)
703703+ return err
704704+ })
705705+706706+ // repurpose the read-only column to "needs-upgrade"
707707+ runMigration(conn, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
708708+ _, err := tx.Exec(`
709709+ alter table registrations rename column read_only to needs_upgrade;
710710+ `)
711711+ return err
712712+ })
713713+714714+ // require all knots to upgrade after the release of total xrpc
715715+ runMigration(conn, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
716716+ _, err := tx.Exec(`
717717+ update registrations set needs_upgrade = 1;
718718+ `)
719719+ return err
720720+ })
721721+722722+ // require all knots to upgrade after the release of total xrpc
723723+ runMigration(conn, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
724724+ _, err := tx.Exec(`
725725+ alter table spindles add column needs_upgrade integer not null default 0;
726726+ `)
727727+ if err != nil {
728728+ return err
729729+ }
730730+731731+ _, err = tx.Exec(`
732732+ update spindles set needs_upgrade = 1;
733733+ `)
734734+ return err
735735+ })
736736+737737+ // remove issue_at from issues and replace with generated column
738738+ //
739739+ // this requires a full table recreation because stored columns
740740+ // cannot be added via alter
741741+ //
742742+ // couple other changes:
743743+ // - columns renamed to be more consistent
744744+ // - adds edited and deleted fields
745745+ //
746746+ // disable foreign-keys for the next migration
747747+ conn.ExecContext(ctx, "pragma foreign_keys = off;")
748748+ runMigration(conn, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
749749+ _, err := tx.Exec(`
750750+ create table if not exists issues_new (
751751+ -- identifiers
752752+ id integer primary key autoincrement,
753753+ did text not null,
754754+ rkey text not null,
755755+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.issue' || '/' || rkey) stored,
756756+757757+ -- at identifiers
758758+ repo_at text not null,
759759+760760+ -- content
761761+ issue_id integer not null,
762762+ title text not null,
763763+ body text not null,
764764+ open integer not null default 1,
765765+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
766766+ edited text, -- timestamp
767767+ deleted text, -- timestamp
768768+769769+ unique(did, rkey),
770770+ unique(repo_at, issue_id),
771771+ unique(at_uri),
772772+ foreign key (repo_at) references repos(at_uri) on delete cascade
773773+ );
774774+ `)
775775+ if err != nil {
776776+ return err
777777+ }
778778+779779+ // transfer data
780780+ _, err = tx.Exec(`
781781+ insert into issues_new (id, did, rkey, repo_at, issue_id, title, body, open, created)
782782+ select
783783+ i.id,
784784+ i.owner_did,
785785+ i.rkey,
786786+ i.repo_at,
787787+ i.issue_id,
788788+ i.title,
789789+ i.body,
790790+ i.open,
791791+ i.created
792792+ from issues i;
793793+ `)
794794+ if err != nil {
795795+ return err
796796+ }
797797+798798+ // drop old table
799799+ _, err = tx.Exec(`drop table issues`)
800800+ if err != nil {
801801+ return err
802802+ }
803803+804804+ // rename new table
805805+ _, err = tx.Exec(`alter table issues_new rename to issues`)
806806+ return err
807807+ })
808808+ conn.ExecContext(ctx, "pragma foreign_keys = on;")
809809+810810+ // - renames the comments table to 'issue_comments'
811811+ // - rework issue comments to update constraints:
812812+ // * unique(did, rkey)
813813+ // * remove comment-id and just use the global ID
814814+ // * foreign key (repo_at, issue_id)
815815+ // - new columns
816816+ // * column "reply_to" which can be any other comment
817817+ // * column "at-uri" which is a generated column
818818+ runMigration(conn, "rework-issue-comments", func(tx *sql.Tx) error {
819819+ _, err := tx.Exec(`
820820+ create table if not exists issue_comments (
821821+ -- identifiers
822822+ id integer primary key autoincrement,
823823+ did text not null,
824824+ rkey text,
825825+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.issue.comment' || '/' || rkey) stored,
826826+827827+ -- at identifiers
828828+ issue_at text not null,
829829+ reply_to text, -- at_uri of parent comment
830830+831831+ -- content
832832+ body text not null,
833833+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
834834+ edited text,
835835+ deleted text,
836836+837837+ -- constraints
838838+ unique(did, rkey),
839839+ unique(at_uri),
840840+ foreign key (issue_at) references issues(at_uri) on delete cascade
841841+ );
842842+ `)
843843+ if err != nil {
844844+ return err
845845+ }
846846+847847+ // transfer data
848848+ _, err = tx.Exec(`
849849+ insert into issue_comments (id, did, rkey, issue_at, body, created, edited, deleted)
850850+ select
851851+ c.id,
852852+ c.owner_did,
853853+ c.rkey,
854854+ i.at_uri, -- get at_uri from issues table
855855+ c.body,
856856+ c.created,
857857+ c.edited,
858858+ c.deleted
859859+ from comments c
860860+ join issues i on c.repo_at = i.repo_at and c.issue_id = i.issue_id;
861861+ `)
862862+ if err != nil {
863863+ return err
864864+ }
865865+866866+ // drop old table
867867+ _, err = tx.Exec(`drop table comments`)
868868+ return err
869869+ })
870870+581871 return &DB{db}, nil
582872}
583873584874type migrationFn = func(*sql.Tx) error
585875586586-func runMigration(d *sql.DB, name string, migrationFn migrationFn) error {
587587- tx, err := d.Begin()
876876+func runMigration(c *sql.Conn, name string, migrationFn migrationFn) error {
877877+ tx, err := c.BeginTx(context.Background(), nil)
588878 if err != nil {
589879 return err
590880 }
···624914 return nil
625915}
626916917917+func (d *DB) Close() error {
918918+ return d.DB.Close()
919919+}
920920+627921type filter struct {
628922 key string
629923 arg any
···651945 kind := rv.Kind()
652946653947 // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
654654- if kind == reflect.Slice || kind == reflect.Array {
948948+ if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
655949 if rv.Len() == 0 {
656950 // always false
657951 return "1 = 0"
···671965func (f filter) Arg() []any {
672966 rv := reflect.ValueOf(f.arg)
673967 kind := rv.Kind()
674674- if kind == reflect.Slice || kind == reflect.Array {
968968+ if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
675969 if rv.Len() == 0 {
676970 return nil
677971 }
+16-2
appview/db/email.go
···103103 query := `
104104 select email, did
105105 from emails
106106- where
107107- verified = ?
106106+ where
107107+ verified = ?
108108 and email in (` + strings.Join(placeholders, ",") + `)
109109 `
110110···153153 `
154154 var count int
155155 err := e.QueryRow(query, did, email).Scan(&count)
156156+ if err != nil {
157157+ return false, err
158158+ }
159159+ return count > 0, nil
160160+}
161161+162162+func CheckEmailExistsAtAll(e Execer, email string) (bool, error) {
163163+ query := `
164164+ select count(*)
165165+ from emails
166166+ where email = ?
167167+ `
168168+ var count int
169169+ err := e.QueryRow(query, email).Scan(&count)
156170 if err != nil {
157171 return false, err
158172 }
+145-42
appview/db/follow.go
···11package db
2233import (
44+ "fmt"
45 "log"
66+ "strings"
57 "time"
68)
79···5355 return err
5456}
55575656-func GetFollowerFollowing(e Execer, did string) (int, int, error) {
5757- followers, following := 0, 0
5858+type FollowStats struct {
5959+ Followers int64
6060+ Following int64
6161+}
6262+6363+func GetFollowerFollowingCount(e Execer, did string) (FollowStats, error) {
6464+ var followers, following int64
5865 err := e.QueryRow(
5959- `SELECT
6666+ `SELECT
6067 COUNT(CASE WHEN subject_did = ? THEN 1 END) AS followers,
6168 COUNT(CASE WHEN user_did = ? THEN 1 END) AS following
6269 FROM follows;`, did, did).Scan(&followers, &following)
6370 if err != nil {
6464- return 0, 0, err
7171+ return FollowStats{}, err
6572 }
6666- return followers, following, nil
7373+ return FollowStats{
7474+ Followers: followers,
7575+ Following: following,
7676+ }, nil
6777}
68786969-type FollowStatus int
7979+func GetFollowerFollowingCounts(e Execer, dids []string) (map[string]FollowStats, error) {
8080+ if len(dids) == 0 {
8181+ return nil, nil
8282+ }
70837171-const (
7272- IsNotFollowing FollowStatus = iota
7373- IsFollowing
7474- IsSelf
7575-)
8484+ placeholders := make([]string, len(dids))
8585+ for i := range placeholders {
8686+ placeholders[i] = "?"
8787+ }
8888+ placeholderStr := strings.Join(placeholders, ",")
76897777-func (s FollowStatus) String() string {
7878- switch s {
7979- case IsNotFollowing:
8080- return "IsNotFollowing"
8181- case IsFollowing:
8282- return "IsFollowing"
8383- case IsSelf:
8484- return "IsSelf"
8585- default:
8686- return "IsNotFollowing"
9090+ args := make([]any, len(dids)*2)
9191+ for i, did := range dids {
9292+ args[i] = did
9393+ args[i+len(dids)] = did
8794 }
8888-}
9595+9696+ query := fmt.Sprintf(`
9797+ select
9898+ coalesce(f.did, g.did) as did,
9999+ coalesce(f.followers, 0) as followers,
100100+ coalesce(g.following, 0) as following
101101+ from (
102102+ select subject_did as did, count(*) as followers
103103+ from follows
104104+ where subject_did in (%s)
105105+ group by subject_did
106106+ ) f
107107+ full outer join (
108108+ select user_did as did, count(*) as following
109109+ from follows
110110+ where user_did in (%s)
111111+ group by user_did
112112+ ) g on f.did = g.did`,
113113+ placeholderStr, placeholderStr)
114114+115115+ result := make(map[string]FollowStats)
116116+117117+ rows, err := e.Query(query, args...)
118118+ if err != nil {
119119+ return nil, err
120120+ }
121121+ defer rows.Close()
122122+123123+ for rows.Next() {
124124+ var did string
125125+ var followers, following int64
126126+ if err := rows.Scan(&did, &followers, &following); err != nil {
127127+ return nil, err
128128+ }
129129+ result[did] = FollowStats{
130130+ Followers: followers,
131131+ Following: following,
132132+ }
133133+ }
891349090-func GetFollowStatus(e Execer, userDid, subjectDid string) FollowStatus {
9191- if userDid == subjectDid {
9292- return IsSelf
9393- } else if _, err := GetFollow(e, userDid, subjectDid); err != nil {
9494- return IsNotFollowing
9595- } else {
9696- return IsFollowing
135135+ for _, did := range dids {
136136+ if _, exists := result[did]; !exists {
137137+ result[did] = FollowStats{
138138+ Followers: 0,
139139+ Following: 0,
140140+ }
141141+ }
97142 }
143143+144144+ return result, nil
98145}
99146100100-func GetAllFollows(e Execer, limit int) ([]Follow, error) {
147147+func GetFollows(e Execer, limit int, filters ...filter) ([]Follow, error) {
101148 var follows []Follow
102149103103- rows, err := e.Query(`
104104- select user_did, subject_did, followed_at, rkey
150150+ var conditions []string
151151+ var args []any
152152+ for _, filter := range filters {
153153+ conditions = append(conditions, filter.Condition())
154154+ args = append(args, filter.Arg()...)
155155+ }
156156+157157+ whereClause := ""
158158+ if conditions != nil {
159159+ whereClause = " where " + strings.Join(conditions, " and ")
160160+ }
161161+ limitClause := ""
162162+ if limit > 0 {
163163+ limitClause = " limit ?"
164164+ args = append(args, limit)
165165+ }
166166+167167+ query := fmt.Sprintf(
168168+ `select user_did, subject_did, followed_at, rkey
105169 from follows
170170+ %s
106171 order by followed_at desc
107107- limit ?`, limit,
108108- )
172172+ %s
173173+ `, whereClause, limitClause)
174174+175175+ rows, err := e.Query(query, args...)
109176 if err != nil {
110177 return nil, err
111178 }
112112- defer rows.Close()
113113-114179 for rows.Next() {
115180 var follow Follow
116181 var followedAt string
117117- if err := rows.Scan(&follow.UserDid, &follow.SubjectDid, &followedAt, &follow.Rkey); err != nil {
182182+ err := rows.Scan(
183183+ &follow.UserDid,
184184+ &follow.SubjectDid,
185185+ &followedAt,
186186+ &follow.Rkey,
187187+ )
188188+ if err != nil {
118189 return nil, err
119190 }
120120-121191 followedAtTime, err := time.Parse(time.RFC3339, followedAt)
122192 if err != nil {
123193 log.Println("unable to determine followed at time")
···125195 } else {
126196 follow.FollowedAt = followedAtTime
127197 }
128128-129198 follows = append(follows, follow)
130199 }
200200+ return follows, nil
201201+}
202202+203203+func GetFollowers(e Execer, did string) ([]Follow, error) {
204204+ return GetFollows(e, 0, FilterEq("subject_did", did))
205205+}
131206132132- if err := rows.Err(); err != nil {
133133- return nil, err
207207+func GetFollowing(e Execer, did string) ([]Follow, error) {
208208+ return GetFollows(e, 0, FilterEq("user_did", did))
209209+}
210210+211211+type FollowStatus int
212212+213213+const (
214214+ IsNotFollowing FollowStatus = iota
215215+ IsFollowing
216216+ IsSelf
217217+)
218218+219219+func (s FollowStatus) String() string {
220220+ switch s {
221221+ case IsNotFollowing:
222222+ return "IsNotFollowing"
223223+ case IsFollowing:
224224+ return "IsFollowing"
225225+ case IsSelf:
226226+ return "IsSelf"
227227+ default:
228228+ return "IsNotFollowing"
134229 }
230230+}
135231136136- return follows, nil
232232+func GetFollowStatus(e Execer, userDid, subjectDid string) FollowStatus {
233233+ if userDid == subjectDid {
234234+ return IsSelf
235235+ } else if _, err := GetFollow(e, userDid, subjectDid); err != nil {
236236+ return IsNotFollowing
237237+ } else {
238238+ return IsFollowing
239239+ }
137240}
+459-311
appview/db/issues.go
···2233import (
44 "database/sql"
55+ "fmt"
66+ "maps"
77+ "slices"
88+ "sort"
99+ "strings"
510 "time"
611712 "github.com/bluesky-social/indigo/atproto/syntax"
1313+ "tangled.sh/tangled.sh/core/api/tangled"
814 "tangled.sh/tangled.sh/core/appview/pagination"
915)
10161117type Issue struct {
1212- ID int64
1313- RepoAt syntax.ATURI
1414- OwnerDid string
1515- IssueId int
1616- IssueAt string
1717- Created time.Time
1818- Title string
1919- Body string
2020- Open bool
1818+ Id int64
1919+ Did string
2020+ Rkey string
2121+ RepoAt syntax.ATURI
2222+ IssueId int
2323+ Created time.Time
2424+ Edited *time.Time
2525+ Deleted *time.Time
2626+ Title string
2727+ Body string
2828+ Open bool
21292230 // optionally, populate this when querying for reverse mappings
2331 // like comment counts, parent repo etc.
2424- Metadata *IssueMetadata
3232+ Comments []IssueComment
3333+ Repo *Repo
2534}
26352727-type IssueMetadata struct {
2828- CommentCount int
2929- Repo *Repo
3030- // labels, assignee etc.
3636+func (i *Issue) AtUri() syntax.ATURI {
3737+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueNSID, i.Rkey))
3838+}
3939+4040+func (i *Issue) AsRecord() tangled.RepoIssue {
4141+ return tangled.RepoIssue{
4242+ Repo: i.RepoAt.String(),
4343+ Title: i.Title,
4444+ Body: &i.Body,
4545+ CreatedAt: i.Created.Format(time.RFC3339),
4646+ }
4747+}
4848+4949+func (i *Issue) State() string {
5050+ if i.Open {
5151+ return "open"
5252+ }
5353+ return "closed"
3154}
32553333-type Comment struct {
3434- OwnerDid string
3535- RepoAt syntax.ATURI
3636- Rkey string
3737- Issue int
3838- CommentId int
3939- Body string
4040- Created *time.Time
4141- Deleted *time.Time
4242- Edited *time.Time
5656+type CommentListItem struct {
5757+ Self *IssueComment
5858+ Replies []*IssueComment
4359}
44604545-func NewIssue(tx *sql.Tx, issue *Issue) error {
4646- defer tx.Rollback()
6161+func (i *Issue) CommentList() []CommentListItem {
6262+ // Create a map to quickly find comments by their aturi
6363+ toplevel := make(map[string]*CommentListItem)
6464+ var replies []*IssueComment
47654848- _, err := tx.Exec(`
4949- insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
5050- values (?, 1)
5151- `, issue.RepoAt)
5252- if err != nil {
5353- return err
6666+ // collect top level comments into the map
6767+ for _, comment := range i.Comments {
6868+ if comment.IsTopLevel() {
6969+ toplevel[comment.AtUri().String()] = &CommentListItem{
7070+ Self: &comment,
7171+ }
7272+ } else {
7373+ replies = append(replies, &comment)
7474+ }
7575+ }
7676+7777+ for _, r := range replies {
7878+ parentAt := *r.ReplyTo
7979+ if parent, exists := toplevel[parentAt]; exists {
8080+ parent.Replies = append(parent.Replies, r)
8181+ }
8282+ }
8383+8484+ var listing []CommentListItem
8585+ for _, v := range toplevel {
8686+ listing = append(listing, *v)
5487 }
55885656- var nextId int
5757- err = tx.QueryRow(`
5858- update repo_issue_seqs
5959- set next_issue_id = next_issue_id + 1
6060- where repo_at = ?
6161- returning next_issue_id - 1
6262- `, issue.RepoAt).Scan(&nextId)
6363- if err != nil {
6464- return err
8989+ // sort everything
9090+ sortFunc := func(a, b *IssueComment) bool {
9191+ return a.Created.Before(b.Created)
9292+ }
9393+ sort.Slice(listing, func(i, j int) bool {
9494+ return sortFunc(listing[i].Self, listing[j].Self)
9595+ })
9696+ for _, r := range listing {
9797+ sort.Slice(r.Replies, func(i, j int) bool {
9898+ return sortFunc(r.Replies[i], r.Replies[j])
9999+ })
65100 }
661016767- issue.IssueId = nextId
102102+ return listing
103103+}
681046969- res, err := tx.Exec(`
7070- insert into issues (repo_at, owner_did, issue_id, title, body)
7171- values (?, ?, ?, ?, ?)
7272- `, issue.RepoAt, issue.OwnerDid, issue.IssueId, issue.Title, issue.Body)
105105+func IssueFromRecord(did, rkey string, record tangled.RepoIssue) Issue {
106106+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
73107 if err != nil {
7474- return err
108108+ created = time.Now()
75109 }
761107777- lastID, err := res.LastInsertId()
7878- if err != nil {
7979- return err
111111+ body := ""
112112+ if record.Body != nil {
113113+ body = *record.Body
80114 }
8181- issue.ID = lastID
821158383- if err := tx.Commit(); err != nil {
8484- return err
116116+ return Issue{
117117+ RepoAt: syntax.ATURI(record.Repo),
118118+ Did: did,
119119+ Rkey: rkey,
120120+ Created: created,
121121+ Title: record.Title,
122122+ Body: body,
123123+ Open: true, // new issues are open by default
85124 }
125125+}
861268787- return nil
127127+type IssueComment struct {
128128+ Id int64
129129+ Did string
130130+ Rkey string
131131+ IssueAt string
132132+ ReplyTo *string
133133+ Body string
134134+ Created time.Time
135135+ Edited *time.Time
136136+ Deleted *time.Time
88137}
891389090-func SetIssueAt(e Execer, repoAt syntax.ATURI, issueId int, issueAt string) error {
9191- _, err := e.Exec(`update issues set issue_at = ? where repo_at = ? and issue_id = ?`, issueAt, repoAt, issueId)
9292- return err
139139+func (i *IssueComment) AtUri() syntax.ATURI {
140140+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueCommentNSID, i.Rkey))
93141}
941429595-func GetIssueAt(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
9696- var issueAt string
9797- err := e.QueryRow(`select issue_at from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&issueAt)
9898- return issueAt, err
143143+func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
144144+ return tangled.RepoIssueComment{
145145+ Body: i.Body,
146146+ Issue: i.IssueAt,
147147+ CreatedAt: i.Created.Format(time.RFC3339),
148148+ ReplyTo: i.ReplyTo,
149149+ }
99150}
100151101101-func GetIssueOwnerDid(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
102102- var ownerDid string
103103- err := e.QueryRow(`select owner_did from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&ownerDid)
104104- return ownerDid, err
152152+func (i *IssueComment) IsTopLevel() bool {
153153+ return i.ReplyTo == nil
105154}
106155107107-func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
108108- var issues []Issue
109109- openValue := 0
110110- if isOpen {
111111- openValue = 1
156156+func IssueCommentFromRecord(e Execer, did, rkey string, record tangled.RepoIssueComment) (*IssueComment, error) {
157157+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
158158+ if err != nil {
159159+ created = time.Now()
112160 }
113161114114- rows, err := e.Query(
115115- `
116116- with numbered_issue as (
117117- select
118118- i.id,
119119- i.owner_did,
120120- i.issue_id,
121121- i.created,
122122- i.title,
123123- i.body,
124124- i.open,
125125- count(c.id) as comment_count,
126126- row_number() over (order by i.created desc) as row_num
127127- from
128128- issues i
129129- left join
130130- comments c on i.repo_at = c.repo_at and i.issue_id = c.issue_id
131131- where
132132- i.repo_at = ? and i.open = ?
133133- group by
134134- i.id, i.owner_did, i.issue_id, i.created, i.title, i.body, i.open
135135- )
136136- select
137137- id,
138138- owner_did,
139139- issue_id,
140140- created,
141141- title,
142142- body,
143143- open,
144144- comment_count
145145- from
146146- numbered_issue
147147- where
148148- row_num between ? and ?`,
149149- repoAt, openValue, page.Offset+1, page.Offset+page.Limit)
150150- if err != nil {
162162+ ownerDid := did
163163+164164+ if _, err = syntax.ParseATURI(record.Issue); err != nil {
151165 return nil, err
152166 }
153153- defer rows.Close()
154167155155- for rows.Next() {
156156- var issue Issue
157157- var createdAt string
158158- var metadata IssueMetadata
159159- err := rows.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
160160- if err != nil {
161161- return nil, err
162162- }
168168+ comment := IssueComment{
169169+ Did: ownerDid,
170170+ Rkey: rkey,
171171+ Body: record.Body,
172172+ IssueAt: record.Issue,
173173+ ReplyTo: record.ReplyTo,
174174+ Created: created,
175175+ }
163176164164- createdTime, err := time.Parse(time.RFC3339, createdAt)
165165- if err != nil {
166166- return nil, err
177177+ return &comment, nil
178178+}
179179+180180+func PutIssue(tx *sql.Tx, issue *Issue) error {
181181+ // ensure sequence exists
182182+ _, err := tx.Exec(`
183183+ insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
184184+ values (?, 1)
185185+ `, issue.RepoAt)
186186+ if err != nil {
187187+ return err
188188+ }
189189+190190+ issues, err := GetIssues(
191191+ tx,
192192+ FilterEq("did", issue.Did),
193193+ FilterEq("rkey", issue.Rkey),
194194+ )
195195+ switch {
196196+ case err != nil:
197197+ return err
198198+ case len(issues) == 0:
199199+ return createNewIssue(tx, issue)
200200+ case len(issues) != 1: // should be unreachable
201201+ return fmt.Errorf("invalid number of issues returned: %d", len(issues))
202202+ default:
203203+ // if content is identical, do not edit
204204+ existingIssue := issues[0]
205205+ if existingIssue.Title == issue.Title && existingIssue.Body == issue.Body {
206206+ return nil
167207 }
168168- issue.Created = createdTime
169169- issue.Metadata = &metadata
170208171171- issues = append(issues, issue)
209209+ issue.Id = existingIssue.Id
210210+ issue.IssueId = existingIssue.IssueId
211211+ return updateIssue(tx, issue)
172212 }
213213+}
173214174174- if err := rows.Err(); err != nil {
175175- return nil, err
215215+func createNewIssue(tx *sql.Tx, issue *Issue) error {
216216+ // get next issue_id
217217+ var newIssueId int
218218+ err := tx.QueryRow(`
219219+ update repo_issue_seqs
220220+ set next_issue_id = next_issue_id + 1
221221+ where repo_at = ?
222222+ returning next_issue_id - 1
223223+ `, issue.RepoAt).Scan(&newIssueId)
224224+ if err != nil {
225225+ return err
176226 }
177227178178- return issues, nil
228228+ // insert new issue
229229+ row := tx.QueryRow(`
230230+ insert into issues (repo_at, did, rkey, issue_id, title, body)
231231+ values (?, ?, ?, ?, ?, ?)
232232+ returning rowid, issue_id
233233+ `, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body)
234234+235235+ return row.Scan(&issue.Id, &issue.IssueId)
179236}
180237181181-// timeframe here is directly passed into the sql query filter, and any
182182-// timeframe in the past should be negative; e.g.: "-3 months"
183183-func GetIssuesByOwnerDid(e Execer, ownerDid string, timeframe string) ([]Issue, error) {
184184- var issues []Issue
238238+func updateIssue(tx *sql.Tx, issue *Issue) error {
239239+ // update existing issue
240240+ _, err := tx.Exec(`
241241+ update issues
242242+ set title = ?, body = ?, edited = ?
243243+ where did = ? and rkey = ?
244244+ `, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey)
245245+ return err
246246+}
185247186186- rows, err := e.Query(
187187- `select
188188- i.id,
189189- i.owner_did,
190190- i.repo_at,
191191- i.issue_id,
192192- i.created,
193193- i.title,
194194- i.body,
195195- i.open,
196196- r.did,
197197- r.name,
198198- r.knot,
199199- r.rkey,
200200- r.created
201201- from
202202- issues i
203203- join
204204- repos r on i.repo_at = r.at_uri
205205- where
206206- i.owner_did = ? and i.created >= date ('now', ?)
207207- order by
208208- i.created desc`,
209209- ownerDid, timeframe)
248248+func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]Issue, error) {
249249+ issueMap := make(map[string]*Issue) // at-uri -> issue
250250+251251+ var conditions []string
252252+ var args []any
253253+254254+ for _, filter := range filters {
255255+ conditions = append(conditions, filter.Condition())
256256+ args = append(args, filter.Arg()...)
257257+ }
258258+259259+ whereClause := ""
260260+ if conditions != nil {
261261+ whereClause = " where " + strings.Join(conditions, " and ")
262262+ }
263263+264264+ pLower := FilterGte("row_num", page.Offset+1)
265265+ pUpper := FilterLte("row_num", page.Offset+page.Limit)
266266+267267+ args = append(args, pLower.Arg()...)
268268+ args = append(args, pUpper.Arg()...)
269269+ pagination := " where " + pLower.Condition() + " and " + pUpper.Condition()
270270+271271+ query := fmt.Sprintf(
272272+ `
273273+ select * from (
274274+ select
275275+ id,
276276+ did,
277277+ rkey,
278278+ repo_at,
279279+ issue_id,
280280+ title,
281281+ body,
282282+ open,
283283+ created,
284284+ edited,
285285+ deleted,
286286+ row_number() over (order by created desc) as row_num
287287+ from
288288+ issues
289289+ %s
290290+ ) ranked_issues
291291+ %s
292292+ `,
293293+ whereClause,
294294+ pagination,
295295+ )
296296+297297+ rows, err := e.Query(query, args...)
210298 if err != nil {
211211- return nil, err
299299+ return nil, fmt.Errorf("failed to query issues table: %w", err)
212300 }
213301 defer rows.Close()
214302215303 for rows.Next() {
216304 var issue Issue
217217- var issueCreatedAt, repoCreatedAt string
218218- var repo Repo
305305+ var createdAt string
306306+ var editedAt, deletedAt sql.Null[string]
307307+ var rowNum int64
219308 err := rows.Scan(
220220- &issue.ID,
221221- &issue.OwnerDid,
309309+ &issue.Id,
310310+ &issue.Did,
311311+ &issue.Rkey,
222312 &issue.RepoAt,
223313 &issue.IssueId,
224224- &issueCreatedAt,
225314 &issue.Title,
226315 &issue.Body,
227316 &issue.Open,
228228- &repo.Did,
229229- &repo.Name,
230230- &repo.Knot,
231231- &repo.Rkey,
232232- &repoCreatedAt,
317317+ &createdAt,
318318+ &editedAt,
319319+ &deletedAt,
320320+ &rowNum,
233321 )
234322 if err != nil {
235235- return nil, err
323323+ return nil, fmt.Errorf("failed to scan issue: %w", err)
236324 }
237325238238- issueCreatedTime, err := time.Parse(time.RFC3339, issueCreatedAt)
239239- if err != nil {
240240- return nil, err
326326+ if t, err := time.Parse(time.RFC3339, createdAt); err == nil {
327327+ issue.Created = t
241328 }
242242- issue.Created = issueCreatedTime
243329244244- repoCreatedTime, err := time.Parse(time.RFC3339, repoCreatedAt)
245245- if err != nil {
246246- return nil, err
330330+ if editedAt.Valid {
331331+ if t, err := time.Parse(time.RFC3339, editedAt.V); err == nil {
332332+ issue.Edited = &t
333333+ }
334334+ }
335335+336336+ if deletedAt.Valid {
337337+ if t, err := time.Parse(time.RFC3339, deletedAt.V); err == nil {
338338+ issue.Deleted = &t
339339+ }
247340 }
248248- repo.Created = repoCreatedTime
249341250250- issue.Metadata = &IssueMetadata{
251251- Repo: &repo,
342342+ atUri := issue.AtUri().String()
343343+ issueMap[atUri] = &issue
344344+ }
345345+346346+ // collect reverse repos
347347+ repoAts := make([]string, 0, len(issueMap)) // or just []string{}
348348+ for _, issue := range issueMap {
349349+ repoAts = append(repoAts, string(issue.RepoAt))
350350+ }
351351+352352+ repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
353353+ if err != nil {
354354+ return nil, fmt.Errorf("failed to build repo mappings: %w", err)
355355+ }
356356+357357+ repoMap := make(map[string]*Repo)
358358+ for i := range repos {
359359+ repoMap[string(repos[i].RepoAt())] = &repos[i]
360360+ }
361361+362362+ for issueAt, i := range issueMap {
363363+ if r, ok := repoMap[string(i.RepoAt)]; ok {
364364+ i.Repo = r
365365+ } else {
366366+ // do not show up the issue if the repo is deleted
367367+ // TODO: foreign key where?
368368+ delete(issueMap, issueAt)
252369 }
370370+ }
253371254254- issues = append(issues, issue)
372372+ // collect comments
373373+ issueAts := slices.Collect(maps.Keys(issueMap))
374374+ comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
375375+ if err != nil {
376376+ return nil, fmt.Errorf("failed to query comments: %w", err)
255377 }
256378257257- if err := rows.Err(); err != nil {
258258- return nil, err
379379+ for i := range comments {
380380+ issueAt := comments[i].IssueAt
381381+ if issue, ok := issueMap[issueAt]; ok {
382382+ issue.Comments = append(issue.Comments, comments[i])
383383+ }
384384+ }
385385+386386+ var issues []Issue
387387+ for _, i := range issueMap {
388388+ issues = append(issues, *i)
259389 }
260390391391+ sort.Slice(issues, func(i, j int) bool {
392392+ return issues[i].Created.After(issues[j].Created)
393393+ })
394394+261395 return issues, nil
396396+}
397397+398398+func GetIssues(e Execer, filters ...filter) ([]Issue, error) {
399399+ return GetIssuesPaginated(e, pagination.FirstPage(), filters...)
262400}
263401264402func GetIssue(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, error) {
265265- query := `select id, owner_did, created, title, body, open from issues where repo_at = ? and issue_id = ?`
403403+ query := `select id, owner_did, rkey, created, title, body, open from issues where repo_at = ? and issue_id = ?`
266404 row := e.QueryRow(query, repoAt, issueId)
267405268406 var issue Issue
269407 var createdAt string
270270- err := row.Scan(&issue.ID, &issue.OwnerDid, &createdAt, &issue.Title, &issue.Body, &issue.Open)
408408+ err := row.Scan(&issue.Id, &issue.Did, &issue.Rkey, &createdAt, &issue.Title, &issue.Body, &issue.Open)
271409 if err != nil {
272410 return nil, err
273411 }
···281419 return &issue, nil
282420}
283421284284-func GetIssueWithComments(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, []Comment, error) {
285285- query := `select id, owner_did, issue_id, created, title, body, open, issue_at from issues where repo_at = ? and issue_id = ?`
286286- row := e.QueryRow(query, repoAt, issueId)
287287-288288- var issue Issue
289289- var createdAt string
290290- err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &issue.IssueAt)
422422+func AddIssueComment(e Execer, c IssueComment) (int64, error) {
423423+ result, err := e.Exec(
424424+ `insert into issue_comments (
425425+ did,
426426+ rkey,
427427+ issue_at,
428428+ body,
429429+ reply_to,
430430+ created,
431431+ edited
432432+ )
433433+ values (?, ?, ?, ?, ?, ?, null)
434434+ on conflict(did, rkey) do update set
435435+ issue_at = excluded.issue_at,
436436+ body = excluded.body,
437437+ edited = case
438438+ when
439439+ issue_comments.issue_at != excluded.issue_at
440440+ or issue_comments.body != excluded.body
441441+ or issue_comments.reply_to != excluded.reply_to
442442+ then ?
443443+ else issue_comments.edited
444444+ end`,
445445+ c.Did,
446446+ c.Rkey,
447447+ c.IssueAt,
448448+ c.Body,
449449+ c.ReplyTo,
450450+ c.Created.Format(time.RFC3339),
451451+ time.Now().Format(time.RFC3339),
452452+ )
291453 if err != nil {
292292- return nil, nil, err
454454+ return 0, err
293455 }
294456295295- createdTime, err := time.Parse(time.RFC3339, createdAt)
457457+ id, err := result.LastInsertId()
296458 if err != nil {
297297- return nil, nil, err
459459+ return 0, err
298460 }
299299- issue.Created = createdTime
300461301301- comments, err := GetComments(e, repoAt, issueId)
302302- if err != nil {
303303- return nil, nil, err
462462+ return id, nil
463463+}
464464+465465+func DeleteIssueComments(e Execer, filters ...filter) error {
466466+ var conditions []string
467467+ var args []any
468468+ for _, filter := range filters {
469469+ conditions = append(conditions, filter.Condition())
470470+ args = append(args, filter.Arg()...)
304471 }
305472306306- return &issue, comments, nil
307307-}
473473+ whereClause := ""
474474+ if conditions != nil {
475475+ whereClause = " where " + strings.Join(conditions, " and ")
476476+ }
308477309309-func NewIssueComment(e Execer, comment *Comment) error {
310310- query := `insert into comments (owner_did, repo_at, rkey, issue_id, comment_id, body) values (?, ?, ?, ?, ?, ?)`
311311- _, err := e.Exec(
312312- query,
313313- comment.OwnerDid,
314314- comment.RepoAt,
315315- comment.Rkey,
316316- comment.Issue,
317317- comment.CommentId,
318318- comment.Body,
319319- )
478478+ query := fmt.Sprintf(`update issue_comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
479479+480480+ _, err := e.Exec(query, args...)
320481 return err
321482}
322483323323-func GetComments(e Execer, repoAt syntax.ATURI, issueId int) ([]Comment, error) {
324324- var comments []Comment
484484+func GetIssueComments(e Execer, filters ...filter) ([]IssueComment, error) {
485485+ var comments []IssueComment
486486+487487+ var conditions []string
488488+ var args []any
489489+ for _, filter := range filters {
490490+ conditions = append(conditions, filter.Condition())
491491+ args = append(args, filter.Arg()...)
492492+ }
325493326326- rows, err := e.Query(`
494494+ whereClause := ""
495495+ if conditions != nil {
496496+ whereClause = " where " + strings.Join(conditions, " and ")
497497+ }
498498+499499+ query := fmt.Sprintf(`
327500 select
328328- owner_did,
329329- issue_id,
330330- comment_id,
501501+ id,
502502+ did,
331503 rkey,
504504+ issue_at,
505505+ reply_to,
332506 body,
333507 created,
334508 edited,
335509 deleted
336510 from
337337- comments
338338- where
339339- repo_at = ? and issue_id = ?
340340- order by
341341- created asc`,
342342- repoAt,
343343- issueId,
344344- )
345345- if err == sql.ErrNoRows {
346346- return []Comment{}, nil
347347- }
511511+ issue_comments
512512+ %s
513513+ `, whereClause)
514514+515515+ rows, err := e.Query(query, args...)
348516 if err != nil {
349517 return nil, err
350518 }
351351- defer rows.Close()
352519353520 for rows.Next() {
354354- var comment Comment
355355- var createdAt string
356356- var deletedAt, editedAt, rkey sql.NullString
357357- err := rows.Scan(&comment.OwnerDid, &comment.Issue, &comment.CommentId, &rkey, &comment.Body, &createdAt, &editedAt, &deletedAt)
521521+ var comment IssueComment
522522+ var created string
523523+ var rkey, edited, deleted, replyTo sql.Null[string]
524524+ err := rows.Scan(
525525+ &comment.Id,
526526+ &comment.Did,
527527+ &rkey,
528528+ &comment.IssueAt,
529529+ &replyTo,
530530+ &comment.Body,
531531+ &created,
532532+ &edited,
533533+ &deleted,
534534+ )
358535 if err != nil {
359536 return nil, err
360537 }
361538362362- createdAtTime, err := time.Parse(time.RFC3339, createdAt)
363363- if err != nil {
364364- return nil, err
539539+ // this is a remnant from old times, newer comments always have rkey
540540+ if rkey.Valid {
541541+ comment.Rkey = rkey.V
365542 }
366366- comment.Created = &createdAtTime
367543368368- if deletedAt.Valid {
369369- deletedTime, err := time.Parse(time.RFC3339, deletedAt.String)
370370- if err != nil {
371371- return nil, err
544544+ if t, err := time.Parse(time.RFC3339, created); err == nil {
545545+ comment.Created = t
546546+ }
547547+548548+ if edited.Valid {
549549+ if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
550550+ comment.Edited = &t
372551 }
373373- comment.Deleted = &deletedTime
374552 }
375553376376- if editedAt.Valid {
377377- editedTime, err := time.Parse(time.RFC3339, editedAt.String)
378378- if err != nil {
379379- return nil, err
554554+ if deleted.Valid {
555555+ if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
556556+ comment.Deleted = &t
380557 }
381381- comment.Edited = &editedTime
382558 }
383559384384- if rkey.Valid {
385385- comment.Rkey = rkey.String
560560+ if replyTo.Valid {
561561+ comment.ReplyTo = &replyTo.V
386562 }
387563388564 comments = append(comments, comment)
389565 }
390566391391- if err := rows.Err(); err != nil {
567567+ if err = rows.Err(); err != nil {
392568 return nil, err
393569 }
394570395571 return comments, nil
396572}
397573398398-func GetComment(e Execer, repoAt syntax.ATURI, issueId, commentId int) (*Comment, error) {
399399- query := `
400400- select
401401- owner_did, body, rkey, created, deleted, edited
402402- from
403403- comments where repo_at = ? and issue_id = ? and comment_id = ?
404404- `
405405- row := e.QueryRow(query, repoAt, issueId, commentId)
406406-407407- var comment Comment
408408- var createdAt string
409409- var deletedAt, editedAt, rkey sql.NullString
410410- err := row.Scan(&comment.OwnerDid, &comment.Body, &rkey, &createdAt, &deletedAt, &editedAt)
411411- if err != nil {
412412- return nil, err
574574+func DeleteIssues(e Execer, filters ...filter) error {
575575+ var conditions []string
576576+ var args []any
577577+ for _, filter := range filters {
578578+ conditions = append(conditions, filter.Condition())
579579+ args = append(args, filter.Arg()...)
413580 }
414581415415- createdTime, err := time.Parse(time.RFC3339, createdAt)
416416- if err != nil {
417417- return nil, err
582582+ whereClause := ""
583583+ if conditions != nil {
584584+ whereClause = " where " + strings.Join(conditions, " and ")
418585 }
419419- comment.Created = &createdTime
420586421421- if deletedAt.Valid {
422422- deletedTime, err := time.Parse(time.RFC3339, deletedAt.String)
423423- if err != nil {
424424- return nil, err
425425- }
426426- comment.Deleted = &deletedTime
427427- }
587587+ query := fmt.Sprintf(`delete from issues %s`, whereClause)
588588+ _, err := e.Exec(query, args...)
589589+ return err
590590+}
428591429429- if editedAt.Valid {
430430- editedTime, err := time.Parse(time.RFC3339, editedAt.String)
431431- if err != nil {
432432- return nil, err
433433- }
434434- comment.Edited = &editedTime
592592+func CloseIssues(e Execer, filters ...filter) error {
593593+ var conditions []string
594594+ var args []any
595595+ for _, filter := range filters {
596596+ conditions = append(conditions, filter.Condition())
597597+ args = append(args, filter.Arg()...)
435598 }
436599437437- if rkey.Valid {
438438- comment.Rkey = rkey.String
600600+ whereClause := ""
601601+ if conditions != nil {
602602+ whereClause = " where " + strings.Join(conditions, " and ")
439603 }
440604441441- comment.RepoAt = repoAt
442442- comment.Issue = issueId
443443- comment.CommentId = commentId
444444-445445- return &comment, nil
446446-}
447447-448448-func EditComment(e Execer, repoAt syntax.ATURI, issueId, commentId int, newBody string) error {
449449- _, err := e.Exec(
450450- `
451451- update comments
452452- set body = ?,
453453- edited = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
454454- where repo_at = ? and issue_id = ? and comment_id = ?
455455- `, newBody, repoAt, issueId, commentId)
605605+ query := fmt.Sprintf(`update issues set open = 0 %s`, whereClause)
606606+ _, err := e.Exec(query, args...)
456607 return err
457608}
458609459459-func DeleteComment(e Execer, repoAt syntax.ATURI, issueId, commentId int) error {
460460- _, err := e.Exec(
461461- `
462462- update comments
463463- set body = "",
464464- deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
465465- where repo_at = ? and issue_id = ? and comment_id = ?
466466- `, repoAt, issueId, commentId)
467467- return err
468468-}
610610+func ReopenIssues(e Execer, filters ...filter) error {
611611+ var conditions []string
612612+ var args []any
613613+ for _, filter := range filters {
614614+ conditions = append(conditions, filter.Condition())
615615+ args = append(args, filter.Arg()...)
616616+ }
469617470470-func CloseIssue(e Execer, repoAt syntax.ATURI, issueId int) error {
471471- _, err := e.Exec(`update issues set open = 0 where repo_at = ? and issue_id = ?`, repoAt, issueId)
472472- return err
473473-}
618618+ whereClause := ""
619619+ if conditions != nil {
620620+ whereClause = " where " + strings.Join(conditions, " and ")
621621+ }
474622475475-func ReopenIssue(e Execer, repoAt syntax.ATURI, issueId int) error {
476476- _, err := e.Exec(`update issues set open = 1 where repo_at = ? and issue_id = ?`, repoAt, issueId)
623623+ query := fmt.Sprintf(`update issues set open = 1 %s`, whereClause)
624624+ _, err := e.Exec(query, args...)
477625 return err
478626}
479627
-62
appview/db/migrations/20250305_113405.sql
···11--- Simplified SQLite Database Migration Script for Issues and Comments
22-33--- Migration for issues table
44-CREATE TABLE issues_new (
55- id integer primary key autoincrement,
66- owner_did text not null,
77- repo_at text not null,
88- issue_id integer not null,
99- title text not null,
1010- body text not null,
1111- open integer not null default 1,
1212- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
1313- issue_at text,
1414- unique(repo_at, issue_id),
1515- foreign key (repo_at) references repos(at_uri) on delete cascade
1616-);
1717-1818--- Migrate data to new issues table
1919-INSERT INTO issues_new (
2020- id, owner_did, repo_at, issue_id,
2121- title, body, open, created, issue_at
2222-)
2323-SELECT
2424- id, owner_did, repo_at, issue_id,
2525- title, body, open, created, issue_at
2626-FROM issues;
2727-2828--- Drop old issues table
2929-DROP TABLE issues;
3030-3131--- Rename new issues table
3232-ALTER TABLE issues_new RENAME TO issues;
3333-3434--- Migration for comments table
3535-CREATE TABLE comments_new (
3636- id integer primary key autoincrement,
3737- owner_did text not null,
3838- issue_id integer not null,
3939- repo_at text not null,
4040- comment_id integer not null,
4141- comment_at text not null,
4242- body text not null,
4343- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
4444- unique(issue_id, comment_id),
4545- foreign key (repo_at, issue_id) references issues(repo_at, issue_id) on delete cascade
4646-);
4747-4848--- Migrate data to new comments table
4949-INSERT INTO comments_new (
5050- id, owner_did, issue_id, repo_at,
5151- comment_id, comment_at, body, created
5252-)
5353-SELECT
5454- id, owner_did, issue_id, repo_at,
5555- comment_id, comment_at, body, created
5656-FROM comments;
5757-5858--- Drop old comments table
5959-DROP TABLE comments;
6060-6161--- Rename new comments table
6262-ALTER TABLE comments_new RENAME TO comments;
-66
appview/db/migrations/validate.sql
···11--- Validation Queries for Database Migration
22-33--- 1. Verify Issues Table Structure
44-PRAGMA table_info(issues);
55-66--- 2. Verify Comments Table Structure
77-PRAGMA table_info(comments);
88-99--- 3. Check Total Row Count Consistency
1010-SELECT
1111- 'Issues Row Count' AS check_type,
1212- (SELECT COUNT(*) FROM issues) AS row_count
1313-UNION ALL
1414-SELECT
1515- 'Comments Row Count' AS check_type,
1616- (SELECT COUNT(*) FROM comments) AS row_count;
1717-1818--- 4. Verify Unique Constraint on Issues
1919-SELECT
2020- repo_at,
2121- issue_id,
2222- COUNT(*) as duplicate_count
2323-FROM issues
2424-GROUP BY repo_at, issue_id
2525-HAVING duplicate_count > 1;
2626-2727--- 5. Verify Foreign Key Integrity for Comments
2828-SELECT
2929- 'Orphaned Comments' AS check_type,
3030- COUNT(*) AS orphaned_count
3131-FROM comments c
3232-LEFT JOIN issues i ON c.repo_at = i.repo_at AND c.issue_id = i.issue_id
3333-WHERE i.id IS NULL;
3434-3535--- 6. Check Foreign Key Constraint
3636-PRAGMA foreign_key_list(comments);
3737-3838--- 7. Sample Data Integrity Check
3939-SELECT
4040- 'Sample Issues' AS check_type,
4141- repo_at,
4242- issue_id,
4343- title,
4444- created
4545-FROM issues
4646-LIMIT 5;
4747-4848--- 8. Sample Comments Data Integrity Check
4949-SELECT
5050- 'Sample Comments' AS check_type,
5151- repo_at,
5252- issue_id,
5353- comment_id,
5454- body,
5555- created
5656-FROM comments
5757-LIMIT 5;
5858-5959--- 9. Verify Constraint on Comments (Issue ID and Comment ID Uniqueness)
6060-SELECT
6161- issue_id,
6262- comment_id,
6363- COUNT(*) as duplicate_count
6464-FROM comments
6565-GROUP BY issue_id, comment_id
6666-HAVING duplicate_count > 1;
+25-12
appview/db/profile.go
···2222 ByMonth []ByMonth
2323}
24242525+func (p *ProfileTimeline) IsEmpty() bool {
2626+ if p == nil {
2727+ return true
2828+ }
2929+3030+ for _, m := range p.ByMonth {
3131+ if !m.IsEmpty() {
3232+ return false
3333+ }
3434+ }
3535+3636+ return true
3737+}
3838+2539type ByMonth struct {
2640 RepoEvents []RepoEvent
2741 IssueEvents IssueEvents
···118132 *items = append(*items, &pull)
119133 }
120134121121- issues, err := GetIssuesByOwnerDid(e, forDid, timeframe)
135135+ issues, err := GetIssues(
136136+ e,
137137+ FilterEq("did", forDid),
138138+ FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
139139+ )
122140 if err != nil {
123141 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
124142 }
···137155 *items = append(*items, &issue)
138156 }
139157140140- repos, err := GetAllReposByDid(e, forDid)
158158+ repos, err := GetRepos(e, 0, FilterEq("did", forDid))
141159 if err != nil {
142160 return nil, fmt.Errorf("error getting all repos by did: %w", err)
143161 }
···348366 return tx.Commit()
349367}
350368351351-func GetProfiles(e Execer, filters ...filter) ([]Profile, error) {
369369+func GetProfiles(e Execer, filters ...filter) (map[string]*Profile, error) {
352370 var conditions []string
353371 var args []any
354372 for _, filter := range filters {
···448466 idxs[did] = idx + 1
449467 }
450468451451- var profiles []Profile
452452- for _, p := range profileMap {
453453- profiles = append(profiles, *p)
454454- }
455455-456456- return profiles, nil
469469+ return profileMap, nil
457470}
458471459472func GetProfile(e Execer, did string) (*Profile, error) {
···540553 query = `select count(id) from pulls where owner_did = ? and state = ?`
541554 args = append(args, did, PullOpen)
542555 case VanityStatOpenIssueCount:
543543- query = `select count(id) from issues where owner_did = ? and open = 1`
556556+ query = `select count(id) from issues where did = ? and open = 1`
544557 args = append(args, did)
545558 case VanityStatClosedIssueCount:
546546- query = `select count(id) from issues where owner_did = ? and open = 0`
559559+ query = `select count(id) from issues where did = ? and open = 0`
547560 args = append(args, did)
548561 case VanityStatRepositoryCount:
549562 query = `select count(id) from repos where did = ?`
···577590 }
578591579592 // ensure all pinned repos are either own repos or collaborating repos
580580- repos, err := GetAllReposByDid(e, profile.Did)
593593+ repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
581594 if err != nil {
582595 log.Printf("getting repos for %s: %s", profile.Did, err)
583596 }
···1313 FormatMarkdown: []string{".md", ".markdown", ".mdown", ".mkdn", ".mkd"},
1414}
15151616+// ReadmeFilenames contains the list of common README filenames to search for,
1717+// in order of preference. Only includes well-supported formats.
1818+var ReadmeFilenames = []string{
1919+ "README.md", "readme.md",
2020+ "README",
2121+ "readme",
2222+ "README.markdown",
2323+ "readme.markdown",
2424+ "README.txt",
2525+ "readme.txt",
2626+}
2727+1628func GetFormat(filename string) Format {
1729 for format, extensions := range FileTypes {
1830 for _, extension := range extensions {
···1111### message format
12121313```
1414-<service/top-level directory>: <affected package/directory>: <short summary of change>
1414+<service/top-level directory>/<affected package/directory>: <short summary of change>
151516161717Optional longer description can go here, if necessary. Explain what the
···2323Here are some examples:
24242525```
2626-appview: state: fix token expiry check in middleware
2626+appview/state: fix token expiry check in middleware
27272828The previous check did not account for clock drift, leading to premature
2929token invalidation.
3030```
31313232```
3333-knotserver: git/service: improve error checking in upload-pack
3333+knotserver/git/service: improve error checking in upload-pack
3434```
35353636···5454- Don't include unrelated changes in the same commit.
5555- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
5656before submitting if necessary.
5757+5858+## code formatting
5959+6060+We use a variety of tools to format our code, and multiplex them with
6161+[`treefmt`](https://treefmt.com): all you need to do to format your changes
6262+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
57635864## proposals for bigger changes
5965···115121If you're submitting a PR with multiple commits, make sure each one is
116122signed.
117123118118-For [jj](https://jj-vcs.github.io/jj/latest/) users, you can add this to
119119-your jj config:
124124+For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125125+to make it sign off commits in the tangled repo:
120126121121-```
122122-ui.should-sign-off = true
123123-```
124124-125125-and to your `templates.draft_commit_description`, add the following `if`
126126-block:
127127-128128-```
129129- if(
130130- config("ui.should-sign-off").as_boolean() && !description.contains("Signed-off-by: " ++ author.name()),
131131- "\nSigned-off-by: " ++ author.name() ++ " <" ++ author.email() ++ ">",
132132- ),
127127+```shell
128128+# Safety check, should say "No matching config key..."
129129+jj config list templates.commit_trailers
130130+# The command below may need to be adjusted if the command above returned something.
131131+jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
133132```
134133135134Refer to the [jj
136136-documentation](https://jj-vcs.github.io/jj/latest/config/#default-description)
135135+documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
137136for more information.
+66-20
docs/hacking.md
···4848redis-server
4949```
50505151-## running a knot
5151+## running knots and spindles
52525353An end-to-end knot setup requires setting up a machine with
5454`sshd`, `AuthorizedKeysCommand`, and git user, which is
5555quite cumbersome. So the nix flake provides a
5656`nixosConfiguration` to do so.
57575858-To begin, head to `http://localhost:3000/knots` in the browser
5959-and generate a knot secret. Replace the existing secret in
6060-`nix/vm.nix` (`KNOT_SERVER_SECRET`) with the newly generated
6161-secret.
5858+<details>
5959+ <summary><strong>macOS users will have to set up a Nix builder first</strong></summary>
6060+6161+ In order to build Tangled's dev VM on macOS, you will
6262+ first need to set up a Linux Nix builder. The recommended
6363+ way to do so is to run a [`darwin.linux-builder`
6464+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
6565+ and to register it in `nix.conf` as a builder for Linux
6666+ with the same architecture as your Mac (`linux-aarch64` if
6767+ you are using Apple Silicon).
6868+6969+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
7070+ > the tangled repo so that it doesn't conflict with the other VM. For example,
7171+ > you can do
7272+ >
7373+ > ```shell
7474+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
7575+ > ```
7676+ >
7777+ > to store the builder VM in a temporary dir.
7878+ >
7979+ > You should read and follow [all the other instructions][darwin builder vm] to
8080+ > avoid subtle problems.
62816363-You can now start a lightweight NixOS VM using
6464-`nixos-shell` like so:
8282+ Alternatively, you can use any other method to set up a
8383+ Linux machine with `nix` installed that you can `sudo ssh`
8484+ into (in other words, root user on your Mac has to be able
8585+ to ssh into the Linux machine without entering a password)
8686+ and that has the same architecture as your Mac. See
8787+ [remote builder
8888+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
8989+ for how to register such a builder in `nix.conf`.
9090+9191+ > WARNING: If you'd like to use
9292+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
9393+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
9494+ > ssh` works can be tricky. It seems to be [possible with
9595+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
9696+9797+</details>
9898+9999+To begin, grab your DID from http://localhost:3000/settings.
100100+Then, set `TANGLED_VM_KNOT_OWNER` and
101101+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
102102+lightweight NixOS VM like so:
6510366104```bash
6767-nix run .#vm
6868-# or nixos-shell --flake .#vm
105105+nix run --impure .#vm
691067070-# hit Ctrl-a + c + q to exit the VM
107107+# type `poweroff` at the shell to exit the VM
71108```
7210973110This starts a knot on port 6000, a spindle on port 6555
7474-with `ssh` exposed on port 2222. You can push repositories
7575-to this VM with this ssh config block on your main machine:
111111+with `ssh` exposed on port 2222.
112112+113113+Once the services are running, head to
114114+http://localhost:3000/knots and hit verify. It should
115115+verify the ownership of the services instantly if everything
116116+went smoothly.
117117+118118+You can push repositories to this VM with this ssh config
119119+block on your main machine:
7612077121```bash
78122Host nixos-shell
···89133git push local-dev main
90134```
911359292-## running a spindle
136136+### running a spindle
931379494-Be sure to change the `owner` field for the spindle in
9595-`nix/vm.nix` to your own DID. The above VM should already
9696-be running a spindle on `localhost:6555`. You can head to
9797-the spindle dashboard on `http://localhost:3000/spindles`,
9898-and register a spindle with hostname `localhost:6555`. It
9999-should instantly be verified. You can then configure each
100100-repository to use this spindle and run CI jobs.
138138+The above VM should already be running a spindle on
139139+`localhost:6555`. Head to http://localhost:3000/spindles and
140140+hit verify. You can then configure each repository to use
141141+this spindle and run CI jobs.
101142102143Of interest when debugging spindles:
103144···114155# litecli has a nicer REPL interface:
115156litecli /var/lib/spindle/spindle.db
116157```
158158+159159+If for any reason you wish to disable either one of the
160160+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161161+`services.tangled-spindle.enable` (or
162162+`services.tangled-knot.enable`) to `false`.
+27-7
docs/knot-hosting.md
···2233So you want to run your own knot server? Great! Here are a few prerequisites:
4455-1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux of some kind.
55+1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
662. A (sub)domain name. People generally use `knot.example.com`.
773. A valid SSL certificate for your domain.
88···5959EOF
6060```
61616262+Then, reload `sshd`:
6363+6464+```
6565+sudo systemctl reload ssh
6666+```
6767+6268Next, create the `git` user. We'll use the `git` user's home directory
6369to store repositories:
6470···6773```
68746975Create `/home/git/.knot.env` with the following, updating the values as
7070-necessary. The `KNOT_SERVER_SECRET` can be obtaind from the
7171-[/knots](/knots) page on Tangled.
7676+necessary. The `KNOT_SERVER_OWNER` should be set to your
7777+DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
72787379```
7480KNOT_REPO_SCAN_PATH=/home/git
7581KNOT_SERVER_HOSTNAME=knot.example.com
7682APPVIEW_ENDPOINT=https://tangled.sh
7777-KNOT_SERVER_SECRET=secret
8383+KNOT_SERVER_OWNER=did:plc:foobar
7884KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
7985KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
8086```
···8995systemctl start knotserver
9096```
91979292-The last step is to configure a reverse proxy like Nginx or Caddy to front yourself
9898+The last step is to configure a reverse proxy like Nginx or Caddy to front your
9399knot. Here's an example configuration for Nginx:
9410095101```
···122128Remember to use Let's Encrypt or similar to procure a certificate for your
123129knot domain.
124130125125-You should now have a running knot server! You can finalize your registration by hitting the
126126-`initialize` button on the [/knots](/knots) page.
131131+You should now have a running knot server! You can finalize
132132+your registration by hitting the `verify` button on the
133133+[/knots](https://tangled.sh/knots) page. This simply creates
134134+a record on your PDS to announce the existence of the knot.
127135128136### custom paths
129137···191199```
192200193201Make sure to restart your SSH server!
202202+203203+#### MOTD (message of the day)
204204+205205+To configure the MOTD used ("Welcome to this knot!" by default), edit the
206206+`/home/git/motd` file:
207207+208208+```
209209+printf "Hi from this knot!\n" > /home/git/motd
210210+```
211211+212212+Note that you should add a newline at the end if setting a non-empty message
213213+since the knot won't do this for you.
+60
docs/migrations.md
···11+# Migrations
22+33+This document is laid out in reverse-chronological order.
44+Newer migration guides are listed first, and older guides
55+are further down the page.
66+77+## Upgrading from v1.8.x
88+99+After v1.8.2, the HTTP APIs for knots and spindles have been
1010+deprecated and replaced with XRPC. Repositories on outdated
1111+knots will not be viewable from the appview. Upgrading is
1212+straightforward however.
1313+1414+For knots:
1515+1616+- Upgrade to latest tag (v1.9.0 or above)
1717+- Head to the [knot dashboard](https://tangled.sh/knots) and
1818+ hit the "retry" button to verify your knot
1919+2020+For spindles:
2121+2222+- Upgrade to latest tag (v1.9.0 or above)
2323+- Head to the [spindle
2424+ dashboard](https://tangled.sh/spindles) and hit the
2525+ "retry" button to verify your spindle
2626+2727+## Upgrading from v1.7.x
2828+2929+After v1.7.0, knot secrets have been deprecated. You no
3030+longer need a secret from the appview to run a knot. All
3131+authorized commands to knots are managed via [Inter-Service
3232+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
3333+Knots will be read-only until upgraded.
3434+3535+Upgrading is quite easy, in essence:
3636+3737+- `KNOT_SERVER_SECRET` is no more, you can remove this
3838+ environment variable entirely
3939+- `KNOT_SERVER_OWNER` is now required on boot, set this to
4040+ your DID. You can find your DID in the
4141+ [settings](https://tangled.sh/settings) page.
4242+- Restart your knot once you have replaced the environment
4343+ variable
4444+- Head to the [knot dashboard](https://tangled.sh/knots) and
4545+ hit the "retry" button to verify your knot. This simply
4646+ writes a `sh.tangled.knot` record to your PDS.
4747+4848+If you use the nix module, simply bump the flake to the
4949+latest revision, and change your config block like so:
5050+5151+```diff
5252+ services.tangled-knot = {
5353+ enable = true;
5454+ server = {
5555+- secretFile = /path/to/secret;
5656++ owner = "did:plc:foo";
5757+ };
5858+ };
5959+```
6060+
+193-38
docs/spindle/openbao.md
···11# spindle secrets with openbao
2233This document covers setting up Spindle to use OpenBao for secrets
44-management instead of the default SQLite backend.
44+management via OpenBao Proxy instead of the default SQLite backend.
55+66+## overview
77+88+Spindle now uses OpenBao Proxy for secrets management. The proxy handles
99+authentication automatically using AppRole credentials, while Spindle
1010+connects to the local proxy instead of directly to the OpenBao server.
1111+1212+This approach provides better security, automatic token renewal, and
1313+simplified application code.
514615## installation
716817Install OpenBao from nixpkgs:
9181019```bash
1111-nix-env -iA nixpkgs.openbao
2020+nix shell nixpkgs#openbao # for a local server
1221```
13221414-## local development setup
2323+## setup
2424+2525+The setup process is documented for both local development and production.
2626+2727+### local development
15281629Start OpenBao in dev mode:
17301831```bash
1919-bao server -dev
3232+bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
2033```
21342222-This starts OpenBao on `http://localhost:8200` with a root token. Save
2323-the root token from the output -- you'll need it.
3535+This starts OpenBao on `http://localhost:8201` with a root token.
24362537Set up environment for bao CLI:
26382739```bash
2840export BAO_ADDR=http://localhost:8200
2929-export BAO_TOKEN=hvs.your-root-token-here
4141+export BAO_TOKEN=root
3042```
31434444+### production
4545+4646+You would typically use a systemd service with a configuration file. Refer to
4747+[@tangled.sh/infra](https://tangled.sh/@tangled.sh/infra) for how this can be
4848+achieved using Nix.
4949+5050+Then, initialize the bao server:
5151+```bash
5252+bao operator init -key-shares=1 -key-threshold=1
5353+```
5454+5555+This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
5656+```bash
5757+bao operator unseal <unseal_key>
5858+```
5959+6060+All steps below remain the same across both dev and production setups.
6161+6262+### configure openbao server
6363+3264Create the spindle KV mount:
33653466```bash
3567bao secrets enable -path=spindle -version=2 kv
3668```
37693838-Set up AppRole authentication:
7070+Set up AppRole authentication and policy:
39714072Create a policy file `spindle-policy.hcl`:
41734274```hcl
7575+# Full access to spindle KV v2 data
4376path "spindle/data/*" {
4444- capabilities = ["create", "read", "update", "delete", "list"]
7777+ capabilities = ["create", "read", "update", "delete"]
4578}
46798080+# Access to metadata for listing and management
4781path "spindle/metadata/*" {
4848- capabilities = ["list", "read", "delete"]
8282+ capabilities = ["list", "read", "delete", "update"]
4983}
50845151-path "spindle/*" {
8585+# Allow listing at root level
8686+path "spindle/" {
5287 capabilities = ["list"]
5388}
8989+9090+# Required for connection testing and health checks
9191+path "auth/token/lookup-self" {
9292+ capabilities = ["read"]
9393+}
5494```
55955696Apply the policy and create an AppRole:
···61101bao write auth/approle/role/spindle \
62102 token_policies="spindle-policy" \
63103 token_ttl=1h \
6464- token_max_ttl=4h
104104+ token_max_ttl=4h \
105105+ bind_secret_id=true \
106106+ secret_id_ttl=0 \
107107+ secret_id_num_uses=0
65108```
6610967110Get the credentials:
6811169112```bash
7070-bao read auth/approle/role/spindle/role-id
7171-bao write -f auth/approle/role/spindle/secret-id
113113+# Get role ID (static)
114114+ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115115+116116+# Generate secret ID
117117+SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118118+119119+echo "Role ID: $ROLE_ID"
120120+echo "Secret ID: $SECRET_ID"
121121+```
122122+123123+### create proxy configuration
124124+125125+Create the credential files:
126126+127127+```bash
128128+# Create directory for OpenBao files
129129+mkdir -p /tmp/openbao
130130+131131+# Save credentials
132132+echo "$ROLE_ID" > /tmp/openbao/role-id
133133+echo "$SECRET_ID" > /tmp/openbao/secret-id
134134+chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135135+```
136136+137137+Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138138+139139+```hcl
140140+# OpenBao server connection
141141+vault {
142142+ address = "http://localhost:8200"
143143+}
144144+145145+# Auto-Auth using AppRole
146146+auto_auth {
147147+ method "approle" {
148148+ mount_path = "auth/approle"
149149+ config = {
150150+ role_id_file_path = "/tmp/openbao/role-id"
151151+ secret_id_file_path = "/tmp/openbao/secret-id"
152152+ }
153153+ }
154154+155155+ # Optional: write token to file for debugging
156156+ sink "file" {
157157+ config = {
158158+ path = "/tmp/openbao/token"
159159+ mode = 0640
160160+ }
161161+ }
162162+}
163163+164164+# Proxy listener for Spindle
165165+listener "tcp" {
166166+ address = "127.0.0.1:8201"
167167+ tls_disable = true
168168+}
169169+170170+# Enable API proxy with auto-auth token
171171+api_proxy {
172172+ use_auto_auth_token = true
173173+}
174174+175175+# Enable response caching
176176+cache {
177177+ use_auto_auth_token = true
178178+}
179179+180180+# Logging
181181+log_level = "info"
72182```
731837474-Configure Spindle:
184184+### start the proxy
185185+186186+Start OpenBao Proxy:
187187+188188+```bash
189189+bao proxy -config=/tmp/openbao/proxy.hcl
190190+```
191191+192192+The proxy will authenticate with OpenBao and start listening on
193193+`127.0.0.1:8201`.
194194+195195+### configure spindle
7519676197Set these environment variables for Spindle:
7719878199```bash
79200export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
8080-export SPINDLE_SERVER_SECRETS_OPENBAO_ADDR=http://localhost:8200
8181-export SPINDLE_SERVER_SECRETS_OPENBAO_ROLE_ID=your-role-id-from-above
8282-export SPINDLE_SERVER_SECRETS_OPENBAO_SECRET_ID=your-secret-id-from-above
201201+export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
83202export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
84203```
8520486205Start Spindle:
872068888-Spindle will now use OpenBao for secrets storage with automatic token
8989-renewal.
207207+Spindle will now connect to the local proxy, which handles all
208208+authentication automatically.
209209+210210+## production setup for proxy
211211+212212+For production, you'll want to run the proxy as a service:
213213+214214+Place your production configuration in `/etc/openbao/proxy.hcl` with
215215+proper TLS settings for the vault connection.
9021691217## verifying setup
922189393-List all secrets:
219219+Test the proxy directly:
9422095221```bash
9696-bao kv list spindle/
222222+# Check proxy health
223223+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224224+225225+# Test token lookup through proxy
226226+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
97227```
982289999-Add a test secret via Spindle API, then check it exists:
229229+Test OpenBao operations through the server:
100230101231```bash
232232+# List all secrets
233233+bao kv list spindle/
234234+235235+# Add a test secret via Spindle API, then check it exists
102236bao kv list spindle/repos/
103103-```
104237105105-Get a specific secret:
106106-107107-```bash
238238+# Get a specific secret
108239bao kv get spindle/repos/your_repo_path/SECRET_NAME
109240```
110241111242## how it works
112243244244+- Spindle connects to OpenBao Proxy on localhost (port 8201 in this guide)
245245+- The proxy authenticates with OpenBao using AppRole credentials
246246+- All Spindle requests go through the proxy, which injects authentication tokens
113247- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
114114-- Each repository gets its own namespace
115115-- Repository paths like `at://did:plc:alice/myrepo` become
116116- `at_did_plc_alice_myrepo`
117117-- The system automatically handles token renewal using AppRole
118118- authentication
119119-- On shutdown, Spindle cleanly stops the token renewal process
248248+- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249249+- The proxy handles all token renewal automatically
250250+- Spindle no longer manages tokens or authentication directly
120251121252## troubleshooting
122253123123-**403 errors**: Check that your BAO_TOKEN is set and the spindle mount
124124-exists
254254+**Connection refused**: Check that the OpenBao Proxy is running and
255255+listening on the configured address.
256256+257257+**403 errors**: Verify the AppRole credentials are correct and the policy
258258+has the necessary permissions.
125259126260**404 route errors**: The spindle KV mount probably doesn't exist - run
127127-the mount creation step again
261261+the mount creation step again.
128262129129-**Token expired**: The AppRole system should handle this automatically,
130130-but you can check token status with `bao token lookup`
263263+**Proxy authentication failures**: Check the proxy logs and verify the
264264+role-id and secret-id files are readable and contain valid credentials.
265265+266266+**Secret not found after writing**: This can indicate policy permission
267267+issues. Verify the policy includes both `spindle/data/*` and
268268+`spindle/metadata/*` paths with appropriate capabilities.
269269+270270+Check proxy logs:
271271+272272+```bash
273273+# If running as systemd service
274274+journalctl -u openbao-proxy -f
275275+276276+# If running directly, check the console output
277277+```
278278+279279+Test AppRole authentication manually:
280280+281281+```bash
282282+bao write auth/approle/login \
283283+ role_id="$(cat /tmp/openbao/role-id)" \
284284+ secret_id="$(cat /tmp/openbao/secret-id)"
285285+```
+140-41
docs/spindle/pipeline.md
···11-# spindle pipeline manifest
11+# spindle pipelines
22+33+Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
44+55+The fields are:
2633-Spindle pipelines are defined under the `.tangled/workflows` directory in a
44-repo. Generally:
77+- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
88+- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
99+- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
1010+- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
1111+- [Environment](#environment): An **optional** field that allows you to define environment variables.
1212+- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
51366-* Pipelines are defined in YAML.
77-* Dependencies can be specified from
88-[Nixpkgs](https://search.nixos.org) or custom registries.
99-* Environment variables can be set globally or per-step.
1414+## Trigger
10151111-Here's an example that uses all fields:
1616+The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
1717+1818+- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
1919+ - `push`: The workflow should run every time a commit is pushed to the repository.
2020+ - `pull_request`: The workflow should run every time a pull request is made or updated.
2121+ - `manual`: The workflow can be triggered manually.
2222+- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
2323+2424+For example, if you'd like define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
12251326```yaml
1414-# build_and_test.yaml
1527when:
1616- - event: ["push", "pull_request"]
2828+ - event: ["push", "manual"]
1729 branch: ["main", "develop"]
1818- - event: ["manual"]
3030+ - event: ["pull_request"]
3131+ branch: ["main"]
3232+```
3333+3434+## Engine
3535+3636+Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
3737+3838+- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
3939+4040+Example:
4141+4242+```yaml
4343+engine: "nixery"
4444+```
4545+4646+## Clone options
4747+4848+When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
4949+5050+- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
5151+- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
5252+- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
5353+5454+The default settings are:
5555+5656+```yaml
5757+clone:
5858+ skip: false
5959+ depth: 1
6060+ submodules: false
6161+```
6262+6363+## Dependencies
6464+6565+Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
6666+6767+Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
19686969+```yaml
2070dependencies:
2121- ## from nixpkgs
7171+ # nixpkgs
2272 nixpkgs:
2373 - nodejs
2424- ## custom registry
2525- git+https://tangled.sh/@oppi.li/statix:
2626- - statix
7474+ - go
7575+ # custom registry
7676+ git+https://tangled.sh/@example.com/my_pkg:
7777+ - my_pkg
7878+```
7979+8080+Now these dependencies are available to use in your workflow!
8181+8282+## Environment
8383+8484+The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
8585+8686+Example:
8787+8888+```yaml
8989+environment:
9090+ GOOS: "linux"
9191+ GOARCH: "arm64"
9292+ NODE_ENV: "production"
9393+ MY_ENV_VAR: "MY_ENV_VALUE"
9494+```
27952828-steps:
2929- - name: "Install dependencies"
3030- command: "npm install"
3131- environment:
3232- NODE_ENV: "development"
3333- CI: "true"
9696+## Steps
9797+9898+The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
9999+100100+- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
101101+- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
102102+- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
341033535- - name: "Run linter"
3636- command: "npm run lint"
104104+Example:
371053838- - name: "Run tests"
3939- command: "npm test"
106106+```yaml
107107+steps:
108108+ - name: "Build backend"
109109+ command: "go build"
40110 environment:
4141- NODE_ENV: "test"
4242- JEST_WORKERS: "2"
4343-4444- - name: "Build application"
111111+ GOOS: "darwin"
112112+ GOARCH: "arm64"
113113+ - name: "Build frontend"
45114 command: "npm run build"
46115 environment:
47116 NODE_ENV: "production"
117117+```
481184949-environment:
5050- BUILD_NUMBER: "123"
5151- GIT_BRANCH: "main"
119119+## Complete workflow
521205353-## current repository is cloned and checked out at the target ref
5454-## by default.
121121+```yaml
122122+# .tangled/workflows/build.yml
123123+124124+when:
125125+ - event: ["push", "manual"]
126126+ branch: ["main", "develop"]
127127+ - event: ["pull_request"]
128128+ branch: ["main"]
129129+130130+engine: "nixery"
131131+132132+# using the default values
55133clone:
56134 skip: false
5757- depth: 50
5858- submodules: true
5959-```
135135+ depth: 1
136136+ submodules: false
137137+138138+dependencies:
139139+ # nixpkgs
140140+ nixpkgs:
141141+ - nodejs
142142+ - go
143143+ # custom registry
144144+ git+https://tangled.sh/@example.com/my_pkg:
145145+ - my_pkg
601466161-## git push options
147147+environment:
148148+ GOOS: "linux"
149149+ GOARCH: "arm64"
150150+ NODE_ENV: "production"
151151+ MY_ENV_VAR: "MY_ENV_VALUE"
621526363-These are push options that can be used with the `--push-option (-o)` flag of git push:
153153+steps:
154154+ - name: "Build backend"
155155+ command: "go build"
156156+ environment:
157157+ GOOS: "darwin"
158158+ GOARCH: "arm64"
159159+ - name: "Build frontend"
160160+ command: "npm run build"
161161+ environment:
162162+ NODE_ENV: "production"
163163+```
641646565-- `verbose-ci`, `ci-verbose`: enables diagnostics reporting for the CI pipeline, allowing you to see any issues when you push.
6666-- `skip-ci`, `ci-skip`: skips triggering the CI pipeline.
165165+If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
···11+package xrpc
22+33+import (
44+ "fmt"
55+ "net/http"
66+ "runtime/debug"
77+88+ "tangled.sh/tangled.sh/core/api/tangled"
99+)
1010+1111+// version is set during build time.
1212+var version string
1313+1414+func (x *Xrpc) Version(w http.ResponseWriter, r *http.Request) {
1515+ if version == "" {
1616+ info, ok := debug.ReadBuildInfo()
1717+ if !ok {
1818+ http.Error(w, "failed to read build info", http.StatusInternalServerError)
1919+ return
2020+ }
2121+2222+ var modVer string
2323+ var sha string
2424+ var modified bool
2525+2626+ for _, mod := range info.Deps {
2727+ if mod.Path == "tangled.sh/tangled.sh/knotserver/xrpc" {
2828+ modVer = mod.Version
2929+ break
3030+ }
3131+ }
3232+3333+ for _, setting := range info.Settings {
3434+ switch setting.Key {
3535+ case "vcs.revision":
3636+ sha = setting.Value
3737+ case "vcs.modified":
3838+ modified = setting.Value == "true"
3939+ }
4040+ }
4141+4242+ if modVer == "" {
4343+ modVer = "unknown"
4444+ }
4545+4646+ if sha == "" {
4747+ version = modVer
4848+ } else if modified {
4949+ version = fmt.Sprintf("%s (%s with modifications)", modVer, sha)
5050+ } else {
5151+ version = fmt.Sprintf("%s (%s)", modVer, sha)
5252+ }
5353+ }
5454+5555+ response := tangled.KnotVersion_Output{
5656+ Version: version,
5757+ }
5858+5959+ writeJson(w, response)
6060+}
+127
knotserver/xrpc/xrpc.go
···11+package xrpc
22+33+import (
44+ "encoding/json"
55+ "log/slog"
66+ "net/http"
77+ "strings"
88+99+ securejoin "github.com/cyphar/filepath-securejoin"
1010+ "tangled.sh/tangled.sh/core/api/tangled"
1111+ "tangled.sh/tangled.sh/core/idresolver"
1212+ "tangled.sh/tangled.sh/core/jetstream"
1313+ "tangled.sh/tangled.sh/core/knotserver/config"
1414+ "tangled.sh/tangled.sh/core/knotserver/db"
1515+ "tangled.sh/tangled.sh/core/notifier"
1616+ "tangled.sh/tangled.sh/core/rbac"
1717+ xrpcerr "tangled.sh/tangled.sh/core/xrpc/errors"
1818+ "tangled.sh/tangled.sh/core/xrpc/serviceauth"
1919+2020+ "github.com/go-chi/chi/v5"
2121+)
// Xrpc holds the dependencies shared by the knot server's XRPC
// endpoint handlers. All fields are injected by the caller that
// constructs it and builds the Router.
type Xrpc struct {
	// Config supplies knot server configuration; parseRepoParam reads
	// Config.Repo.ScanPath to resolve repository paths on disk.
	Config *config.Config
	// Db is the knot server's database handle.
	Db *db.DB
	// Ingester is the jetstream client (presumably for event ingestion;
	// not used in the handlers visible here — confirm against callers).
	Ingester *jetstream.JetstreamClient
	// Enforcer performs RBAC checks.
	Enforcer *rbac.Enforcer
	// Logger is the structured logger for the handlers.
	Logger *slog.Logger
	// Notifier broadcasts events to interested listeners.
	Notifier *notifier.Notifier
	// Resolver resolves AT identities (DIDs/handles).
	Resolver *idresolver.Resolver
	// ServiceAuth verifies inter-service auth; Router applies its
	// VerifyServiceAuth middleware to the mutating repo endpoints.
	ServiceAuth *serviceauth.ServiceAuth
}
3333+3434+func (x *Xrpc) Router() http.Handler {
3535+ r := chi.NewRouter()
3636+3737+ r.Group(func(r chi.Router) {
3838+ r.Use(x.ServiceAuth.VerifyServiceAuth)
3939+4040+ r.Post("/"+tangled.RepoSetDefaultBranchNSID, x.SetDefaultBranch)
4141+ r.Post("/"+tangled.RepoCreateNSID, x.CreateRepo)
4242+ r.Post("/"+tangled.RepoDeleteNSID, x.DeleteRepo)
4343+ r.Post("/"+tangled.RepoForkStatusNSID, x.ForkStatus)
4444+ r.Post("/"+tangled.RepoForkSyncNSID, x.ForkSync)
4545+ r.Post("/"+tangled.RepoHiddenRefNSID, x.HiddenRef)
4646+ r.Post("/"+tangled.RepoMergeNSID, x.Merge)
4747+ })
4848+4949+ // merge check is an open endpoint
5050+ //
5151+ // TODO: should we constrain this more?
5252+ // - we can calculate on PR submit/resubmit/gitRefUpdate etc.
5353+ // - use ETags on clients to keep requests to a minimum
5454+ r.Post("/"+tangled.RepoMergeCheckNSID, x.MergeCheck)
5555+5656+ // repo query endpoints (no auth required)
5757+ r.Get("/"+tangled.RepoTreeNSID, x.RepoTree)
5858+ r.Get("/"+tangled.RepoLogNSID, x.RepoLog)
5959+ r.Get("/"+tangled.RepoBranchesNSID, x.RepoBranches)
6060+ r.Get("/"+tangled.RepoTagsNSID, x.RepoTags)
6161+ r.Get("/"+tangled.RepoBlobNSID, x.RepoBlob)
6262+ r.Get("/"+tangled.RepoDiffNSID, x.RepoDiff)
6363+ r.Get("/"+tangled.RepoCompareNSID, x.RepoCompare)
6464+ r.Get("/"+tangled.RepoGetDefaultBranchNSID, x.RepoGetDefaultBranch)
6565+ r.Get("/"+tangled.RepoBranchNSID, x.RepoBranch)
6666+ r.Get("/"+tangled.RepoArchiveNSID, x.RepoArchive)
6767+ r.Get("/"+tangled.RepoLanguagesNSID, x.RepoLanguages)
6868+6969+ // knot query endpoints (no auth required)
7070+ r.Get("/"+tangled.KnotListKeysNSID, x.ListKeys)
7171+ r.Get("/"+tangled.KnotVersionNSID, x.Version)
7272+7373+ // service query endpoints (no auth required)
7474+ r.Get("/"+tangled.OwnerNSID, x.Owner)
7575+7676+ return r
7777+}
7878+7979+// parseRepoParam parses a repo parameter in 'did/repoName' format and returns
8080+// the full repository path on disk
8181+func (x *Xrpc) parseRepoParam(repo string) (string, error) {
8282+ if repo == "" {
8383+ return "", xrpcerr.NewXrpcError(
8484+ xrpcerr.WithTag("InvalidRequest"),
8585+ xrpcerr.WithMessage("missing repo parameter"),
8686+ )
8787+ }
8888+8989+ // Parse repo string (did/repoName format)
9090+ parts := strings.SplitN(repo, "/", 2)
9191+ if len(parts) != 2 {
9292+ return "", xrpcerr.NewXrpcError(
9393+ xrpcerr.WithTag("InvalidRequest"),
9494+ xrpcerr.WithMessage("invalid repo format, expected 'did/repoName'"),
9595+ )
9696+ }
9797+9898+ did := parts[0]
9999+ repoName := parts[1]
100100+101101+ // Construct repository path using the same logic as didPath
102102+ didRepoPath, err := securejoin.SecureJoin(did, repoName)
103103+ if err != nil {
104104+ return "", xrpcerr.RepoNotFoundError
105105+ }
106106+107107+ repoPath, err := securejoin.SecureJoin(x.Config.Repo.ScanPath, didRepoPath)
108108+ if err != nil {
109109+ return "", xrpcerr.RepoNotFoundError
110110+ }
111111+112112+ return repoPath, nil
113113+}
114114+115115+func writeError(w http.ResponseWriter, e xrpcerr.XrpcError, status int) {
116116+ w.Header().Set("Content-Type", "application/json")
117117+ w.WriteHeader(status)
118118+ json.NewEncoder(w).Encode(e)
119119+}
120120+121121+func writeJson(w http.ResponseWriter, response any) {
122122+ w.Header().Set("Content-Type", "application/json")
123123+ if err := json.NewEncoder(w).Encode(response); err != nil {
124124+ writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
125125+ return
126126+ }
127127+}
+158
legal/privacy.md
···11+# Privacy Policy
22+33+**Last updated:** January 15, 2025
44+55+This Privacy Policy describes how Tangled ("we," "us," or "our")
66+collects, uses, and shares your personal information when you use our
77+platform and services (the "Service").
88+99+## 1. Information We Collect
1010+1111+### Account Information
1212+1313+When you create an account, we collect:
1414+1515+- Your chosen username
1616+- Email address
1717+- Profile information you choose to provide
1818+- Authentication data
1919+2020+### Content and Activity
2121+2222+We store:
2323+2424+- Code repositories and associated metadata
2525+- Issues, pull requests, and comments
2626+- Activity logs and usage patterns
2727+- Public keys for authentication
2828+2929+## 2. Data Location and Hosting
3030+3131+### EU Data Hosting
3232+3333+**All Tangled service data is hosted within the European Union.**
3434+Specifically:
3535+3636+- **Personal Data Servers (PDS):** Accounts hosted on Tangled PDS
3737+ (*.tngl.sh) are located in Finland
3838+- **Application Data:** All other service data is stored on EU-based
3939+ servers
4040+- **Data Processing:** All data processing occurs within EU
4141+ jurisdiction
4242+4343+### External PDS Notice
4444+4545+**Important:** If your account is hosted on Bluesky's PDS or other
4646+self-hosted Personal Data Servers (not *.tngl.sh), we do not control
4747+that data. The data protection, storage location, and privacy
4848+practices for such accounts are governed by the respective PDS
4949+provider's policies, not this Privacy Policy. We only control data
5050+processing within our own services and infrastructure.
5151+5252+## 3. Third-Party Data Processors
5353+5454+We only share your data with the following third-party processors:
5555+5656+### Resend (Email Services)
5757+5858+- **Purpose:** Sending transactional emails (account verification,
5959+ notifications)
6060+- **Data Shared:** Email address and necessary message content
6161+6262+### Cloudflare (Image Caching)
6363+6464+- **Purpose:** Caching and optimizing image delivery
6565+- **Data Shared:** Public images and associated metadata for caching
6666+ purposes
6767+6868+### Posthog (Usage Metrics Tracking)
6969+7070+- **Purpose:** Tracking usage and platform metrics
7171+- **Data Shared:** Anonymous usage data, IP addresses, DIDs, and browser
7272+ information
7373+7474+## 4. How We Use Your Information
7575+7676+We use your information to:
7777+7878+- Provide and maintain the Service
7979+- Process your transactions and requests
8080+- Send you technical notices and support messages
8181+- Improve and develop new features
8282+- Ensure security and prevent fraud
8383+- Comply with legal obligations
8484+8585+## 5. Data Sharing and Disclosure
8686+8787+We do not sell, trade, or rent your personal information. We may share
8888+your information only in the following circumstances:
8989+9090+- With the third-party processors listed above
9191+- When required by law or legal process
9292+- To protect our rights, property, or safety, or that of our users
9393+- In connection with a merger, acquisition, or sale of assets (with
9494+ appropriate protections)
9595+9696+## 6. Data Security
9797+9898+We implement appropriate technical and organizational measures to
9999+protect your personal information against unauthorized access,
100100+alteration, disclosure, or destruction. However, no method of
101101+transmission over the Internet is 100% secure.
102102+103103+## 7. Data Retention
104104+105105+We retain your personal information for as long as necessary to provide
106106+the Service and fulfill the purposes outlined in this Privacy Policy,
107107+unless a longer retention period is required by law.
108108+109109+## 8. Your Rights
110110+111111+Under applicable data protection laws, you have the right to:
112112+113113+- Access your personal information
114114+- Correct inaccurate information
115115+- Request deletion of your information
116116+- Object to processing of your information
117117+- Data portability
118118+- Withdraw consent (where applicable)
119119+120120+## 9. Cookies and Tracking
121121+122122+We use cookies and similar technologies to:
123123+124124+- Maintain your login session
125125+- Remember your preferences
126126+- Analyze usage patterns to improve the Service
127127+128128+You can control cookie settings through your browser preferences.
129129+130130+## 10. Children's Privacy
131131+132132+The Service is not intended for children under 16 years of age. We do
133133+not knowingly collect personal information from children under 16. If
134134+we become aware that we have collected such information, we will take
135135+steps to delete it.
136136+137137+## 11. International Data Transfers
138138+139139+While all our primary data processing occurs within the EU, some of our
140140+third-party processors may process data outside the EU. When this
141141+occurs, we ensure appropriate safeguards are in place, such as Standard
142142+Contractual Clauses or adequacy decisions.
143143+144144+## 12. Changes to This Privacy Policy
145145+146146+We may update this Privacy Policy from time to time. We will notify you
147147+of any changes by posting the new Privacy Policy on this page and
148148+updating the "Last updated" date.
149149+150150+## 13. Contact Information
151151+152152+If you have any questions about this Privacy Policy or wish to exercise
153153+your rights, please contact us through our platform or via email.
154154+155155+---
156156+157157+This Privacy Policy complies with the EU General Data Protection
158158+Regulation (GDPR) and other applicable data protection laws.
+109
legal/terms.md
···11+# Terms of Service
22+33+**Last updated:** January 15, 2025
44+55+Welcome to Tangled. These Terms of Service ("Terms") govern your access
66+to and use of the Tangled platform and services (the "Service")
77+operated by us ("Tangled," "we," "us," or "our").
88+99+## 1. Acceptance of Terms
1010+1111+By accessing or using our Service, you agree to be bound by these Terms.
1212+If you disagree with any part of these terms, then you may not access
1313+the Service.
1414+1515+## 2. Account Registration
1616+1717+To use certain features of the Service, you must register for an
1818+account. You agree to provide accurate, current, and complete
1919+information during the registration process and to update such
2020+information to keep it accurate, current, and complete.
2121+2222+## 3. Account Termination
2323+2424+> **Important Notice**
2525+>
2626+> **We reserve the right to terminate, suspend, or restrict access to
2727+> your account at any time, for any reason, or for no reason at all, at
2828+> our sole discretion.** This includes, but is not limited to,
2929+> termination for violation of these Terms, inappropriate conduct, spam,
3030+> abuse, or any other behavior we deem harmful to the Service or other
3131+> users.
3232+>
3333+> Account termination may result in the loss of access to your
3434+> repositories, data, and other content associated with your account. We
3535+> are not obligated to provide advance notice of termination, though we
3636+> may do so in our discretion.
3737+3838+## 4. Acceptable Use
3939+4040+You agree not to use the Service to:
4141+4242+- Violate any applicable laws or regulations
4343+- Infringe upon the rights of others
4444+- Upload, store, or share content that is illegal, harmful, threatening,
4545+ abusive, harassing, defamatory, vulgar, obscene, or otherwise
4646+ objectionable
4747+- Engage in spam, phishing, or other deceptive practices
4848+- Attempt to gain unauthorized access to the Service or other users'
4949+ accounts
5050+- Interfere with or disrupt the Service or servers connected to the
5151+ Service
5252+5353+## 5. Content and Intellectual Property
5454+5555+You retain ownership of the content you upload to the Service. By
5656+uploading content, you grant us a non-exclusive, worldwide, royalty-free
5757+license to use, reproduce, modify, and distribute your content as
5858+necessary to provide the Service.
5959+6060+## 6. Privacy
6161+6262+Your privacy is important to us. Please review our [Privacy
6363+Policy](/privacy), which also governs your use of the Service.
6464+6565+## 7. Disclaimers
6666+6767+The Service is provided on an "AS IS" and "AS AVAILABLE" basis. We make
6868+no warranties, expressed or implied, and hereby disclaim and negate all
6969+other warranties including without limitation, implied warranties or
7070+conditions of merchantability, fitness for a particular purpose, or
7171+non-infringement of intellectual property or other violation of rights.
7272+7373+## 8. Limitation of Liability
7474+7575+In no event shall Tangled, nor its directors, employees, partners,
7676+agents, suppliers, or affiliates, be liable for any indirect,
7777+incidental, special, consequential, or punitive damages, including
7878+without limitation, loss of profits, data, use, goodwill, or other
7979+intangible losses, resulting from your use of the Service.
8080+8181+## 9. Indemnification
8282+8383+You agree to defend, indemnify, and hold harmless Tangled and its
8484+affiliates, officers, directors, employees, and agents from and against
8585+any and all claims, damages, obligations, losses, liabilities, costs,
8686+or debt, and expenses (including attorney's fees).
8787+8888+## 10. Governing Law
8989+9090+These Terms shall be interpreted and governed by the laws of Finland,
9191+without regard to its conflict of law provisions.
9292+9393+## 11. Changes to Terms
9494+9595+We reserve the right to modify or replace these Terms at any time. If a
9696+revision is material, we will try to provide at least 30 days notice
9797+prior to any new terms taking effect.
9898+9999+## 12. Contact Information
100100+101101+If you have any questions about these Terms of Service, please contact
102102+us through our platform or via email.
103103+104104+---
105105+106106+These terms are effective as of the last updated date shown above and
107107+will remain in effect except with respect to any changes in their
108108+provisions in the future, which will be in effect immediately after
109109+being posted on this page.
···99// NewHandler sets up a new slog.Handler with the service name
1010// as an attribute
1111func NewHandler(name string) slog.Handler {
1212- handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{})
1212+ handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
1313+ Level: slog.LevelDebug,
1414+ })
13151416 var attrs []slog.Attr
1517 attrs = append(attrs, slog.Attr{Key: "service", Value: slog.StringValue(name)})
+99-21
nix/gomod2nix.toml
···1111 version = "v0.6.2"
1212 hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
1313 [mod."github.com/ProtonMail/go-crypto"]
1414- version = "v1.2.0"
1515- hash = "sha256-5fKgWUz6BoyFNNZ1OD9QjhBrhNEBCuVfO2WqH+X59oo="
1414+ version = "v1.3.0"
1515+ hash = "sha256-TUG+C4MyeWglOmiwiW2/NUVurFHXLgEPRd3X9uQ1NGI="
1616+ [mod."github.com/alecthomas/assert/v2"]
1717+ version = "v2.11.0"
1818+ hash = "sha256-tDJCDKZ0R4qNA7hgMKWrpDyogt1802LCJDBCExxdqaU="
1619 [mod."github.com/alecthomas/chroma/v2"]
1720 version = "v2.19.0"
1821 hash = "sha256-dxsu43a+PvHg2jYR0Tfys6a8x6IVR+9oCGAh+fvL3SM="
1922 replaced = "github.com/oppiliappan/chroma/v2"
2323+ [mod."github.com/alecthomas/repr"]
2424+ version = "v0.4.0"
2525+ hash = "sha256-CyAzMSTfLGHDtfGXi91y7XMVpPUDNOKjsznb+osl9dU="
2026 [mod."github.com/anmitsu/go-shlex"]
2127 version = "v0.0.0-20200514113438-38f4b401e2be"
2228 hash = "sha256-L3Ak4X2z7WXq7vMKuiHCOJ29nlpajUQ08Sfb9T0yP54="
···5157 [mod."github.com/casbin/govaluate"]
5258 version = "v1.3.0"
5359 hash = "sha256-vDUFEGt8oL4n/PHwlMZPjmaLvcpGTN4HEIRGl2FPxUA="
6060+ [mod."github.com/cenkalti/backoff/v4"]
6161+ version = "v4.3.0"
6262+ hash = "sha256-wfVjNZsGG1WoNC5aL+kdcy6QXPgZo4THAevZ1787md8="
5463 [mod."github.com/cespare/xxhash/v2"]
5564 version = "v2.3.0"
5665 hash = "sha256-7hRlwSR+fos1kx4VZmJ/7snR7zHh8ZFKX+qqqqGcQpY="
5766 [mod."github.com/cloudflare/circl"]
5858- version = "v1.6.0"
5959- hash = "sha256-a+SVfnHYC8Fb+NQLboNg5P9sry+WutzuNetVHFVAAo0="
6767+ version = "v1.6.2-0.20250618153321-aa837fd1539d"
6868+ hash = "sha256-0s/i/XmMcuvPQ+qK9OIU5KxwYZyLVXRtdlYvIXRJT3Y="
6969+ [mod."github.com/cloudflare/cloudflare-go"]
7070+ version = "v0.115.0"
7171+ hash = "sha256-jezmDs6IsHA4rag7DzcHDfDgde0vU4iKgCN9+0XDViw="
6072 [mod."github.com/containerd/errdefs"]
6173 version = "v1.0.0"
6274 hash = "sha256-wMZGoeqvRhuovYCJx0Js4P3qFCNTZ/6Atea/kNYoPMI="
···105117 [mod."github.com/felixge/httpsnoop"]
106118 version = "v1.0.4"
107119 hash = "sha256-c1JKoRSndwwOyOxq9ddCe+8qn7mG9uRq2o/822x5O/c="
120120+ [mod."github.com/fsnotify/fsnotify"]
121121+ version = "v1.6.0"
122122+ hash = "sha256-DQesOCweQPEwmAn6s7DCP/Dwy8IypC+osbpfsvpkdP0="
108123 [mod."github.com/gliderlabs/ssh"]
109124 version = "v0.3.8"
110125 hash = "sha256-FW+91qCB3rfTm0I1VmqfwA7o+2kDys2JHOudKKyxWwc="
···127142 version = "v5.17.0"
128143 hash = "sha256-gya68abB6GtejUqr60DyU7NIGtNzHQVCAeDTYKk1evQ="
129144 replaced = "github.com/oppiliappan/go-git/v5"
145145+ [mod."github.com/go-jose/go-jose/v3"]
146146+ version = "v3.0.4"
147147+ hash = "sha256-RrLHCu9l6k0XVobdZQJ9Sx/VTQcWjrdLR5BEG7yXTEQ="
130148 [mod."github.com/go-logr/logr"]
131149 version = "v1.4.3"
132150 hash = "sha256-Nnp/dEVNMxLp3RSPDHZzGbI8BkSNuZMX0I0cjWKXXLA="
···136154 [mod."github.com/go-redis/cache/v9"]
137155 version = "v9.0.0"
138156 hash = "sha256-b4S3K4KoZhF0otw6FRIOq/PTdHGrb/LumB4GKo4khsY="
157157+ [mod."github.com/go-test/deep"]
158158+ version = "v1.1.1"
159159+ hash = "sha256-WvPrTvUPmbQb4R6DrvSB9O3zm0IOk+n14YpnSl2deR8="
139160 [mod."github.com/goccy/go-json"]
140161 version = "v0.10.5"
141162 hash = "sha256-/EtlGihP0/7oInzMC5E0InZ4b5Ad3s4xOpqotloi3xw="
···148169 [mod."github.com/golang/groupcache"]
149170 version = "v0.0.0-20241129210726-2c02b8208cf8"
150171 hash = "sha256-AdLZ3dJLe/yduoNvZiXugZxNfmwJjNQyQGsIdzYzH74="
172172+ [mod."github.com/golang/mock"]
173173+ version = "v1.6.0"
174174+ hash = "sha256-fWdnMQisRbiRzGT3ISrUHovquzLRHWvcv1JEsJFZRno="
175175+ [mod."github.com/google/go-querystring"]
176176+ version = "v1.1.0"
177177+ hash = "sha256-itsKgKghuX26czU79cK6C2n+lc27jm5Dw1XbIRgwZJY="
151178 [mod."github.com/google/uuid"]
152179 version = "v1.6.0"
153180 hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
154181 [mod."github.com/gorilla/css"]
155182 version = "v1.0.1"
156183 hash = "sha256-6JwNHqlY2NpZ0pSQTyYPSpiNqjXOdFHqrUT10sv3y8A="
184184+ [mod."github.com/gorilla/feeds"]
185185+ version = "v1.2.0"
186186+ hash = "sha256-ptczizo27t6Bsq6rHJ4WiHmBRP54UC5yNfHghAqOBQk="
157187 [mod."github.com/gorilla/securecookie"]
158188 version = "v1.1.2"
159189 hash = "sha256-KeMHNM9emxX+N0WYiZsTii7n8sNsmjWwbnQ9SaJfTKE="
···161191 version = "v1.4.0"
162192 hash = "sha256-cLK2z1uOEz7Wah/LclF65ptYMqzuvaRnfIGYqtn3b7g="
163193 [mod."github.com/gorilla/websocket"]
164164- version = "v1.5.3"
165165- hash = "sha256-vTIGEFMEi+30ZdO6ffMNJ/kId6pZs5bbyqov8xe9BM0="
194194+ version = "v1.5.4-0.20250319132907-e064f32e3674"
195195+ hash = "sha256-a8n6oe20JDpwThClgAyVhJDi6QVaS0qzT4PvRxlQ9to="
196196+ [mod."github.com/hashicorp/errwrap"]
197197+ version = "v1.1.0"
198198+ hash = "sha256-6lwuMQOfBq+McrViN3maJTIeh4f8jbEqvLy2c9FvvFw="
166199 [mod."github.com/hashicorp/go-cleanhttp"]
167200 version = "v0.5.2"
168201 hash = "sha256-N9GOKYo7tK6XQUFhvhImtL7PZW/mr4C4Manx/yPVvcQ="
202202+ [mod."github.com/hashicorp/go-multierror"]
203203+ version = "v1.1.1"
204204+ hash = "sha256-ANzPEUJIZIlToxR89Mn7Db73d9LGI51ssy7eNnUgmlA="
169205 [mod."github.com/hashicorp/go-retryablehttp"]
170206 version = "v0.7.8"
171207 hash = "sha256-4LZwKaFBbpKi9lSq5y6lOlYHU6WMnQdGNMxTd33rN80="
208208+ [mod."github.com/hashicorp/go-secure-stdlib/parseutil"]
209209+ version = "v0.2.0"
210210+ hash = "sha256-mb27ZKw5VDTmNj1QJvxHVR0GyY7UdacLJ0jWDV3nQd8="
211211+ [mod."github.com/hashicorp/go-secure-stdlib/strutil"]
212212+ version = "v0.1.2"
213213+ hash = "sha256-UmCMzjamCW1d9KNvNzELqKf1ElHOXPz+ZtdJkI+DV0A="
214214+ [mod."github.com/hashicorp/go-sockaddr"]
215215+ version = "v1.0.7"
216216+ hash = "sha256-p6eDOrGzN1jMmT/F/f/VJMq0cKNFhUcEuVVwTE6vSrs="
172217 [mod."github.com/hashicorp/golang-lru"]
173218 version = "v1.0.2"
174219 hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
175220 [mod."github.com/hashicorp/golang-lru/v2"]
176221 version = "v2.0.7"
177222 hash = "sha256-t1bcXLgrQNOYUVyYEZ0knxcXpsTk4IuJZDjKvyJX75g="
223223+ [mod."github.com/hashicorp/hcl"]
224224+ version = "v1.0.1-vault-7"
225225+ hash = "sha256-xqYtjCJQVsg04Yj2Uy2Q5bi6X6cDRYhJD/SUEWaHMDM="
226226+ [mod."github.com/hexops/gotextdiff"]
227227+ version = "v1.0.3"
228228+ hash = "sha256-wVs5uJs2KHU1HnDCDdSe0vIgNZylvs8oNidDxwA3+O0="
178229 [mod."github.com/hiddeco/sshsig"]
179230 version = "v0.2.0"
180231 hash = "sha256-Yc8Ip4XxrL5plb7Lq0ziYFznteVDZnskoyOZDIMsWOU="
···256307 [mod."github.com/minio/sha256-simd"]
257308 version = "v1.0.1"
258309 hash = "sha256-4hfGDIQaWq8fvtGzHDhoK9v2IocXnJY7OAL6saMJbmA="
310310+ [mod."github.com/mitchellh/mapstructure"]
311311+ version = "v1.5.0"
312312+ hash = "sha256-ztVhGQXs67MF8UadVvG72G3ly0ypQW0IRDdOOkjYwoE="
259313 [mod."github.com/moby/docker-image-spec"]
260314 version = "v1.3.1"
261315 hash = "sha256-xwSNLmMagzywdGJIuhrWl1r7cIWBYCOMNYbuDDT6Jhs="
···289343 [mod."github.com/munnerz/goautoneg"]
290344 version = "v0.0.0-20191010083416-a7dc8b61c822"
291345 hash = "sha256-79URDDFenmGc9JZu+5AXHToMrtTREHb3BC84b/gym9Q="
346346+ [mod."github.com/onsi/gomega"]
347347+ version = "v1.37.0"
348348+ hash = "sha256-PfHFYp365MwBo+CUZs+mN5QEk3Kqe9xrBX+twWfIc9o="
349349+ [mod."github.com/openbao/openbao/api/v2"]
350350+ version = "v2.3.0"
351351+ hash = "sha256-1bIyvL3GdzPUfsM+gxuKMaH5jKxMaucZQgL6/DfbmDM="
292352 [mod."github.com/opencontainers/go-digest"]
293353 version = "v1.0.0"
294354 hash = "sha256-cfVDjHyWItmUGZ2dzQhCHgmOmou8v7N+itDkLZVkqkQ="
···296356 version = "v1.1.1"
297357 hash = "sha256-bxBjtl+6846Ed3QHwdssOrNvlHV6b+Dn17zPISSQGP8="
298358 [mod."github.com/opentracing/opentracing-go"]
299299- version = "v1.2.0"
300300- hash = "sha256-kKTKFGXOsCF6QdVzI++GgaRzv2W+kWq5uDXOJChvLxM="
359359+ version = "v1.2.1-0.20220228012449-10b1cf09e00b"
360360+ hash = "sha256-77oWcDviIoGWHVAotbgmGRpLGpH5AUy+pM15pl3vRrw="
301361 [mod."github.com/pjbgf/sha1cd"]
302362 version = "v0.3.2"
303363 hash = "sha256-jdbiRhU8xc1C5c8m7BSCj71PUXHY3f7TWFfxDKKpUMk="
···326386 version = "v0.16.1"
327387 hash = "sha256-OBCvKlLW2obct35p0L9Q+1ZrxZjpTmbgHMP2rng9hpo="
328388 [mod."github.com/redis/go-redis/v9"]
329329- version = "v9.3.0"
330330- hash = "sha256-PNXDX3BH92d2jL/AkdK0eWMorh387Y6duwYNhsqNe+w="
389389+ version = "v9.7.3"
390390+ hash = "sha256-7ip5Ns/NEnFmVLr5iN8m3gS4RrzVAYJ7pmJeeaTmjjo="
331391 [mod."github.com/resend/resend-go/v2"]
332392 version = "v2.15.0"
333393 hash = "sha256-1lMoxuMLQXaNWFKadS6rpztAKwvIl3/LWMXqw7f5WYg="
394394+ [mod."github.com/ryanuber/go-glob"]
395395+ version = "v1.0.0"
396396+ hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY="
334397 [mod."github.com/segmentio/asm"]
335398 version = "v1.2.0"
336399 hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs="
···362425 [mod."github.com/whyrusleeping/cbor-gen"]
363426 version = "v0.3.1"
364427 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
428428+ [mod."github.com/wyatt915/goldmark-treeblood"]
429429+ version = "v0.0.0-20250825231212-5dcbdb2f4b57"
430430+ hash = "sha256-IZEsUXTBTsNgWoD7vqRUc9aFCCHNjzk1IUmI9O+NCnM="
431431+ [mod."github.com/wyatt915/treeblood"]
432432+ version = "v0.1.15"
433433+ hash = "sha256-hb99exdkoY2Qv8WdDxhwgPXGbEYimUr6wFtPXEvcO9g="
365434 [mod."github.com/yuin/goldmark"]
366366- version = "v1.4.13"
367367- hash = "sha256-GVwFKZY6moIS6I0ZGuio/WtDif+lkZRfqWS6b4AAJyI="
435435+ version = "v1.7.12"
436436+ hash = "sha256-thLYBS4woL2X5qRdo7vP+xCvjlGRDU0jXtDCUt6vvWM="
437437+ [mod."github.com/yuin/goldmark-highlighting/v2"]
438438+ version = "v2.0.0-20230729083705-37449abec8cc"
439439+ hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
368440 [mod."gitlab.com/yawning/secp256k1-voi"]
369441 version = "v0.0.0-20230925100816-f2616030848b"
370442 hash = "sha256-X8INg01LTg13iOuwPI3uOhPN7r01sPZtmtwJ2sudjCA="
···380452 [mod."go.opentelemetry.io/otel"]
381453 version = "v1.37.0"
382454 hash = "sha256-zWpyp9K8/Te86uhNjamchZctTdAnmHhoVw9m4ACfSoo="
455455+ [mod."go.opentelemetry.io/otel/exporters/otlp/otlptrace"]
456456+ version = "v1.33.0"
457457+ hash = "sha256-D5BMzmtN1d3pRnxIcvDOyQrjerK1JoavtYjJLhPKv/I="
383458 [mod."go.opentelemetry.io/otel/metric"]
384459 version = "v1.37.0"
385460 hash = "sha256-BWnkdldA3xzGhnaConzMAuQzOnugytIvrP6GjkZVAYg="
···405480 version = "v0.0.0-20250620022241-b7579e27df2b"
406481 hash = "sha256-IsDTeuWLj4UkPO4NhWTvFeZ22WNtlxjoWiyAJh6zdig="
407482 [mod."golang.org/x/net"]
408408- version = "v0.41.0"
409409- hash = "sha256-6/pi8rNmGvBFzkJQXkXkMfL1Bjydhg3BgAMYDyQ/Uvg="
483483+ version = "v0.42.0"
484484+ hash = "sha256-YxileisIIez+kcAI+21kY5yk0iRuEqti2YdmS8jvP2s="
410485 [mod."golang.org/x/sync"]
411411- version = "v0.15.0"
412412- hash = "sha256-Jf4ehm8H8YAWY6mM151RI5CbG7JcOFtmN0AZx4bE3UE="
486486+ version = "v0.16.0"
487487+ hash = "sha256-sqKDRESeMzLe0jWGWltLZL/JIgrn0XaIeBWCzVN3Bks="
413488 [mod."golang.org/x/sys"]
414489 version = "v0.34.0"
415490 hash = "sha256-5rZ7p8IaGli5X1sJbfIKOcOEwY4c0yQhinJPh2EtK50="
491491+ [mod."golang.org/x/text"]
492492+ version = "v0.27.0"
493493+ hash = "sha256-VX0rOh6L3qIvquKSGjfZQFU8URNtGvkNvxE7OZtboW8="
416494 [mod."golang.org/x/time"]
417495 version = "v0.12.0"
418496 hash = "sha256-Cp3oxrCMH2wyxjzr5SHVmyhgaoUuSl56Uy00Q7DYEpw="
···420498 version = "v0.0.0-20240903120638-7835f813f4da"
421499 hash = "sha256-bE7CcrnAvryNvM26ieJGXqbAtuLwHaGcmtVMsVnksqo="
422500 [mod."google.golang.org/genproto/googleapis/api"]
423423- version = "v0.0.0-20250519155744-55703ea1f237"
424424- hash = "sha256-ivktx8ipWgWZgchh4FjKoWL7kU8kl/TtIavtZq/F5SQ="
501501+ version = "v0.0.0-20250603155806-513f23925822"
502502+ hash = "sha256-0CS432v9zVhkVLqFpZtxBX8rvVqP67lb7qQ3es7RqIU="
425503 [mod."google.golang.org/genproto/googleapis/rpc"]
426426- version = "v0.0.0-20250519155744-55703ea1f237"
504504+ version = "v0.0.0-20250603155806-513f23925822"
427505 hash = "sha256-WK7iDtAhH19NPe3TywTQlGjDawNaDKWnxhFL9PgVUwM="
428506 [mod."google.golang.org/grpc"]
429429- version = "v1.72.1"
430430- hash = "sha256-5JczomNvroKWtIYKDgXwaIaEfuNEK//MHPhJQiaxMXs="
507507+ version = "v1.73.0"
508508+ hash = "sha256-LfVlwip++q2DX70RU6CxoXglx1+r5l48DwlFD05G11c="
431509 [mod."google.golang.org/protobuf"]
432510 version = "v1.36.6"
433511 hash = "sha256-lT5qnefI5FDJnowz9PEkAGylH3+fE+A3DJDkAyy9RMc="
+14
nix/modules/appview.nix
···2727 default = "00000000000000000000000000000000";
2828 description = "Cookie secret";
2929 };
3030+ environmentFile = mkOption {
3131+ type = with types; nullOr path;
3232+ default = null;
3333+ example = "/etc/tangled-appview.env";
3434+ description = ''
3535+ Additional environment file as defined in {manpage}`systemd.exec(5)`.
3636+3737+ Sensitive secrets such as {env}`TANGLED_COOKIE_SECRET` may be
3838+ passed to the service without making them world readable in the
3939+ nix store.
4040+4141+ '';
4242+ };
3043 };
3144 };
3245···3952 ListenStream = "0.0.0.0:${toString cfg.port}";
4053 ExecStart = "${cfg.package}/bin/appview";
4154 Restart = "always";
5555+ EnvironmentFile = optional (cfg.environmentFile != null) cfg.environmentFile;
4256 };
43574458 environment = {
+54-20
nix/modules/knot.nix
···5858 };
5959 };
60606161+ motd = mkOption {
6262+ type = types.nullOr types.str;
6363+ default = null;
6464+ description = ''
6565+ Message of the day
6666+6767+ The contents are shown as-is; eg. you will want to add a newline if
6868+ setting a non-empty message since the knot won't do this for you.
6969+ '';
7070+ };
7171+7272+ motdFile = mkOption {
7373+ type = types.nullOr types.path;
7474+ default = null;
7575+ description = ''
7676+ File containing message of the day
7777+7878+ The contents are shown as-is; eg. you will want to add a newline if
7979+ setting a non-empty message since the knot won't do this for you.
8080+ '';
8181+ };
8282+6183 server = {
6284 listenAddr = mkOption {
6385 type = types.str;
···7193 description = "Internal address for inter-service communication";
7294 };
73957474- secretFile = mkOption {
7575- type = lib.types.path;
7676- example = "KNOT_SERVER_SECRET=<hash>";
7777- description = "File containing secret key provided by appview (required)";
9696+ owner = mkOption {
9797+ type = types.str;
9898+ example = "did:plc:qfpnj4og54vl56wngdriaxug";
9999+ description = "DID of owner (required)";
78100 };
7910180102 dbPath = mkOption {
···104126 cfg.package
105127 ];
106128107107- system.activationScripts.gitConfig = ''
108108- mkdir -p "${cfg.repo.scanPath}"
109109- chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.repo.scanPath}"
110110-111111- mkdir -p "${cfg.stateDir}/.config/git"
112112- cat > "${cfg.stateDir}/.config/git/config" << EOF
113113- [user]
114114- name = Git User
115115- email = git@example.com
116116- [receive]
117117- advertisePushOptions = true
118118- EOF
119119- chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.stateDir}"
120120- '';
121121-122129 users.users.${cfg.gitUser} = {
123130 isSystemUser = true;
124131 useDefaultShell = true;
···154161 description = "knot service";
155162 after = ["network.target" "sshd.service"];
156163 wantedBy = ["multi-user.target"];
164164+ enableStrictShellChecks = true;
165165+166166+ preStart = let
167167+ setMotd =
168168+ if cfg.motdFile != null && cfg.motd != null
169169+ then throw "motdFile and motd cannot be both set"
170170+ else ''
171171+ ${optionalString (cfg.motdFile != null) "cat ${cfg.motdFile} > ${cfg.stateDir}/motd"}
172172+ ${optionalString (cfg.motd != null) ''printf "${cfg.motd}" > ${cfg.stateDir}/motd''}
173173+ '';
174174+ in ''
175175+ mkdir -p "${cfg.repo.scanPath}"
176176+ chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.repo.scanPath}"
177177+178178+ mkdir -p "${cfg.stateDir}/.config/git"
179179+ cat > "${cfg.stateDir}/.config/git/config" << EOF
180180+ [user]
181181+ name = Git User
182182+ email = git@example.com
183183+ [receive]
184184+ advertisePushOptions = true
185185+ EOF
186186+ ${setMotd}
187187+ chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.stateDir}"
188188+ '';
189189+157190 serviceConfig = {
158191 User = cfg.gitUser;
192192+ PermissionsStartOnly = true;
159193 WorkingDirectory = cfg.stateDir;
160194 Environment = [
161195 "KNOT_REPO_SCAN_PATH=${cfg.repo.scanPath}"
···165199 "KNOT_SERVER_LISTEN_ADDR=${cfg.server.listenAddr}"
166200 "KNOT_SERVER_DB_PATH=${cfg.server.dbPath}"
167201 "KNOT_SERVER_HOSTNAME=${cfg.server.hostname}"
202202+ "KNOT_SERVER_OWNER=${cfg.server.owner}"
168203 ];
169169- EnvironmentFile = cfg.server.secretFile;
170204 ExecStart = "${cfg.package}/bin/knot server";
171205 Restart = "always";
172206 };
+40-2
nix/modules/spindle.nix
···5454 example = "did:plc:qfpnj4og54vl56wngdriaxug";
5555 description = "DID of owner (required)";
5656 };
5757+5858+ maxJobCount = mkOption {
5959+ type = types.int;
6060+ default = 2;
6161+ example = 5;
6262+ description = "Maximum number of concurrent jobs to run";
6363+ };
6464+6565+ queueSize = mkOption {
6666+ type = types.int;
6767+ default = 100;
6868+ example = 100;
6868+ description = "Maximum number of jobs to queue up";
7070+ };
7171+7272+ secrets = {
7373+ provider = mkOption {
7474+ type = types.str;
7575+ default = "sqlite";
7676+ description = "Backend to use for secret management, valid options are 'sqlite', and 'openbao'.";
7777+ };
7878+7979+ openbao = {
8080+ proxyAddr = mkOption {
8181+ type = types.str;
8282+ default = "http://127.0.0.1:8200";
8383+ };
8484+ mount = mkOption {
8585+ type = types.str;
8686+ default = "spindle";
8787+ };
8888+ };
8989+ };
5790 };
58915992 pipelines = {
···89122 "SPINDLE_SERVER_JETSTREAM=${cfg.server.jetstreamEndpoint}"
90123 "SPINDLE_SERVER_DEV=${lib.boolToString cfg.server.dev}"
91124 "SPINDLE_SERVER_OWNER=${cfg.server.owner}"
9292- "SPINDLE_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
9393- "SPINDLE_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
125125+ "SPINDLE_SERVER_MAX_JOB_COUNT=${toString cfg.server.maxJobCount}"
126126+ "SPINDLE_SERVER_QUEUE_SIZE=${toString cfg.server.queueSize}"
127127+ "SPINDLE_SERVER_SECRETS_PROVIDER=${cfg.server.secrets.provider}"
128128+ "SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=${cfg.server.secrets.openbao.proxyAddr}"
129129+ "SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=${cfg.server.secrets.openbao.mount}"
130130+ "SPINDLE_NIXERY_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
131131+ "SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
94132 ];
95133 ExecStart = "${cfg.package}/bin/spindle";
96134 Restart = "always";
+29
nix/pkgs/appview-static-files.nix
···11+{
22+ runCommandLocal,
33+ htmx-src,
44+ htmx-ws-src,
55+ lucide-src,
66+ inter-fonts-src,
77+ ibm-plex-mono-src,
88+ sqlite-lib,
99+ tailwindcss,
1010+ src,
1111+}:
1212+runCommandLocal "appview-static-files" {
1313+ # TODO(winter): figure out why this is even required after
1414+ # changing the libraries that the tailwindcss binary loads
1515+ sandboxProfile = ''
1616+ (allow file-read* (subpath "/System/Library/OpenSSL"))
1717+ '';
1818+} ''
1919+ mkdir -p $out/{fonts,icons} && cd $out
2020+ cp -f ${htmx-src} htmx.min.js
2121+ cp -f ${htmx-ws-src} htmx-ext-ws.min.js
2222+ cp -rf ${lucide-src}/*.svg icons/
2323+ cp -f ${inter-fonts-src}/web/InterVariable*.woff2 fonts/
2424+ cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 fonts/
2525+ cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 fonts/
2626+ # tailwindcss -c $src/tailwind.config.js -i $src/input.css -o tw.css won't work
2727+ # for whatever reason (produces broken css), so we are doing this instead
2828+ cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/tw.css
2929+''
···11{
22 nixpkgs,
33+ system,
44+ hostSystem,
35 self,
44-}:
55-nixpkgs.lib.nixosSystem {
66- system = "x86_64-linux";
77- modules = [
88- self.nixosModules.knot
99- self.nixosModules.spindle
1010- ({
1111- config,
1212- pkgs,
1313- ...
1414- }: {
1515- virtualisation = {
1616- memorySize = 2048;
1717- diskSize = 10 * 1024;
1818- cores = 2;
1919- forwardPorts = [
2020- # ssh
2121- {
2222- from = "host";
2323- host.port = 2222;
2424- guest.port = 22;
2525- }
2626- # knot
2727- {
2828- from = "host";
2929- host.port = 6000;
3030- guest.port = 6000;
3131- }
3232- # spindle
3333- {
3434- from = "host";
3535- host.port = 6555;
3636- guest.port = 6555;
3737- }
3838- ];
3939- };
4040- services.getty.autologinUser = "root";
4141- environment.systemPackages = with pkgs; [curl vim git];
4242- systemd.tmpfiles.rules = let
4343- u = config.services.tangled-knot.gitUser;
4444- g = config.services.tangled-knot.gitUser;
4545- in [
4646- "d /var/lib/knot 0770 ${u} ${g} - -" # Create the directory first
4747- "f+ /var/lib/knot/secret 0660 ${u} ${g} - KNOT_SERVER_SECRET=168c426fa6d9829fcbe85c96bdf144e800fb9737d6ca87f21acc543b1aa3e440"
4848- ];
4949- services.tangled-knot = {
5050- enable = true;
5151- server = {
5252- secretFile = "/var/lib/knot/secret";
5353- hostname = "localhost:6000";
5454- listenAddr = "0.0.0.0:6000";
66+}: let
77+ envVar = name: let
88+ var = builtins.getEnv name;
99+ in
1010+ if var == ""
1111+ then throw "\$${name} must be defined, see docs/hacking.md for more details"
1212+ else var;
1313+in
1414+ nixpkgs.lib.nixosSystem {
1515+ inherit system;
1616+ modules = [
1717+ self.nixosModules.knot
1818+ self.nixosModules.spindle
1919+ ({
2020+ lib,
2121+ config,
2222+ pkgs,
2323+ ...
2424+ }: {
2525+ virtualisation.vmVariant.virtualisation = {
2626+ host.pkgs = import nixpkgs {system = hostSystem;};
2727+2828+ graphics = false;
2929+ memorySize = 2048;
3030+ diskSize = 10 * 1024;
3131+ cores = 2;
3232+ forwardPorts = [
3333+ # ssh
3434+ {
3535+ from = "host";
3636+ host.port = 2222;
3737+ guest.port = 22;
3838+ }
3939+ # knot
4040+ {
4141+ from = "host";
4242+ host.port = 6000;
4343+ guest.port = 6000;
4444+ }
4545+ # spindle
4646+ {
4747+ from = "host";
4848+ host.port = 6555;
4949+ guest.port = 6555;
5050+ }
5151+ ];
5252+ sharedDirectories = {
5353+ # We can't use the 9p mounts directly for most of these
5454+ # as SQLite is incompatible with them. So instead we
5555+ # mount the shared directories to a different location
5656+ # and copy the contents around on service start/stop.
5757+ knotData = {
5858+ source = "$TANGLED_VM_DATA_DIR/knot";
5959+ target = "/mnt/knot-data";
6060+ };
6161+ spindleData = {
6262+ source = "$TANGLED_VM_DATA_DIR/spindle";
6363+ target = "/mnt/spindle-data";
6464+ };
6565+ spindleLogs = {
6666+ source = "$TANGLED_VM_DATA_DIR/spindle-logs";
6767+ target = "/var/log/spindle";
6868+ };
6969+ };
5570 };
5656- };
5757- services.tangled-spindle = {
5858- enable = true;
5959- server = {
6060- owner = "did:plc:qfpnj4og54vl56wngdriaxug";
6161- hostname = "localhost:6555";
6262- listenAddr = "0.0.0.0:6555";
6363- dev = true;
7171+ # This is fine because any and all ports that are forwarded to host are explicitly marked above, we don't need a separate guest firewall
7272+ networking.firewall.enable = false;
7373+ time.timeZone = "Europe/London";
7474+ services.getty.autologinUser = "root";
7575+ environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
7676+ services.tangled-knot = {
7777+ enable = true;
7878+ motd = "Welcome to the development knot!\n";
7979+ server = {
8080+ owner = envVar "TANGLED_VM_KNOT_OWNER";
8181+ hostname = "localhost:6000";
8282+ listenAddr = "0.0.0.0:6000";
8383+ };
8484+ };
8585+ services.tangled-spindle = {
8686+ enable = true;
8787+ server = {
8888+ owner = envVar "TANGLED_VM_SPINDLE_OWNER";
8989+ hostname = "localhost:6555";
9090+ listenAddr = "0.0.0.0:6555";
9191+ dev = true;
9292+ queueSize = 100;
9393+ maxJobCount = 2;
9494+ secrets = {
9595+ provider = "sqlite";
9696+ };
9797+ };
6498 };
6565- };
6666- })
6767- ];
6868-}
9999+ users = {
100100+ # So we don't have to deal with permission clashing between
101101+ # blank disk VMs and existing state
102102+ users.${config.services.tangled-knot.gitUser}.uid = 666;
103103+ groups.${config.services.tangled-knot.gitUser}.gid = 666;
104104+105105+ # TODO: separate spindle user
106106+ };
107107+ systemd.services = let
108108+ mkDataSyncScripts = source: target: {
109109+ enableStrictShellChecks = true;
110110+111111+ preStart = lib.mkBefore ''
112112+ mkdir -p ${target}
113113+ ${lib.getExe pkgs.rsync} -a ${source}/ ${target}
114114+ '';
115115+116116+ postStop = lib.mkAfter ''
117117+ ${lib.getExe pkgs.rsync} -a ${target}/ ${source}
118118+ '';
119119+120120+ serviceConfig.PermissionsStartOnly = true;
121121+ };
122122+ in {
123123+ knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
124124+ spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
125125+ };
126126+ })
127127+ ];
128128+ }
+1-1
patchutil/combinediff.go
···119119 // we have f1 and f2, combine them
120120 combined, err := combineFiles(f1, f2)
121121 if err != nil {
122122- fmt.Println(err)
122122+ // fmt.Println(err)
123123 }
124124125125 // combined can be nil commit 2 reverted all changes from commit 1