···11-// Folder-specific settings
22-//
33-// For a full list of overridable settings, and general information on folder-specific settings,
44-// see the documentation: https://zed.dev/docs/configuring-zed#settings-files
55-{
66- "languages": {
77- "HTML": {
88- "prettier": {
99- "format_on_save": false,
1010- "allowed": true,
1111- "parser": "go-template",
1212- "plugins": ["prettier-plugin-go-template"]
1313- }
1414- }
1515- }
1616-}
+571-1332
api/tangled/cbor_gen.go
···1202120212031203 return nil
12041204}
12051205-func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
12061206- if t == nil {
12071207- _, err := w.Write(cbg.CborNull)
12081208- return err
12091209- }
12101210-12111211- cw := cbg.NewCborWriter(w)
12121212- fieldCount := 3
12131213-12141214- if t.LangBreakdown == nil {
12151215- fieldCount--
12161216- }
12171217-12181218- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
12191219- return err
12201220- }
12211221-12221222- // t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
12231223- if len("commitCount") > 1000000 {
12241224- return xerrors.Errorf("Value in field \"commitCount\" was too long")
12251225- }
12261226-12271227- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
12281228- return err
12291229- }
12301230- if _, err := cw.WriteString(string("commitCount")); err != nil {
12311231- return err
12321232- }
12331233-12341234- if err := t.CommitCount.MarshalCBOR(cw); err != nil {
12351235- return err
12361236- }
12371237-12381238- // t.IsDefaultRef (bool) (bool)
12391239- if len("isDefaultRef") > 1000000 {
12401240- return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
12411241- }
12421242-12431243- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
12441244- return err
12451245- }
12461246- if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
12471247- return err
12481248- }
12491249-12501250- if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
12511251- return err
12521252- }
12531253-12541254- // t.LangBreakdown (tangled.GitRefUpdate_Meta_LangBreakdown) (struct)
12551255- if t.LangBreakdown != nil {
12561256-12571257- if len("langBreakdown") > 1000000 {
12581258- return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
12591259- }
12601260-12611261- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
12621262- return err
12631263- }
12641264- if _, err := cw.WriteString(string("langBreakdown")); err != nil {
12651265- return err
12661266- }
12671267-12681268- if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
12691269- return err
12701270- }
12711271- }
12721272- return nil
12731273-}
12741274-12751275-func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
12761276- *t = GitRefUpdate_Meta{}
12771277-12781278- cr := cbg.NewCborReader(r)
12791279-12801280- maj, extra, err := cr.ReadHeader()
12811281- if err != nil {
12821282- return err
12831283- }
12841284- defer func() {
12851285- if err == io.EOF {
12861286- err = io.ErrUnexpectedEOF
12871287- }
12881288- }()
12891289-12901290- if maj != cbg.MajMap {
12911291- return fmt.Errorf("cbor input should be of type map")
12921292- }
12931293-12941294- if extra > cbg.MaxLength {
12951295- return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
12961296- }
12971297-12981298- n := extra
12991299-13001300- nameBuf := make([]byte, 13)
13011301- for i := uint64(0); i < n; i++ {
13021302- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
13031303- if err != nil {
13041304- return err
13051305- }
13061306-13071307- if !ok {
13081308- // Field doesn't exist on this type, so ignore it
13091309- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
13101310- return err
13111311- }
13121312- continue
13131313- }
13141314-13151315- switch string(nameBuf[:nameLen]) {
13161316- // t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
13171317- case "commitCount":
13181318-13191319- {
13201320-13211321- b, err := cr.ReadByte()
13221322- if err != nil {
13231323- return err
13241324- }
13251325- if b != cbg.CborNull[0] {
13261326- if err := cr.UnreadByte(); err != nil {
13271327- return err
13281328- }
13291329- t.CommitCount = new(GitRefUpdate_Meta_CommitCount)
13301330- if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
13311331- return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
13321332- }
13331333- }
13341334-13351335- }
13361336- // t.IsDefaultRef (bool) (bool)
13371337- case "isDefaultRef":
13381338-13391339- maj, extra, err = cr.ReadHeader()
13401340- if err != nil {
13411341- return err
13421342- }
13431343- if maj != cbg.MajOther {
13441344- return fmt.Errorf("booleans must be major type 7")
13451345- }
13461346- switch extra {
13471347- case 20:
13481348- t.IsDefaultRef = false
13491349- case 21:
13501350- t.IsDefaultRef = true
13511351- default:
13521352- return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
13531353- }
13541354- // t.LangBreakdown (tangled.GitRefUpdate_Meta_LangBreakdown) (struct)
13551355- case "langBreakdown":
13561356-13571357- {
13581358-13591359- b, err := cr.ReadByte()
13601360- if err != nil {
13611361- return err
13621362- }
13631363- if b != cbg.CborNull[0] {
13641364- if err := cr.UnreadByte(); err != nil {
13651365- return err
13661366- }
13671367- t.LangBreakdown = new(GitRefUpdate_Meta_LangBreakdown)
13681368- if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
13691369- return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
13701370- }
13711371- }
13721372-13731373- }
13741374-13751375- default:
13761376- // Field doesn't exist on this type, so ignore it
13771377- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
13781378- return err
13791379- }
13801380- }
13811381- }
13821382-13831383- return nil
13841384-}
13851385-func (t *GitRefUpdate_Meta_CommitCount) MarshalCBOR(w io.Writer) error {
12051205+func (t *GitRefUpdate_CommitCountBreakdown) MarshalCBOR(w io.Writer) error {
13861206 if t == nil {
13871207 _, err := w.Write(cbg.CborNull)
13881208 return err
···13991219 return err
14001220 }
1401122114021402- // t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
12221222+ // t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
14031223 if t.ByEmail != nil {
1404122414051225 if len("byEmail") > 1000000 {
···14301250 return nil
14311251}
1432125214331433-func (t *GitRefUpdate_Meta_CommitCount) UnmarshalCBOR(r io.Reader) (err error) {
14341434- *t = GitRefUpdate_Meta_CommitCount{}
12531253+func (t *GitRefUpdate_CommitCountBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
12541254+ *t = GitRefUpdate_CommitCountBreakdown{}
1435125514361256 cr := cbg.NewCborReader(r)
14371257···14501270 }
1451127114521272 if extra > cbg.MaxLength {
14531453- return fmt.Errorf("GitRefUpdate_Meta_CommitCount: map struct too large (%d)", extra)
12731273+ return fmt.Errorf("GitRefUpdate_CommitCountBreakdown: map struct too large (%d)", extra)
14541274 }
1455127514561276 n := extra
···14711291 }
1472129214731293 switch string(nameBuf[:nameLen]) {
14741474- // t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
12941294+ // t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
14751295 case "byEmail":
1476129614771297 maj, extra, err = cr.ReadHeader()
···14881308 }
1489130914901310 if extra > 0 {
14911491- t.ByEmail = make([]*GitRefUpdate_Meta_CommitCount_ByEmail_Elem, extra)
13111311+ t.ByEmail = make([]*GitRefUpdate_IndividualEmailCommitCount, extra)
14921312 }
1493131314941314 for i := 0; i < int(extra); i++ {
···15101330 if err := cr.UnreadByte(); err != nil {
15111331 return err
15121332 }
15131513- t.ByEmail[i] = new(GitRefUpdate_Meta_CommitCount_ByEmail_Elem)
13331333+ t.ByEmail[i] = new(GitRefUpdate_IndividualEmailCommitCount)
15141334 if err := t.ByEmail[i].UnmarshalCBOR(cr); err != nil {
15151335 return xerrors.Errorf("unmarshaling t.ByEmail[i] pointer: %w", err)
15161336 }
···1531135115321352 return nil
15331353}
15341534-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) MarshalCBOR(w io.Writer) error {
13541354+func (t *GitRefUpdate_IndividualEmailCommitCount) MarshalCBOR(w io.Writer) error {
15351355 if t == nil {
15361356 _, err := w.Write(cbg.CborNull)
15371357 return err
···15901410 return nil
15911411}
1592141215931593-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) UnmarshalCBOR(r io.Reader) (err error) {
15941594- *t = GitRefUpdate_Meta_CommitCount_ByEmail_Elem{}
14131413+func (t *GitRefUpdate_IndividualEmailCommitCount) UnmarshalCBOR(r io.Reader) (err error) {
14141414+ *t = GitRefUpdate_IndividualEmailCommitCount{}
1595141515961416 cr := cbg.NewCborReader(r)
15971417···16101430 }
1611143116121432 if extra > cbg.MaxLength {
16131613- return fmt.Errorf("GitRefUpdate_Meta_CommitCount_ByEmail_Elem: map struct too large (%d)", extra)
14331433+ return fmt.Errorf("GitRefUpdate_IndividualEmailCommitCount: map struct too large (%d)", extra)
16141434 }
1615143516161436 n := extra
···1679149916801500 return nil
16811501}
16821682-func (t *GitRefUpdate_Meta_LangBreakdown) MarshalCBOR(w io.Writer) error {
15021502+func (t *GitRefUpdate_LangBreakdown) MarshalCBOR(w io.Writer) error {
16831503 if t == nil {
16841504 _, err := w.Write(cbg.CborNull)
16851505 return err
···16961516 return err
16971517 }
1698151816991699- // t.Inputs ([]*tangled.GitRefUpdate_Pair) (slice)
15191519+ // t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
17001520 if t.Inputs != nil {
1701152117021522 if len("inputs") > 1000000 {
···17271547 return nil
17281548}
1729154917301730-func (t *GitRefUpdate_Meta_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
17311731- *t = GitRefUpdate_Meta_LangBreakdown{}
15501550+func (t *GitRefUpdate_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
15511551+ *t = GitRefUpdate_LangBreakdown{}
1732155217331553 cr := cbg.NewCborReader(r)
17341554···17471567 }
1748156817491569 if extra > cbg.MaxLength {
17501750- return fmt.Errorf("GitRefUpdate_Meta_LangBreakdown: map struct too large (%d)", extra)
15701570+ return fmt.Errorf("GitRefUpdate_LangBreakdown: map struct too large (%d)", extra)
17511571 }
1752157217531573 n := extra
···17681588 }
1769158917701590 switch string(nameBuf[:nameLen]) {
17711771- // t.Inputs ([]*tangled.GitRefUpdate_Pair) (slice)
15911591+ // t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
17721592 case "inputs":
1773159317741594 maj, extra, err = cr.ReadHeader()
···17851605 }
1786160617871607 if extra > 0 {
17881788- t.Inputs = make([]*GitRefUpdate_Pair, extra)
16081608+ t.Inputs = make([]*GitRefUpdate_IndividualLanguageSize, extra)
17891609 }
1790161017911611 for i := 0; i < int(extra); i++ {
···18071627 if err := cr.UnreadByte(); err != nil {
18081628 return err
18091629 }
18101810- t.Inputs[i] = new(GitRefUpdate_Pair)
16301630+ t.Inputs[i] = new(GitRefUpdate_IndividualLanguageSize)
18111631 if err := t.Inputs[i].UnmarshalCBOR(cr); err != nil {
18121632 return xerrors.Errorf("unmarshaling t.Inputs[i] pointer: %w", err)
18131633 }
···1828164818291649 return nil
18301650}
18311831-func (t *GitRefUpdate_Pair) MarshalCBOR(w io.Writer) error {
16511651+func (t *GitRefUpdate_IndividualLanguageSize) MarshalCBOR(w io.Writer) error {
18321652 if t == nil {
18331653 _, err := w.Write(cbg.CborNull)
18341654 return err
···18881708 return nil
18891709}
1890171018911891-func (t *GitRefUpdate_Pair) UnmarshalCBOR(r io.Reader) (err error) {
18921892- *t = GitRefUpdate_Pair{}
17111711+func (t *GitRefUpdate_IndividualLanguageSize) UnmarshalCBOR(r io.Reader) (err error) {
17121712+ *t = GitRefUpdate_IndividualLanguageSize{}
1893171318941714 cr := cbg.NewCborReader(r)
18951715···19081728 }
1909172919101730 if extra > cbg.MaxLength {
19111911- return fmt.Errorf("GitRefUpdate_Pair: map struct too large (%d)", extra)
17311731+ return fmt.Errorf("GitRefUpdate_IndividualLanguageSize: map struct too large (%d)", extra)
19121732 }
1913173319141734 n := extra
···1977179719781798 return nil
19791799}
18001800+func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
18011801+ if t == nil {
18021802+ _, err := w.Write(cbg.CborNull)
18031803+ return err
18041804+ }
18051805+18061806+ cw := cbg.NewCborWriter(w)
18071807+ fieldCount := 3
18081808+18091809+ if t.LangBreakdown == nil {
18101810+ fieldCount--
18111811+ }
18121812+18131813+ if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
18141814+ return err
18151815+ }
18161816+18171817+ // t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
18181818+ if len("commitCount") > 1000000 {
18191819+ return xerrors.Errorf("Value in field \"commitCount\" was too long")
18201820+ }
18211821+18221822+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
18231823+ return err
18241824+ }
18251825+ if _, err := cw.WriteString(string("commitCount")); err != nil {
18261826+ return err
18271827+ }
18281828+18291829+ if err := t.CommitCount.MarshalCBOR(cw); err != nil {
18301830+ return err
18311831+ }
18321832+18331833+ // t.IsDefaultRef (bool) (bool)
18341834+ if len("isDefaultRef") > 1000000 {
18351835+ return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
18361836+ }
18371837+18381838+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
18391839+ return err
18401840+ }
18411841+ if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
18421842+ return err
18431843+ }
18441844+18451845+ if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
18461846+ return err
18471847+ }
18481848+18491849+ // t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
18501850+ if t.LangBreakdown != nil {
18511851+18521852+ if len("langBreakdown") > 1000000 {
18531853+ return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
18541854+ }
18551855+18561856+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
18571857+ return err
18581858+ }
18591859+ if _, err := cw.WriteString(string("langBreakdown")); err != nil {
18601860+ return err
18611861+ }
18621862+18631863+ if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
18641864+ return err
18651865+ }
18661866+ }
18671867+ return nil
18681868+}
18691869+18701870+func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
18711871+ *t = GitRefUpdate_Meta{}
18721872+18731873+ cr := cbg.NewCborReader(r)
18741874+18751875+ maj, extra, err := cr.ReadHeader()
18761876+ if err != nil {
18771877+ return err
18781878+ }
18791879+ defer func() {
18801880+ if err == io.EOF {
18811881+ err = io.ErrUnexpectedEOF
18821882+ }
18831883+ }()
18841884+18851885+ if maj != cbg.MajMap {
18861886+ return fmt.Errorf("cbor input should be of type map")
18871887+ }
18881888+18891889+ if extra > cbg.MaxLength {
18901890+ return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
18911891+ }
18921892+18931893+ n := extra
18941894+18951895+ nameBuf := make([]byte, 13)
18961896+ for i := uint64(0); i < n; i++ {
18971897+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
18981898+ if err != nil {
18991899+ return err
19001900+ }
19011901+19021902+ if !ok {
19031903+ // Field doesn't exist on this type, so ignore it
19041904+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
19051905+ return err
19061906+ }
19071907+ continue
19081908+ }
19091909+19101910+ switch string(nameBuf[:nameLen]) {
19111911+ // t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
19121912+ case "commitCount":
19131913+19141914+ {
19151915+19161916+ b, err := cr.ReadByte()
19171917+ if err != nil {
19181918+ return err
19191919+ }
19201920+ if b != cbg.CborNull[0] {
19211921+ if err := cr.UnreadByte(); err != nil {
19221922+ return err
19231923+ }
19241924+ t.CommitCount = new(GitRefUpdate_CommitCountBreakdown)
19251925+ if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
19261926+ return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
19271927+ }
19281928+ }
19291929+19301930+ }
19311931+ // t.IsDefaultRef (bool) (bool)
19321932+ case "isDefaultRef":
19331933+19341934+ maj, extra, err = cr.ReadHeader()
19351935+ if err != nil {
19361936+ return err
19371937+ }
19381938+ if maj != cbg.MajOther {
19391939+ return fmt.Errorf("booleans must be major type 7")
19401940+ }
19411941+ switch extra {
19421942+ case 20:
19431943+ t.IsDefaultRef = false
19441944+ case 21:
19451945+ t.IsDefaultRef = true
19461946+ default:
19471947+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
19481948+ }
19491949+ // t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
19501950+ case "langBreakdown":
19511951+19521952+ {
19531953+19541954+ b, err := cr.ReadByte()
19551955+ if err != nil {
19561956+ return err
19571957+ }
19581958+ if b != cbg.CborNull[0] {
19591959+ if err := cr.UnreadByte(); err != nil {
19601960+ return err
19611961+ }
19621962+ t.LangBreakdown = new(GitRefUpdate_LangBreakdown)
19631963+ if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
19641964+ return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
19651965+ }
19661966+ }
19671967+19681968+ }
19691969+19701970+ default:
19711971+ // Field doesn't exist on this type, so ignore it
19721972+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
19731973+ return err
19741974+ }
19751975+ }
19761976+ }
19771977+19781978+ return nil
19791979+}
19801980func (t *GraphFollow) MarshalCBOR(w io.Writer) error {
19811981 if t == nil {
19821982 _, err := w.Write(cbg.CborNull)
···2141214121422142 return nil
21432143}
21442144+func (t *Knot) MarshalCBOR(w io.Writer) error {
21452145+ if t == nil {
21462146+ _, err := w.Write(cbg.CborNull)
21472147+ return err
21482148+ }
21492149+21502150+ cw := cbg.NewCborWriter(w)
21512151+21522152+ if _, err := cw.Write([]byte{162}); err != nil {
21532153+ return err
21542154+ }
21552155+21562156+ // t.LexiconTypeID (string) (string)
21572157+ if len("$type") > 1000000 {
21582158+ return xerrors.Errorf("Value in field \"$type\" was too long")
21592159+ }
21602160+21612161+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
21622162+ return err
21632163+ }
21642164+ if _, err := cw.WriteString(string("$type")); err != nil {
21652165+ return err
21662166+ }
21672167+21682168+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.knot"))); err != nil {
21692169+ return err
21702170+ }
21712171+ if _, err := cw.WriteString(string("sh.tangled.knot")); err != nil {
21722172+ return err
21732173+ }
21742174+21752175+ // t.CreatedAt (string) (string)
21762176+ if len("createdAt") > 1000000 {
21772177+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
21782178+ }
21792179+21802180+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
21812181+ return err
21822182+ }
21832183+ if _, err := cw.WriteString(string("createdAt")); err != nil {
21842184+ return err
21852185+ }
21862186+21872187+ if len(t.CreatedAt) > 1000000 {
21882188+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
21892189+ }
21902190+21912191+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
21922192+ return err
21932193+ }
21942194+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
21952195+ return err
21962196+ }
21972197+ return nil
21982198+}
21992199+22002200+func (t *Knot) UnmarshalCBOR(r io.Reader) (err error) {
22012201+ *t = Knot{}
22022202+22032203+ cr := cbg.NewCborReader(r)
22042204+22052205+ maj, extra, err := cr.ReadHeader()
22062206+ if err != nil {
22072207+ return err
22082208+ }
22092209+ defer func() {
22102210+ if err == io.EOF {
22112211+ err = io.ErrUnexpectedEOF
22122212+ }
22132213+ }()
22142214+22152215+ if maj != cbg.MajMap {
22162216+ return fmt.Errorf("cbor input should be of type map")
22172217+ }
22182218+22192219+ if extra > cbg.MaxLength {
22202220+ return fmt.Errorf("Knot: map struct too large (%d)", extra)
22212221+ }
22222222+22232223+ n := extra
22242224+22252225+ nameBuf := make([]byte, 9)
22262226+ for i := uint64(0); i < n; i++ {
22272227+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
22282228+ if err != nil {
22292229+ return err
22302230+ }
22312231+22322232+ if !ok {
22332233+ // Field doesn't exist on this type, so ignore it
22342234+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
22352235+ return err
22362236+ }
22372237+ continue
22382238+ }
22392239+22402240+ switch string(nameBuf[:nameLen]) {
22412241+ // t.LexiconTypeID (string) (string)
22422242+ case "$type":
22432243+22442244+ {
22452245+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
22462246+ if err != nil {
22472247+ return err
22482248+ }
22492249+22502250+ t.LexiconTypeID = string(sval)
22512251+ }
22522252+ // t.CreatedAt (string) (string)
22532253+ case "createdAt":
22542254+22552255+ {
22562256+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
22572257+ if err != nil {
22582258+ return err
22592259+ }
22602260+22612261+ t.CreatedAt = string(sval)
22622262+ }
22632263+22642264+ default:
22652265+ // Field doesn't exist on this type, so ignore it
22662266+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
22672267+ return err
22682268+ }
22692269+ }
22702270+ }
22712271+22722272+ return nil
22732273+}
21442274func (t *KnotMember) MarshalCBOR(w io.Writer) error {
21452275 if t == nil {
21462276 _, err := w.Write(cbg.CborNull)
···27162846 t.Submodules = true
27172847 default:
27182848 return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
27192719- }
27202720-27212721- default:
27222722- // Field doesn't exist on this type, so ignore it
27232723- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
27242724- return err
27252725- }
27262726- }
27272727- }
27282728-27292729- return nil
27302730-}
27312731-func (t *Pipeline_Dependency) MarshalCBOR(w io.Writer) error {
27322732- if t == nil {
27332733- _, err := w.Write(cbg.CborNull)
27342734- return err
27352735- }
27362736-27372737- cw := cbg.NewCborWriter(w)
27382738-27392739- if _, err := cw.Write([]byte{162}); err != nil {
27402740- return err
27412741- }
27422742-27432743- // t.Packages ([]string) (slice)
27442744- if len("packages") > 1000000 {
27452745- return xerrors.Errorf("Value in field \"packages\" was too long")
27462746- }
27472747-27482748- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("packages"))); err != nil {
27492749- return err
27502750- }
27512751- if _, err := cw.WriteString(string("packages")); err != nil {
27522752- return err
27532753- }
27542754-27552755- if len(t.Packages) > 8192 {
27562756- return xerrors.Errorf("Slice value in field t.Packages was too long")
27572757- }
27582758-27592759- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Packages))); err != nil {
27602760- return err
27612761- }
27622762- for _, v := range t.Packages {
27632763- if len(v) > 1000000 {
27642764- return xerrors.Errorf("Value in field v was too long")
27652765- }
27662766-27672767- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
27682768- return err
27692769- }
27702770- if _, err := cw.WriteString(string(v)); err != nil {
27712771- return err
27722772- }
27732773-27742774- }
27752775-27762776- // t.Registry (string) (string)
27772777- if len("registry") > 1000000 {
27782778- return xerrors.Errorf("Value in field \"registry\" was too long")
27792779- }
27802780-27812781- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("registry"))); err != nil {
27822782- return err
27832783- }
27842784- if _, err := cw.WriteString(string("registry")); err != nil {
27852785- return err
27862786- }
27872787-27882788- if len(t.Registry) > 1000000 {
27892789- return xerrors.Errorf("Value in field t.Registry was too long")
27902790- }
27912791-27922792- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Registry))); err != nil {
27932793- return err
27942794- }
27952795- if _, err := cw.WriteString(string(t.Registry)); err != nil {
27962796- return err
27972797- }
27982798- return nil
27992799-}
28002800-28012801-func (t *Pipeline_Dependency) UnmarshalCBOR(r io.Reader) (err error) {
28022802- *t = Pipeline_Dependency{}
28032803-28042804- cr := cbg.NewCborReader(r)
28052805-28062806- maj, extra, err := cr.ReadHeader()
28072807- if err != nil {
28082808- return err
28092809- }
28102810- defer func() {
28112811- if err == io.EOF {
28122812- err = io.ErrUnexpectedEOF
28132813- }
28142814- }()
28152815-28162816- if maj != cbg.MajMap {
28172817- return fmt.Errorf("cbor input should be of type map")
28182818- }
28192819-28202820- if extra > cbg.MaxLength {
28212821- return fmt.Errorf("Pipeline_Dependency: map struct too large (%d)", extra)
28222822- }
28232823-28242824- n := extra
28252825-28262826- nameBuf := make([]byte, 8)
28272827- for i := uint64(0); i < n; i++ {
28282828- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
28292829- if err != nil {
28302830- return err
28312831- }
28322832-28332833- if !ok {
28342834- // Field doesn't exist on this type, so ignore it
28352835- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
28362836- return err
28372837- }
28382838- continue
28392839- }
28402840-28412841- switch string(nameBuf[:nameLen]) {
28422842- // t.Packages ([]string) (slice)
28432843- case "packages":
28442844-28452845- maj, extra, err = cr.ReadHeader()
28462846- if err != nil {
28472847- return err
28482848- }
28492849-28502850- if extra > 8192 {
28512851- return fmt.Errorf("t.Packages: array too large (%d)", extra)
28522852- }
28532853-28542854- if maj != cbg.MajArray {
28552855- return fmt.Errorf("expected cbor array")
28562856- }
28572857-28582858- if extra > 0 {
28592859- t.Packages = make([]string, extra)
28602860- }
28612861-28622862- for i := 0; i < int(extra); i++ {
28632863- {
28642864- var maj byte
28652865- var extra uint64
28662866- var err error
28672867- _ = maj
28682868- _ = extra
28692869- _ = err
28702870-28712871- {
28722872- sval, err := cbg.ReadStringWithMax(cr, 1000000)
28732873- if err != nil {
28742874- return err
28752875- }
28762876-28772877- t.Packages[i] = string(sval)
28782878- }
28792879-28802880- }
28812881- }
28822882- // t.Registry (string) (string)
28832883- case "registry":
28842884-28852885- {
28862886- sval, err := cbg.ReadStringWithMax(cr, 1000000)
28872887- if err != nil {
28882888- return err
28892889- }
28902890-28912891- t.Registry = string(sval)
28922849 }
2893285028942851 default:
···3916387339173874 return nil
39183875}
39193919-func (t *Pipeline_Step) MarshalCBOR(w io.Writer) error {
39203920- if t == nil {
39213921- _, err := w.Write(cbg.CborNull)
39223922- return err
39233923- }
39243924-39253925- cw := cbg.NewCborWriter(w)
39263926- fieldCount := 3
39273927-39283928- if t.Environment == nil {
39293929- fieldCount--
39303930- }
39313931-39323932- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
39333933- return err
39343934- }
39353935-39363936- // t.Name (string) (string)
39373937- if len("name") > 1000000 {
39383938- return xerrors.Errorf("Value in field \"name\" was too long")
39393939- }
39403940-39413941- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("name"))); err != nil {
39423942- return err
39433943- }
39443944- if _, err := cw.WriteString(string("name")); err != nil {
39453945- return err
39463946- }
39473947-39483948- if len(t.Name) > 1000000 {
39493949- return xerrors.Errorf("Value in field t.Name was too long")
39503950- }
39513951-39523952- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
39533953- return err
39543954- }
39553955- if _, err := cw.WriteString(string(t.Name)); err != nil {
39563956- return err
39573957- }
39583958-39593959- // t.Command (string) (string)
39603960- if len("command") > 1000000 {
39613961- return xerrors.Errorf("Value in field \"command\" was too long")
39623962- }
39633963-39643964- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("command"))); err != nil {
39653965- return err
39663966- }
39673967- if _, err := cw.WriteString(string("command")); err != nil {
39683968- return err
39693969- }
39703970-39713971- if len(t.Command) > 1000000 {
39723972- return xerrors.Errorf("Value in field t.Command was too long")
39733973- }
39743974-39753975- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Command))); err != nil {
39763976- return err
39773977- }
39783978- if _, err := cw.WriteString(string(t.Command)); err != nil {
39793979- return err
39803980- }
39813981-39823982- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
39833983- if t.Environment != nil {
39843984-39853985- if len("environment") > 1000000 {
39863986- return xerrors.Errorf("Value in field \"environment\" was too long")
39873987- }
39883988-39893989- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
39903990- return err
39913991- }
39923992- if _, err := cw.WriteString(string("environment")); err != nil {
39933993- return err
39943994- }
39953995-39963996- if len(t.Environment) > 8192 {
39973997- return xerrors.Errorf("Slice value in field t.Environment was too long")
39983998- }
39993999-40004000- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
40014001- return err
40024002- }
40034003- for _, v := range t.Environment {
40044004- if err := v.MarshalCBOR(cw); err != nil {
40054005- return err
40064006- }
40074007-40084008- }
40094009- }
40104010- return nil
40114011-}
40124012-40134013-func (t *Pipeline_Step) UnmarshalCBOR(r io.Reader) (err error) {
40144014- *t = Pipeline_Step{}
40154015-40164016- cr := cbg.NewCborReader(r)
40174017-40184018- maj, extra, err := cr.ReadHeader()
40194019- if err != nil {
40204020- return err
40214021- }
40224022- defer func() {
40234023- if err == io.EOF {
40244024- err = io.ErrUnexpectedEOF
40254025- }
40264026- }()
40274027-40284028- if maj != cbg.MajMap {
40294029- return fmt.Errorf("cbor input should be of type map")
40304030- }
40314031-40324032- if extra > cbg.MaxLength {
40334033- return fmt.Errorf("Pipeline_Step: map struct too large (%d)", extra)
40344034- }
40354035-40364036- n := extra
40374037-40384038- nameBuf := make([]byte, 11)
40394039- for i := uint64(0); i < n; i++ {
40404040- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
40414041- if err != nil {
40424042- return err
40434043- }
40444044-40454045- if !ok {
40464046- // Field doesn't exist on this type, so ignore it
40474047- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
40484048- return err
40494049- }
40504050- continue
40514051- }
40524052-40534053- switch string(nameBuf[:nameLen]) {
40544054- // t.Name (string) (string)
40554055- case "name":
40564056-40574057- {
40584058- sval, err := cbg.ReadStringWithMax(cr, 1000000)
40594059- if err != nil {
40604060- return err
40614061- }
40624062-40634063- t.Name = string(sval)
40644064- }
40654065- // t.Command (string) (string)
40664066- case "command":
40674067-40684068- {
40694069- sval, err := cbg.ReadStringWithMax(cr, 1000000)
40704070- if err != nil {
40714071- return err
40724072- }
40734073-40744074- t.Command = string(sval)
40754075- }
40764076- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
40774077- case "environment":
40784078-40794079- maj, extra, err = cr.ReadHeader()
40804080- if err != nil {
40814081- return err
40824082- }
40834083-40844084- if extra > 8192 {
40854085- return fmt.Errorf("t.Environment: array too large (%d)", extra)
40864086- }
40874087-40884088- if maj != cbg.MajArray {
40894089- return fmt.Errorf("expected cbor array")
40904090- }
40914091-40924092- if extra > 0 {
40934093- t.Environment = make([]*Pipeline_Pair, extra)
40944094- }
40954095-40964096- for i := 0; i < int(extra); i++ {
40974097- {
40984098- var maj byte
40994099- var extra uint64
41004100- var err error
41014101- _ = maj
41024102- _ = extra
41034103- _ = err
41044104-41054105- {
41064106-41074107- b, err := cr.ReadByte()
41084108- if err != nil {
41094109- return err
41104110- }
41114111- if b != cbg.CborNull[0] {
41124112- if err := cr.UnreadByte(); err != nil {
41134113- return err
41144114- }
41154115- t.Environment[i] = new(Pipeline_Pair)
41164116- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
41174117- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
41184118- }
41194119- }
41204120-41214121- }
41224122-41234123- }
41244124- }
41254125-41264126- default:
41274127- // Field doesn't exist on this type, so ignore it
41284128- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
41294129- return err
41304130- }
41314131- }
41324132- }
41334133-41344134- return nil
41354135-}
41363876func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
41373877 if t == nil {
41383878 _, err := w.Write(cbg.CborNull)
···4609434946104350 cw := cbg.NewCborWriter(w)
4611435146124612- if _, err := cw.Write([]byte{165}); err != nil {
43524352+ if _, err := cw.Write([]byte{164}); err != nil {
43534353+ return err
43544354+ }
43554355+43564356+ // t.Raw (string) (string)
43574357+ if len("raw") > 1000000 {
43584358+ return xerrors.Errorf("Value in field \"raw\" was too long")
43594359+ }
43604360+43614361+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("raw"))); err != nil {
43624362+ return err
43634363+ }
43644364+ if _, err := cw.WriteString(string("raw")); err != nil {
43654365+ return err
43664366+ }
43674367+43684368+ if len(t.Raw) > 1000000 {
43694369+ return xerrors.Errorf("Value in field t.Raw was too long")
43704370+ }
43714371+43724372+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Raw))); err != nil {
43734373+ return err
43744374+ }
43754375+ if _, err := cw.WriteString(string(t.Raw)); err != nil {
46134376 return err
46144377 }
46154378···46524415 return err
46534416 }
4654441746554655- // t.Steps ([]*tangled.Pipeline_Step) (slice)
46564656- if len("steps") > 1000000 {
46574657- return xerrors.Errorf("Value in field \"steps\" was too long")
46584658- }
46594659-46604660- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("steps"))); err != nil {
46614661- return err
46624662- }
46634663- if _, err := cw.WriteString(string("steps")); err != nil {
46644664- return err
46654665- }
46664666-46674667- if len(t.Steps) > 8192 {
46684668- return xerrors.Errorf("Slice value in field t.Steps was too long")
46694669- }
46704670-46714671- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Steps))); err != nil {
46724672- return err
46734673- }
46744674- for _, v := range t.Steps {
46754675- if err := v.MarshalCBOR(cw); err != nil {
46764676- return err
46774677- }
46784678-46794679- }
46804680-46814681- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
46824682- if len("environment") > 1000000 {
46834683- return xerrors.Errorf("Value in field \"environment\" was too long")
44184418+ // t.Engine (string) (string)
44194419+ if len("engine") > 1000000 {
44204420+ return xerrors.Errorf("Value in field \"engine\" was too long")
46844421 }
4685442246864686- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
44234423+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("engine"))); err != nil {
46874424 return err
46884425 }
46894689- if _, err := cw.WriteString(string("environment")); err != nil {
44264426+ if _, err := cw.WriteString(string("engine")); err != nil {
46904427 return err
46914428 }
4692442946934693- if len(t.Environment) > 8192 {
46944694- return xerrors.Errorf("Slice value in field t.Environment was too long")
46954695- }
46964696-46974697- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
46984698- return err
46994699- }
47004700- for _, v := range t.Environment {
47014701- if err := v.MarshalCBOR(cw); err != nil {
47024702- return err
47034703- }
47044704-44304430+ if len(t.Engine) > 1000000 {
44314431+ return xerrors.Errorf("Value in field t.Engine was too long")
47054432 }
4706443347074707- // t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
47084708- if len("dependencies") > 1000000 {
47094709- return xerrors.Errorf("Value in field \"dependencies\" was too long")
47104710- }
47114711-47124712- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependencies"))); err != nil {
47134713- return err
47144714- }
47154715- if _, err := cw.WriteString(string("dependencies")); err != nil {
44344434+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Engine))); err != nil {
47164435 return err
47174436 }
47184718-47194719- if len(t.Dependencies) > 8192 {
47204720- return xerrors.Errorf("Slice value in field t.Dependencies was too long")
47214721- }
47224722-47234723- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Dependencies))); err != nil {
44374437+ if _, err := cw.WriteString(string(t.Engine)); err != nil {
47244438 return err
47254725- }
47264726- for _, v := range t.Dependencies {
47274727- if err := v.MarshalCBOR(cw); err != nil {
47284728- return err
47294729- }
47304730-47314439 }
47324440 return nil
47334441}
···4757446547584466 n := extra
4759446747604760- nameBuf := make([]byte, 12)
44684468+ nameBuf := make([]byte, 6)
47614469 for i := uint64(0); i < n; i++ {
47624470 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
47634471 if err != nil {
···47734481 }
4774448247754483 switch string(nameBuf[:nameLen]) {
47764776- // t.Name (string) (string)
44844484+ // t.Raw (string) (string)
44854485+ case "raw":
44864486+44874487+ {
44884488+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
44894489+ if err != nil {
44904490+ return err
44914491+ }
44924492+44934493+ t.Raw = string(sval)
44944494+ }
44954495+ // t.Name (string) (string)
47774496 case "name":
4778449747794498 {
···48044523 }
4805452448064525 }
48074807- // t.Steps ([]*tangled.Pipeline_Step) (slice)
48084808- case "steps":
48094809-48104810- maj, extra, err = cr.ReadHeader()
48114811- if err != nil {
48124812- return err
48134813- }
48144814-48154815- if extra > 8192 {
48164816- return fmt.Errorf("t.Steps: array too large (%d)", extra)
48174817- }
48184818-48194819- if maj != cbg.MajArray {
48204820- return fmt.Errorf("expected cbor array")
48214821- }
48224822-48234823- if extra > 0 {
48244824- t.Steps = make([]*Pipeline_Step, extra)
48254825- }
45264526+ // t.Engine (string) (string)
45274527+ case "engine":
4826452848274827- for i := 0; i < int(extra); i++ {
48284828- {
48294829- var maj byte
48304830- var extra uint64
48314831- var err error
48324832- _ = maj
48334833- _ = extra
48344834- _ = err
48354835-48364836- {
48374837-48384838- b, err := cr.ReadByte()
48394839- if err != nil {
48404840- return err
48414841- }
48424842- if b != cbg.CborNull[0] {
48434843- if err := cr.UnreadByte(); err != nil {
48444844- return err
48454845- }
48464846- t.Steps[i] = new(Pipeline_Step)
48474847- if err := t.Steps[i].UnmarshalCBOR(cr); err != nil {
48484848- return xerrors.Errorf("unmarshaling t.Steps[i] pointer: %w", err)
48494849- }
48504850- }
48514851-48524852- }
48534853-45294529+ {
45304530+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
45314531+ if err != nil {
45324532+ return err
48544533 }
48554855- }
48564856- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
48574857- case "environment":
4858453448594859- maj, extra, err = cr.ReadHeader()
48604860- if err != nil {
48614861- return err
48624862- }
48634863-48644864- if extra > 8192 {
48654865- return fmt.Errorf("t.Environment: array too large (%d)", extra)
48664866- }
48674867-48684868- if maj != cbg.MajArray {
48694869- return fmt.Errorf("expected cbor array")
48704870- }
48714871-48724872- if extra > 0 {
48734873- t.Environment = make([]*Pipeline_Pair, extra)
48744874- }
48754875-48764876- for i := 0; i < int(extra); i++ {
48774877- {
48784878- var maj byte
48794879- var extra uint64
48804880- var err error
48814881- _ = maj
48824882- _ = extra
48834883- _ = err
48844884-48854885- {
48864886-48874887- b, err := cr.ReadByte()
48884888- if err != nil {
48894889- return err
48904890- }
48914891- if b != cbg.CborNull[0] {
48924892- if err := cr.UnreadByte(); err != nil {
48934893- return err
48944894- }
48954895- t.Environment[i] = new(Pipeline_Pair)
48964896- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
48974897- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
48984898- }
48994899- }
49004900-49014901- }
49024902-49034903- }
49044904- }
49054905- // t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
49064906- case "dependencies":
49074907-49084908- maj, extra, err = cr.ReadHeader()
49094909- if err != nil {
49104910- return err
49114911- }
49124912-49134913- if extra > 8192 {
49144914- return fmt.Errorf("t.Dependencies: array too large (%d)", extra)
49154915- }
49164916-49174917- if maj != cbg.MajArray {
49184918- return fmt.Errorf("expected cbor array")
49194919- }
49204920-49214921- if extra > 0 {
49224922- t.Dependencies = make([]*Pipeline_Dependency, extra)
49234923- }
49244924-49254925- for i := 0; i < int(extra); i++ {
49264926- {
49274927- var maj byte
49284928- var extra uint64
49294929- var err error
49304930- _ = maj
49314931- _ = extra
49324932- _ = err
49334933-49344934- {
49354935-49364936- b, err := cr.ReadByte()
49374937- if err != nil {
49384938- return err
49394939- }
49404940- if b != cbg.CborNull[0] {
49414941- if err := cr.UnreadByte(); err != nil {
49424942- return err
49434943- }
49444944- t.Dependencies[i] = new(Pipeline_Dependency)
49454945- if err := t.Dependencies[i].UnmarshalCBOR(cr); err != nil {
49464946- return xerrors.Errorf("unmarshaling t.Dependencies[i] pointer: %w", err)
49474947- }
49484948- }
49494949-49504950- }
49514951-49524952- }
45354535+ t.Engine = string(sval)
49534536 }
4954453749554538 default:
···60595642 }
6060564360615644 cw := cbg.NewCborWriter(w)
60626062- fieldCount := 7
56455645+ fieldCount := 5
6063564660645647 if t.Body == nil {
60655648 fieldCount--
···61435726 return err
61445727 }
6145572861466146- // t.Owner (string) (string)
61476147- if len("owner") > 1000000 {
61486148- return xerrors.Errorf("Value in field \"owner\" was too long")
61496149- }
61506150-61516151- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
61526152- return err
61536153- }
61546154- if _, err := cw.WriteString(string("owner")); err != nil {
61556155- return err
61566156- }
61576157-61586158- if len(t.Owner) > 1000000 {
61596159- return xerrors.Errorf("Value in field t.Owner was too long")
61606160- }
61616161-61626162- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Owner))); err != nil {
61636163- return err
61646164- }
61656165- if _, err := cw.WriteString(string(t.Owner)); err != nil {
61666166- return err
61676167- }
61686168-61695729 // t.Title (string) (string)
61705730 if len("title") > 1000000 {
61715731 return xerrors.Errorf("Value in field \"title\" was too long")
···61895749 return err
61905750 }
6191575161926192- // t.IssueId (int64) (int64)
61936193- if len("issueId") > 1000000 {
61946194- return xerrors.Errorf("Value in field \"issueId\" was too long")
61956195- }
61966196-61976197- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("issueId"))); err != nil {
61986198- return err
61996199- }
62006200- if _, err := cw.WriteString(string("issueId")); err != nil {
62016201- return err
62026202- }
62036203-62046204- if t.IssueId >= 0 {
62056205- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.IssueId)); err != nil {
62066206- return err
62076207- }
62086208- } else {
62096209- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.IssueId-1)); err != nil {
62106210- return err
62116211- }
62126212- }
62136213-62145752 // t.CreatedAt (string) (string)
62155753 if len("createdAt") > 1000000 {
62165754 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···6320585863215859 t.LexiconTypeID = string(sval)
63225860 }
63236323- // t.Owner (string) (string)
63246324- case "owner":
63256325-63266326- {
63276327- sval, err := cbg.ReadStringWithMax(cr, 1000000)
63286328- if err != nil {
63296329- return err
63306330- }
63316331-63326332- t.Owner = string(sval)
63336333- }
63345861 // t.Title (string) (string)
63355862 case "title":
63365863···6342586963435870 t.Title = string(sval)
63445871 }
63456345- // t.IssueId (int64) (int64)
63466346- case "issueId":
63476347- {
63486348- maj, extra, err := cr.ReadHeader()
63496349- if err != nil {
63506350- return err
63516351- }
63526352- var extraI int64
63536353- switch maj {
63546354- case cbg.MajUnsignedInt:
63556355- extraI = int64(extra)
63566356- if extraI < 0 {
63576357- return fmt.Errorf("int64 positive overflow")
63586358- }
63596359- case cbg.MajNegativeInt:
63606360- extraI = int64(extra)
63616361- if extraI < 0 {
63626362- return fmt.Errorf("int64 negative overflow")
63636363- }
63646364- extraI = -1 - extraI
63656365- default:
63666366- return fmt.Errorf("wrong type for int64 field: %d", maj)
63676367- }
63686368-63696369- t.IssueId = int64(extraI)
63706370- }
63715872 // t.CreatedAt (string) (string)
63725873 case "createdAt":
63735874···63975898 }
6398589963995900 cw := cbg.NewCborWriter(w)
64006400- fieldCount := 7
59015901+ fieldCount := 5
6401590264026402- if t.CommentId == nil {
64036403- fieldCount--
64046404- }
64056405-64066406- if t.Owner == nil {
64076407- fieldCount--
64086408- }
64096409-64106410- if t.Repo == nil {
59035903+ if t.ReplyTo == nil {
64115904 fieldCount--
64125905 }
64135906···64385931 return err
64395932 }
6440593364416441- // t.Repo (string) (string)
64426442- if t.Repo != nil {
64436443-64446444- if len("repo") > 1000000 {
64456445- return xerrors.Errorf("Value in field \"repo\" was too long")
64466446- }
64476447-64486448- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
64496449- return err
64506450- }
64516451- if _, err := cw.WriteString(string("repo")); err != nil {
64526452- return err
64536453- }
64546454-64556455- if t.Repo == nil {
64566456- if _, err := cw.Write(cbg.CborNull); err != nil {
64576457- return err
64586458- }
64596459- } else {
64606460- if len(*t.Repo) > 1000000 {
64616461- return xerrors.Errorf("Value in field t.Repo was too long")
64626462- }
64636463-64646464- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
64656465- return err
64666466- }
64676467- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
64686468- return err
64696469- }
64706470- }
64716471- }
64726472-64735934 // t.LexiconTypeID (string) (string)
64745935 if len("$type") > 1000000 {
64755936 return xerrors.Errorf("Value in field \"$type\" was too long")
···65125973 return err
65135974 }
6514597565156515- // t.Owner (string) (string)
65166516- if t.Owner != nil {
59765976+ // t.ReplyTo (string) (string)
59775977+ if t.ReplyTo != nil {
6517597865186518- if len("owner") > 1000000 {
65196519- return xerrors.Errorf("Value in field \"owner\" was too long")
59795979+ if len("replyTo") > 1000000 {
59805980+ return xerrors.Errorf("Value in field \"replyTo\" was too long")
65205981 }
6521598265226522- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
59835983+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("replyTo"))); err != nil {
65235984 return err
65245985 }
65256525- if _, err := cw.WriteString(string("owner")); err != nil {
59865986+ if _, err := cw.WriteString(string("replyTo")); err != nil {
65265987 return err
65275988 }
6528598965296529- if t.Owner == nil {
59905990+ if t.ReplyTo == nil {
65305991 if _, err := cw.Write(cbg.CborNull); err != nil {
65315992 return err
65325993 }
65335994 } else {
65346534- if len(*t.Owner) > 1000000 {
65356535- return xerrors.Errorf("Value in field t.Owner was too long")
59955995+ if len(*t.ReplyTo) > 1000000 {
59965996+ return xerrors.Errorf("Value in field t.ReplyTo was too long")
65365997 }
6537599865386538- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
59995999+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.ReplyTo))); err != nil {
65396000 return err
65406001 }
65416541- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
60026002+ if _, err := cw.WriteString(string(*t.ReplyTo)); err != nil {
65426003 return err
65436004 }
65446005 }
65456006 }
6546600765476547- // t.CommentId (int64) (int64)
65486548- if t.CommentId != nil {
65496549-65506550- if len("commentId") > 1000000 {
65516551- return xerrors.Errorf("Value in field \"commentId\" was too long")
65526552- }
65536553-65546554- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
65556555- return err
65566556- }
65576557- if _, err := cw.WriteString(string("commentId")); err != nil {
65586558- return err
65596559- }
65606560-65616561- if t.CommentId == nil {
65626562- if _, err := cw.Write(cbg.CborNull); err != nil {
65636563- return err
65646564- }
65656565- } else {
65666566- if *t.CommentId >= 0 {
65676567- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
65686568- return err
65696569- }
65706570- } else {
65716571- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
65726572- return err
65736573- }
65746574- }
65756575- }
65766576-65776577- }
65786578-65796008 // t.CreatedAt (string) (string)
65806009 if len("createdAt") > 1000000 {
65816010 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···6653608266546083 t.Body = string(sval)
66556084 }
66566656- // t.Repo (string) (string)
66576657- case "repo":
66586658-66596659- {
66606660- b, err := cr.ReadByte()
66616661- if err != nil {
66626662- return err
66636663- }
66646664- if b != cbg.CborNull[0] {
66656665- if err := cr.UnreadByte(); err != nil {
66666666- return err
66676667- }
66686668-66696669- sval, err := cbg.ReadStringWithMax(cr, 1000000)
66706670- if err != nil {
66716671- return err
66726672- }
66736673-66746674- t.Repo = (*string)(&sval)
66756675- }
66766676- }
66776085 // t.LexiconTypeID (string) (string)
66786086 case "$type":
66796087···6696610466976105 t.Issue = string(sval)
66986106 }
66996699- // t.Owner (string) (string)
67006700- case "owner":
61076107+ // t.ReplyTo (string) (string)
61086108+ case "replyTo":
6701610967026110 {
67036111 b, err := cr.ReadByte()
···67146122 return err
67156123 }
6716612467176717- t.Owner = (*string)(&sval)
67186718- }
67196719- }
67206720- // t.CommentId (int64) (int64)
67216721- case "commentId":
67226722- {
67236723-67246724- b, err := cr.ReadByte()
67256725- if err != nil {
67266726- return err
67276727- }
67286728- if b != cbg.CborNull[0] {
67296729- if err := cr.UnreadByte(); err != nil {
67306730- return err
67316731- }
67326732- maj, extra, err := cr.ReadHeader()
67336733- if err != nil {
67346734- return err
67356735- }
67366736- var extraI int64
67376737- switch maj {
67386738- case cbg.MajUnsignedInt:
67396739- extraI = int64(extra)
67406740- if extraI < 0 {
67416741- return fmt.Errorf("int64 positive overflow")
67426742- }
67436743- case cbg.MajNegativeInt:
67446744- extraI = int64(extra)
67456745- if extraI < 0 {
67466746- return fmt.Errorf("int64 negative overflow")
67476747- }
67486748- extraI = -1 - extraI
67496749- default:
67506750- return fmt.Errorf("wrong type for int64 field: %d", maj)
67516751- }
67526752-67536753- t.CommentId = (*int64)(&extraI)
61256125+ t.ReplyTo = (*string)(&sval)
67546126 }
67556127 }
67566128 // t.CreatedAt (string) (string)
···69466318 }
6947631969486320 cw := cbg.NewCborWriter(w)
69496949- fieldCount := 9
63216321+ fieldCount := 7
6950632269516323 if t.Body == nil {
69526324 fieldCount--
···70576429 return err
70586430 }
7059643170607060- // t.PullId (int64) (int64)
70617061- if len("pullId") > 1000000 {
70627062- return xerrors.Errorf("Value in field \"pullId\" was too long")
70637063- }
70647064-70657065- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pullId"))); err != nil {
70667066- return err
70677067- }
70687068- if _, err := cw.WriteString(string("pullId")); err != nil {
70697069- return err
70707070- }
70717071-70727072- if t.PullId >= 0 {
70737073- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PullId)); err != nil {
70747074- return err
70757075- }
70767076- } else {
70777077- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.PullId-1)); err != nil {
70787078- return err
70797079- }
70807080- }
70817081-70826432 // t.Source (tangled.RepoPull_Source) (struct)
70836433 if t.Source != nil {
70846434···70986448 }
70996449 }
7100645071017101- // t.CreatedAt (string) (string)
71027102- if len("createdAt") > 1000000 {
71037103- return xerrors.Errorf("Value in field \"createdAt\" was too long")
64516451+ // t.Target (tangled.RepoPull_Target) (struct)
64526452+ if len("target") > 1000000 {
64536453+ return xerrors.Errorf("Value in field \"target\" was too long")
71046454 }
7105645571067106- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
64566456+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("target"))); err != nil {
71076457 return err
71086458 }
71097109- if _, err := cw.WriteString(string("createdAt")); err != nil {
64596459+ if _, err := cw.WriteString(string("target")); err != nil {
71106460 return err
71116461 }
7112646271137113- if len(t.CreatedAt) > 1000000 {
71147114- return xerrors.Errorf("Value in field t.CreatedAt was too long")
71157115- }
71167116-71177117- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
71187118- return err
71197119- }
71207120- if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
64636463+ if err := t.Target.MarshalCBOR(cw); err != nil {
71216464 return err
71226465 }
7123646671247124- // t.TargetRepo (string) (string)
71257125- if len("targetRepo") > 1000000 {
71267126- return xerrors.Errorf("Value in field \"targetRepo\" was too long")
64676467+ // t.CreatedAt (string) (string)
64686468+ if len("createdAt") > 1000000 {
64696469+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
71276470 }
7128647171297129- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetRepo"))); err != nil {
64726472+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
71306473 return err
71316474 }
71327132- if _, err := cw.WriteString(string("targetRepo")); err != nil {
64756475+ if _, err := cw.WriteString(string("createdAt")); err != nil {
71336476 return err
71346477 }
7135647871367136- if len(t.TargetRepo) > 1000000 {
71377137- return xerrors.Errorf("Value in field t.TargetRepo was too long")
71387138- }
71397139-71407140- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetRepo))); err != nil {
71417141- return err
71427142- }
71437143- if _, err := cw.WriteString(string(t.TargetRepo)); err != nil {
71447144- return err
71457145- }
71467146-71477147- // t.TargetBranch (string) (string)
71487148- if len("targetBranch") > 1000000 {
71497149- return xerrors.Errorf("Value in field \"targetBranch\" was too long")
64796479+ if len(t.CreatedAt) > 1000000 {
64806480+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
71506481 }
7151648271527152- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetBranch"))); err != nil {
64836483+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
71536484 return err
71546485 }
71557155- if _, err := cw.WriteString(string("targetBranch")); err != nil {
71567156- return err
71577157- }
71587158-71597159- if len(t.TargetBranch) > 1000000 {
71607160- return xerrors.Errorf("Value in field t.TargetBranch was too long")
71617161- }
71627162-71637163- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetBranch))); err != nil {
71647164- return err
71657165- }
71667166- if _, err := cw.WriteString(string(t.TargetBranch)); err != nil {
64866486+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
71676487 return err
71686488 }
71696489 return nil
···7194651471956515 n := extra
7196651671977197- nameBuf := make([]byte, 12)
65176517+ nameBuf := make([]byte, 9)
71986518 for i := uint64(0); i < n; i++ {
71996519 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
72006520 if err != nil {
···7264658472656585 t.Title = string(sval)
72666586 }
72677267- // t.PullId (int64) (int64)
72687268- case "pullId":
72697269- {
72707270- maj, extra, err := cr.ReadHeader()
72717271- if err != nil {
72727272- return err
72737273- }
72747274- var extraI int64
72757275- switch maj {
72767276- case cbg.MajUnsignedInt:
72777277- extraI = int64(extra)
72787278- if extraI < 0 {
72797279- return fmt.Errorf("int64 positive overflow")
72807280- }
72817281- case cbg.MajNegativeInt:
72827282- extraI = int64(extra)
72837283- if extraI < 0 {
72847284- return fmt.Errorf("int64 negative overflow")
72857285- }
72867286- extraI = -1 - extraI
72877287- default:
72887288- return fmt.Errorf("wrong type for int64 field: %d", maj)
72897289- }
72907290-72917291- t.PullId = int64(extraI)
72927292- }
72936587 // t.Source (tangled.RepoPull_Source) (struct)
72946588 case "source":
72956589···73106604 }
7311660573126606 }
73137313- // t.CreatedAt (string) (string)
73147314- case "createdAt":
66076607+ // t.Target (tangled.RepoPull_Target) (struct)
66086608+ case "target":
7315660973166610 {
73177317- sval, err := cbg.ReadStringWithMax(cr, 1000000)
73187318- if err != nil {
73197319- return err
73207320- }
7321661173227322- t.CreatedAt = string(sval)
73237323- }
73247324- // t.TargetRepo (string) (string)
73257325- case "targetRepo":
73267326-73277327- {
73287328- sval, err := cbg.ReadStringWithMax(cr, 1000000)
66126612+ b, err := cr.ReadByte()
73296613 if err != nil {
73306614 return err
73316615 }
66166616+ if b != cbg.CborNull[0] {
66176617+ if err := cr.UnreadByte(); err != nil {
66186618+ return err
66196619+ }
66206620+ t.Target = new(RepoPull_Target)
66216621+ if err := t.Target.UnmarshalCBOR(cr); err != nil {
66226622+ return xerrors.Errorf("unmarshaling t.Target pointer: %w", err)
66236623+ }
66246624+ }
7332662573337333- t.TargetRepo = string(sval)
73346626 }
73357335- // t.TargetBranch (string) (string)
73367336- case "targetBranch":
66276627+ // t.CreatedAt (string) (string)
66286628+ case "createdAt":
7337662973386630 {
73396631 sval, err := cbg.ReadStringWithMax(cr, 1000000)
···73416633 return err
73426634 }
7343663573447344- t.TargetBranch = string(sval)
66366636+ t.CreatedAt = string(sval)
73456637 }
7346663873476639 default:
···73616653 }
7362665473636655 cw := cbg.NewCborWriter(w)
73647364- fieldCount := 7
73657365-73667366- if t.CommentId == nil {
73677367- fieldCount--
73687368- }
7369665673707370- if t.Owner == nil {
73717371- fieldCount--
73727372- }
73737373-73747374- if t.Repo == nil {
73757375- fieldCount--
73767376- }
73777377-73787378- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
66576657+ if _, err := cw.Write([]byte{164}); err != nil {
73796658 return err
73806659 }
73816660···74256704 return err
74266705 }
7427670674287428- // t.Repo (string) (string)
74297429- if t.Repo != nil {
74307430-74317431- if len("repo") > 1000000 {
74327432- return xerrors.Errorf("Value in field \"repo\" was too long")
74337433- }
74347434-74357435- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
74367436- return err
74377437- }
74387438- if _, err := cw.WriteString(string("repo")); err != nil {
74397439- return err
74407440- }
74417441-74427442- if t.Repo == nil {
74437443- if _, err := cw.Write(cbg.CborNull); err != nil {
74447444- return err
74457445- }
74467446- } else {
74477447- if len(*t.Repo) > 1000000 {
74487448- return xerrors.Errorf("Value in field t.Repo was too long")
74497449- }
74507450-74517451- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
74527452- return err
74537453- }
74547454- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
74557455- return err
74567456- }
74577457- }
74587458- }
74597459-74606707 // t.LexiconTypeID (string) (string)
74616708 if len("$type") > 1000000 {
74626709 return xerrors.Errorf("Value in field \"$type\" was too long")
···74766723 return err
74776724 }
7478672574797479- // t.Owner (string) (string)
74807480- if t.Owner != nil {
74817481-74827482- if len("owner") > 1000000 {
74837483- return xerrors.Errorf("Value in field \"owner\" was too long")
74847484- }
74857485-74867486- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
74877487- return err
74887488- }
74897489- if _, err := cw.WriteString(string("owner")); err != nil {
74907490- return err
74917491- }
74927492-74937493- if t.Owner == nil {
74947494- if _, err := cw.Write(cbg.CborNull); err != nil {
74957495- return err
74967496- }
74977497- } else {
74987498- if len(*t.Owner) > 1000000 {
74997499- return xerrors.Errorf("Value in field t.Owner was too long")
75007500- }
75017501-75027502- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
75037503- return err
75047504- }
75057505- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
75067506- return err
75077507- }
75087508- }
75097509- }
75107510-75117511- // t.CommentId (int64) (int64)
75127512- if t.CommentId != nil {
75137513-75147514- if len("commentId") > 1000000 {
75157515- return xerrors.Errorf("Value in field \"commentId\" was too long")
75167516- }
75177517-75187518- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
75197519- return err
75207520- }
75217521- if _, err := cw.WriteString(string("commentId")); err != nil {
75227522- return err
75237523- }
75247524-75257525- if t.CommentId == nil {
75267526- if _, err := cw.Write(cbg.CborNull); err != nil {
75277527- return err
75287528- }
75297529- } else {
75307530- if *t.CommentId >= 0 {
75317531- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
75327532- return err
75337533- }
75347534- } else {
75357535- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
75367536- return err
75377537- }
75387538- }
75397539- }
75407540-75417541- }
75427542-75436726 // t.CreatedAt (string) (string)
75446727 if len("createdAt") > 1000000 {
75456728 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···7628681176296812 t.Pull = string(sval)
76306813 }
76317631- // t.Repo (string) (string)
76327632- case "repo":
76337633-76347634- {
76357635- b, err := cr.ReadByte()
76367636- if err != nil {
76377637- return err
76387638- }
76397639- if b != cbg.CborNull[0] {
76407640- if err := cr.UnreadByte(); err != nil {
76417641- return err
76427642- }
76437643-76447644- sval, err := cbg.ReadStringWithMax(cr, 1000000)
76457645- if err != nil {
76467646- return err
76477647- }
76487648-76497649- t.Repo = (*string)(&sval)
76507650- }
76517651- }
76526814 // t.LexiconTypeID (string) (string)
76536815 case "$type":
76546816···76596821 }
7660682276616823 t.LexiconTypeID = string(sval)
76627662- }
76637663- // t.Owner (string) (string)
76647664- case "owner":
76657665-76667666- {
76677667- b, err := cr.ReadByte()
76687668- if err != nil {
76697669- return err
76707670- }
76717671- if b != cbg.CborNull[0] {
76727672- if err := cr.UnreadByte(); err != nil {
76737673- return err
76747674- }
76757675-76767676- sval, err := cbg.ReadStringWithMax(cr, 1000000)
76777677- if err != nil {
76787678- return err
76797679- }
76807680-76817681- t.Owner = (*string)(&sval)
76827682- }
76837683- }
76847684- // t.CommentId (int64) (int64)
76857685- case "commentId":
76867686- {
76877687-76887688- b, err := cr.ReadByte()
76897689- if err != nil {
76907690- return err
76917691- }
76927692- if b != cbg.CborNull[0] {
76937693- if err := cr.UnreadByte(); err != nil {
76947694- return err
76957695- }
76967696- maj, extra, err := cr.ReadHeader()
76977697- if err != nil {
76987698- return err
76997699- }
77007700- var extraI int64
77017701- switch maj {
77027702- case cbg.MajUnsignedInt:
77037703- extraI = int64(extra)
77047704- if extraI < 0 {
77057705- return fmt.Errorf("int64 positive overflow")
77067706- }
77077707- case cbg.MajNegativeInt:
77087708- extraI = int64(extra)
77097709- if extraI < 0 {
77107710- return fmt.Errorf("int64 negative overflow")
77117711- }
77127712- extraI = -1 - extraI
77137713- default:
77147714- return fmt.Errorf("wrong type for int64 field: %d", maj)
77157715- }
77167716-77177717- t.CommentId = (*int64)(&extraI)
77187718- }
77196824 }
77206825 // t.CreatedAt (string) (string)
77216826 case "createdAt":
···80837188 }
8084718980857190 t.Status = string(sval)
71917191+ }
71927192+71937193+ default:
71947194+ // Field doesn't exist on this type, so ignore it
71957195+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
71967196+ return err
71977197+ }
71987198+ }
71997199+ }
72007200+72017201+ return nil
72027202+}
72037203+func (t *RepoPull_Target) MarshalCBOR(w io.Writer) error {
72047204+ if t == nil {
72057205+ _, err := w.Write(cbg.CborNull)
72067206+ return err
72077207+ }
72087208+72097209+ cw := cbg.NewCborWriter(w)
72107210+72117211+ if _, err := cw.Write([]byte{162}); err != nil {
72127212+ return err
72137213+ }
72147214+72157215+ // t.Repo (string) (string)
72167216+ if len("repo") > 1000000 {
72177217+ return xerrors.Errorf("Value in field \"repo\" was too long")
72187218+ }
72197219+72207220+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
72217221+ return err
72227222+ }
72237223+ if _, err := cw.WriteString(string("repo")); err != nil {
72247224+ return err
72257225+ }
72267226+72277227+ if len(t.Repo) > 1000000 {
72287228+ return xerrors.Errorf("Value in field t.Repo was too long")
72297229+ }
72307230+72317231+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
72327232+ return err
72337233+ }
72347234+ if _, err := cw.WriteString(string(t.Repo)); err != nil {
72357235+ return err
72367236+ }
72377237+72387238+ // t.Branch (string) (string)
72397239+ if len("branch") > 1000000 {
72407240+ return xerrors.Errorf("Value in field \"branch\" was too long")
72417241+ }
72427242+72437243+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("branch"))); err != nil {
72447244+ return err
72457245+ }
72467246+ if _, err := cw.WriteString(string("branch")); err != nil {
72477247+ return err
72487248+ }
72497249+72507250+ if len(t.Branch) > 1000000 {
72517251+ return xerrors.Errorf("Value in field t.Branch was too long")
72527252+ }
72537253+72547254+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Branch))); err != nil {
72557255+ return err
72567256+ }
72577257+ if _, err := cw.WriteString(string(t.Branch)); err != nil {
72587258+ return err
72597259+ }
72607260+ return nil
72617261+}
72627262+72637263+func (t *RepoPull_Target) UnmarshalCBOR(r io.Reader) (err error) {
72647264+ *t = RepoPull_Target{}
72657265+72667266+ cr := cbg.NewCborReader(r)
72677267+72687268+ maj, extra, err := cr.ReadHeader()
72697269+ if err != nil {
72707270+ return err
72717271+ }
72727272+ defer func() {
72737273+ if err == io.EOF {
72747274+ err = io.ErrUnexpectedEOF
72757275+ }
72767276+ }()
72777277+72787278+ if maj != cbg.MajMap {
72797279+ return fmt.Errorf("cbor input should be of type map")
72807280+ }
72817281+72827282+ if extra > cbg.MaxLength {
72837283+ return fmt.Errorf("RepoPull_Target: map struct too large (%d)", extra)
72847284+ }
72857285+72867286+ n := extra
72877287+72887288+ nameBuf := make([]byte, 6)
72897289+ for i := uint64(0); i < n; i++ {
72907290+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
72917291+ if err != nil {
72927292+ return err
72937293+ }
72947294+72957295+ if !ok {
72967296+ // Field doesn't exist on this type, so ignore it
72977297+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
72987298+ return err
72997299+ }
73007300+ continue
73017301+ }
73027302+73037303+ switch string(nameBuf[:nameLen]) {
73047304+ // t.Repo (string) (string)
73057305+ case "repo":
73067306+73077307+ {
73087308+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
73097309+ if err != nil {
73107310+ return err
73117311+ }
73127312+73137313+ t.Repo = string(sval)
73147314+ }
73157315+ // t.Branch (string) (string)
73167316+ case "branch":
73177317+73187318+ {
73197319+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
73207320+ if err != nil {
73217321+ return err
73227322+ }
73237323+73247324+ t.Branch = string(sval)
80867325 }
8087732680887327 default:
+19-15
api/tangled/gitrefUpdate.go
···3333 RepoName string `json:"repoName" cborgen:"repoName"`
3434}
35353636-type GitRefUpdate_Meta struct {
3737- CommitCount *GitRefUpdate_Meta_CommitCount `json:"commitCount" cborgen:"commitCount"`
3838- IsDefaultRef bool `json:"isDefaultRef" cborgen:"isDefaultRef"`
3939- LangBreakdown *GitRefUpdate_Meta_LangBreakdown `json:"langBreakdown,omitempty" cborgen:"langBreakdown,omitempty"`
3636+// GitRefUpdate_CommitCountBreakdown is a "commitCountBreakdown" in the sh.tangled.git.refUpdate schema.
3737+type GitRefUpdate_CommitCountBreakdown struct {
3838+ ByEmail []*GitRefUpdate_IndividualEmailCommitCount `json:"byEmail,omitempty" cborgen:"byEmail,omitempty"`
4039}
41404242-type GitRefUpdate_Meta_CommitCount struct {
4343- ByEmail []*GitRefUpdate_Meta_CommitCount_ByEmail_Elem `json:"byEmail,omitempty" cborgen:"byEmail,omitempty"`
4444-}
4545-4646-type GitRefUpdate_Meta_CommitCount_ByEmail_Elem struct {
4141+// GitRefUpdate_IndividualEmailCommitCount is a "individualEmailCommitCount" in the sh.tangled.git.refUpdate schema.
4242+type GitRefUpdate_IndividualEmailCommitCount struct {
4743 Count int64 `json:"count" cborgen:"count"`
4844 Email string `json:"email" cborgen:"email"`
4945}
50465151-type GitRefUpdate_Meta_LangBreakdown struct {
5252- Inputs []*GitRefUpdate_Pair `json:"inputs,omitempty" cborgen:"inputs,omitempty"`
5353-}
5454-5555-// GitRefUpdate_Pair is a "pair" in the sh.tangled.git.refUpdate schema.
5656-type GitRefUpdate_Pair struct {
4747+// GitRefUpdate_IndividualLanguageSize is a "individualLanguageSize" in the sh.tangled.git.refUpdate schema.
4848+type GitRefUpdate_IndividualLanguageSize struct {
5749 Lang string `json:"lang" cborgen:"lang"`
5850 Size int64 `json:"size" cborgen:"size"`
5951}
5252+5353+// GitRefUpdate_LangBreakdown is a "langBreakdown" in the sh.tangled.git.refUpdate schema.
5454+type GitRefUpdate_LangBreakdown struct {
5555+ Inputs []*GitRefUpdate_IndividualLanguageSize `json:"inputs,omitempty" cborgen:"inputs,omitempty"`
5656+}
5757+5858+// GitRefUpdate_Meta is a "meta" in the sh.tangled.git.refUpdate schema.
5959+type GitRefUpdate_Meta struct {
6060+ CommitCount *GitRefUpdate_CommitCountBreakdown `json:"commitCount" cborgen:"commitCount"`
6161+ IsDefaultRef bool `json:"isDefaultRef" cborgen:"isDefaultRef"`
6262+ LangBreakdown *GitRefUpdate_LangBreakdown `json:"langBreakdown,omitempty" cborgen:"langBreakdown,omitempty"`
6363+}
···1717 Dev bool `env:"DEV, default=false"`
1818 DisallowedNicknamesFile string `env:"DISALLOWED_NICKNAMES_FILE"`
19192020- // temporarily, to add users to default spindle
2020+ // temporarily, to add users to default knot and spindle
2121 AppPassword string `env:"APP_PASSWORD"`
2222+2323+ // uhhhh this is because knot1 is under icy's did
2424+ TmpAltAppPassword string `env:"ALT_APP_PASSWORD"`
2225}
23262427type OAuthConfig struct {
+240-23
appview/db/db.go
···2727}
28282929func Make(dbPath string) (*DB, error) {
3030- db, err := sql.Open("sqlite3", dbPath)
3030+ // https://github.com/mattn/go-sqlite3#connection-string
3131+ opts := []string{
3232+ "_foreign_keys=1",
3333+ "_journal_mode=WAL",
3434+ "_synchronous=NORMAL",
3535+ "_auto_vacuum=incremental",
3636+ }
3737+3838+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
3139 if err != nil {
3240 return nil, err
3341 }
3434- _, err = db.Exec(`
3535- pragma journal_mode = WAL;
3636- pragma synchronous = normal;
3737- pragma foreign_keys = on;
3838- pragma temp_store = memory;
3939- pragma mmap_size = 30000000000;
4040- pragma page_size = 32768;
4141- pragma auto_vacuum = incremental;
4242- pragma busy_timeout = 5000;
4242+4343+ ctx := context.Background()
43444545+ conn, err := db.Conn(ctx)
4646+ if err != nil {
4747+ return nil, err
4848+ }
4949+ defer conn.Close()
5050+5151+ _, err = conn.ExecContext(ctx, `
4452 create table if not exists registrations (
4553 id integer primary key autoincrement,
4654 domain text not null unique,
···462470 id integer primary key autoincrement,
463471 name text unique
464472 );
473473+474474+ -- indexes for better star query performance
475475+ create index if not exists idx_stars_created on stars(created);
476476+ create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
465477 `)
466478 if err != nil {
467479 return nil, err
468480 }
469481470482 // run migrations
471471- runMigration(db, "add-description-to-repos", func(tx *sql.Tx) error {
483483+ runMigration(conn, "add-description-to-repos", func(tx *sql.Tx) error {
472484 tx.Exec(`
473485 alter table repos add column description text check (length(description) <= 200);
474486 `)
475487 return nil
476488 })
477489478478- runMigration(db, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
490490+ runMigration(conn, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
479491 // add unconstrained column
480492 _, err := tx.Exec(`
481493 alter table public_keys
···498510 return nil
499511 })
500512501501- runMigration(db, "add-rkey-to-comments", func(tx *sql.Tx) error {
513513+ runMigration(conn, "add-rkey-to-comments", func(tx *sql.Tx) error {
502514 _, err := tx.Exec(`
503515 alter table comments drop column comment_at;
504516 alter table comments add column rkey text;
···506518 return err
507519 })
508520509509- runMigration(db, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
521521+ runMigration(conn, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
510522 _, err := tx.Exec(`
511523 alter table comments add column deleted text; -- timestamp
512524 alter table comments add column edited text; -- timestamp
···514526 return err
515527 })
516528517517- runMigration(db, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
529529+ runMigration(conn, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
518530 _, err := tx.Exec(`
519531 alter table pulls add column source_branch text;
520532 alter table pulls add column source_repo_at text;
···523535 return err
524536 })
525537526526- runMigration(db, "add-source-to-repos", func(tx *sql.Tx) error {
538538+ runMigration(conn, "add-source-to-repos", func(tx *sql.Tx) error {
527539 _, err := tx.Exec(`
528540 alter table repos add column source text;
529541 `)
···534546 // NOTE: this cannot be done in a transaction, so it is run outside [0]
535547 //
536548 // [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
537537- db.Exec("pragma foreign_keys = off;")
538538- runMigration(db, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
549549+ conn.ExecContext(ctx, "pragma foreign_keys = off;")
550550+ runMigration(conn, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
539551 _, err := tx.Exec(`
540552 create table pulls_new (
541553 -- identifiers
···590602 `)
591603 return err
592604 })
593593- db.Exec("pragma foreign_keys = on;")
605605+ conn.ExecContext(ctx, "pragma foreign_keys = on;")
594606595607 // run migrations
596596- runMigration(db, "add-spindle-to-repos", func(tx *sql.Tx) error {
608608+ runMigration(conn, "add-spindle-to-repos", func(tx *sql.Tx) error {
597609 tx.Exec(`
598610 alter table repos add column spindle text;
599611 `)
600612 return nil
601613 })
602614615615+ // drop all knot secrets, add unique constraint to knots
616616+ //
617617+ // knots will henceforth use service auth for signed requests
618618+ runMigration(conn, "no-more-secrets", func(tx *sql.Tx) error {
619619+ _, err := tx.Exec(`
620620+ create table registrations_new (
621621+ id integer primary key autoincrement,
622622+ domain text not null,
623623+ did text not null,
624624+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
625625+ registered text,
626626+ read_only integer not null default 0,
627627+ unique(domain, did)
628628+ );
629629+630630+ insert into registrations_new (id, domain, did, created, registered, read_only)
631631+ select id, domain, did, created, registered, 1 from registrations
632632+ where registered is not null;
633633+634634+ drop table registrations;
635635+ alter table registrations_new rename to registrations;
636636+ `)
637637+ return err
638638+ })
639639+603640 // recreate and add rkey + created columns with default constraint
604604- runMigration(db, "rework-collaborators-table", func(tx *sql.Tx) error {
641641+ runMigration(conn, "rework-collaborators-table", func(tx *sql.Tx) error {
605642 // create new table
606643 // - repo_at instead of repo integer
607644 // - rkey field
···655692 return err
656693 })
657694695695+ runMigration(conn, "add-rkey-to-issues", func(tx *sql.Tx) error {
696696+ _, err := tx.Exec(`
697697+ alter table issues add column rkey text not null default '';
698698+699699+ -- get last url section from issue_at and save to rkey column
700700+ update issues
701701+ set rkey = replace(issue_at, rtrim(issue_at, replace(issue_at, '/', '')), '');
702702+ `)
703703+ return err
704704+ })
705705+706706+ // repurpose the read-only column to "needs-upgrade"
707707+ runMigration(conn, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
708708+ _, err := tx.Exec(`
709709+ alter table registrations rename column read_only to needs_upgrade;
710710+ `)
711711+ return err
712712+ })
713713+714714+ // require all knots to upgrade after the release of total xrpc
715715+ runMigration(conn, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
716716+ _, err := tx.Exec(`
717717+ update registrations set needs_upgrade = 1;
718718+ `)
719719+ return err
720720+ })
721721+722722+ // require all knots to upgrade after the release of total xrpc
723723+ runMigration(conn, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
724724+ _, err := tx.Exec(`
725725+ alter table spindles add column needs_upgrade integer not null default 0;
726726+ `)
727727+ if err != nil {
728728+ return err
729729+ }
730730+731731+ _, err = tx.Exec(`
732732+ update spindles set needs_upgrade = 1;
733733+ `)
734734+ return err
735735+ })
736736+737737+ // remove issue_at from issues and replace with generated column
738738+ //
739739+ // this requires a full table recreation because stored columns
740740+ // cannot be added via alter
741741+ //
742742+ // couple other changes:
743743+ // - columns renamed to be more consistent
744744+ // - adds edited and deleted fields
745745+ //
746746+ // disable foreign-keys for the next migration
747747+ conn.ExecContext(ctx, "pragma foreign_keys = off;")
748748+ runMigration(conn, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
749749+ _, err := tx.Exec(`
750750+ create table if not exists issues_new (
751751+ -- identifiers
752752+ id integer primary key autoincrement,
753753+ did text not null,
754754+ rkey text not null,
755755+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.issue' || '/' || rkey) stored,
756756+757757+ -- at identifiers
758758+ repo_at text not null,
759759+760760+ -- content
761761+ issue_id integer not null,
762762+ title text not null,
763763+ body text not null,
764764+ open integer not null default 1,
765765+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
766766+ edited text, -- timestamp
767767+ deleted text, -- timestamp
768768+769769+ unique(did, rkey),
770770+ unique(repo_at, issue_id),
771771+ unique(at_uri),
772772+ foreign key (repo_at) references repos(at_uri) on delete cascade
773773+ );
774774+ `)
775775+ if err != nil {
776776+ return err
777777+ }
778778+779779+ // transfer data
780780+ _, err = tx.Exec(`
781781+ insert into issues_new (id, did, rkey, repo_at, issue_id, title, body, open, created)
782782+ select
783783+ i.id,
784784+ i.owner_did,
785785+ i.rkey,
786786+ i.repo_at,
787787+ i.issue_id,
788788+ i.title,
789789+ i.body,
790790+ i.open,
791791+ i.created
792792+ from issues i;
793793+ `)
794794+ if err != nil {
795795+ return err
796796+ }
797797+798798+ // drop old table
799799+ _, err = tx.Exec(`drop table issues`)
800800+ if err != nil {
801801+ return err
802802+ }
803803+804804+ // rename new table
805805+ _, err = tx.Exec(`alter table issues_new rename to issues`)
806806+ return err
807807+ })
808808+ conn.ExecContext(ctx, "pragma foreign_keys = on;")
809809+810810+ // - renames the comments table to 'issue_comments'
811811+ // - rework issue comments to update constraints:
812812+ // * unique(did, rkey)
813813+ // * remove comment-id and just use the global ID
814814+ // * foreign key (repo_at, issue_id)
815815+ // - new columns
816816+ // * column "reply_to" which can be any other comment
817817+ // * column "at-uri" which is a generated column
818818+ runMigration(conn, "rework-issue-comments", func(tx *sql.Tx) error {
819819+ _, err := tx.Exec(`
820820+ create table if not exists issue_comments (
821821+ -- identifiers
822822+ id integer primary key autoincrement,
823823+ did text not null,
824824+ rkey text,
825825+ at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.issue.comment' || '/' || rkey) stored,
826826+827827+ -- at identifiers
828828+ issue_at text not null,
829829+ reply_to text, -- at_uri of parent comment
830830+831831+ -- content
832832+ body text not null,
833833+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
834834+ edited text,
835835+ deleted text,
836836+837837+ -- constraints
838838+ unique(did, rkey),
839839+ unique(at_uri),
840840+ foreign key (issue_at) references issues(at_uri) on delete cascade
841841+ );
842842+ `)
843843+ if err != nil {
844844+ return err
845845+ }
846846+847847+ // transfer data
848848+ _, err = tx.Exec(`
849849+ insert into issue_comments (id, did, rkey, issue_at, body, created, edited, deleted)
850850+ select
851851+ c.id,
852852+ c.owner_did,
853853+ c.rkey,
854854+ i.at_uri, -- get at_uri from issues table
855855+ c.body,
856856+ c.created,
857857+ c.edited,
858858+ c.deleted
859859+ from comments c
860860+ join issues i on c.repo_at = i.repo_at and c.issue_id = i.issue_id;
861861+ `)
862862+ if err != nil {
863863+ return err
864864+ }
865865+866866+ // drop old table
867867+ _, err = tx.Exec(`drop table comments`)
868868+ return err
869869+ })
870870+658871 return &DB{db}, nil
659872}
660873661874type migrationFn = func(*sql.Tx) error
662875663663-func runMigration(d *sql.DB, name string, migrationFn migrationFn) error {
664664- tx, err := d.Begin()
876876+func runMigration(c *sql.Conn, name string, migrationFn migrationFn) error {
877877+ tx, err := c.BeginTx(context.Background(), nil)
665878 if err != nil {
666879 return err
667880 }
···699912 }
700913701914 return nil
915915+}
916916+917917+func (d *DB) Close() error {
918918+ return d.DB.Close()
702919}
703920704921type filter struct {
+145-42
appview/db/follow.go
···11package db
2233import (
44+ "fmt"
45 "log"
66+ "strings"
57 "time"
68)
79···5355 return err
5456}
55575656-func GetFollowerFollowingCount(e Execer, did string) (int, int, error) {
5757- followers, following := 0, 0
5858+type FollowStats struct {
5959+ Followers int64
6060+ Following int64
6161+}
6262+6363+func GetFollowerFollowingCount(e Execer, did string) (FollowStats, error) {
6464+ var followers, following int64
5865 err := e.QueryRow(
5959- `SELECT
6666+ `SELECT
6067 COUNT(CASE WHEN subject_did = ? THEN 1 END) AS followers,
6168 COUNT(CASE WHEN user_did = ? THEN 1 END) AS following
6269 FROM follows;`, did, did).Scan(&followers, &following)
6370 if err != nil {
6464- return 0, 0, err
7171+ return FollowStats{}, err
6572 }
6666- return followers, following, nil
7373+ return FollowStats{
7474+ Followers: followers,
7575+ Following: following,
7676+ }, nil
6777}
68786969-type FollowStatus int
7979+func GetFollowerFollowingCounts(e Execer, dids []string) (map[string]FollowStats, error) {
8080+ if len(dids) == 0 {
8181+ return nil, nil
8282+ }
70837171-const (
7272- IsNotFollowing FollowStatus = iota
7373- IsFollowing
7474- IsSelf
7575-)
8484+ placeholders := make([]string, len(dids))
8585+ for i := range placeholders {
8686+ placeholders[i] = "?"
8787+ }
8888+ placeholderStr := strings.Join(placeholders, ",")
76897777-func (s FollowStatus) String() string {
7878- switch s {
7979- case IsNotFollowing:
8080- return "IsNotFollowing"
8181- case IsFollowing:
8282- return "IsFollowing"
8383- case IsSelf:
8484- return "IsSelf"
8585- default:
8686- return "IsNotFollowing"
9090+ args := make([]any, len(dids)*2)
9191+ for i, did := range dids {
9292+ args[i] = did
9393+ args[i+len(dids)] = did
8794 }
8888-}
9595+9696+ query := fmt.Sprintf(`
9797+ select
9898+ coalesce(f.did, g.did) as did,
9999+ coalesce(f.followers, 0) as followers,
100100+ coalesce(g.following, 0) as following
101101+ from (
102102+ select subject_did as did, count(*) as followers
103103+ from follows
104104+ where subject_did in (%s)
105105+ group by subject_did
106106+ ) f
107107+ full outer join (
108108+ select user_did as did, count(*) as following
109109+ from follows
110110+ where user_did in (%s)
111111+ group by user_did
112112+ ) g on f.did = g.did`,
113113+ placeholderStr, placeholderStr)
114114+115115+ result := make(map[string]FollowStats)
116116+117117+ rows, err := e.Query(query, args...)
118118+ if err != nil {
119119+ return nil, err
120120+ }
121121+ defer rows.Close()
122122+123123+ for rows.Next() {
124124+ var did string
125125+ var followers, following int64
126126+ if err := rows.Scan(&did, &followers, &following); err != nil {
127127+ return nil, err
128128+ }
129129+ result[did] = FollowStats{
130130+ Followers: followers,
131131+ Following: following,
132132+ }
133133+ }
891349090-func GetFollowStatus(e Execer, userDid, subjectDid string) FollowStatus {
9191- if userDid == subjectDid {
9292- return IsSelf
9393- } else if _, err := GetFollow(e, userDid, subjectDid); err != nil {
9494- return IsNotFollowing
9595- } else {
9696- return IsFollowing
135135+ for _, did := range dids {
136136+ if _, exists := result[did]; !exists {
137137+ result[did] = FollowStats{
138138+ Followers: 0,
139139+ Following: 0,
140140+ }
141141+ }
97142 }
143143+144144+ return result, nil
98145}
99146100100-func GetAllFollows(e Execer, limit int) ([]Follow, error) {
147147+func GetFollows(e Execer, limit int, filters ...filter) ([]Follow, error) {
101148 var follows []Follow
102149103103- rows, err := e.Query(`
104104- select user_did, subject_did, followed_at, rkey
150150+ var conditions []string
151151+ var args []any
152152+ for _, filter := range filters {
153153+ conditions = append(conditions, filter.Condition())
154154+ args = append(args, filter.Arg()...)
155155+ }
156156+157157+ whereClause := ""
158158+ if conditions != nil {
159159+ whereClause = " where " + strings.Join(conditions, " and ")
160160+ }
161161+ limitClause := ""
162162+ if limit > 0 {
163163+ limitClause = " limit ?"
164164+ args = append(args, limit)
165165+ }
166166+167167+ query := fmt.Sprintf(
168168+ `select user_did, subject_did, followed_at, rkey
105169 from follows
170170+ %s
106171 order by followed_at desc
107107- limit ?`, limit,
108108- )
172172+ %s
173173+ `, whereClause, limitClause)
174174+175175+ rows, err := e.Query(query, args...)
109176 if err != nil {
110177 return nil, err
111178 }
112112- defer rows.Close()
113113-114179 for rows.Next() {
115180 var follow Follow
116181 var followedAt string
117117- if err := rows.Scan(&follow.UserDid, &follow.SubjectDid, &followedAt, &follow.Rkey); err != nil {
182182+ err := rows.Scan(
183183+ &follow.UserDid,
184184+ &follow.SubjectDid,
185185+ &followedAt,
186186+ &follow.Rkey,
187187+ )
188188+ if err != nil {
118189 return nil, err
119190 }
120120-121191 followedAtTime, err := time.Parse(time.RFC3339, followedAt)
122192 if err != nil {
123193 log.Println("unable to determine followed at time")
···125195 } else {
126196 follow.FollowedAt = followedAtTime
127197 }
128128-129198 follows = append(follows, follow)
130199 }
200200+ return follows, nil
201201+}
202202+203203+func GetFollowers(e Execer, did string) ([]Follow, error) {
204204+ return GetFollows(e, 0, FilterEq("subject_did", did))
205205+}
131206132132- if err := rows.Err(); err != nil {
133133- return nil, err
207207+func GetFollowing(e Execer, did string) ([]Follow, error) {
208208+ return GetFollows(e, 0, FilterEq("user_did", did))
209209+}
210210+211211+type FollowStatus int
212212+213213+const (
214214+ IsNotFollowing FollowStatus = iota
215215+ IsFollowing
216216+ IsSelf
217217+)
218218+219219+func (s FollowStatus) String() string {
220220+ switch s {
221221+ case IsNotFollowing:
222222+ return "IsNotFollowing"
223223+ case IsFollowing:
224224+ return "IsFollowing"
225225+ case IsSelf:
226226+ return "IsSelf"
227227+ default:
228228+ return "IsNotFollowing"
134229 }
230230+}
135231136136- return follows, nil
232232+func GetFollowStatus(e Execer, userDid, subjectDid string) FollowStatus {
233233+ if userDid == subjectDid {
234234+ return IsSelf
235235+ } else if _, err := GetFollow(e, userDid, subjectDid); err != nil {
236236+ return IsNotFollowing
237237+ } else {
238238+ return IsFollowing
239239+ }
137240}
+459-311
appview/db/issues.go
···2233import (
44 "database/sql"
55+ "fmt"
66+ "maps"
77+ "slices"
88+ "sort"
99+ "strings"
510 "time"
611712 "github.com/bluesky-social/indigo/atproto/syntax"
1313+ "tangled.sh/tangled.sh/core/api/tangled"
814 "tangled.sh/tangled.sh/core/appview/pagination"
915)
10161117type Issue struct {
1212- ID int64
1313- RepoAt syntax.ATURI
1414- OwnerDid string
1515- IssueId int
1616- IssueAt string
1717- Created time.Time
1818- Title string
1919- Body string
2020- Open bool
1818+ Id int64
1919+ Did string
2020+ Rkey string
2121+ RepoAt syntax.ATURI
2222+ IssueId int
2323+ Created time.Time
2424+ Edited *time.Time
2525+ Deleted *time.Time
2626+ Title string
2727+ Body string
2828+ Open bool
21292230 // optionally, populate this when querying for reverse mappings
2331 // like comment counts, parent repo etc.
2424- Metadata *IssueMetadata
3232+ Comments []IssueComment
3333+ Repo *Repo
2534}
26352727-type IssueMetadata struct {
2828- CommentCount int
2929- Repo *Repo
3030- // labels, assignee etc.
3636+func (i *Issue) AtUri() syntax.ATURI {
3737+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueNSID, i.Rkey))
3838+}
3939+4040+func (i *Issue) AsRecord() tangled.RepoIssue {
4141+ return tangled.RepoIssue{
4242+ Repo: i.RepoAt.String(),
4343+ Title: i.Title,
4444+ Body: &i.Body,
4545+ CreatedAt: i.Created.Format(time.RFC3339),
4646+ }
4747+}
4848+4949+func (i *Issue) State() string {
5050+ if i.Open {
5151+ return "open"
5252+ }
5353+ return "closed"
3154}
32553333-type Comment struct {
3434- OwnerDid string
3535- RepoAt syntax.ATURI
3636- Rkey string
3737- Issue int
3838- CommentId int
3939- Body string
4040- Created *time.Time
4141- Deleted *time.Time
4242- Edited *time.Time
5656+type CommentListItem struct {
5757+ Self *IssueComment
5858+ Replies []*IssueComment
4359}
44604545-func NewIssue(tx *sql.Tx, issue *Issue) error {
4646- defer tx.Rollback()
6161+func (i *Issue) CommentList() []CommentListItem {
6262+ // Create a map to quickly find comments by their aturi
6363+ toplevel := make(map[string]*CommentListItem)
6464+ var replies []*IssueComment
47654848- _, err := tx.Exec(`
4949- insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
5050- values (?, 1)
5151- `, issue.RepoAt)
5252- if err != nil {
5353- return err
6666+ // collect top level comments into the map
6767+ for _, comment := range i.Comments {
6868+ if comment.IsTopLevel() {
6969+ toplevel[comment.AtUri().String()] = &CommentListItem{
7070+ Self: &comment,
7171+ }
7272+ } else {
7373+ replies = append(replies, &comment)
7474+ }
7575+ }
7676+7777+ for _, r := range replies {
7878+ parentAt := *r.ReplyTo
7979+ if parent, exists := toplevel[parentAt]; exists {
8080+ parent.Replies = append(parent.Replies, r)
8181+ }
8282+ }
8383+8484+ var listing []CommentListItem
8585+ for _, v := range toplevel {
8686+ listing = append(listing, *v)
5487 }
55885656- var nextId int
5757- err = tx.QueryRow(`
5858- update repo_issue_seqs
5959- set next_issue_id = next_issue_id + 1
6060- where repo_at = ?
6161- returning next_issue_id - 1
6262- `, issue.RepoAt).Scan(&nextId)
6363- if err != nil {
6464- return err
8989+ // sort everything
9090+ sortFunc := func(a, b *IssueComment) bool {
9191+ return a.Created.Before(b.Created)
9292+ }
9393+ sort.Slice(listing, func(i, j int) bool {
9494+ return sortFunc(listing[i].Self, listing[j].Self)
9595+ })
9696+ for _, r := range listing {
9797+ sort.Slice(r.Replies, func(i, j int) bool {
9898+ return sortFunc(r.Replies[i], r.Replies[j])
9999+ })
65100 }
661016767- issue.IssueId = nextId
102102+ return listing
103103+}
681046969- res, err := tx.Exec(`
7070- insert into issues (repo_at, owner_did, issue_id, title, body)
7171- values (?, ?, ?, ?, ?)
7272- `, issue.RepoAt, issue.OwnerDid, issue.IssueId, issue.Title, issue.Body)
105105+func IssueFromRecord(did, rkey string, record tangled.RepoIssue) Issue {
106106+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
73107 if err != nil {
7474- return err
108108+ created = time.Now()
75109 }
761107777- lastID, err := res.LastInsertId()
7878- if err != nil {
7979- return err
111111+ body := ""
112112+ if record.Body != nil {
113113+ body = *record.Body
80114 }
8181- issue.ID = lastID
821158383- if err := tx.Commit(); err != nil {
8484- return err
116116+ return Issue{
117117+ RepoAt: syntax.ATURI(record.Repo),
118118+ Did: did,
119119+ Rkey: rkey,
120120+ Created: created,
121121+ Title: record.Title,
122122+ Body: body,
123123+ Open: true, // new issues are open by default
85124 }
125125+}
861268787- return nil
127127+type IssueComment struct {
128128+ Id int64
129129+ Did string
130130+ Rkey string
131131+ IssueAt string
132132+ ReplyTo *string
133133+ Body string
134134+ Created time.Time
135135+ Edited *time.Time
136136+ Deleted *time.Time
88137}
891389090-func SetIssueAt(e Execer, repoAt syntax.ATURI, issueId int, issueAt string) error {
9191- _, err := e.Exec(`update issues set issue_at = ? where repo_at = ? and issue_id = ?`, issueAt, repoAt, issueId)
9292- return err
139139+func (i *IssueComment) AtUri() syntax.ATURI {
140140+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueCommentNSID, i.Rkey))
93141}
941429595-func GetIssueAt(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
9696- var issueAt string
9797- err := e.QueryRow(`select issue_at from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&issueAt)
9898- return issueAt, err
143143+func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
144144+ return tangled.RepoIssueComment{
145145+ Body: i.Body,
146146+ Issue: i.IssueAt,
147147+ CreatedAt: i.Created.Format(time.RFC3339),
148148+ ReplyTo: i.ReplyTo,
149149+ }
99150}
100151101101-func GetIssueOwnerDid(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
102102- var ownerDid string
103103- err := e.QueryRow(`select owner_did from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&ownerDid)
104104- return ownerDid, err
152152+func (i *IssueComment) IsTopLevel() bool {
153153+ return i.ReplyTo == nil
105154}
106155107107-func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
108108- var issues []Issue
109109- openValue := 0
110110- if isOpen {
111111- openValue = 1
156156+func IssueCommentFromRecord(e Execer, did, rkey string, record tangled.RepoIssueComment) (*IssueComment, error) {
157157+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
158158+ if err != nil {
159159+ created = time.Now()
112160 }
113161114114- rows, err := e.Query(
115115- `
116116- with numbered_issue as (
117117- select
118118- i.id,
119119- i.owner_did,
120120- i.issue_id,
121121- i.created,
122122- i.title,
123123- i.body,
124124- i.open,
125125- count(c.id) as comment_count,
126126- row_number() over (order by i.created desc) as row_num
127127- from
128128- issues i
129129- left join
130130- comments c on i.repo_at = c.repo_at and i.issue_id = c.issue_id
131131- where
132132- i.repo_at = ? and i.open = ?
133133- group by
134134- i.id, i.owner_did, i.issue_id, i.created, i.title, i.body, i.open
135135- )
136136- select
137137- id,
138138- owner_did,
139139- issue_id,
140140- created,
141141- title,
142142- body,
143143- open,
144144- comment_count
145145- from
146146- numbered_issue
147147- where
148148- row_num between ? and ?`,
149149- repoAt, openValue, page.Offset+1, page.Offset+page.Limit)
150150- if err != nil {
162162+ ownerDid := did
163163+164164+ if _, err = syntax.ParseATURI(record.Issue); err != nil {
151165 return nil, err
152166 }
153153- defer rows.Close()
154167155155- for rows.Next() {
156156- var issue Issue
157157- var createdAt string
158158- var metadata IssueMetadata
159159- err := rows.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
160160- if err != nil {
161161- return nil, err
162162- }
168168+ comment := IssueComment{
169169+ Did: ownerDid,
170170+ Rkey: rkey,
171171+ Body: record.Body,
172172+ IssueAt: record.Issue,
173173+ ReplyTo: record.ReplyTo,
174174+ Created: created,
175175+ }
163176164164- createdTime, err := time.Parse(time.RFC3339, createdAt)
165165- if err != nil {
166166- return nil, err
177177+ return &comment, nil
178178+}
179179+180180+func PutIssue(tx *sql.Tx, issue *Issue) error {
181181+ // ensure sequence exists
182182+ _, err := tx.Exec(`
183183+ insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
184184+ values (?, 1)
185185+ `, issue.RepoAt)
186186+ if err != nil {
187187+ return err
188188+ }
189189+190190+ issues, err := GetIssues(
191191+ tx,
192192+ FilterEq("did", issue.Did),
193193+ FilterEq("rkey", issue.Rkey),
194194+ )
195195+ switch {
196196+ case err != nil:
197197+ return err
198198+ case len(issues) == 0:
199199+ return createNewIssue(tx, issue)
200200+ case len(issues) != 1: // should be unreachable
201201+ return fmt.Errorf("invalid number of issues returned: %d", len(issues))
202202+ default:
203203+ // if content is identical, do not edit
204204+ existingIssue := issues[0]
205205+ if existingIssue.Title == issue.Title && existingIssue.Body == issue.Body {
206206+ return nil
167207 }
168168- issue.Created = createdTime
169169- issue.Metadata = &metadata
170208171171- issues = append(issues, issue)
209209+ issue.Id = existingIssue.Id
210210+ issue.IssueId = existingIssue.IssueId
211211+ return updateIssue(tx, issue)
172212 }
213213+}
173214174174- if err := rows.Err(); err != nil {
175175- return nil, err
215215+func createNewIssue(tx *sql.Tx, issue *Issue) error {
216216+ // get next issue_id
217217+ var newIssueId int
218218+ err := tx.QueryRow(`
219219+ update repo_issue_seqs
220220+ set next_issue_id = next_issue_id + 1
221221+ where repo_at = ?
222222+ returning next_issue_id - 1
223223+ `, issue.RepoAt).Scan(&newIssueId)
224224+ if err != nil {
225225+ return err
176226 }
177227178178- return issues, nil
228228+ // insert new issue
229229+ row := tx.QueryRow(`
230230+ insert into issues (repo_at, did, rkey, issue_id, title, body)
231231+ values (?, ?, ?, ?, ?, ?)
232232+ returning rowid, issue_id
233233+ `, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body)
234234+235235+ return row.Scan(&issue.Id, &issue.IssueId)
179236}
180237181181-// timeframe here is directly passed into the sql query filter, and any
182182-// timeframe in the past should be negative; e.g.: "-3 months"
183183-func GetIssuesByOwnerDid(e Execer, ownerDid string, timeframe string) ([]Issue, error) {
184184- var issues []Issue
238238+func updateIssue(tx *sql.Tx, issue *Issue) error {
239239+ // update existing issue
240240+ _, err := tx.Exec(`
241241+ update issues
242242+ set title = ?, body = ?, edited = ?
243243+ where did = ? and rkey = ?
244244+ `, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey)
245245+ return err
246246+}
185247186186- rows, err := e.Query(
187187- `select
188188- i.id,
189189- i.owner_did,
190190- i.repo_at,
191191- i.issue_id,
192192- i.created,
193193- i.title,
194194- i.body,
195195- i.open,
196196- r.did,
197197- r.name,
198198- r.knot,
199199- r.rkey,
200200- r.created
201201- from
202202- issues i
203203- join
204204- repos r on i.repo_at = r.at_uri
205205- where
206206- i.owner_did = ? and i.created >= date ('now', ?)
207207- order by
208208- i.created desc`,
209209- ownerDid, timeframe)
248248+func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]Issue, error) {
249249+ issueMap := make(map[string]*Issue) // at-uri -> issue
250250+251251+ var conditions []string
252252+ var args []any
253253+254254+ for _, filter := range filters {
255255+ conditions = append(conditions, filter.Condition())
256256+ args = append(args, filter.Arg()...)
257257+ }
258258+259259+ whereClause := ""
260260+ if conditions != nil {
261261+ whereClause = " where " + strings.Join(conditions, " and ")
262262+ }
263263+264264+ pLower := FilterGte("row_num", page.Offset+1)
265265+ pUpper := FilterLte("row_num", page.Offset+page.Limit)
266266+267267+ args = append(args, pLower.Arg()...)
268268+ args = append(args, pUpper.Arg()...)
269269+ pagination := " where " + pLower.Condition() + " and " + pUpper.Condition()
270270+271271+ query := fmt.Sprintf(
272272+ `
273273+ select * from (
274274+ select
275275+ id,
276276+ did,
277277+ rkey,
278278+ repo_at,
279279+ issue_id,
280280+ title,
281281+ body,
282282+ open,
283283+ created,
284284+ edited,
285285+ deleted,
286286+ row_number() over (order by created desc) as row_num
287287+ from
288288+ issues
289289+ %s
290290+ ) ranked_issues
291291+ %s
292292+ `,
293293+ whereClause,
294294+ pagination,
295295+ )
296296+297297+ rows, err := e.Query(query, args...)
210298 if err != nil {
211211- return nil, err
299299+ return nil, fmt.Errorf("failed to query issues table: %w", err)
212300 }
213301 defer rows.Close()
214302215303 for rows.Next() {
216304 var issue Issue
217217- var issueCreatedAt, repoCreatedAt string
218218- var repo Repo
305305+ var createdAt string
306306+ var editedAt, deletedAt sql.Null[string]
307307+ var rowNum int64
219308 err := rows.Scan(
220220- &issue.ID,
221221- &issue.OwnerDid,
309309+ &issue.Id,
310310+ &issue.Did,
311311+ &issue.Rkey,
222312 &issue.RepoAt,
223313 &issue.IssueId,
224224- &issueCreatedAt,
225314 &issue.Title,
226315 &issue.Body,
227316 &issue.Open,
228228- &repo.Did,
229229- &repo.Name,
230230- &repo.Knot,
231231- &repo.Rkey,
232232- &repoCreatedAt,
317317+ &createdAt,
318318+ &editedAt,
319319+ &deletedAt,
320320+ &rowNum,
233321 )
234322 if err != nil {
235235- return nil, err
323323+ return nil, fmt.Errorf("failed to scan issue: %w", err)
236324 }
237325238238- issueCreatedTime, err := time.Parse(time.RFC3339, issueCreatedAt)
239239- if err != nil {
240240- return nil, err
326326+ if t, err := time.Parse(time.RFC3339, createdAt); err == nil {
327327+ issue.Created = t
241328 }
242242- issue.Created = issueCreatedTime
243329244244- repoCreatedTime, err := time.Parse(time.RFC3339, repoCreatedAt)
245245- if err != nil {
246246- return nil, err
330330+ if editedAt.Valid {
331331+ if t, err := time.Parse(time.RFC3339, editedAt.V); err == nil {
332332+ issue.Edited = &t
333333+ }
334334+ }
335335+336336+ if deletedAt.Valid {
337337+ if t, err := time.Parse(time.RFC3339, deletedAt.V); err == nil {
338338+ issue.Deleted = &t
339339+ }
247340 }
248248- repo.Created = repoCreatedTime
249341250250- issue.Metadata = &IssueMetadata{
251251- Repo: &repo,
342342+ atUri := issue.AtUri().String()
343343+ issueMap[atUri] = &issue
344344+ }
345345+346346+ // collect reverse repos
347347+ repoAts := make([]string, 0, len(issueMap)) // or just []string{}
348348+ for _, issue := range issueMap {
349349+ repoAts = append(repoAts, string(issue.RepoAt))
350350+ }
351351+352352+ repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
353353+ if err != nil {
354354+ return nil, fmt.Errorf("failed to build repo mappings: %w", err)
355355+ }
356356+357357+ repoMap := make(map[string]*Repo)
358358+ for i := range repos {
359359+ repoMap[string(repos[i].RepoAt())] = &repos[i]
360360+ }
361361+362362+ for issueAt, i := range issueMap {
363363+ if r, ok := repoMap[string(i.RepoAt)]; ok {
364364+ i.Repo = r
365365+ } else {
366366+ // do not show up the issue if the repo is deleted
367367+ // TODO: foreign key where?
368368+ delete(issueMap, issueAt)
252369 }
370370+ }
253371254254- issues = append(issues, issue)
372372+ // collect comments
373373+ issueAts := slices.Collect(maps.Keys(issueMap))
374374+ comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
375375+ if err != nil {
376376+ return nil, fmt.Errorf("failed to query comments: %w", err)
255377 }
256378257257- if err := rows.Err(); err != nil {
258258- return nil, err
379379+ for i := range comments {
380380+ issueAt := comments[i].IssueAt
381381+ if issue, ok := issueMap[issueAt]; ok {
382382+ issue.Comments = append(issue.Comments, comments[i])
383383+ }
384384+ }
385385+386386+ var issues []Issue
387387+ for _, i := range issueMap {
388388+ issues = append(issues, *i)
259389 }
260390391391+ sort.Slice(issues, func(i, j int) bool {
392392+ return issues[i].Created.After(issues[j].Created)
393393+ })
394394+261395 return issues, nil
396396+}
397397+398398+func GetIssues(e Execer, filters ...filter) ([]Issue, error) {
399399+ return GetIssuesPaginated(e, pagination.FirstPage(), filters...)
262400}
263401264402func GetIssue(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, error) {
265265- query := `select id, owner_did, created, title, body, open from issues where repo_at = ? and issue_id = ?`
403403+ query := `select id, owner_did, rkey, created, title, body, open from issues where repo_at = ? and issue_id = ?`
266404 row := e.QueryRow(query, repoAt, issueId)
267405268406 var issue Issue
269407 var createdAt string
270270- err := row.Scan(&issue.ID, &issue.OwnerDid, &createdAt, &issue.Title, &issue.Body, &issue.Open)
408408+ err := row.Scan(&issue.Id, &issue.Did, &issue.Rkey, &createdAt, &issue.Title, &issue.Body, &issue.Open)
271409 if err != nil {
272410 return nil, err
273411 }
···281419 return &issue, nil
282420}
283421284284-func GetIssueWithComments(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, []Comment, error) {
285285- query := `select id, owner_did, issue_id, created, title, body, open, issue_at from issues where repo_at = ? and issue_id = ?`
286286- row := e.QueryRow(query, repoAt, issueId)
287287-288288- var issue Issue
289289- var createdAt string
290290- err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &issue.IssueAt)
422422+func AddIssueComment(e Execer, c IssueComment) (int64, error) {
423423+ result, err := e.Exec(
424424+ `insert into issue_comments (
425425+ did,
426426+ rkey,
427427+ issue_at,
428428+ body,
429429+ reply_to,
430430+ created,
431431+ edited
432432+ )
433433+ values (?, ?, ?, ?, ?, ?, null)
434434+ on conflict(did, rkey) do update set
435435+ issue_at = excluded.issue_at,
436436+ body = excluded.body,
437437+ edited = case
438438+ when
439439+ issue_comments.issue_at != excluded.issue_at
440440+ or issue_comments.body != excluded.body
441441+ or issue_comments.reply_to != excluded.reply_to
442442+ then ?
443443+ else issue_comments.edited
444444+ end`,
445445+ c.Did,
446446+ c.Rkey,
447447+ c.IssueAt,
448448+ c.Body,
449449+ c.ReplyTo,
450450+ c.Created.Format(time.RFC3339),
451451+ time.Now().Format(time.RFC3339),
452452+ )
291453 if err != nil {
292292- return nil, nil, err
454454+ return 0, err
293455 }
294456295295- createdTime, err := time.Parse(time.RFC3339, createdAt)
457457+ id, err := result.LastInsertId()
296458 if err != nil {
297297- return nil, nil, err
459459+ return 0, err
298460 }
299299- issue.Created = createdTime
300461301301- comments, err := GetComments(e, repoAt, issueId)
302302- if err != nil {
303303- return nil, nil, err
462462+ return id, nil
463463+}
464464+465465+func DeleteIssueComments(e Execer, filters ...filter) error {
466466+ var conditions []string
467467+ var args []any
468468+ for _, filter := range filters {
469469+ conditions = append(conditions, filter.Condition())
470470+ args = append(args, filter.Arg()...)
304471 }
305472306306- return &issue, comments, nil
307307-}
473473+ whereClause := ""
474474+ if conditions != nil {
475475+ whereClause = " where " + strings.Join(conditions, " and ")
476476+ }
308477309309-func NewIssueComment(e Execer, comment *Comment) error {
310310- query := `insert into comments (owner_did, repo_at, rkey, issue_id, comment_id, body) values (?, ?, ?, ?, ?, ?)`
311311- _, err := e.Exec(
312312- query,
313313- comment.OwnerDid,
314314- comment.RepoAt,
315315- comment.Rkey,
316316- comment.Issue,
317317- comment.CommentId,
318318- comment.Body,
319319- )
478478+ query := fmt.Sprintf(`update issue_comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
479479+480480+ _, err := e.Exec(query, args...)
320481 return err
321482}
322483323323-func GetComments(e Execer, repoAt syntax.ATURI, issueId int) ([]Comment, error) {
324324- var comments []Comment
484484+func GetIssueComments(e Execer, filters ...filter) ([]IssueComment, error) {
485485+ var comments []IssueComment
486486+487487+ var conditions []string
488488+ var args []any
489489+ for _, filter := range filters {
490490+ conditions = append(conditions, filter.Condition())
491491+ args = append(args, filter.Arg()...)
492492+ }
325493326326- rows, err := e.Query(`
494494+ whereClause := ""
495495+ if conditions != nil {
496496+ whereClause = " where " + strings.Join(conditions, " and ")
497497+ }
498498+499499+ query := fmt.Sprintf(`
327500 select
328328- owner_did,
329329- issue_id,
330330- comment_id,
501501+ id,
502502+ did,
331503 rkey,
504504+ issue_at,
505505+ reply_to,
332506 body,
333507 created,
334508 edited,
335509 deleted
336510 from
337337- comments
338338- where
339339- repo_at = ? and issue_id = ?
340340- order by
341341- created asc`,
342342- repoAt,
343343- issueId,
344344- )
345345- if err == sql.ErrNoRows {
346346- return []Comment{}, nil
347347- }
511511+ issue_comments
512512+ %s
513513+ `, whereClause)
514514+515515+ rows, err := e.Query(query, args...)
348516 if err != nil {
349517 return nil, err
350518 }
351351- defer rows.Close()
352519353520 for rows.Next() {
354354- var comment Comment
355355- var createdAt string
356356- var deletedAt, editedAt, rkey sql.NullString
357357- err := rows.Scan(&comment.OwnerDid, &comment.Issue, &comment.CommentId, &rkey, &comment.Body, &createdAt, &editedAt, &deletedAt)
521521+ var comment IssueComment
522522+ var created string
523523+ var rkey, edited, deleted, replyTo sql.Null[string]
524524+ err := rows.Scan(
525525+ &comment.Id,
526526+ &comment.Did,
527527+ &rkey,
528528+ &comment.IssueAt,
529529+ &replyTo,
530530+ &comment.Body,
531531+ &created,
532532+ &edited,
533533+ &deleted,
534534+ )
358535 if err != nil {
359536 return nil, err
360537 }
361538362362- createdAtTime, err := time.Parse(time.RFC3339, createdAt)
363363- if err != nil {
364364- return nil, err
539539+ // this is a remnant from old times, newer comments always have rkey
540540+ if rkey.Valid {
541541+ comment.Rkey = rkey.V
365542 }
366366- comment.Created = &createdAtTime
367543368368- if deletedAt.Valid {
369369- deletedTime, err := time.Parse(time.RFC3339, deletedAt.String)
370370- if err != nil {
371371- return nil, err
544544+ if t, err := time.Parse(time.RFC3339, created); err == nil {
545545+ comment.Created = t
546546+ }
547547+548548+ if edited.Valid {
549549+ if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
550550+ comment.Edited = &t
372551 }
373373- comment.Deleted = &deletedTime
374552 }
375553376376- if editedAt.Valid {
377377- editedTime, err := time.Parse(time.RFC3339, editedAt.String)
378378- if err != nil {
379379- return nil, err
554554+ if deleted.Valid {
555555+ if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
556556+ comment.Deleted = &t
380557 }
381381- comment.Edited = &editedTime
382558 }
383559384384- if rkey.Valid {
385385- comment.Rkey = rkey.String
560560+ if replyTo.Valid {
561561+ comment.ReplyTo = &replyTo.V
386562 }
387563388564 comments = append(comments, comment)
389565 }
390566391391- if err := rows.Err(); err != nil {
567567+ if err = rows.Err(); err != nil {
392568 return nil, err
393569 }
394570395571 return comments, nil
396572}
397573398398-func GetComment(e Execer, repoAt syntax.ATURI, issueId, commentId int) (*Comment, error) {
399399- query := `
400400- select
401401- owner_did, body, rkey, created, deleted, edited
402402- from
403403- comments where repo_at = ? and issue_id = ? and comment_id = ?
404404- `
405405- row := e.QueryRow(query, repoAt, issueId, commentId)
406406-407407- var comment Comment
408408- var createdAt string
409409- var deletedAt, editedAt, rkey sql.NullString
410410- err := row.Scan(&comment.OwnerDid, &comment.Body, &rkey, &createdAt, &deletedAt, &editedAt)
411411- if err != nil {
412412- return nil, err
574574+func DeleteIssues(e Execer, filters ...filter) error {
575575+ var conditions []string
576576+ var args []any
577577+ for _, filter := range filters {
578578+ conditions = append(conditions, filter.Condition())
579579+ args = append(args, filter.Arg()...)
413580 }
414581415415- createdTime, err := time.Parse(time.RFC3339, createdAt)
416416- if err != nil {
417417- return nil, err
582582+ whereClause := ""
583583+ if conditions != nil {
584584+ whereClause = " where " + strings.Join(conditions, " and ")
418585 }
419419- comment.Created = &createdTime
420586421421- if deletedAt.Valid {
422422- deletedTime, err := time.Parse(time.RFC3339, deletedAt.String)
423423- if err != nil {
424424- return nil, err
425425- }
426426- comment.Deleted = &deletedTime
427427- }
587587+ query := fmt.Sprintf(`delete from issues %s`, whereClause)
588588+ _, err := e.Exec(query, args...)
589589+ return err
590590+}
428591429429- if editedAt.Valid {
430430- editedTime, err := time.Parse(time.RFC3339, editedAt.String)
431431- if err != nil {
432432- return nil, err
433433- }
434434- comment.Edited = &editedTime
592592+func CloseIssues(e Execer, filters ...filter) error {
593593+ var conditions []string
594594+ var args []any
595595+ for _, filter := range filters {
596596+ conditions = append(conditions, filter.Condition())
597597+ args = append(args, filter.Arg()...)
435598 }
436599437437- if rkey.Valid {
438438- comment.Rkey = rkey.String
600600+ whereClause := ""
601601+ if conditions != nil {
602602+ whereClause = " where " + strings.Join(conditions, " and ")
439603 }
440604441441- comment.RepoAt = repoAt
442442- comment.Issue = issueId
443443- comment.CommentId = commentId
444444-445445- return &comment, nil
446446-}
447447-448448-func EditComment(e Execer, repoAt syntax.ATURI, issueId, commentId int, newBody string) error {
449449- _, err := e.Exec(
450450- `
451451- update comments
452452- set body = ?,
453453- edited = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
454454- where repo_at = ? and issue_id = ? and comment_id = ?
455455- `, newBody, repoAt, issueId, commentId)
605605+ query := fmt.Sprintf(`update issues set open = 0 %s`, whereClause)
606606+ _, err := e.Exec(query, args...)
456607 return err
457608}
458609459459-func DeleteComment(e Execer, repoAt syntax.ATURI, issueId, commentId int) error {
460460- _, err := e.Exec(
461461- `
462462- update comments
463463- set body = "",
464464- deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
465465- where repo_at = ? and issue_id = ? and comment_id = ?
466466- `, repoAt, issueId, commentId)
467467- return err
468468-}
610610+func ReopenIssues(e Execer, filters ...filter) error {
611611+ var conditions []string
612612+ var args []any
613613+ for _, filter := range filters {
614614+ conditions = append(conditions, filter.Condition())
615615+ args = append(args, filter.Arg()...)
616616+ }
469617470470-func CloseIssue(e Execer, repoAt syntax.ATURI, issueId int) error {
471471- _, err := e.Exec(`update issues set open = 0 where repo_at = ? and issue_id = ?`, repoAt, issueId)
472472- return err
473473-}
618618+ whereClause := ""
619619+ if conditions != nil {
620620+ whereClause = " where " + strings.Join(conditions, " and ")
621621+ }
474622475475-func ReopenIssue(e Execer, repoAt syntax.ATURI, issueId int) error {
476476- _, err := e.Exec(`update issues set open = 1 where repo_at = ? and issue_id = ?`, repoAt, issueId)
623623+ query := fmt.Sprintf(`update issues set open = 1 %s`, whereClause)
624624+ _, err := e.Exec(query, args...)
477625 return err
478626}
479627
+25-12
appview/db/profile.go
···2222 ByMonth []ByMonth
2323}
24242525+func (p *ProfileTimeline) IsEmpty() bool {
2626+ if p == nil {
2727+ return true
2828+ }
2929+3030+ for _, m := range p.ByMonth {
3131+ if !m.IsEmpty() {
3232+ return false
3333+ }
3434+ }
3535+3636+ return true
3737+}
3838+2539type ByMonth struct {
2640 RepoEvents []RepoEvent
2741 IssueEvents IssueEvents
···118132 *items = append(*items, &pull)
119133 }
120134121121- issues, err := GetIssuesByOwnerDid(e, forDid, timeframe)
135135+ issues, err := GetIssues(
136136+ e,
137137+ FilterEq("did", forDid),
138138+ FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
139139+ )
122140 if err != nil {
123141 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
124142 }
···137155 *items = append(*items, &issue)
138156 }
139157140140- repos, err := GetAllReposByDid(e, forDid)
158158+ repos, err := GetRepos(e, 0, FilterEq("did", forDid))
141159 if err != nil {
142160 return nil, fmt.Errorf("error getting all repos by did: %w", err)
143161 }
···348366 return tx.Commit()
349367}
350368351351-func GetProfiles(e Execer, filters ...filter) ([]Profile, error) {
369369+func GetProfiles(e Execer, filters ...filter) (map[string]*Profile, error) {
352370 var conditions []string
353371 var args []any
354372 for _, filter := range filters {
···448466 idxs[did] = idx + 1
449467 }
450468451451- var profiles []Profile
452452- for _, p := range profileMap {
453453- profiles = append(profiles, *p)
454454- }
455455-456456- return profiles, nil
469469+ return profileMap, nil
457470}
458471459472func GetProfile(e Execer, did string) (*Profile, error) {
···540553 query = `select count(id) from pulls where owner_did = ? and state = ?`
541554 args = append(args, did, PullOpen)
542555 case VanityStatOpenIssueCount:
543543- query = `select count(id) from issues where owner_did = ? and open = 1`
556556+ query = `select count(id) from issues where did = ? and open = 1`
544557 args = append(args, did)
545558 case VanityStatClosedIssueCount:
546546- query = `select count(id) from issues where owner_did = ? and open = 0`
559559+ query = `select count(id) from issues where did = ? and open = 0`
547560 args = append(args, did)
548561 case VanityStatRepositoryCount:
549562 query = `select count(id) from repos where did = ?`
···577590 }
578591579592 // ensure all pinned repos are either own repos or collaborating repos
580580- repos, err := GetAllReposByDid(e, profile.Did)
593593+ repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
581594 if err != nil {
582595 log.Printf("getting repos for %s: %s", profile.Did, err)
583596 }
···1313 FormatMarkdown: []string{".md", ".markdown", ".mdown", ".mkdn", ".mkd"},
1414}
15151616+// ReadmeFilenames contains the list of common README filenames to search for,
1717+// in order of preference. Only includes well-supported formats.
1818+var ReadmeFilenames = []string{
1919+ "README.md", "readme.md",
2020+ "README",
2121+ "readme",
2222+ "README.markdown",
2323+ "readme.markdown",
2424+ "README.txt",
2525+ "readme.txt",
2626+}
2727+1628func GetFormat(filename string) Format {
1729 for format, extensions := range FileTypes {
1830 for _, extension := range extensions {
···11-{{ define "title" }} privacy policy {{ end }}
11+{{ define "title" }}privacy policy{{ end }}
22+23{{ define "content" }}
34<div class="max-w-4xl mx-auto px-4 py-8">
45 <div class="bg-white dark:bg-gray-800 rounded-lg shadow-sm p-8">
56 <div class="prose prose-gray dark:prose-invert max-w-none">
66- <h1>Privacy Policy</h1>
77-88- <p><strong>Last updated:</strong> {{ now.Format "January 2, 2006" }}</p>
99-1010- <p>This Privacy Policy describes how Tangled ("we," "us," or "our") collects, uses, and shares your personal information when you use our platform and services (the "Service").</p>
1111-1212- <h2>1. Information We Collect</h2>
1313-1414- <h3>Account Information</h3>
1515- <p>When you create an account, we collect:</p>
1616- <ul>
1717- <li>Your chosen username</li>
1818- <li>Email address</li>
1919- <li>Profile information you choose to provide</li>
2020- <li>Authentication data</li>
2121- </ul>
2222-2323- <h3>Content and Activity</h3>
2424- <p>We store:</p>
2525- <ul>
2626- <li>Code repositories and associated metadata</li>
2727- <li>Issues, pull requests, and comments</li>
2828- <li>Activity logs and usage patterns</li>
2929- <li>Public keys for authentication</li>
3030- </ul>
3131-3232- <h2>2. Data Location and Hosting</h2>
3333- <div class="bg-blue-50 dark:bg-blue-900/20 border border-blue-200 dark:border-blue-800 rounded-lg p-4 my-6">
3434- <h3 class="text-blue-800 dark:text-blue-200 font-semibold mb-2">EU Data Hosting</h3>
3535- <p class="text-blue-700 dark:text-blue-300">
3636- <strong>All Tangled service data is hosted within the European Union.</strong> Specifically:
3737- </p>
3838- <ul class="text-blue-700 dark:text-blue-300 mt-2">
3939- <li><strong>Personal Data Servers (PDS):</strong> Accounts hosted on Tangled PDS (*.tngl.sh) are located in Finland</li>
4040- <li><strong>Application Data:</strong> All other service data is stored on EU-based servers</li>
4141- <li><strong>Data Processing:</strong> All data processing occurs within EU jurisdiction</li>
4242- </ul>
4343- </div>
4444-4545- <div class="bg-yellow-50 dark:bg-yellow-900/20 border border-yellow-200 dark:border-yellow-800 rounded-lg p-4 my-6">
4646- <h3 class="text-yellow-800 dark:text-yellow-200 font-semibold mb-2">External PDS Notice</h3>
4747- <p class="text-yellow-700 dark:text-yellow-300">
4848- <strong>Important:</strong> If your account is hosted on Bluesky's PDS or other self-hosted Personal Data Servers (not *.tngl.sh), we do not control that data. The data protection, storage location, and privacy practices for such accounts are governed by the respective PDS provider's policies, not this Privacy Policy. We only control data processing within our own services and infrastructure.
4949- </p>
5050- </div>
5151-5252- <h2>3. Third-Party Data Processors</h2>
5353- <p>We only share your data with the following third-party processors:</p>
5454-5555- <h3>Resend (Email Services)</h3>
5656- <ul>
5757- <li><strong>Purpose:</strong> Sending transactional emails (account verification, notifications)</li>
5858- <li><strong>Data Shared:</strong> Email address and necessary message content</li>
5959- <li><strong>Location:</strong> EU-compliant email delivery service</li>
6060- </ul>
6161-6262- <h3>Cloudflare (Image Caching)</h3>
6363- <ul>
6464- <li><strong>Purpose:</strong> Caching and optimizing image delivery</li>
6565- <li><strong>Data Shared:</strong> Public images and associated metadata for caching purposes</li>
6666- <li><strong>Location:</strong> Global CDN with EU data protection compliance</li>
6767- </ul>
6868-6969- <h2>4. How We Use Your Information</h2>
7070- <p>We use your information to:</p>
7171- <ul>
7272- <li>Provide and maintain the Service</li>
7373- <li>Process your transactions and requests</li>
7474- <li>Send you technical notices and support messages</li>
7575- <li>Improve and develop new features</li>
7676- <li>Ensure security and prevent fraud</li>
7777- <li>Comply with legal obligations</li>
7878- </ul>
7979-8080- <h2>5. Data Sharing and Disclosure</h2>
8181- <p>We do not sell, trade, or rent your personal information. We may share your information only in the following circumstances:</p>
8282- <ul>
8383- <li>With the third-party processors listed above</li>
8484- <li>When required by law or legal process</li>
8585- <li>To protect our rights, property, or safety, or that of our users</li>
8686- <li>In connection with a merger, acquisition, or sale of assets (with appropriate protections)</li>
8787- </ul>
8888-8989- <h2>6. Data Security</h2>
9090- <p>We implement appropriate technical and organizational measures to protect your personal information against unauthorized access, alteration, disclosure, or destruction. However, no method of transmission over the Internet is 100% secure.</p>
9191-9292- <h2>7. Data Retention</h2>
9393- <p>We retain your personal information for as long as necessary to provide the Service and fulfill the purposes outlined in this Privacy Policy, unless a longer retention period is required by law.</p>
9494-9595- <h2>8. Your Rights</h2>
9696- <p>Under applicable data protection laws, you have the right to:</p>
9797- <ul>
9898- <li>Access your personal information</li>
9999- <li>Correct inaccurate information</li>
100100- <li>Request deletion of your information</li>
101101- <li>Object to processing of your information</li>
102102- <li>Data portability</li>
103103- <li>Withdraw consent (where applicable)</li>
104104- </ul>
105105-106106- <h2>9. Cookies and Tracking</h2>
107107- <p>We use cookies and similar technologies to:</p>
108108- <ul>
109109- <li>Maintain your login session</li>
110110- <li>Remember your preferences</li>
111111- <li>Analyze usage patterns to improve the Service</li>
112112- </ul>
113113- <p>You can control cookie settings through your browser preferences.</p>
114114-115115- <h2>10. Children's Privacy</h2>
116116- <p>The Service is not intended for children under 16 years of age. We do not knowingly collect personal information from children under 16. If we become aware that we have collected such information, we will take steps to delete it.</p>
117117-118118- <h2>11. International Data Transfers</h2>
119119- <p>While all our primary data processing occurs within the EU, some of our third-party processors may process data outside the EU. When this occurs, we ensure appropriate safeguards are in place, such as Standard Contractual Clauses or adequacy decisions.</p>
120120-121121- <h2>12. Changes to This Privacy Policy</h2>
122122- <p>We may update this Privacy Policy from time to time. We will notify you of any changes by posting the new Privacy Policy on this page and updating the "Last updated" date.</p>
123123-124124- <h2>13. Contact Information</h2>
125125- <p>If you have any questions about this Privacy Policy or wish to exercise your rights, please contact us through our platform or via email.</p>
126126-127127- <div class="mt-8 pt-6 border-t border-gray-200 dark:border-gray-700 text-sm text-gray-600 dark:text-gray-400">
128128- <p>This Privacy Policy complies with the EU General Data Protection Regulation (GDPR) and other applicable data protection laws.</p>
129129- </div>
77+ {{ .Content }}
1308 </div>
1319 </div>
13210</div>
133133-{{ end }}
1111+{{ end }}
+2-62
appview/pages/templates/legal/terms.html
···44<div class="max-w-4xl mx-auto px-4 py-8">
55 <div class="bg-white dark:bg-gray-800 rounded-lg shadow-sm p-8">
66 <div class="prose prose-gray dark:prose-invert max-w-none">
77- <h1>Terms of Service</h1>
88-99- <p><strong>Last updated:</strong> {{ now.Format "January 2, 2006" }}</p>
1010-1111- <p>Welcome to Tangled. These Terms of Service ("Terms") govern your access to and use of the Tangled platform and services (the "Service") operated by us ("Tangled," "we," "us," or "our").</p>
1212-1313- <h2>1. Acceptance of Terms</h2>
1414- <p>By accessing or using our Service, you agree to be bound by these Terms. If you disagree with any part of these terms, then you may not access the Service.</p>
1515-1616- <h2>2. Account Registration</h2>
1717- <p>To use certain features of the Service, you must register for an account. You agree to provide accurate, current, and complete information during the registration process and to update such information to keep it accurate, current, and complete.</p>
1818-1919- <h2>3. Account Termination</h2>
2020- <div class="bg-red-50 dark:bg-red-900/20 border border-red-200 dark:border-red-800 rounded-lg p-4 my-6">
2121- <h3 class="text-red-800 dark:text-red-200 font-semibold mb-2">Important Notice</h3>
2222- <p class="text-red-700 dark:text-red-300">
2323- <strong>We reserve the right to terminate, suspend, or restrict access to your account at any time, for any reason, or for no reason at all, at our sole discretion.</strong> This includes, but is not limited to, termination for violation of these Terms, inappropriate conduct, spam, abuse, or any other behavior we deem harmful to the Service or other users.
2424- </p>
2525- <p class="text-red-700 dark:text-red-300 mt-2">
2626- Account termination may result in the loss of access to your repositories, data, and other content associated with your account. We are not obligated to provide advance notice of termination, though we may do so in our discretion.
2727- </p>
2828- </div>
2929-3030- <h2>4. Acceptable Use</h2>
3131- <p>You agree not to use the Service to:</p>
3232- <ul>
3333- <li>Violate any applicable laws or regulations</li>
3434- <li>Infringe upon the rights of others</li>
3535- <li>Upload, store, or share content that is illegal, harmful, threatening, abusive, harassing, defamatory, vulgar, obscene, or otherwise objectionable</li>
3636- <li>Engage in spam, phishing, or other deceptive practices</li>
3737- <li>Attempt to gain unauthorized access to the Service or other users' accounts</li>
3838- <li>Interfere with or disrupt the Service or servers connected to the Service</li>
3939- </ul>
4040-4141- <h2>5. Content and Intellectual Property</h2>
4242- <p>You retain ownership of the content you upload to the Service. By uploading content, you grant us a non-exclusive, worldwide, royalty-free license to use, reproduce, modify, and distribute your content as necessary to provide the Service.</p>
4343-4444- <h2>6. Privacy</h2>
4545- <p>Your privacy is important to us. Please review our <a href="/privacy" class="text-blue-600 hover:text-blue-800 dark:text-blue-400 dark:hover:text-blue-300">Privacy Policy</a>, which also governs your use of the Service.</p>
4646-4747- <h2>7. Disclaimers</h2>
4848- <p>The Service is provided on an "AS IS" and "AS AVAILABLE" basis. We make no warranties, expressed or implied, and hereby disclaim and negate all other warranties including without limitation, implied warranties or conditions of merchantability, fitness for a particular purpose, or non-infringement of intellectual property or other violation of rights.</p>
4949-5050- <h2>8. Limitation of Liability</h2>
5151- <p>In no event shall Tangled, nor its directors, employees, partners, agents, suppliers, or affiliates, be liable for any indirect, incidental, special, consequential, or punitive damages, including without limitation, loss of profits, data, use, goodwill, or other intangible losses, resulting from your use of the Service.</p>
5252-5353- <h2>9. Indemnification</h2>
5454- <p>You agree to defend, indemnify, and hold harmless Tangled and its affiliates, officers, directors, employees, and agents from and against any and all claims, damages, obligations, losses, liabilities, costs, or debt, and expenses (including attorney's fees).</p>
5555-5656- <h2>10. Governing Law</h2>
5757- <p>These Terms shall be interpreted and governed by the laws of Finland, without regard to its conflict of law provisions.</p>
5858-5959- <h2>11. Changes to Terms</h2>
6060- <p>We reserve the right to modify or replace these Terms at any time. If a revision is material, we will try to provide at least 30 days notice prior to any new terms taking effect.</p>
6161-6262- <h2>12. Contact Information</h2>
6363- <p>If you have any questions about these Terms of Service, please contact us through our platform or via email.</p>
6464-6565- <div class="mt-8 pt-6 border-t border-gray-200 dark:border-gray-700 text-sm text-gray-600 dark:text-gray-400">
6666- <p>These terms are effective as of the last updated date shown above and will remain in effect except with respect to any changes in their provisions in the future, which will be in effect immediately after being posted on this page.</p>
6767- </div>
77+ {{ .Content }}
688 </div>
699 </div>
7010</div>
7171-{{ end }}
1111+{{ end }}
+1
appview/pages/templates/repo/blob.html
···7878 {{ end }}
7979 </div>
8080 {{ end }}
8181+ {{ template "fragments/multiline-select" }}
8182{{ end }}
···1111### message format
12121313```
1414-<service/top-level directory>: <affected package/directory>: <short summary of change>
1414+<service/top-level directory>/<affected package/directory>: <short summary of change>
151516161717Optional longer description can go here, if necessary. Explain what the
···2323Here are some examples:
24242525```
2626-appview: state: fix token expiry check in middleware
2626+appview/state: fix token expiry check in middleware
27272828The previous check did not account for clock drift, leading to premature
2929token invalidation.
3030```
31313232```
3333-knotserver: git/service: improve error checking in upload-pack
3333+knotserver/git/service: improve error checking in upload-pack
3434```
35353636···5454- Don't include unrelated changes in the same commit.
5555- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
5656before submitting if necessary.
5757+5858+## code formatting
5959+6060+We use a variety of tools to format our code, and multiplex them with
6161+[`treefmt`](https://treefmt.com): all you need to do to format your changes
6262+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
57635864## proposals for bigger changes
5965
+63-22
docs/hacking.md
···4848redis-server
4949```
50505151-## running a knot
5151+## running knots and spindles
52525353An end-to-end knot setup requires setting up a machine with
5454`sshd`, `AuthorizedKeysCommand`, and git user, which is
5555quite cumbersome. So the nix flake provides a
5656`nixosConfiguration` to do so.
57575858-To begin, head to `http://localhost:3000/knots` in the browser
5959-and create a knot with hostname `localhost:6000`. This will
6060-generate a knot secret. Set `$TANGLED_VM_KNOT_SECRET` to it,
6161-ideally in a `.envrc` with [direnv](https://direnv.net) so you
6262-don't lose it.
5858+<details>
5959+ <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
6060+6161+ In order to build Tangled's dev VM on macOS, you will
6262+ first need to set up a Linux Nix builder. The recommended
6363+ way to do so is to run a [`darwin.linux-builder`
6464+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
6565+ and to register it in `nix.conf` as a builder for Linux
6666+ with the same architecture as your Mac (`linux-aarch64` if
6767+ you are using Apple Silicon).
6868+6969+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
7070+ > the tangled repo so that it doesn't conflict with the other VM. For example,
7171+ > you can do
7272+ >
7373+ > ```shell
7474+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
7575+ > ```
7676+ >
7777+ > to store the builder VM in a temporary dir.
7878+ >
7979+ > You should read and follow [all the other instructions][darwin builder vm] to
8080+ > avoid subtle problems.
8181+8282+ Alternatively, you can use any other method to set up a
8383+ Linux machine with `nix` installed that you can `sudo ssh`
8484+ into (in other words, root user on your Mac has to be able
8585+ to ssh into the Linux machine without entering a password)
8686+ and that has the same architecture as your Mac. See
8787+ [remote builder
8888+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
8989+ for how to register such a builder in `nix.conf`.
9090+9191+ > WARNING: If you'd like to use
9292+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
9393+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
9494+ > ssh` works can be tricky. It seems to be [possible with
9595+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
63966464-You will also need to set the `$TANGLED_VM_SPINDLE_OWNER`
6565-variable to some value. If you don't want to [set up a
6666-spindle](#running-a-spindle), you can use any placeholder
6767-value.
9797+</details>
68986969-You can now start a lightweight NixOS VM like so:
9999+To begin, grab your DID from http://localhost:3000/settings.
100100+Then, set `TANGLED_VM_KNOT_OWNER` and
101101+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
102102+lightweight NixOS VM like so:
7010371104```bash
72105nix run --impure .#vm
···75108```
7610977110This starts a knot on port 6000, a spindle on port 6555
7878-with `ssh` exposed on port 2222. You can push repositories
7979-to this VM with this ssh config block on your main machine:
111111+with `ssh` exposed on port 2222.
112112+113113+Once the services are running, head to
114114+http://localhost:3000/knots and hit verify. It should
115115+verify the ownership of the services instantly if everything
116116+went smoothly.
117117+118118+You can push repositories to this VM with this ssh config
119119+block on your main machine:
8012081121```bash
82122Host nixos-shell
···93133git push local-dev main
94134```
951359696-## running a spindle
136136+### running a spindle
971379898-You will need to find out your DID by entering your login handle into
9999-<https://pdsls.dev/>. Set `$TANGLED_VM_SPINDLE_OWNER` to your DID.
100100-101101-The above VM should already be running a spindle on `localhost:6555`.
102102-You can head to the spindle dashboard on `http://localhost:3000/spindles`,
103103-and register a spindle with hostname `localhost:6555`. It should instantly
104104-be verified. You can then configure each repository to use this spindle
105105-and run CI jobs.
138138+The above VM should already be running a spindle on
139139+`localhost:6555`. Head to http://localhost:3000/spindles and
140140+hit verify. You can then configure each repository to use
141141+this spindle and run CI jobs.
106142107143Of interest when debugging spindles:
108144···119155# litecli has a nicer REPL interface:
120156litecli /var/lib/spindle/spindle.db
121157```
158158+159159+If for any reason you wish to disable either one of the
160160+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161161+`services.tangled-spindle.enable` (or
162162+`services.tangled-knot.enable`) to `false`.
+7-5
docs/knot-hosting.md
···7373```
74747575Create `/home/git/.knot.env` with the following, updating the values as
7676-necessary. The `KNOT_SERVER_SECRET` can be obtained from the
7777-[/knots](https://tangled.sh/knots) page on Tangled.
7676+necessary. The `KNOT_SERVER_OWNER` should be set to your
7777+DID; you can find your DID in the [Settings](https://tangled.sh/settings) page.
78787979```
8080KNOT_REPO_SCAN_PATH=/home/git
8181KNOT_SERVER_HOSTNAME=knot.example.com
8282APPVIEW_ENDPOINT=https://tangled.sh
8383-KNOT_SERVER_SECRET=secret
8383+KNOT_SERVER_OWNER=did:plc:foobar
8484KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
8585KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
8686```
···128128Remember to use Let's Encrypt or similar to procure a certificate for your
129129knot domain.
130130131131-You should now have a running knot server! You can finalize your registration by hitting the
132132-`initialize` button on the [/knots](https://tangled.sh/knots) page.
131131+You should now have a running knot server! You can finalize
132132+your registration by hitting the `verify` button on the
133133+[/knots](https://tangled.sh/knots) page. This simply creates
134134+a record on your PDS to announce the existence of the knot.
133135134136### custom paths
135137
+60
docs/migrations.md
···11+# Migrations
22+33+This document is laid out in reverse-chronological order.
44+Newer migration guides are listed first, and older guides
55+are further down the page.
66+77+## Upgrading from v1.8.x
88+99+After v1.8.2, the HTTP APIs for knots and spindles have been
1010+deprecated and replaced with XRPC. Repositories on outdated
1111+knots will not be viewable from the appview. Upgrading is
1212+straightforward however.
1313+1414+For knots:
1515+1616+- Upgrade to latest tag (v1.9.0 or above)
1717+- Head to the [knot dashboard](https://tangled.sh/knots) and
1818+ hit the "retry" button to verify your knot
1919+2020+For spindles:
2121+2222+- Upgrade to latest tag (v1.9.0 or above)
2323+- Head to the [spindle
2424+ dashboard](https://tangled.sh/spindles) and hit the
2525+ "retry" button to verify your spindle
2626+2727+## Upgrading from v1.7.x
2828+2929+After v1.7.0, knot secrets have been deprecated. You no
3030+longer need a secret from the appview to run a knot. All
3131+authorized commands to knots are managed via [Inter-Service
3232+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
3333+Knots will be read-only until upgraded.
3434+3535+Upgrading is quite easy, in essence:
3636+3737+- `KNOT_SERVER_SECRET` is no more, you can remove this
3838+ environment variable entirely
3939+- `KNOT_SERVER_OWNER` is now required on boot, set this to
4040+ your DID. You can find your DID in the
4141+ [settings](https://tangled.sh/settings) page.
4242+- Restart your knot once you have replaced the environment
4343+ variable
4444+- Head to the [knot dashboard](https://tangled.sh/knots) and
4545+ hit the "retry" button to verify your knot. This simply
4646+ writes a `sh.tangled.knot` record to your PDS.
4747+4848+If you use the nix module, simply bump the flake to the
4949+latest revision, and change your config block like so:
5050+5151+```diff
5252+ services.tangled-knot = {
5353+ enable = true;
5454+ server = {
5555+- secretFile = /path/to/secret;
5656++ owner = "did:plc:foo";
5757+ };
5858+ };
5959+```
6060+
+140-41
docs/spindle/pipeline.md
···11-# spindle pipeline manifest
11+# spindle pipelines
22+33+Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
44+55+The fields are:
2633-Spindle pipelines are defined under the `.tangled/workflows` directory in a
44-repo. Generally:
77+- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
88+- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
99+- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
1010+- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
1111+- [Environment](#environment): An **optional** field that allows you to define environment variables.
1212+- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
51366-* Pipelines are defined in YAML.
77-* Dependencies can be specified from
88-[Nixpkgs](https://search.nixos.org) or custom registries.
99-* Environment variables can be set globally or per-step.
1414+## Trigger
10151111-Here's an example that uses all fields:
1616+The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
1717+1818+- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
1919+ - `push`: The workflow should run every time a commit is pushed to the repository.
2020+ - `pull_request`: The workflow should run every time a pull request is made or updated.
2121+ - `manual`: The workflow can be triggered manually.
2222+- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
2323+2424+For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
12251326```yaml
1414-# build_and_test.yaml
1527when:
1616- - event: ["push", "pull_request"]
2828+ - event: ["push", "manual"]
1729 branch: ["main", "develop"]
1818- - event: ["manual"]
3030+ - event: ["pull_request"]
3131+ branch: ["main"]
3232+```
3333+3434+## Engine
3535+3636+Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
3737+3838+- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
3939+4040+Example:
4141+4242+```yaml
4343+engine: "nixery"
4444+```
4545+4646+## Clone options
4747+4848+When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
4949+5050+- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
5151+- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
5252+- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
5353+5454+The default settings are:
5555+5656+```yaml
5757+clone:
5858+ skip: false
5959+ depth: 1
6060+ submodules: false
6161+```
6262+6363+## Dependencies
6464+6565+Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
6666+6767+Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
19686969+```yaml
2070dependencies:
2121- ## from nixpkgs
7171+ # nixpkgs
2272 nixpkgs:
2373 - nodejs
2424- ## custom registry
2525- git+https://tangled.sh/@oppi.li/statix:
2626- - statix
7474+ - go
7575+ # custom registry
7676+ git+https://tangled.sh/@example.com/my_pkg:
7777+ - my_pkg
7878+```
7979+8080+Now these dependencies are available to use in your workflow!
8181+8282+## Environment
8383+8484+The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
8585+8686+Example:
8787+8888+```yaml
8989+environment:
9090+ GOOS: "linux"
9191+ GOARCH: "arm64"
9292+ NODE_ENV: "production"
9393+ MY_ENV_VAR: "MY_ENV_VALUE"
9494+```
27952828-steps:
2929- - name: "Install dependencies"
3030- command: "npm install"
3131- environment:
3232- NODE_ENV: "development"
3333- CI: "true"
9696+## Steps
9797+9898+The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
9999+100100+- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
101101+- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
102102+- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
341033535- - name: "Run linter"
3636- command: "npm run lint"
104104+Example:
371053838- - name: "Run tests"
3939- command: "npm test"
106106+```yaml
107107+steps:
108108+ - name: "Build backend"
109109+ command: "go build"
40110 environment:
4141- NODE_ENV: "test"
4242- JEST_WORKERS: "2"
4343-4444- - name: "Build application"
111111+ GOOS: "darwin"
112112+ GOARCH: "arm64"
113113+ - name: "Build frontend"
45114 command: "npm run build"
46115 environment:
47116 NODE_ENV: "production"
117117+```
481184949-environment:
5050- BUILD_NUMBER: "123"
5151- GIT_BRANCH: "main"
119119+## Complete workflow
521205353-## current repository is cloned and checked out at the target ref
5454-## by default.
121121+```yaml
122122+# .tangled/workflows/build.yml
123123+124124+when:
125125+ - event: ["push", "manual"]
126126+ branch: ["main", "develop"]
127127+ - event: ["pull_request"]
128128+ branch: ["main"]
129129+130130+engine: "nixery"
131131+132132+# using the default values
55133clone:
56134 skip: false
5757- depth: 50
5858- submodules: true
5959-```
135135+ depth: 1
136136+ submodules: false
137137+138138+dependencies:
139139+ # nixpkgs
140140+ nixpkgs:
141141+ - nodejs
142142+ - go
143143+ # custom registry
144144+ git+https://tangled.sh/@example.com/my_pkg:
145145+ - my_pkg
601466161-## git push options
147147+environment:
148148+ GOOS: "linux"
149149+ GOARCH: "arm64"
150150+ NODE_ENV: "production"
151151+ MY_ENV_VAR: "MY_ENV_VALUE"
621526363-These are push options that can be used with the `--push-option (-o)` flag of git push:
153153+steps:
154154+ - name: "Build backend"
155155+ command: "go build"
156156+ environment:
157157+ GOOS: "darwin"
158158+ GOARCH: "arm64"
159159+ - name: "Build frontend"
160160+ command: "npm run build"
161161+ environment:
162162+ NODE_ENV: "production"
163163+```
641646565-- `verbose-ci`, `ci-verbose`: enables diagnostics reporting for the CI pipeline, allowing you to see any issues when you push.
6666-- `skip-ci`, `ci-skip`: skips triggering the CI pipeline.
165165+If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
···11+package xrpc
22+33+import (
44+ "fmt"
55+ "net/http"
66+ "runtime/debug"
77+88+ "tangled.sh/tangled.sh/core/api/tangled"
99+)
1010+1111+// version is set during build time.
1212+var version string
1313+1414+func (x *Xrpc) Version(w http.ResponseWriter, r *http.Request) {
1515+ if version == "" {
1616+ info, ok := debug.ReadBuildInfo()
1717+ if !ok {
1818+ http.Error(w, "failed to read build info", http.StatusInternalServerError)
1919+ return
2020+ }
2121+2222+ var modVer string
2323+ var sha string
2424+ var modified bool
2525+2626+ for _, mod := range info.Deps {
2727+ if mod.Path == "tangled.sh/tangled.sh/knotserver/xrpc" {
2828+ modVer = mod.Version
2929+ break
3030+ }
3131+ }
3232+3333+ for _, setting := range info.Settings {
3434+ switch setting.Key {
3535+ case "vcs.revision":
3636+ sha = setting.Value
3737+ case "vcs.modified":
3838+ modified = setting.Value == "true"
3939+ }
4040+ }
4141+4242+ if modVer == "" {
4343+ modVer = "unknown"
4444+ }
4545+4646+ if sha == "" {
4747+ version = modVer
4848+ } else if modified {
4949+ version = fmt.Sprintf("%s (%s with modifications)", modVer, sha)
5050+ } else {
5151+ version = fmt.Sprintf("%s (%s)", modVer, sha)
5252+ }
5353+ }
5454+5555+ response := tangled.KnotVersion_Output{
5656+ Version: version,
5757+ }
5858+5959+ writeJson(w, response)
6060+}
+127
knotserver/xrpc/xrpc.go
···11+package xrpc
22+33+import (
44+ "encoding/json"
55+ "log/slog"
66+ "net/http"
77+ "strings"
88+99+ securejoin "github.com/cyphar/filepath-securejoin"
1010+ "tangled.sh/tangled.sh/core/api/tangled"
1111+ "tangled.sh/tangled.sh/core/idresolver"
1212+ "tangled.sh/tangled.sh/core/jetstream"
1313+ "tangled.sh/tangled.sh/core/knotserver/config"
1414+ "tangled.sh/tangled.sh/core/knotserver/db"
1515+ "tangled.sh/tangled.sh/core/notifier"
1616+ "tangled.sh/tangled.sh/core/rbac"
1717+ xrpcerr "tangled.sh/tangled.sh/core/xrpc/errors"
1818+ "tangled.sh/tangled.sh/core/xrpc/serviceauth"
1919+2020+ "github.com/go-chi/chi/v5"
2121+)
2222+2323+type Xrpc struct {
2424+ Config *config.Config
2525+ Db *db.DB
2626+ Ingester *jetstream.JetstreamClient
2727+ Enforcer *rbac.Enforcer
2828+ Logger *slog.Logger
2929+ Notifier *notifier.Notifier
3030+ Resolver *idresolver.Resolver
3131+ ServiceAuth *serviceauth.ServiceAuth
3232+}
3333+3434+func (x *Xrpc) Router() http.Handler {
3535+ r := chi.NewRouter()
3636+3737+ r.Group(func(r chi.Router) {
3838+ r.Use(x.ServiceAuth.VerifyServiceAuth)
3939+4040+ r.Post("/"+tangled.RepoSetDefaultBranchNSID, x.SetDefaultBranch)
4141+ r.Post("/"+tangled.RepoCreateNSID, x.CreateRepo)
4242+ r.Post("/"+tangled.RepoDeleteNSID, x.DeleteRepo)
4343+ r.Post("/"+tangled.RepoForkStatusNSID, x.ForkStatus)
4444+ r.Post("/"+tangled.RepoForkSyncNSID, x.ForkSync)
4545+ r.Post("/"+tangled.RepoHiddenRefNSID, x.HiddenRef)
4646+ r.Post("/"+tangled.RepoMergeNSID, x.Merge)
4747+ })
4848+4949+ // merge check is an open endpoint
5050+ //
5151+ // TODO: should we constrain this more?
5252+ // - we can calculate on PR submit/resubmit/gitRefUpdate etc.
5353+ // - use ETags on clients to keep requests to a minimum
5454+ r.Post("/"+tangled.RepoMergeCheckNSID, x.MergeCheck)
5555+5656+ // repo query endpoints (no auth required)
5757+ r.Get("/"+tangled.RepoTreeNSID, x.RepoTree)
5858+ r.Get("/"+tangled.RepoLogNSID, x.RepoLog)
5959+ r.Get("/"+tangled.RepoBranchesNSID, x.RepoBranches)
6060+ r.Get("/"+tangled.RepoTagsNSID, x.RepoTags)
6161+ r.Get("/"+tangled.RepoBlobNSID, x.RepoBlob)
6262+ r.Get("/"+tangled.RepoDiffNSID, x.RepoDiff)
6363+ r.Get("/"+tangled.RepoCompareNSID, x.RepoCompare)
6464+ r.Get("/"+tangled.RepoGetDefaultBranchNSID, x.RepoGetDefaultBranch)
6565+ r.Get("/"+tangled.RepoBranchNSID, x.RepoBranch)
6666+ r.Get("/"+tangled.RepoArchiveNSID, x.RepoArchive)
6767+ r.Get("/"+tangled.RepoLanguagesNSID, x.RepoLanguages)
6868+6969+ // knot query endpoints (no auth required)
7070+ r.Get("/"+tangled.KnotListKeysNSID, x.ListKeys)
7171+ r.Get("/"+tangled.KnotVersionNSID, x.Version)
7272+7373+ // service query endpoints (no auth required)
7474+ r.Get("/"+tangled.OwnerNSID, x.Owner)
7575+7676+ return r
7777+}
7878+7979+// parseRepoParam parses a repo parameter in 'did/repoName' format and returns
8080+// the full repository path on disk
8181+func (x *Xrpc) parseRepoParam(repo string) (string, error) {
8282+ if repo == "" {
8383+ return "", xrpcerr.NewXrpcError(
8484+ xrpcerr.WithTag("InvalidRequest"),
8585+ xrpcerr.WithMessage("missing repo parameter"),
8686+ )
8787+ }
8888+8989+ // Parse repo string (did/repoName format)
9090+ parts := strings.SplitN(repo, "/", 2)
9191+ if len(parts) != 2 {
9292+ return "", xrpcerr.NewXrpcError(
9393+ xrpcerr.WithTag("InvalidRequest"),
9494+ xrpcerr.WithMessage("invalid repo format, expected 'did/repoName'"),
9595+ )
9696+ }
9797+9898+ did := parts[0]
9999+ repoName := parts[1]
100100+101101+ // Construct repository path using the same logic as didPath
102102+ didRepoPath, err := securejoin.SecureJoin(did, repoName)
103103+ if err != nil {
104104+ return "", xrpcerr.RepoNotFoundError
105105+ }
106106+107107+ repoPath, err := securejoin.SecureJoin(x.Config.Repo.ScanPath, didRepoPath)
108108+ if err != nil {
109109+ return "", xrpcerr.RepoNotFoundError
110110+ }
111111+112112+ return repoPath, nil
113113+}
114114+115115+func writeError(w http.ResponseWriter, e xrpcerr.XrpcError, status int) {
116116+ w.Header().Set("Content-Type", "application/json")
117117+ w.WriteHeader(status)
118118+ json.NewEncoder(w).Encode(e)
119119+}
120120+121121+func writeJson(w http.ResponseWriter, response any) {
122122+ w.Header().Set("Content-Type", "application/json")
123123+ if err := json.NewEncoder(w).Encode(response); err != nil {
124124+ writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
125125+ return
126126+ }
127127+}
+158
legal/privacy.md
···11+# Privacy Policy
22+33+**Last updated:** January 15, 2025
44+55+This Privacy Policy describes how Tangled ("we," "us," or "our")
66+collects, uses, and shares your personal information when you use our
77+platform and services (the "Service").
88+99+## 1. Information We Collect
1010+1111+### Account Information
1212+1313+When you create an account, we collect:
1414+1515+- Your chosen username
1616+- Email address
1717+- Profile information you choose to provide
1818+- Authentication data
1919+2020+### Content and Activity
2121+2222+We store:
2323+2424+- Code repositories and associated metadata
2525+- Issues, pull requests, and comments
2626+- Activity logs and usage patterns
2727+- Public keys for authentication
2828+2929+## 2. Data Location and Hosting
3030+3131+### EU Data Hosting
3232+3333+**All Tangled service data is hosted within the European Union.**
3434+Specifically:
3535+3636+- **Personal Data Servers (PDS):** Accounts hosted on Tangled PDS
3737+ (*.tngl.sh) are located in Finland
3838+- **Application Data:** All other service data is stored on EU-based
3939+ servers
4040+- **Data Processing:** All data processing occurs within EU
4141+ jurisdiction
4242+4343+### External PDS Notice
4444+4545+**Important:** If your account is hosted on Bluesky's PDS or other
4646+self-hosted Personal Data Servers (not *.tngl.sh), we do not control
4747+that data. The data protection, storage location, and privacy
4848+practices for such accounts are governed by the respective PDS
4949+provider's policies, not this Privacy Policy. We only control data
5050+processing within our own services and infrastructure.
5151+5252+## 3. Third-Party Data Processors
5353+5454+We only share your data with the following third-party processors:
5555+5656+### Resend (Email Services)
5757+5858+- **Purpose:** Sending transactional emails (account verification,
5959+ notifications)
6060+- **Data Shared:** Email address and necessary message content
6161+6262+### Cloudflare (Image Caching)
6363+6464+- **Purpose:** Caching and optimizing image delivery
6565+- **Data Shared:** Public images and associated metadata for caching
6666+ purposes
6767+6868+### Posthog (Usage Metrics Tracking)
6969+7070+- **Purpose:** Tracking usage and platform metrics
7171+- **Data Shared:** Usage data, IP addresses, DIDs, and browser
 7272+ information
7373+7474+## 4. How We Use Your Information
7575+7676+We use your information to:
7777+7878+- Provide and maintain the Service
7979+- Process your transactions and requests
8080+- Send you technical notices and support messages
8181+- Improve and develop new features
8282+- Ensure security and prevent fraud
8383+- Comply with legal obligations
8484+8585+## 5. Data Sharing and Disclosure
8686+8787+We do not sell, trade, or rent your personal information. We may share
8888+your information only in the following circumstances:
8989+9090+- With the third-party processors listed above
9191+- When required by law or legal process
9292+- To protect our rights, property, or safety, or that of our users
9393+- In connection with a merger, acquisition, or sale of assets (with
9494+ appropriate protections)
9595+9696+## 6. Data Security
9797+9898+We implement appropriate technical and organizational measures to
9999+protect your personal information against unauthorized access,
100100+alteration, disclosure, or destruction. However, no method of
101101+transmission over the Internet is 100% secure.
102102+103103+## 7. Data Retention
104104+105105+We retain your personal information for as long as necessary to provide
106106+the Service and fulfill the purposes outlined in this Privacy Policy,
107107+unless a longer retention period is required by law.
108108+109109+## 8. Your Rights
110110+111111+Under applicable data protection laws, you have the right to:
112112+113113+- Access your personal information
114114+- Correct inaccurate information
115115+- Request deletion of your information
116116+- Object to processing of your information
117117+- Data portability
118118+- Withdraw consent (where applicable)
119119+120120+## 9. Cookies and Tracking
121121+122122+We use cookies and similar technologies to:
123123+124124+- Maintain your login session
125125+- Remember your preferences
126126+- Analyze usage patterns to improve the Service
127127+128128+You can control cookie settings through your browser preferences.
129129+130130+## 10. Children's Privacy
131131+132132+The Service is not intended for children under 16 years of age. We do
133133+not knowingly collect personal information from children under 16. If
134134+we become aware that we have collected such information, we will take
135135+steps to delete it.
136136+137137+## 11. International Data Transfers
138138+139139+While all our primary data processing occurs within the EU, some of our
140140+third-party processors may process data outside the EU. When this
141141+occurs, we ensure appropriate safeguards are in place, such as Standard
142142+Contractual Clauses or adequacy decisions.
143143+144144+## 12. Changes to This Privacy Policy
145145+146146+We may update this Privacy Policy from time to time. We will notify you
147147+of any changes by posting the new Privacy Policy on this page and
148148+updating the "Last updated" date.
149149+150150+## 13. Contact Information
151151+152152+If you have any questions about this Privacy Policy or wish to exercise
153153+your rights, please contact us through our platform or via email.
154154+155155+---
156156+157157+This Privacy Policy complies with the EU General Data Protection
158158+Regulation (GDPR) and other applicable data protection laws.
+109
legal/terms.md
···11+# Terms of Service
22+33+**Last updated:** January 15, 2025
44+55+Welcome to Tangled. These Terms of Service ("Terms") govern your access
66+to and use of the Tangled platform and services (the "Service")
77+operated by us ("Tangled," "we," "us," or "our").
88+99+## 1. Acceptance of Terms
1010+1111+By accessing or using our Service, you agree to be bound by these Terms.
1212+If you disagree with any part of these terms, then you may not access
1313+the Service.
1414+1515+## 2. Account Registration
1616+1717+To use certain features of the Service, you must register for an
1818+account. You agree to provide accurate, current, and complete
1919+information during the registration process and to update such
2020+information to keep it accurate, current, and complete.
2121+2222+## 3. Account Termination
2323+2424+> **Important Notice**
2525+>
2626+> **We reserve the right to terminate, suspend, or restrict access to
2727+> your account at any time, for any reason, or for no reason at all, at
2828+> our sole discretion.** This includes, but is not limited to,
2929+> termination for violation of these Terms, inappropriate conduct, spam,
3030+> abuse, or any other behavior we deem harmful to the Service or other
3131+> users.
3232+>
3333+> Account termination may result in the loss of access to your
3434+> repositories, data, and other content associated with your account. We
3535+> are not obligated to provide advance notice of termination, though we
3636+> may do so in our discretion.
3737+3838+## 4. Acceptable Use
3939+4040+You agree not to use the Service to:
4141+4242+- Violate any applicable laws or regulations
4343+- Infringe upon the rights of others
4444+- Upload, store, or share content that is illegal, harmful, threatening,
4545+ abusive, harassing, defamatory, vulgar, obscene, or otherwise
4646+ objectionable
4747+- Engage in spam, phishing, or other deceptive practices
4848+- Attempt to gain unauthorized access to the Service or other users'
4949+ accounts
5050+- Interfere with or disrupt the Service or servers connected to the
5151+ Service
5252+5353+## 5. Content and Intellectual Property
5454+5555+You retain ownership of the content you upload to the Service. By
5656+uploading content, you grant us a non-exclusive, worldwide, royalty-free
5757+license to use, reproduce, modify, and distribute your content as
5858+necessary to provide the Service.
5959+6060+## 6. Privacy
6161+6262+Your privacy is important to us. Please review our [Privacy
6363+Policy](/privacy), which also governs your use of the Service.
6464+6565+## 7. Disclaimers
6666+6767+The Service is provided on an "AS IS" and "AS AVAILABLE" basis. We make
6868+no warranties, expressed or implied, and hereby disclaim and negate all
6969+other warranties including without limitation, implied warranties or
7070+conditions of merchantability, fitness for a particular purpose, or
7171+non-infringement of intellectual property or other violation of rights.
7272+7373+## 8. Limitation of Liability
7474+7575+In no event shall Tangled, nor its directors, employees, partners,
7676+agents, suppliers, or affiliates, be liable for any indirect,
7777+incidental, special, consequential, or punitive damages, including
7878+without limitation, loss of profits, data, use, goodwill, or other
7979+intangible losses, resulting from your use of the Service.
8080+8181+## 9. Indemnification
8282+8383+You agree to defend, indemnify, and hold harmless Tangled and its
8484+affiliates, officers, directors, employees, and agents from and against
8585+any and all claims, damages, obligations, losses, liabilities, costs,
8686+or debt, and expenses (including attorney's fees).
8787+8888+## 10. Governing Law
8989+9090+These Terms shall be interpreted and governed by the laws of Finland,
9191+without regard to its conflict of law provisions.
9292+9393+## 11. Changes to Terms
9494+9595+We reserve the right to modify or replace these Terms at any time. If a
9696+revision is material, we will try to provide at least 30 days notice
9797+prior to any new terms taking effect.
9898+9999+## 12. Contact Information
100100+101101+If you have any questions about these Terms of Service, please contact
102102+us through our platform or via email.
103103+104104+---
105105+106106+These terms are effective as of the last updated date shown above and
107107+will remain in effect except with respect to any changes in their
108108+provisions in the future, which will be in effect immediately after
109109+being posted on this page.
···99// NewHandler sets up a new slog.Handler with the service name
1010// as an attribute
1111func NewHandler(name string) slog.Handler {
1212- handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{})
1212+ handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
1313+ Level: slog.LevelDebug,
1414+ })
13151416 var attrs []slog.Attr
1517 attrs = append(attrs, slog.Attr{Key: "service", Value: slog.StringValue(name)})
+11-2
nix/gomod2nix.toml
···425425 [mod."github.com/whyrusleeping/cbor-gen"]
426426 version = "v0.3.1"
427427 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
428428+ [mod."github.com/wyatt915/goldmark-treeblood"]
429429+ version = "v0.0.0-20250825231212-5dcbdb2f4b57"
430430+ hash = "sha256-IZEsUXTBTsNgWoD7vqRUc9aFCCHNjzk1IUmI9O+NCnM="
431431+ [mod."github.com/wyatt915/treeblood"]
432432+ version = "v0.1.15"
433433+ hash = "sha256-hb99exdkoY2Qv8WdDxhwgPXGbEYimUr6wFtPXEvcO9g="
428434 [mod."github.com/yuin/goldmark"]
429429- version = "v1.4.13"
430430- hash = "sha256-GVwFKZY6moIS6I0ZGuio/WtDif+lkZRfqWS6b4AAJyI="
435435+ version = "v1.7.12"
436436+ hash = "sha256-thLYBS4woL2X5qRdo7vP+xCvjlGRDU0jXtDCUt6vvWM="
437437+ [mod."github.com/yuin/goldmark-highlighting/v2"]
438438+ version = "v2.0.0-20230729083705-37449abec8cc"
439439+ hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
431440 [mod."gitlab.com/yawning/secp256k1-voi"]
432441 version = "v0.0.0-20230925100816-f2616030848b"
433442 hash = "sha256-X8INg01LTg13iOuwPI3uOhPN7r01sPZtmtwJ2sudjCA="
···7070 };
7171 # This is fine because any and all ports that are forwarded to host are explicitly marked above, we don't need a separate guest firewall
7272 networking.firewall.enable = false;
7373+ time.timeZone = "Europe/London";
7374 services.getty.autologinUser = "root";
7475 environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
7576 services.tangled-knot = {
7677 enable = true;
7778 motd = "Welcome to the development knot!\n";
7879 server = {
7979- secretFile = builtins.toFile "knot-secret" ("KNOT_SERVER_SECRET=" + (envVar "TANGLED_VM_KNOT_SECRET"));
8080+ owner = envVar "TANGLED_VM_KNOT_OWNER";
8081 hostname = "localhost:6000";
8182 listenAddr = "0.0.0.0:6000";
8283 };
···8889 hostname = "localhost:6555";
8990 listenAddr = "0.0.0.0:6555";
9091 dev = true;
9292+ queueSize = 100;
9393+ maxJobCount = 2;
9194 secrets = {
9295 provider = "sqlite";
9396 };
+1-1
patchutil/combinediff.go
···119119 // we have f1 and f2, combine them
120120 combined, err := combineFiles(f1, f2)
121121 if err != nil {
122122- fmt.Println(err)
122122+ // fmt.Println(err)
123123 }
124124125125 // combined can be nil if commit 2 reverted all changes from commit 1