···1-// Folder-specific settings
2-//
3-// For a full list of overridable settings, and general information on folder-specific settings,
4-// see the documentation: https://zed.dev/docs/configuring-zed#settings-files
5-{
6- "languages": {
7- "HTML": {
8- "prettier": {
9- "format_on_save": false,
10- "allowed": true,
11- "parser": "go-template",
12- "plugins": ["prettier-plugin-go-template"]
13- }
14- }
15- }
16-}
···0000000000000000
+1001-1332
api/tangled/cbor_gen.go
···12021203 return nil
1204}
1205-func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
1206- if t == nil {
1207- _, err := w.Write(cbg.CborNull)
1208- return err
1209- }
1210-1211- cw := cbg.NewCborWriter(w)
1212- fieldCount := 3
1213-1214- if t.LangBreakdown == nil {
1215- fieldCount--
1216- }
1217-1218- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
1219- return err
1220- }
1221-1222- // t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
1223- if len("commitCount") > 1000000 {
1224- return xerrors.Errorf("Value in field \"commitCount\" was too long")
1225- }
1226-1227- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
1228- return err
1229- }
1230- if _, err := cw.WriteString(string("commitCount")); err != nil {
1231- return err
1232- }
1233-1234- if err := t.CommitCount.MarshalCBOR(cw); err != nil {
1235- return err
1236- }
1237-1238- // t.IsDefaultRef (bool) (bool)
1239- if len("isDefaultRef") > 1000000 {
1240- return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
1241- }
1242-1243- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
1244- return err
1245- }
1246- if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
1247- return err
1248- }
1249-1250- if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
1251- return err
1252- }
1253-1254- // t.LangBreakdown (tangled.GitRefUpdate_Meta_LangBreakdown) (struct)
1255- if t.LangBreakdown != nil {
1256-1257- if len("langBreakdown") > 1000000 {
1258- return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
1259- }
1260-1261- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
1262- return err
1263- }
1264- if _, err := cw.WriteString(string("langBreakdown")); err != nil {
1265- return err
1266- }
1267-1268- if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
1269- return err
1270- }
1271- }
1272- return nil
1273-}
1274-1275-func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
1276- *t = GitRefUpdate_Meta{}
1277-1278- cr := cbg.NewCborReader(r)
1279-1280- maj, extra, err := cr.ReadHeader()
1281- if err != nil {
1282- return err
1283- }
1284- defer func() {
1285- if err == io.EOF {
1286- err = io.ErrUnexpectedEOF
1287- }
1288- }()
1289-1290- if maj != cbg.MajMap {
1291- return fmt.Errorf("cbor input should be of type map")
1292- }
1293-1294- if extra > cbg.MaxLength {
1295- return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
1296- }
1297-1298- n := extra
1299-1300- nameBuf := make([]byte, 13)
1301- for i := uint64(0); i < n; i++ {
1302- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
1303- if err != nil {
1304- return err
1305- }
1306-1307- if !ok {
1308- // Field doesn't exist on this type, so ignore it
1309- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
1310- return err
1311- }
1312- continue
1313- }
1314-1315- switch string(nameBuf[:nameLen]) {
1316- // t.CommitCount (tangled.GitRefUpdate_Meta_CommitCount) (struct)
1317- case "commitCount":
1318-1319- {
1320-1321- b, err := cr.ReadByte()
1322- if err != nil {
1323- return err
1324- }
1325- if b != cbg.CborNull[0] {
1326- if err := cr.UnreadByte(); err != nil {
1327- return err
1328- }
1329- t.CommitCount = new(GitRefUpdate_Meta_CommitCount)
1330- if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
1331- return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
1332- }
1333- }
1334-1335- }
1336- // t.IsDefaultRef (bool) (bool)
1337- case "isDefaultRef":
1338-1339- maj, extra, err = cr.ReadHeader()
1340- if err != nil {
1341- return err
1342- }
1343- if maj != cbg.MajOther {
1344- return fmt.Errorf("booleans must be major type 7")
1345- }
1346- switch extra {
1347- case 20:
1348- t.IsDefaultRef = false
1349- case 21:
1350- t.IsDefaultRef = true
1351- default:
1352- return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
1353- }
1354- // t.LangBreakdown (tangled.GitRefUpdate_Meta_LangBreakdown) (struct)
1355- case "langBreakdown":
1356-1357- {
1358-1359- b, err := cr.ReadByte()
1360- if err != nil {
1361- return err
1362- }
1363- if b != cbg.CborNull[0] {
1364- if err := cr.UnreadByte(); err != nil {
1365- return err
1366- }
1367- t.LangBreakdown = new(GitRefUpdate_Meta_LangBreakdown)
1368- if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
1369- return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
1370- }
1371- }
1372-1373- }
1374-1375- default:
1376- // Field doesn't exist on this type, so ignore it
1377- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
1378- return err
1379- }
1380- }
1381- }
1382-1383- return nil
1384-}
1385-func (t *GitRefUpdate_Meta_CommitCount) MarshalCBOR(w io.Writer) error {
1386 if t == nil {
1387 _, err := w.Write(cbg.CborNull)
1388 return err
···1399 return err
1400 }
14011402- // t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
1403 if t.ByEmail != nil {
14041405 if len("byEmail") > 1000000 {
···1430 return nil
1431}
14321433-func (t *GitRefUpdate_Meta_CommitCount) UnmarshalCBOR(r io.Reader) (err error) {
1434- *t = GitRefUpdate_Meta_CommitCount{}
14351436 cr := cbg.NewCborReader(r)
1437···1450 }
14511452 if extra > cbg.MaxLength {
1453- return fmt.Errorf("GitRefUpdate_Meta_CommitCount: map struct too large (%d)", extra)
1454 }
14551456 n := extra
···1471 }
14721473 switch string(nameBuf[:nameLen]) {
1474- // t.ByEmail ([]*tangled.GitRefUpdate_Meta_CommitCount_ByEmail_Elem) (slice)
1475 case "byEmail":
14761477 maj, extra, err = cr.ReadHeader()
···1488 }
14891490 if extra > 0 {
1491- t.ByEmail = make([]*GitRefUpdate_Meta_CommitCount_ByEmail_Elem, extra)
1492 }
14931494 for i := 0; i < int(extra); i++ {
···1510 if err := cr.UnreadByte(); err != nil {
1511 return err
1512 }
1513- t.ByEmail[i] = new(GitRefUpdate_Meta_CommitCount_ByEmail_Elem)
1514 if err := t.ByEmail[i].UnmarshalCBOR(cr); err != nil {
1515 return xerrors.Errorf("unmarshaling t.ByEmail[i] pointer: %w", err)
1516 }
···15311532 return nil
1533}
1534-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) MarshalCBOR(w io.Writer) error {
1535 if t == nil {
1536 _, err := w.Write(cbg.CborNull)
1537 return err
···1590 return nil
1591}
15921593-func (t *GitRefUpdate_Meta_CommitCount_ByEmail_Elem) UnmarshalCBOR(r io.Reader) (err error) {
1594- *t = GitRefUpdate_Meta_CommitCount_ByEmail_Elem{}
15951596 cr := cbg.NewCborReader(r)
1597···1610 }
16111612 if extra > cbg.MaxLength {
1613- return fmt.Errorf("GitRefUpdate_Meta_CommitCount_ByEmail_Elem: map struct too large (%d)", extra)
1614 }
16151616 n := extra
···16791680 return nil
1681}
1682-func (t *GitRefUpdate_Meta_LangBreakdown) MarshalCBOR(w io.Writer) error {
1683 if t == nil {
1684 _, err := w.Write(cbg.CborNull)
1685 return err
···1696 return err
1697 }
16981699- // t.Inputs ([]*tangled.GitRefUpdate_Pair) (slice)
1700 if t.Inputs != nil {
17011702 if len("inputs") > 1000000 {
···1727 return nil
1728}
17291730-func (t *GitRefUpdate_Meta_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
1731- *t = GitRefUpdate_Meta_LangBreakdown{}
17321733 cr := cbg.NewCborReader(r)
1734···1747 }
17481749 if extra > cbg.MaxLength {
1750- return fmt.Errorf("GitRefUpdate_Meta_LangBreakdown: map struct too large (%d)", extra)
1751 }
17521753 n := extra
···1768 }
17691770 switch string(nameBuf[:nameLen]) {
1771- // t.Inputs ([]*tangled.GitRefUpdate_Pair) (slice)
1772 case "inputs":
17731774 maj, extra, err = cr.ReadHeader()
···1785 }
17861787 if extra > 0 {
1788- t.Inputs = make([]*GitRefUpdate_Pair, extra)
1789 }
17901791 for i := 0; i < int(extra); i++ {
···1807 if err := cr.UnreadByte(); err != nil {
1808 return err
1809 }
1810- t.Inputs[i] = new(GitRefUpdate_Pair)
1811 if err := t.Inputs[i].UnmarshalCBOR(cr); err != nil {
1812 return xerrors.Errorf("unmarshaling t.Inputs[i] pointer: %w", err)
1813 }
···18281829 return nil
1830}
1831-func (t *GitRefUpdate_Pair) MarshalCBOR(w io.Writer) error {
1832 if t == nil {
1833 _, err := w.Write(cbg.CborNull)
1834 return err
···1888 return nil
1889}
18901891-func (t *GitRefUpdate_Pair) UnmarshalCBOR(r io.Reader) (err error) {
1892- *t = GitRefUpdate_Pair{}
18931894 cr := cbg.NewCborReader(r)
1895···1908 }
19091910 if extra > cbg.MaxLength {
1911- return fmt.Errorf("GitRefUpdate_Pair: map struct too large (%d)", extra)
1912 }
19131914 n := extra
···19771978 return nil
1979}
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001980func (t *GraphFollow) MarshalCBOR(w io.Writer) error {
1981 if t == nil {
1982 _, err := w.Write(cbg.CborNull)
···2118 }
21192120 t.Subject = string(sval)
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002121 }
2122 // t.CreatedAt (string) (string)
2123 case "createdAt":
···27282729 return nil
2730}
2731-func (t *Pipeline_Dependency) MarshalCBOR(w io.Writer) error {
2732- if t == nil {
2733- _, err := w.Write(cbg.CborNull)
2734- return err
2735- }
2736-2737- cw := cbg.NewCborWriter(w)
2738-2739- if _, err := cw.Write([]byte{162}); err != nil {
2740- return err
2741- }
2742-2743- // t.Packages ([]string) (slice)
2744- if len("packages") > 1000000 {
2745- return xerrors.Errorf("Value in field \"packages\" was too long")
2746- }
2747-2748- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("packages"))); err != nil {
2749- return err
2750- }
2751- if _, err := cw.WriteString(string("packages")); err != nil {
2752- return err
2753- }
2754-2755- if len(t.Packages) > 8192 {
2756- return xerrors.Errorf("Slice value in field t.Packages was too long")
2757- }
2758-2759- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Packages))); err != nil {
2760- return err
2761- }
2762- for _, v := range t.Packages {
2763- if len(v) > 1000000 {
2764- return xerrors.Errorf("Value in field v was too long")
2765- }
2766-2767- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
2768- return err
2769- }
2770- if _, err := cw.WriteString(string(v)); err != nil {
2771- return err
2772- }
2773-2774- }
2775-2776- // t.Registry (string) (string)
2777- if len("registry") > 1000000 {
2778- return xerrors.Errorf("Value in field \"registry\" was too long")
2779- }
2780-2781- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("registry"))); err != nil {
2782- return err
2783- }
2784- if _, err := cw.WriteString(string("registry")); err != nil {
2785- return err
2786- }
2787-2788- if len(t.Registry) > 1000000 {
2789- return xerrors.Errorf("Value in field t.Registry was too long")
2790- }
2791-2792- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Registry))); err != nil {
2793- return err
2794- }
2795- if _, err := cw.WriteString(string(t.Registry)); err != nil {
2796- return err
2797- }
2798- return nil
2799-}
2800-2801-func (t *Pipeline_Dependency) UnmarshalCBOR(r io.Reader) (err error) {
2802- *t = Pipeline_Dependency{}
2803-2804- cr := cbg.NewCborReader(r)
2805-2806- maj, extra, err := cr.ReadHeader()
2807- if err != nil {
2808- return err
2809- }
2810- defer func() {
2811- if err == io.EOF {
2812- err = io.ErrUnexpectedEOF
2813- }
2814- }()
2815-2816- if maj != cbg.MajMap {
2817- return fmt.Errorf("cbor input should be of type map")
2818- }
2819-2820- if extra > cbg.MaxLength {
2821- return fmt.Errorf("Pipeline_Dependency: map struct too large (%d)", extra)
2822- }
2823-2824- n := extra
2825-2826- nameBuf := make([]byte, 8)
2827- for i := uint64(0); i < n; i++ {
2828- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
2829- if err != nil {
2830- return err
2831- }
2832-2833- if !ok {
2834- // Field doesn't exist on this type, so ignore it
2835- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
2836- return err
2837- }
2838- continue
2839- }
2840-2841- switch string(nameBuf[:nameLen]) {
2842- // t.Packages ([]string) (slice)
2843- case "packages":
2844-2845- maj, extra, err = cr.ReadHeader()
2846- if err != nil {
2847- return err
2848- }
2849-2850- if extra > 8192 {
2851- return fmt.Errorf("t.Packages: array too large (%d)", extra)
2852- }
2853-2854- if maj != cbg.MajArray {
2855- return fmt.Errorf("expected cbor array")
2856- }
2857-2858- if extra > 0 {
2859- t.Packages = make([]string, extra)
2860- }
2861-2862- for i := 0; i < int(extra); i++ {
2863- {
2864- var maj byte
2865- var extra uint64
2866- var err error
2867- _ = maj
2868- _ = extra
2869- _ = err
2870-2871- {
2872- sval, err := cbg.ReadStringWithMax(cr, 1000000)
2873- if err != nil {
2874- return err
2875- }
2876-2877- t.Packages[i] = string(sval)
2878- }
2879-2880- }
2881- }
2882- // t.Registry (string) (string)
2883- case "registry":
2884-2885- {
2886- sval, err := cbg.ReadStringWithMax(cr, 1000000)
2887- if err != nil {
2888- return err
2889- }
2890-2891- t.Registry = string(sval)
2892- }
2893-2894- default:
2895- // Field doesn't exist on this type, so ignore it
2896- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
2897- return err
2898- }
2899- }
2900- }
2901-2902- return nil
2903-}
2904func (t *Pipeline_ManualTriggerData) MarshalCBOR(w io.Writer) error {
2905 if t == nil {
2906 _, err := w.Write(cbg.CborNull)
···39163917 return nil
3918}
3919-func (t *Pipeline_Step) MarshalCBOR(w io.Writer) error {
3920- if t == nil {
3921- _, err := w.Write(cbg.CborNull)
3922- return err
3923- }
3924-3925- cw := cbg.NewCborWriter(w)
3926- fieldCount := 3
3927-3928- if t.Environment == nil {
3929- fieldCount--
3930- }
3931-3932- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
3933- return err
3934- }
3935-3936- // t.Name (string) (string)
3937- if len("name") > 1000000 {
3938- return xerrors.Errorf("Value in field \"name\" was too long")
3939- }
3940-3941- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("name"))); err != nil {
3942- return err
3943- }
3944- if _, err := cw.WriteString(string("name")); err != nil {
3945- return err
3946- }
3947-3948- if len(t.Name) > 1000000 {
3949- return xerrors.Errorf("Value in field t.Name was too long")
3950- }
3951-3952- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
3953- return err
3954- }
3955- if _, err := cw.WriteString(string(t.Name)); err != nil {
3956- return err
3957- }
3958-3959- // t.Command (string) (string)
3960- if len("command") > 1000000 {
3961- return xerrors.Errorf("Value in field \"command\" was too long")
3962- }
3963-3964- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("command"))); err != nil {
3965- return err
3966- }
3967- if _, err := cw.WriteString(string("command")); err != nil {
3968- return err
3969- }
3970-3971- if len(t.Command) > 1000000 {
3972- return xerrors.Errorf("Value in field t.Command was too long")
3973- }
3974-3975- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Command))); err != nil {
3976- return err
3977- }
3978- if _, err := cw.WriteString(string(t.Command)); err != nil {
3979- return err
3980- }
3981-3982- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
3983- if t.Environment != nil {
3984-3985- if len("environment") > 1000000 {
3986- return xerrors.Errorf("Value in field \"environment\" was too long")
3987- }
3988-3989- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
3990- return err
3991- }
3992- if _, err := cw.WriteString(string("environment")); err != nil {
3993- return err
3994- }
3995-3996- if len(t.Environment) > 8192 {
3997- return xerrors.Errorf("Slice value in field t.Environment was too long")
3998- }
3999-4000- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
4001- return err
4002- }
4003- for _, v := range t.Environment {
4004- if err := v.MarshalCBOR(cw); err != nil {
4005- return err
4006- }
4007-4008- }
4009- }
4010- return nil
4011-}
4012-4013-func (t *Pipeline_Step) UnmarshalCBOR(r io.Reader) (err error) {
4014- *t = Pipeline_Step{}
4015-4016- cr := cbg.NewCborReader(r)
4017-4018- maj, extra, err := cr.ReadHeader()
4019- if err != nil {
4020- return err
4021- }
4022- defer func() {
4023- if err == io.EOF {
4024- err = io.ErrUnexpectedEOF
4025- }
4026- }()
4027-4028- if maj != cbg.MajMap {
4029- return fmt.Errorf("cbor input should be of type map")
4030- }
4031-4032- if extra > cbg.MaxLength {
4033- return fmt.Errorf("Pipeline_Step: map struct too large (%d)", extra)
4034- }
4035-4036- n := extra
4037-4038- nameBuf := make([]byte, 11)
4039- for i := uint64(0); i < n; i++ {
4040- nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4041- if err != nil {
4042- return err
4043- }
4044-4045- if !ok {
4046- // Field doesn't exist on this type, so ignore it
4047- if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
4048- return err
4049- }
4050- continue
4051- }
4052-4053- switch string(nameBuf[:nameLen]) {
4054- // t.Name (string) (string)
4055- case "name":
4056-4057- {
4058- sval, err := cbg.ReadStringWithMax(cr, 1000000)
4059- if err != nil {
4060- return err
4061- }
4062-4063- t.Name = string(sval)
4064- }
4065- // t.Command (string) (string)
4066- case "command":
4067-4068- {
4069- sval, err := cbg.ReadStringWithMax(cr, 1000000)
4070- if err != nil {
4071- return err
4072- }
4073-4074- t.Command = string(sval)
4075- }
4076- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
4077- case "environment":
4078-4079- maj, extra, err = cr.ReadHeader()
4080- if err != nil {
4081- return err
4082- }
4083-4084- if extra > 8192 {
4085- return fmt.Errorf("t.Environment: array too large (%d)", extra)
4086- }
4087-4088- if maj != cbg.MajArray {
4089- return fmt.Errorf("expected cbor array")
4090- }
4091-4092- if extra > 0 {
4093- t.Environment = make([]*Pipeline_Pair, extra)
4094- }
4095-4096- for i := 0; i < int(extra); i++ {
4097- {
4098- var maj byte
4099- var extra uint64
4100- var err error
4101- _ = maj
4102- _ = extra
4103- _ = err
4104-4105- {
4106-4107- b, err := cr.ReadByte()
4108- if err != nil {
4109- return err
4110- }
4111- if b != cbg.CborNull[0] {
4112- if err := cr.UnreadByte(); err != nil {
4113- return err
4114- }
4115- t.Environment[i] = new(Pipeline_Pair)
4116- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
4117- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
4118- }
4119- }
4120-4121- }
4122-4123- }
4124- }
4125-4126- default:
4127- // Field doesn't exist on this type, so ignore it
4128- if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
4129- return err
4130- }
4131- }
4132- }
4133-4134- return nil
4135-}
4136func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
4137 if t == nil {
4138 _, err := w.Write(cbg.CborNull)
···46094610 cw := cbg.NewCborWriter(w)
46114612- if _, err := cw.Write([]byte{165}); err != nil {
000000000000000000000004613 return err
4614 }
4615···4652 return err
4653 }
46544655- // t.Steps ([]*tangled.Pipeline_Step) (slice)
4656- if len("steps") > 1000000 {
4657- return xerrors.Errorf("Value in field \"steps\" was too long")
4658- }
4659-4660- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("steps"))); err != nil {
4661- return err
4662- }
4663- if _, err := cw.WriteString(string("steps")); err != nil {
4664- return err
4665 }
46664667- if len(t.Steps) > 8192 {
4668- return xerrors.Errorf("Slice value in field t.Steps was too long")
4669- }
4670-4671- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Steps))); err != nil {
4672- return err
4673- }
4674- for _, v := range t.Steps {
4675- if err := v.MarshalCBOR(cw); err != nil {
4676- return err
4677- }
4678-4679- }
4680-4681- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
4682- if len("environment") > 1000000 {
4683- return xerrors.Errorf("Value in field \"environment\" was too long")
4684- }
4685-4686- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
4687- return err
4688- }
4689- if _, err := cw.WriteString(string("environment")); err != nil {
4690 return err
4691 }
4692-4693- if len(t.Environment) > 8192 {
4694- return xerrors.Errorf("Slice value in field t.Environment was too long")
4695- }
4696-4697- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
4698 return err
4699- }
4700- for _, v := range t.Environment {
4701- if err := v.MarshalCBOR(cw); err != nil {
4702- return err
4703- }
4704-4705 }
47064707- // t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
4708- if len("dependencies") > 1000000 {
4709- return xerrors.Errorf("Value in field \"dependencies\" was too long")
4710 }
47114712- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependencies"))); err != nil {
4713 return err
4714 }
4715- if _, err := cw.WriteString(string("dependencies")); err != nil {
4716- return err
4717- }
4718-4719- if len(t.Dependencies) > 8192 {
4720- return xerrors.Errorf("Slice value in field t.Dependencies was too long")
4721- }
4722-4723- if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Dependencies))); err != nil {
4724 return err
4725- }
4726- for _, v := range t.Dependencies {
4727- if err := v.MarshalCBOR(cw); err != nil {
4728- return err
4729- }
4730-4731 }
4732 return nil
4733}
···47574758 n := extra
47594760- nameBuf := make([]byte, 12)
4761 for i := uint64(0); i < n; i++ {
4762 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4763 if err != nil {
···4773 }
47744775 switch string(nameBuf[:nameLen]) {
4776- // t.Name (string) (string)
000000000004777 case "name":
47784779 {
···4804 }
48054806 }
4807- // t.Steps ([]*tangled.Pipeline_Step) (slice)
4808- case "steps":
48094810- maj, extra, err = cr.ReadHeader()
4811- if err != nil {
4812- return err
4813- }
4814-4815- if extra > 8192 {
4816- return fmt.Errorf("t.Steps: array too large (%d)", extra)
4817- }
4818-4819- if maj != cbg.MajArray {
4820- return fmt.Errorf("expected cbor array")
4821- }
4822-4823- if extra > 0 {
4824- t.Steps = make([]*Pipeline_Step, extra)
4825- }
4826-4827- for i := 0; i < int(extra); i++ {
4828- {
4829- var maj byte
4830- var extra uint64
4831- var err error
4832- _ = maj
4833- _ = extra
4834- _ = err
4835-4836- {
4837-4838- b, err := cr.ReadByte()
4839- if err != nil {
4840- return err
4841- }
4842- if b != cbg.CborNull[0] {
4843- if err := cr.UnreadByte(); err != nil {
4844- return err
4845- }
4846- t.Steps[i] = new(Pipeline_Step)
4847- if err := t.Steps[i].UnmarshalCBOR(cr); err != nil {
4848- return xerrors.Errorf("unmarshaling t.Steps[i] pointer: %w", err)
4849- }
4850- }
4851-4852- }
4853-4854 }
4855- }
4856- // t.Environment ([]*tangled.Pipeline_Pair) (slice)
4857- case "environment":
48584859- maj, extra, err = cr.ReadHeader()
4860- if err != nil {
4861- return err
4862- }
4863-4864- if extra > 8192 {
4865- return fmt.Errorf("t.Environment: array too large (%d)", extra)
4866- }
4867-4868- if maj != cbg.MajArray {
4869- return fmt.Errorf("expected cbor array")
4870- }
4871-4872- if extra > 0 {
4873- t.Environment = make([]*Pipeline_Pair, extra)
4874- }
4875-4876- for i := 0; i < int(extra); i++ {
4877- {
4878- var maj byte
4879- var extra uint64
4880- var err error
4881- _ = maj
4882- _ = extra
4883- _ = err
4884-4885- {
4886-4887- b, err := cr.ReadByte()
4888- if err != nil {
4889- return err
4890- }
4891- if b != cbg.CborNull[0] {
4892- if err := cr.UnreadByte(); err != nil {
4893- return err
4894- }
4895- t.Environment[i] = new(Pipeline_Pair)
4896- if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
4897- return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
4898- }
4899- }
4900-4901- }
4902-4903- }
4904- }
4905- // t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
4906- case "dependencies":
4907-4908- maj, extra, err = cr.ReadHeader()
4909- if err != nil {
4910- return err
4911- }
4912-4913- if extra > 8192 {
4914- return fmt.Errorf("t.Dependencies: array too large (%d)", extra)
4915- }
4916-4917- if maj != cbg.MajArray {
4918- return fmt.Errorf("expected cbor array")
4919- }
4920-4921- if extra > 0 {
4922- t.Dependencies = make([]*Pipeline_Dependency, extra)
4923- }
4924-4925- for i := 0; i < int(extra); i++ {
4926- {
4927- var maj byte
4928- var extra uint64
4929- var err error
4930- _ = maj
4931- _ = extra
4932- _ = err
4933-4934- {
4935-4936- b, err := cr.ReadByte()
4937- if err != nil {
4938- return err
4939- }
4940- if b != cbg.CborNull[0] {
4941- if err := cr.UnreadByte(); err != nil {
4942- return err
4943- }
4944- t.Dependencies[i] = new(Pipeline_Dependency)
4945- if err := t.Dependencies[i].UnmarshalCBOR(cr); err != nil {
4946- return xerrors.Errorf("unmarshaling t.Dependencies[i] pointer: %w", err)
4947- }
4948- }
4949-4950- }
4951-4952- }
4953 }
49544955 default:
···58545855 return nil
5856}
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005857func (t *RepoIssue) MarshalCBOR(w io.Writer) error {
5858 if t == nil {
5859 _, err := w.Write(cbg.CborNull)
···5861 }
58625863 cw := cbg.NewCborWriter(w)
5864- fieldCount := 7
58655866 if t.Body == nil {
5867 fieldCount--
···5945 return err
5946 }
59475948- // t.Owner (string) (string)
5949- if len("owner") > 1000000 {
5950- return xerrors.Errorf("Value in field \"owner\" was too long")
5951- }
5952-5953- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
5954- return err
5955- }
5956- if _, err := cw.WriteString(string("owner")); err != nil {
5957- return err
5958- }
5959-5960- if len(t.Owner) > 1000000 {
5961- return xerrors.Errorf("Value in field t.Owner was too long")
5962- }
5963-5964- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Owner))); err != nil {
5965- return err
5966- }
5967- if _, err := cw.WriteString(string(t.Owner)); err != nil {
5968- return err
5969- }
5970-5971 // t.Title (string) (string)
5972 if len("title") > 1000000 {
5973 return xerrors.Errorf("Value in field \"title\" was too long")
···5989 }
5990 if _, err := cw.WriteString(string(t.Title)); err != nil {
5991 return err
5992- }
5993-5994- // t.IssueId (int64) (int64)
5995- if len("issueId") > 1000000 {
5996- return xerrors.Errorf("Value in field \"issueId\" was too long")
5997- }
5998-5999- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("issueId"))); err != nil {
6000- return err
6001- }
6002- if _, err := cw.WriteString(string("issueId")); err != nil {
6003- return err
6004- }
6005-6006- if t.IssueId >= 0 {
6007- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.IssueId)); err != nil {
6008- return err
6009- }
6010- } else {
6011- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.IssueId-1)); err != nil {
6012- return err
6013- }
6014 }
60156016 // t.CreatedAt (string) (string)
···61226123 t.LexiconTypeID = string(sval)
6124 }
6125- // t.Owner (string) (string)
6126- case "owner":
6127-6128- {
6129- sval, err := cbg.ReadStringWithMax(cr, 1000000)
6130- if err != nil {
6131- return err
6132- }
6133-6134- t.Owner = string(sval)
6135- }
6136 // t.Title (string) (string)
6137 case "title":
6138···61446145 t.Title = string(sval)
6146 }
6147- // t.IssueId (int64) (int64)
6148- case "issueId":
6149- {
6150- maj, extra, err := cr.ReadHeader()
6151- if err != nil {
6152- return err
6153- }
6154- var extraI int64
6155- switch maj {
6156- case cbg.MajUnsignedInt:
6157- extraI = int64(extra)
6158- if extraI < 0 {
6159- return fmt.Errorf("int64 positive overflow")
6160- }
6161- case cbg.MajNegativeInt:
6162- extraI = int64(extra)
6163- if extraI < 0 {
6164- return fmt.Errorf("int64 negative overflow")
6165- }
6166- extraI = -1 - extraI
6167- default:
6168- return fmt.Errorf("wrong type for int64 field: %d", maj)
6169- }
6170-6171- t.IssueId = int64(extraI)
6172- }
6173 // t.CreatedAt (string) (string)
6174 case "createdAt":
6175···6199 }
62006201 cw := cbg.NewCborWriter(w)
6202- fieldCount := 7
62036204- if t.CommentId == nil {
6205- fieldCount--
6206- }
6207-6208- if t.Owner == nil {
6209- fieldCount--
6210- }
6211-6212- if t.Repo == nil {
6213 fieldCount--
6214 }
6215···6240 return err
6241 }
62426243- // t.Repo (string) (string)
6244- if t.Repo != nil {
6245-6246- if len("repo") > 1000000 {
6247- return xerrors.Errorf("Value in field \"repo\" was too long")
6248- }
6249-6250- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
6251- return err
6252- }
6253- if _, err := cw.WriteString(string("repo")); err != nil {
6254- return err
6255- }
6256-6257- if t.Repo == nil {
6258- if _, err := cw.Write(cbg.CborNull); err != nil {
6259- return err
6260- }
6261- } else {
6262- if len(*t.Repo) > 1000000 {
6263- return xerrors.Errorf("Value in field t.Repo was too long")
6264- }
6265-6266- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
6267- return err
6268- }
6269- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
6270- return err
6271- }
6272- }
6273- }
6274-6275 // t.LexiconTypeID (string) (string)
6276 if len("$type") > 1000000 {
6277 return xerrors.Errorf("Value in field \"$type\" was too long")
···6314 return err
6315 }
63166317- // t.Owner (string) (string)
6318- if t.Owner != nil {
63196320- if len("owner") > 1000000 {
6321- return xerrors.Errorf("Value in field \"owner\" was too long")
6322 }
63236324- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
6325 return err
6326 }
6327- if _, err := cw.WriteString(string("owner")); err != nil {
6328 return err
6329 }
63306331- if t.Owner == nil {
6332 if _, err := cw.Write(cbg.CborNull); err != nil {
6333 return err
6334 }
6335 } else {
6336- if len(*t.Owner) > 1000000 {
6337- return xerrors.Errorf("Value in field t.Owner was too long")
6338 }
63396340- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
6341 return err
6342 }
6343- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
6344 return err
6345 }
6346 }
6347 }
63486349- // t.CommentId (int64) (int64)
6350- if t.CommentId != nil {
6351-6352- if len("commentId") > 1000000 {
6353- return xerrors.Errorf("Value in field \"commentId\" was too long")
6354- }
6355-6356- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
6357- return err
6358- }
6359- if _, err := cw.WriteString(string("commentId")); err != nil {
6360- return err
6361- }
6362-6363- if t.CommentId == nil {
6364- if _, err := cw.Write(cbg.CborNull); err != nil {
6365- return err
6366- }
6367- } else {
6368- if *t.CommentId >= 0 {
6369- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
6370- return err
6371- }
6372- } else {
6373- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
6374- return err
6375- }
6376- }
6377- }
6378-6379- }
6380-6381 // t.CreatedAt (string) (string)
6382 if len("createdAt") > 1000000 {
6383 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···64556456 t.Body = string(sval)
6457 }
6458- // t.Repo (string) (string)
6459- case "repo":
6460-6461- {
6462- b, err := cr.ReadByte()
6463- if err != nil {
6464- return err
6465- }
6466- if b != cbg.CborNull[0] {
6467- if err := cr.UnreadByte(); err != nil {
6468- return err
6469- }
6470-6471- sval, err := cbg.ReadStringWithMax(cr, 1000000)
6472- if err != nil {
6473- return err
6474- }
6475-6476- t.Repo = (*string)(&sval)
6477- }
6478- }
6479 // t.LexiconTypeID (string) (string)
6480 case "$type":
6481···64986499 t.Issue = string(sval)
6500 }
6501- // t.Owner (string) (string)
6502- case "owner":
65036504 {
6505 b, err := cr.ReadByte()
···6516 return err
6517 }
65186519- t.Owner = (*string)(&sval)
6520- }
6521- }
6522- // t.CommentId (int64) (int64)
6523- case "commentId":
6524- {
6525-6526- b, err := cr.ReadByte()
6527- if err != nil {
6528- return err
6529- }
6530- if b != cbg.CborNull[0] {
6531- if err := cr.UnreadByte(); err != nil {
6532- return err
6533- }
6534- maj, extra, err := cr.ReadHeader()
6535- if err != nil {
6536- return err
6537- }
6538- var extraI int64
6539- switch maj {
6540- case cbg.MajUnsignedInt:
6541- extraI = int64(extra)
6542- if extraI < 0 {
6543- return fmt.Errorf("int64 positive overflow")
6544- }
6545- case cbg.MajNegativeInt:
6546- extraI = int64(extra)
6547- if extraI < 0 {
6548- return fmt.Errorf("int64 negative overflow")
6549- }
6550- extraI = -1 - extraI
6551- default:
6552- return fmt.Errorf("wrong type for int64 field: %d", maj)
6553- }
6554-6555- t.CommentId = (*int64)(&extraI)
6556 }
6557 }
6558 // t.CreatedAt (string) (string)
···6748 }
67496750 cw := cbg.NewCborWriter(w)
6751- fieldCount := 9
67526753 if t.Body == nil {
6754 fieldCount--
···6859 return err
6860 }
68616862- // t.PullId (int64) (int64)
6863- if len("pullId") > 1000000 {
6864- return xerrors.Errorf("Value in field \"pullId\" was too long")
6865- }
6866-6867- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pullId"))); err != nil {
6868- return err
6869- }
6870- if _, err := cw.WriteString(string("pullId")); err != nil {
6871- return err
6872- }
6873-6874- if t.PullId >= 0 {
6875- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PullId)); err != nil {
6876- return err
6877- }
6878- } else {
6879- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.PullId-1)); err != nil {
6880- return err
6881- }
6882- }
6883-6884 // t.Source (tangled.RepoPull_Source) (struct)
6885 if t.Source != nil {
6886···6900 }
6901 }
69026903- // t.CreatedAt (string) (string)
6904- if len("createdAt") > 1000000 {
6905- return xerrors.Errorf("Value in field \"createdAt\" was too long")
6906 }
69076908- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
6909 return err
6910 }
6911- if _, err := cw.WriteString(string("createdAt")); err != nil {
6912 return err
6913 }
69146915- if len(t.CreatedAt) > 1000000 {
6916- return xerrors.Errorf("Value in field t.CreatedAt was too long")
6917- }
6918-6919- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
6920- return err
6921- }
6922- if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
6923 return err
6924 }
69256926- // t.TargetRepo (string) (string)
6927- if len("targetRepo") > 1000000 {
6928- return xerrors.Errorf("Value in field \"targetRepo\" was too long")
6929- }
6930-6931- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetRepo"))); err != nil {
6932- return err
6933- }
6934- if _, err := cw.WriteString(string("targetRepo")); err != nil {
6935- return err
6936- }
6937-6938- if len(t.TargetRepo) > 1000000 {
6939- return xerrors.Errorf("Value in field t.TargetRepo was too long")
6940- }
6941-6942- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetRepo))); err != nil {
6943- return err
6944- }
6945- if _, err := cw.WriteString(string(t.TargetRepo)); err != nil {
6946- return err
6947- }
6948-6949- // t.TargetBranch (string) (string)
6950- if len("targetBranch") > 1000000 {
6951- return xerrors.Errorf("Value in field \"targetBranch\" was too long")
6952 }
69536954- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("targetBranch"))); err != nil {
6955 return err
6956 }
6957- if _, err := cw.WriteString(string("targetBranch")); err != nil {
6958 return err
6959 }
69606961- if len(t.TargetBranch) > 1000000 {
6962- return xerrors.Errorf("Value in field t.TargetBranch was too long")
6963 }
69646965- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.TargetBranch))); err != nil {
6966 return err
6967 }
6968- if _, err := cw.WriteString(string(t.TargetBranch)); err != nil {
6969 return err
6970 }
6971 return nil
···69966997 n := extra
69986999- nameBuf := make([]byte, 12)
7000 for i := uint64(0); i < n; i++ {
7001 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
7002 if err != nil {
···70667067 t.Title = string(sval)
7068 }
7069- // t.PullId (int64) (int64)
7070- case "pullId":
7071- {
7072- maj, extra, err := cr.ReadHeader()
7073- if err != nil {
7074- return err
7075- }
7076- var extraI int64
7077- switch maj {
7078- case cbg.MajUnsignedInt:
7079- extraI = int64(extra)
7080- if extraI < 0 {
7081- return fmt.Errorf("int64 positive overflow")
7082- }
7083- case cbg.MajNegativeInt:
7084- extraI = int64(extra)
7085- if extraI < 0 {
7086- return fmt.Errorf("int64 negative overflow")
7087- }
7088- extraI = -1 - extraI
7089- default:
7090- return fmt.Errorf("wrong type for int64 field: %d", maj)
7091- }
7092-7093- t.PullId = int64(extraI)
7094- }
7095 // t.Source (tangled.RepoPull_Source) (struct)
7096 case "source":
7097···7112 }
71137114 }
7115- // t.CreatedAt (string) (string)
7116- case "createdAt":
71177118 {
7119- sval, err := cbg.ReadStringWithMax(cr, 1000000)
07120 if err != nil {
7121 return err
7122 }
7123-7124- t.CreatedAt = string(sval)
7125- }
7126- // t.TargetRepo (string) (string)
7127- case "targetRepo":
7128-7129- {
7130- sval, err := cbg.ReadStringWithMax(cr, 1000000)
7131- if err != nil {
7132- return err
7133 }
71347135- t.TargetRepo = string(sval)
7136 }
7137- // t.TargetBranch (string) (string)
7138- case "targetBranch":
71397140 {
7141 sval, err := cbg.ReadStringWithMax(cr, 1000000)
···7143 return err
7144 }
71457146- t.TargetBranch = string(sval)
7147 }
71487149 default:
···7163 }
71647165 cw := cbg.NewCborWriter(w)
7166- fieldCount := 7
71677168- if t.CommentId == nil {
7169- fieldCount--
7170- }
7171-7172- if t.Owner == nil {
7173- fieldCount--
7174- }
7175-7176- if t.Repo == nil {
7177- fieldCount--
7178- }
7179-7180- if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
7181 return err
7182 }
7183···7227 return err
7228 }
72297230- // t.Repo (string) (string)
7231- if t.Repo != nil {
7232-7233- if len("repo") > 1000000 {
7234- return xerrors.Errorf("Value in field \"repo\" was too long")
7235- }
7236-7237- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
7238- return err
7239- }
7240- if _, err := cw.WriteString(string("repo")); err != nil {
7241- return err
7242- }
7243-7244- if t.Repo == nil {
7245- if _, err := cw.Write(cbg.CborNull); err != nil {
7246- return err
7247- }
7248- } else {
7249- if len(*t.Repo) > 1000000 {
7250- return xerrors.Errorf("Value in field t.Repo was too long")
7251- }
7252-7253- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Repo))); err != nil {
7254- return err
7255- }
7256- if _, err := cw.WriteString(string(*t.Repo)); err != nil {
7257- return err
7258- }
7259- }
7260- }
7261-7262 // t.LexiconTypeID (string) (string)
7263 if len("$type") > 1000000 {
7264 return xerrors.Errorf("Value in field \"$type\" was too long")
···7276 }
7277 if _, err := cw.WriteString(string("sh.tangled.repo.pull.comment")); err != nil {
7278 return err
7279- }
7280-7281- // t.Owner (string) (string)
7282- if t.Owner != nil {
7283-7284- if len("owner") > 1000000 {
7285- return xerrors.Errorf("Value in field \"owner\" was too long")
7286- }
7287-7288- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("owner"))); err != nil {
7289- return err
7290- }
7291- if _, err := cw.WriteString(string("owner")); err != nil {
7292- return err
7293- }
7294-7295- if t.Owner == nil {
7296- if _, err := cw.Write(cbg.CborNull); err != nil {
7297- return err
7298- }
7299- } else {
7300- if len(*t.Owner) > 1000000 {
7301- return xerrors.Errorf("Value in field t.Owner was too long")
7302- }
7303-7304- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Owner))); err != nil {
7305- return err
7306- }
7307- if _, err := cw.WriteString(string(*t.Owner)); err != nil {
7308- return err
7309- }
7310- }
7311- }
7312-7313- // t.CommentId (int64) (int64)
7314- if t.CommentId != nil {
7315-7316- if len("commentId") > 1000000 {
7317- return xerrors.Errorf("Value in field \"commentId\" was too long")
7318- }
7319-7320- if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
7321- return err
7322- }
7323- if _, err := cw.WriteString(string("commentId")); err != nil {
7324- return err
7325- }
7326-7327- if t.CommentId == nil {
7328- if _, err := cw.Write(cbg.CborNull); err != nil {
7329- return err
7330- }
7331- } else {
7332- if *t.CommentId >= 0 {
7333- if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
7334- return err
7335- }
7336- } else {
7337- if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
7338- return err
7339- }
7340- }
7341- }
7342-7343 }
73447345 // t.CreatedAt (string) (string)
···74307431 t.Pull = string(sval)
7432 }
7433- // t.Repo (string) (string)
7434- case "repo":
7435-7436- {
7437- b, err := cr.ReadByte()
7438- if err != nil {
7439- return err
7440- }
7441- if b != cbg.CborNull[0] {
7442- if err := cr.UnreadByte(); err != nil {
7443- return err
7444- }
7445-7446- sval, err := cbg.ReadStringWithMax(cr, 1000000)
7447- if err != nil {
7448- return err
7449- }
7450-7451- t.Repo = (*string)(&sval)
7452- }
7453- }
7454 // t.LexiconTypeID (string) (string)
7455 case "$type":
7456···7461 }
74627463 t.LexiconTypeID = string(sval)
7464- }
7465- // t.Owner (string) (string)
7466- case "owner":
7467-7468- {
7469- b, err := cr.ReadByte()
7470- if err != nil {
7471- return err
7472- }
7473- if b != cbg.CborNull[0] {
7474- if err := cr.UnreadByte(); err != nil {
7475- return err
7476- }
7477-7478- sval, err := cbg.ReadStringWithMax(cr, 1000000)
7479- if err != nil {
7480- return err
7481- }
7482-7483- t.Owner = (*string)(&sval)
7484- }
7485- }
7486- // t.CommentId (int64) (int64)
7487- case "commentId":
7488- {
7489-7490- b, err := cr.ReadByte()
7491- if err != nil {
7492- return err
7493- }
7494- if b != cbg.CborNull[0] {
7495- if err := cr.UnreadByte(); err != nil {
7496- return err
7497- }
7498- maj, extra, err := cr.ReadHeader()
7499- if err != nil {
7500- return err
7501- }
7502- var extraI int64
7503- switch maj {
7504- case cbg.MajUnsignedInt:
7505- extraI = int64(extra)
7506- if extraI < 0 {
7507- return fmt.Errorf("int64 positive overflow")
7508- }
7509- case cbg.MajNegativeInt:
7510- extraI = int64(extra)
7511- if extraI < 0 {
7512- return fmt.Errorf("int64 negative overflow")
7513- }
7514- extraI = -1 - extraI
7515- default:
7516- return fmt.Errorf("wrong type for int64 field: %d", maj)
7517- }
7518-7519- t.CommentId = (*int64)(&extraI)
7520- }
7521 }
7522 // t.CreatedAt (string) (string)
7523 case "createdAt":
···78977898 return nil
7899}
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007900func (t *Spindle) MarshalCBOR(w io.Writer) error {
7901 if t == nil {
7902 _, err := w.Write(cbg.CborNull)
···82258226 return nil
8227}
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
···12021203 return nil
1204}
1205+func (t *GitRefUpdate_CommitCountBreakdown) MarshalCBOR(w io.Writer) error {
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001206 if t == nil {
1207 _, err := w.Write(cbg.CborNull)
1208 return err
···1219 return err
1220 }
12211222+ // t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
1223 if t.ByEmail != nil {
12241225 if len("byEmail") > 1000000 {
···1250 return nil
1251}
12521253+func (t *GitRefUpdate_CommitCountBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
1254+ *t = GitRefUpdate_CommitCountBreakdown{}
12551256 cr := cbg.NewCborReader(r)
1257···1270 }
12711272 if extra > cbg.MaxLength {
1273+ return fmt.Errorf("GitRefUpdate_CommitCountBreakdown: map struct too large (%d)", extra)
1274 }
12751276 n := extra
···1291 }
12921293 switch string(nameBuf[:nameLen]) {
1294+ // t.ByEmail ([]*tangled.GitRefUpdate_IndividualEmailCommitCount) (slice)
1295 case "byEmail":
12961297 maj, extra, err = cr.ReadHeader()
···1308 }
13091310 if extra > 0 {
1311+ t.ByEmail = make([]*GitRefUpdate_IndividualEmailCommitCount, extra)
1312 }
13131314 for i := 0; i < int(extra); i++ {
···1330 if err := cr.UnreadByte(); err != nil {
1331 return err
1332 }
1333+ t.ByEmail[i] = new(GitRefUpdate_IndividualEmailCommitCount)
1334 if err := t.ByEmail[i].UnmarshalCBOR(cr); err != nil {
1335 return xerrors.Errorf("unmarshaling t.ByEmail[i] pointer: %w", err)
1336 }
···13511352 return nil
1353}
1354+func (t *GitRefUpdate_IndividualEmailCommitCount) MarshalCBOR(w io.Writer) error {
1355 if t == nil {
1356 _, err := w.Write(cbg.CborNull)
1357 return err
···1410 return nil
1411}
14121413+func (t *GitRefUpdate_IndividualEmailCommitCount) UnmarshalCBOR(r io.Reader) (err error) {
1414+ *t = GitRefUpdate_IndividualEmailCommitCount{}
14151416 cr := cbg.NewCborReader(r)
1417···1430 }
14311432 if extra > cbg.MaxLength {
1433+ return fmt.Errorf("GitRefUpdate_IndividualEmailCommitCount: map struct too large (%d)", extra)
1434 }
14351436 n := extra
···14991500 return nil
1501}
1502+func (t *GitRefUpdate_LangBreakdown) MarshalCBOR(w io.Writer) error {
1503 if t == nil {
1504 _, err := w.Write(cbg.CborNull)
1505 return err
···1516 return err
1517 }
15181519+ // t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
1520 if t.Inputs != nil {
15211522 if len("inputs") > 1000000 {
···1547 return nil
1548}
15491550+func (t *GitRefUpdate_LangBreakdown) UnmarshalCBOR(r io.Reader) (err error) {
1551+ *t = GitRefUpdate_LangBreakdown{}
15521553 cr := cbg.NewCborReader(r)
1554···1567 }
15681569 if extra > cbg.MaxLength {
1570+ return fmt.Errorf("GitRefUpdate_LangBreakdown: map struct too large (%d)", extra)
1571 }
15721573 n := extra
···1588 }
15891590 switch string(nameBuf[:nameLen]) {
1591+ // t.Inputs ([]*tangled.GitRefUpdate_IndividualLanguageSize) (slice)
1592 case "inputs":
15931594 maj, extra, err = cr.ReadHeader()
···1605 }
16061607 if extra > 0 {
1608+ t.Inputs = make([]*GitRefUpdate_IndividualLanguageSize, extra)
1609 }
16101611 for i := 0; i < int(extra); i++ {
···1627 if err := cr.UnreadByte(); err != nil {
1628 return err
1629 }
1630+ t.Inputs[i] = new(GitRefUpdate_IndividualLanguageSize)
1631 if err := t.Inputs[i].UnmarshalCBOR(cr); err != nil {
1632 return xerrors.Errorf("unmarshaling t.Inputs[i] pointer: %w", err)
1633 }
···16481649 return nil
1650}
1651+func (t *GitRefUpdate_IndividualLanguageSize) MarshalCBOR(w io.Writer) error {
1652 if t == nil {
1653 _, err := w.Write(cbg.CborNull)
1654 return err
···1708 return nil
1709}
17101711+func (t *GitRefUpdate_IndividualLanguageSize) UnmarshalCBOR(r io.Reader) (err error) {
1712+ *t = GitRefUpdate_IndividualLanguageSize{}
17131714 cr := cbg.NewCborReader(r)
1715···1728 }
17291730 if extra > cbg.MaxLength {
1731+ return fmt.Errorf("GitRefUpdate_IndividualLanguageSize: map struct too large (%d)", extra)
1732 }
17331734 n := extra
···17971798 return nil
1799}
1800+func (t *GitRefUpdate_Meta) MarshalCBOR(w io.Writer) error {
1801+ if t == nil {
1802+ _, err := w.Write(cbg.CborNull)
1803+ return err
1804+ }
1805+1806+ cw := cbg.NewCborWriter(w)
1807+ fieldCount := 3
1808+1809+ if t.LangBreakdown == nil {
1810+ fieldCount--
1811+ }
1812+1813+ if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
1814+ return err
1815+ }
1816+1817+ // t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
1818+ if len("commitCount") > 1000000 {
1819+ return xerrors.Errorf("Value in field \"commitCount\" was too long")
1820+ }
1821+1822+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commitCount"))); err != nil {
1823+ return err
1824+ }
1825+ if _, err := cw.WriteString(string("commitCount")); err != nil {
1826+ return err
1827+ }
1828+1829+ if err := t.CommitCount.MarshalCBOR(cw); err != nil {
1830+ return err
1831+ }
1832+1833+ // t.IsDefaultRef (bool) (bool)
1834+ if len("isDefaultRef") > 1000000 {
1835+ return xerrors.Errorf("Value in field \"isDefaultRef\" was too long")
1836+ }
1837+1838+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("isDefaultRef"))); err != nil {
1839+ return err
1840+ }
1841+ if _, err := cw.WriteString(string("isDefaultRef")); err != nil {
1842+ return err
1843+ }
1844+1845+ if err := cbg.WriteBool(w, t.IsDefaultRef); err != nil {
1846+ return err
1847+ }
1848+1849+ // t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
1850+ if t.LangBreakdown != nil {
1851+1852+ if len("langBreakdown") > 1000000 {
1853+ return xerrors.Errorf("Value in field \"langBreakdown\" was too long")
1854+ }
1855+1856+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("langBreakdown"))); err != nil {
1857+ return err
1858+ }
1859+ if _, err := cw.WriteString(string("langBreakdown")); err != nil {
1860+ return err
1861+ }
1862+1863+ if err := t.LangBreakdown.MarshalCBOR(cw); err != nil {
1864+ return err
1865+ }
1866+ }
1867+ return nil
1868+}
1869+1870+func (t *GitRefUpdate_Meta) UnmarshalCBOR(r io.Reader) (err error) {
1871+ *t = GitRefUpdate_Meta{}
1872+1873+ cr := cbg.NewCborReader(r)
1874+1875+ maj, extra, err := cr.ReadHeader()
1876+ if err != nil {
1877+ return err
1878+ }
1879+ defer func() {
1880+ if err == io.EOF {
1881+ err = io.ErrUnexpectedEOF
1882+ }
1883+ }()
1884+1885+ if maj != cbg.MajMap {
1886+ return fmt.Errorf("cbor input should be of type map")
1887+ }
1888+1889+ if extra > cbg.MaxLength {
1890+ return fmt.Errorf("GitRefUpdate_Meta: map struct too large (%d)", extra)
1891+ }
1892+1893+ n := extra
1894+1895+ nameBuf := make([]byte, 13)
1896+ for i := uint64(0); i < n; i++ {
1897+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
1898+ if err != nil {
1899+ return err
1900+ }
1901+1902+ if !ok {
1903+ // Field doesn't exist on this type, so ignore it
1904+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
1905+ return err
1906+ }
1907+ continue
1908+ }
1909+1910+ switch string(nameBuf[:nameLen]) {
1911+ // t.CommitCount (tangled.GitRefUpdate_CommitCountBreakdown) (struct)
1912+ case "commitCount":
1913+1914+ {
1915+1916+ b, err := cr.ReadByte()
1917+ if err != nil {
1918+ return err
1919+ }
1920+ if b != cbg.CborNull[0] {
1921+ if err := cr.UnreadByte(); err != nil {
1922+ return err
1923+ }
1924+ t.CommitCount = new(GitRefUpdate_CommitCountBreakdown)
1925+ if err := t.CommitCount.UnmarshalCBOR(cr); err != nil {
1926+ return xerrors.Errorf("unmarshaling t.CommitCount pointer: %w", err)
1927+ }
1928+ }
1929+1930+ }
1931+ // t.IsDefaultRef (bool) (bool)
1932+ case "isDefaultRef":
1933+1934+ maj, extra, err = cr.ReadHeader()
1935+ if err != nil {
1936+ return err
1937+ }
1938+ if maj != cbg.MajOther {
1939+ return fmt.Errorf("booleans must be major type 7")
1940+ }
1941+ switch extra {
1942+ case 20:
1943+ t.IsDefaultRef = false
1944+ case 21:
1945+ t.IsDefaultRef = true
1946+ default:
1947+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
1948+ }
1949+ // t.LangBreakdown (tangled.GitRefUpdate_LangBreakdown) (struct)
1950+ case "langBreakdown":
1951+1952+ {
1953+1954+ b, err := cr.ReadByte()
1955+ if err != nil {
1956+ return err
1957+ }
1958+ if b != cbg.CborNull[0] {
1959+ if err := cr.UnreadByte(); err != nil {
1960+ return err
1961+ }
1962+ t.LangBreakdown = new(GitRefUpdate_LangBreakdown)
1963+ if err := t.LangBreakdown.UnmarshalCBOR(cr); err != nil {
1964+ return xerrors.Errorf("unmarshaling t.LangBreakdown pointer: %w", err)
1965+ }
1966+ }
1967+1968+ }
1969+1970+ default:
1971+ // Field doesn't exist on this type, so ignore it
1972+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
1973+ return err
1974+ }
1975+ }
1976+ }
1977+1978+ return nil
1979+}
1980func (t *GraphFollow) MarshalCBOR(w io.Writer) error {
1981 if t == nil {
1982 _, err := w.Write(cbg.CborNull)
···2118 }
21192120 t.Subject = string(sval)
2121+ }
2122+ // t.CreatedAt (string) (string)
2123+ case "createdAt":
2124+2125+ {
2126+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
2127+ if err != nil {
2128+ return err
2129+ }
2130+2131+ t.CreatedAt = string(sval)
2132+ }
2133+2134+ default:
2135+ // Field doesn't exist on this type, so ignore it
2136+ if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
2137+ return err
2138+ }
2139+ }
2140+ }
2141+2142+ return nil
2143+}
2144+func (t *Knot) MarshalCBOR(w io.Writer) error {
2145+ if t == nil {
2146+ _, err := w.Write(cbg.CborNull)
2147+ return err
2148+ }
2149+2150+ cw := cbg.NewCborWriter(w)
2151+2152+ if _, err := cw.Write([]byte{162}); err != nil {
2153+ return err
2154+ }
2155+2156+ // t.LexiconTypeID (string) (string)
2157+ if len("$type") > 1000000 {
2158+ return xerrors.Errorf("Value in field \"$type\" was too long")
2159+ }
2160+2161+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
2162+ return err
2163+ }
2164+ if _, err := cw.WriteString(string("$type")); err != nil {
2165+ return err
2166+ }
2167+2168+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.knot"))); err != nil {
2169+ return err
2170+ }
2171+ if _, err := cw.WriteString(string("sh.tangled.knot")); err != nil {
2172+ return err
2173+ }
2174+2175+ // t.CreatedAt (string) (string)
2176+ if len("createdAt") > 1000000 {
2177+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
2178+ }
2179+2180+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
2181+ return err
2182+ }
2183+ if _, err := cw.WriteString(string("createdAt")); err != nil {
2184+ return err
2185+ }
2186+2187+ if len(t.CreatedAt) > 1000000 {
2188+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
2189+ }
2190+2191+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
2192+ return err
2193+ }
2194+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
2195+ return err
2196+ }
2197+ return nil
2198+}
2199+2200+func (t *Knot) UnmarshalCBOR(r io.Reader) (err error) {
2201+ *t = Knot{}
2202+2203+ cr := cbg.NewCborReader(r)
2204+2205+ maj, extra, err := cr.ReadHeader()
2206+ if err != nil {
2207+ return err
2208+ }
2209+ defer func() {
2210+ if err == io.EOF {
2211+ err = io.ErrUnexpectedEOF
2212+ }
2213+ }()
2214+2215+ if maj != cbg.MajMap {
2216+ return fmt.Errorf("cbor input should be of type map")
2217+ }
2218+2219+ if extra > cbg.MaxLength {
2220+ return fmt.Errorf("Knot: map struct too large (%d)", extra)
2221+ }
2222+2223+ n := extra
2224+2225+ nameBuf := make([]byte, 9)
2226+ for i := uint64(0); i < n; i++ {
2227+ nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
2228+ if err != nil {
2229+ return err
2230+ }
2231+2232+ if !ok {
2233+ // Field doesn't exist on this type, so ignore it
2234+ if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
2235+ return err
2236+ }
2237+ continue
2238+ }
2239+2240+ switch string(nameBuf[:nameLen]) {
2241+ // t.LexiconTypeID (string) (string)
2242+ case "$type":
2243+2244+ {
2245+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
2246+ if err != nil {
2247+ return err
2248+ }
2249+2250+ t.LexiconTypeID = string(sval)
2251 }
2252 // t.CreatedAt (string) (string)
2253 case "createdAt":
···28582859 return nil
2860}
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002861func (t *Pipeline_ManualTriggerData) MarshalCBOR(w io.Writer) error {
2862 if t == nil {
2863 _, err := w.Write(cbg.CborNull)
···38733874 return nil
3875}
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003876func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
3877 if t == nil {
3878 _, err := w.Write(cbg.CborNull)
···43494350 cw := cbg.NewCborWriter(w)
43514352+ if _, err := cw.Write([]byte{164}); err != nil {
4353+ return err
4354+ }
4355+4356+ // t.Raw (string) (string)
4357+ if len("raw") > 1000000 {
4358+ return xerrors.Errorf("Value in field \"raw\" was too long")
4359+ }
4360+4361+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("raw"))); err != nil {
4362+ return err
4363+ }
4364+ if _, err := cw.WriteString(string("raw")); err != nil {
4365+ return err
4366+ }
4367+4368+ if len(t.Raw) > 1000000 {
4369+ return xerrors.Errorf("Value in field t.Raw was too long")
4370+ }
4371+4372+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Raw))); err != nil {
4373+ return err
4374+ }
4375+ if _, err := cw.WriteString(string(t.Raw)); err != nil {
4376 return err
4377 }
4378···4415 return err
4416 }
44174418+ // t.Engine (string) (string)
4419+ if len("engine") > 1000000 {
4420+ return xerrors.Errorf("Value in field \"engine\" was too long")
00000004421 }
44224423+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("engine"))); err != nil {
00000000000000000000004424 return err
4425 }
4426+ if _, err := cw.WriteString(string("engine")); err != nil {
000004427 return err
0000004428 }
44294430+ if len(t.Engine) > 1000000 {
4431+ return xerrors.Errorf("Value in field t.Engine was too long")
04432 }
44334434+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Engine))); err != nil {
4435 return err
4436 }
4437+ if _, err := cw.WriteString(string(t.Engine)); err != nil {
000000004438 return err
0000004439 }
4440 return nil
4441}
···44654466 n := extra
44674468+ nameBuf := make([]byte, 6)
4469 for i := uint64(0); i < n; i++ {
4470 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4471 if err != nil {
···4481 }
44824483 switch string(nameBuf[:nameLen]) {
4484+ // t.Raw (string) (string)
4485+ case "raw":
4486+4487+ {
4488+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
4489+ if err != nil {
4490+ return err
4491+ }
4492+4493+ t.Raw = string(sval)
4494+ }
4495+ // t.Name (string) (string)
4496 case "name":
44974498 {
···4523 }
45244525 }
4526+ // t.Engine (string) (string)
4527+ case "engine":
45284529+ {
4530+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
4531+ if err != nil {
4532+ return err
00000000000000000000000000000000000000004533 }
00045344535+ t.Engine = string(sval)
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004536 }
45374538 default:
···54375438 return nil
5439}
5440+func (t *RepoCollaborator) MarshalCBOR(w io.Writer) error {
5441+ if t == nil {
5442+ _, err := w.Write(cbg.CborNull)
5443+ return err
5444+ }
5445+5446+ cw := cbg.NewCborWriter(w)
5447+5448+ if _, err := cw.Write([]byte{164}); err != nil {
5449+ return err
5450+ }
5451+5452+ // t.Repo (string) (string)
5453+ if len("repo") > 1000000 {
5454+ return xerrors.Errorf("Value in field \"repo\" was too long")
5455+ }
5456+5457+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
5458+ return err
5459+ }
5460+ if _, err := cw.WriteString(string("repo")); err != nil {
5461+ return err
5462+ }
5463+5464+ if len(t.Repo) > 1000000 {
5465+ return xerrors.Errorf("Value in field t.Repo was too long")
5466+ }
5467+5468+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
5469+ return err
5470+ }
5471+ if _, err := cw.WriteString(string(t.Repo)); err != nil {
5472+ return err
5473+ }
5474+5475+ // t.LexiconTypeID (string) (string)
5476+ if len("$type") > 1000000 {
5477+ return xerrors.Errorf("Value in field \"$type\" was too long")
5478+ }
5479+5480+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
5481+ return err
5482+ }
5483+ if _, err := cw.WriteString(string("$type")); err != nil {
5484+ return err
5485+ }
5486+5487+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.repo.collaborator"))); err != nil {
5488+ return err
5489+ }
5490+ if _, err := cw.WriteString(string("sh.tangled.repo.collaborator")); err != nil {
5491+ return err
5492+ }
5493+5494+ // t.Subject (string) (string)
5495+ if len("subject") > 1000000 {
5496+ return xerrors.Errorf("Value in field \"subject\" was too long")
5497+ }
5498+5499+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil {
5500+ return err
5501+ }
5502+ if _, err := cw.WriteString(string("subject")); err != nil {
5503+ return err
5504+ }
5505+5506+ if len(t.Subject) > 1000000 {
5507+ return xerrors.Errorf("Value in field t.Subject was too long")
5508+ }
5509+5510+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Subject))); err != nil {
5511+ return err
5512+ }
5513+ if _, err := cw.WriteString(string(t.Subject)); err != nil {
5514+ return err
5515+ }
5516+5517+ // t.CreatedAt (string) (string)
5518+ if len("createdAt") > 1000000 {
5519+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
5520+ }
5521+5522+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
5523+ return err
5524+ }
5525+ if _, err := cw.WriteString(string("createdAt")); err != nil {
5526+ return err
5527+ }
5528+5529+ if len(t.CreatedAt) > 1000000 {
5530+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
5531+ }
5532+5533+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
5534+ return err
5535+ }
5536+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
5537+ return err
5538+ }
5539+ return nil
5540+}
// UnmarshalCBOR decodes a RepoCollaborator from its CBOR map encoding,
// resetting t first. Unknown map keys are skipped without error.
// (cbor-gen style decoder — generated code, do not hand-edit logic.)
func (t *RepoCollaborator) UnmarshalCBOR(r io.Reader) (err error) {
	*t = RepoCollaborator{}

	cr := cbg.NewCborReader(r)

	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	// A map header was already consumed, so a bare EOF mid-struct is
	// truncated input — surface it as ErrUnexpectedEOF.
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}

	if extra > cbg.MaxLength {
		return fmt.Errorf("RepoCollaborator: map struct too large (%d)", extra)
	}

	n := extra

	// 9 bytes fits the longest known key, "createdAt".
	nameBuf := make([]byte, 9)
	for i := uint64(0); i < n; i++ {
		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
		if err != nil {
			return err
		}

		if !ok {
			// Field doesn't exist on this type, so ignore it
			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
				return err
			}
			continue
		}

		switch string(nameBuf[:nameLen]) {
		// t.Repo (string) (string)
		case "repo":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Repo = string(sval)
			}
		// t.LexiconTypeID (string) (string)
		case "$type":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.LexiconTypeID = string(sval)
			}
		// t.Subject (string) (string)
		case "subject":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Subject = string(sval)
			}
		// t.CreatedAt (string) (string)
		case "createdAt":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.CreatedAt = string(sval)
			}

		default:
			// Field doesn't exist on this type, so ignore it
			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
				return err
			}
		}
	}

	return nil
}
5638func (t *RepoIssue) MarshalCBOR(w io.Writer) error {
5639 if t == nil {
5640 _, err := w.Write(cbg.CborNull)
···5642 }
56435644 cw := cbg.NewCborWriter(w)
5645+ fieldCount := 5
56465647 if t.Body == nil {
5648 fieldCount--
···5726 return err
5727 }
5728000000000000000000000005729 // t.Title (string) (string)
5730 if len("title") > 1000000 {
5731 return xerrors.Errorf("Value in field \"title\" was too long")
···5747 }
5748 if _, err := cw.WriteString(string(t.Title)); err != nil {
5749 return err
00000000000000000000005750 }
57515752 // t.CreatedAt (string) (string)
···58585859 t.LexiconTypeID = string(sval)
5860 }
000000000005861 // t.Title (string) (string)
5862 case "title":
5863···58695870 t.Title = string(sval)
5871 }
000000000000000000000000005872 // t.CreatedAt (string) (string)
5873 case "createdAt":
5874···5898 }
58995900 cw := cbg.NewCborWriter(w)
5901+ fieldCount := 5
59025903+ if t.ReplyTo == nil {
000000005904 fieldCount--
5905 }
5906···5931 return err
5932 }
5933000000000000000000000000000000005934 // t.LexiconTypeID (string) (string)
5935 if len("$type") > 1000000 {
5936 return xerrors.Errorf("Value in field \"$type\" was too long")
···5973 return err
5974 }
59755976+ // t.ReplyTo (string) (string)
5977+ if t.ReplyTo != nil {
59785979+ if len("replyTo") > 1000000 {
5980+ return xerrors.Errorf("Value in field \"replyTo\" was too long")
5981 }
59825983+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("replyTo"))); err != nil {
5984 return err
5985 }
5986+ if _, err := cw.WriteString(string("replyTo")); err != nil {
5987 return err
5988 }
59895990+ if t.ReplyTo == nil {
5991 if _, err := cw.Write(cbg.CborNull); err != nil {
5992 return err
5993 }
5994 } else {
5995+ if len(*t.ReplyTo) > 1000000 {
5996+ return xerrors.Errorf("Value in field t.ReplyTo was too long")
5997 }
59985999+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.ReplyTo))); err != nil {
6000 return err
6001 }
6002+ if _, err := cw.WriteString(string(*t.ReplyTo)); err != nil {
6003 return err
6004 }
6005 }
6006 }
6007000000000000000000000000000000006008 // t.CreatedAt (string) (string)
6009 if len("createdAt") > 1000000 {
6010 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···60826083 t.Body = string(sval)
6084 }
0000000000000000000006085 // t.LexiconTypeID (string) (string)
6086 case "$type":
6087···61046105 t.Issue = string(sval)
6106 }
6107+ // t.ReplyTo (string) (string)
6108+ case "replyTo":
61096110 {
6111 b, err := cr.ReadByte()
···6122 return err
6123 }
61246125+ t.ReplyTo = (*string)(&sval)
0000000000000000000000000000000000006126 }
6127 }
6128 // t.CreatedAt (string) (string)
···6318 }
63196320 cw := cbg.NewCborWriter(w)
6321+ fieldCount := 7
63226323 if t.Body == nil {
6324 fieldCount--
···6429 return err
6430 }
643100000000000000000000006432 // t.Source (tangled.RepoPull_Source) (struct)
6433 if t.Source != nil {
6434···6448 }
6449 }
64506451+ // t.Target (tangled.RepoPull_Target) (struct)
6452+ if len("target") > 1000000 {
6453+ return xerrors.Errorf("Value in field \"target\" was too long")
6454 }
64556456+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("target"))); err != nil {
6457 return err
6458 }
6459+ if _, err := cw.WriteString(string("target")); err != nil {
6460 return err
6461 }
64626463+ if err := t.Target.MarshalCBOR(cw); err != nil {
00000006464 return err
6465 }
64666467+ // t.CreatedAt (string) (string)
6468+ if len("createdAt") > 1000000 {
6469+ return xerrors.Errorf("Value in field \"createdAt\" was too long")
000000000000000000000006470 }
64716472+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
6473 return err
6474 }
6475+ if _, err := cw.WriteString(string("createdAt")); err != nil {
6476 return err
6477 }
64786479+ if len(t.CreatedAt) > 1000000 {
6480+ return xerrors.Errorf("Value in field t.CreatedAt was too long")
6481 }
64826483+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
6484 return err
6485 }
6486+ if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
6487 return err
6488 }
6489 return nil
···65146515 n := extra
65166517+ nameBuf := make([]byte, 9)
6518 for i := uint64(0); i < n; i++ {
6519 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
6520 if err != nil {
···65846585 t.Title = string(sval)
6586 }
000000000000000000000000006587 // t.Source (tangled.RepoPull_Source) (struct)
6588 case "source":
6589···6604 }
66056606 }
6607+ // t.Target (tangled.RepoPull_Target) (struct)
6608+ case "target":
66096610 {
6611+6612+ b, err := cr.ReadByte()
6613 if err != nil {
6614 return err
6615 }
6616+ if b != cbg.CborNull[0] {
6617+ if err := cr.UnreadByte(); err != nil {
6618+ return err
6619+ }
6620+ t.Target = new(RepoPull_Target)
6621+ if err := t.Target.UnmarshalCBOR(cr); err != nil {
6622+ return xerrors.Errorf("unmarshaling t.Target pointer: %w", err)
6623+ }
006624 }
662506626 }
6627+ // t.CreatedAt (string) (string)
6628+ case "createdAt":
66296630 {
6631 sval, err := cbg.ReadStringWithMax(cr, 1000000)
···6633 return err
6634 }
66356636+ t.CreatedAt = string(sval)
6637 }
66386639 default:
···6653 }
66546655 cw := cbg.NewCborWriter(w)
066566657+ if _, err := cw.Write([]byte{164}); err != nil {
0000000000006658 return err
6659 }
6660···6704 return err
6705 }
6706000000000000000000000000000000006707 // t.LexiconTypeID (string) (string)
6708 if len("$type") > 1000000 {
6709 return xerrors.Errorf("Value in field \"$type\" was too long")
···6721 }
6722 if _, err := cw.WriteString(string("sh.tangled.repo.pull.comment")); err != nil {
6723 return err
00000000000000000000000000000000000000000000000000000000000000006724 }
67256726 // t.CreatedAt (string) (string)
···68116812 t.Pull = string(sval)
6813 }
0000000000000000000006814 // t.LexiconTypeID (string) (string)
6815 case "$type":
6816···6821 }
68226823 t.LexiconTypeID = string(sval)
0000000000000000000000000000000000000000000000000000000006824 }
6825 // t.CreatedAt (string) (string)
6826 case "createdAt":
···72007201 return nil
7202}
// MarshalCBOR encodes t as a fixed two-entry CBOR map ({repo, branch}),
// or as CBOR null when t is nil.
// (cbor-gen style encoder — generated code, do not hand-edit logic.)
func (t *RepoPull_Target) MarshalCBOR(w io.Writer) error {
	if t == nil {
		_, err := w.Write(cbg.CborNull)
		return err
	}

	cw := cbg.NewCborWriter(w)

	// 0xa2: CBOR map header for exactly 2 entries.
	if _, err := cw.Write([]byte{162}); err != nil {
		return err
	}

	// t.Repo (string) (string)
	if len("repo") > 1000000 {
		return xerrors.Errorf("Value in field \"repo\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repo"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("repo")); err != nil {
		return err
	}

	if len(t.Repo) > 1000000 {
		return xerrors.Errorf("Value in field t.Repo was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repo))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string(t.Repo)); err != nil {
		return err
	}

	// t.Branch (string) (string)
	if len("branch") > 1000000 {
		return xerrors.Errorf("Value in field \"branch\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("branch"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("branch")); err != nil {
		return err
	}

	if len(t.Branch) > 1000000 {
		return xerrors.Errorf("Value in field t.Branch was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Branch))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string(t.Branch)); err != nil {
		return err
	}
	return nil
}
// UnmarshalCBOR decodes a RepoPull_Target from its CBOR map encoding,
// resetting t first. Unknown map keys are skipped without error.
// (cbor-gen style decoder — generated code, do not hand-edit logic.)
func (t *RepoPull_Target) UnmarshalCBOR(r io.Reader) (err error) {
	*t = RepoPull_Target{}

	cr := cbg.NewCborReader(r)

	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	// A map header was already consumed, so a bare EOF mid-struct is
	// truncated input — surface it as ErrUnexpectedEOF.
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}

	if extra > cbg.MaxLength {
		return fmt.Errorf("RepoPull_Target: map struct too large (%d)", extra)
	}

	n := extra

	// 6 bytes fits the longest known key, "branch".
	nameBuf := make([]byte, 6)
	for i := uint64(0); i < n; i++ {
		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
		if err != nil {
			return err
		}

		if !ok {
			// Field doesn't exist on this type, so ignore it
			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
				return err
			}
			continue
		}

		switch string(nameBuf[:nameLen]) {
		// t.Repo (string) (string)
		case "repo":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Repo = string(sval)
			}
		// t.Branch (string) (string)
		case "branch":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Branch = string(sval)
			}

		default:
			// Field doesn't exist on this type, so ignore it
			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
				return err
			}
		}
	}

	return nil
}
7337func (t *Spindle) MarshalCBOR(w io.Writer) error {
7338 if t == nil {
7339 _, err := w.Write(cbg.CborNull)
···76627663 return nil
7664}
// MarshalCBOR encodes t as a fixed five-entry CBOR map, or as CBOR null
// when t is nil. The $type entry is written as the constant
// "sh.tangled.string" regardless of t.LexiconTypeID.
// (cbor-gen style encoder — generated code, do not hand-edit logic.)
func (t *String) MarshalCBOR(w io.Writer) error {
	if t == nil {
		_, err := w.Write(cbg.CborNull)
		return err
	}

	cw := cbg.NewCborWriter(w)

	// 0xa5: CBOR map header for exactly 5 entries.
	if _, err := cw.Write([]byte{165}); err != nil {
		return err
	}

	// t.LexiconTypeID (string) (string)
	if len("$type") > 1000000 {
		return xerrors.Errorf("Value in field \"$type\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("$type")); err != nil {
		return err
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.string"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("sh.tangled.string")); err != nil {
		return err
	}

	// t.Contents (string) (string)
	if len("contents") > 1000000 {
		return xerrors.Errorf("Value in field \"contents\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("contents"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("contents")); err != nil {
		return err
	}

	if len(t.Contents) > 1000000 {
		return xerrors.Errorf("Value in field t.Contents was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Contents))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string(t.Contents)); err != nil {
		return err
	}

	// t.Filename (string) (string)
	if len("filename") > 1000000 {
		return xerrors.Errorf("Value in field \"filename\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("filename"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("filename")); err != nil {
		return err
	}

	if len(t.Filename) > 1000000 {
		return xerrors.Errorf("Value in field t.Filename was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Filename))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string(t.Filename)); err != nil {
		return err
	}

	// t.CreatedAt (string) (string)
	if len("createdAt") > 1000000 {
		return xerrors.Errorf("Value in field \"createdAt\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("createdAt")); err != nil {
		return err
	}

	if len(t.CreatedAt) > 1000000 {
		return xerrors.Errorf("Value in field t.CreatedAt was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
		return err
	}

	// t.Description (string) (string)
	if len("description") > 1000000 {
		return xerrors.Errorf("Value in field \"description\" was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("description"))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string("description")); err != nil {
		return err
	}

	if len(t.Description) > 1000000 {
		return xerrors.Errorf("Value in field t.Description was too long")
	}

	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Description))); err != nil {
		return err
	}
	if _, err := cw.WriteString(string(t.Description)); err != nil {
		return err
	}
	return nil
}
// UnmarshalCBOR decodes a String record from its CBOR map encoding,
// resetting t first. Unknown map keys are skipped without error.
// (cbor-gen style decoder — generated code, do not hand-edit logic.)
func (t *String) UnmarshalCBOR(r io.Reader) (err error) {
	*t = String{}

	cr := cbg.NewCborReader(r)

	maj, extra, err := cr.ReadHeader()
	if err != nil {
		return err
	}
	// A map header was already consumed, so a bare EOF mid-struct is
	// truncated input — surface it as ErrUnexpectedEOF.
	defer func() {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
	}()

	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}

	if extra > cbg.MaxLength {
		return fmt.Errorf("String: map struct too large (%d)", extra)
	}

	n := extra

	// 11 bytes fits the longest known key, "description".
	nameBuf := make([]byte, 11)
	for i := uint64(0); i < n; i++ {
		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
		if err != nil {
			return err
		}

		if !ok {
			// Field doesn't exist on this type, so ignore it
			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
				return err
			}
			continue
		}

		switch string(nameBuf[:nameLen]) {
		// t.LexiconTypeID (string) (string)
		case "$type":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.LexiconTypeID = string(sval)
			}
		// t.Contents (string) (string)
		case "contents":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Contents = string(sval)
			}
		// t.Filename (string) (string)
		case "filename":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Filename = string(sval)
			}
		// t.CreatedAt (string) (string)
		case "createdAt":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.CreatedAt = string(sval)
			}
		// t.Description (string) (string)
		case "description":

			{
				sval, err := cbg.ReadStringWithMax(cr, 1000000)
				if err != nil {
					return err
				}

				t.Description = string(sval)
			}

		default:
			// Field doesn't exist on this type, so ignore it
			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
				return err
			}
		}
	}

	return nil
}
···23import (
4 "database/sql"
5+ "fmt"
6+ "maps"
7+ "slices"
8+ "sort"
9+ "strings"
10 "time"
1112 "github.com/bluesky-social/indigo/atproto/syntax"
13+ "tangled.sh/tangled.sh/core/api/tangled"
14 "tangled.sh/tangled.sh/core/appview/pagination"
15)
// Issue is a row in the issues table: one issue filed against a repo.
// The atproto record identity is (Did, Rkey); within a repo the issue is
// numbered by IssueId (assigned from repo_issue_seqs).
type Issue struct {
	Id      int64        // database rowid
	Did     string       // DID of the record owner (used to build the at-uri)
	Rkey    string       // record key of the atproto record
	RepoAt  syntax.ATURI // at-uri of the parent repo
	IssueId int          // per-repo sequential issue number
	Created time.Time
	Edited  *time.Time // nil if never edited
	Deleted *time.Time // nil if not deleted
	Title   string
	Body    string
	Open    bool // true while the issue is open

	// optionally, populate this when querying for reverse mappings
	// like comment counts, parent repo etc.
	Comments []IssueComment
	Repo     *Repo
}
3536+func (i *Issue) AtUri() syntax.ATURI {
37+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueNSID, i.Rkey))
38+}
39+40+func (i *Issue) AsRecord() tangled.RepoIssue {
41+ return tangled.RepoIssue{
42+ Repo: i.RepoAt.String(),
43+ Title: i.Title,
44+ Body: &i.Body,
45+ CreatedAt: i.Created.Format(time.RFC3339),
46+ }
47+}
48+49+func (i *Issue) State() string {
50+ if i.Open {
51+ return "open"
52+ }
53+ return "closed"
54}
// CommentListItem is one top-level comment on an issue together with its
// direct replies, as produced by Issue.CommentList.
type CommentListItem struct {
	Self    *IssueComment   // the top-level comment itself
	Replies []*IssueComment // direct replies, sorted oldest-first
}
6061+func (i *Issue) CommentList() []CommentListItem {
62+ // Create a map to quickly find comments by their aturi
63+ toplevel := make(map[string]*CommentListItem)
64+ var replies []*IssueComment
6566+ // collect top level comments into the map
67+ for _, comment := range i.Comments {
68+ if comment.IsTopLevel() {
69+ toplevel[comment.AtUri().String()] = &CommentListItem{
70+ Self: &comment,
71+ }
72+ } else {
73+ replies = append(replies, &comment)
74+ }
75+ }
76+77+ for _, r := range replies {
78+ parentAt := *r.ReplyTo
79+ if parent, exists := toplevel[parentAt]; exists {
80+ parent.Replies = append(parent.Replies, r)
81+ }
82+ }
83+84+ var listing []CommentListItem
85+ for _, v := range toplevel {
86+ listing = append(listing, *v)
87 }
8889+ // sort everything
90+ sortFunc := func(a, b *IssueComment) bool {
91+ return a.Created.Before(b.Created)
92+ }
93+ sort.Slice(listing, func(i, j int) bool {
94+ return sortFunc(listing[i].Self, listing[j].Self)
95+ })
96+ for _, r := range listing {
97+ sort.Slice(r.Replies, func(i, j int) bool {
98+ return sortFunc(r.Replies[i], r.Replies[j])
99+ })
100 }
101102+ return listing
103+}
104105+func IssueFromRecord(did, rkey string, record tangled.RepoIssue) Issue {
106+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
00107 if err != nil {
108+ created = time.Now()
109 }
110111+ body := ""
112+ if record.Body != nil {
113+ body = *record.Body
114 }
0115116+ return Issue{
117+ RepoAt: syntax.ATURI(record.Repo),
118+ Did: did,
119+ Rkey: rkey,
120+ Created: created,
121+ Title: record.Title,
122+ Body: body,
123+ Open: true, // new issues are open by default
124 }
125+}
// IssueComment is a row in the issue_comments table: one comment on an
// issue, optionally a reply to another comment.
type IssueComment struct {
	Id      int64
	Did     string  // DID of the record owner (used to build the at-uri)
	Rkey    string  // record key; may be empty for very old rows
	IssueAt string  // at-uri of the parent issue
	ReplyTo *string // at-uri of the parent comment; nil for top-level comments
	Body    string
	Created time.Time
	Edited  *time.Time // nil if never edited
	Deleted *time.Time // nil if not deleted
}
138139+func (i *IssueComment) AtUri() syntax.ATURI {
140+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueCommentNSID, i.Rkey))
0141}
142143+func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
144+ return tangled.RepoIssueComment{
145+ Body: i.Body,
146+ Issue: i.IssueAt,
147+ CreatedAt: i.Created.Format(time.RFC3339),
148+ ReplyTo: i.ReplyTo,
149+ }
150}
// IsTopLevel reports whether this comment is a direct comment on the
// issue (no ReplyTo) rather than a reply to another comment.
func (i *IssueComment) IsTopLevel() bool {
	return i.ReplyTo == nil
}
155156+func IssueCommentFromRecord(e Execer, did, rkey string, record tangled.RepoIssueComment) (*IssueComment, error) {
157+ created, err := time.Parse(time.RFC3339, record.CreatedAt)
158+ if err != nil {
159+ created = time.Now()
0160 }
161162+ ownerDid := did
163+164+ if _, err = syntax.ParseATURI(record.Issue); err != nil {
0000000000000000000000000000000000165 return nil, err
166 }
0167168+ comment := IssueComment{
169+ Did: ownerDid,
170+ Rkey: rkey,
171+ Body: record.Body,
172+ IssueAt: record.Issue,
173+ ReplyTo: record.ReplyTo,
174+ Created: created,
175+ }
176177+ return &comment, nil
178+}
179+180+func PutIssue(tx *sql.Tx, issue *Issue) error {
181+ // ensure sequence exists
182+ _, err := tx.Exec(`
183+ insert or ignore into repo_issue_seqs (repo_at, next_issue_id)
184+ values (?, 1)
185+ `, issue.RepoAt)
186+ if err != nil {
187+ return err
188+ }
189+190+ issues, err := GetIssues(
191+ tx,
192+ FilterEq("did", issue.Did),
193+ FilterEq("rkey", issue.Rkey),
194+ )
195+ switch {
196+ case err != nil:
197+ return err
198+ case len(issues) == 0:
199+ return createNewIssue(tx, issue)
200+ case len(issues) != 1: // should be unreachable
201+ return fmt.Errorf("invalid number of issues returned: %d", len(issues))
202+ default:
203+ // if content is identical, do not edit
204+ existingIssue := issues[0]
205+ if existingIssue.Title == issue.Title && existingIssue.Body == issue.Body {
206+ return nil
207 }
00208209+ issue.Id = existingIssue.Id
210+ issue.IssueId = existingIssue.IssueId
211+ return updateIssue(tx, issue)
212 }
213+}
214215+func createNewIssue(tx *sql.Tx, issue *Issue) error {
216+ // get next issue_id
217+ var newIssueId int
218+ err := tx.QueryRow(`
219+ update repo_issue_seqs
220+ set next_issue_id = next_issue_id + 1
221+ where repo_at = ?
222+ returning next_issue_id - 1
223+ `, issue.RepoAt).Scan(&newIssueId)
224+ if err != nil {
225+ return err
226 }
227228+ // insert new issue
229+ row := tx.QueryRow(`
230+ insert into issues (repo_at, did, rkey, issue_id, title, body)
231+ values (?, ?, ?, ?, ?, ?)
232+ returning rowid, issue_id
233+ `, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body)
234+235+ return row.Scan(&issue.Id, &issue.IssueId)
236}
237238+func updateIssue(tx *sql.Tx, issue *Issue) error {
239+ // update existing issue
240+ _, err := tx.Exec(`
241+ update issues
242+ set title = ?, body = ?, edited = ?
243+ where did = ? and rkey = ?
244+ `, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey)
245+ return err
246+}
247248+func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]Issue, error) {
249+ issueMap := make(map[string]*Issue) // at-uri -> issue
250+251+ var conditions []string
252+ var args []any
253+254+ for _, filter := range filters {
255+ conditions = append(conditions, filter.Condition())
256+ args = append(args, filter.Arg()...)
257+ }
258+259+ whereClause := ""
260+ if conditions != nil {
261+ whereClause = " where " + strings.Join(conditions, " and ")
262+ }
263+264+ pLower := FilterGte("row_num", page.Offset+1)
265+ pUpper := FilterLte("row_num", page.Offset+page.Limit)
266+267+ args = append(args, pLower.Arg()...)
268+ args = append(args, pUpper.Arg()...)
269+ pagination := " where " + pLower.Condition() + " and " + pUpper.Condition()
270+271+ query := fmt.Sprintf(
272+ `
273+ select * from (
274+ select
275+ id,
276+ did,
277+ rkey,
278+ repo_at,
279+ issue_id,
280+ title,
281+ body,
282+ open,
283+ created,
284+ edited,
285+ deleted,
286+ row_number() over (order by created desc) as row_num
287+ from
288+ issues
289+ %s
290+ ) ranked_issues
291+ %s
292+ `,
293+ whereClause,
294+ pagination,
295+ )
296+297+ rows, err := e.Query(query, args...)
298 if err != nil {
299+ return nil, fmt.Errorf("failed to query issues table: %w", err)
300 }
301 defer rows.Close()
302303 for rows.Next() {
304 var issue Issue
305+ var createdAt string
306+ var editedAt, deletedAt sql.Null[string]
307+ var rowNum int64
308 err := rows.Scan(
309+ &issue.Id,
310+ &issue.Did,
311+ &issue.Rkey,
312 &issue.RepoAt,
313 &issue.IssueId,
0314 &issue.Title,
315 &issue.Body,
316 &issue.Open,
317+ &createdAt,
318+ &editedAt,
319+ &deletedAt,
320+ &rowNum,
0321 )
322 if err != nil {
323+ return nil, fmt.Errorf("failed to scan issue: %w", err)
324 }
325326+ if t, err := time.Parse(time.RFC3339, createdAt); err == nil {
327+ issue.Created = t
0328 }
0329330+ if editedAt.Valid {
331+ if t, err := time.Parse(time.RFC3339, editedAt.V); err == nil {
332+ issue.Edited = &t
333+ }
334+ }
335+336+ if deletedAt.Valid {
337+ if t, err := time.Parse(time.RFC3339, deletedAt.V); err == nil {
338+ issue.Deleted = &t
339+ }
340 }
0341342+ atUri := issue.AtUri().String()
343+ issueMap[atUri] = &issue
344+ }
345+346+ // collect reverse repos
347+ repoAts := make([]string, 0, len(issueMap)) // or just []string{}
348+ for _, issue := range issueMap {
349+ repoAts = append(repoAts, string(issue.RepoAt))
350+ }
351+352+ repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
353+ if err != nil {
354+ return nil, fmt.Errorf("failed to build repo mappings: %w", err)
355+ }
356+357+ repoMap := make(map[string]*Repo)
358+ for i := range repos {
359+ repoMap[string(repos[i].RepoAt())] = &repos[i]
360+ }
361+362+ for issueAt, i := range issueMap {
363+ if r, ok := repoMap[string(i.RepoAt)]; ok {
364+ i.Repo = r
365+ } else {
366+ // do not show up the issue if the repo is deleted
367+ // TODO: foreign key where?
368+ delete(issueMap, issueAt)
369 }
370+ }
371372+ // collect comments
373+ issueAts := slices.Collect(maps.Keys(issueMap))
374+ comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
375+ if err != nil {
376+ return nil, fmt.Errorf("failed to query comments: %w", err)
377 }
378379+ for i := range comments {
380+ issueAt := comments[i].IssueAt
381+ if issue, ok := issueMap[issueAt]; ok {
382+ issue.Comments = append(issue.Comments, comments[i])
383+ }
384+ }
385+386+ var issues []Issue
387+ for _, i := range issueMap {
388+ issues = append(issues, *i)
389 }
390391+ sort.Slice(issues, func(i, j int) bool {
392+ return issues[i].Created.After(issues[j].Created)
393+ })
394+395 return issues, nil
396+}
// GetIssues returns all issues matching the given filters, using the
// first (default-sized) page of results.
func GetIssues(e Execer, filters ...filter) ([]Issue, error) {
	return GetIssuesPaginated(e, pagination.FirstPage(), filters...)
}
401402func GetIssue(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, error) {
403+ query := `select id, owner_did, rkey, created, title, body, open from issues where repo_at = ? and issue_id = ?`
404 row := e.QueryRow(query, repoAt, issueId)
405406 var issue Issue
407 var createdAt string
408+ err := row.Scan(&issue.Id, &issue.Did, &issue.Rkey, &createdAt, &issue.Title, &issue.Body, &issue.Open)
409 if err != nil {
410 return nil, err
411 }
···419 return &issue, nil
420}
// AddIssueComment upserts an issue comment keyed on (did, rkey) and
// returns the last-insert id. On conflict the issue_at and body columns
// are updated, and `edited` is stamped with the current time when the
// content actually changed.
//
// NOTE(review): the conflict clause compares issue_comments.reply_to
// against excluded.reply_to but never assigns reply_to — confirm whether
// reply_to is intentionally immutable after creation.
// NOTE(review): in SQLite, `reply_to != excluded.reply_to` evaluates to
// NULL (falsy) when either side is NULL, so nil->non-nil changes may not
// stamp `edited` — verify against the intended semantics.
func AddIssueComment(e Execer, c IssueComment) (int64, error) {
	result, err := e.Exec(
		`insert into issue_comments (
			did,
			rkey,
			issue_at,
			body,
			reply_to,
			created,
			edited
		)
		values (?, ?, ?, ?, ?, ?, null)
		on conflict(did, rkey) do update set
			issue_at = excluded.issue_at,
			body = excluded.body,
			edited = case
				when
					issue_comments.issue_at != excluded.issue_at
					or issue_comments.body != excluded.body
					or issue_comments.reply_to != excluded.reply_to
				then ?
				else issue_comments.edited
			end`,
		c.Did,
		c.Rkey,
		c.IssueAt,
		c.Body,
		c.ReplyTo,
		c.Created.Format(time.RFC3339),
		time.Now().Format(time.RFC3339),
	)
	if err != nil {
		return 0, err
	}

	id, err := result.LastInsertId()
	if err != nil {
		return 0, err
	}

	return id, nil
}
464+465+func DeleteIssueComments(e Execer, filters ...filter) error {
466+ var conditions []string
467+ var args []any
468+ for _, filter := range filters {
469+ conditions = append(conditions, filter.Condition())
470+ args = append(args, filter.Arg()...)
471 }
472473+ whereClause := ""
474+ if conditions != nil {
475+ whereClause = " where " + strings.Join(conditions, " and ")
476+ }
477478+ query := fmt.Sprintf(`update issue_comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
479+480+ _, err := e.Exec(query, args...)
00000000481 return err
482}
483484+func GetIssueComments(e Execer, filters ...filter) ([]IssueComment, error) {
485+ var comments []IssueComment
486+487+ var conditions []string
488+ var args []any
489+ for _, filter := range filters {
490+ conditions = append(conditions, filter.Condition())
491+ args = append(args, filter.Arg()...)
492+ }
493494+ whereClause := ""
495+ if conditions != nil {
496+ whereClause = " where " + strings.Join(conditions, " and ")
497+ }
498+499+ query := fmt.Sprintf(`
500 select
501+ id,
502+ did,
0503 rkey,
504+ issue_at,
505+ reply_to,
506 body,
507 created,
508 edited,
509 deleted
510 from
511+ issue_comments
512+ %s
513+ `, whereClause)
514+515+ rows, err := e.Query(query, args...)
000000516 if err != nil {
517 return nil, err
518 }
0519520 for rows.Next() {
521+ var comment IssueComment
522+ var created string
523+ var rkey, edited, deleted, replyTo sql.Null[string]
524+ err := rows.Scan(
525+ &comment.Id,
526+ &comment.Did,
527+ &rkey,
528+ &comment.IssueAt,
529+ &replyTo,
530+ &comment.Body,
531+ &created,
532+ &edited,
533+ &deleted,
534+ )
535 if err != nil {
536 return nil, err
537 }
538539+ // this is a remnant from old times, newer comments always have rkey
540+ if rkey.Valid {
541+ comment.Rkey = rkey.V
542 }
0543544+ if t, err := time.Parse(time.RFC3339, created); err == nil {
545+ comment.Created = t
546+ }
547+548+ if edited.Valid {
549+ if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
550+ comment.Edited = &t
551 }
0552 }
553554+ if deleted.Valid {
555+ if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
556+ comment.Deleted = &t
0557 }
0558 }
559560+ if replyTo.Valid {
561+ comment.ReplyTo = &replyTo.V
562 }
563564 comments = append(comments, comment)
565 }
566567+ if err = rows.Err(); err != nil {
568 return nil, err
569 }
570571 return comments, nil
572}
573574+func DeleteIssues(e Execer, filters ...filter) error {
575+ var conditions []string
576+ var args []any
577+ for _, filter := range filters {
578+ conditions = append(conditions, filter.Condition())
579+ args = append(args, filter.Arg()...)
000000000580 }
581582+ whereClause := ""
583+ if conditions != nil {
584+ whereClause = " where " + strings.Join(conditions, " and ")
585 }
0586587+ query := fmt.Sprintf(`delete from issues %s`, whereClause)
588+ _, err := e.Exec(query, args...)
589+ return err
590+}
000591592+func CloseIssues(e Execer, filters ...filter) error {
593+ var conditions []string
594+ var args []any
595+ for _, filter := range filters {
596+ conditions = append(conditions, filter.Condition())
597+ args = append(args, filter.Arg()...)
598 }
599600+ whereClause := ""
601+ if conditions != nil {
602+ whereClause = " where " + strings.Join(conditions, " and ")
603 }
604605+ query := fmt.Sprintf(`update issues set open = 0 %s`, whereClause)
606+ _, err := e.Exec(query, args...)
0000000000000607 return err
608}
609610+func ReopenIssues(e Execer, filters ...filter) error {
611+ var conditions []string
612+ var args []any
613+ for _, filter := range filters {
614+ conditions = append(conditions, filter.Condition())
615+ args = append(args, filter.Arg()...)
616+ }
000617618+ whereClause := ""
619+ if conditions != nil {
620+ whereClause = " where " + strings.Join(conditions, " and ")
621+ }
622623+ query := fmt.Sprintf(`update issues set open = 1 %s`, whereClause)
624+ _, err := e.Exec(query, args...)
625 return err
626}
627
-62
appview/db/migrations/20250305_113405.sql
···1--- Simplified SQLite Database Migration Script for Issues and Comments
2-3--- Migration for issues table
4-CREATE TABLE issues_new (
5- id integer primary key autoincrement,
6- owner_did text not null,
7- repo_at text not null,
8- issue_id integer not null,
9- title text not null,
10- body text not null,
11- open integer not null default 1,
12- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
13- issue_at text,
14- unique(repo_at, issue_id),
15- foreign key (repo_at) references repos(at_uri) on delete cascade
16-);
17-18--- Migrate data to new issues table
19-INSERT INTO issues_new (
20- id, owner_did, repo_at, issue_id,
21- title, body, open, created, issue_at
22-)
23-SELECT
24- id, owner_did, repo_at, issue_id,
25- title, body, open, created, issue_at
26-FROM issues;
27-28--- Drop old issues table
29-DROP TABLE issues;
30-31--- Rename new issues table
32-ALTER TABLE issues_new RENAME TO issues;
33-34--- Migration for comments table
35-CREATE TABLE comments_new (
36- id integer primary key autoincrement,
37- owner_did text not null,
38- issue_id integer not null,
39- repo_at text not null,
40- comment_id integer not null,
41- comment_at text not null,
42- body text not null,
43- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
44- unique(issue_id, comment_id),
45- foreign key (repo_at, issue_id) references issues(repo_at, issue_id) on delete cascade
46-);
47-48--- Migrate data to new comments table
49-INSERT INTO comments_new (
50- id, owner_did, issue_id, repo_at,
51- comment_id, comment_at, body, created
52-)
53-SELECT
54- id, owner_did, issue_id, repo_at,
55- comment_id, comment_at, body, created
56-FROM comments;
57-58--- Drop old comments table
59-DROP TABLE comments;
60-61--- Rename new comments table
62-ALTER TABLE comments_new RENAME TO comments;
···13 FormatMarkdown: []string{".md", ".markdown", ".mdown", ".mkdn", ".mkd"},
14}
1500000000000016func GetFormat(filename string) Format {
17 for format, extensions := range FileTypes {
18 for _, extension := range extensions {
···13 FormatMarkdown: []string{".md", ".markdown", ".mdown", ".mkdn", ".mkd"},
14}
// ReadmeFilenames lists common README filenames to probe for, in order of
// preference: Markdown first, then extension-less, then other supported
// formats. Uppercase variants are preferred over their lowercase
// counterparts.
var ReadmeFilenames = []string{
	"README.md", "readme.md",
	"README",
	"readme",
	"README.markdown",
	"readme.markdown",
	"README.txt",
	"readme.txt",
}
27+28func GetFormat(filename string) Format {
29 for format, extensions := range FileTypes {
30 for _, extension := range extensions {
···1+{{ define "banner" }}
2+<div class="flex items-center justify-center mx-auto w-full bg-red-100 dark:bg-red-900 border border-red-200 dark:border-red-800 rounded-b drop-shadow-sm text-red-800 dark:text-red-200">
3+ <details class="group p-2">
4+ <summary class="list-none cursor-pointer">
5+ <div class="flex gap-4 items-center">
6+ <span class="group-open:hidden inline">{{ i "triangle-alert" "w-4 h-4" }}</span>
7+ <span class="hidden group-open:inline">{{ i "x" "w-4 h-4" }}</span>
8+9+ <span class="group-open:hidden inline">Some services that you administer require an update. Click to show more.</span>
10+ <span class="hidden group-open:inline">Some services that you administer will have to be updated to be compatible with Tangled.</span>
11+ </div>
12+ </summary>
13+14+ {{ if .Registrations }}
15+ <ul class="list-disc mx-12 my-2">
16+ {{range .Registrations}}
17+ <li>Knot: {{ .Domain }}</li>
18+ {{ end }}
19+ </ul>
20+ {{ end }}
21+22+ {{ if .Spindles }}
23+ <ul class="list-disc mx-12 my-2">
24+ {{range .Spindles}}
25+ <li>Spindle: {{ .Instance }}</li>
26+ {{ end }}
27+ </ul>
28+ {{ end }}
29+30+ <div class="mx-6">
31+ These services may not be fully accessible until upgraded.
32+ <a class="underline text-red-800 dark:text-red-200"
33+ href="https://tangled.sh/@tangled.sh/core/tree/master/docs/migrations.md">
34+ Click to read the upgrade guide</a>.
35+ </div>
36+ </details>
37+</div>
38+{{ end }}
+24-4
appview/pages/templates/errors/404.html
···1{{ define "title" }}404 · tangled{{ end }}
23{{ define "content" }}
4- <h1>404 — nothing like that here!</h1>
5- <p>
6- It seems we couldn't find what you were looking for. Sorry about that!
7- </p>
000000000000000000008{{ end }}
···1{{ define "title" }}404 · tangled{{ end }}
23{{ define "content" }}
4+<div class="flex flex-col items-center justify-center min-h-[60vh] text-center">
5+ <div class="bg-white dark:bg-gray-800 rounded-lg drop-shadow-sm p-8 max-w-lg mx-auto">
6+ <div class="mb-6">
7+ <div class="w-16 h-16 mx-auto mb-4 rounded-full bg-gray-100 dark:bg-gray-700 flex items-center justify-center">
8+ {{ i "search-x" "w-8 h-8 text-gray-400 dark:text-gray-500" }}
9+ </div>
10+ </div>
11+12+ <div class="space-y-4">
13+ <h1 class="text-2xl sm:text-3xl font-bold text-gray-900 dark:text-white">
14+ 404 — page not found
15+ </h1>
16+ <p class="text-gray-600 dark:text-gray-300">
17+ The page you're looking for doesn't exist. It may have been moved, deleted, or you have the wrong URL.
18+ </p>
19+ <div class="flex flex-col sm:flex-row gap-3 justify-center items-center mt-6">
20+ <a href="javascript:history.back()" class="btn no-underline hover:no-underline gap-2">
21+ {{ i "arrow-left" "w-4 h-4" }}
22+ go back
23+ </a>
24+ </div>
25+ </div>
26+ </div>
27+</div>
28{{ end }}
+35-2
appview/pages/templates/errors/500.html
···1{{ define "title" }}500 · tangled{{ end }}
23{{ define "content" }}
4- <h1>500 — something broke!</h1>
5- <p>We're working on getting service back up. Hang tight!</p>
0000000000000000000000000000000006{{ end }}
···1{{ define "title" }}500 · tangled{{ end }}
23{{ define "content" }}
4+<div class="flex flex-col items-center justify-center min-h-[60vh] text-center">
5+ <div class="bg-white dark:bg-gray-800 rounded-lg drop-shadow-sm p-8 max-w-lg mx-auto">
6+ <div class="mb-6">
7+ <div class="w-16 h-16 mx-auto mb-4 rounded-full bg-red-100 dark:bg-red-900/30 flex items-center justify-center">
8+ {{ i "alert-triangle" "w-8 h-8 text-red-500 dark:text-red-400" }}
9+ </div>
10+ </div>
11+12+ <div class="space-y-4">
13+ <h1 class="text-2xl sm:text-3xl font-bold text-gray-900 dark:text-white">
14+ 500 — internal server error
15+ </h1>
16+ <p class="text-gray-600 dark:text-gray-300">
17+ Something went wrong on our end. We've been notified and are working to fix the issue.
18+ </p>
19+ <div class="bg-yellow-50 dark:bg-yellow-900/20 border border-yellow-200 dark:border-yellow-800 rounded p-3 text-sm text-yellow-800 dark:text-yellow-200">
20+ <div class="flex items-center gap-2">
21+ {{ i "info" "w-4 h-4" }}
22+ <span class="font-medium">we're on it!</span>
23+ </div>
24+ <p class="mt-1">Our team has been automatically notified about this error.</p>
25+ </div>
26+ <div class="flex flex-col sm:flex-row gap-3 justify-center items-center mt-6">
27+ <button onclick="location.reload()" class="btn-create gap-2">
28+ {{ i "refresh-cw" "w-4 h-4" }}
29+ try again
30+ </button>
31+ <a href="/" class="btn no-underline hover:no-underline gap-2">
32+ {{ i "home" "w-4 h-4" }}
33+ back to home
34+ </a>
35+ </div>
36+ </div>
37+ </div>
38+</div>
39{{ end }}
+28-5
appview/pages/templates/errors/503.html
···1{{ define "title" }}503 · tangled{{ end }}
23{{ define "content" }}
4- <h1>503 — unable to reach knot</h1>
5- <p>
6- We were unable to reach the knot hosting this repository. Try again
7- later.
8- </p>
000000000000000000000009{{ end }}
···1{{ define "title" }}503 · tangled{{ end }}
23{{ define "content" }}
4+<div class="flex flex-col items-center justify-center min-h-[60vh] text-center">
5+ <div class="bg-white dark:bg-gray-800 rounded-lg drop-shadow-sm p-8 max-w-lg mx-auto">
6+ <div class="mb-6">
7+ <div class="w-16 h-16 mx-auto mb-4 rounded-full bg-blue-100 dark:bg-blue-900/30 flex items-center justify-center">
8+ {{ i "server-off" "w-8 h-8 text-blue-500 dark:text-blue-400" }}
9+ </div>
10+ </div>
11+12+ <div class="space-y-4">
13+ <h1 class="text-2xl sm:text-3xl font-bold text-gray-900 dark:text-white">
14+ 503 — service unavailable
15+ </h1>
16+ <p class="text-gray-600 dark:text-gray-300">
17+ We were unable to reach the knot hosting this repository. The service may be temporarily unavailable.
18+ </p>
19+ <div class="flex flex-col sm:flex-row gap-3 justify-center items-center mt-6">
20+ <button onclick="location.reload()" class="btn-create gap-2">
21+ {{ i "refresh-cw" "w-4 h-4" }}
22+ try again
23+ </button>
24+ <a href="/" class="btn gap-2 no-underline hover:no-underline">
25+ {{ i "arrow-left" "w-4 h-4" }}
26+ back to timeline
27+ </a>
28+ </div>
29+ </div>
30+ </div>
31+</div>
32{{ end }}
+28
appview/pages/templates/errors/knot404.html
···0000000000000000000000000000
···1+{{ define "title" }}404 · tangled{{ end }}
2+3+{{ define "content" }}
4+<div class="flex flex-col items-center justify-center min-h-[60vh] text-center">
5+ <div class="bg-white dark:bg-gray-800 rounded-lg drop-shadow-sm p-8 max-w-lg mx-auto">
6+ <div class="mb-6">
7+ <div class="w-16 h-16 mx-auto mb-4 rounded-full bg-orange-100 dark:bg-orange-900/30 flex items-center justify-center">
8+ {{ i "book-x" "w-8 h-8 text-orange-500 dark:text-orange-400" }}
9+ </div>
10+ </div>
11+12+ <div class="space-y-4">
13+ <h1 class="text-2xl sm:text-3xl font-bold text-gray-900 dark:text-white">
14+ 404 — repository not found
15+ </h1>
16+ <p class="text-gray-600 dark:text-gray-300">
17+ The repository you were looking for could not be found. The knot serving the repository may be unavailable.
18+ </p>
19+ <div class="flex flex-col sm:flex-row gap-3 justify-center items-center mt-6">
20+ <a href="/" class="btn flex items-center gap-2 no-underline hover:no-underline">
21+ {{ i "arrow-left" "w-4 h-4" }}
22+ back to timeline
23+ </a>
24+ </div>
25+ </div>
26+ </div>
27+</div>
28+{{ end }}
···11### message format
1213```
14-<service/top-level directory>: <affected package/directory>: <short summary of change>
151617Optional longer description can go here, if necessary. Explain what the
···23Here are some examples:
2425```
26-appview: state: fix token expiry check in middleware
2728The previous check did not account for clock drift, leading to premature
29token invalidation.
30```
3132```
33-knotserver: git/service: improve error checking in upload-pack
34```
3536···54- Don't include unrelated changes in the same commit.
55- Avoid noisy commit messages like "wip" or "final fix" — rewrite history
56before submitting if necessary.
0000005758## proposals for bigger changes
59···115If you're submitting a PR with multiple commits, make sure each one is
116signed.
117118-For [jj](https://jj-vcs.github.io/jj/latest/) users, you can add this to
119-your jj config:
120121-```
122-ui.should-sign-off = true
123-```
124-125-and to your `templates.draft_commit_description`, add the following `if`
126-block:
127-128-```
129- if(
130- config("ui.should-sign-off").as_boolean() && !description.contains("Signed-off-by: " ++ author.name()),
131- "\nSigned-off-by: " ++ author.name() ++ " <" ++ author.email() ++ ">",
132- ),
133```
134135Refer to the [jj
136-documentation](https://jj-vcs.github.io/jj/latest/config/#default-description)
137for more information.
···11### message format
1213```
14+<service/top-level directory>/<affected package/directory>: <short summary of change>
151617Optional longer description can go here, if necessary. Explain what the
···23Here are some examples:
2425```
26+appview/state: fix token expiry check in middleware
2728The previous check did not account for clock drift, leading to premature
29token invalidation.
30```
3132```
33+knotserver/git/service: improve error checking in upload-pack
34```
3536···54- Don't include unrelated changes in the same commit.
55- Avoid noisy commit messages like "wip" or "final fix" — rewrite history
56before submitting if necessary.
57+58+## code formatting
59+60+We use a variety of tools to format our code, and multiplex them with
61+[`treefmt`](https://treefmt.com): all you need to do to format your changes
62+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
6364## proposals for bigger changes
65···121If you're submitting a PR with multiple commits, make sure each one is
122signed.
123124+For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125+to make it sign off commits in the tangled repo:
126127+```shell
128+# Safety check, should say "No matching config key..."
129+jj config list templates.commit_trailers
130+# The command below may need to be adjusted if the command above returned something.
131+jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
0000000132```
133134Refer to the [jj
135+documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136for more information.
+66-20
docs/hacking.md
···48redis-server
49```
5051-## running a knot
5253An end-to-end knot setup requires setting up a machine with
54`sshd`, `AuthorizedKeysCommand`, and git user, which is
55quite cumbersome. So the nix flake provides a
56`nixosConfiguration` to do so.
5758-To begin, head to `http://localhost:3000/knots` in the browser
59-and generate a knot secret. Replace the existing secret in
60-`nix/vm.nix` (`KNOT_SERVER_SECRET`) with the newly generated
61-secret.
00000000000000000006263-You can now start a lightweight NixOS VM using
64-`nixos-shell` like so:
00000000000000000006566```bash
67-nix run .#vm
68-# or nixos-shell --flake .#vm
6970-# hit Ctrl-a + c + q to exit the VM
71```
7273This starts a knot on port 6000, a spindle on port 6555
74-with `ssh` exposed on port 2222. You can push repositories
75-to this VM with this ssh config block on your main machine:
00000007677```bash
78Host nixos-shell
···89git push local-dev main
90```
9192-## running a spindle
9394-Be sure to change the `owner` field for the spindle in
95-`nix/vm.nix` to your own DID. The above VM should already
96-be running a spindle on `localhost:6555`. You can head to
97-the spindle dashboard on `http://localhost:3000/spindles`,
98-and register a spindle with hostname `localhost:6555`. It
99-should instantly be verified. You can then configure each
100-repository to use this spindle and run CI jobs.
101102Of interest when debugging spindles:
103···114# litecli has a nicer REPL interface:
115litecli /var/lib/spindle/spindle.db
116```
00000
···48redis-server
49```
5051+## running knots and spindles
5253An end-to-end knot setup requires setting up a machine with
54`sshd`, `AuthorizedKeysCommand`, and git user, which is
55quite cumbersome. So the nix flake provides a
56`nixosConfiguration` to do so.
5758+<details>
59+  <summary><strong>macOS users will have to set up a Nix builder first</strong></summary>
60+61+ In order to build Tangled's dev VM on macOS, you will
62+ first need to set up a Linux Nix builder. The recommended
63+ way to do so is to run a [`darwin.linux-builder`
64+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
65+ and to register it in `nix.conf` as a builder for Linux
66+ with the same architecture as your Mac (`linux-aarch64` if
67+ you are using Apple Silicon).
68+69+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
70+ > the tangled repo so that it doesn't conflict with the other VM. For example,
71+ > you can do
72+ >
73+ > ```shell
74+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
75+ > ```
76+ >
77+ > to store the builder VM in a temporary dir.
78+ >
79+  > You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
80+ > avoid subtle problems.
8182+ Alternatively, you can use any other method to set up a
83+ Linux machine with `nix` installed that you can `sudo ssh`
84+ into (in other words, root user on your Mac has to be able
85+ to ssh into the Linux machine without entering a password)
86+ and that has the same architecture as your Mac. See
87+ [remote builder
88+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
89+ for how to register such a builder in `nix.conf`.
90+91+ > WARNING: If you'd like to use
92+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
93+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
94+ > ssh` works can be tricky. It seems to be [possible with
95+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
96+97+</details>
98+99+To begin, grab your DID from http://localhost:3000/settings.
100+Then, set `TANGLED_VM_KNOT_OWNER` and
101+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
102+lightweight NixOS VM like so:
103104```bash
105+nix run --impure .#vm
0106107+# type `poweroff` at the shell to exit the VM
108```
109110This starts a knot on port 6000, a spindle on port 6555
111+with `ssh` exposed on port 2222.
112+113+Once the services are running, head to
114+http://localhost:3000/knots and hit verify. It should
115+verify the ownership of the services instantly if everything
116+went smoothly.
117+118+You can push repositories to this VM with this ssh config
119+block on your main machine:
120121```bash
122Host nixos-shell
···133git push local-dev main
134```
135136+### running a spindle
137138+The above VM should already be running a spindle on
139+`localhost:6555`. Head to http://localhost:3000/spindles and
140+hit verify. You can then configure each repository to use
141+this spindle and run CI jobs.
000142143Of interest when debugging spindles:
144···155# litecli has a nicer REPL interface:
156litecli /var/lib/spindle/spindle.db
157```
158+159+If for any reason you wish to disable either one of the
160+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161+`services.tangled-spindle.enable` (or
162+`services.tangled-knot.enable`) to `false`.
+27-7
docs/knot-hosting.md
···23So you want to run your own knot server? Great! Here are a few prerequisites:
45-1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux of some kind.
62. A (sub)domain name. People generally use `knot.example.com`.
73. A valid SSL certificate for your domain.
8···59EOF
60```
6100000062Next, create the `git` user. We'll use the `git` user's home directory
63to store repositories:
64···67```
6869Create `/home/git/.knot.env` with the following, updating the values as
70-necessary. The `KNOT_SERVER_SECRET` can be obtaind from the
71-[/knots](/knots) page on Tangled.
7273```
74KNOT_REPO_SCAN_PATH=/home/git
75KNOT_SERVER_HOSTNAME=knot.example.com
76APPVIEW_ENDPOINT=https://tangled.sh
77-KNOT_SERVER_SECRET=secret
78KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
79KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
80```
···89systemctl start knotserver
90```
9192-The last step is to configure a reverse proxy like Nginx or Caddy to front yourself
93knot. Here's an example configuration for Nginx:
9495```
···122Remember to use Let's Encrypt or similar to procure a certificate for your
123knot domain.
124125-You should now have a running knot server! You can finalize your registration by hitting the
126-`initialize` button on the [/knots](/knots) page.
00127128### custom paths
129···191```
192193Make sure to restart your SSH server!
000000000000
···23So you want to run your own knot server? Great! Here are a few prerequisites:
45+1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
62. A (sub)domain name. People generally use `knot.example.com`.
73. A valid SSL certificate for your domain.
8···59EOF
60```
6162+Then, reload `sshd`:
63+64+```
65+sudo systemctl reload ssh
66+```
67+68Next, create the `git` user. We'll use the `git` user's home directory
69to store repositories:
70···73```
7475Create `/home/git/.knot.env` with the following, updating the values as
76+necessary. The `KNOT_SERVER_OWNER` should be set to your
77+DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
7879```
80KNOT_REPO_SCAN_PATH=/home/git
81KNOT_SERVER_HOSTNAME=knot.example.com
82APPVIEW_ENDPOINT=https://tangled.sh
83+KNOT_SERVER_OWNER=did:plc:foobar
84KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
85KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
86```
···95systemctl start knotserver
96```
9798+The last step is to configure a reverse proxy like Nginx or Caddy to front your
99knot. Here's an example configuration for Nginx:
100101```
···128Remember to use Let's Encrypt or similar to procure a certificate for your
129knot domain.
130131+You should now have a running knot server! You can finalize
132+your registration by hitting the `verify` button on the
133+[/knots](https://tangled.sh/knots) page. This simply creates
134+a record on your PDS to announce the existence of the knot.
135136### custom paths
137···199```
200201Make sure to restart your SSH server!
202+203+#### MOTD (message of the day)
204+205+To configure the MOTD used ("Welcome to this knot!" by default), edit the
206+`/home/git/motd` file:
207+208+```
209+printf "Hi from this knot!\n" > /home/git/motd
210+```
211+212+Note that you should add a newline at the end if setting a non-empty message
213+since the knot won't do this for you.
···1+# Migrations
2+3+This document is laid out in reverse-chronological order.
4+Newer migration guides are listed first, and older guides
5+are further down the page.
6+7+## Upgrading from v1.8.x
8+9+After v1.8.2, the HTTP APIs for knots and spindles have been
10+deprecated and replaced with XRPC. Repositories on outdated
11+knots will not be viewable from the appview. Upgrading is
12+straightforward however.
13+14+For knots:
15+16+- Upgrade to latest tag (v1.9.0 or above)
17+- Head to the [knot dashboard](https://tangled.sh/knots) and
18+ hit the "retry" button to verify your knot
19+20+For spindles:
21+22+- Upgrade to latest tag (v1.9.0 or above)
23+- Head to the [spindle
24+ dashboard](https://tangled.sh/spindles) and hit the
25+ "retry" button to verify your spindle
26+27+## Upgrading from v1.7.x
28+29+After v1.7.0, knot secrets have been deprecated. You no
30+longer need a secret from the appview to run a knot. All
31+authorized commands to knots are managed via [Inter-Service
32+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33+Knots will be read-only until upgraded.
34+35+Upgrading is quite easy, in essence:
36+37+- `KNOT_SERVER_SECRET` is no more, you can remove this
38+ environment variable entirely
39+- `KNOT_SERVER_OWNER` is now required on boot, set this to
40+ your DID. You can find your DID in the
41+ [settings](https://tangled.sh/settings) page.
42+- Restart your knot once you have replaced the environment
43+ variable
44+- Head to the [knot dashboard](https://tangled.sh/knots) and
45+ hit the "retry" button to verify your knot. This simply
46+ writes a `sh.tangled.knot` record to your PDS.
47+48+If you use the nix module, simply bump the flake to the
49+latest revision, and change your config block like so:
50+51+```diff
52+ services.tangled-knot = {
53+ enable = true;
54+ server = {
55+- secretFile = /path/to/secret;
56++ owner = "did:plc:foo";
57+ };
58+ };
59+```
60+
+193-38
docs/spindle/openbao.md
···1# spindle secrets with openbao
23This document covers setting up Spindle to use OpenBao for secrets
4-management instead of the default SQLite backend.
00000000056## installation
78Install OpenBao from nixpkgs:
910```bash
11-nix-env -iA nixpkgs.openbao
12```
1314-## local development setup
00001516Start OpenBao in dev mode:
1718```bash
19-bao server -dev
20```
2122-This starts OpenBao on `http://localhost:8200` with a root token. Save
23-the root token from the output -- you'll need it.
2425Set up environment for bao CLI:
2627```bash
28export BAO_ADDR=http://localhost:8200
29-export BAO_TOKEN=hvs.your-root-token-here
30```
310000000000000000000032Create the spindle KV mount:
3334```bash
35bao secrets enable -path=spindle -version=2 kv
36```
3738-Set up AppRole authentication:
3940Create a policy file `spindle-policy.hcl`:
4142```hcl
043path "spindle/data/*" {
44- capabilities = ["create", "read", "update", "delete", "list"]
45}
46047path "spindle/metadata/*" {
48- capabilities = ["list", "read", "delete"]
49}
5051-path "spindle/*" {
052 capabilities = ["list"]
53}
0000054```
5556Apply the policy and create an AppRole:
···61bao write auth/approle/role/spindle \
62 token_policies="spindle-policy" \
63 token_ttl=1h \
64- token_max_ttl=4h
00065```
6667Get the credentials:
6869```bash
70-bao read auth/approle/role/spindle/role-id
71-bao write -f auth/approle/role/spindle/secret-id
000000000000000000000000000000000000000000000000000000000000000000072```
7374-Configure Spindle:
000000000007576Set these environment variables for Spindle:
7778```bash
79export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
80-export SPINDLE_SERVER_SECRETS_OPENBAO_ADDR=http://localhost:8200
81-export SPINDLE_SERVER_SECRETS_OPENBAO_ROLE_ID=your-role-id-from-above
82-export SPINDLE_SERVER_SECRETS_OPENBAO_SECRET_ID=your-secret-id-from-above
83export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
84```
8586Start Spindle:
8788-Spindle will now use OpenBao for secrets storage with automatic token
89-renewal.
00000009091## verifying setup
9293-List all secrets:
9495```bash
96-bao kv list spindle/
000097```
9899-Add a test secret via Spindle API, then check it exists:
100101```bash
0000102bao kv list spindle/repos/
103-```
104105-Get a specific secret:
106-107-```bash
108bao kv get spindle/repos/your_repo_path/SECRET_NAME
109```
110111## how it works
112000113- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
114-- Each repository gets its own namespace
115-- Repository paths like `at://did:plc:alice/myrepo` become
116- `at_did_plc_alice_myrepo`
117-- The system automatically handles token renewal using AppRole
118- authentication
119-- On shutdown, Spindle cleanly stops the token renewal process
120121## troubleshooting
122123-**403 errors**: Check that your BAO_TOKEN is set and the spindle mount
124-exists
000125126**404 route errors**: The spindle KV mount probably doesn't exist - run
127-the mount creation step again
128129-**Token expired**: The AppRole system should handle this automatically,
130-but you can check token status with `bao token lookup`
000000000000000000000
···1# spindle secrets with openbao
23This document covers setting up Spindle to use OpenBao for secrets
4+management via OpenBao Proxy instead of the default SQLite backend.
5+6+## overview
7+8+Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9+authentication automatically using AppRole credentials, while Spindle
10+connects to the local proxy instead of directly to the OpenBao server.
11+12+This approach provides better security, automatic token renewal, and
13+simplified application code.
1415## installation
1617Install OpenBao from nixpkgs:
1819```bash
20+nix shell nixpkgs#openbao # for a local server
21```
2223+## setup
24+25+The setup process is documented for both local development and production.
26+27+### local development
2829Start OpenBao in dev mode:
3031```bash
32+bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33```
3435+This starts OpenBao on `http://localhost:8201` with a root token.
03637Set up environment for bao CLI:
3839```bash
40export BAO_ADDR=http://localhost:8201
41+export BAO_TOKEN=root
42```
4344+### production
45+46+You would typically use a systemd service with a configuration file. Refer to
47+[@tangled.sh/infra](https://tangled.sh/@tangled.sh/infra) for how this can be
48+achieved using Nix.
49+50+Then, initialize the bao server:
51+```bash
52+bao operator init -key-shares=1 -key-threshold=1
53+```
54+55+This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56+```bash
57+bao operator unseal <unseal_key>
58+```
59+60+All steps below remain the same across both dev and production setups.
61+62+### configure openbao server
63+64Create the spindle KV mount:
6566```bash
67bao secrets enable -path=spindle -version=2 kv
68```
6970+Set up AppRole authentication and policy:
7172Create a policy file `spindle-policy.hcl`:
7374```hcl
75+# Full access to spindle KV v2 data
76path "spindle/data/*" {
77+ capabilities = ["create", "read", "update", "delete"]
78}
7980+# Access to metadata for listing and management
81path "spindle/metadata/*" {
82+ capabilities = ["list", "read", "delete", "update"]
83}
8485+# Allow listing at root level
86+path "spindle/" {
87 capabilities = ["list"]
88}
89+90+# Required for connection testing and health checks
91+path "auth/token/lookup-self" {
92+ capabilities = ["read"]
93+}
94```
9596Apply the policy and create an AppRole:
···101bao write auth/approle/role/spindle \
102 token_policies="spindle-policy" \
103 token_ttl=1h \
104+ token_max_ttl=4h \
105+ bind_secret_id=true \
106+ secret_id_ttl=0 \
107+ secret_id_num_uses=0
108```
109110Get the credentials:
111112```bash
113+# Get role ID (static)
114+ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115+116+# Generate secret ID
117+SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118+119+echo "Role ID: $ROLE_ID"
120+echo "Secret ID: $SECRET_ID"
121+```
122+123+### create proxy configuration
124+125+Create the credential files:
126+127+```bash
128+# Create directory for OpenBao files
129+mkdir -p /tmp/openbao
130+131+# Save credentials
132+echo "$ROLE_ID" > /tmp/openbao/role-id
133+echo "$SECRET_ID" > /tmp/openbao/secret-id
134+chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135+```
136+137+Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138+139+```hcl
140+# OpenBao server connection
141+vault {
142+ address = "http://localhost:8200"
143+}
144+145+# Auto-Auth using AppRole
146+auto_auth {
147+ method "approle" {
148+ mount_path = "auth/approle"
149+ config = {
150+ role_id_file_path = "/tmp/openbao/role-id"
151+ secret_id_file_path = "/tmp/openbao/secret-id"
152+ }
153+ }
154+155+ # Optional: write token to file for debugging
156+ sink "file" {
157+ config = {
158+ path = "/tmp/openbao/token"
159+ mode = 0640
160+ }
161+ }
162+}
163+164+# Proxy listener for Spindle
165+listener "tcp" {
166+ address = "127.0.0.1:8201"
167+ tls_disable = true
168+}
169+170+# Enable API proxy with auto-auth token
171+api_proxy {
172+ use_auto_auth_token = true
173+}
174+175+# Enable response caching
176+cache {
177+ use_auto_auth_token = true
178+}
179+180+# Logging
181+log_level = "info"
182```
183184+### start the proxy
185+186+Start OpenBao Proxy:
187+188+```bash
189+bao proxy -config=/tmp/openbao/proxy.hcl
190+```
191+192+The proxy will authenticate with OpenBao and start listening on
193+`127.0.0.1:8201`.
194+195+### configure spindle
196197Set these environment variables for Spindle:
198199```bash
200export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201+export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
00202export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203```
204205Start Spindle:
206207+Spindle will now connect to the local proxy, which handles all
208+authentication automatically.
209+210+## production setup for proxy
211+212+For production, you'll want to run the proxy as a service:
213+214+Place your production configuration in `/etc/openbao/proxy.hcl` with
215+proper TLS settings for the vault connection.
216217## verifying setup
218219+Test the proxy directly:
220221```bash
222+# Check proxy health
223+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224+225+# Test token lookup through proxy
226+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227```
228229+Test OpenBao operations through the server:
230231```bash
232+# List all secrets
233+bao kv list spindle/
234+235+# Add a test secret via Spindle API, then check it exists
236bao kv list spindle/repos/
0237238+# Get a specific secret
00239bao kv get spindle/repos/your_repo_path/SECRET_NAME
240```
241242## how it works
243244+- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245+- The proxy authenticates with OpenBao using AppRole credentials
246+- All Spindle requests go through the proxy, which injects authentication tokens
247- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248+- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249+- The proxy handles all token renewal automatically
250+- Spindle no longer manages tokens or authentication directly
000251252## troubleshooting
253254+**Connection refused**: Check that the OpenBao Proxy is running and
255+listening on the configured address.
256+257+**403 errors**: Verify the AppRole credentials are correct and the policy
258+has the necessary permissions.
259260**404 route errors**: The spindle KV mount probably doesn't exist - run
261+the mount creation step again.
262263+**Proxy authentication failures**: Check the proxy logs and verify the
264+role-id and secret-id files are readable and contain valid credentials.
265+266+**Secret not found after writing**: This can indicate policy permission
267+issues. Verify the policy includes both `spindle/data/*` and
268+`spindle/metadata/*` paths with appropriate capabilities.
269+270+Check proxy logs:
271+272+```bash
273+# If running as systemd service
274+journalctl -u openbao-proxy -f
275+276+# If running directly, check the console output
277+```
278+279+Test AppRole authentication manually:
280+281+```bash
282+bao write auth/approle/login \
283+ role_id="$(cat /tmp/openbao/role-id)" \
284+ secret_id="$(cat /tmp/openbao/secret-id)"
285+```
+140-41
docs/spindle/pipeline.md
···1-# spindle pipeline manifest
000023-Spindle pipelines are defined under the `.tangled/workflows` directory in a
4-repo. Generally:
000056-* Pipelines are defined in YAML.
7-* Dependencies can be specified from
8-[Nixpkgs](https://search.nixos.org) or custom registries.
9-* Environment variables can be set globally or per-step.
1011-Here's an example that uses all fields:
000000001213```yaml
14-# build_and_test.yaml
15when:
16- - event: ["push", "pull_request"]
17 branch: ["main", "develop"]
18- - event: ["manual"]
000000000000000000000000000000000000019020dependencies:
21- ## from nixpkgs
22 nixpkgs:
23 - nodejs
24- ## custom registry
25- git+https://tangled.sh/@oppi.li/statix:
26- - statix
0000000000000000002728-steps:
29- - name: "Install dependencies"
30- command: "npm install"
31- environment:
32- NODE_ENV: "development"
33- CI: "true"
03435- - name: "Run linter"
36- command: "npm run lint"
3738- - name: "Run tests"
39- command: "npm test"
0040 environment:
41- NODE_ENV: "test"
42- JEST_WORKERS: "2"
43-44- - name: "Build application"
45 command: "npm run build"
46 environment:
47 NODE_ENV: "production"
04849-environment:
50- BUILD_NUMBER: "123"
51- GIT_BRANCH: "main"
5253-## current repository is cloned and checked out at the target ref
54-## by default.
000000000055clone:
56 skip: false
57- depth: 50
58- submodules: true
59-```
000000006061-## git push options
00006263-These are push options that can be used with the `--push-option (-o)` flag of git push:
00000000006465-- `verbose-ci`, `ci-verbose`: enables diagnostics reporting for the CI pipeline, allowing you to see any issues when you push.
66-- `skip-ci`, `ci-skip`: skips triggering the CI pipeline.
···1+# spindle pipelines
2+3+Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4+5+The fields are:
67+- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8+- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9+- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10+- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11+- [Environment](#environment): An **optional** field that allows you to define environment variables.
12+- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
1314+## Trigger
0001516+The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17+18+- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19+ - `push`: The workflow should run every time a commit is pushed to the repository.
20+ - `pull_request`: The workflow should run every time a pull request is made or updated.
21+ - `manual`: The workflow can be triggered manually.
22+- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
23+24+For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
2526```yaml
027when:
28+ - event: ["push", "manual"]
29 branch: ["main", "develop"]
30+ - event: ["pull_request"]
31+ branch: ["main"]
32+```
33+34+## Engine
35+36+Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
37+38+- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
39+40+Example:
41+42+```yaml
43+engine: "nixery"
44+```
45+46+## Clone options
47+48+When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
49+50+- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
51+- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
52+- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
53+54+The default settings are:
55+56+```yaml
57+clone:
58+ skip: false
59+ depth: 1
60+ submodules: false
61+```
62+63+## Dependencies
64+65+Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
66+67+Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
6869+```yaml
70dependencies:
71+ # nixpkgs
72 nixpkgs:
73 - nodejs
74+ - go
75+ # custom registry
76+ git+https://tangled.sh/@example.com/my_pkg:
77+ - my_pkg
78+```
79+80+Now these dependencies are available to use in your workflow!
81+82+## Environment
83+84+The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
85+86+Example:
87+88+```yaml
89+environment:
90+ GOOS: "linux"
91+ GOARCH: "arm64"
92+ NODE_ENV: "production"
93+ MY_ENV_VAR: "MY_ENV_VALUE"
94+```
9596+## Steps
97+98+The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
99+100+- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
101+- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
102+- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103104+Example:
0105106+```yaml
107+steps:
108+ - name: "Build backend"
109+ command: "go build"
110 environment:
111+ GOOS: "darwin"
112+ GOARCH: "arm64"
113+ - name: "Build frontend"
0114 command: "npm run build"
115 environment:
116 NODE_ENV: "production"
117+```
118119+## Complete workflow
00120121+```yaml
122+# .tangled/workflows/build.yml
123+124+when:
125+ - event: ["push", "manual"]
126+ branch: ["main", "develop"]
127+ - event: ["pull_request"]
128+ branch: ["main"]
129+130+engine: "nixery"
131+132+# using the default values
133clone:
134 skip: false
135+ depth: 1
136+ submodules: false
137+138+dependencies:
139+ # nixpkgs
140+ nixpkgs:
141+ - nodejs
142+ - go
143+ # custom registry
144+ git+https://tangled.sh/@example.com/my_pkg:
145+ - my_pkg
146147+environment:
148+ GOOS: "linux"
149+ GOARCH: "arm64"
150+ NODE_ENV: "production"
151+ MY_ENV_VAR: "MY_ENV_VALUE"
152153+steps:
154+ - name: "Build backend"
155+ command: "go build"
156+ environment:
157+ GOOS: "darwin"
158+ GOARCH: "arm64"
159+ - name: "Build frontend"
160+ command: "npm run build"
161+ environment:
162+ NODE_ENV: "production"
163+```
164165+If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
0
···1+# Privacy Policy
2+3+**Last updated:** January 15, 2025
4+5+This Privacy Policy describes how Tangled ("we," "us," or "our")
6+collects, uses, and shares your personal information when you use our
7+platform and services (the "Service").
8+9+## 1. Information We Collect
10+11+### Account Information
12+13+When you create an account, we collect:
14+15+- Your chosen username
16+- Email address
17+- Profile information you choose to provide
18+- Authentication data
19+20+### Content and Activity
21+22+We store:
23+24+- Code repositories and associated metadata
25+- Issues, pull requests, and comments
26+- Activity logs and usage patterns
27+- Public keys for authentication
28+29+## 2. Data Location and Hosting
30+31+### EU Data Hosting
32+33+**All Tangled service data is hosted within the European Union.**
34+Specifically:
35+36+- **Personal Data Servers (PDS):** Accounts hosted on Tangled PDS
37+ (*.tngl.sh) are located in Finland
38+- **Application Data:** All other service data is stored on EU-based
39+ servers
40+- **Data Processing:** All data processing occurs within EU
41+ jurisdiction
42+43+### External PDS Notice
44+45+**Important:** If your account is hosted on Bluesky's PDS or other
46+self-hosted Personal Data Servers (not *.tngl.sh), we do not control
47+that data. The data protection, storage location, and privacy
48+practices for such accounts are governed by the respective PDS
49+provider's policies, not this Privacy Policy. We only control data
50+processing within our own services and infrastructure.
51+52+## 3. Third-Party Data Processors
53+54+We only share your data with the following third-party processors:
55+56+### Resend (Email Services)
57+58+- **Purpose:** Sending transactional emails (account verification,
59+ notifications)
60+- **Data Shared:** Email address and necessary message content
61+62+### Cloudflare (Image Caching)
63+64+- **Purpose:** Caching and optimizing image delivery
65+- **Data Shared:** Public images and associated metadata for caching
66+ purposes
67+68+### Posthog (Usage Metrics Tracking)
69+70+- **Purpose:** Tracking usage and platform metrics
71+- **Data Shared:** Anonymous usage data, IP addresses, DIDs, and browser
72+ information
73+74+## 4. How We Use Your Information
75+76+We use your information to:
77+78+- Provide and maintain the Service
79+- Process your transactions and requests
80+- Send you technical notices and support messages
81+- Improve and develop new features
82+- Ensure security and prevent fraud
83+- Comply with legal obligations
84+85+## 5. Data Sharing and Disclosure
86+87+We do not sell, trade, or rent your personal information. We may share
88+your information only in the following circumstances:
89+90+- With the third-party processors listed above
91+- When required by law or legal process
92+- To protect our rights, property, or safety, or that of our users
93+- In connection with a merger, acquisition, or sale of assets (with
94+ appropriate protections)
95+96+## 6. Data Security
97+98+We implement appropriate technical and organizational measures to
99+protect your personal information against unauthorized access,
100+alteration, disclosure, or destruction. However, no method of
101+transmission over the Internet is 100% secure.
102+103+## 7. Data Retention
104+105+We retain your personal information for as long as necessary to provide
106+the Service and fulfill the purposes outlined in this Privacy Policy,
107+unless a longer retention period is required by law.
108+109+## 8. Your Rights
110+111+Under applicable data protection laws, you have the right to:
112+113+- Access your personal information
114+- Correct inaccurate information
115+- Request deletion of your information
116+- Object to processing of your information
117+- Data portability
118+- Withdraw consent (where applicable)
119+120+## 9. Cookies and Tracking
121+122+We use cookies and similar technologies to:
123+124+- Maintain your login session
125+- Remember your preferences
126+- Analyze usage patterns to improve the Service
127+128+You can control cookie settings through your browser preferences.
129+130+## 10. Children's Privacy
131+132+The Service is not intended for children under 16 years of age. We do
133+not knowingly collect personal information from children under 16. If
134+we become aware that we have collected such information, we will take
135+steps to delete it.
136+137+## 11. International Data Transfers
138+139+While all our primary data processing occurs within the EU, some of our
140+third-party processors may process data outside the EU. When this
141+occurs, we ensure appropriate safeguards are in place, such as Standard
142+Contractual Clauses or adequacy decisions.
143+144+## 12. Changes to This Privacy Policy
145+146+We may update this Privacy Policy from time to time. We will notify you
147+of any changes by posting the new Privacy Policy on this page and
148+updating the "Last updated" date.
149+150+## 13. Contact Information
151+152+If you have any questions about this Privacy Policy or wish to exercise
153+your rights, please contact us through our platform or via email.
154+155+---
156+157+This Privacy Policy complies with the EU General Data Protection
158+Regulation (GDPR) and other applicable data protection laws.
···1+# Terms of Service
2+3+**Last updated:** January 15, 2025
4+5+Welcome to Tangled. These Terms of Service ("Terms") govern your access
6+to and use of the Tangled platform and services (the "Service")
7+operated by us ("Tangled," "we," "us," or "our").
8+9+## 1. Acceptance of Terms
10+11+By accessing or using our Service, you agree to be bound by these Terms.
12+If you disagree with any part of these terms, then you may not access
13+the Service.
14+15+## 2. Account Registration
16+17+To use certain features of the Service, you must register for an
18+account. You agree to provide accurate, current, and complete
19+information during the registration process and to update such
20+information to keep it accurate, current, and complete.
21+22+## 3. Account Termination
23+24+> **Important Notice**
25+>
26+> **We reserve the right to terminate, suspend, or restrict access to
27+> your account at any time, for any reason, or for no reason at all, at
28+> our sole discretion.** This includes, but is not limited to,
29+> termination for violation of these Terms, inappropriate conduct, spam,
30+> abuse, or any other behavior we deem harmful to the Service or other
31+> users.
32+>
33+> Account termination may result in the loss of access to your
34+> repositories, data, and other content associated with your account. We
35+> are not obligated to provide advance notice of termination, though we
36+> may do so in our discretion.
37+38+## 4. Acceptable Use
39+40+You agree not to use the Service to:
41+42+- Violate any applicable laws or regulations
43+- Infringe upon the rights of others
44+- Upload, store, or share content that is illegal, harmful, threatening,
45+ abusive, harassing, defamatory, vulgar, obscene, or otherwise
46+ objectionable
47+- Engage in spam, phishing, or other deceptive practices
48+- Attempt to gain unauthorized access to the Service or other users'
49+ accounts
50+- Interfere with or disrupt the Service or servers connected to the
51+ Service
52+53+## 5. Content and Intellectual Property
54+55+You retain ownership of the content you upload to the Service. By
56+uploading content, you grant us a non-exclusive, worldwide, royalty-free
57+license to use, reproduce, modify, and distribute your content as
58+necessary to provide the Service.
59+60+## 6. Privacy
61+62+Your privacy is important to us. Please review our [Privacy
63+Policy](/privacy), which also governs your use of the Service.
64+65+## 7. Disclaimers
66+67+The Service is provided on an "AS IS" and "AS AVAILABLE" basis. We make
68+no warranties, expressed or implied, and hereby disclaim and negate all
69+other warranties including without limitation, implied warranties or
70+conditions of merchantability, fitness for a particular purpose, or
71+non-infringement of intellectual property or other violation of rights.
72+73+## 8. Limitation of Liability
74+75+In no event shall Tangled, nor its directors, employees, partners,
76+agents, suppliers, or affiliates, be liable for any indirect,
77+incidental, special, consequential, or punitive damages, including
78+without limitation, loss of profits, data, use, goodwill, or other
79+intangible losses, resulting from your use of the Service.
80+81+## 9. Indemnification
82+83+You agree to defend, indemnify, and hold harmless Tangled and its
84+affiliates, officers, directors, employees, and agents from and against
85+any and all claims, damages, obligations, losses, liabilities, costs,
86+or debt, and expenses (including attorney's fees).
87+88+## 10. Governing Law
89+90+These Terms shall be interpreted and governed by the laws of Finland,
91+without regard to its conflict of law provisions.
92+93+## 11. Changes to Terms
94+95+We reserve the right to modify or replace these Terms at any time. If a
96+revision is material, we will try to provide at least 30 days notice
97+prior to any new terms taking effect.
98+99+## 12. Contact Information
100+101+If you have any questions about these Terms of Service, please contact
102+us through our platform or via email.
103+104+---
105+106+These terms are effective as of the last updated date shown above and
107+will remain in effect except with respect to any changes in their
108+provisions in the future, which will be in effect immediately after
109+being posted on this page.
···9// NewHandler sets up a new slog.Handler with the service name
10// as an attribute
11func NewHandler(name string) slog.Handler {
12- handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{})
001314 var attrs []slog.Attr
15 attrs = append(attrs, slog.Attr{Key: "service", Value: slog.StringValue(name)})
···9// NewHandler sets up a new slog.Handler with the service name
10// as an attribute
11func NewHandler(name string) slog.Handler {
12+ handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
13+ Level: slog.LevelDebug,
14+ })
1516 var attrs []slog.Attr
17 attrs = append(attrs, slog.Attr{Key: "service", Value: slog.StringValue(name)})
+99-21
nix/gomod2nix.toml
···11 version = "v0.6.2"
12 hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
13 [mod."github.com/ProtonMail/go-crypto"]
14- version = "v1.2.0"
15- hash = "sha256-5fKgWUz6BoyFNNZ1OD9QjhBrhNEBCuVfO2WqH+X59oo="
00016 [mod."github.com/alecthomas/chroma/v2"]
17 version = "v2.19.0"
18 hash = "sha256-dxsu43a+PvHg2jYR0Tfys6a8x6IVR+9oCGAh+fvL3SM="
19 replaced = "github.com/oppiliappan/chroma/v2"
00020 [mod."github.com/anmitsu/go-shlex"]
21 version = "v0.0.0-20200514113438-38f4b401e2be"
22 hash = "sha256-L3Ak4X2z7WXq7vMKuiHCOJ29nlpajUQ08Sfb9T0yP54="
···51 [mod."github.com/casbin/govaluate"]
52 version = "v1.3.0"
53 hash = "sha256-vDUFEGt8oL4n/PHwlMZPjmaLvcpGTN4HEIRGl2FPxUA="
00054 [mod."github.com/cespare/xxhash/v2"]
55 version = "v2.3.0"
56 hash = "sha256-7hRlwSR+fos1kx4VZmJ/7snR7zHh8ZFKX+qqqqGcQpY="
57 [mod."github.com/cloudflare/circl"]
58- version = "v1.6.0"
59- hash = "sha256-a+SVfnHYC8Fb+NQLboNg5P9sry+WutzuNetVHFVAAo0="
00060 [mod."github.com/containerd/errdefs"]
61 version = "v1.0.0"
62 hash = "sha256-wMZGoeqvRhuovYCJx0Js4P3qFCNTZ/6Atea/kNYoPMI="
···105 [mod."github.com/felixge/httpsnoop"]
106 version = "v1.0.4"
107 hash = "sha256-c1JKoRSndwwOyOxq9ddCe+8qn7mG9uRq2o/822x5O/c="
000108 [mod."github.com/gliderlabs/ssh"]
109 version = "v0.3.8"
110 hash = "sha256-FW+91qCB3rfTm0I1VmqfwA7o+2kDys2JHOudKKyxWwc="
···127 version = "v5.17.0"
128 hash = "sha256-gya68abB6GtejUqr60DyU7NIGtNzHQVCAeDTYKk1evQ="
129 replaced = "github.com/oppiliappan/go-git/v5"
000130 [mod."github.com/go-logr/logr"]
131 version = "v1.4.3"
132 hash = "sha256-Nnp/dEVNMxLp3RSPDHZzGbI8BkSNuZMX0I0cjWKXXLA="
···136 [mod."github.com/go-redis/cache/v9"]
137 version = "v9.0.0"
138 hash = "sha256-b4S3K4KoZhF0otw6FRIOq/PTdHGrb/LumB4GKo4khsY="
000139 [mod."github.com/goccy/go-json"]
140 version = "v0.10.5"
141 hash = "sha256-/EtlGihP0/7oInzMC5E0InZ4b5Ad3s4xOpqotloi3xw="
···148 [mod."github.com/golang/groupcache"]
149 version = "v0.0.0-20241129210726-2c02b8208cf8"
150 hash = "sha256-AdLZ3dJLe/yduoNvZiXugZxNfmwJjNQyQGsIdzYzH74="
000000151 [mod."github.com/google/uuid"]
152 version = "v1.6.0"
153 hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
154 [mod."github.com/gorilla/css"]
155 version = "v1.0.1"
156 hash = "sha256-6JwNHqlY2NpZ0pSQTyYPSpiNqjXOdFHqrUT10sv3y8A="
000157 [mod."github.com/gorilla/securecookie"]
158 version = "v1.1.2"
159 hash = "sha256-KeMHNM9emxX+N0WYiZsTii7n8sNsmjWwbnQ9SaJfTKE="
···161 version = "v1.4.0"
162 hash = "sha256-cLK2z1uOEz7Wah/LclF65ptYMqzuvaRnfIGYqtn3b7g="
163 [mod."github.com/gorilla/websocket"]
164- version = "v1.5.3"
165- hash = "sha256-vTIGEFMEi+30ZdO6ffMNJ/kId6pZs5bbyqov8xe9BM0="
000166 [mod."github.com/hashicorp/go-cleanhttp"]
167 version = "v0.5.2"
168 hash = "sha256-N9GOKYo7tK6XQUFhvhImtL7PZW/mr4C4Manx/yPVvcQ="
000169 [mod."github.com/hashicorp/go-retryablehttp"]
170 version = "v0.7.8"
171 hash = "sha256-4LZwKaFBbpKi9lSq5y6lOlYHU6WMnQdGNMxTd33rN80="
000000000172 [mod."github.com/hashicorp/golang-lru"]
173 version = "v1.0.2"
174 hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
175 [mod."github.com/hashicorp/golang-lru/v2"]
176 version = "v2.0.7"
177 hash = "sha256-t1bcXLgrQNOYUVyYEZ0knxcXpsTk4IuJZDjKvyJX75g="
000000178 [mod."github.com/hiddeco/sshsig"]
179 version = "v0.2.0"
180 hash = "sha256-Yc8Ip4XxrL5plb7Lq0ziYFznteVDZnskoyOZDIMsWOU="
···256 [mod."github.com/minio/sha256-simd"]
257 version = "v1.0.1"
258 hash = "sha256-4hfGDIQaWq8fvtGzHDhoK9v2IocXnJY7OAL6saMJbmA="
000259 [mod."github.com/moby/docker-image-spec"]
260 version = "v1.3.1"
261 hash = "sha256-xwSNLmMagzywdGJIuhrWl1r7cIWBYCOMNYbuDDT6Jhs="
···289 [mod."github.com/munnerz/goautoneg"]
290 version = "v0.0.0-20191010083416-a7dc8b61c822"
291 hash = "sha256-79URDDFenmGc9JZu+5AXHToMrtTREHb3BC84b/gym9Q="
000000292 [mod."github.com/opencontainers/go-digest"]
293 version = "v1.0.0"
294 hash = "sha256-cfVDjHyWItmUGZ2dzQhCHgmOmou8v7N+itDkLZVkqkQ="
···296 version = "v1.1.1"
297 hash = "sha256-bxBjtl+6846Ed3QHwdssOrNvlHV6b+Dn17zPISSQGP8="
298 [mod."github.com/opentracing/opentracing-go"]
299- version = "v1.2.0"
300- hash = "sha256-kKTKFGXOsCF6QdVzI++GgaRzv2W+kWq5uDXOJChvLxM="
301 [mod."github.com/pjbgf/sha1cd"]
302 version = "v0.3.2"
303 hash = "sha256-jdbiRhU8xc1C5c8m7BSCj71PUXHY3f7TWFfxDKKpUMk="
···326 version = "v0.16.1"
327 hash = "sha256-OBCvKlLW2obct35p0L9Q+1ZrxZjpTmbgHMP2rng9hpo="
328 [mod."github.com/redis/go-redis/v9"]
329- version = "v9.3.0"
330- hash = "sha256-PNXDX3BH92d2jL/AkdK0eWMorh387Y6duwYNhsqNe+w="
331 [mod."github.com/resend/resend-go/v2"]
332 version = "v2.15.0"
333 hash = "sha256-1lMoxuMLQXaNWFKadS6rpztAKwvIl3/LWMXqw7f5WYg="
000334 [mod."github.com/segmentio/asm"]
335 version = "v1.2.0"
336 hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs="
···362 [mod."github.com/whyrusleeping/cbor-gen"]
363 version = "v0.3.1"
364 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
000000365 [mod."github.com/yuin/goldmark"]
366- version = "v1.4.13"
367- hash = "sha256-GVwFKZY6moIS6I0ZGuio/WtDif+lkZRfqWS6b4AAJyI="
000368 [mod."gitlab.com/yawning/secp256k1-voi"]
369 version = "v0.0.0-20230925100816-f2616030848b"
370 hash = "sha256-X8INg01LTg13iOuwPI3uOhPN7r01sPZtmtwJ2sudjCA="
···380 [mod."go.opentelemetry.io/otel"]
381 version = "v1.37.0"
382 hash = "sha256-zWpyp9K8/Te86uhNjamchZctTdAnmHhoVw9m4ACfSoo="
000383 [mod."go.opentelemetry.io/otel/metric"]
384 version = "v1.37.0"
385 hash = "sha256-BWnkdldA3xzGhnaConzMAuQzOnugytIvrP6GjkZVAYg="
···405 version = "v0.0.0-20250620022241-b7579e27df2b"
406 hash = "sha256-IsDTeuWLj4UkPO4NhWTvFeZ22WNtlxjoWiyAJh6zdig="
407 [mod."golang.org/x/net"]
408- version = "v0.41.0"
409- hash = "sha256-6/pi8rNmGvBFzkJQXkXkMfL1Bjydhg3BgAMYDyQ/Uvg="
410 [mod."golang.org/x/sync"]
411- version = "v0.15.0"
412- hash = "sha256-Jf4ehm8H8YAWY6mM151RI5CbG7JcOFtmN0AZx4bE3UE="
413 [mod."golang.org/x/sys"]
414 version = "v0.34.0"
415 hash = "sha256-5rZ7p8IaGli5X1sJbfIKOcOEwY4c0yQhinJPh2EtK50="
000416 [mod."golang.org/x/time"]
417 version = "v0.12.0"
418 hash = "sha256-Cp3oxrCMH2wyxjzr5SHVmyhgaoUuSl56Uy00Q7DYEpw="
···420 version = "v0.0.0-20240903120638-7835f813f4da"
421 hash = "sha256-bE7CcrnAvryNvM26ieJGXqbAtuLwHaGcmtVMsVnksqo="
422 [mod."google.golang.org/genproto/googleapis/api"]
423- version = "v0.0.0-20250519155744-55703ea1f237"
424- hash = "sha256-ivktx8ipWgWZgchh4FjKoWL7kU8kl/TtIavtZq/F5SQ="
425 [mod."google.golang.org/genproto/googleapis/rpc"]
426- version = "v0.0.0-20250519155744-55703ea1f237"
427 hash = "sha256-WK7iDtAhH19NPe3TywTQlGjDawNaDKWnxhFL9PgVUwM="
428 [mod."google.golang.org/grpc"]
429- version = "v1.72.1"
430- hash = "sha256-5JczomNvroKWtIYKDgXwaIaEfuNEK//MHPhJQiaxMXs="
431 [mod."google.golang.org/protobuf"]
432 version = "v1.36.6"
433 hash = "sha256-lT5qnefI5FDJnowz9PEkAGylH3+fE+A3DJDkAyy9RMc="
···11 version = "v0.6.2"
12 hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
13 [mod."github.com/ProtonMail/go-crypto"]
14+ version = "v1.3.0"
15+ hash = "sha256-TUG+C4MyeWglOmiwiW2/NUVurFHXLgEPRd3X9uQ1NGI="
16+ [mod."github.com/alecthomas/assert/v2"]
17+ version = "v2.11.0"
18+ hash = "sha256-tDJCDKZ0R4qNA7hgMKWrpDyogt1802LCJDBCExxdqaU="
19 [mod."github.com/alecthomas/chroma/v2"]
20 version = "v2.19.0"
21 hash = "sha256-dxsu43a+PvHg2jYR0Tfys6a8x6IVR+9oCGAh+fvL3SM="
22 replaced = "github.com/oppiliappan/chroma/v2"
23+ [mod."github.com/alecthomas/repr"]
24+ version = "v0.4.0"
25+ hash = "sha256-CyAzMSTfLGHDtfGXi91y7XMVpPUDNOKjsznb+osl9dU="
26 [mod."github.com/anmitsu/go-shlex"]
27 version = "v0.0.0-20200514113438-38f4b401e2be"
28 hash = "sha256-L3Ak4X2z7WXq7vMKuiHCOJ29nlpajUQ08Sfb9T0yP54="
···57 [mod."github.com/casbin/govaluate"]
58 version = "v1.3.0"
59 hash = "sha256-vDUFEGt8oL4n/PHwlMZPjmaLvcpGTN4HEIRGl2FPxUA="
60+ [mod."github.com/cenkalti/backoff/v4"]
61+ version = "v4.3.0"
62+ hash = "sha256-wfVjNZsGG1WoNC5aL+kdcy6QXPgZo4THAevZ1787md8="
63 [mod."github.com/cespare/xxhash/v2"]
64 version = "v2.3.0"
65 hash = "sha256-7hRlwSR+fos1kx4VZmJ/7snR7zHh8ZFKX+qqqqGcQpY="
66 [mod."github.com/cloudflare/circl"]
67+ version = "v1.6.2-0.20250618153321-aa837fd1539d"
68+ hash = "sha256-0s/i/XmMcuvPQ+qK9OIU5KxwYZyLVXRtdlYvIXRJT3Y="
69+ [mod."github.com/cloudflare/cloudflare-go"]
70+ version = "v0.115.0"
71+ hash = "sha256-jezmDs6IsHA4rag7DzcHDfDgde0vU4iKgCN9+0XDViw="
72 [mod."github.com/containerd/errdefs"]
73 version = "v1.0.0"
74 hash = "sha256-wMZGoeqvRhuovYCJx0Js4P3qFCNTZ/6Atea/kNYoPMI="
···117 [mod."github.com/felixge/httpsnoop"]
118 version = "v1.0.4"
119 hash = "sha256-c1JKoRSndwwOyOxq9ddCe+8qn7mG9uRq2o/822x5O/c="
120+ [mod."github.com/fsnotify/fsnotify"]
121+ version = "v1.6.0"
122+ hash = "sha256-DQesOCweQPEwmAn6s7DCP/Dwy8IypC+osbpfsvpkdP0="
123 [mod."github.com/gliderlabs/ssh"]
124 version = "v0.3.8"
125 hash = "sha256-FW+91qCB3rfTm0I1VmqfwA7o+2kDys2JHOudKKyxWwc="
···142 version = "v5.17.0"
143 hash = "sha256-gya68abB6GtejUqr60DyU7NIGtNzHQVCAeDTYKk1evQ="
144 replaced = "github.com/oppiliappan/go-git/v5"
145+ [mod."github.com/go-jose/go-jose/v3"]
146+ version = "v3.0.4"
147+ hash = "sha256-RrLHCu9l6k0XVobdZQJ9Sx/VTQcWjrdLR5BEG7yXTEQ="
148 [mod."github.com/go-logr/logr"]
149 version = "v1.4.3"
150 hash = "sha256-Nnp/dEVNMxLp3RSPDHZzGbI8BkSNuZMX0I0cjWKXXLA="
···154 [mod."github.com/go-redis/cache/v9"]
155 version = "v9.0.0"
156 hash = "sha256-b4S3K4KoZhF0otw6FRIOq/PTdHGrb/LumB4GKo4khsY="
157+ [mod."github.com/go-test/deep"]
158+ version = "v1.1.1"
159+ hash = "sha256-WvPrTvUPmbQb4R6DrvSB9O3zm0IOk+n14YpnSl2deR8="
160 [mod."github.com/goccy/go-json"]
161 version = "v0.10.5"
162 hash = "sha256-/EtlGihP0/7oInzMC5E0InZ4b5Ad3s4xOpqotloi3xw="
···169 [mod."github.com/golang/groupcache"]
170 version = "v0.0.0-20241129210726-2c02b8208cf8"
171 hash = "sha256-AdLZ3dJLe/yduoNvZiXugZxNfmwJjNQyQGsIdzYzH74="
172+ [mod."github.com/golang/mock"]
173+ version = "v1.6.0"
174+ hash = "sha256-fWdnMQisRbiRzGT3ISrUHovquzLRHWvcv1JEsJFZRno="
175+ [mod."github.com/google/go-querystring"]
176+ version = "v1.1.0"
177+ hash = "sha256-itsKgKghuX26czU79cK6C2n+lc27jm5Dw1XbIRgwZJY="
178 [mod."github.com/google/uuid"]
179 version = "v1.6.0"
180 hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
181 [mod."github.com/gorilla/css"]
182 version = "v1.0.1"
183 hash = "sha256-6JwNHqlY2NpZ0pSQTyYPSpiNqjXOdFHqrUT10sv3y8A="
184+ [mod."github.com/gorilla/feeds"]
185+ version = "v1.2.0"
186+ hash = "sha256-ptczizo27t6Bsq6rHJ4WiHmBRP54UC5yNfHghAqOBQk="
187 [mod."github.com/gorilla/securecookie"]
188 version = "v1.1.2"
189 hash = "sha256-KeMHNM9emxX+N0WYiZsTii7n8sNsmjWwbnQ9SaJfTKE="
···191 version = "v1.4.0"
192 hash = "sha256-cLK2z1uOEz7Wah/LclF65ptYMqzuvaRnfIGYqtn3b7g="
193 [mod."github.com/gorilla/websocket"]
194+ version = "v1.5.4-0.20250319132907-e064f32e3674"
195+ hash = "sha256-a8n6oe20JDpwThClgAyVhJDi6QVaS0qzT4PvRxlQ9to="
196+ [mod."github.com/hashicorp/errwrap"]
197+ version = "v1.1.0"
198+ hash = "sha256-6lwuMQOfBq+McrViN3maJTIeh4f8jbEqvLy2c9FvvFw="
199 [mod."github.com/hashicorp/go-cleanhttp"]
200 version = "v0.5.2"
201 hash = "sha256-N9GOKYo7tK6XQUFhvhImtL7PZW/mr4C4Manx/yPVvcQ="
202+ [mod."github.com/hashicorp/go-multierror"]
203+ version = "v1.1.1"
204+ hash = "sha256-ANzPEUJIZIlToxR89Mn7Db73d9LGI51ssy7eNnUgmlA="
205 [mod."github.com/hashicorp/go-retryablehttp"]
206 version = "v0.7.8"
207 hash = "sha256-4LZwKaFBbpKi9lSq5y6lOlYHU6WMnQdGNMxTd33rN80="
208+ [mod."github.com/hashicorp/go-secure-stdlib/parseutil"]
209+ version = "v0.2.0"
210+ hash = "sha256-mb27ZKw5VDTmNj1QJvxHVR0GyY7UdacLJ0jWDV3nQd8="
211+ [mod."github.com/hashicorp/go-secure-stdlib/strutil"]
212+ version = "v0.1.2"
213+ hash = "sha256-UmCMzjamCW1d9KNvNzELqKf1ElHOXPz+ZtdJkI+DV0A="
214+ [mod."github.com/hashicorp/go-sockaddr"]
215+ version = "v1.0.7"
216+ hash = "sha256-p6eDOrGzN1jMmT/F/f/VJMq0cKNFhUcEuVVwTE6vSrs="
217 [mod."github.com/hashicorp/golang-lru"]
218 version = "v1.0.2"
219 hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
220 [mod."github.com/hashicorp/golang-lru/v2"]
221 version = "v2.0.7"
222 hash = "sha256-t1bcXLgrQNOYUVyYEZ0knxcXpsTk4IuJZDjKvyJX75g="
223+ [mod."github.com/hashicorp/hcl"]
224+ version = "v1.0.1-vault-7"
225+ hash = "sha256-xqYtjCJQVsg04Yj2Uy2Q5bi6X6cDRYhJD/SUEWaHMDM="
226+ [mod."github.com/hexops/gotextdiff"]
227+ version = "v1.0.3"
228+ hash = "sha256-wVs5uJs2KHU1HnDCDdSe0vIgNZylvs8oNidDxwA3+O0="
229 [mod."github.com/hiddeco/sshsig"]
230 version = "v0.2.0"
231 hash = "sha256-Yc8Ip4XxrL5plb7Lq0ziYFznteVDZnskoyOZDIMsWOU="
···307 [mod."github.com/minio/sha256-simd"]
308 version = "v1.0.1"
309 hash = "sha256-4hfGDIQaWq8fvtGzHDhoK9v2IocXnJY7OAL6saMJbmA="
310+ [mod."github.com/mitchellh/mapstructure"]
311+ version = "v1.5.0"
312+ hash = "sha256-ztVhGQXs67MF8UadVvG72G3ly0ypQW0IRDdOOkjYwoE="
313 [mod."github.com/moby/docker-image-spec"]
314 version = "v1.3.1"
315 hash = "sha256-xwSNLmMagzywdGJIuhrWl1r7cIWBYCOMNYbuDDT6Jhs="
···343 [mod."github.com/munnerz/goautoneg"]
344 version = "v0.0.0-20191010083416-a7dc8b61c822"
345 hash = "sha256-79URDDFenmGc9JZu+5AXHToMrtTREHb3BC84b/gym9Q="
346+ [mod."github.com/onsi/gomega"]
347+ version = "v1.37.0"
348+ hash = "sha256-PfHFYp365MwBo+CUZs+mN5QEk3Kqe9xrBX+twWfIc9o="
349+ [mod."github.com/openbao/openbao/api/v2"]
350+ version = "v2.3.0"
351+ hash = "sha256-1bIyvL3GdzPUfsM+gxuKMaH5jKxMaucZQgL6/DfbmDM="
352 [mod."github.com/opencontainers/go-digest"]
353 version = "v1.0.0"
354 hash = "sha256-cfVDjHyWItmUGZ2dzQhCHgmOmou8v7N+itDkLZVkqkQ="
···356 version = "v1.1.1"
357 hash = "sha256-bxBjtl+6846Ed3QHwdssOrNvlHV6b+Dn17zPISSQGP8="
358 [mod."github.com/opentracing/opentracing-go"]
359+ version = "v1.2.1-0.20220228012449-10b1cf09e00b"
360+ hash = "sha256-77oWcDviIoGWHVAotbgmGRpLGpH5AUy+pM15pl3vRrw="
361 [mod."github.com/pjbgf/sha1cd"]
362 version = "v0.3.2"
363 hash = "sha256-jdbiRhU8xc1C5c8m7BSCj71PUXHY3f7TWFfxDKKpUMk="
···386 version = "v0.16.1"
387 hash = "sha256-OBCvKlLW2obct35p0L9Q+1ZrxZjpTmbgHMP2rng9hpo="
388 [mod."github.com/redis/go-redis/v9"]
389+ version = "v9.7.3"
390+ hash = "sha256-7ip5Ns/NEnFmVLr5iN8m3gS4RrzVAYJ7pmJeeaTmjjo="
391 [mod."github.com/resend/resend-go/v2"]
392 version = "v2.15.0"
393 hash = "sha256-1lMoxuMLQXaNWFKadS6rpztAKwvIl3/LWMXqw7f5WYg="
394+ [mod."github.com/ryanuber/go-glob"]
395+ version = "v1.0.0"
396+ hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY="
397 [mod."github.com/segmentio/asm"]
398 version = "v1.2.0"
399 hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs="
···425 [mod."github.com/whyrusleeping/cbor-gen"]
426 version = "v0.3.1"
427 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
428+ [mod."github.com/wyatt915/goldmark-treeblood"]
429+ version = "v0.0.0-20250825231212-5dcbdb2f4b57"
430+ hash = "sha256-IZEsUXTBTsNgWoD7vqRUc9aFCCHNjzk1IUmI9O+NCnM="
431+ [mod."github.com/wyatt915/treeblood"]
432+ version = "v0.1.15"
433+ hash = "sha256-hb99exdkoY2Qv8WdDxhwgPXGbEYimUr6wFtPXEvcO9g="
434 [mod."github.com/yuin/goldmark"]
435+ version = "v1.7.12"
436+ hash = "sha256-thLYBS4woL2X5qRdo7vP+xCvjlGRDU0jXtDCUt6vvWM="
437+ [mod."github.com/yuin/goldmark-highlighting/v2"]
438+ version = "v2.0.0-20230729083705-37449abec8cc"
439+ hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
440 [mod."gitlab.com/yawning/secp256k1-voi"]
441 version = "v0.0.0-20230925100816-f2616030848b"
442 hash = "sha256-X8INg01LTg13iOuwPI3uOhPN7r01sPZtmtwJ2sudjCA="
···452 [mod."go.opentelemetry.io/otel"]
453 version = "v1.37.0"
454 hash = "sha256-zWpyp9K8/Te86uhNjamchZctTdAnmHhoVw9m4ACfSoo="
455+ [mod."go.opentelemetry.io/otel/exporters/otlp/otlptrace"]
456+ version = "v1.33.0"
457+ hash = "sha256-D5BMzmtN1d3pRnxIcvDOyQrjerK1JoavtYjJLhPKv/I="
458 [mod."go.opentelemetry.io/otel/metric"]
459 version = "v1.37.0"
460 hash = "sha256-BWnkdldA3xzGhnaConzMAuQzOnugytIvrP6GjkZVAYg="
···480 version = "v0.0.0-20250620022241-b7579e27df2b"
481 hash = "sha256-IsDTeuWLj4UkPO4NhWTvFeZ22WNtlxjoWiyAJh6zdig="
482 [mod."golang.org/x/net"]
483+ version = "v0.42.0"
484+ hash = "sha256-YxileisIIez+kcAI+21kY5yk0iRuEqti2YdmS8jvP2s="
485 [mod."golang.org/x/sync"]
486+ version = "v0.16.0"
487+ hash = "sha256-sqKDRESeMzLe0jWGWltLZL/JIgrn0XaIeBWCzVN3Bks="
488 [mod."golang.org/x/sys"]
489 version = "v0.34.0"
490 hash = "sha256-5rZ7p8IaGli5X1sJbfIKOcOEwY4c0yQhinJPh2EtK50="
491+ [mod."golang.org/x/text"]
492+ version = "v0.27.0"
493+ hash = "sha256-VX0rOh6L3qIvquKSGjfZQFU8URNtGvkNvxE7OZtboW8="
494 [mod."golang.org/x/time"]
495 version = "v0.12.0"
496 hash = "sha256-Cp3oxrCMH2wyxjzr5SHVmyhgaoUuSl56Uy00Q7DYEpw="
···498 version = "v0.0.0-20240903120638-7835f813f4da"
499 hash = "sha256-bE7CcrnAvryNvM26ieJGXqbAtuLwHaGcmtVMsVnksqo="
500 [mod."google.golang.org/genproto/googleapis/api"]
501+ version = "v0.0.0-20250603155806-513f23925822"
502+ hash = "sha256-0CS432v9zVhkVLqFpZtxBX8rvVqP67lb7qQ3es7RqIU="
503 [mod."google.golang.org/genproto/googleapis/rpc"]
504+ version = "v0.0.0-20250603155806-513f23925822"
505 hash = "sha256-WK7iDtAhH19NPe3TywTQlGjDawNaDKWnxhFL9PgVUwM="
506 [mod."google.golang.org/grpc"]
507+ version = "v1.73.0"
508+ hash = "sha256-LfVlwip++q2DX70RU6CxoXglx1+r5l48DwlFD05G11c="
509 [mod."google.golang.org/protobuf"]
510 version = "v1.36.6"
511 hash = "sha256-lT5qnefI5FDJnowz9PEkAGylH3+fE+A3DJDkAyy9RMc="
···1{
2 nixpkgs,
3+ system,
4+ hostSystem,
5 self,
6+}: let
7+ envVar = name: let
8+ var = builtins.getEnv name;
9+ in
10+ if var == ""
11+ then throw "\$${name} must be defined, see docs/hacking.md for more details"
12+ else var;
13+in
14+ nixpkgs.lib.nixosSystem {
15+ inherit system;
16+ modules = [
17+ self.nixosModules.knot
18+ self.nixosModules.spindle
19+ ({
20+ lib,
21+ config,
22+ pkgs,
23+ ...
24+ }: {
25+ virtualisation.vmVariant.virtualisation = {
26+ host.pkgs = import nixpkgs {system = hostSystem;};
27+28+ graphics = false;
29+ memorySize = 2048;
30+ diskSize = 10 * 1024;
31+ cores = 2;
32+ forwardPorts = [
33+ # ssh
34+ {
35+ from = "host";
36+ host.port = 2222;
37+ guest.port = 22;
38+ }
39+ # knot
40+ {
41+ from = "host";
42+ host.port = 6000;
43+ guest.port = 6000;
44+ }
45+ # spindle
46+ {
47+ from = "host";
48+ host.port = 6555;
49+ guest.port = 6555;
50+ }
51+ ];
52+ sharedDirectories = {
53+ # We can't use the 9p mounts directly for most of these
54+ # as SQLite is incompatible with them. So instead we
55+ # mount the shared directories to a different location
56+ # and copy the contents around on service start/stop.
57+ knotData = {
58+ source = "$TANGLED_VM_DATA_DIR/knot";
59+ target = "/mnt/knot-data";
60+ };
61+ spindleData = {
62+ source = "$TANGLED_VM_DATA_DIR/spindle";
63+ target = "/mnt/spindle-data";
64+ };
65+ spindleLogs = {
66+ source = "$TANGLED_VM_DATA_DIR/spindle-logs";
67+ target = "/var/log/spindle";
68+ };
69+ };
70 };
71+ # This is fine because any and all ports that are forwarded to host are explicitly marked above, we don't need a separate guest firewall
72+ networking.firewall.enable = false;
73+ time.timeZone = "Europe/London";
74+ services.getty.autologinUser = "root";
75+ environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
76+ services.tangled-knot = {
77+ enable = true;
78+ motd = "Welcome to the development knot!\n";
79+ server = {
80+ owner = envVar "TANGLED_VM_KNOT_OWNER";
81+ hostname = "localhost:6000";
82+ listenAddr = "0.0.0.0:6000";
83+ };
84+ };
85+ services.tangled-spindle = {
86+ enable = true;
87+ server = {
88+ owner = envVar "TANGLED_VM_SPINDLE_OWNER";
89+ hostname = "localhost:6555";
90+ listenAddr = "0.0.0.0:6555";
91+ dev = true;
92+ queueSize = 100;
93+ maxJobCount = 2;
94+ secrets = {
95+ provider = "sqlite";
96+ };
97+ };
98 };
99+ users = {
100+ # So we don't have to deal with permission clashing between
101+ # blank disk VMs and existing state
102+ users.${config.services.tangled-knot.gitUser}.uid = 666;
103+ groups.${config.services.tangled-knot.gitUser}.gid = 666;
104+105+ # TODO: separate spindle user
106+ };
107+ systemd.services = let
108+ mkDataSyncScripts = source: target: {
109+ enableStrictShellChecks = true;
110+111+ preStart = lib.mkBefore ''
112+ mkdir -p ${target}
113+ ${lib.getExe pkgs.rsync} -a ${source}/ ${target}
114+ '';
115+116+ postStop = lib.mkAfter ''
117+ ${lib.getExe pkgs.rsync} -a ${target}/ ${source}
118+ '';
119+120+ serviceConfig.PermissionsStartOnly = true;
121+ };
122+ in {
123+ knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
124+ spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
125+ };
126+ })
127+ ];
128+ }
+1-1
patchutil/combinediff.go
···119 // we have f1 and f2, combine them
120 combined, err := combineFiles(f1, f2)
121 if err != nil {
122- fmt.Println(err)
123 }
124125 // combined can be nil commit 2 reverted all changes from commit 1
···119 // we have f1 and f2, combine them
120 combined, err := combineFiles(f1, f2)
121 if err != nil {
122+ // fmt.Println(err)
123 }
124125 // combined can be nil commit 2 reverted all changes from commit 1
···23import (
4 "database/sql"
5+ "strings"
67 _ "github.com/mattn/go-sqlite3"
8)
···12}
1314func Make(dbPath string) (*DB, error) {
15+ // https://github.com/mattn/go-sqlite3#connection-string
16+ opts := []string{
17+ "_foreign_keys=1",
18+ "_journal_mode=WAL",
19+ "_synchronous=NORMAL",
20+ "_auto_vacuum=incremental",
21+ }
22+23+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
24 if err != nil {
25 return nil, err
26 }
27+28+ // NOTE: If any other migration is added here, you MUST
29+ // copy the pattern in appview: use a single sql.Conn
30+ // for every migration.
3132 _, err = db.Exec(`
00000000033 create table if not exists _jetstream (
34 id integer primary key autoincrement,
35 last_time_us integer not null
···47 addedAt text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
4849 unique(owner, name)
50+ );
51+52+ create table if not exists spindle_members (
53+ -- identifiers for the record
54+ id integer primary key autoincrement,
55+ did text not null,
56+ rkey text not null,
57+58+ -- data
59+ instance text not null,
60+ subject text not null,
61+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
62+63+ -- constraints
64+ unique (did, instance, subject)
65 );
6667 -- status event for a single workflow