+495
-20
api/tangled/cbor_gen.go
+495
-20
api/tangled/cbor_gen.go
···
561
561
562
562
return nil
563
563
}
564
+
func (t *Comment) MarshalCBOR(w io.Writer) error {
565
+
if t == nil {
566
+
_, err := w.Write(cbg.CborNull)
567
+
return err
568
+
}
569
+
570
+
cw := cbg.NewCborWriter(w)
571
+
fieldCount := 7
572
+
573
+
if t.Mentions == nil {
574
+
fieldCount--
575
+
}
576
+
577
+
if t.References == nil {
578
+
fieldCount--
579
+
}
580
+
581
+
if t.ReplyTo == nil {
582
+
fieldCount--
583
+
}
584
+
585
+
if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
586
+
return err
587
+
}
588
+
589
+
// t.Body (string) (string)
590
+
if len("body") > 1000000 {
591
+
return xerrors.Errorf("Value in field \"body\" was too long")
592
+
}
593
+
594
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("body"))); err != nil {
595
+
return err
596
+
}
597
+
if _, err := cw.WriteString(string("body")); err != nil {
598
+
return err
599
+
}
600
+
601
+
if len(t.Body) > 1000000 {
602
+
return xerrors.Errorf("Value in field t.Body was too long")
603
+
}
604
+
605
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Body))); err != nil {
606
+
return err
607
+
}
608
+
if _, err := cw.WriteString(string(t.Body)); err != nil {
609
+
return err
610
+
}
611
+
612
+
// t.LexiconTypeID (string) (string)
613
+
if len("$type") > 1000000 {
614
+
return xerrors.Errorf("Value in field \"$type\" was too long")
615
+
}
616
+
617
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil {
618
+
return err
619
+
}
620
+
if _, err := cw.WriteString(string("$type")); err != nil {
621
+
return err
622
+
}
623
+
624
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("sh.tangled.comment"))); err != nil {
625
+
return err
626
+
}
627
+
if _, err := cw.WriteString(string("sh.tangled.comment")); err != nil {
628
+
return err
629
+
}
630
+
631
+
// t.ReplyTo (string) (string)
632
+
if t.ReplyTo != nil {
633
+
634
+
if len("replyTo") > 1000000 {
635
+
return xerrors.Errorf("Value in field \"replyTo\" was too long")
636
+
}
637
+
638
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("replyTo"))); err != nil {
639
+
return err
640
+
}
641
+
if _, err := cw.WriteString(string("replyTo")); err != nil {
642
+
return err
643
+
}
644
+
645
+
if t.ReplyTo == nil {
646
+
if _, err := cw.Write(cbg.CborNull); err != nil {
647
+
return err
648
+
}
649
+
} else {
650
+
if len(*t.ReplyTo) > 1000000 {
651
+
return xerrors.Errorf("Value in field t.ReplyTo was too long")
652
+
}
653
+
654
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.ReplyTo))); err != nil {
655
+
return err
656
+
}
657
+
if _, err := cw.WriteString(string(*t.ReplyTo)); err != nil {
658
+
return err
659
+
}
660
+
}
661
+
}
662
+
663
+
// t.Subject (string) (string)
664
+
if len("subject") > 1000000 {
665
+
return xerrors.Errorf("Value in field \"subject\" was too long")
666
+
}
667
+
668
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil {
669
+
return err
670
+
}
671
+
if _, err := cw.WriteString(string("subject")); err != nil {
672
+
return err
673
+
}
674
+
675
+
if len(t.Subject) > 1000000 {
676
+
return xerrors.Errorf("Value in field t.Subject was too long")
677
+
}
678
+
679
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Subject))); err != nil {
680
+
return err
681
+
}
682
+
if _, err := cw.WriteString(string(t.Subject)); err != nil {
683
+
return err
684
+
}
685
+
686
+
// t.Mentions ([]string) (slice)
687
+
if t.Mentions != nil {
688
+
689
+
if len("mentions") > 1000000 {
690
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
691
+
}
692
+
693
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
694
+
return err
695
+
}
696
+
if _, err := cw.WriteString(string("mentions")); err != nil {
697
+
return err
698
+
}
699
+
700
+
if len(t.Mentions) > 8192 {
701
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
702
+
}
703
+
704
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
705
+
return err
706
+
}
707
+
for _, v := range t.Mentions {
708
+
if len(v) > 1000000 {
709
+
return xerrors.Errorf("Value in field v was too long")
710
+
}
711
+
712
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
713
+
return err
714
+
}
715
+
if _, err := cw.WriteString(string(v)); err != nil {
716
+
return err
717
+
}
718
+
719
+
}
720
+
}
721
+
722
+
// t.CreatedAt (string) (string)
723
+
if len("createdAt") > 1000000 {
724
+
return xerrors.Errorf("Value in field \"createdAt\" was too long")
725
+
}
726
+
727
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
728
+
return err
729
+
}
730
+
if _, err := cw.WriteString(string("createdAt")); err != nil {
731
+
return err
732
+
}
733
+
734
+
if len(t.CreatedAt) > 1000000 {
735
+
return xerrors.Errorf("Value in field t.CreatedAt was too long")
736
+
}
737
+
738
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
739
+
return err
740
+
}
741
+
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
742
+
return err
743
+
}
744
+
745
+
// t.References ([]string) (slice)
746
+
if t.References != nil {
747
+
748
+
if len("references") > 1000000 {
749
+
return xerrors.Errorf("Value in field \"references\" was too long")
750
+
}
751
+
752
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
753
+
return err
754
+
}
755
+
if _, err := cw.WriteString(string("references")); err != nil {
756
+
return err
757
+
}
758
+
759
+
if len(t.References) > 8192 {
760
+
return xerrors.Errorf("Slice value in field t.References was too long")
761
+
}
762
+
763
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
764
+
return err
765
+
}
766
+
for _, v := range t.References {
767
+
if len(v) > 1000000 {
768
+
return xerrors.Errorf("Value in field v was too long")
769
+
}
770
+
771
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
772
+
return err
773
+
}
774
+
if _, err := cw.WriteString(string(v)); err != nil {
775
+
return err
776
+
}
777
+
778
+
}
779
+
}
780
+
return nil
781
+
}
782
+
783
+
func (t *Comment) UnmarshalCBOR(r io.Reader) (err error) {
784
+
*t = Comment{}
785
+
786
+
cr := cbg.NewCborReader(r)
787
+
788
+
maj, extra, err := cr.ReadHeader()
789
+
if err != nil {
790
+
return err
791
+
}
792
+
defer func() {
793
+
if err == io.EOF {
794
+
err = io.ErrUnexpectedEOF
795
+
}
796
+
}()
797
+
798
+
if maj != cbg.MajMap {
799
+
return fmt.Errorf("cbor input should be of type map")
800
+
}
801
+
802
+
if extra > cbg.MaxLength {
803
+
return fmt.Errorf("Comment: map struct too large (%d)", extra)
804
+
}
805
+
806
+
n := extra
807
+
808
+
nameBuf := make([]byte, 10)
809
+
for i := uint64(0); i < n; i++ {
810
+
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
811
+
if err != nil {
812
+
return err
813
+
}
814
+
815
+
if !ok {
816
+
// Field doesn't exist on this type, so ignore it
817
+
if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
818
+
return err
819
+
}
820
+
continue
821
+
}
822
+
823
+
switch string(nameBuf[:nameLen]) {
824
+
// t.Body (string) (string)
825
+
case "body":
826
+
827
+
{
828
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
829
+
if err != nil {
830
+
return err
831
+
}
832
+
833
+
t.Body = string(sval)
834
+
}
835
+
// t.LexiconTypeID (string) (string)
836
+
case "$type":
837
+
838
+
{
839
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
840
+
if err != nil {
841
+
return err
842
+
}
843
+
844
+
t.LexiconTypeID = string(sval)
845
+
}
846
+
// t.ReplyTo (string) (string)
847
+
case "replyTo":
848
+
849
+
{
850
+
b, err := cr.ReadByte()
851
+
if err != nil {
852
+
return err
853
+
}
854
+
if b != cbg.CborNull[0] {
855
+
if err := cr.UnreadByte(); err != nil {
856
+
return err
857
+
}
858
+
859
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
860
+
if err != nil {
861
+
return err
862
+
}
863
+
864
+
t.ReplyTo = (*string)(&sval)
865
+
}
866
+
}
867
+
// t.Subject (string) (string)
868
+
case "subject":
869
+
870
+
{
871
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
872
+
if err != nil {
873
+
return err
874
+
}
875
+
876
+
t.Subject = string(sval)
877
+
}
878
+
// t.Mentions ([]string) (slice)
879
+
case "mentions":
880
+
881
+
maj, extra, err = cr.ReadHeader()
882
+
if err != nil {
883
+
return err
884
+
}
885
+
886
+
if extra > 8192 {
887
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
888
+
}
889
+
890
+
if maj != cbg.MajArray {
891
+
return fmt.Errorf("expected cbor array")
892
+
}
893
+
894
+
if extra > 0 {
895
+
t.Mentions = make([]string, extra)
896
+
}
897
+
898
+
for i := 0; i < int(extra); i++ {
899
+
{
900
+
var maj byte
901
+
var extra uint64
902
+
var err error
903
+
_ = maj
904
+
_ = extra
905
+
_ = err
906
+
907
+
{
908
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
909
+
if err != nil {
910
+
return err
911
+
}
912
+
913
+
t.Mentions[i] = string(sval)
914
+
}
915
+
916
+
}
917
+
}
918
+
// t.CreatedAt (string) (string)
919
+
case "createdAt":
920
+
921
+
{
922
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
923
+
if err != nil {
924
+
return err
925
+
}
926
+
927
+
t.CreatedAt = string(sval)
928
+
}
929
+
// t.References ([]string) (slice)
930
+
case "references":
931
+
932
+
maj, extra, err = cr.ReadHeader()
933
+
if err != nil {
934
+
return err
935
+
}
936
+
937
+
if extra > 8192 {
938
+
return fmt.Errorf("t.References: array too large (%d)", extra)
939
+
}
940
+
941
+
if maj != cbg.MajArray {
942
+
return fmt.Errorf("expected cbor array")
943
+
}
944
+
945
+
if extra > 0 {
946
+
t.References = make([]string, extra)
947
+
}
948
+
949
+
for i := 0; i < int(extra); i++ {
950
+
{
951
+
var maj byte
952
+
var extra uint64
953
+
var err error
954
+
_ = maj
955
+
_ = extra
956
+
_ = err
957
+
958
+
{
959
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
960
+
if err != nil {
961
+
return err
962
+
}
963
+
964
+
t.References[i] = string(sval)
965
+
}
966
+
967
+
}
968
+
}
969
+
970
+
default:
971
+
// Field doesn't exist on this type, so ignore it
972
+
if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
973
+
return err
974
+
}
975
+
}
976
+
}
977
+
978
+
return nil
979
+
}
564
980
func (t *FeedReaction) MarshalCBOR(w io.Writer) error {
565
981
if t == nil {
566
982
_, err := w.Write(cbg.CborNull)
···
7934
8350
}
7935
8351
7936
8352
cw := cbg.NewCborWriter(w)
7937
-
fieldCount := 9
8353
+
fieldCount := 10
7938
8354
7939
8355
if t.Body == nil {
7940
8356
fieldCount--
7941
8357
}
7942
8358
7943
8359
if t.Mentions == nil {
8360
+
fieldCount--
8361
+
}
8362
+
8363
+
if t.Patch == nil {
7944
8364
fieldCount--
7945
8365
}
7946
8366
···
8008
8428
}
8009
8429
8010
8430
// t.Patch (string) (string)
8011
-
if len("patch") > 1000000 {
8012
-
return xerrors.Errorf("Value in field \"patch\" was too long")
8013
-
}
8431
+
if t.Patch != nil {
8014
8432
8015
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
8016
-
return err
8017
-
}
8018
-
if _, err := cw.WriteString(string("patch")); err != nil {
8019
-
return err
8020
-
}
8433
+
if len("patch") > 1000000 {
8434
+
return xerrors.Errorf("Value in field \"patch\" was too long")
8435
+
}
8021
8436
8022
-
if len(t.Patch) > 1000000 {
8023
-
return xerrors.Errorf("Value in field t.Patch was too long")
8024
-
}
8437
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
8438
+
return err
8439
+
}
8440
+
if _, err := cw.WriteString(string("patch")); err != nil {
8441
+
return err
8442
+
}
8443
+
8444
+
if t.Patch == nil {
8445
+
if _, err := cw.Write(cbg.CborNull); err != nil {
8446
+
return err
8447
+
}
8448
+
} else {
8449
+
if len(*t.Patch) > 1000000 {
8450
+
return xerrors.Errorf("Value in field t.Patch was too long")
8451
+
}
8025
8452
8026
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil {
8027
-
return err
8028
-
}
8029
-
if _, err := cw.WriteString(string(t.Patch)); err != nil {
8030
-
return err
8453
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil {
8454
+
return err
8455
+
}
8456
+
if _, err := cw.WriteString(string(*t.Patch)); err != nil {
8457
+
return err
8458
+
}
8459
+
}
8031
8460
}
8032
8461
8033
8462
// t.Title (string) (string)
···
8147
8576
return err
8148
8577
}
8149
8578
8579
+
// t.PatchBlob (util.LexBlob) (struct)
8580
+
if len("patchBlob") > 1000000 {
8581
+
return xerrors.Errorf("Value in field \"patchBlob\" was too long")
8582
+
}
8583
+
8584
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil {
8585
+
return err
8586
+
}
8587
+
if _, err := cw.WriteString(string("patchBlob")); err != nil {
8588
+
return err
8589
+
}
8590
+
8591
+
if err := t.PatchBlob.MarshalCBOR(cw); err != nil {
8592
+
return err
8593
+
}
8594
+
8150
8595
// t.References ([]string) (slice)
8151
8596
if t.References != nil {
8152
8597
···
8262
8707
case "patch":
8263
8708
8264
8709
{
8265
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8710
+
b, err := cr.ReadByte()
8266
8711
if err != nil {
8267
8712
return err
8268
8713
}
8714
+
if b != cbg.CborNull[0] {
8715
+
if err := cr.UnreadByte(); err != nil {
8716
+
return err
8717
+
}
8269
8718
8270
-
t.Patch = string(sval)
8719
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8720
+
if err != nil {
8721
+
return err
8722
+
}
8723
+
8724
+
t.Patch = (*string)(&sval)
8725
+
}
8271
8726
}
8272
8727
// t.Title (string) (string)
8273
8728
case "title":
···
8370
8825
}
8371
8826
8372
8827
t.CreatedAt = string(sval)
8828
+
}
8829
+
// t.PatchBlob (util.LexBlob) (struct)
8830
+
case "patchBlob":
8831
+
8832
+
{
8833
+
8834
+
b, err := cr.ReadByte()
8835
+
if err != nil {
8836
+
return err
8837
+
}
8838
+
if b != cbg.CborNull[0] {
8839
+
if err := cr.UnreadByte(); err != nil {
8840
+
return err
8841
+
}
8842
+
t.PatchBlob = new(util.LexBlob)
8843
+
if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil {
8844
+
return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err)
8845
+
}
8846
+
}
8847
+
8373
8848
}
8374
8849
// t.References ([]string) (slice)
8375
8850
case "references":
+12
-9
api/tangled/repopull.go
+12
-9
api/tangled/repopull.go
···
17
17
} //
18
18
// RECORDTYPE: RepoPull
19
19
type RepoPull struct {
20
-
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
21
-
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22
-
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
-
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
24
-
Patch string `json:"patch" cborgen:"patch"`
25
-
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
26
-
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
27
-
Target *RepoPull_Target `json:"target" cborgen:"target"`
28
-
Title string `json:"title" cborgen:"title"`
20
+
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
21
+
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
24
+
// patch: (deprecated) use patchBlob instead
25
+
Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
26
+
// patchBlob: patch content
27
+
PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"`
28
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
29
+
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
30
+
Target *RepoPull_Target `json:"target" cborgen:"target"`
31
+
Title string `json:"title" cborgen:"title"`
29
32
}
30
33
31
34
// RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
+27
api/tangled/tangledcomment.go
+27
api/tangled/tangledcomment.go
···
1
+
// Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.
2
+
3
+
package tangled
4
+
5
+
// schema: sh.tangled.comment
6
+
7
+
import (
8
+
"github.com/bluesky-social/indigo/lex/util"
9
+
)
10
+
11
+
const (
12
+
CommentNSID = "sh.tangled.comment"
13
+
)
14
+
15
+
func init() {
16
+
util.RegisterType("sh.tangled.comment", &Comment{})
17
+
} //
18
+
// RECORDTYPE: Comment
19
+
type Comment struct {
20
+
LexiconTypeID string `json:"$type,const=sh.tangled.comment" cborgen:"$type,const=sh.tangled.comment"`
21
+
Body string `json:"body" cborgen:"body"`
22
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
24
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
25
+
ReplyTo *string `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
26
+
Subject string `json:"subject" cborgen:"subject"`
27
+
}
+199
appview/db/comments.go
+199
appview/db/comments.go
···
1
+
package db
2
+
3
+
import (
4
+
"database/sql"
5
+
"fmt"
6
+
"maps"
7
+
"slices"
8
+
"sort"
9
+
"strings"
10
+
"time"
11
+
12
+
"github.com/bluesky-social/indigo/atproto/syntax"
13
+
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
15
+
)
16
+
17
+
func PutComment(tx *sql.Tx, c *models.Comment) error {
18
+
result, err := tx.Exec(
19
+
`insert into comments (
20
+
did,
21
+
rkey,
22
+
subject_at,
23
+
reply_to,
24
+
body,
25
+
pull_submission_id,
26
+
created
27
+
)
28
+
values (?, ?, ?, ?, ?, ?, ?)
29
+
on conflict(did, rkey) do update set
30
+
subject_at = excluded.subject_at,
31
+
reply_to = excluded.reply_to,
32
+
body = excluded.body,
33
+
edited = case
34
+
when
35
+
comments.subject_at != excluded.subject_at
36
+
or comments.body != excluded.body
37
+
or comments.reply_to != excluded.reply_to
38
+
then ?
39
+
else comments.edited
40
+
end`,
41
+
c.Did,
42
+
c.Rkey,
43
+
c.Subject,
44
+
c.ReplyTo,
45
+
c.Body,
46
+
c.PullSubmissionId,
47
+
c.Created.Format(time.RFC3339),
48
+
time.Now().Format(time.RFC3339),
49
+
)
50
+
if err != nil {
51
+
return err
52
+
}
53
+
54
+
c.Id, err = result.LastInsertId()
55
+
if err != nil {
56
+
return err
57
+
}
58
+
59
+
if err := putReferences(tx, c.AtUri(), c.References); err != nil {
60
+
return fmt.Errorf("put reference_links: %w", err)
61
+
}
62
+
63
+
return nil
64
+
}
65
+
66
+
func DeleteComments(e Execer, filters ...orm.Filter) error {
67
+
var conditions []string
68
+
var args []any
69
+
for _, filter := range filters {
70
+
conditions = append(conditions, filter.Condition())
71
+
args = append(args, filter.Arg()...)
72
+
}
73
+
74
+
whereClause := ""
75
+
if conditions != nil {
76
+
whereClause = " where " + strings.Join(conditions, " and ")
77
+
}
78
+
79
+
query := fmt.Sprintf(`update comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
80
+
81
+
_, err := e.Exec(query, args...)
82
+
return err
83
+
}
84
+
85
+
func GetComments(e Execer, filters ...orm.Filter) ([]models.Comment, error) {
86
+
commentMap := make(map[string]*models.Comment)
87
+
88
+
var conditions []string
89
+
var args []any
90
+
for _, filter := range filters {
91
+
conditions = append(conditions, filter.Condition())
92
+
args = append(args, filter.Arg()...)
93
+
}
94
+
95
+
whereClause := ""
96
+
if conditions != nil {
97
+
whereClause = " where " + strings.Join(conditions, " and ")
98
+
}
99
+
100
+
query := fmt.Sprintf(`
101
+
select
102
+
id,
103
+
did,
104
+
rkey,
105
+
subject_at,
106
+
reply_to,
107
+
body,
108
+
pull_submission_id,
109
+
created,
110
+
edited,
111
+
deleted
112
+
from
113
+
comments
114
+
%s
115
+
`, whereClause)
116
+
117
+
rows, err := e.Query(query, args...)
118
+
if err != nil {
119
+
return nil, err
120
+
}
121
+
122
+
for rows.Next() {
123
+
var comment models.Comment
124
+
var created string
125
+
var rkey, edited, deleted, replyTo sql.Null[string]
126
+
err := rows.Scan(
127
+
&comment.Id,
128
+
&comment.Did,
129
+
&rkey,
130
+
&comment.Subject,
131
+
&replyTo,
132
+
&comment.Body,
133
+
&comment.PullSubmissionId,
134
+
&created,
135
+
&edited,
136
+
&deleted,
137
+
)
138
+
if err != nil {
139
+
return nil, err
140
+
}
141
+
142
+
// this is a remnant from old times, newer comments always have rkey
143
+
if rkey.Valid {
144
+
comment.Rkey = rkey.V
145
+
}
146
+
147
+
if t, err := time.Parse(time.RFC3339, created); err == nil {
148
+
comment.Created = t
149
+
}
150
+
151
+
if edited.Valid {
152
+
if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
153
+
comment.Edited = &t
154
+
}
155
+
}
156
+
157
+
if deleted.Valid {
158
+
if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
159
+
comment.Deleted = &t
160
+
}
161
+
}
162
+
163
+
if replyTo.Valid {
164
+
rt := syntax.ATURI(replyTo.V)
165
+
comment.ReplyTo = &rt
166
+
}
167
+
168
+
atUri := comment.AtUri().String()
169
+
commentMap[atUri] = &comment
170
+
}
171
+
172
+
if err := rows.Err(); err != nil {
173
+
return nil, err
174
+
}
175
+
defer rows.Close()
176
+
177
+
// collect references from each comments
178
+
commentAts := slices.Collect(maps.Keys(commentMap))
179
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
180
+
if err != nil {
181
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
182
+
}
183
+
for commentAt, references := range allReferencs {
184
+
if comment, ok := commentMap[commentAt.String()]; ok {
185
+
comment.References = references
186
+
}
187
+
}
188
+
189
+
var comments []models.Comment
190
+
for _, c := range commentMap {
191
+
comments = append(comments, *c)
192
+
}
193
+
194
+
sort.Slice(comments, func(i, j int) bool {
195
+
return comments[i].Created.After(comments[j].Created)
196
+
})
197
+
198
+
return comments, nil
199
+
}
+81
appview/db/db.go
+81
appview/db/db.go
···
1173
1173
return err
1174
1174
})
1175
1175
1176
+
orm.RunMigration(conn, logger, "add-comments-table", func(tx *sql.Tx) error {
1177
+
_, err := tx.Exec(`
1178
+
drop table if exists comments;
1179
+
1180
+
create table comments (
1181
+
-- identifiers
1182
+
id integer primary key autoincrement,
1183
+
did text not null,
1184
+
collection text not null default 'sh.tangled.comment',
1185
+
rkey text not null,
1186
+
at_uri text generated always as ('at://' || did || '/' || collection || '/' || rkey) stored,
1187
+
1188
+
-- at identifiers
1189
+
subject_at text not null,
1190
+
reply_to text, -- at_uri of parent comment
1191
+
1192
+
pull_submission_id integer, -- dirty fix until we atprotate the pull-rounds
1193
+
1194
+
-- content
1195
+
body text not null,
1196
+
created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
1197
+
edited text,
1198
+
deleted text,
1199
+
1200
+
-- constraints
1201
+
unique(did, rkey)
1202
+
);
1203
+
1204
+
insert into comments (
1205
+
did,
1206
+
collection,
1207
+
rkey,
1208
+
subject_at,
1209
+
reply_to,
1210
+
body,
1211
+
created,
1212
+
edited,
1213
+
deleted
1214
+
)
1215
+
select
1216
+
did,
1217
+
'sh.tangled.repo.issue.comment',
1218
+
rkey,
1219
+
issue_at,
1220
+
reply_to,
1221
+
body,
1222
+
created,
1223
+
edited,
1224
+
deleted
1225
+
from issue_comments
1226
+
where rkey is not null;
1227
+
1228
+
insert into comments (
1229
+
did,
1230
+
collection,
1231
+
rkey,
1232
+
subject_at,
1233
+
pull_submission_id,
1234
+
body,
1235
+
created
1236
+
)
1237
+
select
1238
+
c.owner_did,
1239
+
'sh.tangled.repo.pull.comment',
1240
+
substr(
1241
+
substr(c.comment_at, 6 + instr(substr(c.comment_at, 6), '/')), -- nsid/rkey
1242
+
instr(
1243
+
substr(c.comment_at, 6 + instr(substr(c.comment_at, 6), '/')), -- nsid/rkey
1244
+
'/'
1245
+
) + 1
1246
+
), -- rkey
1247
+
p.at_uri,
1248
+
c.submission_id,
1249
+
c.body,
1250
+
c.created
1251
+
from pull_comments c
1252
+
join pulls p on c.repo_at = p.repo_at and c.pull_id = p.pull_id;
1253
+
`)
1254
+
return err
1255
+
})
1256
+
1176
1257
return &DB{
1177
1258
db,
1178
1259
logger,
+6
-186
appview/db/issues.go
+6
-186
appview/db/issues.go
···
100
100
}
101
101
102
102
func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) {
103
-
issueMap := make(map[string]*models.Issue) // at-uri -> issue
103
+
issueMap := make(map[syntax.ATURI]*models.Issue) // at-uri -> issue
104
104
105
105
var conditions []string
106
106
var args []any
···
196
196
}
197
197
}
198
198
199
-
atUri := issue.AtUri().String()
200
-
issueMap[atUri] = &issue
199
+
issueMap[issue.AtUri()] = &issue
201
200
}
202
201
203
202
// collect reverse repos
···
229
228
// collect comments
230
229
issueAts := slices.Collect(maps.Keys(issueMap))
231
230
232
-
comments, err := GetIssueComments(e, orm.FilterIn("issue_at", issueAts))
231
+
comments, err := GetComments(e, orm.FilterIn("subject_at", issueAts))
233
232
if err != nil {
234
233
return nil, fmt.Errorf("failed to query comments: %w", err)
235
234
}
236
235
for i := range comments {
237
-
issueAt := comments[i].IssueAt
236
+
issueAt := comments[i].Subject
238
237
if issue, ok := issueMap[issueAt]; ok {
239
238
issue.Comments = append(issue.Comments, comments[i])
240
239
}
···
246
245
return nil, fmt.Errorf("failed to query labels: %w", err)
247
246
}
248
247
for issueAt, labels := range allLabels {
249
-
if issue, ok := issueMap[issueAt.String()]; ok {
248
+
if issue, ok := issueMap[issueAt]; ok {
250
249
issue.Labels = labels
251
250
}
252
251
}
···
257
256
return nil, fmt.Errorf("failed to query reference_links: %w", err)
258
257
}
259
258
for issueAt, references := range allReferencs {
260
-
if issue, ok := issueMap[issueAt.String()]; ok {
259
+
if issue, ok := issueMap[issueAt]; ok {
261
260
issue.References = references
262
261
}
263
262
}
···
349
348
}
350
349
351
350
return ids, nil
352
-
}
353
-
354
-
func AddIssueComment(tx *sql.Tx, c models.IssueComment) (int64, error) {
355
-
result, err := tx.Exec(
356
-
`insert into issue_comments (
357
-
did,
358
-
rkey,
359
-
issue_at,
360
-
body,
361
-
reply_to,
362
-
created,
363
-
edited
364
-
)
365
-
values (?, ?, ?, ?, ?, ?, null)
366
-
on conflict(did, rkey) do update set
367
-
issue_at = excluded.issue_at,
368
-
body = excluded.body,
369
-
edited = case
370
-
when
371
-
issue_comments.issue_at != excluded.issue_at
372
-
or issue_comments.body != excluded.body
373
-
or issue_comments.reply_to != excluded.reply_to
374
-
then ?
375
-
else issue_comments.edited
376
-
end`,
377
-
c.Did,
378
-
c.Rkey,
379
-
c.IssueAt,
380
-
c.Body,
381
-
c.ReplyTo,
382
-
c.Created.Format(time.RFC3339),
383
-
time.Now().Format(time.RFC3339),
384
-
)
385
-
if err != nil {
386
-
return 0, err
387
-
}
388
-
389
-
id, err := result.LastInsertId()
390
-
if err != nil {
391
-
return 0, err
392
-
}
393
-
394
-
if err := putReferences(tx, c.AtUri(), c.References); err != nil {
395
-
return 0, fmt.Errorf("put reference_links: %w", err)
396
-
}
397
-
398
-
return id, nil
399
-
}
400
-
401
-
func DeleteIssueComments(e Execer, filters ...orm.Filter) error {
402
-
var conditions []string
403
-
var args []any
404
-
for _, filter := range filters {
405
-
conditions = append(conditions, filter.Condition())
406
-
args = append(args, filter.Arg()...)
407
-
}
408
-
409
-
whereClause := ""
410
-
if conditions != nil {
411
-
whereClause = " where " + strings.Join(conditions, " and ")
412
-
}
413
-
414
-
query := fmt.Sprintf(`update issue_comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
415
-
416
-
_, err := e.Exec(query, args...)
417
-
return err
418
-
}
419
-
420
-
func GetIssueComments(e Execer, filters ...orm.Filter) ([]models.IssueComment, error) {
421
-
commentMap := make(map[string]*models.IssueComment)
422
-
423
-
var conditions []string
424
-
var args []any
425
-
for _, filter := range filters {
426
-
conditions = append(conditions, filter.Condition())
427
-
args = append(args, filter.Arg()...)
428
-
}
429
-
430
-
whereClause := ""
431
-
if conditions != nil {
432
-
whereClause = " where " + strings.Join(conditions, " and ")
433
-
}
434
-
435
-
query := fmt.Sprintf(`
436
-
select
437
-
id,
438
-
did,
439
-
rkey,
440
-
issue_at,
441
-
reply_to,
442
-
body,
443
-
created,
444
-
edited,
445
-
deleted
446
-
from
447
-
issue_comments
448
-
%s
449
-
`, whereClause)
450
-
451
-
rows, err := e.Query(query, args...)
452
-
if err != nil {
453
-
return nil, err
454
-
}
455
-
defer rows.Close()
456
-
457
-
for rows.Next() {
458
-
var comment models.IssueComment
459
-
var created string
460
-
var rkey, edited, deleted, replyTo sql.Null[string]
461
-
err := rows.Scan(
462
-
&comment.Id,
463
-
&comment.Did,
464
-
&rkey,
465
-
&comment.IssueAt,
466
-
&replyTo,
467
-
&comment.Body,
468
-
&created,
469
-
&edited,
470
-
&deleted,
471
-
)
472
-
if err != nil {
473
-
return nil, err
474
-
}
475
-
476
-
// this is a remnant from old times, newer comments always have rkey
477
-
if rkey.Valid {
478
-
comment.Rkey = rkey.V
479
-
}
480
-
481
-
if t, err := time.Parse(time.RFC3339, created); err == nil {
482
-
comment.Created = t
483
-
}
484
-
485
-
if edited.Valid {
486
-
if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
487
-
comment.Edited = &t
488
-
}
489
-
}
490
-
491
-
if deleted.Valid {
492
-
if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
493
-
comment.Deleted = &t
494
-
}
495
-
}
496
-
497
-
if replyTo.Valid {
498
-
comment.ReplyTo = &replyTo.V
499
-
}
500
-
501
-
atUri := comment.AtUri().String()
502
-
commentMap[atUri] = &comment
503
-
}
504
-
505
-
if err = rows.Err(); err != nil {
506
-
return nil, err
507
-
}
508
-
509
-
// collect references for each comments
510
-
commentAts := slices.Collect(maps.Keys(commentMap))
511
-
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
512
-
if err != nil {
513
-
return nil, fmt.Errorf("failed to query reference_links: %w", err)
514
-
}
515
-
for commentAt, references := range allReferencs {
516
-
if comment, ok := commentMap[commentAt.String()]; ok {
517
-
comment.References = references
518
-
}
519
-
}
520
-
521
-
var comments []models.IssueComment
522
-
for _, c := range commentMap {
523
-
comments = append(comments, *c)
524
-
}
525
-
526
-
sort.Slice(comments, func(i, j int) bool {
527
-
return comments[i].Created.After(comments[j].Created)
528
-
})
529
-
530
-
return comments, nil
531
351
}
532
352
533
353
func DeleteIssues(tx *sql.Tx, did, rkey string) error {
+18
-11
appview/db/profile.go
+18
-11
appview/db/profile.go
···
20
20
timeline := models.ProfileTimeline{
21
21
ByMonth: make([]models.ByMonth, TimeframeMonths),
22
22
}
23
-
currentMonth := time.Now().Month()
23
+
now := time.Now()
24
24
timeframe := fmt.Sprintf("-%d months", TimeframeMonths)
25
25
26
26
pulls, err := GetPullsByOwnerDid(e, forDid, timeframe)
···
30
30
31
31
// group pulls by month
32
32
for _, pull := range pulls {
33
-
pullMonth := pull.Created.Month()
33
+
monthsAgo := monthsBetween(pull.Created, now)
34
34
35
-
if currentMonth-pullMonth >= TimeframeMonths {
35
+
if monthsAgo >= TimeframeMonths {
36
36
// shouldn't happen; but times are weird
37
37
continue
38
38
}
39
39
40
-
idx := currentMonth - pullMonth
40
+
idx := monthsAgo
41
41
items := &timeline.ByMonth[idx].PullEvents.Items
42
42
43
43
*items = append(*items, &pull)
···
53
53
}
54
54
55
55
for _, issue := range issues {
56
-
issueMonth := issue.Created.Month()
56
+
monthsAgo := monthsBetween(issue.Created, now)
57
57
58
-
if currentMonth-issueMonth >= TimeframeMonths {
58
+
if monthsAgo >= TimeframeMonths {
59
59
// shouldn't happen; but times are weird
60
60
continue
61
61
}
62
62
63
-
idx := currentMonth - issueMonth
63
+
idx := monthsAgo
64
64
items := &timeline.ByMonth[idx].IssueEvents.Items
65
65
66
66
*items = append(*items, &issue)
···
77
77
if repo.Source != "" {
78
78
sourceRepo, err = GetRepoByAtUri(e, repo.Source)
79
79
if err != nil {
80
-
return nil, err
80
+
// the source repo was not found, skip this bit
81
+
log.Println("profile", "err", err)
81
82
}
82
83
}
83
84
84
-
repoMonth := repo.Created.Month()
85
+
monthsAgo := monthsBetween(repo.Created, now)
85
86
86
-
if currentMonth-repoMonth >= TimeframeMonths {
87
+
if monthsAgo >= TimeframeMonths {
87
88
// shouldn't happen; but times are weird
88
89
continue
89
90
}
90
91
91
-
idx := currentMonth - repoMonth
92
+
idx := monthsAgo
92
93
93
94
items := &timeline.ByMonth[idx].RepoEvents
94
95
*items = append(*items, models.RepoEvent{
···
98
99
}
99
100
100
101
return &timeline, nil
102
+
}
103
+
104
+
func monthsBetween(from, to time.Time) int {
105
+
years := to.Year() - from.Year()
106
+
months := int(to.Month() - from.Month())
107
+
return years*12 + months
101
108
}
102
109
103
110
func UpsertProfile(tx *sql.Tx, profile *models.Profile) error {
+6
-121
appview/db/pulls.go
+6
-121
appview/db/pulls.go
···
447
447
return nil, err
448
448
}
449
449
450
-
// Get comments for all submissions using GetPullComments
450
+
// Get comments for all submissions using GetComments
451
451
submissionIds := slices.Collect(maps.Keys(submissionMap))
452
-
comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds))
452
+
comments, err := GetComments(e, orm.FilterIn("pull_submission_id", submissionIds))
453
453
if err != nil {
454
454
return nil, fmt.Errorf("failed to get pull comments: %w", err)
455
455
}
456
456
for _, comment := range comments {
457
-
if submission, ok := submissionMap[comment.SubmissionId]; ok {
458
-
submission.Comments = append(submission.Comments, comment)
457
+
if comment.PullSubmissionId != nil {
458
+
if submission, ok := submissionMap[*comment.PullSubmissionId]; ok {
459
+
submission.Comments = append(submission.Comments, comment)
460
+
}
459
461
}
460
462
}
461
463
···
475
477
return m, nil
476
478
}
477
479
478
-
func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) {
479
-
var conditions []string
480
-
var args []any
481
-
for _, filter := range filters {
482
-
conditions = append(conditions, filter.Condition())
483
-
args = append(args, filter.Arg()...)
484
-
}
485
-
486
-
whereClause := ""
487
-
if conditions != nil {
488
-
whereClause = " where " + strings.Join(conditions, " and ")
489
-
}
490
-
491
-
query := fmt.Sprintf(`
492
-
select
493
-
id,
494
-
pull_id,
495
-
submission_id,
496
-
repo_at,
497
-
owner_did,
498
-
comment_at,
499
-
body,
500
-
created
501
-
from
502
-
pull_comments
503
-
%s
504
-
order by
505
-
created asc
506
-
`, whereClause)
507
-
508
-
rows, err := e.Query(query, args...)
509
-
if err != nil {
510
-
return nil, err
511
-
}
512
-
defer rows.Close()
513
-
514
-
commentMap := make(map[string]*models.PullComment)
515
-
for rows.Next() {
516
-
var comment models.PullComment
517
-
var createdAt string
518
-
err := rows.Scan(
519
-
&comment.ID,
520
-
&comment.PullId,
521
-
&comment.SubmissionId,
522
-
&comment.RepoAt,
523
-
&comment.OwnerDid,
524
-
&comment.CommentAt,
525
-
&comment.Body,
526
-
&createdAt,
527
-
)
528
-
if err != nil {
529
-
return nil, err
530
-
}
531
-
532
-
if t, err := time.Parse(time.RFC3339, createdAt); err == nil {
533
-
comment.Created = t
534
-
}
535
-
536
-
atUri := comment.AtUri().String()
537
-
commentMap[atUri] = &comment
538
-
}
539
-
540
-
if err := rows.Err(); err != nil {
541
-
return nil, err
542
-
}
543
-
544
-
// collect references for each comments
545
-
commentAts := slices.Collect(maps.Keys(commentMap))
546
-
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
547
-
if err != nil {
548
-
return nil, fmt.Errorf("failed to query reference_links: %w", err)
549
-
}
550
-
for commentAt, references := range allReferencs {
551
-
if comment, ok := commentMap[commentAt.String()]; ok {
552
-
comment.References = references
553
-
}
554
-
}
555
-
556
-
var comments []models.PullComment
557
-
for _, c := range commentMap {
558
-
comments = append(comments, *c)
559
-
}
560
-
561
-
sort.Slice(comments, func(i, j int) bool {
562
-
return comments[i].Created.Before(comments[j].Created)
563
-
})
564
-
565
-
return comments, nil
566
-
}
567
-
568
480
// timeframe here is directly passed into the sql query filter, and any
569
481
// timeframe in the past should be negative; e.g.: "-3 months"
570
482
func GetPullsByOwnerDid(e Execer, did, timeframe string) ([]models.Pull, error) {
···
639
551
}
640
552
641
553
return pulls, nil
642
-
}
643
-
644
-
func NewPullComment(tx *sql.Tx, comment *models.PullComment) (int64, error) {
645
-
query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)`
646
-
res, err := tx.Exec(
647
-
query,
648
-
comment.OwnerDid,
649
-
comment.RepoAt,
650
-
comment.SubmissionId,
651
-
comment.CommentAt,
652
-
comment.PullId,
653
-
comment.Body,
654
-
)
655
-
if err != nil {
656
-
return 0, err
657
-
}
658
-
659
-
i, err := res.LastInsertId()
660
-
if err != nil {
661
-
return 0, err
662
-
}
663
-
664
-
if err := putReferences(tx, comment.AtUri(), comment.References); err != nil {
665
-
return 0, fmt.Errorf("put reference_links: %w", err)
666
-
}
667
-
668
-
return i, nil
669
554
}
670
555
671
556
func SetPullState(e Execer, repoAt syntax.ATURI, pullId int, pullState models.PullState) error {
+1
-1
appview/db/punchcard.go
+1
-1
appview/db/punchcard.go
+20
-32
appview/db/reference.go
+20
-32
appview/db/reference.go
···
11
11
"tangled.org/core/orm"
12
12
)
13
13
14
-
// ValidateReferenceLinks resolves refLinks to Issue/PR/IssueComment/PullComment ATURIs.
14
+
// ValidateReferenceLinks resolves refLinks to Issue/PR/Comment ATURIs.
15
15
// It will ignore missing refLinks.
16
16
func ValidateReferenceLinks(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
17
17
var (
···
53
53
values %s
54
54
)
55
55
select
56
-
i.did, i.rkey,
57
-
c.did, c.rkey
56
+
i.at_uri, c.at_uri
58
57
from input inp
59
58
join repos r
60
59
on r.did = inp.owner_did
···
62
61
join issues i
63
62
on i.repo_at = r.at_uri
64
63
and i.issue_id = inp.issue_id
65
-
left join issue_comments c
64
+
left join comments c
66
65
on inp.comment_id is not null
67
-
and c.issue_at = i.at_uri
66
+
and c.subject_at = i.at_uri
68
67
and c.id = inp.comment_id
69
68
`,
70
69
strings.Join(vals, ","),
···
79
78
80
79
for rows.Next() {
81
80
// Scan rows
82
-
var issueOwner, issueRkey string
83
-
var commentOwner, commentRkey sql.NullString
81
+
var issueUri string
82
+
var commentUri sql.NullString
84
83
var uri syntax.ATURI
85
-
if err := rows.Scan(&issueOwner, &issueRkey, &commentOwner, &commentRkey); err != nil {
84
+
if err := rows.Scan(&issueUri, &commentUri); err != nil {
86
85
return nil, err
87
86
}
88
-
if commentOwner.Valid && commentRkey.Valid {
89
-
uri = syntax.ATURI(fmt.Sprintf(
90
-
"at://%s/%s/%s",
91
-
commentOwner.String,
92
-
tangled.RepoIssueCommentNSID,
93
-
commentRkey.String,
94
-
))
87
+
if commentUri.Valid {
88
+
uri = syntax.ATURI(commentUri.String)
95
89
} else {
96
-
uri = syntax.ATURI(fmt.Sprintf(
97
-
"at://%s/%s/%s",
98
-
issueOwner,
99
-
tangled.RepoIssueNSID,
100
-
issueRkey,
101
-
))
90
+
uri = syntax.ATURI(issueUri)
102
91
}
103
92
uris = append(uris, uri)
104
93
}
···
124
113
values %s
125
114
)
126
115
select
127
-
p.owner_did, p.rkey,
128
-
c.comment_at
116
+
p.owner_did, p.rkey, c.at_uri
129
117
from input inp
130
118
join repos r
131
119
on r.did = inp.owner_did
···
133
121
join pulls p
134
122
on p.repo_at = r.at_uri
135
123
and p.pull_id = inp.pull_id
136
-
left join pull_comments c
124
+
left join comments c
137
125
on inp.comment_id is not null
138
-
and c.repo_at = r.at_uri and c.pull_id = p.pull_id
126
+
and c.subject_at = ('at://' || p.owner_did || '/' || 'sh.tangled.repo.pull' || '/' || p.rkey)
139
127
and c.id = inp.comment_id
140
128
`,
141
129
strings.Join(vals, ","),
···
283
271
return nil, fmt.Errorf("get issue backlinks: %w", err)
284
272
}
285
273
backlinks = append(backlinks, ls...)
286
-
ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.RepoIssueCommentNSID])
274
+
ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.CommentNSID])
287
275
if err != nil {
288
276
return nil, fmt.Errorf("get issue_comment backlinks: %w", err)
289
277
}
···
293
281
return nil, fmt.Errorf("get pull backlinks: %w", err)
294
282
}
295
283
backlinks = append(backlinks, ls...)
296
-
ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.RepoPullCommentNSID])
284
+
ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.CommentNSID])
297
285
if err != nil {
298
286
return nil, fmt.Errorf("get pull_comment backlinks: %w", err)
299
287
}
···
352
340
rows, err := e.Query(
353
341
fmt.Sprintf(
354
342
`select r.did, r.name, i.issue_id, c.id, i.title, i.open
355
-
from issue_comments c
343
+
from comments c
356
344
join issues i
357
-
on i.at_uri = c.issue_at
345
+
on i.at_uri = c.subject_at
358
346
join repos r
359
347
on r.at_uri = i.repo_at
360
348
where %s`,
···
428
416
if len(aturis) == 0 {
429
417
return nil, nil
430
418
}
431
-
filter := orm.FilterIn("c.comment_at", aturis)
419
+
filter := orm.FilterIn("c.at_uri", aturis)
432
420
rows, err := e.Query(
433
421
fmt.Sprintf(
434
422
`select r.did, r.name, p.pull_id, c.id, p.title, p.state
435
423
from repos r
436
424
join pulls p
437
425
on r.at_uri = p.repo_at
438
-
join pull_comments c
439
-
on r.at_uri = c.repo_at and p.pull_id = c.pull_id
426
+
join comments c
427
+
on ('at://' || p.owner_did || '/' || 'sh.tangled.repo.pull' || '/' || p.rkey) = c.subject_at
440
428
where %s`,
441
429
filter.Condition(),
442
430
),
+19
-11
appview/ingester.go
+19
-11
appview/ingester.go
···
79
79
err = i.ingestString(e)
80
80
case tangled.RepoIssueNSID:
81
81
err = i.ingestIssue(ctx, e)
82
-
case tangled.RepoIssueCommentNSID:
83
-
err = i.ingestIssueComment(e)
82
+
case tangled.CommentNSID:
83
+
err = i.ingestComment(e)
84
84
case tangled.LabelDefinitionNSID:
85
85
err = i.ingestLabelDefinition(e)
86
86
case tangled.LabelOpNSID:
···
868
868
return nil
869
869
}
870
870
871
-
func (i *Ingester) ingestIssueComment(e *jmodels.Event) error {
871
+
func (i *Ingester) ingestComment(e *jmodels.Event) error {
872
872
did := e.Did
873
873
rkey := e.Commit.RKey
874
874
875
875
var err error
876
876
877
-
l := i.Logger.With("handler", "ingestIssueComment", "nsid", e.Commit.Collection, "did", did, "rkey", rkey)
877
+
l := i.Logger.With("handler", "ingestComment", "nsid", e.Commit.Collection, "did", did, "rkey", rkey)
878
878
l.Info("ingesting record")
879
879
880
880
ddb, ok := i.Db.Execer.(*db.DB)
···
885
885
switch e.Commit.Operation {
886
886
case jmodels.CommitOperationCreate, jmodels.CommitOperationUpdate:
887
887
raw := json.RawMessage(e.Commit.Record)
888
-
record := tangled.RepoIssueComment{}
888
+
record := tangled.Comment{}
889
889
err = json.Unmarshal(raw, &record)
890
890
if err != nil {
891
891
return fmt.Errorf("invalid record: %w", err)
892
892
}
893
893
894
-
comment, err := models.IssueCommentFromRecord(did, rkey, record)
894
+
comment, err := models.CommentFromRecord(did, rkey, record)
895
895
if err != nil {
896
896
return fmt.Errorf("failed to parse comment from record: %w", err)
897
897
}
898
898
899
-
if err := i.Validator.ValidateIssueComment(comment); err != nil {
899
+
// TODO: ingest pull comments
900
+
// we aren't ingesting pull comments yet because pull itself isn't fully atprotated.
901
+
// so we cannot know which round this comment is pointing to
902
+
if comment.Subject.Collection().String() == tangled.RepoPullNSID {
903
+
l.Info("skip ingesting pull comments")
904
+
return nil
905
+
}
906
+
907
+
if err := comment.Validate(); err != nil {
900
908
return fmt.Errorf("failed to validate comment: %w", err)
901
909
}
902
910
···
906
914
}
907
915
defer tx.Rollback()
908
916
909
-
_, err = db.AddIssueComment(tx, *comment)
917
+
err = db.PutComment(tx, comment)
910
918
if err != nil {
911
-
return fmt.Errorf("failed to create issue comment: %w", err)
919
+
return fmt.Errorf("failed to create comment: %w", err)
912
920
}
913
921
914
922
return tx.Commit()
915
923
916
924
case jmodels.CommitOperationDelete:
917
-
if err := db.DeleteIssueComments(
925
+
if err := db.DeleteComments(
918
926
ddb,
919
927
orm.FilterEq("did", did),
920
928
orm.FilterEq("rkey", rkey),
921
929
); err != nil {
922
-
return fmt.Errorf("failed to delete issue comment record: %w", err)
930
+
return fmt.Errorf("failed to delete comment record: %w", err)
923
931
}
924
932
925
933
return nil
+31
-29
appview/issues/issues.go
+31
-29
appview/issues/issues.go
···
403
403
404
404
body := r.FormValue("body")
405
405
if body == "" {
406
-
rp.pages.Notice(w, "issue", "Body is required")
406
+
rp.pages.Notice(w, "issue-comment", "Body is required")
407
407
return
408
408
}
409
409
410
-
replyToUri := r.FormValue("reply-to")
411
-
var replyTo *string
412
-
if replyToUri != "" {
413
-
replyTo = &replyToUri
410
+
var replyTo *syntax.ATURI
411
+
replyToRaw := r.FormValue("reply-to")
412
+
if replyToRaw != "" {
413
+
aturi, err := syntax.ParseATURI(r.FormValue("reply-to"))
414
+
if err != nil {
415
+
rp.pages.Notice(w, "issue-comment", "reply-to should be valid AT-URI")
416
+
return
417
+
}
418
+
replyTo = &aturi
414
419
}
415
420
416
421
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
417
422
418
-
comment := models.IssueComment{
419
-
Did: user.Did,
423
+
comment := models.Comment{
424
+
Did: syntax.DID(user.Did),
420
425
Rkey: tid.TID(),
421
-
IssueAt: issue.AtUri().String(),
426
+
Subject: issue.AtUri(),
422
427
ReplyTo: replyTo,
423
428
Body: body,
424
429
Created: time.Now(),
425
430
Mentions: mentions,
426
431
References: references,
427
432
}
428
-
if err = rp.validator.ValidateIssueComment(&comment); err != nil {
433
+
if err = comment.Validate(); err != nil {
429
434
l.Error("failed to validate comment", "err", err)
430
435
rp.pages.Notice(w, "issue-comment", "Failed to create comment.")
431
436
return
···
441
446
442
447
// create a record first
443
448
resp, err := comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
444
-
Collection: tangled.RepoIssueCommentNSID,
445
-
Repo: comment.Did,
449
+
Collection: tangled.CommentNSID,
450
+
Repo: user.Did,
446
451
Rkey: comment.Rkey,
447
452
Record: &lexutil.LexiconTypeDecoder{
448
453
Val: &record,
···
468
473
}
469
474
defer tx.Rollback()
470
475
471
-
commentId, err := db.AddIssueComment(tx, comment)
476
+
err = db.PutComment(tx, &comment)
472
477
if err != nil {
473
478
l.Error("failed to create comment", "err", err)
474
479
rp.pages.Notice(w, "issue-comment", "Failed to create comment.")
···
484
489
// reset atUri to make rollback a no-op
485
490
atUri = ""
486
491
487
-
// notify about the new comment
488
-
comment.Id = commentId
489
-
490
-
rp.notifier.NewIssueComment(r.Context(), &comment, mentions)
492
+
rp.notifier.NewComment(r.Context(), &comment)
491
493
492
494
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
493
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", ownerSlashRepo, issue.IssueId, commentId))
495
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", ownerSlashRepo, issue.IssueId, comment.Id))
494
496
}
495
497
496
498
func (rp *Issues) IssueComment(w http.ResponseWriter, r *http.Request) {
···
505
507
}
506
508
507
509
commentId := chi.URLParam(r, "commentId")
508
-
comments, err := db.GetIssueComments(
510
+
comments, err := db.GetComments(
509
511
rp.db,
510
512
orm.FilterEq("id", commentId),
511
513
)
···
541
543
}
542
544
543
545
commentId := chi.URLParam(r, "commentId")
544
-
comments, err := db.GetIssueComments(
546
+
comments, err := db.GetComments(
545
547
rp.db,
546
548
orm.FilterEq("id", commentId),
547
549
)
···
557
559
}
558
560
comment := comments[0]
559
561
560
-
if comment.Did != user.Did {
562
+
if comment.Did.String() != user.Did {
561
563
l.Error("unauthorized comment edit", "expectedDid", comment.Did, "gotDid", user.Did)
562
564
http.Error(w, "you are not the author of this comment", http.StatusUnauthorized)
563
565
return
···
597
599
}
598
600
defer tx.Rollback()
599
601
600
-
_, err = db.AddIssueComment(tx, newComment)
602
+
err = db.PutComment(tx, &newComment)
601
603
if err != nil {
602
604
l.Error("failed to perferom update-description query", "err", err)
603
605
rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.")
···
608
610
// rkey is optional, it was introduced later
609
611
if newComment.Rkey != "" {
610
612
// update the record on pds
611
-
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoIssueCommentNSID, user.Did, comment.Rkey)
613
+
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.CommentNSID, user.Did, comment.Rkey)
612
614
if err != nil {
613
615
l.Error("failed to get record", "err", err, "did", newComment.Did, "rkey", newComment.Rkey)
614
616
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "Failed to update description, no record found on PDS.")
···
616
618
}
617
619
618
620
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
619
-
Collection: tangled.RepoIssueCommentNSID,
621
+
Collection: tangled.CommentNSID,
620
622
Repo: user.Did,
621
623
Rkey: newComment.Rkey,
622
624
SwapRecord: ex.Cid,
···
651
653
}
652
654
653
655
commentId := chi.URLParam(r, "commentId")
654
-
comments, err := db.GetIssueComments(
656
+
comments, err := db.GetComments(
655
657
rp.db,
656
658
orm.FilterEq("id", commentId),
657
659
)
···
687
689
}
688
690
689
691
commentId := chi.URLParam(r, "commentId")
690
-
comments, err := db.GetIssueComments(
692
+
comments, err := db.GetComments(
691
693
rp.db,
692
694
orm.FilterEq("id", commentId),
693
695
)
···
723
725
}
724
726
725
727
commentId := chi.URLParam(r, "commentId")
726
-
comments, err := db.GetIssueComments(
728
+
comments, err := db.GetComments(
727
729
rp.db,
728
730
orm.FilterEq("id", commentId),
729
731
)
···
739
741
}
740
742
comment := comments[0]
741
743
742
-
if comment.Did != user.Did {
744
+
if comment.Did.String() != user.Did {
743
745
l.Error("unauthorized action", "expectedDid", comment.Did, "gotDid", user.Did)
744
746
http.Error(w, "you are not the author of this comment", http.StatusUnauthorized)
745
747
return
···
752
754
753
755
// optimistic deletion
754
756
deleted := time.Now()
755
-
err = db.DeleteIssueComments(rp.db, orm.FilterEq("id", comment.Id))
757
+
err = db.DeleteComments(rp.db, orm.FilterEq("id", comment.Id))
756
758
if err != nil {
757
759
l.Error("failed to delete comment", "err", err)
758
760
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment")
···
768
770
return
769
771
}
770
772
_, err = comatproto.RepoDeleteRecord(r.Context(), client, &comatproto.RepoDeleteRecord_Input{
771
-
Collection: tangled.RepoIssueCommentNSID,
773
+
Collection: tangled.CommentNSID,
772
774
Repo: user.Did,
773
775
Rkey: comment.Rkey,
774
776
})
-5
appview/knots/knots.go
-5
appview/knots/knots.go
···
666
666
k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
667
667
return
668
668
}
669
-
if memberId.Handle.IsInvalidHandle() {
670
-
l.Error("failed to resolve member identity to handle")
671
-
k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
672
-
return
673
-
}
674
669
675
670
// remove from enforcer
676
671
err = k.Enforcer.RemoveKnotMember(domain, memberId.DID.String())
+4
appview/middleware/middleware.go
+4
appview/middleware/middleware.go
···
223
223
)
224
224
if err != nil {
225
225
log.Println("failed to resolve repo", "err", err)
226
+
w.WriteHeader(http.StatusNotFound)
226
227
mw.pages.ErrorKnot404(w)
227
228
return
228
229
}
···
240
241
f, err := mw.repoResolver.Resolve(r)
241
242
if err != nil {
242
243
log.Println("failed to fully resolve repo", err)
244
+
w.WriteHeader(http.StatusNotFound)
243
245
mw.pages.ErrorKnot404(w)
244
246
return
245
247
}
···
288
290
f, err := mw.repoResolver.Resolve(r)
289
291
if err != nil {
290
292
log.Println("failed to fully resolve repo", err)
293
+
w.WriteHeader(http.StatusNotFound)
291
294
mw.pages.ErrorKnot404(w)
292
295
return
293
296
}
···
324
327
f, err := mw.repoResolver.Resolve(r)
325
328
if err != nil {
326
329
log.Println("failed to fully resolve repo", err)
330
+
w.WriteHeader(http.StatusNotFound)
327
331
mw.pages.ErrorKnot404(w)
328
332
return
329
333
}
+117
appview/models/comment.go
+117
appview/models/comment.go
···
1
+
package models
2
+
3
+
import (
4
+
"fmt"
5
+
"strings"
6
+
"time"
7
+
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"tangled.org/core/api/tangled"
10
+
)
11
+
12
+
type Comment struct {
13
+
Id int64
14
+
Did syntax.DID
15
+
Rkey string
16
+
Subject syntax.ATURI
17
+
ReplyTo *syntax.ATURI
18
+
Body string
19
+
Created time.Time
20
+
Edited *time.Time
21
+
Deleted *time.Time
22
+
Mentions []syntax.DID
23
+
References []syntax.ATURI
24
+
PullSubmissionId *int
25
+
}
26
+
27
+
func (c *Comment) AtUri() syntax.ATURI {
28
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", c.Did, tangled.CommentNSID, c.Rkey))
29
+
}
30
+
31
+
func (c *Comment) AsRecord() tangled.Comment {
32
+
mentions := make([]string, len(c.Mentions))
33
+
for i, did := range c.Mentions {
34
+
mentions[i] = string(did)
35
+
}
36
+
references := make([]string, len(c.References))
37
+
for i, uri := range c.References {
38
+
references[i] = string(uri)
39
+
}
40
+
var replyTo *string
41
+
if c.ReplyTo != nil {
42
+
replyToStr := c.ReplyTo.String()
43
+
replyTo = &replyToStr
44
+
}
45
+
return tangled.Comment{
46
+
Subject: c.Subject.String(),
47
+
Body: c.Body,
48
+
CreatedAt: c.Created.Format(time.RFC3339),
49
+
ReplyTo: replyTo,
50
+
Mentions: mentions,
51
+
References: references,
52
+
}
53
+
}
54
+
55
+
func (c *Comment) IsTopLevel() bool {
56
+
return c.ReplyTo == nil
57
+
}
58
+
59
+
func (c *Comment) IsReply() bool {
60
+
return c.ReplyTo != nil
61
+
}
62
+
63
+
func (c *Comment) Validate() error {
64
+
// TODO: sanitize the body and then trim space
65
+
if sb := strings.TrimSpace(c.Body); sb == "" {
66
+
return fmt.Errorf("body is empty after HTML sanitization")
67
+
}
68
+
69
+
// if it's for PR, PullSubmissionId should not be nil
70
+
if c.Subject.Collection().String() == tangled.RepoPullNSID {
71
+
if c.PullSubmissionId == nil {
72
+
return fmt.Errorf("PullSubmissionId should not be nil")
73
+
}
74
+
}
75
+
return nil
76
+
}
77
+
78
+
func CommentFromRecord(did, rkey string, record tangled.Comment) (*Comment, error) {
79
+
created, err := time.Parse(time.RFC3339, record.CreatedAt)
80
+
if err != nil {
81
+
created = time.Now()
82
+
}
83
+
84
+
ownerDid := did
85
+
86
+
if _, err = syntax.ParseATURI(record.Subject); err != nil {
87
+
return nil, err
88
+
}
89
+
90
+
i := record
91
+
mentions := make([]syntax.DID, len(record.Mentions))
92
+
for i, did := range record.Mentions {
93
+
mentions[i] = syntax.DID(did)
94
+
}
95
+
references := make([]syntax.ATURI, len(record.References))
96
+
for i, uri := range i.References {
97
+
references[i] = syntax.ATURI(uri)
98
+
}
99
+
var replyTo *syntax.ATURI
100
+
if record.ReplyTo != nil {
101
+
replyToAtUri := syntax.ATURI(*record.ReplyTo)
102
+
replyTo = &replyToAtUri
103
+
}
104
+
105
+
comment := Comment{
106
+
Did: syntax.DID(ownerDid),
107
+
Rkey: rkey,
108
+
Body: record.Body,
109
+
Subject: syntax.ATURI(record.Subject),
110
+
ReplyTo: replyTo,
111
+
Created: created,
112
+
Mentions: mentions,
113
+
References: references,
114
+
}
115
+
116
+
return &comment, nil
117
+
}
+8
-89
appview/models/issue.go
+8
-89
appview/models/issue.go
···
26
26
27
27
// optionally, populate this when querying for reverse mappings
28
28
// like comment counts, parent repo etc.
29
-
Comments []IssueComment
29
+
Comments []Comment
30
30
Labels LabelState
31
31
Repo *Repo
32
32
}
···
62
62
}
63
63
64
64
type CommentListItem struct {
65
-
Self *IssueComment
66
-
Replies []*IssueComment
65
+
Self *Comment
66
+
Replies []*Comment
67
67
}
68
68
69
69
func (it *CommentListItem) Participants() []syntax.DID {
···
88
88
89
89
func (i *Issue) CommentList() []CommentListItem {
90
90
// Create a map to quickly find comments by their aturi
91
-
toplevel := make(map[string]*CommentListItem)
92
-
var replies []*IssueComment
91
+
toplevel := make(map[syntax.ATURI]*CommentListItem)
92
+
var replies []*Comment
93
93
94
94
// collect top level comments into the map
95
95
for _, comment := range i.Comments {
96
96
if comment.IsTopLevel() {
97
-
toplevel[comment.AtUri().String()] = &CommentListItem{
97
+
toplevel[comment.AtUri()] = &CommentListItem{
98
98
Self: &comment,
99
99
}
100
100
} else {
···
115
115
}
116
116
117
117
// sort everything
118
-
sortFunc := func(a, b *IssueComment) bool {
118
+
sortFunc := func(a, b *Comment) bool {
119
119
return a.Created.Before(b.Created)
120
120
}
121
121
sort.Slice(listing, func(i, j int) bool {
···
144
144
addParticipant(i.Did)
145
145
146
146
for _, c := range i.Comments {
147
-
addParticipant(c.Did)
147
+
addParticipant(c.Did.String())
148
148
}
149
149
150
150
return participants
···
171
171
Open: true, // new issues are open by default
172
172
}
173
173
}
174
-
175
-
type IssueComment struct {
176
-
Id int64
177
-
Did string
178
-
Rkey string
179
-
IssueAt string
180
-
ReplyTo *string
181
-
Body string
182
-
Created time.Time
183
-
Edited *time.Time
184
-
Deleted *time.Time
185
-
Mentions []syntax.DID
186
-
References []syntax.ATURI
187
-
}
188
-
189
-
func (i *IssueComment) AtUri() syntax.ATURI {
190
-
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueCommentNSID, i.Rkey))
191
-
}
192
-
193
-
func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
194
-
mentions := make([]string, len(i.Mentions))
195
-
for i, did := range i.Mentions {
196
-
mentions[i] = string(did)
197
-
}
198
-
references := make([]string, len(i.References))
199
-
for i, uri := range i.References {
200
-
references[i] = string(uri)
201
-
}
202
-
return tangled.RepoIssueComment{
203
-
Body: i.Body,
204
-
Issue: i.IssueAt,
205
-
CreatedAt: i.Created.Format(time.RFC3339),
206
-
ReplyTo: i.ReplyTo,
207
-
Mentions: mentions,
208
-
References: references,
209
-
}
210
-
}
211
-
212
-
func (i *IssueComment) IsTopLevel() bool {
213
-
return i.ReplyTo == nil
214
-
}
215
-
216
-
func (i *IssueComment) IsReply() bool {
217
-
return i.ReplyTo != nil
218
-
}
219
-
220
-
func IssueCommentFromRecord(did, rkey string, record tangled.RepoIssueComment) (*IssueComment, error) {
221
-
created, err := time.Parse(time.RFC3339, record.CreatedAt)
222
-
if err != nil {
223
-
created = time.Now()
224
-
}
225
-
226
-
ownerDid := did
227
-
228
-
if _, err = syntax.ParseATURI(record.Issue); err != nil {
229
-
return nil, err
230
-
}
231
-
232
-
i := record
233
-
mentions := make([]syntax.DID, len(record.Mentions))
234
-
for i, did := range record.Mentions {
235
-
mentions[i] = syntax.DID(did)
236
-
}
237
-
references := make([]syntax.ATURI, len(record.References))
238
-
for i, uri := range i.References {
239
-
references[i] = syntax.ATURI(uri)
240
-
}
241
-
242
-
comment := IssueComment{
243
-
Did: ownerDid,
244
-
Rkey: rkey,
245
-
Body: record.Body,
246
-
IssueAt: record.Issue,
247
-
ReplyTo: record.ReplyTo,
248
-
Created: created,
249
-
Mentions: mentions,
250
-
References: references,
251
-
}
252
-
253
-
return &comment, nil
254
-
}
+3
-47
appview/models/pull.go
+3
-47
appview/models/pull.go
···
83
83
Repo *Repo
84
84
}
85
85
86
+
// NOTE: This method does not include patch blob in returned atproto record
86
87
func (p Pull) AsRecord() tangled.RepoPull {
87
88
var source *tangled.RepoPull_Source
88
89
if p.PullSource != nil {
···
113
114
Repo: p.RepoAt.String(),
114
115
Branch: p.TargetBranch,
115
116
},
116
-
Patch: p.LatestPatch(),
117
117
Source: source,
118
118
}
119
119
return record
···
138
138
RoundNumber int
139
139
Patch string
140
140
Combined string
141
-
Comments []PullComment
141
+
Comments []Comment
142
142
SourceRev string // include the rev that was used to create this submission: only for branch/fork PRs
143
143
144
144
// meta
145
145
Created time.Time
146
146
}
147
147
148
-
type PullComment struct {
149
-
// ids
150
-
ID int
151
-
PullId int
152
-
SubmissionId int
153
-
154
-
// at ids
155
-
RepoAt string
156
-
OwnerDid string
157
-
CommentAt string
158
-
159
-
// content
160
-
Body string
161
-
162
-
// meta
163
-
Mentions []syntax.DID
164
-
References []syntax.ATURI
165
-
166
-
// meta
167
-
Created time.Time
168
-
}
169
-
170
-
func (p *PullComment) AtUri() syntax.ATURI {
171
-
return syntax.ATURI(p.CommentAt)
172
-
}
173
-
174
-
// func (p *PullComment) AsRecord() tangled.RepoPullComment {
175
-
// mentions := make([]string, len(p.Mentions))
176
-
// for i, did := range p.Mentions {
177
-
// mentions[i] = string(did)
178
-
// }
179
-
// references := make([]string, len(p.References))
180
-
// for i, uri := range p.References {
181
-
// references[i] = string(uri)
182
-
// }
183
-
// return tangled.RepoPullComment{
184
-
// Pull: p.PullAt,
185
-
// Body: p.Body,
186
-
// Mentions: mentions,
187
-
// References: references,
188
-
// CreatedAt: p.Created.Format(time.RFC3339),
189
-
// }
190
-
// }
191
-
192
148
func (p *Pull) LastRoundNumber() int {
193
149
return len(p.Submissions) - 1
194
150
}
···
289
245
addParticipant(s.PullAt.Authority().String())
290
246
291
247
for _, c := range s.Comments {
292
-
addParticipant(c.OwnerDid)
248
+
addParticipant(c.Did.String())
293
249
}
294
250
295
251
return participants
+111
-113
appview/notify/db/db.go
+111
-113
appview/notify/db/db.go
···
74
74
// no-op
75
75
}
76
76
77
-
func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
78
-
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
77
+
func (n *databaseNotifier) NewComment(ctx context.Context, comment *models.Comment) {
78
+
var (
79
+
// built the recipients list:
80
+
// - the owner of the repo
81
+
// - | if the comment is a reply -> everybody on that thread
82
+
// | if the comment is a top level -> just the issue owner
83
+
// - remove mentioned users from the recipients list
84
+
recipients = sets.New[syntax.DID]()
85
+
entityType string
86
+
entityId string
87
+
repoId *int64
88
+
issueId *int64
89
+
pullId *int64
90
+
)
91
+
92
+
subjectDid, err := comment.Subject.Authority().AsDID()
79
93
if err != nil {
80
-
log.Printf("failed to fetch collaborators: %v", err)
94
+
log.Printf("NewComment: expected did based at-uri for comment.subject")
81
95
return
82
96
}
97
+
switch comment.Subject.Collection() {
98
+
case tangled.RepoIssueNSID:
99
+
issues, err := db.GetIssues(
100
+
n.db,
101
+
orm.FilterEq("did", subjectDid),
102
+
orm.FilterEq("rkey", comment.Subject.RecordKey()),
103
+
)
104
+
if err != nil {
105
+
log.Printf("NewComment: failed to get issues: %v", err)
106
+
return
107
+
}
108
+
if len(issues) == 0 {
109
+
log.Printf("NewComment: no issue found for %s", comment.Subject)
110
+
return
111
+
}
112
+
issue := issues[0]
83
113
84
-
// build the recipients list
85
-
// - owner of the repo
86
-
// - collaborators in the repo
87
-
// - remove users already mentioned
88
-
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
89
-
for _, c := range collaborators {
90
-
recipients.Insert(c.SubjectDid)
114
+
recipients.Insert(syntax.DID(issue.Repo.Did))
115
+
if comment.IsReply() {
116
+
// if this comment is a reply, then notify everybody in that thread
117
+
parentAtUri := *comment.ReplyTo
118
+
119
+
// find the parent thread, and add all DIDs from here to the recipient list
120
+
for _, t := range issue.CommentList() {
121
+
if t.Self.AtUri() == parentAtUri {
122
+
for _, p := range t.Participants() {
123
+
recipients.Insert(p)
124
+
}
125
+
}
126
+
}
127
+
} else {
128
+
// not a reply, notify just the issue author
129
+
recipients.Insert(syntax.DID(issue.Did))
130
+
}
131
+
132
+
entityType = "issue"
133
+
entityId = issue.AtUri().String()
134
+
repoId = &issue.Repo.Id
135
+
issueId = &issue.Id
136
+
case tangled.RepoPullNSID:
137
+
pulls, err := db.GetPullsWithLimit(
138
+
n.db,
139
+
1,
140
+
orm.FilterEq("owner_did", subjectDid),
141
+
orm.FilterEq("rkey", comment.Subject.RecordKey()),
142
+
)
143
+
if err != nil {
144
+
log.Printf("NewComment: failed to get pulls: %v", err)
145
+
return
146
+
}
147
+
if len(pulls) == 0 {
148
+
log.Printf("NewComment: no pull found for %s", comment.Subject)
149
+
return
150
+
}
151
+
pull := pulls[0]
152
+
153
+
pull.Repo, err = db.GetRepo(n.db, orm.FilterEq("at_uri", pull.RepoAt))
154
+
if err != nil {
155
+
log.Printf("NewComment: failed to get repos: %v", err)
156
+
return
157
+
}
158
+
159
+
recipients.Insert(syntax.DID(pull.Repo.Did))
160
+
for _, p := range pull.Participants() {
161
+
recipients.Insert(syntax.DID(p))
162
+
}
163
+
164
+
entityType = "pull"
165
+
entityId = pull.AtUri().String()
166
+
repoId = &pull.Repo.Id
167
+
p := int64(pull.ID)
168
+
pullId = &p
169
+
default:
170
+
return // no-op
91
171
}
92
-
for _, m := range mentions {
172
+
173
+
for _, m := range comment.Mentions {
93
174
recipients.Remove(m)
94
175
}
95
176
96
-
actorDid := syntax.DID(issue.Did)
97
-
entityType := "issue"
98
-
entityId := issue.AtUri().String()
99
-
repoId := &issue.Repo.Id
100
-
issueId := &issue.Id
101
-
var pullId *int64
102
-
103
177
n.notifyEvent(
104
-
actorDid,
178
+
comment.Did,
105
179
recipients,
106
-
models.NotificationTypeIssueCreated,
180
+
models.NotificationTypeIssueCommented,
107
181
entityType,
108
182
entityId,
109
183
repoId,
···
111
185
pullId,
112
186
)
113
187
n.notifyEvent(
114
-
actorDid,
115
-
sets.Collect(slices.Values(mentions)),
188
+
comment.Did,
189
+
sets.Collect(slices.Values(comment.Mentions)),
116
190
models.NotificationTypeUserMentioned,
117
191
entityType,
118
192
entityId,
···
122
196
)
123
197
}
124
198
125
-
func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
126
-
issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt))
199
+
func (n *databaseNotifier) DeleteComment(ctx context.Context, comment *models.Comment) {
200
+
// no-op
201
+
}
202
+
203
+
func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
204
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
127
205
if err != nil {
128
-
log.Printf("NewIssueComment: failed to get issues: %v", err)
206
+
log.Printf("failed to fetch collaborators: %v", err)
129
207
return
130
208
}
131
-
if len(issues) == 0 {
132
-
log.Printf("NewIssueComment: no issue found for %s", comment.IssueAt)
133
-
return
134
-
}
135
-
issue := issues[0]
136
209
137
-
// built the recipients list:
138
-
// - the owner of the repo
139
-
// - | if the comment is a reply -> everybody on that thread
140
-
// | if the comment is a top level -> just the issue owner
141
-
// - remove mentioned users from the recipients list
210
+
// build the recipients list
211
+
// - owner of the repo
212
+
// - collaborators in the repo
213
+
// - remove users already mentioned
142
214
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
143
-
144
-
if comment.IsReply() {
145
-
// if this comment is a reply, then notify everybody in that thread
146
-
parentAtUri := *comment.ReplyTo
147
-
148
-
// find the parent thread, and add all DIDs from here to the recipient list
149
-
for _, t := range issue.CommentList() {
150
-
if t.Self.AtUri().String() == parentAtUri {
151
-
for _, p := range t.Participants() {
152
-
recipients.Insert(p)
153
-
}
154
-
}
155
-
}
156
-
} else {
157
-
// not a reply, notify just the issue author
158
-
recipients.Insert(syntax.DID(issue.Did))
215
+
for _, c := range collaborators {
216
+
recipients.Insert(c.SubjectDid)
159
217
}
160
-
161
218
for _, m := range mentions {
162
219
recipients.Remove(m)
163
220
}
164
221
165
-
actorDid := syntax.DID(comment.Did)
222
+
actorDid := syntax.DID(issue.Did)
166
223
entityType := "issue"
167
224
entityId := issue.AtUri().String()
168
225
repoId := &issue.Repo.Id
···
172
229
n.notifyEvent(
173
230
actorDid,
174
231
recipients,
175
-
models.NotificationTypeIssueCommented,
232
+
models.NotificationTypeIssueCreated,
176
233
entityType,
177
234
entityId,
178
235
repoId,
···
252
309
actorDid,
253
310
recipients,
254
311
eventType,
255
-
entityType,
256
-
entityId,
257
-
repoId,
258
-
issueId,
259
-
pullId,
260
-
)
261
-
}
262
-
263
-
func (n *databaseNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) {
264
-
pull, err := db.GetPull(n.db,
265
-
syntax.ATURI(comment.RepoAt),
266
-
comment.PullId,
267
-
)
268
-
if err != nil {
269
-
log.Printf("NewPullComment: failed to get pulls: %v", err)
270
-
return
271
-
}
272
-
273
-
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt))
274
-
if err != nil {
275
-
log.Printf("NewPullComment: failed to get repos: %v", err)
276
-
return
277
-
}
278
-
279
-
// build up the recipients list:
280
-
// - repo owner
281
-
// - all pull participants
282
-
// - remove those already mentioned
283
-
recipients := sets.Singleton(syntax.DID(repo.Did))
284
-
for _, p := range pull.Participants() {
285
-
recipients.Insert(syntax.DID(p))
286
-
}
287
-
for _, m := range mentions {
288
-
recipients.Remove(m)
289
-
}
290
-
291
-
actorDid := syntax.DID(comment.OwnerDid)
292
-
eventType := models.NotificationTypePullCommented
293
-
entityType := "pull"
294
-
entityId := pull.AtUri().String()
295
-
repoId := &repo.Id
296
-
var issueId *int64
297
-
p := int64(pull.ID)
298
-
pullId := &p
299
-
300
-
n.notifyEvent(
301
-
actorDid,
302
-
recipients,
303
-
eventType,
304
-
entityType,
305
-
entityId,
306
-
repoId,
307
-
issueId,
308
-
pullId,
309
-
)
310
-
n.notifyEvent(
311
-
actorDid,
312
-
sets.Collect(slices.Values(mentions)),
313
-
models.NotificationTypeUserMentioned,
314
312
entityType,
315
313
entityId,
316
314
repoId,
+8
-8
appview/notify/merged_notifier.go
+8
-8
appview/notify/merged_notifier.go
···
53
53
m.fanout("DeleteStar", ctx, star)
54
54
}
55
55
56
-
func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
57
-
m.fanout("NewIssue", ctx, issue, mentions)
56
+
func (m *mergedNotifier) NewComment(ctx context.Context, comment *models.Comment) {
57
+
m.fanout("NewComment", ctx, comment)
58
58
}
59
59
60
-
func (m *mergedNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
61
-
m.fanout("NewIssueComment", ctx, comment, mentions)
60
+
func (m *mergedNotifier) DeleteComment(ctx context.Context, comment *models.Comment) {
61
+
m.fanout("DeleteComment", ctx, comment)
62
+
}
63
+
64
+
func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
65
+
m.fanout("NewIssue", ctx, issue, mentions)
62
66
}
63
67
64
68
func (m *mergedNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {
···
79
83
80
84
func (m *mergedNotifier) NewPull(ctx context.Context, pull *models.Pull) {
81
85
m.fanout("NewPull", ctx, pull)
82
-
}
83
-
84
-
func (m *mergedNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) {
85
-
m.fanout("NewPullComment", ctx, comment, mentions)
86
86
}
87
87
88
88
func (m *mergedNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
+7
-7
appview/notify/notifier.go
+7
-7
appview/notify/notifier.go
···
13
13
NewStar(ctx context.Context, star *models.Star)
14
14
DeleteStar(ctx context.Context, star *models.Star)
15
15
16
+
NewComment(ctx context.Context, comment *models.Comment)
17
+
DeleteComment(ctx context.Context, comment *models.Comment)
18
+
16
19
NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID)
17
-
NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID)
18
20
NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue)
19
21
DeleteIssue(ctx context.Context, issue *models.Issue)
20
22
···
22
24
DeleteFollow(ctx context.Context, follow *models.Follow)
23
25
24
26
NewPull(ctx context.Context, pull *models.Pull)
25
-
NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID)
26
27
NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull)
27
28
28
29
UpdateProfile(ctx context.Context, profile *models.Profile)
···
42
43
func (m *BaseNotifier) NewStar(ctx context.Context, star *models.Star) {}
43
44
func (m *BaseNotifier) DeleteStar(ctx context.Context, star *models.Star) {}
44
45
46
+
func (m *BaseNotifier) NewComment(ctx context.Context, comment *models.Comment) {}
47
+
func (m *BaseNotifier) DeleteComment(ctx context.Context, comment *models.Comment) {}
48
+
45
49
func (m *BaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {}
46
-
func (m *BaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
47
-
}
48
50
func (m *BaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {}
49
51
func (m *BaseNotifier) DeleteIssue(ctx context.Context, issue *models.Issue) {}
50
52
51
53
func (m *BaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {}
52
54
func (m *BaseNotifier) DeleteFollow(ctx context.Context, follow *models.Follow) {}
53
55
54
-
func (m *BaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {}
55
-
func (m *BaseNotifier) NewPullComment(ctx context.Context, models *models.PullComment, mentions []syntax.DID) {
56
-
}
56
+
func (m *BaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {}
57
57
func (m *BaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {}
58
58
59
59
func (m *BaseNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) {}
+5
-20
appview/notify/posthog/notifier.go
+5
-20
appview/notify/posthog/notifier.go
···
86
86
}
87
87
}
88
88
89
-
func (n *posthogNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) {
90
-
err := n.client.Enqueue(posthog.Capture{
91
-
DistinctId: comment.OwnerDid,
92
-
Event: "new_pull_comment",
93
-
Properties: posthog.Properties{
94
-
"repo_at": comment.RepoAt,
95
-
"pull_id": comment.PullId,
96
-
"mentions": mentions,
97
-
},
98
-
})
99
-
if err != nil {
100
-
log.Println("failed to enqueue posthog event:", err)
101
-
}
102
-
}
103
-
104
89
func (n *posthogNotifier) NewPullClosed(ctx context.Context, pull *models.Pull) {
105
90
err := n.client.Enqueue(posthog.Capture{
106
91
DistinctId: pull.OwnerDid,
···
180
165
}
181
166
}
182
167
183
-
func (n *posthogNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
168
+
func (n *posthogNotifier) NewComment(ctx context.Context, comment *models.Comment) {
184
169
err := n.client.Enqueue(posthog.Capture{
185
-
DistinctId: comment.Did,
186
-
Event: "new_issue_comment",
170
+
DistinctId: comment.Did.String(),
171
+
Event: "new_comment",
187
172
Properties: posthog.Properties{
188
-
"issue_at": comment.IssueAt,
189
-
"mentions": mentions,
173
+
"subject_at": comment.Subject,
174
+
"mentions": comment.Mentions,
190
175
},
191
176
})
192
177
if err != nil {
+13
-3
appview/pages/markup/extension/atlink.go
+13
-3
appview/pages/markup/extension/atlink.go
···
35
35
return KindAt
36
36
}
37
37
38
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
38
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
39
+
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
39
40
40
41
type atParser struct{}
41
42
···
55
56
if m == nil {
56
57
return nil
57
58
}
59
+
60
+
// Check for all links in the markdown to see if the handle found is inside one
61
+
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
62
+
for _, linkMatch := range linksIndexes {
63
+
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
64
+
return nil
65
+
}
66
+
}
67
+
58
68
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
59
69
block.Advance(m[1])
60
70
node := &AtNode{}
···
87
97
88
98
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
89
99
if entering {
90
-
w.WriteString(`<a href="/@`)
100
+
w.WriteString(`<a href="/`)
91
101
w.WriteString(n.(*AtNode).Handle)
92
-
w.WriteString(`" class="mention font-bold">`)
102
+
w.WriteString(`" class="mention">`)
93
103
} else {
94
104
w.WriteString("</a>")
95
105
}
+121
appview/pages/markup/markdown_test.go
+121
appview/pages/markup/markdown_test.go
···
1
+
package markup
2
+
3
+
import (
4
+
"bytes"
5
+
"testing"
6
+
)
7
+
8
+
func TestAtExtension_Rendering(t *testing.T) {
9
+
tests := []struct {
10
+
name string
11
+
markdown string
12
+
expected string
13
+
}{
14
+
{
15
+
name: "renders simple at mention",
16
+
markdown: "Hello @user.tngl.sh!",
17
+
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
18
+
},
19
+
{
20
+
name: "renders multiple at mentions",
21
+
markdown: "Hi @alice.tngl.sh and @bob.example.com",
22
+
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
23
+
},
24
+
{
25
+
name: "renders at mention in parentheses",
26
+
markdown: "Check this out (@user.tngl.sh)",
27
+
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
28
+
},
29
+
{
30
+
name: "does not render email",
31
+
markdown: "Contact me at test@example.com",
32
+
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
33
+
},
34
+
{
35
+
name: "renders at mention with hyphen",
36
+
markdown: "Follow @user-name.tngl.sh",
37
+
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
38
+
},
39
+
{
40
+
name: "renders at mention with numbers",
41
+
markdown: "@user123.test456.social",
42
+
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
43
+
},
44
+
{
45
+
name: "at mention at start of line",
46
+
markdown: "@user.tngl.sh is cool",
47
+
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
48
+
},
49
+
}
50
+
51
+
for _, tt := range tests {
52
+
t.Run(tt.name, func(t *testing.T) {
53
+
md := NewMarkdown()
54
+
55
+
var buf bytes.Buffer
56
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
57
+
t.Fatalf("failed to convert markdown: %v", err)
58
+
}
59
+
60
+
result := buf.String()
61
+
if result != tt.expected+"\n" {
62
+
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
63
+
}
64
+
})
65
+
}
66
+
}
67
+
68
+
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
69
+
tests := []struct {
70
+
name string
71
+
markdown string
72
+
contains string
73
+
}{
74
+
{
75
+
name: "at mention with bold",
76
+
markdown: "**Hello @user.tngl.sh**",
77
+
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
78
+
},
79
+
{
80
+
name: "at mention with italic",
81
+
markdown: "*Check @user.tngl.sh*",
82
+
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
83
+
},
84
+
{
85
+
name: "at mention in list",
86
+
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
87
+
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
88
+
},
89
+
{
90
+
name: "at mention in link",
91
+
markdown: "[@regnault.dev](https://regnault.dev)",
92
+
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
93
+
},
94
+
{
95
+
name: "at mention in link again",
96
+
markdown: "[check out @regnault.dev](https://regnault.dev)",
97
+
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
98
+
},
99
+
{
100
+
name: "at mention in link again, multiline",
101
+
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
102
+
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
103
+
},
104
+
}
105
+
106
+
for _, tt := range tests {
107
+
t.Run(tt.name, func(t *testing.T) {
108
+
md := NewMarkdown()
109
+
110
+
var buf bytes.Buffer
111
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
112
+
t.Fatalf("failed to convert markdown: %v", err)
113
+
}
114
+
115
+
result := buf.String()
116
+
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
117
+
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
118
+
}
119
+
})
120
+
}
121
+
}
+4
-4
appview/pages/pages.go
+4
-4
appview/pages/pages.go
···
988
988
LoggedInUser *oauth.User
989
989
RepoInfo repoinfo.RepoInfo
990
990
Issue *models.Issue
991
-
Comment *models.IssueComment
991
+
Comment *models.Comment
992
992
}
993
993
994
994
func (p *Pages) EditIssueCommentFragment(w io.Writer, params EditIssueCommentParams) error {
···
999
999
LoggedInUser *oauth.User
1000
1000
RepoInfo repoinfo.RepoInfo
1001
1001
Issue *models.Issue
1002
-
Comment *models.IssueComment
1002
+
Comment *models.Comment
1003
1003
}
1004
1004
1005
1005
func (p *Pages) ReplyIssueCommentPlaceholderFragment(w io.Writer, params ReplyIssueCommentPlaceholderParams) error {
···
1010
1010
LoggedInUser *oauth.User
1011
1011
RepoInfo repoinfo.RepoInfo
1012
1012
Issue *models.Issue
1013
-
Comment *models.IssueComment
1013
+
Comment *models.Comment
1014
1014
}
1015
1015
1016
1016
func (p *Pages) ReplyIssueCommentFragment(w io.Writer, params ReplyIssueCommentParams) error {
···
1021
1021
LoggedInUser *oauth.User
1022
1022
RepoInfo repoinfo.RepoInfo
1023
1023
Issue *models.Issue
1024
-
Comment *models.IssueComment
1024
+
Comment *models.Comment
1025
1025
}
1026
1026
1027
1027
func (p *Pages) IssueCommentBodyFragment(w io.Writer, params IssueCommentBodyParams) error {
+1
-1
appview/pages/templates/knots/index.html
+1
-1
appview/pages/templates/knots/index.html
···
105
105
{{ define "docsButton" }}
106
106
<a
107
107
class="btn flex items-center gap-2"
108
-
href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">
108
+
href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide">
109
109
{{ i "book" "size-4" }}
110
110
docs
111
111
</a>
+1
-1
appview/pages/templates/repo/fragments/diff.html
+1
-1
appview/pages/templates/repo/fragments/diff.html
···
17
17
{{ else }}
18
18
{{ range $idx, $hunk := $diff }}
19
19
{{ with $hunk }}
20
-
<details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
20
+
<details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
21
21
<summary class="list-none cursor-pointer sticky top-0">
22
22
<div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between">
23
23
<div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
+35
-35
appview/pages/templates/repo/fragments/splitDiff.html
+35
-35
appview/pages/templates/repo/fragments/splitDiff.html
···
3
3
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}}
4
4
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
5
5
{{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
6
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
6
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
7
7
{{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}}
8
8
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}}
9
9
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
10
10
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
11
11
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
12
12
<div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700">
13
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
13
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
14
14
{{- range .LeftLines -}}
15
15
{{- if .IsEmpty -}}
16
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
17
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
18
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
19
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
20
-
</div>
16
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
17
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
18
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
19
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
20
+
</span>
21
21
{{- else if eq .Op.String "-" -}}
22
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
24
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
25
-
<div class="px-2">{{ .Content }}</div>
26
-
</div>
22
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
24
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
25
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
26
+
</span>
27
27
{{- else if eq .Op.String " " -}}
28
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
30
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
31
-
<div class="px-2">{{ .Content }}</div>
32
-
</div>
28
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
30
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
31
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
32
+
</span>
33
33
{{- end -}}
34
34
{{- end -}}
35
-
{{- end -}}</div></div></pre>
35
+
{{- end -}}</div></div></div>
36
36
37
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
37
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
38
38
{{- range .RightLines -}}
39
39
{{- if .IsEmpty -}}
40
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
41
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
42
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
43
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
44
-
</div>
40
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
41
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
42
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
43
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
44
+
</span>
45
45
{{- else if eq .Op.String "+" -}}
46
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
48
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
49
-
<div class="px-2" >{{ .Content }}</div>
50
-
</div>
46
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span>
48
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
49
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
50
+
</span>
51
51
{{- else if eq .Op.String " " -}}
52
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
54
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
55
-
<div class="px-2">{{ .Content }}</div>
56
-
</div>
52
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span>
54
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
55
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
56
+
</span>
57
57
{{- end -}}
58
58
{{- end -}}
59
-
{{- end -}}</div></div></pre>
59
+
{{- end -}}</div></div></div>
60
60
</div>
61
61
{{ end }}
+21
-22
appview/pages/templates/repo/fragments/unifiedDiff.html
+21
-22
appview/pages/templates/repo/fragments/unifiedDiff.html
···
1
1
{{ define "repo/fragments/unifiedDiff" }}
2
2
{{ $name := .Id }}
3
-
<pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
3
+
<div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
4
4
{{- $oldStart := .OldPosition -}}
5
5
{{- $newStart := .NewPosition -}}
6
6
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}}
7
7
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
8
8
{{- $lineNrSepStyle1 := "" -}}
9
9
{{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
10
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
10
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
11
11
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}}
12
12
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
13
13
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
14
14
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
15
15
{{- range .Lines -}}
16
16
{{- if eq .Op.String "+" -}}
17
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div>
19
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div>
20
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
21
-
<div class="px-2">{{ .Line }}</div>
22
-
</div>
17
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span>
19
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span>
20
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
21
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
22
+
</span>
23
23
{{- $newStart = add64 $newStart 1 -}}
24
24
{{- end -}}
25
25
{{- if eq .Op.String "-" -}}
26
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div>
28
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div>
29
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
30
-
<div class="px-2">{{ .Line }}</div>
31
-
</div>
26
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span>
28
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span>
29
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
30
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
31
+
</span>
32
32
{{- $oldStart = add64 $oldStart 1 -}}
33
33
{{- end -}}
34
34
{{- if eq .Op.String " " -}}
35
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div>
37
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div>
38
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
39
-
<div class="px-2">{{ .Line }}</div>
40
-
</div>
35
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span>
37
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span>
38
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
39
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
40
+
</span>
41
41
{{- $newStart = add64 $newStart 1 -}}
42
42
{{- $oldStart = add64 $oldStart 1 -}}
43
43
{{- end -}}
44
44
{{- end -}}
45
-
{{- end -}}</div></div></pre>
45
+
{{- end -}}</div></div></div>
46
46
{{ end }}
47
-
+2
-2
appview/pages/templates/repo/issues/fragments/issueCommentHeader.html
+2
-2
appview/pages/templates/repo/issues/fragments/issueCommentHeader.html
···
1
1
{{ define "repo/issues/fragments/issueCommentHeader" }}
2
2
<div class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-400 ">
3
-
{{ template "user/fragments/picHandleLink" .Comment.Did }}
3
+
{{ template "user/fragments/picHandleLink" .Comment.Did.String }}
4
4
{{ template "hats" $ }}
5
5
{{ template "timestamp" . }}
6
-
{{ $isCommentOwner := and .LoggedInUser (eq .LoggedInUser.Did .Comment.Did) }}
6
+
{{ $isCommentOwner := and .LoggedInUser (eq .LoggedInUser.Did .Comment.Did.String) }}
7
7
{{ if and $isCommentOwner (not .Comment.Deleted) }}
8
8
{{ template "editIssueComment" . }}
9
9
{{ template "deleteIssueComment" . }}
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
···
23
23
</p>
24
24
<p>
25
25
<span class="{{ $bullet }}">2</span>Configure your CI/CD
26
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
26
+
<a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
27
27
</p>
28
28
<p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
29
29
</div>
+3
-3
appview/pages/templates/repo/pulls/pull.html
+3
-3
appview/pages/templates/repo/pulls/pull.html
···
165
165
166
166
<div class="md:pl-[3.5rem] flex flex-col gap-2 mt-2 relative">
167
167
{{ range $cidx, $c := .Comments }}
168
-
<div id="comment-{{$c.ID}}" class="bg-white dark:bg-gray-800 rounded drop-shadow-sm py-2 px-4 relative w-full">
168
+
<div id="comment-{{$c.Id}}" class="bg-white dark:bg-gray-800 rounded drop-shadow-sm py-2 px-4 relative w-full">
169
169
{{ if gt $cidx 0 }}
170
170
<div class="absolute left-8 -top-2 w-px h-2 bg-gray-300 dark:bg-gray-600"></div>
171
171
{{ end }}
172
172
<div class="text-sm text-gray-500 dark:text-gray-400 flex items-center gap-1">
173
-
{{ template "user/fragments/picHandleLink" $c.OwnerDid }}
173
+
{{ template "user/fragments/picHandleLink" $c.Did.String }}
174
174
<span class="before:content-['ยท']"></span>
175
-
<a class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" href="#comment-{{.ID}}">{{ template "repo/fragments/time" $c.Created }}</a>
175
+
<a class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" href="#comment-{{.Id}}">{{ template "repo/fragments/time" $c.Created }}</a>
176
176
</div>
177
177
<div class="prose dark:prose-invert">
178
178
{{ $c.Body | markdown }}
+1
-1
appview/pages/templates/repo/settings/pipelines.html
+1
-1
appview/pages/templates/repo/settings/pipelines.html
···
22
22
<p class="text-gray-500 dark:text-gray-400">
23
23
Choose a spindle to execute your workflows on. Only repository owners
24
24
can configure spindles. Spindles can be selfhosted,
25
-
<a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
25
+
<a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
26
26
click to learn more.
27
27
</a>
28
28
</p>
+1
-1
appview/pages/templates/spindles/index.html
+1
-1
appview/pages/templates/spindles/index.html
···
102
102
{{ define "docsButton" }}
103
103
<a
104
104
class="btn flex items-center gap-2"
105
-
href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
105
+
href="https://docs.tangled.org/spindles.html#self-hosting-guide">
106
106
{{ i "book" "size-4" }}
107
107
docs
108
108
</a>
+1
-1
appview/pulls/opengraph.go
+1
-1
appview/pulls/opengraph.go
···
277
277
}
278
278
279
279
// Get comment count from database
280
-
comments, err := db.GetPullComments(s.db, orm.FilterEq("pull_id", pull.ID))
280
+
comments, err := db.GetComments(s.db, orm.FilterEq("subject_at", pull.AtUri()))
281
281
if err != nil {
282
282
log.Printf("failed to get pull comments: %v", err)
283
283
}
+72
-59
appview/pulls/pulls.go
+72
-59
appview/pulls/pulls.go
···
741
741
}
742
742
defer tx.Rollback()
743
743
744
-
createdAt := time.Now().Format(time.RFC3339)
744
+
comment := models.Comment{
745
+
Did: syntax.DID(user.Did),
746
+
Rkey: tid.TID(),
747
+
Subject: pull.AtUri(),
748
+
ReplyTo: nil,
749
+
Body: body,
750
+
Created: time.Now(),
751
+
Mentions: mentions,
752
+
References: references,
753
+
PullSubmissionId: &pull.Submissions[roundNumber].ID,
754
+
}
755
+
if err = comment.Validate(); err != nil {
756
+
log.Println("failed to validate comment", err)
757
+
s.pages.Notice(w, "pull-comment", "Failed to create comment.")
758
+
return
759
+
}
760
+
record := comment.AsRecord()
745
761
746
762
client, err := s.oauth.AuthorizedClient(r)
747
763
if err != nil {
···
749
765
s.pages.Notice(w, "pull-comment", "Failed to create comment.")
750
766
return
751
767
}
752
-
atResp, err := comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
753
-
Collection: tangled.RepoPullCommentNSID,
768
+
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
769
+
Collection: tangled.CommentNSID,
754
770
Repo: user.Did,
755
-
Rkey: tid.TID(),
771
+
Rkey: comment.Rkey,
756
772
Record: &lexutil.LexiconTypeDecoder{
757
-
Val: &tangled.RepoPullComment{
758
-
Pull: pull.AtUri().String(),
759
-
Body: body,
760
-
CreatedAt: createdAt,
761
-
},
773
+
Val: &record,
762
774
},
763
775
})
764
776
if err != nil {
···
767
779
return
768
780
}
769
781
770
-
comment := &models.PullComment{
771
-
OwnerDid: user.Did,
772
-
RepoAt: f.RepoAt().String(),
773
-
PullId: pull.PullId,
774
-
Body: body,
775
-
CommentAt: atResp.Uri,
776
-
SubmissionId: pull.Submissions[roundNumber].ID,
777
-
Mentions: mentions,
778
-
References: references,
779
-
}
780
-
781
782
// Create the pull comment in the database with the commentAt field
782
-
commentId, err := db.NewPullComment(tx, comment)
783
+
err = db.PutComment(tx, &comment)
783
784
if err != nil {
784
785
log.Println("failed to create pull comment", err)
785
786
s.pages.Notice(w, "pull-comment", "Failed to create comment.")
···
793
794
return
794
795
}
795
796
796
-
s.notifier.NewPullComment(r.Context(), comment, mentions)
797
+
s.notifier.NewComment(r.Context(), &comment)
797
798
798
799
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
799
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", ownerSlashRepo, pull.PullId, commentId))
800
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", ownerSlashRepo, pull.PullId, comment.Id))
800
801
return
801
802
}
802
803
}
···
1241
1242
return
1242
1243
}
1243
1244
1245
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
1246
+
if err != nil {
1247
+
log.Println("failed to upload patch", err)
1248
+
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1249
+
return
1250
+
}
1251
+
1244
1252
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
1245
1253
Collection: tangled.RepoPullNSID,
1246
1254
Repo: user.Did,
···
1252
1260
Repo: string(repo.RepoAt()),
1253
1261
Branch: targetBranch,
1254
1262
},
1255
-
Patch: patch,
1263
+
PatchBlob: blob.Blob,
1256
1264
Source: recordPullSource,
1257
1265
CreatedAt: time.Now().Format(time.RFC3339),
1258
1266
},
···
1328
1336
// apply all record creations at once
1329
1337
var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
1330
1338
for _, p := range stack {
1339
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch()))
1340
+
if err != nil {
1341
+
log.Println("failed to upload patch blob", err)
1342
+
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1343
+
return
1344
+
}
1345
+
1331
1346
record := p.AsRecord()
1332
-
write := comatproto.RepoApplyWrites_Input_Writes_Elem{
1347
+
record.PatchBlob = blob.Blob
1348
+
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
1333
1349
RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
1334
1350
Collection: tangled.RepoPullNSID,
1335
1351
Rkey: &p.Rkey,
···
1337
1353
Val: &record,
1338
1354
},
1339
1355
},
1340
-
}
1341
-
writes = append(writes, &write)
1356
+
})
1342
1357
}
1343
1358
_, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{
1344
1359
Repo: user.Did,
···
1871
1886
return
1872
1887
}
1873
1888
1874
-
var recordPullSource *tangled.RepoPull_Source
1875
-
if pull.IsBranchBased() {
1876
-
recordPullSource = &tangled.RepoPull_Source{
1877
-
Branch: pull.PullSource.Branch,
1878
-
Sha: sourceRev,
1879
-
}
1889
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
1890
+
if err != nil {
1891
+
log.Println("failed to upload patch blob", err)
1892
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
1893
+
return
1880
1894
}
1881
-
if pull.IsForkBased() {
1882
-
repoAt := pull.PullSource.RepoAt.String()
1883
-
recordPullSource = &tangled.RepoPull_Source{
1884
-
Branch: pull.PullSource.Branch,
1885
-
Repo: &repoAt,
1886
-
Sha: sourceRev,
1887
-
}
1888
-
}
1895
+
record := pull.AsRecord()
1896
+
record.PatchBlob = blob.Blob
1897
+
record.CreatedAt = time.Now().Format(time.RFC3339)
1889
1898
1890
1899
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
1891
1900
Collection: tangled.RepoPullNSID,
···
1893
1902
Rkey: pull.Rkey,
1894
1903
SwapRecord: ex.Cid,
1895
1904
Record: &lexutil.LexiconTypeDecoder{
1896
-
Val: &tangled.RepoPull{
1897
-
Title: pull.Title,
1898
-
Target: &tangled.RepoPull_Target{
1899
-
Repo: string(repo.RepoAt()),
1900
-
Branch: pull.TargetBranch,
1901
-
},
1902
-
Patch: patch, // new patch
1903
-
Source: recordPullSource,
1904
-
CreatedAt: time.Now().Format(time.RFC3339),
1905
-
},
1905
+
Val: &record,
1906
1906
},
1907
1907
})
1908
1908
if err != nil {
···
1988
1988
}
1989
1989
defer tx.Rollback()
1990
1990
1991
+
client, err := s.oauth.AuthorizedClient(r)
1992
+
if err != nil {
1993
+
log.Println("failed to authorize client")
1994
+
s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
1995
+
return
1996
+
}
1997
+
1991
1998
// pds updates to make
1992
1999
var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
1993
2000
···
2021
2028
return
2022
2029
}
2023
2030
2031
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
2032
+
if err != nil {
2033
+
log.Println("failed to upload patch blob", err)
2034
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
2035
+
return
2036
+
}
2024
2037
record := p.AsRecord()
2038
+
record.PatchBlob = blob.Blob
2025
2039
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
2026
2040
RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
2027
2041
Collection: tangled.RepoPullNSID,
···
2056
2070
return
2057
2071
}
2058
2072
2073
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
2074
+
if err != nil {
2075
+
log.Println("failed to upload patch blob", err)
2076
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
2077
+
return
2078
+
}
2059
2079
record := np.AsRecord()
2060
-
2080
+
record.PatchBlob = blob.Blob
2061
2081
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
2062
2082
RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{
2063
2083
Collection: tangled.RepoPullNSID,
···
2091
2111
if err != nil {
2092
2112
log.Println("failed to resubmit pull", err)
2093
2113
s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.")
2094
-
return
2095
-
}
2096
-
2097
-
client, err := s.oauth.AuthorizedClient(r)
2098
-
if err != nil {
2099
-
log.Println("failed to authorize client")
2100
-
s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
2101
2114
return
2102
2115
}
2103
2116
+1
appview/repo/archive.go
+1
appview/repo/archive.go
···
18
18
l := rp.logger.With("handler", "DownloadArchive")
19
19
ref := chi.URLParam(r, "ref")
20
20
ref, _ = url.PathUnescape(ref)
21
+
ref = strings.TrimSuffix(ref, ".tar.gz")
21
22
f, err := rp.repoResolver.Resolve(r)
22
23
if err != nil {
23
24
l.Error("failed to get repo and knot", "err", err)
+26
-1
appview/reporesolver/resolver.go
+26
-1
appview/reporesolver/resolver.go
···
63
63
}
64
64
65
65
// get dir/ref
66
-
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
66
+
currentDir := extractCurrentDir(r.URL.EscapedPath())
67
67
ref := chi.URLParam(r, "ref")
68
68
69
69
repoAt := repo.RepoAt()
···
130
130
}
131
131
132
132
return repoInfo
133
+
}
134
+
135
+
// Patterns matching the path component that follows the ref segment of
// blob/tree URLs. Compiled once at package scope so hot request paths do
// not pay regexp compilation on every call.
var (
	blobDirPattern = regexp.MustCompile(`blob/[^/]+/(.*)$`)
	treeDirPattern = regexp.MustCompile(`tree/[^/]+/(.*)$`)
)

// extractCurrentDir gets the current directory for markdown link resolution.
// For blob paths it returns the parent dir; for tree paths it returns the
// path itself. It returns "." when the URL has no blob/tree segment or
// points at the repository root.
//
//	/@user/repo/blob/main/docs/README.md => docs
//	/@user/repo/tree/main/docs           => docs
func extractCurrentDir(fullPath string) string {
	fullPath = strings.TrimPrefix(fullPath, "/")

	// A blob URL names a file, so the current dir is its parent.
	// path.Dir returns "." for a bare filename, which is the root case.
	if matches := blobDirPattern.FindStringSubmatch(fullPath); len(matches) > 1 {
		return path.Dir(matches[1])
	}

	// A tree URL names a directory itself; strip a trailing slash and map
	// the ref root ("tree/main/") to ".".
	if matches := treeDirPattern.FindStringSubmatch(fullPath); len(matches) > 1 {
		dir := strings.TrimSuffix(matches[1], "/")
		if dir == "" {
			return "."
		}
		return dir
	}

	return "."
}
134
159
135
160
// extractPathAfterRef gets the actual repository path
+22
appview/reporesolver/resolver_test.go
+22
appview/reporesolver/resolver_test.go
···
1
+
package reporesolver
2
+
3
+
import "testing"
4
+
5
+
func TestExtractCurrentDir(t *testing.T) {
6
+
tests := []struct {
7
+
path string
8
+
want string
9
+
}{
10
+
{"/@user/repo/blob/main/docs/README.md", "docs"},
11
+
{"/@user/repo/blob/main/README.md", "."},
12
+
{"/@user/repo/tree/main/docs", "docs"},
13
+
{"/@user/repo/tree/main/docs/", "docs"},
14
+
{"/@user/repo/tree/main", "."},
15
+
}
16
+
17
+
for _, tt := range tests {
18
+
if got := extractCurrentDir(tt.path); got != tt.want {
19
+
t.Errorf("extractCurrentDir(%q) = %q, want %q", tt.path, got, tt.want)
20
+
}
21
+
}
22
+
}
-5
appview/spindles/spindles.go
-5
appview/spindles/spindles.go
···
653
653
s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
654
654
return
655
655
}
656
-
if memberId.Handle.IsInvalidHandle() {
657
-
l.Error("failed to resolve member identity to handle")
658
-
s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
659
-
return
660
-
}
661
656
662
657
tx, err := s.Db.Begin()
663
658
if err != nil {
+6
-4
appview/state/profile.go
+6
-4
appview/state/profile.go
···
163
163
}
164
164
165
165
// populate commit counts in the timeline, using the punchcard
166
-
currentMonth := time.Now().Month()
166
+
now := time.Now()
167
167
for _, p := range profile.Punchcard.Punches {
168
-
idx := currentMonth - p.Date.Month()
169
-
if int(idx) < len(timeline.ByMonth) {
170
-
timeline.ByMonth[idx].Commits += p.Count
168
+
years := now.Year() - p.Date.Year()
169
+
months := int(now.Month() - p.Date.Month())
170
+
monthsAgo := years*12 + months
171
+
if monthsAgo >= 0 && monthsAgo < len(timeline.ByMonth) {
172
+
timeline.ByMonth[monthsAgo].Commits += p.Count
171
173
}
172
174
}
173
175
+2
appview/state/router.go
+2
appview/state/router.go
···
109
109
})
110
110
111
111
r.NotFound(func(w http.ResponseWriter, r *http.Request) {
112
+
w.WriteHeader(http.StatusNotFound)
112
113
s.pages.Error404(w)
113
114
})
114
115
···
182
183
r.Get("/brand", s.Brand)
183
184
184
185
r.NotFound(func(w http.ResponseWriter, r *http.Request) {
186
+
w.WriteHeader(http.StatusNotFound)
185
187
s.pages.Error404(w)
186
188
})
187
189
return r
+1
-1
appview/state/state.go
+1
-1
appview/state/state.go
-27
appview/validator/issue.go
-27
appview/validator/issue.go
···
4
4
"fmt"
5
5
"strings"
6
6
7
-
"tangled.org/core/appview/db"
8
7
"tangled.org/core/appview/models"
9
-
"tangled.org/core/orm"
10
8
)
11
-
12
-
func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
13
-
// if comments have parents, only ingest ones that are 1 level deep
14
-
if comment.ReplyTo != nil {
15
-
parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo))
16
-
if err != nil {
17
-
return fmt.Errorf("failed to fetch parent comment: %w", err)
18
-
}
19
-
if len(parents) != 1 {
20
-
return fmt.Errorf("incorrect number of parent comments returned: %d", len(parents))
21
-
}
22
-
23
-
// depth check
24
-
parent := parents[0]
25
-
if parent.ReplyTo != nil {
26
-
return fmt.Errorf("incorrect depth, this comment is replying at depth >1")
27
-
}
28
-
}
29
-
30
-
if sb := strings.TrimSpace(v.sanitizer.SanitizeDefault(comment.Body)); sb == "" {
31
-
return fmt.Errorf("body is empty after HTML sanitization")
32
-
}
33
-
34
-
return nil
35
-
}
36
9
37
10
func (v *Validator) ValidateIssue(issue *models.Issue) error {
38
11
if issue.Title == "" {
+1
cmd/cborgen/cborgen.go
+1
cmd/cborgen/cborgen.go
+1527
docs/DOCS.md
+1527
docs/DOCS.md
···
1
+
---
2
+
title: Tangled docs
3
+
author: The Tangled Contributors
4
+
date: 21 Sun, Dec 2025
5
+
abstract: |
6
+
Tangled is a decentralized code hosting and collaboration
7
+
platform. Every component of Tangled is open-source and
8
+
self-hostable. [tangled.org](https://tangled.org) also
9
+
provides hosting and CI services that are free to use.
10
+
11
+
There are several models for decentralized code
12
+
collaboration platforms, ranging from ActivityPubโs
13
+
(Forgejo) federated model, to Radicleโs entirely P2P model.
14
+
Our approach attempts to be the best of both worlds by
15
+
adopting the AT Protocolโa protocol for building decentralized
16
+
social applications with a central identity
17
+
18
+
Our approach to this is the idea of โknotsโ. Knots are
19
+
lightweight, headless servers that enable users to host Git
20
+
repositories with ease. Knots are designed for either single
21
+
or multi-tenant use which is perfect for self-hosting on a
22
+
Raspberry Pi at home, or larger โcommunityโ servers. By
23
+
default, Tangled provides managed knots where you can host
24
+
your repositories for free.
25
+
26
+
The appview at tangled.org acts as a consolidated "view"
27
+
into the whole network, allowing users to access, clone and
28
+
contribute to repositories hosted across different knots
29
+
seamlessly.
30
+
---
31
+
32
+
# Quick start guide
33
+
34
+
## Login or sign up
35
+
36
+
You can [login](https://tangled.org) by using your AT Protocol
37
+
account. If you are unclear on what that means, simply head
38
+
to the [signup](https://tangled.org/signup) page and create
39
+
an account. By doing so, you will be choosing Tangled as
40
+
your account provider (you will be granted a handle of the
41
+
form `user.tngl.sh`).
42
+
43
+
In the AT Protocol network, users are free to choose their account
44
+
provider (known as a "Personal Data Service", or PDS), and
45
+
login to applications that support AT accounts.
46
+
47
+
You can think of it as "one account for all of the atmosphere"!
48
+
49
+
If you already have an AT account (you may have one if you
50
+
signed up to Bluesky, for example), you can login with the
51
+
same handle on Tangled (so just use `user.bsky.social` on
52
+
the login page).
53
+
54
+
## Add an SSH key
55
+
56
+
Once you are logged in, you can start creating repositories
57
+
and pushing code. Tangled supports pushing git repositories
58
+
over SSH.
59
+
60
+
First, you'll need to generate an SSH key if you don't
61
+
already have one:
62
+
63
+
```bash
64
+
ssh-keygen -t ed25519 -C "foo@bar.com"
65
+
```
66
+
67
+
When prompted, save the key to the default location
68
+
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
69
+
70
+
Copy your public key to your clipboard:
71
+
72
+
```bash
73
+
# on X11
74
+
cat ~/.ssh/id_ed25519.pub | xclip -sel c
75
+
76
+
# on wayland
77
+
cat ~/.ssh/id_ed25519.pub | wl-copy
78
+
79
+
# on macos
80
+
cat ~/.ssh/id_ed25519.pub | pbcopy
81
+
```
82
+
83
+
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
84
+
paste your public key, give it a descriptive name, and hit
85
+
save.
86
+
87
+
## Create a repository
88
+
89
+
Once your SSH key is added, create your first repository:
90
+
91
+
1. Hit the green `+` icon on the topbar, and select
92
+
repository
93
+
2. Enter a repository name
94
+
3. Add a description
95
+
4. Choose a knotserver to host this repository on
96
+
5. Hit create
97
+
98
+
Knots are self-hostable, lightweight Git servers that can
99
+
host your repository. Unlike traditional code forges, your
100
+
code can live on any server. Read the [Knots](#knot-self-hosting-guide) section
101
+
for more.
102
+
103
+
## Configure SSH
104
+
105
+
To ensure Git uses the correct SSH key and connects smoothly
106
+
to Tangled, add this configuration to your `~/.ssh/config`
107
+
file:
108
+
109
+
```
110
+
Host tangled.org
111
+
Hostname tangled.org
112
+
User git
113
+
IdentityFile ~/.ssh/id_ed25519
114
+
AddressFamily inet
115
+
```
116
+
117
+
This tells SSH to use your specific key when connecting to
118
+
Tangled and prevents authentication issues if you have
119
+
multiple SSH keys.
120
+
121
+
Note that this configuration only works for knotservers that
122
+
are hosted by tangled.org. If you use a custom knot, refer
123
+
to the [Knots](#knot-self-hosting-guide) section.
124
+
125
+
## Push your first repository
126
+
127
+
Initialize a new Git repository:
128
+
129
+
```bash
130
+
mkdir my-project
131
+
cd my-project
132
+
133
+
git init
134
+
echo "# My Project" > README.md
135
+
```
136
+
137
+
Add some content and push!
138
+
139
+
```bash
140
+
git add README.md
141
+
git commit -m "Initial commit"
142
+
git remote add origin git@tangled.org:user.tngl.sh/my-project
143
+
git push -u origin main
144
+
```
145
+
146
+
That's it! Your code is now hosted on Tangled.
147
+
148
+
## Migrating an existing repository
149
+
150
+
Moving your repositories from GitHub, GitLab, Bitbucket, or
151
+
any other Git forge to Tangled is straightforward. You'll
152
+
simply change your repository's remote URL. At the moment,
153
+
Tangled does not have any tooling to migrate data such as
154
+
GitHub issues or pull requests.
155
+
156
+
First, create a new repository on tangled.org as described
157
+
in the [Quick Start Guide](#create-a-repository).
158
+
159
+
Navigate to your existing local repository:
160
+
161
+
```bash
162
+
cd /path/to/your/existing/repo
163
+
```
164
+
165
+
You can inspect your existing Git remote like so:
166
+
167
+
```bash
168
+
git remote -v
169
+
```
170
+
171
+
You'll see something like:
172
+
173
+
```
174
+
origin git@github.com:username/my-project (fetch)
175
+
origin git@github.com:username/my-project (push)
176
+
```
177
+
178
+
Update the remote URL to point to tangled:
179
+
180
+
```bash
181
+
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
182
+
```
183
+
184
+
Verify the change:
185
+
186
+
```bash
187
+
git remote -v
188
+
```
189
+
190
+
You should now see:
191
+
192
+
```
193
+
origin git@tangled.org:user.tngl.sh/my-project (fetch)
194
+
origin git@tangled.org:user.tngl.sh/my-project (push)
195
+
```
196
+
197
+
Push all your branches and tags to Tangled:
198
+
199
+
```bash
200
+
git push -u origin --all
201
+
git push -u origin --tags
202
+
```
203
+
204
+
Your repository is now migrated to Tangled! All commit
205
+
history, branches, and tags have been preserved.
206
+
207
+
## Mirroring a repository to Tangled
208
+
209
+
If you want to maintain your repository on multiple forges
210
+
simultaneously, for example, keeping your primary repository
211
+
on GitHub while mirroring to Tangled for backup or
212
+
redundancy, you can do so by adding multiple remotes.
213
+
214
+
You can configure your local repository to push to both
215
+
Tangled and, say, GitHub. You may already have the following
216
+
setup:
217
+
218
+
```
219
+
$ git remote -v
220
+
origin git@github.com:username/my-project (fetch)
221
+
origin git@github.com:username/my-project (push)
222
+
```
223
+
224
+
Now add Tangled as an additional push URL to the same
225
+
remote:
226
+
227
+
```bash
228
+
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
229
+
```
230
+
231
+
You also need to re-add the original URL as a push
232
+
destination (Git replaces the push URL when you use `--add`
233
+
the first time):
234
+
235
+
```bash
236
+
git remote set-url --add --push origin git@github.com:username/my-project
237
+
```
238
+
239
+
Verify your configuration:
240
+
241
+
```
242
+
$ git remote -v
243
+
origin git@github.com:username/my-project (fetch)
244
+
origin git@tangled.org:username/my-project (push)
245
+
origin git@github.com:username/my-project (push)
246
+
```
247
+
248
+
Notice that there's one fetch URL (the primary remote) and
249
+
two push URLs. Now, whenever you push, Git will
250
+
automatically push to both remotes:
251
+
252
+
```bash
253
+
git push origin main
254
+
```
255
+
256
+
This single command pushes your `main` branch to both GitHub
257
+
and Tangled simultaneously.
258
+
259
+
To push all branches and tags:
260
+
261
+
```bash
262
+
git push origin --all
263
+
git push origin --tags
264
+
```
265
+
266
+
If you prefer more control over which remote you push to,
267
+
you can maintain separate remotes:
268
+
269
+
```bash
270
+
git remote add github git@github.com:username/my-project
271
+
git remote add tangled git@tangled.org:username/my-project
272
+
```
273
+
274
+
Then push to each explicitly:
275
+
276
+
```bash
277
+
git push github main
278
+
git push tangled main
279
+
```
280
+
281
+
# Knot self-hosting guide
282
+
283
+
So you want to run your own knot server? Great! Here are a few prerequisites:
284
+
285
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
286
+
2. A (sub)domain name. People generally use `knot.example.com`.
287
+
3. A valid SSL certificate for your domain.
288
+
289
+
## NixOS
290
+
291
+
Refer to the [knot
292
+
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
293
+
for a full list of options. Sample configurations:
294
+
295
+
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
296
+
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
297
+
298
+
## Docker
299
+
300
+
Refer to
301
+
[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
302
+
Note that this is community maintained.
303
+
304
+
## Manual setup
305
+
306
+
First, clone this repository:
307
+
308
+
```
309
+
git clone https://tangled.org/@tangled.org/core
310
+
```
311
+
312
+
Then, build the `knot` CLI. This is the knot administration
313
+
and operation tool. For the purpose of this guide, we're
314
+
only concerned with these subcommands:
315
+
316
+
* `knot server`: the main knot server process, typically
317
+
run as a supervised service
318
+
* `knot guard`: handles role-based access control for git
319
+
over SSH (you'll never have to run this yourself)
320
+
* `knot keys`: fetches SSH keys associated with your knot;
321
+
we'll use this to generate the SSH
322
+
`AuthorizedKeysCommand`
323
+
324
+
```
325
+
cd core
326
+
export CGO_ENABLED=1
327
+
go build -o knot ./cmd/knot
328
+
```
329
+
330
+
Next, move the `knot` binary to a location owned by `root` --
331
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
332
+
333
+
```
334
+
sudo mv knot /usr/local/bin/knot
335
+
sudo chown root:root /usr/local/bin/knot
336
+
```
337
+
338
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
339
+
specific permissions](https://stackoverflow.com/a/27638306). The
340
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
341
+
retrieve a user's public SSH keys dynamically for authentication. Let's
342
+
set that up.
343
+
344
+
```
345
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
346
+
Match User git
347
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
348
+
AuthorizedKeysCommandUser nobody
349
+
EOF
350
+
```
351
+
352
+
Then, reload `sshd`:
353
+
354
+
```
355
+
sudo systemctl reload ssh
356
+
```
357
+
358
+
Next, create the `git` user. We'll use the `git` user's home directory
359
+
to store repositories:
360
+
361
+
```
362
+
sudo adduser git
363
+
```
364
+
365
+
Create `/home/git/.knot.env` with the following, updating the values as
366
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
367
+
DID; you can find your DID in the [Settings](https://tangled.org/settings) page.
368
+
369
+
```
370
+
KNOT_REPO_SCAN_PATH=/home/git
371
+
KNOT_SERVER_HOSTNAME=knot.example.com
372
+
APPVIEW_ENDPOINT=https://tangled.org
373
+
KNOT_SERVER_OWNER=did:plc:foobar
374
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
375
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
376
+
```
377
+
378
+
If you run a Linux distribution that uses systemd, you can use the provided
379
+
service file to run the server. Copy
380
+
[`knotserver.service`](/systemd/knotserver.service)
381
+
to `/etc/systemd/system/`. Then, run:
382
+
383
+
```
384
+
systemctl enable knotserver
385
+
systemctl start knotserver
386
+
```
387
+
388
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
389
+
knot. Here's an example configuration for Nginx:
390
+
391
+
```
392
+
server {
393
+
listen 80;
394
+
listen [::]:80;
395
+
server_name knot.example.com;
396
+
397
+
location / {
398
+
proxy_pass http://localhost:5555;
399
+
proxy_set_header Host $host;
400
+
proxy_set_header X-Real-IP $remote_addr;
401
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
402
+
proxy_set_header X-Forwarded-Proto $scheme;
403
+
}
404
+
405
+
# wss endpoint for git events
406
+
location /events {
407
+
proxy_set_header X-Forwarded-For $remote_addr;
408
+
proxy_set_header Host $http_host;
409
+
proxy_set_header Upgrade websocket;
410
+
proxy_set_header Connection Upgrade;
411
+
proxy_pass http://localhost:5555;
412
+
}
413
+
# additional config for SSL/TLS go here.
414
+
}
415
+
416
+
```
417
+
418
+
Remember to use Let's Encrypt or similar to procure a certificate for your
419
+
knot domain.
420
+
421
+
You should now have a running knot server! You can finalize
422
+
your registration by hitting the `verify` button on the
423
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
424
+
a record on your PDS to announce the existence of the knot.
425
+
426
+
### Custom paths
427
+
428
+
(This section applies to manual setup only. Docker users should edit the mounts
429
+
in `docker-compose.yml` instead.)
430
+
431
+
Right now, the database and repositories of your knot lives in `/home/git`. You
432
+
can move these paths if you'd like to store them in another folder. Be careful
433
+
when adjusting these paths:
434
+
435
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
436
+
any possible side effects. Remember to restart it once you're done.
437
+
* Make backups before moving in case something goes wrong.
438
+
* Make sure the `git` user can read and write from the new paths.
439
+
440
+
#### Database
441
+
442
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
443
+
and we want to move it to `/home/git/database/knotserver.db`.
444
+
445
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
446
+
and `.db-wal` files if they exist.
447
+
448
+
```
449
+
mkdir /home/git/database
450
+
cp /home/git/knotserver.db* /home/git/database
451
+
```
452
+
453
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
454
+
the new file path (_not_ the directory):
455
+
456
+
```
457
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
458
+
```
459
+
460
+
#### Repositories
461
+
462
+
As an example, let's say the repositories are currently in `/home/git`, and we
463
+
want to move them into `/home/git/repositories`.
464
+
465
+
Create the new folder, then move the existing repositories (if there are any):
466
+
467
+
```
468
+
mkdir /home/git/repositories
469
+
# move all DIDs into the new folder; these will vary for you!
470
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
471
+
```
472
+
473
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
474
+
to the new directory:
475
+
476
+
```
477
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
478
+
```
479
+
480
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
481
+
repository path:
482
+
483
+
```
484
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
485
+
Match User git
486
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
487
+
AuthorizedKeysCommandUser nobody
488
+
EOF
489
+
```
490
+
491
+
Make sure to restart your SSH server!
492
+
493
+
#### MOTD (message of the day)
494
+
495
+
To configure the MOTD used ("Welcome to this knot!" by default), edit the
496
+
`/home/git/motd` file:
497
+
498
+
```
499
+
printf "Hi from this knot!\n" > /home/git/motd
500
+
```
501
+
502
+
Note that you should add a newline at the end if setting a non-empty message
503
+
since the knot won't do this for you.
504
+
505
+
# Spindles
506
+
507
+
## Pipelines
508
+
509
+
Spindle workflows allow you to write CI/CD pipelines in a
510
+
simple format. They're located in the `.tangled/workflows`
511
+
directory at the root of your repository, and are defined
512
+
using YAML.
513
+
514
+
The fields are:
515
+
516
+
- [Trigger](#trigger): A **required** field that defines
517
+
when a workflow should be triggered.
518
+
- [Engine](#engine): A **required** field that defines which
519
+
engine a workflow should run on.
520
+
- [Clone options](#clone-options): An **optional** field
521
+
that defines how the repository should be cloned.
522
+
- [Dependencies](#dependencies): An **optional** field that
523
+
allows you to list dependencies you may need.
524
+
- [Environment](#environment): An **optional** field that
525
+
allows you to define environment variables.
526
+
- [Steps](#steps): An **optional** field that allows you to
527
+
define what steps should run in the workflow.
528
+
529
+
### Trigger
530
+
531
+
The first thing to add to a workflow is the trigger, which
532
+
defines when a workflow runs. This is defined using a `when`
533
+
field, which takes in a list of conditions. Each condition
534
+
has the following fields:
535
+
536
+
- `event`: This is a **required** field that defines when
537
+
your workflow should run. It's a list that can take one or
538
+
more of the following values:
539
+
- `push`: The workflow should run every time a commit is
540
+
pushed to the repository.
541
+
- `pull_request`: The workflow should run every time a
542
+
pull request is made or updated.
543
+
- `manual`: The workflow can be triggered manually.
544
+
- `branch`: Defines which branches the workflow should run
545
+
for. If used with the `push` event, commits to the
546
+
branch(es) listed here will trigger the workflow. If used
547
+
with the `pull_request` event, updates to pull requests
548
+
targeting the branch(es) listed here will trigger the
549
+
workflow. This field has no effect with the `manual`
550
+
event. Supports glob patterns using `*` and `**` (e.g.,
551
+
`main`, `develop`, `release-*`). Either `branch` or `tag`
552
+
(or both) must be specified for `push` events.
553
+
- `tag`: Defines which tags the workflow should run for.
554
+
Only used with the `push` event - when tags matching the
555
+
pattern(s) listed here are pushed, the workflow will
556
+
trigger. This field has no effect with `pull_request` or
557
+
`manual` events. Supports glob patterns using `*` and `**`
558
+
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
559
+
`tag` (or both) must be specified for `push` events.
560
+
561
+
For example, if you'd like to define a workflow that runs
562
+
when commits are pushed to the `main` and `develop`
563
+
branches, or when pull requests that target the `main`
564
+
branch are updated, or manually, you can do so with:
565
+
566
+
```yaml
567
+
when:
568
+
- event: ["push", "manual"]
569
+
branch: ["main", "develop"]
570
+
- event: ["pull_request"]
571
+
branch: ["main"]
572
+
```
573
+
574
+
You can also trigger workflows on tag pushes. For instance,
575
+
to run a deployment workflow when tags matching `v*` are
576
+
pushed:
577
+
578
+
```yaml
579
+
when:
580
+
- event: ["push"]
581
+
tag: ["v*"]
582
+
```
583
+
584
+
You can even combine branch and tag patterns in a single
585
+
constraint (the workflow triggers if either matches):
586
+
587
+
```yaml
588
+
when:
589
+
- event: ["push"]
590
+
branch: ["main", "release-*"]
591
+
tag: ["v*", "stable"]
592
+
```
593
+
594
+
### Engine
595
+
596
+
Next is the engine on which the workflow should run, defined
597
+
using the **required** `engine` field. The currently
598
+
supported engines are:
599
+
600
+
- `nixery`: This uses an instance of
601
+
[Nixery](https://nixery.dev) to run steps, which allows
602
+
you to add [dependencies](#dependencies) from
603
+
Nixpkgs (https://github.com/NixOS/nixpkgs). You can
604
+
search for packages on https://search.nixos.org, and
605
+
there's a pretty good chance the package(s) you're looking
606
+
for will be there.
607
+
608
+
Example:
609
+
610
+
```yaml
611
+
engine: "nixery"
612
+
```
613
+
614
+
### Clone options
615
+
616
+
When a workflow starts, the first step is to clone the
617
+
repository. You can customize this behavior using the
618
+
**optional** `clone` field. It has the following fields:
619
+
620
+
- `skip`: Setting this to `true` will skip cloning the
621
+
repository. This can be useful if your workflow is doing
622
+
something that doesn't require anything from the
623
+
repository itself. This is `false` by default.
624
+
- `depth`: This sets the number of commits, or the "clone
625
+
depth", to fetch from the repository. For example, if you
626
+
set this to 2, the last 2 commits will be fetched. By
627
+
default, the depth is set to 1, meaning only the most
628
+
recent commit will be fetched, which is the commit that
629
+
triggered the workflow.
630
+
- `submodules`: If you use Git submodules
631
+
(https://git-scm.com/book/en/v2/Git-Tools-Submodules)
632
+
in your repository, setting this field to `true` will
633
+
recursively fetch all submodules. This is `false` by
634
+
default.
635
+
636
+
The default settings are:
637
+
638
+
```yaml
639
+
clone:
640
+
skip: false
641
+
depth: 1
642
+
submodules: false
643
+
```
644
+
645
+
### Dependencies
646
+
647
+
Usually when you're running a workflow, you'll need
648
+
additional dependencies. The `dependencies` field lets you
649
+
define which dependencies to get, and from where. It's a
650
+
key-value map, with the key being the registry to fetch
651
+
dependencies from, and the value being the list of
652
+
dependencies to fetch.
653
+
654
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a
655
+
package called `my_pkg` you've made from your own registry
656
+
at your repository at
657
+
`https://tangled.org/@example.com/my_pkg`. You can define
658
+
those dependencies like so:
659
+
660
+
```yaml
661
+
dependencies:
662
+
# nixpkgs
663
+
nixpkgs:
664
+
- nodejs
665
+
- go
666
+
# custom registry
667
+
git+https://tangled.org/@example.com/my_pkg:
668
+
- my_pkg
669
+
```
670
+
671
+
Now these dependencies are available to use in your
672
+
workflow!
673
+
674
+
### Environment
675
+
676
+
The `environment` field allows you to define environment
677
+
variables that will be available throughout the entire
678
+
workflow. **Do not put secrets here, these environment
679
+
variables are visible to anyone viewing the repository. You
680
+
can add secrets for pipelines in your repository's
681
+
settings.**
682
+
683
+
Example:
684
+
685
+
```yaml
686
+
environment:
687
+
GOOS: "linux"
688
+
GOARCH: "arm64"
689
+
NODE_ENV: "production"
690
+
MY_ENV_VAR: "MY_ENV_VALUE"
691
+
```
692
+
693
+
### Steps
694
+
695
+
The `steps` field allows you to define what steps should run
696
+
in the workflow. It's a list of step objects, each with the
697
+
following fields:
698
+
699
+
- `name`: This field allows you to give your step a name.
700
+
This name is visible in your workflow runs, and is used to
701
+
describe what the step is doing.
702
+
- `command`: This field allows you to define a command to
703
+
run in that step. The step is run in a Bash shell, and the
704
+
logs from the command will be visible in the pipelines
705
+
page on the Tangled website. The
706
+
[dependencies](#dependencies) you added will be available
707
+
to use here.
708
+
- `environment`: Similar to the global
709
+
[environment](#environment) config, this **optional**
710
+
field is a key-value map that allows you to set
711
+
environment variables for the step. **Do not put secrets
712
+
here, these environment variables are visible to anyone
713
+
viewing the repository. You can add secrets for pipelines
714
+
in your repository's settings.**
715
+
716
+
Example:
717
+
718
+
```yaml
719
+
steps:
720
+
- name: "Build backend"
721
+
command: "go build"
722
+
environment:
723
+
GOOS: "darwin"
724
+
GOARCH: "arm64"
725
+
- name: "Build frontend"
726
+
command: "npm run build"
727
+
environment:
728
+
NODE_ENV: "production"
729
+
```
730
+
731
+
### Complete workflow
732
+
733
+
```yaml
734
+
# .tangled/workflows/build.yml
735
+
736
+
when:
737
+
- event: ["push", "manual"]
738
+
branch: ["main", "develop"]
739
+
- event: ["pull_request"]
740
+
branch: ["main"]
741
+
742
+
engine: "nixery"
743
+
744
+
# using the default values
745
+
clone:
746
+
skip: false
747
+
depth: 1
748
+
submodules: false
749
+
750
+
dependencies:
751
+
# nixpkgs
752
+
nixpkgs:
753
+
- nodejs
754
+
- go
755
+
# custom registry
756
+
git+https://tangled.org/@example.com/my_pkg:
757
+
- my_pkg
758
+
759
+
environment:
760
+
GOOS: "linux"
761
+
GOARCH: "arm64"
762
+
NODE_ENV: "production"
763
+
MY_ENV_VAR: "MY_ENV_VALUE"
764
+
765
+
steps:
766
+
- name: "Build backend"
767
+
command: "go build"
768
+
environment:
769
+
GOOS: "darwin"
770
+
GOARCH: "arm64"
771
+
- name: "Build frontend"
772
+
command: "npm run build"
773
+
environment:
774
+
NODE_ENV: "production"
775
+
```
776
+
777
+
If you want another example of a workflow, you can look at
778
+
the one [Tangled uses to build the
779
+
project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
780
+
781
+
## Self-hosting guide
782
+
783
+
### Prerequisites
784
+
785
+
* Go
786
+
* Docker (the only supported backend currently)
787
+
788
+
### Configuration
789
+
790
+
Spindle is configured using environment variables. The following environment variables are available:
791
+
792
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
793
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
794
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
795
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
796
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
797
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
798
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
799
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
800
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
801
+
802
+
### Running spindle
803
+
804
+
1. **Set the environment variables.** For example:
805
+
806
+
```shell
807
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
808
+
export SPINDLE_SERVER_OWNER="your-did"
809
+
```
810
+
811
+
2. **Build the Spindle binary.**
812
+
813
+
```shell
814
+
cd core
815
+
go mod download
816
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
817
+
```
818
+
819
+
3. **Create the log directory.**
820
+
821
+
```shell
822
+
sudo mkdir -p /var/log/spindle
823
+
sudo chown $USER:$USER -R /var/log/spindle
824
+
```
825
+
826
+
4. **Run the Spindle binary.**
827
+
828
+
```shell
829
+
./cmd/spindle/spindle
830
+
```
831
+
832
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
833
+
834
+
## Architecture
835
+
836
+
Spindle is a small CI runner service. Here's a high-level overview of how it operates:
837
+
838
+
* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
839
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
840
+
* When a new repo record comes through (typically when you add a spindle to a
841
+
repo from the settings), spindle then resolves the underlying knot and
842
+
subscribes to repo events (see:
843
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
844
+
* The spindle engine then handles execution of the pipeline, with results and
845
+
logs beamed on the spindle event stream over WebSocket
846
+
847
+
### The engine
848
+
849
+
At present, the only supported backend is Docker (and Podman, if Docker
850
+
compatibility is enabled, so that `/run/docker.sock` is created). spindle
851
+
executes each step in the pipeline in a fresh container, with state persisted
852
+
across steps within the `/tangled/workspace` directory.
853
+
854
+
The base image for the container is constructed on the fly using
855
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
856
+
used packages.
857
+
858
+
The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
859
+
860
+
## Secrets with openbao
861
+
862
+
This document covers setting up spindle to use OpenBao for secrets
863
+
management via OpenBao Proxy instead of the default SQLite backend.
864
+
865
+
### Overview
866
+
867
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
868
+
authentication automatically using AppRole credentials, while spindle
869
+
connects to the local proxy instead of directly to the OpenBao server.
870
+
871
+
This approach provides better security, automatic token renewal, and
872
+
simplified application code.
873
+
874
+
### Installation
875
+
876
+
Install OpenBao from Nixpkgs:
877
+
878
+
```bash
879
+
nix shell nixpkgs#openbao # for a local server
880
+
```
881
+
882
+
### Setup
883
+
884
+
The setup process is documented for both local development and production.
885
+
886
+
#### Local development
887
+
888
+
Start OpenBao in dev mode:
889
+
890
+
```bash
891
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
```

This starts OpenBao on `http://localhost:8200` with a root token.
895
+
896
+
Set up environment for bao CLI:
897
+
898
+
```bash
899
+
export BAO_ADDR=http://localhost:8200
900
+
export BAO_TOKEN=root
901
+
```
902
+
903
+
#### Production
904
+
905
+
You would typically use a systemd service with a
906
+
configuration file. Refer to
907
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
908
+
for how this can be achieved using Nix.
909
+
910
+
Then, initialize the bao server:
911
+
912
+
```bash
913
+
bao operator init -key-shares=1 -key-threshold=1
914
+
```
915
+
916
+
This will print out an unseal key and a root key. Save them
917
+
somewhere (like a password manager). Then unseal the vault
918
+
to begin setting it up:
919
+
920
+
```bash
921
+
bao operator unseal <unseal_key>
922
+
```
923
+
924
+
All steps below remain the same across both dev and
925
+
production setups.
926
+
927
+
#### Configure openbao server
928
+
929
+
Create the spindle KV mount:
930
+
931
+
```bash
932
+
bao secrets enable -path=spindle -version=2 kv
933
+
```
934
+
935
+
Set up AppRole authentication and policy:
936
+
937
+
Create a policy file `spindle-policy.hcl`:
938
+
939
+
```hcl
940
+
# Full access to spindle KV v2 data
941
+
path "spindle/data/*" {
942
+
capabilities = ["create", "read", "update", "delete"]
943
+
}
944
+
945
+
# Access to metadata for listing and management
946
+
path "spindle/metadata/*" {
947
+
capabilities = ["list", "read", "delete", "update"]
948
+
}
949
+
950
+
# Allow listing at root level
951
+
path "spindle/" {
952
+
capabilities = ["list"]
953
+
}
954
+
955
+
# Required for connection testing and health checks
956
+
path "auth/token/lookup-self" {
957
+
capabilities = ["read"]
958
+
}
959
+
```
960
+
961
+
Apply the policy and create an AppRole:
962
+
963
+
```bash
964
+
bao policy write spindle-policy spindle-policy.hcl
965
+
bao auth enable approle
966
+
bao write auth/approle/role/spindle \
967
+
token_policies="spindle-policy" \
968
+
token_ttl=1h \
969
+
token_max_ttl=4h \
970
+
bind_secret_id=true \
971
+
secret_id_ttl=0 \
972
+
secret_id_num_uses=0
973
+
```
974
+
975
+
Get the credentials:
976
+
977
+
```bash
978
+
# Get role ID (static)
979
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
980
+
981
+
# Generate secret ID
982
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
983
+
984
+
echo "Role ID: $ROLE_ID"
985
+
echo "Secret ID: $SECRET_ID"
986
+
```
987
+
988
+
#### Create proxy configuration
989
+
990
+
Create the credential files:
991
+
992
+
```bash
993
+
# Create directory for OpenBao files
994
+
mkdir -p /tmp/openbao
995
+
996
+
# Save credentials
997
+
echo "$ROLE_ID" > /tmp/openbao/role-id
998
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
999
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1000
+
```
1001
+
1002
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1003
+
1004
+
```hcl
1005
+
# OpenBao server connection
1006
+
vault {
1007
+
address = "http://localhost:8200"
1008
+
}
1009
+
1010
+
# Auto-Auth using AppRole
1011
+
auto_auth {
1012
+
method "approle" {
1013
+
mount_path = "auth/approle"
1014
+
config = {
1015
+
role_id_file_path = "/tmp/openbao/role-id"
1016
+
secret_id_file_path = "/tmp/openbao/secret-id"
1017
+
}
1018
+
}
1019
+
1020
+
# Optional: write token to file for debugging
1021
+
sink "file" {
1022
+
config = {
1023
+
path = "/tmp/openbao/token"
1024
+
mode = 0640
1025
+
}
1026
+
}
1027
+
}
1028
+
1029
+
# Proxy listener for spindle
1030
+
listener "tcp" {
1031
+
address = "127.0.0.1:8201"
1032
+
tls_disable = true
1033
+
}
1034
+
1035
+
# Enable API proxy with auto-auth token
1036
+
api_proxy {
1037
+
use_auto_auth_token = true
1038
+
}
1039
+
1040
+
# Enable response caching
1041
+
cache {
1042
+
use_auto_auth_token = true
1043
+
}
1044
+
1045
+
# Logging
1046
+
log_level = "info"
1047
+
```
1048
+
1049
+
#### Start the proxy
1050
+
1051
+
Start OpenBao Proxy:
1052
+
1053
+
```bash
1054
+
bao proxy -config=/tmp/openbao/proxy.hcl
1055
+
```
1056
+
1057
+
The proxy will authenticate with OpenBao and start listening on
1058
+
`127.0.0.1:8201`.
1059
+
1060
+
#### Configure spindle
1061
+
1062
+
Set these environment variables for spindle:
1063
+
1064
+
```bash
1065
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1066
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1067
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1068
+
```
1069
+
1070
+
On startup, spindle will now connect to the local proxy,
1071
+
which handles all authentication automatically.
1072
+
1073
+
### Production setup for proxy
1074
+
1075
+
For production, you'll want to run the proxy as a service:
1076
+
1077
+
Place your production configuration in
1078
+
`/etc/openbao/proxy.hcl` with proper TLS settings for the
1079
+
vault connection.
1080
+
1081
+
### Verifying setup
1082
+
1083
+
Test the proxy directly:
1084
+
1085
+
```bash
1086
+
# Check proxy health
1087
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1088
+
1089
+
# Test token lookup through proxy
1090
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1091
+
```
1092
+
1093
+
Test OpenBao operations through the server:
1094
+
1095
+
```bash
1096
+
# List all secrets
1097
+
bao kv list spindle/
1098
+
1099
+
# Add a test secret via the spindle API, then check it exists
1100
+
bao kv list spindle/repos/
1101
+
1102
+
# Get a specific secret
1103
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
1104
+
```
1105
+
1106
+
### How it works
1107
+
1108
+
- Spindle connects to OpenBao Proxy on localhost (typically
1109
+
port 8200 or 8201)
1110
+
- The proxy authenticates with OpenBao using AppRole
1111
+
credentials
1112
+
- All spindle requests go through the proxy, which injects
1113
+
authentication tokens
1114
+
- Secrets are stored at
1115
+
`spindle/repos/{sanitized_repo_path}/{secret_key}`
1116
+
- Repository paths like `did:plc:alice/myrepo` become
1117
+
`did_plc_alice_myrepo`
1118
+
- The proxy handles all token renewal automatically
1119
+
- Spindle no longer manages tokens or authentication
1120
+
directly
1121
+
1122
+
### Troubleshooting
1123
+
1124
+
**Connection refused**: Check that the OpenBao Proxy is
1125
+
running and listening on the configured address.
1126
+
1127
+
**403 errors**: Verify the AppRole credentials are correct
1128
+
and the policy has the necessary permissions.
1129
+
1130
+
**404 route errors**: The spindle KV mount probably doesn't
1131
+
exist — run the mount creation step again.
1132
+
1133
+
**Proxy authentication failures**: Check the proxy logs and
1134
+
verify the role-id and secret-id files are readable and
1135
+
contain valid credentials.
1136
+
1137
+
**Secret not found after writing**: This can indicate policy
1138
+
permission issues. Verify the policy includes both
1139
+
`spindle/data/*` and `spindle/metadata/*` paths with
1140
+
appropriate capabilities.
1141
+
1142
+
Check proxy logs:
1143
+
1144
+
```bash
1145
+
# If running as systemd service
1146
+
journalctl -u openbao-proxy -f
1147
+
1148
+
# If running directly, check the console output
1149
+
```
1150
+
1151
+
Test AppRole authentication manually:
1152
+
1153
+
```bash
1154
+
bao write auth/approle/login \
1155
+
role_id="$(cat /tmp/openbao/role-id)" \
1156
+
secret_id="$(cat /tmp/openbao/secret-id)"
1157
+
```
1158
+
1159
+
# Migrating knots and spindles
1160
+
1161
+
Sometimes, non-backwards compatible changes are made to the
1162
+
knot/spindle XRPC APIs. If you host a knot or a spindle, you
1163
+
will need to follow this guide to upgrade. Typically, this
1164
+
only requires you to deploy the newest version.
1165
+
1166
+
This document is laid out in reverse-chronological order.
1167
+
Newer migration guides are listed first, and older guides
1168
+
are further down the page.
1169
+
1170
+
## Upgrading from v1.8.x
1171
+
1172
+
After v1.8.2, the HTTP API for knots and spindles has been
1173
+
deprecated and replaced with XRPC. Repositories on outdated
1174
+
knots will not be viewable from the appview. Upgrading is
1175
+
straightforward however.
1176
+
1177
+
For knots:
1178
+
1179
+
- Upgrade to the latest tag (v1.9.0 or above)
1180
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1181
+
hit the "retry" button to verify your knot
1182
+
1183
+
For spindles:
1184
+
1185
+
- Upgrade to the latest tag (v1.9.0 or above)
1186
+
- Head to the [spindle
1187
+
dashboard](https://tangled.org/settings/spindles) and hit the
1188
+
"retry" button to verify your spindle
1189
+
1190
+
## Upgrading from v1.7.x
1191
+
1192
+
After v1.7.0, knot secrets have been deprecated. You no
1193
+
longer need a secret from the appview to run a knot. All
1194
+
authorized commands to knots are managed via [Inter-Service
1195
+
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
1196
+
Knots will be read-only until upgraded.
1197
+
1198
+
Upgrading is quite easy, in essence:
1199
+
1200
+
- `KNOT_SERVER_SECRET` is no more, you can remove this
1201
+
environment variable entirely
1202
+
- `KNOT_SERVER_OWNER` is now required on boot, set this to
1203
+
your DID. You can find your DID in the
1204
+
[settings](https://tangled.org/settings) page.
1205
+
- Restart your knot once you have replaced the environment
1206
+
variable
1207
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1208
+
hit the "retry" button to verify your knot. This simply
1209
+
writes a `sh.tangled.knot` record to your PDS.
1210
+
1211
+
If you use the nix module, simply bump the flake to the
1212
+
latest revision, and change your config block like so:
1213
+
1214
+
```diff
1215
+
services.tangled.knot = {
1216
+
enable = true;
1217
+
server = {
1218
+
- secretFile = /path/to/secret;
1219
+
+ owner = "did:plc:foo";
1220
+
};
1221
+
};
1222
+
```
1223
+
1224
+
# Hacking on Tangled
1225
+
1226
+
We highly recommend [installing
1227
+
Nix](https://nixos.org/download/) (the package manager)
1228
+
before working on the codebase. The Nix flake provides a lot
1229
+
of helpers to get started and most importantly, builds and
1230
+
dev shells are entirely deterministic.
1231
+
1232
+
To set up your dev environment:
1233
+
1234
+
```bash
1235
+
nix develop
1236
+
```
1237
+
1238
+
Non-Nix users can look at the `devShell` attribute in the
1239
+
`flake.nix` file to determine necessary dependencies.
1240
+
1241
+
## Running the appview
1242
+
1243
+
The Nix flake also exposes a few `app` attributes (run `nix
1244
+
flake show` to see a full list of what the flake provides),
1245
+
one of the apps runs the appview with the `air`
1246
+
live-reloader:
1247
+
1248
+
```bash
1249
+
TANGLED_DEV=true nix run .#watch-appview
1250
+
1251
+
# TANGLED_DB_PATH might be of interest to point to
1252
+
# different sqlite DBs
1253
+
1254
+
# in a separate shell, you can live-reload tailwind
1255
+
nix run .#watch-tailwind
1256
+
```
1257
+
1258
+
To authenticate with the appview, you will need Redis and
1259
+
OAuth JWKs to be set up:
1260
+
1261
+
```
1262
+
# OAuth JWKs should already be set up by the Nix devshell:
1263
+
echo $TANGLED_OAUTH_CLIENT_SECRET
1264
+
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1265
+
1266
+
echo $TANGLED_OAUTH_CLIENT_KID
1267
+
1761667908
1268
+
1269
+
# if not, you can set it up yourself:
1270
+
goat key generate -t P-256
1271
+
Key Type: P-256 / secp256r1 / ES256 private key
1272
+
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1273
+
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1274
+
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1275
+
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1276
+
1277
+
# the secret key from above
1278
+
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1279
+
1280
+
# Run Redis in a new shell to store OAuth sessions
1281
+
redis-server
1282
+
```
1283
+
1284
+
## Running knots and spindles
1285
+
1286
+
An end-to-end knot setup requires setting up a machine with
1287
+
`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1288
+
quite cumbersome. So the Nix flake provides a
1289
+
`nixosConfiguration` to do so.
1290
+
1291
+
<details>
1292
+
<summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1293
+
1294
+
In order to build Tangled's dev VM on macOS, you will
1295
+
first need to set up a Linux Nix builder. The recommended
1296
+
way to do so is to run a [`darwin.linux-builder`
1297
+
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1298
+
and to register it in `nix.conf` as a builder for Linux
1299
+
with the same architecture as your Mac (`linux-aarch64` if
1300
+
you are using Apple Silicon).
1301
+
1302
+
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1303
+
> the Tangled repo so that it doesn't conflict with the other VM. For example,
1304
+
> you can do
1305
+
>
1306
+
> ```shell
1307
+
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1308
+
> ```
1309
+
>
1310
+
> to store the builder VM in a temporary dir.
1311
+
>
1312
+
> You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
1313
+
> avoid subtle problems.
1314
+
1315
+
Alternatively, you can use any other method to set up a
1316
+
Linux machine with Nix installed that you can `sudo ssh`
1317
+
into (in other words, root user on your Mac has to be able
1318
+
to ssh into the Linux machine without entering a password)
1319
+
and that has the same architecture as your Mac. See
1320
+
[remote builder
1321
+
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1322
+
for how to register such a builder in `nix.conf`.
1323
+
1324
+
> WARNING: If you'd like to use
1325
+
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1326
+
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1327
+
> ssh` works can be tricky. It seems to be [possible with
1328
+
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1329
+
1330
+
</details>
1331
+
1332
+
To begin, grab your DID from http://localhost:3000/settings.
1333
+
Then, set `TANGLED_VM_KNOT_OWNER` and
1334
+
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1335
+
lightweight NixOS VM like so:
1336
+
1337
+
```bash
1338
+
nix run --impure .#vm
1339
+
1340
+
# type `poweroff` at the shell to exit the VM
1341
+
```
1342
+
1343
+
This starts a knot on port 6444, a spindle on port 6555
1344
+
with `ssh` exposed on port 2222.
1345
+
1346
+
Once the services are running, head to
1347
+
http://localhost:3000/settings/knots and hit "Verify". It should
1348
+
verify the ownership of the services instantly if everything
1349
+
went smoothly.
1350
+
1351
+
You can push repositories to this VM with this ssh config
1352
+
block on your main machine:
1353
+
1354
+
```bash
1355
+
Host nixos-shell
1356
+
Hostname localhost
1357
+
Port 2222
1358
+
User git
1359
+
IdentityFile ~/.ssh/my_tangled_key
1360
+
```
1361
+
1362
+
Set up a remote called `local-dev` on a git repo:
1363
+
1364
+
```bash
1365
+
git remote add local-dev git@nixos-shell:user/repo
1366
+
git push local-dev main
1367
+
```
1368
+
1369
+
The above VM should already be running a spindle on
1370
+
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
1371
+
hit "Verify". You can then configure each repository to use
1372
+
this spindle and run CI jobs.
1373
+
1374
+
Of interest when debugging spindles:
1375
+
1376
+
```
1377
+
# Service logs from journald:
1378
+
journalctl -xeu spindle
1379
+
1380
+
# CI job logs from disk:
1381
+
ls /var/log/spindle
1382
+
1383
+
# Debugging spindle database:
1384
+
sqlite3 /var/lib/spindle/spindle.db
1385
+
1386
+
# litecli has a nicer REPL interface:
1387
+
litecli /var/lib/spindle/spindle.db
1388
+
```
1389
+
1390
+
If for any reason you wish to disable either one of the
1391
+
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1392
+
`services.tangled.spindle.enable` (or
1393
+
`services.tangled.knot.enable`) to `false`.
1394
+
1395
+
# Contribution guide
1396
+
1397
+
## Commit guidelines
1398
+
1399
+
We follow a commit style similar to the Go project. Please keep commits:
1400
+
1401
+
* **atomic**: each commit should represent one logical change
1402
+
* **descriptive**: the commit message should clearly describe what the
1403
+
change does and why it's needed
1404
+
1405
+
### Message format
1406
+
1407
+
```
1408
+
<service/top-level directory>/<affected package/directory>: <short summary of change>
1409
+
1410
+
Optional longer description can go here, if necessary. Explain what the
1411
+
change does and why, especially if not obvious. Reference relevant
1412
+
issues or PRs when applicable. These can be links for now since we don't
1413
+
auto-link issues/PRs yet.
1414
+
```
1415
+
1416
+
Here are some examples:
1417
+
1418
+
```
1419
+
appview/state: fix token expiry check in middleware
1420
+
1421
+
The previous check did not account for clock drift, leading to premature
1422
+
token invalidation.
1423
+
```
1424
+
1425
+
```
1426
+
knotserver/git/service: improve error checking in upload-pack
1427
+
```
1428
+
1429
+
1430
+
### General notes
1431
+
1432
+
- PRs get merged "as-is" (fast-forward)—like applying a patch-series
1433
+
using `git am`. At present, there is no squashing—so please author
1434
+
your commits as they would appear on `master`, following the above
1435
+
guidelines.
1436
+
- If there is a lot of nesting, for example "appview:
1437
+
pages/templates/repo/fragments: ...", these can be truncated down to
1438
+
just "appview: repo/fragments: ...". If the change affects a lot of
1439
+
subdirectories, you may abbreviate to just the top-level names, e.g.
1440
+
"appview: ..." or "knotserver: ...".
1441
+
- Keep commits lowercased with no trailing period.
1442
+
- Use the imperative mood in the summary line (e.g., "fix bug" not
1443
+
"fixed bug" or "fixes bug").
1444
+
- Try to keep the summary line under 72 characters, but we aren't too
1445
+
fussed about this.
1446
+
- Follow the same formatting for PR titles if filled manually.
1447
+
- Don't include unrelated changes in the same commit.
1448
+
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
1449
+
before submitting if necessary.
1450
+
1451
+
## Code formatting
1452
+
1453
+
We use a variety of tools to format our code, and multiplex them with
1454
+
[`treefmt`](https://treefmt.com). All you need to do to format your changes
1455
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1456
+
1457
+
## Proposals for bigger changes
1458
+
1459
+
Small fixes like typos, minor bugs, or trivial refactors can be
1460
+
submitted directly as PRs.
1461
+
1462
+
For larger changes—especially those introducing new features, significant
1463
+
refactoring, or altering system behavior—please open a proposal first. This
1464
+
helps us evaluate the scope, design, and potential impact before implementation.
1465
+
1466
+
Create a new issue titled:
1467
+
1468
+
```
1469
+
proposal: <affected scope>: <summary of change>
1470
+
```
1471
+
1472
+
In the description, explain:
1473
+
1474
+
- What the change is
1475
+
- Why it's needed
1476
+
- How you plan to implement it (roughly)
1477
+
- Any open questions or tradeoffs
1478
+
1479
+
We'll use the issue thread to discuss and refine the idea before moving
1480
+
forward.
1481
+
1482
+
## Developer Certificate of Origin (DCO)
1483
+
1484
+
We require all contributors to certify that they have the right to
1485
+
submit the code they're contributing. To do this, we follow the
1486
+
[Developer Certificate of Origin
1487
+
(DCO)](https://developercertificate.org/).
1488
+
1489
+
By signing your commits, you're stating that the contribution is your
1490
+
own work, or that you have the right to submit it under the project's
1491
+
license. This helps us keep things clean and legally sound.
1492
+
1493
+
To sign your commit, just add the `-s` flag when committing:
1494
+
1495
+
```sh
1496
+
git commit -s -m "your commit message"
1497
+
```
1498
+
1499
+
This appends a line like:
1500
+
1501
+
```
1502
+
Signed-off-by: Your Name <your.email@example.com>
1503
+
```
1504
+
1505
+
We won't merge commits if they aren't signed off. If you forget, you can
1506
+
amend the last commit like this:
1507
+
1508
+
```sh
1509
+
git commit --amend -s
1510
+
```
1511
+
1512
+
If you're submitting a PR with multiple commits, make sure each one is
1513
+
signed.
1514
+
1515
+
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
1516
+
to make it sign off commits in the tangled repo:
1517
+
1518
+
```shell
1519
+
# Safety check, should say "No matching config key..."
1520
+
jj config list templates.commit_trailers
1521
+
# The command below may need to be adjusted if the command above returned something.
1522
+
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
1523
+
```
1524
+
1525
+
Refer to the [jujutsu
1526
+
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1527
+
for more information.
-136
docs/contributing.md
-136
docs/contributing.md
···
1
-
# tangled contributing guide
2
-
3
-
## commit guidelines
4
-
5
-
We follow a commit style similar to the Go project. Please keep commits:
6
-
7
-
* **atomic**: each commit should represent one logical change
8
-
* **descriptive**: the commit message should clearly describe what the
9
-
change does and why it's needed
10
-
11
-
### message format
12
-
13
-
```
14
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
15
-
16
-
17
-
Optional longer description can go here, if necessary. Explain what the
18
-
change does and why, especially if not obvious. Reference relevant
19
-
issues or PRs when applicable. These can be links for now since we don't
20
-
auto-link issues/PRs yet.
21
-
```
22
-
23
-
Here are some examples:
24
-
25
-
```
26
-
appview/state: fix token expiry check in middleware
27
-
28
-
The previous check did not account for clock drift, leading to premature
29
-
token invalidation.
30
-
```
31
-
32
-
```
33
-
knotserver/git/service: improve error checking in upload-pack
34
-
```
35
-
36
-
37
-
### general notes
38
-
39
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40
-
using `git am`. At present, there is no squashing -- so please author
41
-
your commits as they would appear on `master`, following the above
42
-
guidelines.
43
-
- If there is a lot of nesting, for example "appview:
44
-
pages/templates/repo/fragments: ...", these can be truncated down to
45
-
just "appview: repo/fragments: ...". If the change affects a lot of
46
-
subdirectories, you may abbreviate to just the top-level names, e.g.
47
-
"appview: ..." or "knotserver: ...".
48
-
- Keep commits lowercased with no trailing period.
49
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
50
-
"fixed bug" or "fixes bug").
51
-
- Try to keep the summary line under 72 characters, but we aren't too
52
-
fussed about this.
53
-
- Follow the same formatting for PR titles if filled manually.
54
-
- Don't include unrelated changes in the same commit.
55
-
- Avoid noisy commit messages like "wip" or "final fix"โrewrite history
56
-
before submitting if necessary.
57
-
58
-
## code formatting
59
-
60
-
We use a variety of tools to format our code, and multiplex them with
61
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
62
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63
-
64
-
## proposals for bigger changes
65
-
66
-
Small fixes like typos, minor bugs, or trivial refactors can be
67
-
submitted directly as PRs.
68
-
69
-
For larger changesโespecially those introducing new features, significant
70
-
refactoring, or altering system behaviorโplease open a proposal first. This
71
-
helps us evaluate the scope, design, and potential impact before implementation.
72
-
73
-
### proposal format
74
-
75
-
Create a new issue titled:
76
-
77
-
```
78
-
proposal: <affected scope>: <summary of change>
79
-
```
80
-
81
-
In the description, explain:
82
-
83
-
- What the change is
84
-
- Why it's needed
85
-
- How you plan to implement it (roughly)
86
-
- Any open questions or tradeoffs
87
-
88
-
We'll use the issue thread to discuss and refine the idea before moving
89
-
forward.
90
-
91
-
## developer certificate of origin (DCO)
92
-
93
-
We require all contributors to certify that they have the right to
94
-
submit the code they're contributing. To do this, we follow the
95
-
[Developer Certificate of Origin
96
-
(DCO)](https://developercertificate.org/).
97
-
98
-
By signing your commits, you're stating that the contribution is your
99
-
own work, or that you have the right to submit it under the project's
100
-
license. This helps us keep things clean and legally sound.
101
-
102
-
To sign your commit, just add the `-s` flag when committing:
103
-
104
-
```sh
105
-
git commit -s -m "your commit message"
106
-
```
107
-
108
-
This appends a line like:
109
-
110
-
```
111
-
Signed-off-by: Your Name <your.email@example.com>
112
-
```
113
-
114
-
We won't merge commits if they aren't signed off. If you forget, you can
115
-
amend the last commit like this:
116
-
117
-
```sh
118
-
git commit --amend -s
119
-
```
120
-
121
-
If you're submitting a PR with multiple commits, make sure each one is
122
-
signed.
123
-
124
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125
-
to make it sign off commits in the tangled repo:
126
-
127
-
```shell
128
-
# Safety check, should say "No matching config key..."
129
-
jj config list templates.commit_trailers
130
-
# The command below may need to be adjusted if the command above returned something.
131
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132
-
```
133
-
134
-
Refer to the [jj
135
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136
-
for more information.
-172
docs/hacking.md
-172
docs/hacking.md
···
1
-
# hacking on tangled
2
-
3
-
We highly recommend [installing
4
-
nix](https://nixos.org/download/) (the package manager)
5
-
before working on the codebase. The nix flake provides a lot
6
-
of helpers to get started and most importantly, builds and
7
-
dev shells are entirely deterministic.
8
-
9
-
To set up your dev environment:
10
-
11
-
```bash
12
-
nix develop
13
-
```
14
-
15
-
Non-nix users can look at the `devShell` attribute in the
16
-
`flake.nix` file to determine necessary dependencies.
17
-
18
-
## running the appview
19
-
20
-
The nix flake also exposes a few `app` attributes (run `nix
21
-
flake show` to see a full list of what the flake provides),
22
-
one of the apps runs the appview with the `air`
23
-
live-reloader:
24
-
25
-
```bash
26
-
TANGLED_DEV=true nix run .#watch-appview
27
-
28
-
# TANGLED_DB_PATH might be of interest to point to
29
-
# different sqlite DBs
30
-
31
-
# in a separate shell, you can live-reload tailwind
32
-
nix run .#watch-tailwind
33
-
```
34
-
35
-
To authenticate with the appview, you will need redis and
36
-
OAUTH JWKs to be setup:
37
-
38
-
```
39
-
# oauth jwks should already be setup by the nix devshell:
40
-
echo $TANGLED_OAUTH_CLIENT_SECRET
41
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42
-
43
-
echo $TANGLED_OAUTH_CLIENT_KID
44
-
1761667908
45
-
46
-
# if not, you can set it up yourself:
47
-
goat key generate -t P-256
48
-
Key Type: P-256 / secp256r1 / ES256 private key
49
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53
-
54
-
# the secret key from above
55
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56
-
57
-
# run redis in at a new shell to store oauth sessions
58
-
redis-server
59
-
```
60
-
61
-
## running knots and spindles
62
-
63
-
An end-to-end knot setup requires setting up a machine with
64
-
`sshd`, `AuthorizedKeysCommand`, and git user, which is
65
-
quite cumbersome. So the nix flake provides a
66
-
`nixosConfiguration` to do so.
67
-
68
-
<details>
69
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
70
-
71
-
In order to build Tangled's dev VM on macOS, you will
72
-
first need to set up a Linux Nix builder. The recommended
73
-
way to do so is to run a [`darwin.linux-builder`
74
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75
-
and to register it in `nix.conf` as a builder for Linux
76
-
with the same architecture as your Mac (`linux-aarch64` if
77
-
you are using Apple Silicon).
78
-
79
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
81
-
> you can do
82
-
>
83
-
> ```shell
84
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85
-
> ```
86
-
>
87
-
> to store the builder VM in a temporary dir.
88
-
>
89
-
> You should read and follow [all the other intructions][darwin builder vm] to
90
-
> avoid subtle problems.
91
-
92
-
Alternatively, you can use any other method to set up a
93
-
Linux machine with `nix` installed that you can `sudo ssh`
94
-
into (in other words, root user on your Mac has to be able
95
-
to ssh into the Linux machine without entering a password)
96
-
and that has the same architecture as your Mac. See
97
-
[remote builder
98
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99
-
for how to register such a builder in `nix.conf`.
100
-
101
-
> WARNING: If you'd like to use
102
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104
-
> ssh` works can be tricky. It seems to be [possible with
105
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106
-
107
-
</details>
108
-
109
-
To begin, grab your DID from http://localhost:3000/settings.
110
-
Then, set `TANGLED_VM_KNOT_OWNER` and
111
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112
-
lightweight NixOS VM like so:
113
-
114
-
```bash
115
-
nix run --impure .#vm
116
-
117
-
# type `poweroff` at the shell to exit the VM
118
-
```
119
-
120
-
This starts a knot on port 6444, a spindle on port 6555
121
-
with `ssh` exposed on port 2222.
122
-
123
-
Once the services are running, head to
124
-
http://localhost:3000/settings/knots and hit verify. It should
125
-
verify the ownership of the services instantly if everything
126
-
went smoothly.
127
-
128
-
You can push repositories to this VM with this ssh config
129
-
block on your main machine:
130
-
131
-
```bash
132
-
Host nixos-shell
133
-
Hostname localhost
134
-
Port 2222
135
-
User git
136
-
IdentityFile ~/.ssh/my_tangled_key
137
-
```
138
-
139
-
Set up a remote called `local-dev` on a git repo:
140
-
141
-
```bash
142
-
git remote add local-dev git@nixos-shell:user/repo
143
-
git push local-dev main
144
-
```
145
-
146
-
### running a spindle
147
-
148
-
The above VM should already be running a spindle on
149
-
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150
-
hit verify. You can then configure each repository to use
151
-
this spindle and run CI jobs.
152
-
153
-
Of interest when debugging spindles:
154
-
155
-
```
156
-
# service logs from journald:
157
-
journalctl -xeu spindle
158
-
159
-
# CI job logs from disk:
160
-
ls /var/log/spindle
161
-
162
-
# debugging spindle db:
163
-
sqlite3 /var/lib/spindle/spindle.db
164
-
165
-
# litecli has a nicer REPL interface:
166
-
litecli /var/lib/spindle/spindle.db
167
-
```
168
-
169
-
If for any reason you wish to disable either one of the
170
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171
-
`services.tangled.spindle.enable` (or
172
-
`services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
+93
docs/highlight.theme
···
1
+
{
2
+
"text-color": null,
3
+
"background-color": null,
4
+
"line-number-color": null,
5
+
"line-number-background-color": null,
6
+
"text-styles": {
7
+
"Annotation": {
8
+
"text-color": null,
9
+
"background-color": null,
10
+
"bold": false,
11
+
"italic": true,
12
+
"underline": false
13
+
},
14
+
"ControlFlow": {
15
+
"text-color": null,
16
+
"background-color": null,
17
+
"bold": true,
18
+
"italic": false,
19
+
"underline": false
20
+
},
21
+
"Error": {
22
+
"text-color": null,
23
+
"background-color": null,
24
+
"bold": true,
25
+
"italic": false,
26
+
"underline": false
27
+
},
28
+
"Alert": {
29
+
"text-color": null,
30
+
"background-color": null,
31
+
"bold": true,
32
+
"italic": false,
33
+
"underline": false
34
+
},
35
+
"Preprocessor": {
36
+
"text-color": null,
37
+
"background-color": null,
38
+
"bold": true,
39
+
"italic": false,
40
+
"underline": false
41
+
},
42
+
"Information": {
43
+
"text-color": null,
44
+
"background-color": null,
45
+
"bold": false,
46
+
"italic": true,
47
+
"underline": false
48
+
},
49
+
"Warning": {
50
+
"text-color": null,
51
+
"background-color": null,
52
+
"bold": false,
53
+
"italic": true,
54
+
"underline": false
55
+
},
56
+
"Documentation": {
57
+
"text-color": null,
58
+
"background-color": null,
59
+
"bold": false,
60
+
"italic": true,
61
+
"underline": false
62
+
},
63
+
"DataType": {
64
+
"text-color": "#8f4e8b",
65
+
"background-color": null,
66
+
"bold": false,
67
+
"italic": false,
68
+
"underline": false
69
+
},
70
+
"Comment": {
71
+
"text-color": null,
72
+
"background-color": null,
73
+
"bold": false,
74
+
"italic": true,
75
+
"underline": false
76
+
},
77
+
"CommentVar": {
78
+
"text-color": null,
79
+
"background-color": null,
80
+
"bold": false,
81
+
"italic": true,
82
+
"underline": false
83
+
},
84
+
"Keyword": {
85
+
"text-color": null,
86
+
"background-color": null,
87
+
"bold": true,
88
+
"italic": false,
89
+
"underline": false
90
+
}
91
+
}
92
+
}
93
+
-214
docs/knot-hosting.md
-214
docs/knot-hosting.md
···
1
-
# knot self-hosting guide
2
-
3
-
So you want to run your own knot server? Great! Here are a few prerequisites:
4
-
5
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6
-
2. A (sub)domain name. People generally use `knot.example.com`.
7
-
3. A valid SSL certificate for your domain.
8
-
9
-
There's a couple of ways to get started:
10
-
* NixOS: refer to
11
-
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12
-
* Docker: Documented at
13
-
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14
-
(community maintained: support is not guaranteed!)
15
-
* Manual: Documented below.
16
-
17
-
## manual setup
18
-
19
-
First, clone this repository:
20
-
21
-
```
22
-
git clone https://tangled.org/@tangled.org/core
23
-
```
24
-
25
-
Then, build the `knot` CLI. This is the knot administration and operation tool.
26
-
For the purpose of this guide, we're only concerned with these subcommands:
27
-
28
-
* `knot server`: the main knot server process, typically run as a
29
-
supervised service
30
-
* `knot guard`: handles role-based access control for git over SSH
31
-
(you'll never have to run this yourself)
32
-
* `knot keys`: fetches SSH keys associated with your knot; we'll use
33
-
this to generate the SSH `AuthorizedKeysCommand`
34
-
35
-
```
36
-
cd core
37
-
export CGO_ENABLED=1
38
-
go build -o knot ./cmd/knot
39
-
```
40
-
41
-
Next, move the `knot` binary to a location owned by `root` --
42
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43
-
44
-
```
45
-
sudo mv knot /usr/local/bin/knot
46
-
sudo chown root:root /usr/local/bin/knot
47
-
```
48
-
49
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
50
-
specific permissions](https://stackoverflow.com/a/27638306). The
51
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52
-
retrieve a user's public SSH keys dynamically for authentication. Let's
53
-
set that up.
54
-
55
-
```
56
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57
-
Match User git
58
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59
-
AuthorizedKeysCommandUser nobody
60
-
EOF
61
-
```
62
-
63
-
Then, reload `sshd`:
64
-
65
-
```
66
-
sudo systemctl reload ssh
67
-
```
68
-
69
-
Next, create the `git` user. We'll use the `git` user's home directory
70
-
to store repositories:
71
-
72
-
```
73
-
sudo adduser git
74
-
```
75
-
76
-
Create `/home/git/.knot.env` with the following, updating the values as
77
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
78
-
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
79
-
80
-
```
81
-
KNOT_REPO_SCAN_PATH=/home/git
82
-
KNOT_SERVER_HOSTNAME=knot.example.com
83
-
APPVIEW_ENDPOINT=https://tangled.sh
84
-
KNOT_SERVER_OWNER=did:plc:foobar
85
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87
-
```
88
-
89
-
If you run a Linux distribution that uses systemd, you can use the provided
90
-
service file to run the server. Copy
91
-
[`knotserver.service`](/systemd/knotserver.service)
92
-
to `/etc/systemd/system/`. Then, run:
93
-
94
-
```
95
-
systemctl enable knotserver
96
-
systemctl start knotserver
97
-
```
98
-
99
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
100
-
knot. Here's an example configuration for Nginx:
101
-
102
-
```
103
-
server {
104
-
listen 80;
105
-
listen [::]:80;
106
-
server_name knot.example.com;
107
-
108
-
location / {
109
-
proxy_pass http://localhost:5555;
110
-
proxy_set_header Host $host;
111
-
proxy_set_header X-Real-IP $remote_addr;
112
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113
-
proxy_set_header X-Forwarded-Proto $scheme;
114
-
}
115
-
116
-
# wss endpoint for git events
117
-
location /events {
118
-
proxy_set_header X-Forwarded-For $remote_addr;
119
-
proxy_set_header Host $http_host;
120
-
proxy_set_header Upgrade websocket;
121
-
proxy_set_header Connection Upgrade;
122
-
proxy_pass http://localhost:5555;
123
-
}
124
-
# additional config for SSL/TLS go here.
125
-
}
126
-
127
-
```
128
-
129
-
Remember to use Let's Encrypt or similar to procure a certificate for your
130
-
knot domain.
131
-
132
-
You should now have a running knot server! You can finalize
133
-
your registration by hitting the `verify` button on the
134
-
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135
-
a record on your PDS to announce the existence of the knot.
136
-
137
-
### custom paths
138
-
139
-
(This section applies to manual setup only. Docker users should edit the mounts
140
-
in `docker-compose.yml` instead.)
141
-
142
-
Right now, the database and repositories of your knot lives in `/home/git`. You
143
-
can move these paths if you'd like to store them in another folder. Be careful
144
-
when adjusting these paths:
145
-
146
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147
-
any possible side effects. Remember to restart it once you're done.
148
-
* Make backups before moving in case something goes wrong.
149
-
* Make sure the `git` user can read and write from the new paths.
150
-
151
-
#### database
152
-
153
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
154
-
and we want to move it to `/home/git/database/knotserver.db`.
155
-
156
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
157
-
and `.db-wal` files if they exist.
158
-
159
-
```
160
-
mkdir /home/git/database
161
-
cp /home/git/knotserver.db* /home/git/database
162
-
```
163
-
164
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165
-
the new file path (_not_ the directory):
166
-
167
-
```
168
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169
-
```
170
-
171
-
#### repositories
172
-
173
-
As an example, let's say the repositories are currently in `/home/git`, and we
174
-
want to move them into `/home/git/repositories`.
175
-
176
-
Create the new folder, then move the existing repositories (if there are any):
177
-
178
-
```
179
-
mkdir /home/git/repositories
180
-
# move all DIDs into the new folder; these will vary for you!
181
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182
-
```
183
-
184
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185
-
to the new directory:
186
-
187
-
```
188
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
189
-
```
190
-
191
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192
-
repository path:
193
-
194
-
```
195
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196
-
Match User git
197
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198
-
AuthorizedKeysCommandUser nobody
199
-
EOF
200
-
```
201
-
202
-
Make sure to restart your SSH server!
203
-
204
-
#### MOTD (message of the day)
205
-
206
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
207
-
`/home/git/motd` file:
208
-
209
-
```
210
-
printf "Hi from this knot!\n" > /home/git/motd
211
-
```
212
-
213
-
Note that you should add a newline at the end if setting a non-empty message
214
-
since the knot won't do this for you.
-59
docs/migrations.md
-59
docs/migrations.md
···
1
-
# Migrations
2
-
3
-
This document is laid out in reverse-chronological order.
4
-
Newer migration guides are listed first, and older guides
5
-
are further down the page.
6
-
7
-
## Upgrading from v1.8.x
8
-
9
-
After v1.8.2, the HTTP API for knot and spindles have been
10
-
deprecated and replaced with XRPC. Repositories on outdated
11
-
knots will not be viewable from the appview. Upgrading is
12
-
straightforward however.
13
-
14
-
For knots:
15
-
16
-
- Upgrade to latest tag (v1.9.0 or above)
17
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18
-
hit the "retry" button to verify your knot
19
-
20
-
For spindles:
21
-
22
-
- Upgrade to latest tag (v1.9.0 or above)
23
-
- Head to the [spindle
24
-
dashboard](https://tangled.org/settings/spindles) and hit the
25
-
"retry" button to verify your spindle
26
-
27
-
## Upgrading from v1.7.x
28
-
29
-
After v1.7.0, knot secrets have been deprecated. You no
30
-
longer need a secret from the appview to run a knot. All
31
-
authorized commands to knots are managed via [Inter-Service
32
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33
-
Knots will be read-only until upgraded.
34
-
35
-
Upgrading is quite easy, in essence:
36
-
37
-
- `KNOT_SERVER_SECRET` is no more, you can remove this
38
-
environment variable entirely
39
-
- `KNOT_SERVER_OWNER` is now required on boot, set this to
40
-
your DID. You can find your DID in the
41
-
[settings](https://tangled.org/settings) page.
42
-
- Restart your knot once you have replaced the environment
43
-
variable
44
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45
-
hit the "retry" button to verify your knot. This simply
46
-
writes a `sh.tangled.knot` record to your PDS.
47
-
48
-
If you use the nix module, simply bump the flake to the
49
-
latest revision, and change your config block like so:
50
-
51
-
```diff
52
-
services.tangled.knot = {
53
-
enable = true;
54
-
server = {
55
-
- secretFile = /path/to/secret;
56
-
+ owner = "did:plc:foo";
57
-
};
58
-
};
59
-
```
+3
docs/mode.html
+3
docs/mode.html
+7
docs/search.html
+7
docs/search.html
···
1
+
<form action="https://google.com/search" role="search" aria-label="Sitewide" class="w-full">
2
+
<input type="hidden" name="q" value="+[inurl:https://docs.tangled.org]">
3
+
<label>
4
+
<span style="display:none;">Search</span>
5
+
<input type="text" name="q" placeholder="Search docs ..." class="w-full font-normal">
6
+
</label>
7
+
</form>
-25
docs/spindle/architecture.md
-25
docs/spindle/architecture.md
···
1
-
# spindle architecture
2
-
3
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
4
-
5
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7
-
* when a new repo record comes through (typically when you add a spindle to a
8
-
repo from the settings), spindle then resolves the underlying knot and
9
-
subscribes to repo events (see:
10
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11
-
* the spindle engine then handles execution of the pipeline, with results and
12
-
logs beamed on the spindle event stream over wss
13
-
14
-
### the engine
15
-
16
-
At present, the only supported backend is Docker (and Podman, if Docker
17
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18
-
executes each step in the pipeline in a fresh container, with state persisted
19
-
across steps within the `/tangled/workspace` directory.
20
-
21
-
The base image for the container is constructed on the fly using
22
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23
-
used packages.
24
-
25
-
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
-52
docs/spindle/hosting.md
···
1
-
# spindle self-hosting guide
2
-
3
-
## prerequisites
4
-
5
-
* Go
6
-
* Docker (the only supported backend currently)
7
-
8
-
## configuration
9
-
10
-
Spindle is configured using environment variables. The following environment variables are available:
11
-
12
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21
-
22
-
## running spindle
23
-
24
-
1. **Set the environment variables.** For example:
25
-
26
-
```shell
27
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
28
-
export SPINDLE_SERVER_OWNER="your-did"
29
-
```
30
-
31
-
2. **Build the Spindle binary.**
32
-
33
-
```shell
34
-
cd core
35
-
go mod download
36
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
37
-
```
38
-
39
-
3. **Create the log directory.**
40
-
41
-
```shell
42
-
sudo mkdir -p /var/log/spindle
43
-
sudo chown $USER:$USER -R /var/log/spindle
44
-
```
45
-
46
-
4. **Run the Spindle binary.**
47
-
48
-
```shell
49
-
./cmd/spindle/spindle
50
-
```
51
-
52
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
-285
docs/spindle/openbao.md
···
1
-
# spindle secrets with openbao
2
-
3
-
This document covers setting up Spindle to use OpenBao for secrets
4
-
management via OpenBao Proxy instead of the default SQLite backend.
5
-
6
-
## overview
7
-
8
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9
-
authentication automatically using AppRole credentials, while Spindle
10
-
connects to the local proxy instead of directly to the OpenBao server.
11
-
12
-
This approach provides better security, automatic token renewal, and
13
-
simplified application code.
14
-
15
-
## installation
16
-
17
-
Install OpenBao from nixpkgs:
18
-
19
-
```bash
20
-
nix shell nixpkgs#openbao # for a local server
21
-
```
22
-
23
-
## setup
24
-
25
-
The setup process can is documented for both local development and production.
26
-
27
-
### local development
28
-
29
-
Start OpenBao in dev mode:
30
-
31
-
```bash
32
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33
-
```
34
-
35
-
This starts OpenBao on `http://localhost:8201` with a root token.
36
-
37
-
Set up environment for bao CLI:
38
-
39
-
```bash
40
-
export BAO_ADDR=http://localhost:8200
41
-
export BAO_TOKEN=root
42
-
```
43
-
44
-
### production
45
-
46
-
You would typically use a systemd service with a configuration file. Refer to
47
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48
-
achieved using Nix.
49
-
50
-
Then, initialize the bao server:
51
-
```bash
52
-
bao operator init -key-shares=1 -key-threshold=1
53
-
```
54
-
55
-
This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56
-
```bash
57
-
bao operator unseal <unseal_key>
58
-
```
59
-
60
-
All steps below remain the same across both dev and production setups.
61
-
62
-
### configure openbao server
63
-
64
-
Create the spindle KV mount:
65
-
66
-
```bash
67
-
bao secrets enable -path=spindle -version=2 kv
68
-
```
69
-
70
-
Set up AppRole authentication and policy:
71
-
72
-
Create a policy file `spindle-policy.hcl`:
73
-
74
-
```hcl
75
-
# Full access to spindle KV v2 data
76
-
path "spindle/data/*" {
77
-
capabilities = ["create", "read", "update", "delete"]
78
-
}
79
-
80
-
# Access to metadata for listing and management
81
-
path "spindle/metadata/*" {
82
-
capabilities = ["list", "read", "delete", "update"]
83
-
}
84
-
85
-
# Allow listing at root level
86
-
path "spindle/" {
87
-
capabilities = ["list"]
88
-
}
89
-
90
-
# Required for connection testing and health checks
91
-
path "auth/token/lookup-self" {
92
-
capabilities = ["read"]
93
-
}
94
-
```
95
-
96
-
Apply the policy and create an AppRole:
97
-
98
-
```bash
99
-
bao policy write spindle-policy spindle-policy.hcl
100
-
bao auth enable approle
101
-
bao write auth/approle/role/spindle \
102
-
token_policies="spindle-policy" \
103
-
token_ttl=1h \
104
-
token_max_ttl=4h \
105
-
bind_secret_id=true \
106
-
secret_id_ttl=0 \
107
-
secret_id_num_uses=0
108
-
```
109
-
110
-
Get the credentials:
111
-
112
-
```bash
113
-
# Get role ID (static)
114
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115
-
116
-
# Generate secret ID
117
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118
-
119
-
echo "Role ID: $ROLE_ID"
120
-
echo "Secret ID: $SECRET_ID"
121
-
```
122
-
123
-
### create proxy configuration
124
-
125
-
Create the credential files:
126
-
127
-
```bash
128
-
# Create directory for OpenBao files
129
-
mkdir -p /tmp/openbao
130
-
131
-
# Save credentials
132
-
echo "$ROLE_ID" > /tmp/openbao/role-id
133
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
134
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135
-
```
136
-
137
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138
-
139
-
```hcl
140
-
# OpenBao server connection
141
-
vault {
142
-
address = "http://localhost:8200"
143
-
}
144
-
145
-
# Auto-Auth using AppRole
146
-
auto_auth {
147
-
method "approle" {
148
-
mount_path = "auth/approle"
149
-
config = {
150
-
role_id_file_path = "/tmp/openbao/role-id"
151
-
secret_id_file_path = "/tmp/openbao/secret-id"
152
-
}
153
-
}
154
-
155
-
# Optional: write token to file for debugging
156
-
sink "file" {
157
-
config = {
158
-
path = "/tmp/openbao/token"
159
-
mode = 0640
160
-
}
161
-
}
162
-
}
163
-
164
-
# Proxy listener for Spindle
165
-
listener "tcp" {
166
-
address = "127.0.0.1:8201"
167
-
tls_disable = true
168
-
}
169
-
170
-
# Enable API proxy with auto-auth token
171
-
api_proxy {
172
-
use_auto_auth_token = true
173
-
}
174
-
175
-
# Enable response caching
176
-
cache {
177
-
use_auto_auth_token = true
178
-
}
179
-
180
-
# Logging
181
-
log_level = "info"
182
-
```
183
-
184
-
### start the proxy
185
-
186
-
Start OpenBao Proxy:
187
-
188
-
```bash
189
-
bao proxy -config=/tmp/openbao/proxy.hcl
190
-
```
191
-
192
-
The proxy will authenticate with OpenBao and start listening on
193
-
`127.0.0.1:8201`.
194
-
195
-
### configure spindle
196
-
197
-
Set these environment variables for Spindle:
198
-
199
-
```bash
200
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203
-
```
204
-
205
-
Start Spindle:
206
-
207
-
Spindle will now connect to the local proxy, which handles all
208
-
authentication automatically.
209
-
210
-
## production setup for proxy
211
-
212
-
For production, you'll want to run the proxy as a service:
213
-
214
-
Place your production configuration in `/etc/openbao/proxy.hcl` with
215
-
proper TLS settings for the vault connection.
216
-
217
-
## verifying setup
218
-
219
-
Test the proxy directly:
220
-
221
-
```bash
222
-
# Check proxy health
223
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224
-
225
-
# Test token lookup through proxy
226
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227
-
```
228
-
229
-
Test OpenBao operations through the server:
230
-
231
-
```bash
232
-
# List all secrets
233
-
bao kv list spindle/
234
-
235
-
# Add a test secret via Spindle API, then check it exists
236
-
bao kv list spindle/repos/
237
-
238
-
# Get a specific secret
239
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
240
-
```
241
-
242
-
## how it works
243
-
244
-
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245
-
- The proxy authenticates with OpenBao using AppRole credentials
246
-
- All Spindle requests go through the proxy, which injects authentication tokens
247
-
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248
-
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249
-
- The proxy handles all token renewal automatically
250
-
- Spindle no longer manages tokens or authentication directly
251
-
252
-
## troubleshooting
253
-
254
-
**Connection refused**: Check that the OpenBao Proxy is running and
255
-
listening on the configured address.
256
-
257
-
**403 errors**: Verify the AppRole credentials are correct and the policy
258
-
has the necessary permissions.
259
-
260
-
**404 route errors**: The spindle KV mount probably doesn't exist - run
261
-
the mount creation step again.
262
-
263
-
**Proxy authentication failures**: Check the proxy logs and verify the
264
-
role-id and secret-id files are readable and contain valid credentials.
265
-
266
-
**Secret not found after writing**: This can indicate policy permission
267
-
issues. Verify the policy includes both `spindle/data/*` and
268
-
`spindle/metadata/*` paths with appropriate capabilities.
269
-
270
-
Check proxy logs:
271
-
272
-
```bash
273
-
# If running as systemd service
274
-
journalctl -u openbao-proxy -f
275
-
276
-
# If running directly, check the console output
277
-
```
278
-
279
-
Test AppRole authentication manually:
280
-
281
-
```bash
282
-
bao write auth/approle/login \
283
-
role_id="$(cat /tmp/openbao/role-id)" \
284
-
secret_id="$(cat /tmp/openbao/secret-id)"
285
-
```
-183
docs/spindle/pipeline.md
-183
docs/spindle/pipeline.md
···
1
-
# spindle pipelines
2
-
3
-
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4
-
5
-
The fields are:
6
-
7
-
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8
-
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9
-
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10
-
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11
-
- [Environment](#environment): An **optional** field that allows you to define environment variables.
12
-
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13
-
14
-
## Trigger
15
-
16
-
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17
-
18
-
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19
-
- `push`: The workflow should run every time a commit is pushed to the repository.
20
-
- `pull_request`: The workflow should run every time a pull request is made or updated.
21
-
- `manual`: The workflow can be triggered manually.
22
-
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23
-
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24
-
25
-
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26
-
27
-
```yaml
28
-
when:
29
-
- event: ["push", "manual"]
30
-
branch: ["main", "develop"]
31
-
- event: ["pull_request"]
32
-
branch: ["main"]
33
-
```
34
-
35
-
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36
-
37
-
```yaml
38
-
when:
39
-
- event: ["push"]
40
-
tag: ["v*"]
41
-
```
42
-
43
-
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44
-
45
-
```yaml
46
-
when:
47
-
- event: ["push"]
48
-
branch: ["main", "release-*"]
49
-
tag: ["v*", "stable"]
50
-
```
51
-
52
-
## Engine
53
-
54
-
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55
-
56
-
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57
-
58
-
Example:
59
-
60
-
```yaml
61
-
engine: "nixery"
62
-
```
63
-
64
-
## Clone options
65
-
66
-
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67
-
68
-
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69
-
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70
-
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71
-
72
-
The default settings are:
73
-
74
-
```yaml
75
-
clone:
76
-
skip: false
77
-
depth: 1
78
-
submodules: false
79
-
```
80
-
81
-
## Dependencies
82
-
83
-
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84
-
85
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made available from your own registry at `https://tangled.org/@example.com/my_pkg`. You can define those dependencies like so:
86
-
87
-
```yaml
88
-
dependencies:
89
-
# nixpkgs
90
-
nixpkgs:
91
-
- nodejs
92
-
- go
93
-
# custom registry
94
-
git+https://tangled.org/@example.com/my_pkg:
95
-
- my_pkg
96
-
```
97
-
98
-
Now these dependencies are available to use in your workflow!
99
-
100
-
## Environment
101
-
102
-
The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103
-
104
-
Example:
105
-
106
-
```yaml
107
-
environment:
108
-
GOOS: "linux"
109
-
GOARCH: "arm64"
110
-
NODE_ENV: "production"
111
-
MY_ENV_VAR: "MY_ENV_VALUE"
112
-
```
113
-
114
-
## Steps
115
-
116
-
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117
-
118
-
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119
-
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120
-
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121
-
122
-
Example:
123
-
124
-
```yaml
125
-
steps:
126
-
- name: "Build backend"
127
-
command: "go build"
128
-
environment:
129
-
GOOS: "darwin"
130
-
GOARCH: "arm64"
131
-
- name: "Build frontend"
132
-
command: "npm run build"
133
-
environment:
134
-
NODE_ENV: "production"
135
-
```
136
-
137
-
## Complete workflow
138
-
139
-
```yaml
140
-
# .tangled/workflows/build.yml
141
-
142
-
when:
143
-
- event: ["push", "manual"]
144
-
branch: ["main", "develop"]
145
-
- event: ["pull_request"]
146
-
branch: ["main"]
147
-
148
-
engine: "nixery"
149
-
150
-
# using the default values
151
-
clone:
152
-
skip: false
153
-
depth: 1
154
-
submodules: false
155
-
156
-
dependencies:
157
-
# nixpkgs
158
-
nixpkgs:
159
-
- nodejs
160
-
- go
161
-
# custom registry
162
-
git+https://tangled.org/@example.com/my_pkg:
163
-
- my_pkg
164
-
165
-
environment:
166
-
GOOS: "linux"
167
-
GOARCH: "arm64"
168
-
NODE_ENV: "production"
169
-
MY_ENV_VAR: "MY_ENV_VALUE"
170
-
171
-
steps:
172
-
- name: "Build backend"
173
-
command: "go build"
174
-
environment:
175
-
GOOS: "darwin"
176
-
GOARCH: "arm64"
177
-
- name: "Build frontend"
178
-
command: "npm run build"
179
-
environment:
180
-
NODE_ENV: "production"
181
-
```
182
-
183
-
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
+101
docs/styles.css
···
1
+
svg {
2
+
width: 16px;
3
+
height: 16px;
4
+
}
5
+
6
+
:root {
7
+
--syntax-alert: #d20f39;
8
+
--syntax-annotation: #fe640b;
9
+
--syntax-attribute: #df8e1d;
10
+
--syntax-basen: #40a02b;
11
+
--syntax-builtin: #1e66f5;
12
+
--syntax-controlflow: #8839ef;
13
+
--syntax-char: #04a5e5;
14
+
--syntax-constant: #fe640b;
15
+
--syntax-comment: #9ca0b0;
16
+
--syntax-commentvar: #7c7f93;
17
+
--syntax-documentation: #9ca0b0;
18
+
--syntax-datatype: #df8e1d;
19
+
--syntax-decval: #40a02b;
20
+
--syntax-error: #d20f39;
21
+
--syntax-extension: #4c4f69;
22
+
--syntax-float: #40a02b;
23
+
--syntax-function: #1e66f5;
24
+
--syntax-import: #40a02b;
25
+
--syntax-information: #04a5e5;
26
+
--syntax-keyword: #8839ef;
27
+
--syntax-operator: #179299;
28
+
--syntax-other: #8839ef;
29
+
--syntax-preprocessor: #ea76cb;
30
+
--syntax-specialchar: #04a5e5;
31
+
--syntax-specialstring: #ea76cb;
32
+
--syntax-string: #40a02b;
33
+
--syntax-variable: #8839ef;
34
+
--syntax-verbatimstring: #40a02b;
35
+
--syntax-warning: #df8e1d;
36
+
}
37
+
38
+
@media (prefers-color-scheme: dark) {
39
+
:root {
40
+
--syntax-alert: #f38ba8;
41
+
--syntax-annotation: #fab387;
42
+
--syntax-attribute: #f9e2af;
43
+
--syntax-basen: #a6e3a1;
44
+
--syntax-builtin: #89b4fa;
45
+
--syntax-controlflow: #cba6f7;
46
+
--syntax-char: #89dceb;
47
+
--syntax-constant: #fab387;
48
+
--syntax-comment: #6c7086;
49
+
--syntax-commentvar: #585b70;
50
+
--syntax-documentation: #6c7086;
51
+
--syntax-datatype: #f9e2af;
52
+
--syntax-decval: #a6e3a1;
53
+
--syntax-error: #f38ba8;
54
+
--syntax-extension: #cdd6f4;
55
+
--syntax-float: #a6e3a1;
56
+
--syntax-function: #89b4fa;
57
+
--syntax-import: #a6e3a1;
58
+
--syntax-information: #89dceb;
59
+
--syntax-keyword: #cba6f7;
60
+
--syntax-operator: #94e2d5;
61
+
--syntax-other: #cba6f7;
62
+
--syntax-preprocessor: #f5c2e7;
63
+
--syntax-specialchar: #89dceb;
64
+
--syntax-specialstring: #f5c2e7;
65
+
--syntax-string: #a6e3a1;
66
+
--syntax-variable: #cba6f7;
67
+
--syntax-verbatimstring: #a6e3a1;
68
+
--syntax-warning: #f9e2af;
69
+
}
70
+
}
71
+
72
+
/* pandoc syntax highlighting classes */
73
+
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
74
+
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
75
+
code span.at { color: var(--syntax-attribute); } /* attribute */
76
+
code span.bn { color: var(--syntax-basen); } /* basen */
77
+
code span.bu { color: var(--syntax-builtin); } /* builtin */
78
+
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
79
+
code span.ch { color: var(--syntax-char); } /* char */
80
+
code span.cn { color: var(--syntax-constant); } /* constant */
81
+
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
82
+
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
83
+
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
84
+
code span.dt { color: var(--syntax-datatype); } /* datatype */
85
+
code span.dv { color: var(--syntax-decval); } /* decval */
86
+
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
87
+
code span.ex { color: var(--syntax-extension); } /* extension */
88
+
code span.fl { color: var(--syntax-float); } /* float */
89
+
code span.fu { color: var(--syntax-function); } /* function */
90
+
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
91
+
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
92
+
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
93
+
code span.op { color: var(--syntax-operator); } /* operator */
94
+
code span.ot { color: var(--syntax-other); } /* other */
95
+
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
96
+
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
97
+
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
98
+
code span.st { color: var(--syntax-string); } /* string */
99
+
code span.va { color: var(--syntax-variable); } /* variable */
100
+
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
101
+
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+156
docs/template.html
+156
docs/template.html
···
1
+
<!DOCTYPE html>
2
+
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<meta name="generator" content="pandoc" />
6
+
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
7
+
$for(author-meta)$
8
+
<meta name="author" content="$author-meta$" />
9
+
$endfor$
10
+
11
+
$if(date-meta)$
12
+
<meta name="dcterms.date" content="$date-meta$" />
13
+
$endif$
14
+
15
+
$if(keywords)$
16
+
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
17
+
$endif$
18
+
19
+
$if(description-meta)$
20
+
<meta name="description" content="$description-meta$" />
21
+
$endif$
22
+
23
+
<title>$pagetitle$</title>
24
+
25
+
<style>
26
+
$styles.css()$
27
+
</style>
28
+
29
+
$for(css)$
30
+
<link rel="stylesheet" href="$css$" />
31
+
$endfor$
32
+
33
+
$for(header-includes)$
34
+
$header-includes$
35
+
$endfor$
36
+
37
+
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
38
+
39
+
</head>
40
+
<body class="bg-white dark:bg-gray-900 flex flex-col min-h-svh">
41
+
$for(include-before)$
42
+
$include-before$
43
+
$endfor$
44
+
45
+
$if(toc)$
46
+
<!-- mobile TOC trigger -->
47
+
<div class="md:hidden px-6 py-4 border-b border-gray-200 dark:border-gray-700">
48
+
<button
49
+
type="button"
50
+
popovertarget="mobile-toc-popover"
51
+
popovertargetaction="toggle"
52
+
class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white"
53
+
>
54
+
${ menu.svg() }
55
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
56
+
</button>
57
+
</div>
58
+
59
+
<div
60
+
id="mobile-toc-popover"
61
+
popover
62
+
class="mobile-toc-popover
63
+
bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700
64
+
h-full overflow-y-auto shadow-sm
65
+
px-6 py-4 fixed inset-x-0 top-0 w-fit max-w-4/5 m-0"
66
+
>
67
+
<div class="flex flex-col min-h-full">
68
+
<div class="flex-1 space-y-4">
69
+
<button
70
+
type="button"
71
+
popovertarget="mobile-toc-popover"
72
+
popovertargetaction="toggle"
73
+
class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white mb-4">
74
+
${ x.svg() }
75
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
76
+
</button>
77
+
${ search.html() }
78
+
${ table-of-contents:toc.html() }
79
+
</div>
80
+
${ single-page:mode.html() }
81
+
</div>
82
+
</div>
83
+
84
+
<!-- desktop sidebar toc -->
85
+
<nav
86
+
id="$idprefix$TOC"
87
+
role="doc-toc"
88
+
class="hidden md:flex md:flex-col gap-4 fixed left-0 top-0 w-80 h-screen
89
+
bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700
90
+
p-4 z-50 overflow-y-auto">
91
+
${ search.html() }
92
+
<div class="flex-1">
93
+
$if(toc-title)$
94
+
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
95
+
$endif$
96
+
${ table-of-contents:toc.html() }
97
+
</div>
98
+
${ single-page:mode.html() }
99
+
</nav>
100
+
$endif$
101
+
102
+
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
103
+
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
104
+
$if(top)$
105
+
$-- only print title block if this is NOT the top page
106
+
$else$
107
+
$if(title)$
108
+
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
109
+
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
110
+
$if(subtitle)$
111
+
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
112
+
$endif$
113
+
$for(author)$
114
+
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
115
+
$endfor$
116
+
$if(date)$
117
+
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
118
+
$endif$
119
+
$endif$
120
+
</header>
121
+
$endif$
122
+
123
+
$if(abstract)$
124
+
<article class="prose dark:prose-invert max-w-none">
125
+
$abstract$
126
+
</article>
127
+
$endif$
128
+
129
+
<article class="prose dark:prose-invert max-w-none">
130
+
$body$
131
+
</article>
132
+
</main>
133
+
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800">
134
+
<div class="max-w-4xl mx-auto px-8 py-4">
135
+
<div class="flex justify-between gap-4">
136
+
<span class="flex-1">
137
+
$if(previous.url)$
138
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
139
+
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
140
+
$endif$
141
+
</span>
142
+
<span class="flex-1 text-right">
143
+
$if(next.url)$
144
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
145
+
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
146
+
$endif$
147
+
</span>
148
+
</div>
149
+
</div>
150
+
</nav>
151
+
</div>
152
+
$for(include-after)$
153
+
$include-after$
154
+
$endfor$
155
+
</body>
156
+
</html>
+4
docs/toc.html
+4
docs/toc.html
+6
-3
flake.nix
+6
-3
flake.nix
···
76
76
};
77
77
buildGoApplication =
78
78
(self.callPackage "${gomod2nix}/builder" {
79
-
gomod2nix = gomod2nix.legacyPackages.${pkgs.system}.gomod2nix;
79
+
gomod2nix = gomod2nix.legacyPackages.${pkgs.stdenv.hostPlatform.system}.gomod2nix;
80
80
}).buildGoApplication;
81
81
modules = ./nix/gomod2nix.toml;
82
82
sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix {
···
88
88
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
89
89
};
90
90
appview = self.callPackage ./nix/pkgs/appview.nix {};
91
+
docs = self.callPackage ./nix/pkgs/docs.nix {
92
+
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
93
+
};
91
94
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
92
95
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
93
96
knot = self.callPackage ./nix/pkgs/knot.nix {};
94
97
});
95
98
in {
96
99
overlays.default = final: prev: {
97
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
100
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
98
101
};
99
102
100
103
packages = forAllSystems (system: let
···
103
106
staticPackages = mkPackageSet pkgs.pkgsStatic;
104
107
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
105
108
in {
106
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
109
+
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;
107
110
108
111
pkgsStatic-appview = staticPackages.appview;
109
112
pkgsStatic-knot = staticPackages.knot;
+2
-1
input.css
+2
-1
input.css
···
162
162
}
163
163
164
164
.prose a.mention {
165
-
@apply no-underline hover:underline;
165
+
@apply no-underline hover:underline font-bold;
166
166
}
167
167
168
168
.prose li {
···
255
255
@apply py-1 text-gray-900 dark:text-gray-100;
256
256
}
257
257
}
258
+
258
259
}
259
260
260
261
/* Background */
+51
lexicons/comment/comment.json
+51
lexicons/comment/comment.json
···
1
+
{
2
+
"lexicon": 1,
3
+
"id": "sh.tangled.comment",
4
+
"needsCbor": true,
5
+
"needsType": true,
6
+
"defs": {
7
+
"main": {
8
+
"type": "record",
9
+
"key": "tid",
10
+
"record": {
11
+
"type": "object",
12
+
"required": [
13
+
"subject",
14
+
"body",
15
+
"createdAt"
16
+
],
17
+
"properties": {
18
+
"subject": {
19
+
"type": "string",
20
+
"format": "at-uri"
21
+
},
22
+
"body": {
23
+
"type": "string"
24
+
},
25
+
"createdAt": {
26
+
"type": "string",
27
+
"format": "datetime"
28
+
},
29
+
"replyTo": {
30
+
"type": "string",
31
+
"format": "at-uri"
32
+
},
33
+
"mentions": {
34
+
"type": "array",
35
+
"items": {
36
+
"type": "string",
37
+
"format": "did"
38
+
}
39
+
},
40
+
"references": {
41
+
"type": "array",
42
+
"items": {
43
+
"type": "string",
44
+
"format": "at-uri"
45
+
}
46
+
}
47
+
}
48
+
}
49
+
}
50
+
}
51
+
}
+10
-2
lexicons/pulls/pull.json
+10
-2
lexicons/pulls/pull.json
···
12
12
"required": [
13
13
"target",
14
14
"title",
15
-
"patch",
15
+
"patchBlob",
16
16
"createdAt"
17
17
],
18
18
"properties": {
···
27
27
"type": "string"
28
28
},
29
29
"patch": {
30
-
"type": "string"
30
+
"type": "string",
31
+
"description": "(deprecated) use patchBlob instead"
32
+
},
33
+
"patchBlob": {
34
+
"type": "blob",
35
+
"accept": [
36
+
"text/x-patch"
37
+
],
38
+
"description": "patch content"
31
39
},
32
40
"source": {
33
41
"type": "ref",
+53
nix/pkgs/docs.nix
+53
nix/pkgs/docs.nix
···
1
+
{
2
+
pandoc,
3
+
tailwindcss,
4
+
runCommandLocal,
5
+
inter-fonts-src,
6
+
ibm-plex-mono-src,
7
+
lucide-src,
8
+
src,
9
+
}:
10
+
runCommandLocal "docs" {} ''
11
+
mkdir -p working
12
+
13
+
# copy templates, themes, styles, filters to working directory
14
+
cp ${src}/docs/*.html working/
15
+
cp ${src}/docs/*.theme working/
16
+
cp ${src}/docs/*.css working/
17
+
18
+
# icons
19
+
cp -rf ${lucide-src}/*.svg working/
20
+
21
+
# content - chunked
22
+
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
23
+
-o $out/ \
24
+
-t chunkedhtml \
25
+
--variable toc \
26
+
--variable-json single-page=false \
27
+
--toc-depth=2 \
28
+
--css=stylesheet.css \
29
+
--chunk-template="%i.html" \
30
+
--highlight-style=working/highlight.theme \
31
+
--template=working/template.html
32
+
33
+
# content - single page
34
+
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
35
+
-o $out/single-page.html \
36
+
--toc \
37
+
--variable toc \
38
+
--variable single-page \
39
+
--toc-depth=2 \
40
+
--css=stylesheet.css \
41
+
--highlight-style=working/highlight.theme \
42
+
--template=working/template.html
43
+
44
+
# fonts
45
+
mkdir -p $out/static/fonts
46
+
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
47
+
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
48
+
cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
49
+
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/
50
+
51
+
# styles
52
+
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
53
+
''
+1
-1
nix/vm.nix
+1
-1
nix/vm.nix
···
8
8
var = builtins.getEnv name;
9
9
in
10
10
if var == ""
11
-
then throw "\$${name} must be defined, see docs/hacking.md for more details"
11
+
then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
12
12
else var;
13
13
envVarOr = name: default: let
14
14
var = builtins.getEnv name;
+3
-3
readme.md
+3
-3
readme.md
···
10
10
11
11
## docs
12
12
13
-
* [knot hosting guide](/docs/knot-hosting.md)
14
-
* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
15
-
* [hacking on tangled](/docs/hacking.md)
13
+
- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
14
+
- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
15
+
- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)
16
16
17
17
## security
18
18
+1
-1
spindle/motd
+1
-1
spindle/motd
+31
-13
spindle/server.go
+31
-13
spindle/server.go
···
8
8
"log/slog"
9
9
"maps"
10
10
"net/http"
11
+
"sync"
11
12
12
13
"github.com/go-chi/chi/v5"
13
14
"tangled.org/core/api/tangled"
···
30
31
)
31
32
32
33
//go:embed motd
33
-
var motd []byte
34
+
var defaultMotd []byte
34
35
35
36
const (
36
37
rbacDomain = "thisserver"
37
38
)
38
39
39
40
type Spindle struct {
40
-
jc *jetstream.JetstreamClient
41
-
db *db.DB
42
-
e *rbac.Enforcer
43
-
l *slog.Logger
44
-
n *notifier.Notifier
45
-
engs map[string]models.Engine
46
-
jq *queue.Queue
47
-
cfg *config.Config
48
-
ks *eventconsumer.Consumer
49
-
res *idresolver.Resolver
50
-
vault secrets.Manager
41
+
jc *jetstream.JetstreamClient
42
+
db *db.DB
43
+
e *rbac.Enforcer
44
+
l *slog.Logger
45
+
n *notifier.Notifier
46
+
engs map[string]models.Engine
47
+
jq *queue.Queue
48
+
cfg *config.Config
49
+
ks *eventconsumer.Consumer
50
+
res *idresolver.Resolver
51
+
vault secrets.Manager
52
+
motd []byte
53
+
motdMu sync.RWMutex
51
54
}
52
55
53
56
// New creates a new Spindle server with the provided configuration and engines.
···
128
131
cfg: cfg,
129
132
res: resolver,
130
133
vault: vault,
134
+
motd: defaultMotd,
131
135
}
132
136
133
137
err = e.AddSpindle(rbacDomain)
···
201
205
return s.e
202
206
}
203
207
208
+
// SetMotdContent sets custom MOTD content, replacing the embedded default.
209
+
func (s *Spindle) SetMotdContent(content []byte) {
210
+
s.motdMu.Lock()
211
+
defer s.motdMu.Unlock()
212
+
s.motd = content
213
+
}
214
+
215
+
// GetMotdContent returns the current MOTD content.
216
+
func (s *Spindle) GetMotdContent() []byte {
217
+
s.motdMu.RLock()
218
+
defer s.motdMu.RUnlock()
219
+
return s.motd
220
+
}
221
+
204
222
// Start starts the Spindle server (blocking).
205
223
func (s *Spindle) Start(ctx context.Context) error {
206
224
// starts a job queue runner in the background
···
246
264
mux := chi.NewRouter()
247
265
248
266
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
249
-
w.Write(motd)
267
+
w.Write(s.GetMotdContent())
250
268
})
251
269
mux.HandleFunc("/events", s.Events)
252
270
mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
+1
-1
tailwind.config.js
+1
-1
tailwind.config.js
···
2
2
const colors = require("tailwindcss/colors");
3
3
4
4
module.exports = {
5
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
5
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
6
6
darkMode: "media",
7
7
theme: {
8
8
container: {
+3
types/diff.go
+3
types/diff.go
+112
types/diff_test.go
+112
types/diff_test.go
···
1
+
package types
2
+
3
+
import "testing"
4
+
5
+
func TestDiffId(t *testing.T) {
6
+
tests := []struct {
7
+
name string
8
+
diff Diff
9
+
expected string
10
+
}{
11
+
{
12
+
name: "regular file uses new name",
13
+
diff: Diff{
14
+
Name: struct {
15
+
Old string `json:"old"`
16
+
New string `json:"new"`
17
+
}{Old: "", New: "src/main.go"},
18
+
},
19
+
expected: "src/main.go",
20
+
},
21
+
{
22
+
name: "new file uses new name",
23
+
diff: Diff{
24
+
Name: struct {
25
+
Old string `json:"old"`
26
+
New string `json:"new"`
27
+
}{Old: "", New: "src/new.go"},
28
+
IsNew: true,
29
+
},
30
+
expected: "src/new.go",
31
+
},
32
+
{
33
+
name: "deleted file uses old name",
34
+
diff: Diff{
35
+
Name: struct {
36
+
Old string `json:"old"`
37
+
New string `json:"new"`
38
+
}{Old: "src/deleted.go", New: ""},
39
+
IsDelete: true,
40
+
},
41
+
expected: "src/deleted.go",
42
+
},
43
+
{
44
+
name: "renamed file uses new name",
45
+
diff: Diff{
46
+
Name: struct {
47
+
Old string `json:"old"`
48
+
New string `json:"new"`
49
+
}{Old: "src/old.go", New: "src/renamed.go"},
50
+
IsRename: true,
51
+
},
52
+
expected: "src/renamed.go",
53
+
},
54
+
}
55
+
56
+
for _, tt := range tests {
57
+
t.Run(tt.name, func(t *testing.T) {
58
+
if got := tt.diff.Id(); got != tt.expected {
59
+
t.Errorf("Diff.Id() = %q, want %q", got, tt.expected)
60
+
}
61
+
})
62
+
}
63
+
}
64
+
65
+
func TestChangedFilesMatchesDiffId(t *testing.T) {
66
+
// ChangedFiles() must return values matching each Diff's Id()
67
+
// so that sidebar links point to the correct anchors.
68
+
// Tests existing, deleted, new, and renamed files.
69
+
nd := NiceDiff{
70
+
Diff: []Diff{
71
+
{
72
+
Name: struct {
73
+
Old string `json:"old"`
74
+
New string `json:"new"`
75
+
}{Old: "", New: "src/modified.go"},
76
+
},
77
+
{
78
+
Name: struct {
79
+
Old string `json:"old"`
80
+
New string `json:"new"`
81
+
}{Old: "src/deleted.go", New: ""},
82
+
IsDelete: true,
83
+
},
84
+
{
85
+
Name: struct {
86
+
Old string `json:"old"`
87
+
New string `json:"new"`
88
+
}{Old: "", New: "src/new.go"},
89
+
IsNew: true,
90
+
},
91
+
{
92
+
Name: struct {
93
+
Old string `json:"old"`
94
+
New string `json:"new"`
95
+
}{Old: "src/old.go", New: "src/renamed.go"},
96
+
IsRename: true,
97
+
},
98
+
},
99
+
}
100
+
101
+
changedFiles := nd.ChangedFiles()
102
+
103
+
if len(changedFiles) != len(nd.Diff) {
104
+
t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff))
105
+
}
106
+
107
+
for i, diff := range nd.Diff {
108
+
if changedFiles[i] != diff.Id() {
109
+
t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id())
110
+
}
111
+
}
112
+
}