api/tangled/cbor_gen.go  +79 -20
···
7934
7934
}
7935
7935
7936
7936
cw := cbg.NewCborWriter(w)
7937
-
fieldCount := 9
7937
+
fieldCount := 10
7938
7938
7939
7939
if t.Body == nil {
7940
7940
fieldCount--
7941
7941
}
7942
7942
7943
7943
if t.Mentions == nil {
7944
+
fieldCount--
7945
+
}
7946
+
7947
+
if t.Patch == nil {
7944
7948
fieldCount--
7945
7949
}
7946
7950
···
8008
8012
}
8009
8013
8010
8014
// t.Patch (string) (string)
8011
-
if len("patch") > 1000000 {
8012
-
return xerrors.Errorf("Value in field \"patch\" was too long")
8013
-
}
8015
+
if t.Patch != nil {
8014
8016
8015
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
8016
-
return err
8017
-
}
8018
-
if _, err := cw.WriteString(string("patch")); err != nil {
8019
-
return err
8020
-
}
8017
+
if len("patch") > 1000000 {
8018
+
return xerrors.Errorf("Value in field \"patch\" was too long")
8019
+
}
8021
8020
8022
-
if len(t.Patch) > 1000000 {
8023
-
return xerrors.Errorf("Value in field t.Patch was too long")
8024
-
}
8021
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
8022
+
return err
8023
+
}
8024
+
if _, err := cw.WriteString(string("patch")); err != nil {
8025
+
return err
8026
+
}
8027
+
8028
+
if t.Patch == nil {
8029
+
if _, err := cw.Write(cbg.CborNull); err != nil {
8030
+
return err
8031
+
}
8032
+
} else {
8033
+
if len(*t.Patch) > 1000000 {
8034
+
return xerrors.Errorf("Value in field t.Patch was too long")
8035
+
}
8025
8036
8026
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil {
8027
-
return err
8028
-
}
8029
-
if _, err := cw.WriteString(string(t.Patch)); err != nil {
8030
-
return err
8037
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil {
8038
+
return err
8039
+
}
8040
+
if _, err := cw.WriteString(string(*t.Patch)); err != nil {
8041
+
return err
8042
+
}
8043
+
}
8031
8044
}
8032
8045
8033
8046
// t.Title (string) (string)
···
8147
8160
return err
8148
8161
}
8149
8162
8163
+
// t.PatchBlob (util.LexBlob) (struct)
8164
+
if len("patchBlob") > 1000000 {
8165
+
return xerrors.Errorf("Value in field \"patchBlob\" was too long")
8166
+
}
8167
+
8168
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil {
8169
+
return err
8170
+
}
8171
+
if _, err := cw.WriteString(string("patchBlob")); err != nil {
8172
+
return err
8173
+
}
8174
+
8175
+
if err := t.PatchBlob.MarshalCBOR(cw); err != nil {
8176
+
return err
8177
+
}
8178
+
8150
8179
// t.References ([]string) (slice)
8151
8180
if t.References != nil {
8152
8181
···
8262
8291
case "patch":
8263
8292
8264
8293
{
8265
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8294
+
b, err := cr.ReadByte()
8266
8295
if err != nil {
8267
8296
return err
8268
8297
}
8298
+
if b != cbg.CborNull[0] {
8299
+
if err := cr.UnreadByte(); err != nil {
8300
+
return err
8301
+
}
8269
8302
8270
-
t.Patch = string(sval)
8303
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8304
+
if err != nil {
8305
+
return err
8306
+
}
8307
+
8308
+
t.Patch = (*string)(&sval)
8309
+
}
8271
8310
}
8272
8311
// t.Title (string) (string)
8273
8312
case "title":
···
8370
8409
}
8371
8410
8372
8411
t.CreatedAt = string(sval)
8412
+
}
8413
+
// t.PatchBlob (util.LexBlob) (struct)
8414
+
case "patchBlob":
8415
+
8416
+
{
8417
+
8418
+
b, err := cr.ReadByte()
8419
+
if err != nil {
8420
+
return err
8421
+
}
8422
+
if b != cbg.CborNull[0] {
8423
+
if err := cr.UnreadByte(); err != nil {
8424
+
return err
8425
+
}
8426
+
t.PatchBlob = new(util.LexBlob)
8427
+
if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil {
8428
+
return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err)
8429
+
}
8430
+
}
8431
+
8373
8432
}
8374
8433
// t.References ([]string) (slice)
8375
8434
case "references":
api/tangled/repopull.go  +12 -9
···
17
17
} //
18
18
// RECORDTYPE: RepoPull
19
19
type RepoPull struct {
20
-
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
21
-
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22
-
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
-
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
24
-
Patch string `json:"patch" cborgen:"patch"`
25
-
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
26
-
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
27
-
Target *RepoPull_Target `json:"target" cborgen:"target"`
28
-
Title string `json:"title" cborgen:"title"`
20
+
LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
21
+
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22
+
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
+
Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
24
+
// patch: (deprecated) use patchBlob instead
25
+
Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
26
+
// patchBlob: patch content
27
+
PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"`
28
+
References []string `json:"references,omitempty" cborgen:"references,omitempty"`
29
+
Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
30
+
Target *RepoPull_Target `json:"target" cborgen:"target"`
31
+
Title string `json:"title" cborgen:"title"`
29
32
}
30
33
31
34
// RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
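With the schema change above, the patch now travels as a blob reference; the inline patch string stays only for reading older records. A hedged sketch of what a new call site might look like — blob is assumed to be the *util.LexBlob returned from uploading the patch bytes to the PDS (e.g. via com.atproto.repo.uploadBlob), and the field values are purely illustrative:

pull := tangled.RepoPull{
	Title:     "fix: handle empty refs",
	Target:    target, // *tangled.RepoPull_Target, built elsewhere
	CreatedAt: time.Now().Format(time.RFC3339),
	PatchBlob: blob,   // patch content now lives behind a blob reference
	// Patch is left nil; the deprecated inline field is only read back
	// from records created before this change.
}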
appview/commitverify/verify.go  +6 -45
···
3
3
import (
4
4
"log"
5
5
6
-
"github.com/go-git/go-git/v5/plumbing/object"
7
6
"tangled.org/core/appview/db"
8
7
"tangled.org/core/appview/models"
9
8
"tangled.org/core/crypto"
···
35
34
return ""
36
35
}
37
36
38
-
func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
39
-
ndCommits := []types.NiceDiff{}
40
-
for _, commit := range commits {
41
-
ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
42
-
}
43
-
return GetVerifiedCommits(e, emailToDid, ndCommits)
44
-
}
45
-
46
-
func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
37
+
func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
47
38
vcs := VerifiedCommits{}
48
39
49
40
didPubkeyCache := make(map[string][]models.PublicKey)
50
41
51
42
for _, commit := range ndCommits {
52
-
c := commit.Commit
53
-
54
-
committerEmail := c.Committer.Email
43
+
committerEmail := commit.Committer.Email
55
44
if did, exists := emailToDid[committerEmail]; exists {
56
45
// check if we've already fetched public keys for this did
57
46
pubKeys, ok := didPubkeyCache[did]
···
67
56
}
68
57
69
58
// try to verify with any associated pubkeys
59
+
payload := commit.Payload()
60
+
signature := commit.PGPSignature
70
61
for _, pk := range pubKeys {
71
-
if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
62
+
if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
72
63
73
64
fp, err := crypto.SSHFingerprint(pk.Key)
74
65
if err != nil {
75
66
log.Println("error computing ssh fingerprint:", err)
76
67
}
77
68
78
-
vc := verifiedCommit{fingerprint: fp, hash: c.This}
69
+
vc := verifiedCommit{fingerprint: fp, hash: commit.This}
79
70
vcs[vc] = struct{}{}
80
71
break
81
72
}
···
86
77
87
78
return vcs, nil
88
79
}
89
-
90
-
// ObjectCommitToNiceDiff is a compatibility function to convert a
91
-
// commit object into a NiceDiff structure.
92
-
func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
93
-
var niceDiff types.NiceDiff
94
-
95
-
// set commit information
96
-
niceDiff.Commit.Message = c.Message
97
-
niceDiff.Commit.Author = c.Author
98
-
niceDiff.Commit.This = c.Hash.String()
99
-
niceDiff.Commit.Committer = c.Committer
100
-
niceDiff.Commit.Tree = c.TreeHash.String()
101
-
niceDiff.Commit.PGPSignature = c.PGPSignature
102
-
103
-
changeId, ok := c.ExtraHeaders["change-id"]
104
-
if ok {
105
-
niceDiff.Commit.ChangedId = string(changeId)
106
-
}
107
-
108
-
// set parent hash if available
109
-
if len(c.ParentHashes) > 0 {
110
-
niceDiff.Commit.Parent = c.ParentHashes[0].String()
111
-
}
112
-
113
-
// XXX: Stats and Diff fields are typically populated
114
-
// after fetching the actual diff information, which isn't
115
-
// directly available in the commit object itself.
116
-
117
-
return niceDiff
118
-
}
appview/db/artifact.go  +3 -2
···
8
8
"github.com/go-git/go-git/v5/plumbing"
9
9
"github.com/ipfs/go-cid"
10
10
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
11
12
)
12
13
13
14
func AddArtifact(e Execer, artifact models.Artifact) error {
···
37
38
return err
38
39
}
39
40
40
-
func GetArtifact(e Execer, filters ...filter) ([]models.Artifact, error) {
41
+
func GetArtifact(e Execer, filters ...orm.Filter) ([]models.Artifact, error) {
41
42
var artifacts []models.Artifact
42
43
43
44
var conditions []string
···
109
110
return artifacts, nil
110
111
}
111
112
112
-
func DeleteArtifact(e Execer, filters ...filter) error {
113
+
func DeleteArtifact(e Execer, filters ...orm.Filter) error {
113
114
var conditions []string
114
115
var args []any
115
116
for _, filter := range filters {
appview/db/collaborators.go  +4 -3
···
6
6
"time"
7
7
8
8
"tangled.org/core/appview/models"
9
+
"tangled.org/core/orm"
9
10
)
10
11
11
12
func AddCollaborator(e Execer, c models.Collaborator) error {
···
16
17
return err
17
18
}
18
19
19
-
func DeleteCollaborator(e Execer, filters ...filter) error {
20
+
func DeleteCollaborator(e Execer, filters ...orm.Filter) error {
20
21
var conditions []string
21
22
var args []any
22
23
for _, filter := range filters {
···
58
59
return nil, nil
59
60
}
60
61
61
-
return GetRepos(e, 0, FilterIn("at_uri", repoAts))
62
+
return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
62
63
}
63
64
64
-
func GetCollaborators(e Execer, filters ...filter) ([]models.Collaborator, error) {
65
+
func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) {
65
66
var collaborators []models.Collaborator
66
67
var conditions []string
67
68
var args []any
appview/db/db.go  +24 -137
···
3
3
import (
4
4
"context"
5
5
"database/sql"
6
-
"fmt"
7
6
"log/slog"
8
-
"reflect"
9
7
"strings"
10
8
11
9
_ "github.com/mattn/go-sqlite3"
12
10
"tangled.org/core/log"
11
+
"tangled.org/core/orm"
13
12
)
14
13
15
14
type DB struct {
···
584
583
}
585
584
586
585
// run migrations
587
-
runMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
586
+
orm.RunMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
588
587
tx.Exec(`
589
588
alter table repos add column description text check (length(description) <= 200);
590
589
`)
591
590
return nil
592
591
})
593
592
594
-
runMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
593
+
orm.RunMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
595
594
// add unconstrained column
596
595
_, err := tx.Exec(`
597
596
alter table public_keys
···
614
613
return nil
615
614
})
616
615
617
-
runMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
616
+
orm.RunMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
618
617
_, err := tx.Exec(`
619
618
alter table comments drop column comment_at;
620
619
alter table comments add column rkey text;
···
622
621
return err
623
622
})
624
623
625
-
runMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
624
+
orm.RunMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
626
625
_, err := tx.Exec(`
627
626
alter table comments add column deleted text; -- timestamp
628
627
alter table comments add column edited text; -- timestamp
···
630
629
return err
631
630
})
632
631
633
-
runMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
632
+
orm.RunMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
634
633
_, err := tx.Exec(`
635
634
alter table pulls add column source_branch text;
636
635
alter table pulls add column source_repo_at text;
···
639
638
return err
640
639
})
641
640
642
-
runMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
641
+
orm.RunMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
643
642
_, err := tx.Exec(`
644
643
alter table repos add column source text;
645
644
`)
···
651
650
//
652
651
// [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
653
652
conn.ExecContext(ctx, "pragma foreign_keys = off;")
654
-
runMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
653
+
orm.RunMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
655
654
_, err := tx.Exec(`
656
655
create table pulls_new (
657
656
-- identifiers
···
708
707
})
709
708
conn.ExecContext(ctx, "pragma foreign_keys = on;")
710
709
711
-
runMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
710
+
orm.RunMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
712
711
tx.Exec(`
713
712
alter table repos add column spindle text;
714
713
`)
···
718
717
// drop all knot secrets, add unique constraint to knots
719
718
//
720
719
// knots will henceforth use service auth for signed requests
721
-
runMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
720
+
orm.RunMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
722
721
_, err := tx.Exec(`
723
722
create table registrations_new (
724
723
id integer primary key autoincrement,
···
741
740
})
742
741
743
742
// recreate and add rkey + created columns with default constraint
744
-
runMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
743
+
orm.RunMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
745
744
// create new table
746
745
// - repo_at instead of repo integer
747
746
// - rkey field
···
795
794
return err
796
795
})
797
796
798
-
runMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
797
+
orm.RunMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
799
798
_, err := tx.Exec(`
800
799
alter table issues add column rkey text not null default '';
801
800
···
807
806
})
808
807
809
808
// repurpose the read-only column to "needs-upgrade"
810
-
runMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
809
+
orm.RunMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
811
810
_, err := tx.Exec(`
812
811
alter table registrations rename column read_only to needs_upgrade;
813
812
`)
···
815
814
})
816
815
817
816
// require all knots to upgrade after the release of total xrpc
818
-
runMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
817
+
orm.RunMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
819
818
_, err := tx.Exec(`
820
819
update registrations set needs_upgrade = 1;
821
820
`)
···
823
822
})
824
823
825
824
// require all knots to upgrade after the release of total xrpc
826
-
runMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
825
+
orm.RunMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
827
826
_, err := tx.Exec(`
828
827
alter table spindles add column needs_upgrade integer not null default 0;
829
828
`)
···
841
840
//
842
841
// disable foreign-keys for the next migration
843
842
conn.ExecContext(ctx, "pragma foreign_keys = off;")
844
-
runMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
843
+
orm.RunMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
845
844
_, err := tx.Exec(`
846
845
create table if not exists issues_new (
847
846
-- identifiers
···
911
910
// - new columns
912
911
// * column "reply_to" which can be any other comment
913
912
// * column "at-uri" which is a generated column
914
-
runMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
913
+
orm.RunMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
915
914
_, err := tx.Exec(`
916
915
create table if not exists issue_comments (
917
916
-- identifiers
···
971
970
//
972
971
// disable foreign-keys for the next migration
973
972
conn.ExecContext(ctx, "pragma foreign_keys = off;")
974
-
runMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
973
+
orm.RunMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
975
974
_, err := tx.Exec(`
976
975
create table if not exists pulls_new (
977
976
-- identifiers
···
1052
1051
//
1053
1052
// disable foreign-keys for the next migration
1054
1053
conn.ExecContext(ctx, "pragma foreign_keys = off;")
1055
-
runMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
1054
+
orm.RunMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
1056
1055
_, err := tx.Exec(`
1057
1056
create table if not exists pull_submissions_new (
1058
1057
-- identifiers
···
1106
1105
1107
1106
// knots may report the combined patch for a comparison, we can store that on the appview side
1108
1107
// (but not on the pds record), because calculating the combined patch requires a git index
1109
-
runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
1108
+
orm.RunMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
1110
1109
_, err := tx.Exec(`
1111
1110
alter table pull_submissions add column combined text;
1112
1111
`)
1113
1112
return err
1114
1113
})
1115
1114
1116
-
runMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
1115
+
orm.RunMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
1117
1116
_, err := tx.Exec(`
1118
1117
alter table profile add column pronouns text;
1119
1118
`)
1120
1119
return err
1121
1120
})
1122
1121
1123
-
runMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
1122
+
orm.RunMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
1124
1123
_, err := tx.Exec(`
1125
1124
alter table repos add column website text;
1126
1125
alter table repos add column topics text;
···
1128
1127
return err
1129
1128
})
1130
1129
1131
-
runMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
1130
+
orm.RunMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
1132
1131
_, err := tx.Exec(`
1133
1132
alter table notification_preferences add column user_mentioned integer not null default 1;
1134
1133
`)
···
1136
1135
})
1137
1136
1138
1137
// remove the foreign key constraints from stars.
1139
-
runMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
1138
+
orm.RunMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
1140
1139
_, err := tx.Exec(`
1141
1140
create table stars_new (
1142
1141
id integer primary key autoincrement,
···
1187
1186
}, nil
1188
1187
}
1189
1188
1190
-
type migrationFn = func(*sql.Tx) error
1191
-
1192
-
func runMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
1193
-
logger = logger.With("migration", name)
1194
-
1195
-
tx, err := c.BeginTx(context.Background(), nil)
1196
-
if err != nil {
1197
-
return err
1198
-
}
1199
-
defer tx.Rollback()
1200
-
1201
-
var exists bool
1202
-
err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
1203
-
if err != nil {
1204
-
return err
1205
-
}
1206
-
1207
-
if !exists {
1208
-
// run migration
1209
-
err = migrationFn(tx)
1210
-
if err != nil {
1211
-
logger.Error("failed to run migration", "err", err)
1212
-
return err
1213
-
}
1214
-
1215
-
// mark migration as complete
1216
-
_, err = tx.Exec("insert into migrations (name) values (?)", name)
1217
-
if err != nil {
1218
-
logger.Error("failed to mark migration as complete", "err", err)
1219
-
return err
1220
-
}
1221
-
1222
-
// commit the transaction
1223
-
if err := tx.Commit(); err != nil {
1224
-
return err
1225
-
}
1226
-
1227
-
logger.Info("migration applied successfully")
1228
-
} else {
1229
-
logger.Warn("skipped migration, already applied")
1230
-
}
1231
-
1232
-
return nil
1233
-
}
1234
-
1235
1189
func (d *DB) Close() error {
1236
1190
return d.DB.Close()
1237
1191
}
1238
-
1239
-
type filter struct {
1240
-
key string
1241
-
arg any
1242
-
cmp string
1243
-
}
1244
-
1245
-
func newFilter(key, cmp string, arg any) filter {
1246
-
return filter{
1247
-
key: key,
1248
-
arg: arg,
1249
-
cmp: cmp,
1250
-
}
1251
-
}
1252
-
1253
-
func FilterEq(key string, arg any) filter { return newFilter(key, "=", arg) }
1254
-
func FilterNotEq(key string, arg any) filter { return newFilter(key, "<>", arg) }
1255
-
func FilterGte(key string, arg any) filter { return newFilter(key, ">=", arg) }
1256
-
func FilterLte(key string, arg any) filter { return newFilter(key, "<=", arg) }
1257
-
func FilterIs(key string, arg any) filter { return newFilter(key, "is", arg) }
1258
-
func FilterIsNot(key string, arg any) filter { return newFilter(key, "is not", arg) }
1259
-
func FilterIn(key string, arg any) filter { return newFilter(key, "in", arg) }
1260
-
func FilterLike(key string, arg any) filter { return newFilter(key, "like", arg) }
1261
-
func FilterNotLike(key string, arg any) filter { return newFilter(key, "not like", arg) }
1262
-
func FilterContains(key string, arg any) filter {
1263
-
return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
1264
-
}
1265
-
1266
-
func (f filter) Condition() string {
1267
-
rv := reflect.ValueOf(f.arg)
1268
-
kind := rv.Kind()
1269
-
1270
-
// if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
1271
-
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
1272
-
if rv.Len() == 0 {
1273
-
// always false
1274
-
return "1 = 0"
1275
-
}
1276
-
1277
-
placeholders := make([]string, rv.Len())
1278
-
for i := range placeholders {
1279
-
placeholders[i] = "?"
1280
-
}
1281
-
1282
-
return fmt.Sprintf("%s %s (%s)", f.key, f.cmp, strings.Join(placeholders, ", "))
1283
-
}
1284
-
1285
-
return fmt.Sprintf("%s %s ?", f.key, f.cmp)
1286
-
}
1287
-
1288
-
func (f filter) Arg() []any {
1289
-
rv := reflect.ValueOf(f.arg)
1290
-
kind := rv.Kind()
1291
-
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
1292
-
if rv.Len() == 0 {
1293
-
return nil
1294
-
}
1295
-
1296
-
out := make([]any, rv.Len())
1297
-
for i := range rv.Len() {
1298
-
out[i] = rv.Index(i).Interface()
1299
-
}
1300
-
return out
1301
-
}
1302
-
1303
-
return []any{f.arg}
1304
-
}
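The filter type, its Filter* constructors, and runMigration removed here now live in the shared tangled.org/core/orm package that the rest of this diff imports. That package isn't shown in the diff, but from the removed code and the call sites below it presumably looks roughly like the sketch here: Key must be exported (appview/db/pipeline.go rewrites it to "p." + filter.Key), Condition() and Arg() keep their old behaviour, and RunMigration is assumed to be the old runMigration moved as-is, taking a *sql.Conn, *slog.Logger, migration name, and migration func.

package orm

import (
	"fmt"
	"reflect"
	"strings"
)

// Filter describes one SQL predicate. Key is exported so call sites can
// rewrite it, e.g. filter.Key = "p." + filter.Key for an aliased table;
// the other fields stay unexported to avoid clashing with the Arg() method.
type Filter struct {
	Key string
	arg any
	cmp string
}

func newFilter(key, cmp string, arg any) Filter {
	return Filter{Key: key, arg: arg, cmp: cmp}
}

func FilterEq(key string, arg any) Filter    { return newFilter(key, "=", arg) }
func FilterNotEq(key string, arg any) Filter { return newFilter(key, "<>", arg) }
func FilterGte(key string, arg any) Filter   { return newFilter(key, ">=", arg) }
func FilterLte(key string, arg any) Filter   { return newFilter(key, "<=", arg) }
func FilterIn(key string, arg any) Filter    { return newFilter(key, "in", arg) }
// FilterIs, FilterIsNot, FilterLike, FilterNotLike and FilterContains
// presumably follow the same shape as before.

// Condition compiles to "key cmp ?", or "key in (?, ?, …)" when the argument
// is a non-byte slice; an empty slice compiles to the always-false "1 = 0".
func (f Filter) Condition() string {
	rv := reflect.ValueOf(f.arg)
	kind := rv.Kind()
	if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
		if rv.Len() == 0 {
			return "1 = 0"
		}
		placeholders := make([]string, rv.Len())
		for i := range placeholders {
			placeholders[i] = "?"
		}
		return fmt.Sprintf("%s %s (%s)", f.Key, f.cmp, strings.Join(placeholders, ", "))
	}
	return fmt.Sprintf("%s %s ?", f.Key, f.cmp)
}

// Arg returns the placeholder arguments matching Condition.
func (f Filter) Arg() []any {
	rv := reflect.ValueOf(f.arg)
	kind := rv.Kind()
	if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
		if rv.Len() == 0 {
			return nil
		}
		out := make([]any, rv.Len())
		for i := 0; i < rv.Len(); i++ {
			out[i] = rv.Index(i).Interface()
		}
		return out
	}
	return []any{f.arg}
}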
appview/db/follow.go  +6 -3
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
13
func AddFollow(e Execer, follow *models.Follow) error {
···
134
135
return result, nil
135
136
}
136
137
137
-
func GetFollows(e Execer, limit int, filters ...filter) ([]models.Follow, error) {
138
+
func GetFollows(e Execer, limit int, filters ...orm.Filter) ([]models.Follow, error) {
138
139
var follows []models.Follow
139
140
140
141
var conditions []string
···
166
167
if err != nil {
167
168
return nil, err
168
169
}
170
+
defer rows.Close()
171
+
169
172
for rows.Next() {
170
173
var follow models.Follow
171
174
var followedAt string
···
191
194
}
192
195
193
196
func GetFollowers(e Execer, did string) ([]models.Follow, error) {
194
-
return GetFollows(e, 0, FilterEq("subject_did", did))
197
+
return GetFollows(e, 0, orm.FilterEq("subject_did", did))
195
198
}
196
199
197
200
func GetFollowing(e Execer, did string) ([]models.Follow, error) {
198
-
return GetFollows(e, 0, FilterEq("user_did", did))
201
+
return GetFollows(e, 0, orm.FilterEq("user_did", did))
199
202
}
200
203
201
204
func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
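Alongside the filter rename, several query helpers in this package gain a defer rows.Close() immediately after a successful Query, so the underlying connection is released even when a later scan fails or the function returns early. The shape of that fix, condensed into a hypothetical generic helper — Execer is the package's own interface, its Query is assumed to return *sql.Rows as the call sites above imply, and scanRow stands in for each function's Scan logic:

func collectRows[T any](e Execer, scanRow func(*sql.Rows) (T, error), query string, args ...any) ([]T, error) {
	rows, err := e.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // the line this diff adds throughout: rows are always released

	var out []T
	for rows.Next() {
		v, err := scanRow(rows)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, rows.Err()
}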
appview/db/issues.go  +22 -20
···
13
13
"tangled.org/core/api/tangled"
14
14
"tangled.org/core/appview/models"
15
15
"tangled.org/core/appview/pagination"
16
+
"tangled.org/core/orm"
16
17
)
17
18
18
19
func PutIssue(tx *sql.Tx, issue *models.Issue) error {
···
27
28
28
29
issues, err := GetIssues(
29
30
tx,
30
-
FilterEq("did", issue.Did),
31
-
FilterEq("rkey", issue.Rkey),
31
+
orm.FilterEq("did", issue.Did),
32
+
orm.FilterEq("rkey", issue.Rkey),
32
33
)
33
34
switch {
34
35
case err != nil:
···
98
99
return nil
99
100
}
100
101
101
-
func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]models.Issue, error) {
102
+
func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) {
102
103
issueMap := make(map[string]*models.Issue) // at-uri -> issue
103
104
104
105
var conditions []string
···
114
115
whereClause = " where " + strings.Join(conditions, " and ")
115
116
}
116
117
117
-
pLower := FilterGte("row_num", page.Offset+1)
118
-
pUpper := FilterLte("row_num", page.Offset+page.Limit)
118
+
pLower := orm.FilterGte("row_num", page.Offset+1)
119
+
pUpper := orm.FilterLte("row_num", page.Offset+page.Limit)
119
120
120
121
pageClause := ""
121
122
if page.Limit > 0 {
···
205
206
repoAts = append(repoAts, string(issue.RepoAt))
206
207
}
207
208
208
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
209
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
209
210
if err != nil {
210
211
return nil, fmt.Errorf("failed to build repo mappings: %w", err)
211
212
}
···
228
229
// collect comments
229
230
issueAts := slices.Collect(maps.Keys(issueMap))
230
231
231
-
comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
232
+
comments, err := GetIssueComments(e, orm.FilterIn("issue_at", issueAts))
232
233
if err != nil {
233
234
return nil, fmt.Errorf("failed to query comments: %w", err)
234
235
}
···
240
241
}
241
242
242
243
// collect allLabels for each issue
243
-
allLabels, err := GetLabels(e, FilterIn("subject", issueAts))
244
+
allLabels, err := GetLabels(e, orm.FilterIn("subject", issueAts))
244
245
if err != nil {
245
246
return nil, fmt.Errorf("failed to query labels: %w", err)
246
247
}
···
251
252
}
252
253
253
254
// collect references for each issue
254
-
allReferencs, err := GetReferencesAll(e, FilterIn("from_at", issueAts))
255
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", issueAts))
255
256
if err != nil {
256
257
return nil, fmt.Errorf("failed to query reference_links: %w", err)
257
258
}
···
277
278
issues, err := GetIssuesPaginated(
278
279
e,
279
280
pagination.Page{},
280
-
FilterEq("repo_at", repoAt),
281
-
FilterEq("issue_id", issueId),
281
+
orm.FilterEq("repo_at", repoAt),
282
+
orm.FilterEq("issue_id", issueId),
282
283
)
283
284
if err != nil {
284
285
return nil, err
···
290
291
return &issues[0], nil
291
292
}
292
293
293
-
func GetIssues(e Execer, filters ...filter) ([]models.Issue, error) {
294
+
func GetIssues(e Execer, filters ...orm.Filter) ([]models.Issue, error) {
294
295
return GetIssuesPaginated(e, pagination.Page{}, filters...)
295
296
}
296
297
···
298
299
func GetIssueIDs(e Execer, opts models.IssueSearchOptions) ([]int64, error) {
299
300
var ids []int64
300
301
301
-
var filters []filter
302
+
var filters []orm.Filter
302
303
openValue := 0
303
304
if opts.IsOpen {
304
305
openValue = 1
305
306
}
306
-
filters = append(filters, FilterEq("open", openValue))
307
+
filters = append(filters, orm.FilterEq("open", openValue))
307
308
if opts.RepoAt != "" {
308
-
filters = append(filters, FilterEq("repo_at", opts.RepoAt))
309
+
filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
309
310
}
310
311
311
312
var conditions []string
···
397
398
return id, nil
398
399
}
399
400
400
-
func DeleteIssueComments(e Execer, filters ...filter) error {
401
+
func DeleteIssueComments(e Execer, filters ...orm.Filter) error {
401
402
var conditions []string
402
403
var args []any
403
404
for _, filter := range filters {
···
416
417
return err
417
418
}
418
419
419
-
func GetIssueComments(e Execer, filters ...filter) ([]models.IssueComment, error) {
420
+
func GetIssueComments(e Execer, filters ...orm.Filter) ([]models.IssueComment, error) {
420
421
commentMap := make(map[string]*models.IssueComment)
421
422
422
423
var conditions []string
···
451
452
if err != nil {
452
453
return nil, err
453
454
}
455
+
defer rows.Close()
454
456
455
457
for rows.Next() {
456
458
var comment models.IssueComment
···
506
508
507
509
// collect references for each comments
508
510
commentAts := slices.Collect(maps.Keys(commentMap))
509
-
allReferencs, err := GetReferencesAll(e, FilterIn("from_at", commentAts))
511
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
510
512
if err != nil {
511
513
return nil, fmt.Errorf("failed to query reference_links: %w", err)
512
514
}
···
548
550
return nil
549
551
}
550
552
551
-
func CloseIssues(e Execer, filters ...filter) error {
553
+
func CloseIssues(e Execer, filters ...orm.Filter) error {
552
554
var conditions []string
553
555
var args []any
554
556
for _, filter := range filters {
···
566
568
return err
567
569
}
568
570
569
-
func ReopenIssues(e Execer, filters ...filter) error {
571
+
func ReopenIssues(e Execer, filters ...orm.Filter) error {
570
572
var conditions []string
571
573
var args []any
572
574
for _, filter := range filters {
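A hypothetical call site after the rename, for illustration — predicates now come from the shared orm package, pagination is unchanged, and the filter keys are the ones the queries above already use; the offset and limit values are illustrative:

issues, err := GetIssuesPaginated(
	e,
	pagination.Page{Offset: 0, Limit: 25},
	orm.FilterEq("repo_at", repoAt),
	orm.FilterEq("open", 1),
)
if err != nil {
	return fmt.Errorf("listing open issues: %w", err)
}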
appview/db/label.go  +8 -7
···
10
10
11
11
"github.com/bluesky-social/indigo/atproto/syntax"
12
12
"tangled.org/core/appview/models"
13
+
"tangled.org/core/orm"
13
14
)
14
15
15
16
// no updating type for now
···
59
60
return id, nil
60
61
}
61
62
62
-
func DeleteLabelDefinition(e Execer, filters ...filter) error {
63
+
func DeleteLabelDefinition(e Execer, filters ...orm.Filter) error {
63
64
var conditions []string
64
65
var args []any
65
66
for _, filter := range filters {
···
75
76
return err
76
77
}
77
78
78
-
func GetLabelDefinitions(e Execer, filters ...filter) ([]models.LabelDefinition, error) {
79
+
func GetLabelDefinitions(e Execer, filters ...orm.Filter) ([]models.LabelDefinition, error) {
79
80
var labelDefinitions []models.LabelDefinition
80
81
var conditions []string
81
82
var args []any
···
167
168
}
168
169
169
170
// helper to get exactly one label def
170
-
func GetLabelDefinition(e Execer, filters ...filter) (*models.LabelDefinition, error) {
171
+
func GetLabelDefinition(e Execer, filters ...orm.Filter) (*models.LabelDefinition, error) {
171
172
labels, err := GetLabelDefinitions(e, filters...)
172
173
if err != nil {
173
174
return nil, err
···
227
228
return id, nil
228
229
}
229
230
230
-
func GetLabelOps(e Execer, filters ...filter) ([]models.LabelOp, error) {
231
+
func GetLabelOps(e Execer, filters ...orm.Filter) ([]models.LabelOp, error) {
231
232
var labelOps []models.LabelOp
232
233
var conditions []string
233
234
var args []any
···
302
303
}
303
304
304
305
// get labels for a given list of subject URIs
305
-
func GetLabels(e Execer, filters ...filter) (map[syntax.ATURI]models.LabelState, error) {
306
+
func GetLabels(e Execer, filters ...orm.Filter) (map[syntax.ATURI]models.LabelState, error) {
306
307
ops, err := GetLabelOps(e, filters...)
307
308
if err != nil {
308
309
return nil, err
···
322
323
}
323
324
labelAts := slices.Collect(maps.Keys(labelAtSet))
324
325
325
-
actx, err := NewLabelApplicationCtx(e, FilterIn("at_uri", labelAts))
326
+
actx, err := NewLabelApplicationCtx(e, orm.FilterIn("at_uri", labelAts))
326
327
if err != nil {
327
328
return nil, err
328
329
}
···
338
339
return results, nil
339
340
}
340
341
341
-
func NewLabelApplicationCtx(e Execer, filters ...filter) (*models.LabelApplicationCtx, error) {
342
+
func NewLabelApplicationCtx(e Execer, filters ...orm.Filter) (*models.LabelApplicationCtx, error) {
342
343
labels, err := GetLabelDefinitions(e, filters...)
343
344
if err != nil {
344
345
return nil, err
appview/db/language.go  +6 -5
···
7
7
8
8
"github.com/bluesky-social/indigo/atproto/syntax"
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetRepoLanguages(e Execer, filters ...filter) ([]models.RepoLanguage, error) {
13
+
func GetRepoLanguages(e Execer, filters ...orm.Filter) ([]models.RepoLanguage, error) {
13
14
var conditions []string
14
15
var args []any
15
16
for _, filter := range filters {
···
27
28
whereClause,
28
29
)
29
30
rows, err := e.Query(query, args...)
30
-
31
31
if err != nil {
32
32
return nil, fmt.Errorf("failed to execute query: %w ", err)
33
33
}
34
+
defer rows.Close()
34
35
35
36
var langs []models.RepoLanguage
36
37
for rows.Next() {
···
85
86
return nil
86
87
}
87
88
88
-
func DeleteRepoLanguages(e Execer, filters ...filter) error {
89
+
func DeleteRepoLanguages(e Execer, filters ...orm.Filter) error {
89
90
var conditions []string
90
91
var args []any
91
92
for _, filter := range filters {
···
107
108
func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error {
108
109
err := DeleteRepoLanguages(
109
110
tx,
110
-
FilterEq("repo_at", repoAt),
111
-
FilterEq("ref", ref),
111
+
orm.FilterEq("repo_at", repoAt),
112
+
orm.FilterEq("ref", ref),
112
113
)
113
114
if err != nil {
114
115
return fmt.Errorf("failed to delete existing languages: %w", err)
appview/db/notifications.go  +14 -13
···
11
11
"github.com/bluesky-social/indigo/atproto/syntax"
12
12
"tangled.org/core/appview/models"
13
13
"tangled.org/core/appview/pagination"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
func CreateNotification(e Execer, notification *models.Notification) error {
···
44
45
}
45
46
46
47
// GetNotificationsPaginated retrieves notifications with filters and pagination
47
-
func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...filter) ([]*models.Notification, error) {
48
+
func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.Notification, error) {
48
49
var conditions []string
49
50
var args []any
50
51
···
113
114
}
114
115
115
116
// GetNotificationsWithEntities retrieves notifications with their related entities
116
-
func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...filter) ([]*models.NotificationWithEntity, error) {
117
+
func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.NotificationWithEntity, error) {
117
118
var conditions []string
118
119
var args []any
119
120
···
256
257
}
257
258
258
259
// GetNotifications retrieves notifications with filters
259
-
func GetNotifications(e Execer, filters ...filter) ([]*models.Notification, error) {
260
+
func GetNotifications(e Execer, filters ...orm.Filter) ([]*models.Notification, error) {
260
261
return GetNotificationsPaginated(e, pagination.FirstPage(), filters...)
261
262
}
262
263
263
-
func CountNotifications(e Execer, filters ...filter) (int64, error) {
264
+
func CountNotifications(e Execer, filters ...orm.Filter) (int64, error) {
264
265
var conditions []string
265
266
var args []any
266
267
for _, filter := range filters {
···
285
286
}
286
287
287
288
func MarkNotificationRead(e Execer, notificationID int64, userDID string) error {
288
-
idFilter := FilterEq("id", notificationID)
289
-
recipientFilter := FilterEq("recipient_did", userDID)
289
+
idFilter := orm.FilterEq("id", notificationID)
290
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
290
291
291
292
query := fmt.Sprintf(`
292
293
UPDATE notifications
···
314
315
}
315
316
316
317
func MarkAllNotificationsRead(e Execer, userDID string) error {
317
-
recipientFilter := FilterEq("recipient_did", userDID)
318
-
readFilter := FilterEq("read", 0)
318
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
319
+
readFilter := orm.FilterEq("read", 0)
319
320
320
321
query := fmt.Sprintf(`
321
322
UPDATE notifications
···
334
335
}
335
336
336
337
func DeleteNotification(e Execer, notificationID int64, userDID string) error {
337
-
idFilter := FilterEq("id", notificationID)
338
-
recipientFilter := FilterEq("recipient_did", userDID)
338
+
idFilter := orm.FilterEq("id", notificationID)
339
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
339
340
340
341
query := fmt.Sprintf(`
341
342
DELETE FROM notifications
···
362
363
}
363
364
364
365
func GetNotificationPreference(e Execer, userDid string) (*models.NotificationPreferences, error) {
365
-
prefs, err := GetNotificationPreferences(e, FilterEq("user_did", userDid))
366
+
prefs, err := GetNotificationPreferences(e, orm.FilterEq("user_did", userDid))
366
367
if err != nil {
367
368
return nil, err
368
369
}
···
375
376
return p, nil
376
377
}
377
378
378
-
func GetNotificationPreferences(e Execer, filters ...filter) (map[syntax.DID]*models.NotificationPreferences, error) {
379
+
func GetNotificationPreferences(e Execer, filters ...orm.Filter) (map[syntax.DID]*models.NotificationPreferences, error) {
379
380
prefsMap := make(map[syntax.DID]*models.NotificationPreferences)
380
381
381
382
var conditions []string
···
483
484
484
485
func (d *DB) ClearOldNotifications(ctx context.Context, olderThan time.Duration) error {
485
486
cutoff := time.Now().Add(-olderThan)
486
-
createdFilter := FilterLte("created", cutoff)
487
+
createdFilter := orm.FilterLte("created", cutoff)
487
488
488
489
query := fmt.Sprintf(`
489
490
DELETE FROM notifications
appview/db/pipeline.go  +6 -5
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) {
13
+
func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) {
13
14
var pipelines []models.Pipeline
14
15
15
16
var conditions []string
···
168
169
169
170
// this is a mega query, but the most useful one:
170
171
// get N pipelines, for each one get the latest status of its N workflows
171
-
func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
172
+
func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) {
172
173
var conditions []string
173
174
var args []any
174
175
for _, filter := range filters {
175
-
filter.key = "p." + filter.key // the table is aliased in the query to `p`
176
+
filter.Key = "p." + filter.Key // the table is aliased in the query to `p`
176
177
conditions = append(conditions, filter.Condition())
177
178
args = append(args, filter.Arg()...)
178
179
}
···
264
265
conditions = nil
265
266
args = nil
266
267
for _, p := range pipelines {
267
-
knotFilter := FilterEq("pipeline_knot", p.Knot)
268
-
rkeyFilter := FilterEq("pipeline_rkey", p.Rkey)
268
+
knotFilter := orm.FilterEq("pipeline_knot", p.Knot)
269
+
rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey)
269
270
conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
270
271
args = append(args, p.Knot)
271
272
args = append(args, p.Rkey)
appview/db/profile.go  +29 -16
···
11
11
12
12
"github.com/bluesky-social/indigo/atproto/syntax"
13
13
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
const TimeframeMonths = 7
···
19
20
timeline := models.ProfileTimeline{
20
21
ByMonth: make([]models.ByMonth, TimeframeMonths),
21
22
}
22
-
currentMonth := time.Now().Month()
23
+
now := time.Now()
23
24
timeframe := fmt.Sprintf("-%d months", TimeframeMonths)
24
25
25
26
pulls, err := GetPullsByOwnerDid(e, forDid, timeframe)
···
29
30
30
31
// group pulls by month
31
32
for _, pull := range pulls {
32
-
pullMonth := pull.Created.Month()
33
+
monthsAgo := monthsBetween(pull.Created, now)
33
34
34
-
if currentMonth-pullMonth >= TimeframeMonths {
35
+
if monthsAgo >= TimeframeMonths {
35
36
// shouldn't happen; but times are weird
36
37
continue
37
38
}
38
39
39
-
idx := currentMonth - pullMonth
40
+
idx := monthsAgo
40
41
items := &timeline.ByMonth[idx].PullEvents.Items
41
42
42
43
*items = append(*items, &pull)
···
44
45
45
46
issues, err := GetIssues(
46
47
e,
47
-
FilterEq("did", forDid),
48
-
FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
48
+
orm.FilterEq("did", forDid),
49
+
orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
49
50
)
50
51
if err != nil {
51
52
return nil, fmt.Errorf("error getting issues by owner did: %w", err)
52
53
}
53
54
54
55
for _, issue := range issues {
55
-
issueMonth := issue.Created.Month()
56
+
monthsAgo := monthsBetween(issue.Created, now)
56
57
57
-
if currentMonth-issueMonth >= TimeframeMonths {
58
+
if monthsAgo >= TimeframeMonths {
58
59
// shouldn't happen; but times are weird
59
60
continue
60
61
}
61
62
62
-
idx := currentMonth - issueMonth
63
+
idx := monthsAgo
63
64
items := &timeline.ByMonth[idx].IssueEvents.Items
64
65
65
66
*items = append(*items, &issue)
66
67
}
67
68
68
-
repos, err := GetRepos(e, 0, FilterEq("did", forDid))
69
+
repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid))
69
70
if err != nil {
70
71
return nil, fmt.Errorf("error getting all repos by did: %w", err)
71
72
}
···
76
77
if repo.Source != "" {
77
78
sourceRepo, err = GetRepoByAtUri(e, repo.Source)
78
79
if err != nil {
79
-
return nil, err
80
+
// the source repo was not found, skip this bit
81
+
log.Println("profile", "err", err)
80
82
}
81
83
}
82
84
83
-
repoMonth := repo.Created.Month()
85
+
monthsAgo := monthsBetween(repo.Created, now)
84
86
85
-
if currentMonth-repoMonth >= TimeframeMonths {
87
+
if monthsAgo >= TimeframeMonths {
86
88
// shouldn't happen; but times are weird
87
89
continue
88
90
}
89
91
90
-
idx := currentMonth - repoMonth
92
+
idx := monthsAgo
91
93
92
94
items := &timeline.ByMonth[idx].RepoEvents
93
95
*items = append(*items, models.RepoEvent{
···
99
101
return &timeline, nil
100
102
}
101
103
104
+
func monthsBetween(from, to time.Time) int {
105
+
years := to.Year() - from.Year()
106
+
months := int(to.Month() - from.Month())
107
+
return years*12 + months
108
+
}
109
+
102
110
func UpsertProfile(tx *sql.Tx, profile *models.Profile) error {
103
111
defer tx.Rollback()
104
112
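The old bucketing subtracted time.Month values directly, which breaks across a year boundary (February minus December is -10, so recent activity from late last year got a negative index). monthsBetween normalizes the difference to whole months; a worked example:

from := time.Date(2024, time.December, 15, 0, 0, 0, 0, time.UTC)
to := time.Date(2025, time.February, 1, 0, 0, 0, 0, time.UTC)
idx := monthsBetween(from, to) // (2025-2024)*12 + (2-12) = 2, i.e. two months ago
// the old arithmetic, to.Month()-from.Month(), would have yielded -10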
···
201
209
return tx.Commit()
202
210
}
203
211
204
-
func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) {
212
+
func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) {
205
213
var conditions []string
206
214
var args []any
207
215
for _, filter := range filters {
···
231
239
if err != nil {
232
240
return nil, err
233
241
}
242
+
defer rows.Close()
234
243
235
244
profileMap := make(map[string]*models.Profile)
236
245
for rows.Next() {
···
271
280
if err != nil {
272
281
return nil, err
273
282
}
283
+
defer rows.Close()
284
+
274
285
idxs := make(map[string]int)
275
286
for did := range profileMap {
276
287
idxs[did] = 0
···
291
302
if err != nil {
292
303
return nil, err
293
304
}
305
+
defer rows.Close()
306
+
294
307
idxs = make(map[string]int)
295
308
for did := range profileMap {
296
309
idxs[did] = 0
···
448
461
}
449
462
450
463
// ensure all pinned repos are either own repos or collaborating repos
451
-
repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
464
+
repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did))
452
465
if err != nil {
453
466
log.Printf("getting repos for %s: %s", profile.Did, err)
454
467
}
appview/db/pulls.go  +21 -20
···
13
13
14
14
"github.com/bluesky-social/indigo/atproto/syntax"
15
15
"tangled.org/core/appview/models"
16
+
"tangled.org/core/orm"
16
17
)
17
18
18
19
func NewPull(tx *sql.Tx, pull *models.Pull) error {
···
118
119
return pullId - 1, err
119
120
}
120
121
121
-
func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) {
122
+
func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) {
122
123
pulls := make(map[syntax.ATURI]*models.Pull)
123
124
124
125
var conditions []string
···
229
230
for _, p := range pulls {
230
231
pullAts = append(pullAts, p.AtUri())
231
232
}
232
-
submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts))
233
+
submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts))
233
234
if err != nil {
234
235
return nil, fmt.Errorf("failed to get submissions: %w", err)
235
236
}
···
241
242
}
242
243
243
244
// collect allLabels for each issue
244
-
allLabels, err := GetLabels(e, FilterIn("subject", pullAts))
245
+
allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts))
245
246
if err != nil {
246
247
return nil, fmt.Errorf("failed to query labels: %w", err)
247
248
}
···
258
259
sourceAts = append(sourceAts, *p.PullSource.RepoAt)
259
260
}
260
261
}
261
-
sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts))
262
+
sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts))
262
263
if err != nil && !errors.Is(err, sql.ErrNoRows) {
263
264
return nil, fmt.Errorf("failed to get source repos: %w", err)
264
265
}
···
274
275
}
275
276
}
276
277
277
-
allReferences, err := GetReferencesAll(e, FilterIn("from_at", pullAts))
278
+
allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts))
278
279
if err != nil {
279
280
return nil, fmt.Errorf("failed to query reference_links: %w", err)
280
281
}
···
295
296
return orderedByPullId, nil
296
297
}
297
298
298
-
func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) {
299
+
func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) {
299
300
return GetPullsWithLimit(e, 0, filters...)
300
301
}
301
302
302
303
func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
303
304
var ids []int64
304
305
305
-
var filters []filter
306
-
filters = append(filters, FilterEq("state", opts.State))
306
+
var filters []orm.Filter
307
+
filters = append(filters, orm.FilterEq("state", opts.State))
307
308
if opts.RepoAt != "" {
308
-
filters = append(filters, FilterEq("repo_at", opts.RepoAt))
309
+
filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
309
310
}
310
311
311
312
var conditions []string
···
361
362
}
362
363
363
364
func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
364
-
pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId))
365
+
pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId))
365
366
if err != nil {
366
367
return nil, err
367
368
}
···
373
374
}
374
375
375
376
// mapping from pull -> pull submissions
376
-
func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
377
+
func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
377
378
var conditions []string
378
379
var args []any
379
380
for _, filter := range filters {
···
448
449
449
450
// Get comments for all submissions using GetPullComments
450
451
submissionIds := slices.Collect(maps.Keys(submissionMap))
451
-
comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds))
452
+
comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds))
452
453
if err != nil {
453
454
return nil, fmt.Errorf("failed to get pull comments: %w", err)
454
455
}
···
474
475
return m, nil
475
476
}
476
477
477
-
func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) {
478
+
func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) {
478
479
var conditions []string
479
480
var args []any
480
481
for _, filter := range filters {
···
542
543
543
544
// collect references for each comments
544
545
commentAts := slices.Collect(maps.Keys(commentMap))
545
-
allReferencs, err := GetReferencesAll(e, FilterIn("from_at", commentAts))
546
+
allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
546
547
if err != nil {
547
548
return nil, fmt.Errorf("failed to query reference_links: %w", err)
548
549
}
···
708
709
return err
709
710
}
710
711
711
-
func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error {
712
+
func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error {
712
713
var conditions []string
713
714
var args []any
714
715
···
732
733
733
734
// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
734
735
// otherwise submissions are immutable
735
-
func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error {
736
+
func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error {
736
737
var conditions []string
737
738
var args []any
738
739
···
790
791
func GetStack(e Execer, stackId string) (models.Stack, error) {
791
792
unorderedPulls, err := GetPulls(
792
793
e,
793
-
FilterEq("stack_id", stackId),
794
-
FilterNotEq("state", models.PullDeleted),
794
+
orm.FilterEq("stack_id", stackId),
795
+
orm.FilterNotEq("state", models.PullDeleted),
795
796
)
796
797
if err != nil {
797
798
return nil, err
···
835
836
func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
836
837
pulls, err := GetPulls(
837
838
e,
838
-
FilterEq("stack_id", stackId),
839
-
FilterEq("state", models.PullDeleted),
839
+
orm.FilterEq("stack_id", stackId),
840
+
orm.FilterEq("state", models.PullDeleted),
840
841
)
841
842
if err != nil {
842
843
return nil, err
appview/db/punchcard.go  +3 -2
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
13
// this adds to the existing count
···
20
21
return err
21
22
}
22
23
23
-
func MakePunchcard(e Execer, filters ...filter) (*models.Punchcard, error) {
24
+
func MakePunchcard(e Execer, filters ...orm.Filter) (*models.Punchcard, error) {
24
25
punchcard := &models.Punchcard{}
25
26
now := time.Now()
26
27
startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
···
77
78
punch.Count = int(count.Int64)
78
79
}
79
80
80
-
punchcard.Punches[punch.Date.YearDay()] = punch
81
+
punchcard.Punches[punch.Date.YearDay()-1] = punch
81
82
punchcard.Total += punch.Count
82
83
}
83
84
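time.Time.YearDay is 1-based (January 1st is day 1), so indexing the zero-based punches collection needs the -1; without it the first day of the year lands in slot 1 and the last day would fall outside a slice sized to the number of days in the year (assuming Punches is day-indexed from zero). For example:

jan1 := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)
idx := jan1.YearDay() - 1 // 0: the first slot of a zero-based, day-indexed punchcard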
appview/db/reference.go  +4 -3
···
8
8
"github.com/bluesky-social/indigo/atproto/syntax"
9
9
"tangled.org/core/api/tangled"
10
10
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
11
12
)
12
13
13
14
// ValidateReferenceLinks resolves refLinks to Issue/PR/IssueComment/PullComment ATURIs.
···
205
206
return err
206
207
}
207
208
208
-
func GetReferencesAll(e Execer, filters ...filter) (map[syntax.ATURI][]syntax.ATURI, error) {
209
+
func GetReferencesAll(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]syntax.ATURI, error) {
209
210
var (
210
211
conditions []string
211
212
args []any
···
347
348
if len(aturis) == 0 {
348
349
return nil, nil
349
350
}
350
-
filter := FilterIn("c.at_uri", aturis)
351
+
filter := orm.FilterIn("c.at_uri", aturis)
351
352
rows, err := e.Query(
352
353
fmt.Sprintf(
353
354
`select r.did, r.name, i.issue_id, c.id, i.title, i.open
···
427
428
if len(aturis) == 0 {
428
429
return nil, nil
429
430
}
430
-
filter := FilterIn("c.comment_at", aturis)
431
+
filter := orm.FilterIn("c.comment_at", aturis)
431
432
rows, err := e.Query(
432
433
fmt.Sprintf(
433
434
`select r.did, r.name, p.pull_id, c.id, p.title, p.state
appview/db/registration.go  +5 -3
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetRegistrations(e Execer, filters ...filter) ([]models.Registration, error) {
13
+
func GetRegistrations(e Execer, filters ...orm.Filter) ([]models.Registration, error) {
13
14
var registrations []models.Registration
14
15
15
16
var conditions []string
···
37
38
if err != nil {
38
39
return nil, err
39
40
}
41
+
defer rows.Close()
40
42
41
43
for rows.Next() {
42
44
var createdAt string
···
69
71
return registrations, nil
70
72
}
71
73
72
-
func MarkRegistered(e Execer, filters ...filter) error {
74
+
func MarkRegistered(e Execer, filters ...orm.Filter) error {
73
75
var conditions []string
74
76
var args []any
75
77
for _, filter := range filters {
···
94
96
return err
95
97
}
96
98
97
-
func DeleteKnot(e Execer, filters ...filter) error {
99
+
func DeleteKnot(e Execer, filters ...orm.Filter) error {
98
100
var conditions []string
99
101
var args []any
100
102
for _, filter := range filters {
appview/db/repos.go  +18 -6
···
11
11
12
12
"github.com/bluesky-social/indigo/atproto/syntax"
13
13
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
-
func GetRepos(e Execer, limit int, filters ...filter) ([]models.Repo, error) {
17
+
func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) {
17
18
repoMap := make(map[syntax.ATURI]*models.Repo)
18
19
19
20
var conditions []string
···
55
56
limitClause,
56
57
)
57
58
rows, err := e.Query(repoQuery, args...)
58
-
59
59
if err != nil {
60
60
return nil, fmt.Errorf("failed to execute repo query: %w ", err)
61
61
}
62
+
defer rows.Close()
62
63
63
64
for rows.Next() {
64
65
var repo models.Repo
···
127
128
if err != nil {
128
129
return nil, fmt.Errorf("failed to execute labels query: %w ", err)
129
130
}
131
+
defer rows.Close()
132
+
130
133
for rows.Next() {
131
134
var repoat, labelat string
132
135
if err := rows.Scan(&repoat, &labelat); err != nil {
···
155
158
from repo_languages
156
159
where repo_at in (%s)
157
160
and is_default_ref = 1
161
+
and language <> ''
158
162
)
159
163
where rn = 1
160
164
`,
···
164
168
if err != nil {
165
169
return nil, fmt.Errorf("failed to execute lang query: %w ", err)
166
170
}
171
+
defer rows.Close()
172
+
167
173
for rows.Next() {
168
174
var repoat, lang string
169
175
if err := rows.Scan(&repoat, &lang); err != nil {
···
190
196
if err != nil {
191
197
return nil, fmt.Errorf("failed to execute star-count query: %w ", err)
192
198
}
199
+
defer rows.Close()
200
+
193
201
for rows.Next() {
194
202
var repoat string
195
203
var count int
···
219
227
if err != nil {
220
228
return nil, fmt.Errorf("failed to execute issue-count query: %w ", err)
221
229
}
230
+
defer rows.Close()
231
+
222
232
for rows.Next() {
223
233
var repoat string
224
234
var open, closed int
···
260
270
if err != nil {
261
271
return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err)
262
272
}
273
+
defer rows.Close()
274
+
263
275
for rows.Next() {
264
276
var repoat string
265
277
var open, merged, closed, deleted int
···
294
306
}
295
307
296
308
// helper to get exactly one repo
297
-
func GetRepo(e Execer, filters ...filter) (*models.Repo, error) {
309
+
func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) {
298
310
repos, err := GetRepos(e, 0, filters...)
299
311
if err != nil {
300
312
return nil, err
···
311
323
return &repos[0], nil
312
324
}
313
325
314
-
func CountRepos(e Execer, filters ...filter) (int64, error) {
326
+
func CountRepos(e Execer, filters ...orm.Filter) (int64, error) {
315
327
var conditions []string
316
328
var args []any
317
329
for _, filter := range filters {
···
542
554
return err
543
555
}
544
556
545
-
func UnsubscribeLabel(e Execer, filters ...filter) error {
557
+
func UnsubscribeLabel(e Execer, filters ...orm.Filter) error {
546
558
var conditions []string
547
559
var args []any
548
560
for _, filter := range filters {
···
560
572
return err
561
573
}
562
574
563
-
func GetRepoLabels(e Execer, filters ...filter) ([]models.RepoLabel, error) {
575
+
func GetRepoLabels(e Execer, filters ...orm.Filter) ([]models.RepoLabel, error) {
564
576
var conditions []string
565
577
var args []any
566
578
for _, filter := range filters {
appview/db/spindle.go  +6 -5
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetSpindles(e Execer, filters ...filter) ([]models.Spindle, error) {
13
+
func GetSpindles(e Execer, filters ...orm.Filter) ([]models.Spindle, error) {
13
14
var spindles []models.Spindle
14
15
15
16
var conditions []string
···
91
92
return err
92
93
}
93
94
94
-
func VerifySpindle(e Execer, filters ...filter) (int64, error) {
95
+
func VerifySpindle(e Execer, filters ...orm.Filter) (int64, error) {
95
96
var conditions []string
96
97
var args []any
97
98
for _, filter := range filters {
···
114
115
return res.RowsAffected()
115
116
}
116
117
117
-
func DeleteSpindle(e Execer, filters ...filter) error {
118
+
func DeleteSpindle(e Execer, filters ...orm.Filter) error {
118
119
var conditions []string
119
120
var args []any
120
121
for _, filter := range filters {
···
144
145
return err
145
146
}
146
147
147
-
func RemoveSpindleMember(e Execer, filters ...filter) error {
148
+
func RemoveSpindleMember(e Execer, filters ...orm.Filter) error {
148
149
var conditions []string
149
150
var args []any
150
151
for _, filter := range filters {
···
163
164
return err
164
165
}
165
166
166
-
func GetSpindleMembers(e Execer, filters ...filter) ([]models.SpindleMember, error) {
167
+
func GetSpindleMembers(e Execer, filters ...orm.Filter) ([]models.SpindleMember, error) {
167
168
var members []models.SpindleMember
168
169
169
170
var conditions []string
+6
-4
appview/db/star.go
···
11
11
12
12
"github.com/bluesky-social/indigo/atproto/syntax"
13
13
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
func AddStar(e Execer, star *models.Star) error {
···
133
134
134
135
// GetRepoStars return a list of stars each holding target repository.
135
136
// If there isn't known repo with starred at-uri, those stars will be ignored.
136
-
func GetRepoStars(e Execer, limit int, filters ...filter) ([]models.RepoStar, error) {
137
+
func GetRepoStars(e Execer, limit int, filters ...orm.Filter) ([]models.RepoStar, error) {
137
138
var conditions []string
138
139
var args []any
139
140
for _, filter := range filters {
···
164
165
if err != nil {
165
166
return nil, err
166
167
}
168
+
defer rows.Close()
167
169
168
170
starMap := make(map[string][]models.Star)
169
171
for rows.Next() {
···
195
197
return nil, nil
196
198
}
197
199
198
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", args))
200
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args))
199
201
if err != nil {
200
202
return nil, err
201
203
}
···
225
227
return repoStars, nil
226
228
}
227
229
228
-
func CountStars(e Execer, filters ...filter) (int64, error) {
230
+
func CountStars(e Execer, filters ...orm.Filter) (int64, error) {
229
231
var conditions []string
230
232
var args []any
231
233
for _, filter := range filters {
···
298
300
}
299
301
300
302
// get full repo data
301
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris))
303
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris))
302
304
if err != nil {
303
305
return nil, err
304
306
}
+4
-3
appview/db/strings.go
···
8
8
"time"
9
9
10
10
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
11
12
)
12
13
13
14
func AddString(e Execer, s models.String) error {
···
44
45
return err
45
46
}
46
47
47
-
func GetStrings(e Execer, limit int, filters ...filter) ([]models.String, error) {
48
+
func GetStrings(e Execer, limit int, filters ...orm.Filter) ([]models.String, error) {
48
49
var all []models.String
49
50
50
51
var conditions []string
···
127
128
return all, nil
128
129
}
129
130
130
-
func CountStrings(e Execer, filters ...filter) (int64, error) {
131
+
func CountStrings(e Execer, filters ...orm.Filter) (int64, error) {
131
132
var conditions []string
132
133
var args []any
133
134
for _, filter := range filters {
···
151
152
return count, nil
152
153
}
153
154
154
-
func DeleteString(e Execer, filters ...filter) error {
155
+
func DeleteString(e Execer, filters ...orm.Filter) error {
155
156
var conditions []string
156
157
var args []any
157
158
for _, filter := range filters {
+9
-8
appview/db/timeline.go
···
5
5
6
6
"github.com/bluesky-social/indigo/atproto/syntax"
7
7
"tangled.org/core/appview/models"
8
+
"tangled.org/core/orm"
8
9
)
9
10
10
11
// TODO: this gathers heterogenous events from different sources and aggregates
···
84
85
}
85
86
86
87
func getTimelineRepos(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
87
-
filters := make([]filter, 0)
88
+
filters := make([]orm.Filter, 0)
88
89
if userIsFollowing != nil {
89
-
filters = append(filters, FilterIn("did", userIsFollowing))
90
+
filters = append(filters, orm.FilterIn("did", userIsFollowing))
90
91
}
91
92
92
93
repos, err := GetRepos(e, limit, filters...)
···
104
105
105
106
var origRepos []models.Repo
106
107
if args != nil {
107
-
origRepos, err = GetRepos(e, 0, FilterIn("at_uri", args))
108
+
origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args))
108
109
}
109
110
if err != nil {
110
111
return nil, err
···
144
145
}
145
146
146
147
func getTimelineStars(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
147
-
filters := make([]filter, 0)
148
+
filters := make([]orm.Filter, 0)
148
149
if userIsFollowing != nil {
149
-
filters = append(filters, FilterIn("did", userIsFollowing))
150
+
filters = append(filters, orm.FilterIn("did", userIsFollowing))
150
151
}
151
152
152
153
stars, err := GetRepoStars(e, limit, filters...)
···
180
181
}
181
182
182
183
func getTimelineFollows(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
183
-
filters := make([]filter, 0)
184
+
filters := make([]orm.Filter, 0)
184
185
if userIsFollowing != nil {
185
-
filters = append(filters, FilterIn("user_did", userIsFollowing))
186
+
filters = append(filters, orm.FilterIn("user_did", userIsFollowing))
186
187
}
187
188
188
189
follows, err := GetFollows(e, limit, filters...)
···
199
200
return nil, nil
200
201
}
201
202
202
-
profiles, err := GetProfiles(e, FilterIn("did", subjects))
203
+
profiles, err := GetProfiles(e, orm.FilterIn("did", subjects))
203
204
if err != nil {
204
205
return nil, err
205
206
}
+25
-24
appview/ingester.go
···
21
21
"tangled.org/core/appview/serververify"
22
22
"tangled.org/core/appview/validator"
23
23
"tangled.org/core/idresolver"
24
+
"tangled.org/core/orm"
24
25
"tangled.org/core/rbac"
25
26
)
26
27
···
253
254
254
255
err = db.AddArtifact(i.Db, artifact)
255
256
case jmodels.CommitOperationDelete:
256
-
err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey))
257
+
err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey))
257
258
}
258
259
259
260
if err != nil {
···
350
351
351
352
err = db.UpsertProfile(tx, &profile)
352
353
case jmodels.CommitOperationDelete:
353
-
err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey))
354
+
err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey))
354
355
}
355
356
356
357
if err != nil {
···
424
425
// get record from db first
425
426
members, err := db.GetSpindleMembers(
426
427
ddb,
427
-
db.FilterEq("did", did),
428
-
db.FilterEq("rkey", rkey),
428
+
orm.FilterEq("did", did),
429
+
orm.FilterEq("rkey", rkey),
429
430
)
430
431
if err != nil || len(members) != 1 {
431
432
return fmt.Errorf("failed to get member: %w, len(members) = %d", err, len(members))
···
440
441
// remove record by rkey && update enforcer
441
442
if err = db.RemoveSpindleMember(
442
443
tx,
443
-
db.FilterEq("did", did),
444
-
db.FilterEq("rkey", rkey),
444
+
orm.FilterEq("did", did),
445
+
orm.FilterEq("rkey", rkey),
445
446
); err != nil {
446
447
return fmt.Errorf("failed to remove from db: %w", err)
447
448
}
···
523
524
// get record from db first
524
525
spindles, err := db.GetSpindles(
525
526
ddb,
526
-
db.FilterEq("owner", did),
527
-
db.FilterEq("instance", instance),
527
+
orm.FilterEq("owner", did),
528
+
orm.FilterEq("instance", instance),
528
529
)
529
530
if err != nil || len(spindles) != 1 {
530
531
return fmt.Errorf("failed to get spindles: %w, len(spindles) = %d", err, len(spindles))
···
543
544
// remove spindle members first
544
545
err = db.RemoveSpindleMember(
545
546
tx,
546
-
db.FilterEq("owner", did),
547
-
db.FilterEq("instance", instance),
547
+
orm.FilterEq("owner", did),
548
+
orm.FilterEq("instance", instance),
548
549
)
549
550
if err != nil {
550
551
return err
···
552
553
553
554
err = db.DeleteSpindle(
554
555
tx,
555
-
db.FilterEq("owner", did),
556
-
db.FilterEq("instance", instance),
556
+
orm.FilterEq("owner", did),
557
+
orm.FilterEq("instance", instance),
557
558
)
558
559
if err != nil {
559
560
return err
···
621
622
case jmodels.CommitOperationDelete:
622
623
if err := db.DeleteString(
623
624
ddb,
624
-
db.FilterEq("did", did),
625
-
db.FilterEq("rkey", rkey),
625
+
orm.FilterEq("did", did),
626
+
orm.FilterEq("rkey", rkey),
626
627
); err != nil {
627
628
l.Error("failed to delete", "err", err)
628
629
return fmt.Errorf("failed to delete string record: %w", err)
···
740
741
// get record from db first
741
742
registrations, err := db.GetRegistrations(
742
743
ddb,
743
-
db.FilterEq("domain", domain),
744
-
db.FilterEq("did", did),
744
+
orm.FilterEq("domain", domain),
745
+
orm.FilterEq("did", did),
745
746
)
746
747
if err != nil {
747
748
return fmt.Errorf("failed to get registration: %w", err)
···
762
763
763
764
err = db.DeleteKnot(
764
765
tx,
765
-
db.FilterEq("did", did),
766
-
db.FilterEq("domain", domain),
766
+
orm.FilterEq("did", did),
767
+
orm.FilterEq("domain", domain),
767
768
)
768
769
if err != nil {
769
770
return err
···
915
916
case jmodels.CommitOperationDelete:
916
917
if err := db.DeleteIssueComments(
917
918
ddb,
918
-
db.FilterEq("did", did),
919
-
db.FilterEq("rkey", rkey),
919
+
orm.FilterEq("did", did),
920
+
orm.FilterEq("rkey", rkey),
920
921
); err != nil {
921
922
return fmt.Errorf("failed to delete issue comment record: %w", err)
922
923
}
···
969
970
case jmodels.CommitOperationDelete:
970
971
if err := db.DeleteLabelDefinition(
971
972
ddb,
972
-
db.FilterEq("did", did),
973
-
db.FilterEq("rkey", rkey),
973
+
orm.FilterEq("did", did),
974
+
orm.FilterEq("rkey", rkey),
974
975
); err != nil {
975
976
return fmt.Errorf("failed to delete labeldef record: %w", err)
976
977
}
···
1010
1011
var repo *models.Repo
1011
1012
switch collection {
1012
1013
case tangled.RepoIssueNSID:
1013
-
i, err := db.GetIssues(ddb, db.FilterEq("at_uri", subject))
1014
+
i, err := db.GetIssues(ddb, orm.FilterEq("at_uri", subject))
1014
1015
if err != nil || len(i) != 1 {
1015
1016
return fmt.Errorf("failed to find subject: %w || subject count %d", err, len(i))
1016
1017
}
···
1019
1020
return fmt.Errorf("unsupport label subject: %s", collection)
1020
1021
}
1021
1022
1022
-
actx, err := db.NewLabelApplicationCtx(ddb, db.FilterIn("at_uri", repo.Labels))
1023
+
actx, err := db.NewLabelApplicationCtx(ddb, orm.FilterIn("at_uri", repo.Labels))
1023
1024
if err != nil {
1024
1025
return fmt.Errorf("failed to build label application ctx: %w", err)
1025
1026
}
+46
-45
appview/issues/issues.go
···
19
19
"tangled.org/core/appview/config"
20
20
"tangled.org/core/appview/db"
21
21
issues_indexer "tangled.org/core/appview/indexer/issues"
22
+
"tangled.org/core/appview/mentions"
22
23
"tangled.org/core/appview/models"
23
24
"tangled.org/core/appview/notify"
24
25
"tangled.org/core/appview/oauth"
25
26
"tangled.org/core/appview/pages"
26
27
"tangled.org/core/appview/pages/repoinfo"
27
28
"tangled.org/core/appview/pagination"
28
-
"tangled.org/core/appview/refresolver"
29
29
"tangled.org/core/appview/reporesolver"
30
30
"tangled.org/core/appview/validator"
31
31
"tangled.org/core/idresolver"
32
+
"tangled.org/core/orm"
32
33
"tangled.org/core/rbac"
33
34
"tangled.org/core/tid"
34
35
)
35
36
36
37
type Issues struct {
37
-
oauth *oauth.OAuth
38
-
repoResolver *reporesolver.RepoResolver
39
-
enforcer *rbac.Enforcer
40
-
pages *pages.Pages
41
-
idResolver *idresolver.Resolver
42
-
refResolver *refresolver.Resolver
43
-
db *db.DB
44
-
config *config.Config
45
-
notifier notify.Notifier
46
-
logger *slog.Logger
47
-
validator *validator.Validator
48
-
indexer *issues_indexer.Indexer
38
+
oauth *oauth.OAuth
39
+
repoResolver *reporesolver.RepoResolver
40
+
enforcer *rbac.Enforcer
41
+
pages *pages.Pages
42
+
idResolver *idresolver.Resolver
43
+
mentionsResolver *mentions.Resolver
44
+
db *db.DB
45
+
config *config.Config
46
+
notifier notify.Notifier
47
+
logger *slog.Logger
48
+
validator *validator.Validator
49
+
indexer *issues_indexer.Indexer
49
50
}
50
51
51
52
func New(
···
54
55
enforcer *rbac.Enforcer,
55
56
pages *pages.Pages,
56
57
idResolver *idresolver.Resolver,
57
-
refResolver *refresolver.Resolver,
58
+
mentionsResolver *mentions.Resolver,
58
59
db *db.DB,
59
60
config *config.Config,
60
61
notifier notify.Notifier,
···
63
64
logger *slog.Logger,
64
65
) *Issues {
65
66
return &Issues{
66
-
oauth: oauth,
67
-
repoResolver: repoResolver,
68
-
enforcer: enforcer,
69
-
pages: pages,
70
-
idResolver: idResolver,
71
-
refResolver: refResolver,
72
-
db: db,
73
-
config: config,
74
-
notifier: notifier,
75
-
logger: logger,
76
-
validator: validator,
77
-
indexer: indexer,
67
+
oauth: oauth,
68
+
repoResolver: repoResolver,
69
+
enforcer: enforcer,
70
+
pages: pages,
71
+
idResolver: idResolver,
72
+
mentionsResolver: mentionsResolver,
73
+
db: db,
74
+
config: config,
75
+
notifier: notifier,
76
+
logger: logger,
77
+
validator: validator,
78
+
indexer: indexer,
78
79
}
79
80
}
80
81
···
113
114
114
115
labelDefs, err := db.GetLabelDefinitions(
115
116
rp.db,
116
-
db.FilterIn("at_uri", f.Labels),
117
-
db.FilterContains("scope", tangled.RepoIssueNSID),
117
+
orm.FilterIn("at_uri", f.Labels),
118
+
orm.FilterContains("scope", tangled.RepoIssueNSID),
118
119
)
119
120
if err != nil {
120
121
l.Error("failed to fetch labels", "err", err)
···
163
164
newIssue := issue
164
165
newIssue.Title = r.FormValue("title")
165
166
newIssue.Body = r.FormValue("body")
166
-
newIssue.Mentions, newIssue.References = rp.refResolver.Resolve(r.Context(), newIssue.Body)
167
+
newIssue.Mentions, newIssue.References = rp.mentionsResolver.Resolve(r.Context(), newIssue.Body)
167
168
168
169
if err := rp.validator.ValidateIssue(newIssue); err != nil {
169
170
l.Error("validation error", "err", err)
···
314
315
if isIssueOwner || isRepoOwner || isCollaborator {
315
316
err = db.CloseIssues(
316
317
rp.db,
317
-
db.FilterEq("id", issue.Id),
318
+
orm.FilterEq("id", issue.Id),
318
319
)
319
320
if err != nil {
320
321
l.Error("failed to close issue", "err", err)
···
361
362
if isCollaborator || isRepoOwner || isIssueOwner {
362
363
err := db.ReopenIssues(
363
364
rp.db,
364
-
db.FilterEq("id", issue.Id),
365
+
orm.FilterEq("id", issue.Id),
365
366
)
366
367
if err != nil {
367
368
l.Error("failed to reopen issue", "err", err)
···
412
413
replyTo = &replyToUri
413
414
}
414
415
415
-
mentions, references := rp.refResolver.Resolve(r.Context(), body)
416
+
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
416
417
417
418
comment := models.IssueComment{
418
419
Did: user.Did,
···
506
507
commentId := chi.URLParam(r, "commentId")
507
508
comments, err := db.GetIssueComments(
508
509
rp.db,
509
-
db.FilterEq("id", commentId),
510
+
orm.FilterEq("id", commentId),
510
511
)
511
512
if err != nil {
512
513
l.Error("failed to fetch comment", "id", commentId)
···
542
543
commentId := chi.URLParam(r, "commentId")
543
544
comments, err := db.GetIssueComments(
544
545
rp.db,
545
-
db.FilterEq("id", commentId),
546
+
orm.FilterEq("id", commentId),
546
547
)
547
548
if err != nil {
548
549
l.Error("failed to fetch comment", "id", commentId)
···
584
585
newComment := comment
585
586
newComment.Body = newBody
586
587
newComment.Edited = &now
587
-
newComment.Mentions, newComment.References = rp.refResolver.Resolve(r.Context(), newBody)
588
+
newComment.Mentions, newComment.References = rp.mentionsResolver.Resolve(r.Context(), newBody)
588
589
589
590
record := newComment.AsRecord()
590
591
···
652
653
commentId := chi.URLParam(r, "commentId")
653
654
comments, err := db.GetIssueComments(
654
655
rp.db,
655
-
db.FilterEq("id", commentId),
656
+
orm.FilterEq("id", commentId),
656
657
)
657
658
if err != nil {
658
659
l.Error("failed to fetch comment", "id", commentId)
···
688
689
commentId := chi.URLParam(r, "commentId")
689
690
comments, err := db.GetIssueComments(
690
691
rp.db,
691
-
db.FilterEq("id", commentId),
692
+
orm.FilterEq("id", commentId),
692
693
)
693
694
if err != nil {
694
695
l.Error("failed to fetch comment", "id", commentId)
···
724
725
commentId := chi.URLParam(r, "commentId")
725
726
comments, err := db.GetIssueComments(
726
727
rp.db,
727
-
db.FilterEq("id", commentId),
728
+
orm.FilterEq("id", commentId),
728
729
)
729
730
if err != nil {
730
731
l.Error("failed to fetch comment", "id", commentId)
···
751
752
752
753
// optimistic deletion
753
754
deleted := time.Now()
754
-
err = db.DeleteIssueComments(rp.db, db.FilterEq("id", comment.Id))
755
+
err = db.DeleteIssueComments(rp.db, orm.FilterEq("id", comment.Id))
755
756
if err != nil {
756
757
l.Error("failed to delete comment", "err", err)
757
758
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment")
···
840
841
841
842
issues, err = db.GetIssues(
842
843
rp.db,
843
-
db.FilterIn("id", res.Hits),
844
+
orm.FilterIn("id", res.Hits),
844
845
)
845
846
if err != nil {
846
847
l.Error("failed to get issues", "err", err)
···
856
857
issues, err = db.GetIssuesPaginated(
857
858
rp.db,
858
859
page,
859
-
db.FilterEq("repo_at", f.RepoAt()),
860
-
db.FilterEq("open", openInt),
860
+
orm.FilterEq("repo_at", f.RepoAt()),
861
+
orm.FilterEq("open", openInt),
861
862
)
862
863
if err != nil {
863
864
l.Error("failed to get issues", "err", err)
···
868
869
869
870
labelDefs, err := db.GetLabelDefinitions(
870
871
rp.db,
871
-
db.FilterIn("at_uri", f.Labels),
872
-
db.FilterContains("scope", tangled.RepoIssueNSID),
872
+
orm.FilterIn("at_uri", f.Labels),
873
+
orm.FilterContains("scope", tangled.RepoIssueNSID),
873
874
)
874
875
if err != nil {
875
876
l.Error("failed to fetch labels", "err", err)
···
912
913
})
913
914
case http.MethodPost:
914
915
body := r.FormValue("body")
915
-
mentions, references := rp.refResolver.Resolve(r.Context(), body)
916
+
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
916
917
917
918
issue := &models.Issue{
918
919
RepoAt: f.RepoAt(),
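The issues handlers above swap the old `refresolver.Resolver` for the new `appview/mentions` resolver, but only ever call one method on it. A hedged sketch of the signature those call sites imply is below; the return types are inferred from how the results are used (mentions feed the notifier as DIDs) and may not match the real package.

```go
// Assumed shape only, inferred from call sites such as
// rp.mentionsResolver.Resolve(r.Context(), body); the real
// tangled.org/core/appview/mentions package may differ.
package mentions

import (
	"context"

	"github.com/bluesky-social/indigo/atproto/syntax"
)

// Resolver turns free-form text into resolved mentions and references.
type Resolver struct {
	// identity resolution / db dependencies elided
}

// Resolve scans body for @handle mentions and other references, returning the
// DIDs of mentioned users plus the remaining reference URIs.
func (r *Resolver) Resolve(ctx context.Context, body string) ([]syntax.DID, []string) {
	// implementation elided; the handlers only consume the two return values
	return nil, nil
}
```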
+2
-2
appview/issues/opengraph.go
···
193
193
dollyX := dollyBounds.Min.X + (dollyBounds.Dx() / 2) - (dollySize / 2)
194
194
dollyY := statsY + iconBaselineOffset - dollySize/2 + 25
195
195
dollyColor := color.RGBA{180, 180, 180, 255} // light gray
196
-
err = dollyArea.DrawDollySilhouette(dollyX, dollyY, dollySize, dollyColor)
196
+
err = dollyArea.DrawDolly(dollyX, dollyY, dollySize, dollyColor)
197
197
if err != nil {
198
-
log.Printf("dolly silhouette not available (this is ok): %v", err)
198
+
log.Printf("dolly not available (this is ok): %v", err)
199
199
}
200
200
201
201
// Draw "opened by @author" and date at the bottom with more spacing
+19
-23
appview/knots/knots.go
···
21
21
"tangled.org/core/appview/xrpcclient"
22
22
"tangled.org/core/eventconsumer"
23
23
"tangled.org/core/idresolver"
24
+
"tangled.org/core/orm"
24
25
"tangled.org/core/rbac"
25
26
"tangled.org/core/tid"
26
27
···
72
73
user := k.OAuth.GetUser(r)
73
74
registrations, err := db.GetRegistrations(
74
75
k.Db,
75
-
db.FilterEq("did", user.Did),
76
+
orm.FilterEq("did", user.Did),
76
77
)
77
78
if err != nil {
78
79
k.Logger.Error("failed to fetch knot registrations", "err", err)
···
112
113
113
114
registrations, err := db.GetRegistrations(
114
115
k.Db,
115
-
db.FilterEq("did", user.Did),
116
-
db.FilterEq("domain", domain),
116
+
orm.FilterEq("did", user.Did),
117
+
orm.FilterEq("domain", domain),
117
118
)
118
119
if err != nil {
119
120
l.Error("failed to get registrations", "err", err)
···
137
138
repos, err := db.GetRepos(
138
139
k.Db,
139
140
0,
140
-
db.FilterEq("knot", domain),
141
+
orm.FilterEq("knot", domain),
141
142
)
142
143
if err != nil {
143
144
l.Error("failed to get knot repos", "err", err)
···
303
304
// get record from db first
304
305
registrations, err := db.GetRegistrations(
305
306
k.Db,
306
-
db.FilterEq("did", user.Did),
307
-
db.FilterEq("domain", domain),
307
+
orm.FilterEq("did", user.Did),
308
+
orm.FilterEq("domain", domain),
308
309
)
309
310
if err != nil {
310
311
l.Error("failed to get registration", "err", err)
···
331
332
332
333
err = db.DeleteKnot(
333
334
tx,
334
-
db.FilterEq("did", user.Did),
335
-
db.FilterEq("domain", domain),
335
+
orm.FilterEq("did", user.Did),
336
+
orm.FilterEq("domain", domain),
336
337
)
337
338
if err != nil {
338
339
l.Error("failed to delete registration", "err", err)
···
412
413
// get record from db first
413
414
registrations, err := db.GetRegistrations(
414
415
k.Db,
415
-
db.FilterEq("did", user.Did),
416
-
db.FilterEq("domain", domain),
416
+
orm.FilterEq("did", user.Did),
417
+
orm.FilterEq("domain", domain),
417
418
)
418
419
if err != nil {
419
420
l.Error("failed to get registration", "err", err)
···
503
504
// Get updated registration to show
504
505
registrations, err = db.GetRegistrations(
505
506
k.Db,
506
-
db.FilterEq("did", user.Did),
507
-
db.FilterEq("domain", domain),
507
+
orm.FilterEq("did", user.Did),
508
+
orm.FilterEq("domain", domain),
508
509
)
509
510
if err != nil {
510
511
l.Error("failed to get registration", "err", err)
···
539
540
540
541
registrations, err := db.GetRegistrations(
541
542
k.Db,
542
-
db.FilterEq("did", user.Did),
543
-
db.FilterEq("domain", domain),
544
-
db.FilterIsNot("registered", "null"),
543
+
orm.FilterEq("did", user.Did),
544
+
orm.FilterEq("domain", domain),
545
+
orm.FilterIsNot("registered", "null"),
545
546
)
546
547
if err != nil {
547
548
l.Error("failed to get registration", "err", err)
···
647
648
648
649
registrations, err := db.GetRegistrations(
649
650
k.Db,
650
-
db.FilterEq("did", user.Did),
651
-
db.FilterEq("domain", domain),
652
-
db.FilterIsNot("registered", "null"),
651
+
orm.FilterEq("did", user.Did),
652
+
orm.FilterEq("domain", domain),
653
+
orm.FilterIsNot("registered", "null"),
653
654
)
654
655
if err != nil {
655
656
l.Error("failed to get registration", "err", err)
···
672
673
memberId, err := k.IdResolver.ResolveIdent(r.Context(), member)
673
674
if err != nil {
674
675
l.Error("failed to resolve member identity to handle", "err", err)
675
-
k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
676
-
return
677
-
}
678
-
if memberId.Handle.IsInvalidHandle() {
679
-
l.Error("failed to resolve member identity to handle")
680
676
k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
681
677
return
682
678
}
+5
-4
appview/labels/labels.go
···
16
16
"tangled.org/core/appview/oauth"
17
17
"tangled.org/core/appview/pages"
18
18
"tangled.org/core/appview/validator"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/rbac"
20
21
"tangled.org/core/tid"
21
22
···
88
89
repoAt := r.Form.Get("repo")
89
90
subjectUri := r.Form.Get("subject")
90
91
91
-
repo, err := db.GetRepo(l.db, db.FilterEq("at_uri", repoAt))
92
+
repo, err := db.GetRepo(l.db, orm.FilterEq("at_uri", repoAt))
92
93
if err != nil {
93
94
fail("Failed to get repository.", err)
94
95
return
95
96
}
96
97
97
98
// find all the labels that this repo subscribes to
98
-
repoLabels, err := db.GetRepoLabels(l.db, db.FilterEq("repo_at", repoAt))
99
+
repoLabels, err := db.GetRepoLabels(l.db, orm.FilterEq("repo_at", repoAt))
99
100
if err != nil {
100
101
fail("Failed to get labels for this repository.", err)
101
102
return
···
106
107
labelAts = append(labelAts, rl.LabelAt.String())
107
108
}
108
109
109
-
actx, err := db.NewLabelApplicationCtx(l.db, db.FilterIn("at_uri", labelAts))
110
+
actx, err := db.NewLabelApplicationCtx(l.db, orm.FilterIn("at_uri", labelAts))
110
111
if err != nil {
111
112
fail("Invalid form data.", err)
112
113
return
113
114
}
114
115
115
116
// calculate the start state by applying already known labels
116
-
existingOps, err := db.GetLabelOps(l.db, db.FilterEq("subject", subjectUri))
117
+
existingOps, err := db.GetLabelOps(l.db, orm.FilterEq("subject", subjectUri))
117
118
if err != nil {
118
119
fail("Invalid form data.", err)
119
120
return
+7
-2
appview/middleware/middleware.go
···
18
18
"tangled.org/core/appview/pagination"
19
19
"tangled.org/core/appview/reporesolver"
20
20
"tangled.org/core/idresolver"
21
+
"tangled.org/core/orm"
21
22
"tangled.org/core/rbac"
22
23
)
23
24
···
217
218
218
219
repo, err := db.GetRepo(
219
220
mw.db,
220
-
db.FilterEq("did", id.DID.String()),
221
-
db.FilterEq("name", repoName),
221
+
orm.FilterEq("did", id.DID.String()),
222
+
orm.FilterEq("name", repoName),
222
223
)
223
224
if err != nil {
224
225
log.Println("failed to resolve repo", "err", err)
226
+
w.WriteHeader(http.StatusNotFound)
225
227
mw.pages.ErrorKnot404(w)
226
228
return
227
229
}
···
239
241
f, err := mw.repoResolver.Resolve(r)
240
242
if err != nil {
241
243
log.Println("failed to fully resolve repo", err)
244
+
w.WriteHeader(http.StatusNotFound)
242
245
mw.pages.ErrorKnot404(w)
243
246
return
244
247
}
···
287
290
f, err := mw.repoResolver.Resolve(r)
288
291
if err != nil {
289
292
log.Println("failed to fully resolve repo", err)
293
+
w.WriteHeader(http.StatusNotFound)
290
294
mw.pages.ErrorKnot404(w)
291
295
return
292
296
}
···
323
327
f, err := mw.repoResolver.Resolve(r)
324
328
if err != nil {
325
329
log.Println("failed to fully resolve repo", err)
330
+
w.WriteHeader(http.StatusNotFound)
326
331
mw.pages.ErrorKnot404(w)
327
332
return
328
333
}
+1
-1
appview/models/pull.go
···
83
83
Repo *Repo
84
84
}
85
85
86
+
// NOTE: This method does not include the patch blob in the returned atproto record
86
87
func (p Pull) AsRecord() tangled.RepoPull {
87
88
var source *tangled.RepoPull_Source
88
89
if p.PullSource != nil {
···
113
114
Repo: p.RepoAt.String(),
114
115
Branch: p.TargetBranch,
115
116
},
116
-
Patch: p.LatestPatch(),
117
117
Source: source,
118
118
}
119
119
return record
+5
-4
appview/notifications/notifications.go
···
11
11
"tangled.org/core/appview/oauth"
12
12
"tangled.org/core/appview/pages"
13
13
"tangled.org/core/appview/pagination"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
type Notifications struct {
···
53
54
54
55
total, err := db.CountNotifications(
55
56
n.db,
56
-
db.FilterEq("recipient_did", user.Did),
57
+
orm.FilterEq("recipient_did", user.Did),
57
58
)
58
59
if err != nil {
59
60
l.Error("failed to get total notifications", "err", err)
···
64
65
notifications, err := db.GetNotificationsWithEntities(
65
66
n.db,
66
67
page,
67
-
db.FilterEq("recipient_did", user.Did),
68
+
orm.FilterEq("recipient_did", user.Did),
68
69
)
69
70
if err != nil {
70
71
l.Error("failed to get notifications", "err", err)
···
96
97
97
98
count, err := db.CountNotifications(
98
99
n.db,
99
-
db.FilterEq("recipient_did", user.Did),
100
-
db.FilterEq("read", 0),
100
+
orm.FilterEq("recipient_did", user.Did),
101
+
orm.FilterEq("read", 0),
101
102
)
102
103
if err != nil {
103
104
http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+77
-66
appview/notify/db/db.go
···
3
3
import (
4
4
"context"
5
5
"log"
6
-
"maps"
7
6
"slices"
8
7
9
8
"github.com/bluesky-social/indigo/atproto/syntax"
···
12
11
"tangled.org/core/appview/models"
13
12
"tangled.org/core/appview/notify"
14
13
"tangled.org/core/idresolver"
14
+
"tangled.org/core/orm"
15
+
"tangled.org/core/sets"
15
16
)
16
17
17
18
const (
18
-
maxMentions = 5
19
+
maxMentions = 8
19
20
)
20
21
21
22
type databaseNotifier struct {
···
42
43
return
43
44
}
44
45
var err error
45
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt)))
46
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt)))
46
47
if err != nil {
47
48
log.Printf("NewStar: failed to get repos: %v", err)
48
49
return
49
50
}
50
51
51
52
actorDid := syntax.DID(star.Did)
52
-
recipients := []syntax.DID{syntax.DID(repo.Did)}
53
+
recipients := sets.Singleton(syntax.DID(repo.Did))
53
54
eventType := models.NotificationTypeRepoStarred
54
55
entityType := "repo"
55
56
entityId := star.RepoAt.String()
···
74
75
}
75
76
76
77
func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
77
-
78
-
// build the recipients list
79
-
// - owner of the repo
80
-
// - collaborators in the repo
81
-
var recipients []syntax.DID
82
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
83
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
78
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
84
79
if err != nil {
85
80
log.Printf("failed to fetch collaborators: %v", err)
86
81
return
87
82
}
83
+
84
+
// build the recipients list
85
+
// - owner of the repo
86
+
// - collaborators in the repo
87
+
// - remove users already mentioned
88
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
88
89
for _, c := range collaborators {
89
-
recipients = append(recipients, c.SubjectDid)
90
+
recipients.Insert(c.SubjectDid)
91
+
}
92
+
for _, m := range mentions {
93
+
recipients.Remove(m)
90
94
}
91
95
92
96
actorDid := syntax.DID(issue.Did)
···
108
112
)
109
113
n.notifyEvent(
110
114
actorDid,
111
-
mentions,
115
+
sets.Collect(slices.Values(mentions)),
112
116
models.NotificationTypeUserMentioned,
113
117
entityType,
114
118
entityId,
···
119
123
}
120
124
121
125
func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
122
-
issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt))
126
+
issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt))
123
127
if err != nil {
124
128
log.Printf("NewIssueComment: failed to get issues: %v", err)
125
129
return
···
130
134
}
131
135
issue := issues[0]
132
136
133
-
var recipients []syntax.DID
134
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
137
+
// build the recipients list:
138
+
// - the owner of the repo
139
+
// - | if the comment is a reply -> everybody on that thread
140
+
// | if the comment is a top level -> just the issue owner
141
+
// - remove mentioned users from the recipients list
142
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
135
143
136
144
if comment.IsReply() {
137
145
// if this comment is a reply, then notify everybody in that thread
138
146
parentAtUri := *comment.ReplyTo
139
-
allThreads := issue.CommentList()
140
147
141
148
// find the parent thread, and add all DIDs from here to the recipient list
142
-
for _, t := range allThreads {
149
+
for _, t := range issue.CommentList() {
143
150
if t.Self.AtUri().String() == parentAtUri {
144
-
recipients = append(recipients, t.Participants()...)
151
+
for _, p := range t.Participants() {
152
+
recipients.Insert(p)
153
+
}
145
154
}
146
155
}
147
156
} else {
148
157
// not a reply, notify just the issue author
149
-
recipients = append(recipients, syntax.DID(issue.Did))
158
+
recipients.Insert(syntax.DID(issue.Did))
159
+
}
160
+
161
+
for _, m := range mentions {
162
+
recipients.Remove(m)
150
163
}
151
164
152
165
actorDid := syntax.DID(comment.Did)
···
168
181
)
169
182
n.notifyEvent(
170
183
actorDid,
171
-
mentions,
184
+
sets.Collect(slices.Values(mentions)),
172
185
models.NotificationTypeUserMentioned,
173
186
entityType,
174
187
entityId,
···
184
197
185
198
func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {
186
199
actorDid := syntax.DID(follow.UserDid)
187
-
recipients := []syntax.DID{syntax.DID(follow.SubjectDid)}
200
+
recipients := sets.Singleton(syntax.DID(follow.SubjectDid))
188
201
eventType := models.NotificationTypeFollowed
189
202
entityType := "follow"
190
203
entityId := follow.UserDid
···
207
220
}
208
221
209
222
func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {
210
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
223
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
211
224
if err != nil {
212
225
log.Printf("NewPull: failed to get repos: %v", err)
213
226
return
214
227
}
215
-
216
-
// build the recipients list
217
-
// - owner of the repo
218
-
// - collaborators in the repo
219
-
var recipients []syntax.DID
220
-
recipients = append(recipients, syntax.DID(repo.Did))
221
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
228
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
222
229
if err != nil {
223
230
log.Printf("failed to fetch collaborators: %v", err)
224
231
return
225
232
}
233
+
234
+
// build the recipients list
235
+
// - owner of the repo
236
+
// - collaborators in the repo
237
+
recipients := sets.Singleton(syntax.DID(repo.Did))
226
238
for _, c := range collaborators {
227
-
recipients = append(recipients, c.SubjectDid)
239
+
recipients.Insert(c.SubjectDid)
228
240
}
229
241
230
242
actorDid := syntax.DID(pull.OwnerDid)
···
258
270
return
259
271
}
260
272
261
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt))
273
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt))
262
274
if err != nil {
263
275
log.Printf("NewPullComment: failed to get repos: %v", err)
264
276
return
···
267
279
// build up the recipients list:
268
280
// - repo owner
269
281
// - all pull participants
270
-
var recipients []syntax.DID
271
-
recipients = append(recipients, syntax.DID(repo.Did))
282
+
// - remove those already mentioned
283
+
recipients := sets.Singleton(syntax.DID(repo.Did))
272
284
for _, p := range pull.Participants() {
273
-
recipients = append(recipients, syntax.DID(p))
285
+
recipients.Insert(syntax.DID(p))
286
+
}
287
+
for _, m := range mentions {
288
+
recipients.Remove(m)
274
289
}
275
290
276
291
actorDid := syntax.DID(comment.OwnerDid)
···
294
309
)
295
310
n.notifyEvent(
296
311
actorDid,
297
-
mentions,
312
+
sets.Collect(slices.Values(mentions)),
298
313
models.NotificationTypeUserMentioned,
299
314
entityType,
300
315
entityId,
···
321
336
}
322
337
323
338
func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {
324
-
// build up the recipients list:
325
-
// - repo owner
326
-
// - repo collaborators
327
-
// - all issue participants
328
-
var recipients []syntax.DID
329
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
330
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
339
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
331
340
if err != nil {
332
341
log.Printf("failed to fetch collaborators: %v", err)
333
342
return
334
343
}
344
+
345
+
// build up the recipients list:
346
+
// - repo owner
347
+
// - repo collaborators
348
+
// - all issue participants
349
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
335
350
for _, c := range collaborators {
336
-
recipients = append(recipients, c.SubjectDid)
351
+
recipients.Insert(c.SubjectDid)
337
352
}
338
353
for _, p := range issue.Participants() {
339
-
recipients = append(recipients, syntax.DID(p))
354
+
recipients.Insert(syntax.DID(p))
340
355
}
341
356
342
357
entityType := "pull"
···
366
381
367
382
func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
368
383
// Get repo details
369
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
384
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
370
385
if err != nil {
371
386
log.Printf("NewPullState: failed to get repos: %v", err)
372
387
return
373
388
}
374
389
375
-
// build up the recipients list:
376
-
// - repo owner
377
-
// - all pull participants
378
-
var recipients []syntax.DID
379
-
recipients = append(recipients, syntax.DID(repo.Did))
380
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
390
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
381
391
if err != nil {
382
392
log.Printf("failed to fetch collaborators: %v", err)
383
393
return
384
394
}
395
+
396
+
// build up the recipients list:
397
+
// - repo owner
398
+
// - all pull participants
399
+
recipients := sets.Singleton(syntax.DID(repo.Did))
385
400
for _, c := range collaborators {
386
-
recipients = append(recipients, c.SubjectDid)
401
+
recipients.Insert(c.SubjectDid)
387
402
}
388
403
for _, p := range pull.Participants() {
389
-
recipients = append(recipients, syntax.DID(p))
404
+
recipients.Insert(syntax.DID(p))
390
405
}
391
406
392
407
entityType := "pull"
···
422
437
423
438
func (n *databaseNotifier) notifyEvent(
424
439
actorDid syntax.DID,
425
-
recipients []syntax.DID,
440
+
recipients sets.Set[syntax.DID],
426
441
eventType models.NotificationType,
427
442
entityType string,
428
443
entityId string,
···
430
445
issueId *int64,
431
446
pullId *int64,
432
447
) {
433
-
if eventType == models.NotificationTypeUserMentioned && len(recipients) > maxMentions {
434
-
recipients = recipients[:maxMentions]
448
+
// if the user is attempting to mention more than maxMentions users, this is probably spam; do not notify anybody
449
+
if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions {
450
+
return
435
451
}
436
-
recipientSet := make(map[syntax.DID]struct{})
437
-
for _, did := range recipients {
438
-
// everybody except actor themselves
439
-
if did != actorDid {
440
-
recipientSet[did] = struct{}{}
441
-
}
442
-
}
452
+
453
+
recipients.Remove(actorDid)
443
454
444
455
prefMap, err := db.GetNotificationPreferences(
445
456
n.db,
446
-
db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))),
457
+
orm.FilterIn("user_did", slices.Collect(recipients.All())),
447
458
)
448
459
if err != nil {
449
460
// failed to get prefs for users
···
459
470
defer tx.Rollback()
460
471
461
472
// filter based on preferences
462
-
for recipientDid := range recipientSet {
473
+
for recipientDid := range recipients.All() {
463
474
prefs, ok := prefMap[recipientDid]
464
475
if !ok {
465
476
prefs = models.DefaultNotificationPreferences(recipientDid)
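The notifier now builds recipient lists with a small generic set type from `tangled.org/core/sets` instead of slices plus a manual dedup map, which is what lets it drop the `maps` import and simply `Remove(actorDid)` before fanning out. The sketch below covers only the operations this file relies on (`Singleton`, `Insert`, `Remove`, `Len`, `All`, `Collect`); it is an assumed implementation, not the actual sets package.

```go
// Assumed implementation of the set operations used above; the real
// tangled.org/core/sets package may differ.
package sets

import "iter"

// Set is an unordered collection of unique comparable values.
type Set[T comparable] map[T]struct{}

// Singleton returns a set containing exactly one element.
func Singleton[T comparable](v T) Set[T] { return Set[T]{v: {}} }

func (s Set[T]) Insert(v T) { s[v] = struct{}{} }
func (s Set[T]) Remove(v T) { delete(s, v) }
func (s Set[T]) Len() int   { return len(s) }

// All yields each element once, in no particular order.
func (s Set[T]) All() iter.Seq[T] {
	return func(yield func(T) bool) {
		for v := range s {
			if !yield(v) {
				return
			}
		}
	}
}

// Collect builds a Set from any sequence, e.g. slices.Values(mentions).
func Collect[T comparable](seq iter.Seq[T]) Set[T] {
	out := Set[T]{}
	for v := range seq {
		out.Insert(v)
	}
	return out
}
```

With `All()` returning an `iter.Seq`, both `slices.Collect(recipients.All())` and `for recipientDid := range recipients.All()` from the hunks above type-check on Go 1.23 or newer.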
-1
appview/notify/merged_notifier.go
+3
-2
appview/oauth/handler.go
···
16
16
"tangled.org/core/api/tangled"
17
17
"tangled.org/core/appview/db"
18
18
"tangled.org/core/consts"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/tid"
20
21
)
21
22
···
97
98
// and create an sh.tangled.spindle.member record with that
98
99
spindleMembers, err := db.GetSpindleMembers(
99
100
o.Db,
100
-
db.FilterEq("instance", "spindle.tangled.sh"),
101
-
db.FilterEq("subject", did),
101
+
orm.FilterEq("instance", "spindle.tangled.sh"),
102
+
orm.FilterEq("subject", did),
102
103
)
103
104
if err != nil {
104
105
l.Error("failed to get spindle members", "err", err)
+9
-9
appview/ogcard/card.go
···
334
334
return nil
335
335
}
336
336
337
-
func (c *Card) DrawDollySilhouette(x, y, size int, iconColor color.Color) error {
337
+
func (c *Card) DrawDolly(x, y, size int, iconColor color.Color) error {
338
338
tpl, err := template.New("dolly").
339
-
ParseFS(pages.Files, "templates/fragments/dolly/silhouette.html")
339
+
ParseFS(pages.Files, "templates/fragments/dolly/logo.html")
340
340
if err != nil {
341
-
return fmt.Errorf("failed to read dolly silhouette template: %w", err)
341
+
return fmt.Errorf("failed to read dolly template: %w", err)
342
342
}
343
343
344
344
var svgData bytes.Buffer
345
-
if err = tpl.ExecuteTemplate(&svgData, "fragments/dolly/silhouette", nil); err != nil {
346
-
return fmt.Errorf("failed to execute dolly silhouette template: %w", err)
345
+
if err = tpl.ExecuteTemplate(&svgData, "fragments/dolly/logo", nil); err != nil {
346
+
return fmt.Errorf("failed to execute dolly template: %w", err)
347
347
}
348
348
349
349
icon, err := BuildSVGIconFromData(svgData.Bytes(), iconColor)
···
453
453
454
454
// Handle SVG separately
455
455
if contentType == "image/svg+xml" || strings.HasSuffix(url, ".svg") {
456
-
return c.convertSVGToPNG(bodyBytes)
456
+
return convertSVGToPNG(bodyBytes)
457
457
}
458
458
459
459
// Support content types are in-sync with the allowed custom avatar file types
···
493
493
}
494
494
495
495
// convertSVGToPNG converts SVG data to a PNG image
496
-
func (c *Card) convertSVGToPNG(svgData []byte) (image.Image, bool) {
496
+
func convertSVGToPNG(svgData []byte) (image.Image, bool) {
497
497
// Parse the SVG
498
498
icon, err := oksvg.ReadIconStream(bytes.NewReader(svgData))
499
499
if err != nil {
···
547
547
draw.CatmullRom.Scale(scaledImg, scaledImg.Bounds(), img, srcBounds, draw.Src, nil)
548
548
549
549
// Draw the image with circular clipping
550
-
for cy := 0; cy < size; cy++ {
551
-
for cx := 0; cx < size; cx++ {
550
+
for cy := range size {
551
+
for cx := range size {
552
552
// Calculate distance from center
553
553
dx := float64(cx - center)
554
554
dy := float64(cy - center)
+7
-2
appview/pages/funcmap.go
···
25
25
"github.com/dustin/go-humanize"
26
26
"github.com/go-enry/go-enry/v2"
27
27
"github.com/yuin/goldmark"
28
+
emoji "github.com/yuin/goldmark-emoji"
28
29
"tangled.org/core/appview/filetree"
29
30
"tangled.org/core/appview/models"
30
31
"tangled.org/core/appview/pages/markup"
···
162
163
}
163
164
return pairs, nil
164
165
},
165
-
"append": func(s []string, values ...string) []string {
166
+
"append": func(s []any, values ...any) []any {
166
167
s = append(s, values...)
167
168
return s
168
169
},
···
261
262
},
262
263
"description": func(text string) template.HTML {
263
264
p.rctx.RendererType = markup.RendererTypeDefault
264
-
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New())
265
+
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New(
266
+
goldmark.WithExtensions(
267
+
emoji.Emoji,
268
+
),
269
+
))
265
270
sanitized := p.rctx.SanitizeDescription(htmlString)
266
271
return template.HTML(sanitized)
267
272
},
+13
-3
appview/pages/markup/extension/atlink.go
···
35
35
return KindAt
36
36
}
37
37
38
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
38
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
39
+
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
39
40
40
41
type atParser struct{}
41
42
···
55
56
if m == nil {
56
57
return nil
57
58
}
59
+
60
+
// Check for all links in the markdown to see if the handle found is inside one
61
+
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
62
+
for _, linkMatch := range linksIndexes {
63
+
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
64
+
return nil
65
+
}
66
+
}
67
+
58
68
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
59
69
block.Advance(m[1])
60
70
node := &AtNode{}
···
87
97
88
98
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
89
99
if entering {
90
-
w.WriteString(`<a href="/@`)
100
+
w.WriteString(`<a href="/`)
91
101
w.WriteString(n.(*AtNode).Handle)
92
-
w.WriteString(`" class="mention font-bold">`)
102
+
w.WriteString(`" class="mention">`)
93
103
} else {
94
104
w.WriteString("</a>")
95
105
}
+2
-2
appview/pages/markup/markdown.go
···
12
12
13
13
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
14
14
"github.com/alecthomas/chroma/v2/styles"
15
-
treeblood "github.com/wyatt915/goldmark-treeblood"
16
15
"github.com/yuin/goldmark"
16
+
"github.com/yuin/goldmark-emoji"
17
17
highlighting "github.com/yuin/goldmark-highlighting/v2"
18
18
"github.com/yuin/goldmark/ast"
19
19
"github.com/yuin/goldmark/extension"
···
65
65
extension.NewFootnote(
66
66
extension.WithFootnoteIDPrefix([]byte("footnote")),
67
67
),
68
-
treeblood.MathML(),
69
68
callout.CalloutExtention,
70
69
textension.AtExt,
70
+
emoji.Emoji,
71
71
),
72
72
goldmark.WithParserOptions(
73
73
parser.WithAutoHeadingID(),
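Both this renderer and the `description` helper in funcmap.go now enable `github.com/yuin/goldmark-emoji`, so `:shortcode:` emoji render in markdown bodies and repo descriptions alike. A self-contained example of the extension wired the same way (the input string and expected output are only illustrative):

```go
// Minimal standalone use of the goldmark-emoji extension, configured the same
// way as the renderer above.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/yuin/goldmark"
	emoji "github.com/yuin/goldmark-emoji"
)

func main() {
	md := goldmark.New(
		goldmark.WithExtensions(emoji.Emoji),
	)

	var buf bytes.Buffer
	if err := md.Convert([]byte("tangled is dolly-approved :sheep:"), &buf); err != nil {
		log.Fatal(err)
	}
	// With the default settings this prints roughly:
	// <p>tangled is dolly-approved 🐑</p>
	fmt.Println(buf.String())
}
```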
+121
appview/pages/markup/markdown_test.go
···
1
+
package markup
2
+
3
+
import (
4
+
"bytes"
5
+
"testing"
6
+
)
7
+
8
+
func TestAtExtension_Rendering(t *testing.T) {
9
+
tests := []struct {
10
+
name string
11
+
markdown string
12
+
expected string
13
+
}{
14
+
{
15
+
name: "renders simple at mention",
16
+
markdown: "Hello @user.tngl.sh!",
17
+
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
18
+
},
19
+
{
20
+
name: "renders multiple at mentions",
21
+
markdown: "Hi @alice.tngl.sh and @bob.example.com",
22
+
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
23
+
},
24
+
{
25
+
name: "renders at mention in parentheses",
26
+
markdown: "Check this out (@user.tngl.sh)",
27
+
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
28
+
},
29
+
{
30
+
name: "does not render email",
31
+
markdown: "Contact me at test@example.com",
32
+
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
33
+
},
34
+
{
35
+
name: "renders at mention with hyphen",
36
+
markdown: "Follow @user-name.tngl.sh",
37
+
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
38
+
},
39
+
{
40
+
name: "renders at mention with numbers",
41
+
markdown: "@user123.test456.social",
42
+
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
43
+
},
44
+
{
45
+
name: "at mention at start of line",
46
+
markdown: "@user.tngl.sh is cool",
47
+
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
48
+
},
49
+
}
50
+
51
+
for _, tt := range tests {
52
+
t.Run(tt.name, func(t *testing.T) {
53
+
md := NewMarkdown()
54
+
55
+
var buf bytes.Buffer
56
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
57
+
t.Fatalf("failed to convert markdown: %v", err)
58
+
}
59
+
60
+
result := buf.String()
61
+
if result != tt.expected+"\n" {
62
+
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
63
+
}
64
+
})
65
+
}
66
+
}
67
+
68
+
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
69
+
tests := []struct {
70
+
name string
71
+
markdown string
72
+
contains string
73
+
}{
74
+
{
75
+
name: "at mention with bold",
76
+
markdown: "**Hello @user.tngl.sh**",
77
+
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
78
+
},
79
+
{
80
+
name: "at mention with italic",
81
+
markdown: "*Check @user.tngl.sh*",
82
+
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
83
+
},
84
+
{
85
+
name: "at mention in list",
86
+
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
87
+
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
88
+
},
89
+
{
90
+
name: "at mention in link",
91
+
markdown: "[@regnault.dev](https://regnault.dev)",
92
+
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
93
+
},
94
+
{
95
+
name: "at mention in link again",
96
+
markdown: "[check out @regnault.dev](https://regnault.dev)",
97
+
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
98
+
},
99
+
{
100
+
name: "at mention in link again, multiline",
101
+
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
102
+
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
103
+
},
104
+
}
105
+
106
+
for _, tt := range tests {
107
+
t.Run(tt.name, func(t *testing.T) {
108
+
md := NewMarkdown()
109
+
110
+
var buf bytes.Buffer
111
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
112
+
t.Fatalf("failed to convert markdown: %v", err)
113
+
}
114
+
115
+
result := buf.String()
116
+
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
117
+
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
118
+
}
119
+
})
120
+
}
121
+
}
+14
-4
appview/pages/pages.go
···
31
31
"github.com/bluesky-social/indigo/atproto/identity"
32
32
"github.com/bluesky-social/indigo/atproto/syntax"
33
33
"github.com/go-git/go-git/v5/plumbing"
34
-
"github.com/go-git/go-git/v5/plumbing/object"
35
34
)
36
35
37
36
//go:embed templates/* static legal
···
211
210
return tpl.ExecuteTemplate(w, "layouts/base", params)
212
211
}
213
212
213
+
type DollyParams struct {
214
+
Classes string
215
+
FillColor string
216
+
}
217
+
218
+
func (p *Pages) Dolly(w io.Writer, params DollyParams) error {
219
+
return p.executePlain("fragments/dolly/logo", w, params)
220
+
}
221
+
214
222
func (p *Pages) Favicon(w io.Writer) error {
215
-
return p.executePlain("fragments/dolly/silhouette", w, nil)
223
+
return p.Dolly(w, DollyParams{
224
+
Classes: "text-black dark:text-white",
225
+
})
216
226
}
217
227
218
228
type LoginParams struct {
···
644
654
}
645
655
646
656
func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error {
647
-
return p.executePlain("fragments/starBtn", w, params)
657
+
return p.executePlain("fragments/starBtn-oob", w, params)
648
658
}
649
659
650
660
type RepoIndexParams struct {
···
652
662
RepoInfo repoinfo.RepoInfo
653
663
Active string
654
664
TagMap map[string][]string
655
-
CommitsTrunc []*object.Commit
665
+
CommitsTrunc []types.Commit
656
666
TagsTrunc []*types.TagReference
657
667
BranchesTrunc []types.Branch
658
668
// ForkInfo *types.ForkInfo
+9
-29
appview/pages/templates/brand/brand.html
···
4
4
<div class="grid grid-cols-10">
5
5
<header class="col-span-full md:col-span-10 px-6 py-2 mb-4">
6
6
<h1 class="text-2xl font-bold dark:text-white mb-1">Brand</h1>
7
-
<p class="text-gray-600 dark:text-gray-400 mb-1">
7
+
<p class="text-gray-500 dark:text-gray-300 mb-1">
8
8
Assets and guidelines for using Tangled's logo and brand elements.
9
9
</p>
10
10
</header>
···
14
14
15
15
<!-- Introduction Section -->
16
16
<section>
17
-
<p class="text-gray-600 dark:text-gray-400 mb-2">
17
+
<p class="text-gray-500 dark:text-gray-300 mb-2">
18
18
Tangled's logo and mascot is <strong>Dolly</strong>, the first ever <em>cloned</em> mammal. Please
19
19
follow the below guidelines when using Dolly and the logotype.
20
20
</p>
21
-
<p class="text-gray-600 dark:text-gray-400 mb-2">
21
+
<p class="text-gray-500 dark:text-gray-300 mb-2">
22
22
All assets are served as SVGs, and can be downloaded by right-clicking and clicking "Save image as".
23
23
</p>
24
24
</section>
···
34
34
</div>
35
35
<div class="order-1 lg:order-2">
36
36
<h2 class="text-xl font-semibold dark:text-white mb-3">Black logotype</h2>
37
-
<p class="text-gray-600 dark:text-gray-400 mb-4">For use on light-colored backgrounds.</p>
37
+
<p class="text-gray-500 dark:text-gray-300 mb-4">For use on light-colored backgrounds.</p>
38
38
<p class="text-gray-700 dark:text-gray-300">
39
39
This is the preferred version of the logotype, featuring dark text and elements, ideal for light
40
40
backgrounds and designs.
···
53
53
</div>
54
54
<div class="order-1 lg:order-2">
55
55
<h2 class="text-xl font-semibold dark:text-white mb-3">White logotype</h2>
56
-
<p class="text-gray-600 dark:text-gray-400 mb-4">For use on dark-colored backgrounds.</p>
56
+
<p class="text-gray-500 dark:text-gray-300 mb-4">For use on dark-colored backgrounds.</p>
57
57
<p class="text-gray-700 dark:text-gray-300">
58
58
This version features white text and elements, ideal for dark backgrounds
59
59
and inverted designs.
···
81
81
</div>
82
82
<div class="order-1 lg:order-2">
83
83
<h2 class="text-xl font-semibold dark:text-white mb-3">Mark only</h2>
84
-
<p class="text-gray-600 dark:text-gray-400 mb-4">
84
+
<p class="text-gray-500 dark:text-gray-300 mb-4">
85
85
When a smaller 1:1 logo or icon is needed, Dolly's face may be used on its own.
86
86
</p>
87
87
<p class="text-gray-700 dark:text-gray-300 mb-4">
···
123
123
</div>
124
124
<div class="order-1 lg:order-2">
125
125
<h2 class="text-xl font-semibold dark:text-white mb-3">Colored backgrounds</h2>
126
-
<p class="text-gray-600 dark:text-gray-400 mb-4">
126
+
<p class="text-gray-500 dark:text-gray-300 mb-4">
127
127
White logo mark on colored backgrounds.
128
128
</p>
129
129
<p class="text-gray-700 dark:text-gray-300 mb-4">
···
165
165
</div>
166
166
<div class="order-1 lg:order-2">
167
167
<h2 class="text-xl font-semibold dark:text-white mb-3">Lighter backgrounds</h2>
168
-
<p class="text-gray-600 dark:text-gray-400 mb-4">
168
+
<p class="text-gray-500 dark:text-gray-300 mb-4">
169
169
Dark logo mark on lighter, pastel backgrounds.
170
170
</p>
171
171
<p class="text-gray-700 dark:text-gray-300 mb-4">
···
186
186
</div>
187
187
<div class="order-1 lg:order-2">
188
188
<h2 class="text-xl font-semibold dark:text-white mb-3">Recoloring</h2>
189
-
<p class="text-gray-600 dark:text-gray-400 mb-4">
189
+
<p class="text-gray-500 dark:text-gray-300 mb-4">
190
190
Custom coloring of the logotype is permitted.
191
191
</p>
192
192
<p class="text-gray-700 dark:text-gray-300 mb-4">
···
194
194
</p>
195
195
<p class="text-gray-700 dark:text-gray-300 text-sm">
196
196
<strong>Example:</strong> Gray/sand colored logotype on a light yellow/tan background.
197
-
</p>
198
-
</div>
199
-
</section>
200
-
201
-
<!-- Silhouette Section -->
202
-
<section class="grid grid-cols-1 lg:grid-cols-2 gap-8 items-center">
203
-
<div class="order-2 lg:order-1">
204
-
<div class="border border-gray-200 dark:border-gray-700 p-8 sm:p-16 bg-gray-50 dark:bg-gray-100 rounded">
205
-
<img src="https://assets.tangled.network/tangled_dolly_silhouette.svg"
206
-
alt="Dolly silhouette"
207
-
class="w-full max-w-32 mx-auto" />
208
-
</div>
209
-
</div>
210
-
<div class="order-1 lg:order-2">
211
-
<h2 class="text-xl font-semibold dark:text-white mb-3">Dolly silhouette</h2>
212
-
<p class="text-gray-600 dark:text-gray-400 mb-4">A minimalist version of Dolly.</p>
213
-
<p class="text-gray-700 dark:text-gray-300">
214
-
The silhouette can be used where a subtle brand presence is needed,
215
-
or as a background element. Works on any background color with proper contrast.
216
-
For example, we use this as the site's favicon.
217
197
</p>
218
198
</div>
219
199
</section>
+14
-2
appview/pages/templates/fragments/dolly/logo.html
···
2
2
<svg
3
3
version="1.1"
4
4
id="svg1"
5
-
class="{{ . }}"
5
+
class="{{ .Classes }}"
6
6
width="25"
7
7
height="25"
8
8
viewBox="0 0 25 25"
···
17
17
xmlns:svg="http://www.w3.org/2000/svg"
18
18
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
19
19
xmlns:cc="http://creativecommons.org/ns#">
20
+
<style>
21
+
.dolly {
22
+
color: #000000;
23
+
}
24
+
25
+
@media (prefers-color-scheme: dark) {
26
+
.dolly {
27
+
color: #ffffff;
28
+
}
29
+
}
30
+
</style>
20
31
<sodipodi:namedview
21
32
id="namedview1"
22
33
pagecolor="#ffffff"
···
51
62
id="g1"
52
63
transform="translate(-0.42924038,-0.87777209)">
53
64
<path
54
-
fill="currentColor"
65
+
class="dolly"
66
+
fill="{{ or .FillColor "currentColor" }}"
55
67
style="stroke-width:0.111183;"
56
68
d="m 16.775491,24.987061 c -0.78517,-0.0064 -1.384202,-0.234614 -2.033994,-0.631295 -0.931792,-0.490188 -1.643475,-1.31368 -2.152014,-2.221647 C 11.781409,23.136647 10.701392,23.744942 9.4922931,24.0886 8.9774725,24.238111 8.0757679,24.389777 6.5811304,23.84827 4.4270703,23.124679 2.8580086,20.883331 3.0363279,18.599583 3.0037061,17.652919 3.3488675,16.723769 3.8381157,15.925061 2.5329485,15.224503 1.4686756,14.048584 1.0611184,12.606459 0.81344502,11.816973 0.82385989,10.966486 0.91519098,10.154906 1.2422711,8.2387903 2.6795811,6.5725716 4.5299585,5.9732484 5.2685364,4.290122 6.8802592,3.0349975 8.706276,2.7794663 c 1.2124148,-0.1688264 2.46744,0.084987 3.52811,0.7011837 1.545426,-1.7139736 4.237779,-2.2205077 6.293579,-1.1676231 1.568222,0.7488935 2.689625,2.3113526 2.961888,4.0151464 1.492195,0.5977882 2.749007,1.8168898 3.242225,3.3644951 0.329805,0.9581836 0.340709,2.0135956 0.127128,2.9974286 -0.381606,1.535184 -1.465322,2.842146 -2.868035,3.556463 0.0034,0.273204 0.901506,2.243045 0.751284,3.729647 -0.03281,1.858525 -1.211631,3.619894 -2.846433,4.475452 -0.953967,0.556812 -2.084452,0.546309 -3.120531,0.535398 z m -4.470079,-5.349839 c 1.322246,-0.147248 2.189053,-1.300106 2.862307,-2.338363 0.318287,-0.472954 0.561404,-1.002348 0.803,-1.505815 0.313265,0.287151 0.578698,0.828085 1.074141,0.956909 0.521892,0.162542 1.133743,0.03052 1.45325,-0.443554 0.611414,-1.140449 0.31004,-2.516537 -0.04602,-3.698347 C 18.232844,11.92927 17.945151,11.232927 17.397785,10.751793 17.514522,9.9283111 17.026575,9.0919791 16.332883,8.6609491 15.741721,9.1323278 14.842258,9.1294949 14.271975,8.6252369 13.178927,9.7400102 12.177239,9.7029996 11.209704,8.8195135 10.992255,8.6209543 10.577326,10.031484 9.1211947,9.2324497 8.2846288,9.9333947 7.6359672,10.607693 7.0611981,11.578553 6.5026891,12.62523 5.9177873,13.554793 5.867393,14.69141 c -0.024234,0.66432 0.4948601,1.360337 1.1982269,1.306329 0.702996,0.06277 1.1815208,-0.629091 1.7138087,-0.916491 0.079382,0.927141 0.1688108,1.923227 0.4821259,2.828358 0.3596254,1.171275 1.6262605,1.915695 2.8251855,1.745211 0.08481,-0.0066 0.218672,-0.01769 0.218672,-0.0176 z m 0.686342,-3.497495 c -0.643126,-0.394168 -0.33365,-1.249599 -0.359402,-1.870938 0.064,-0.749774 0.115321,-1.538054 0.452402,-2.221125 0.356724,-0.487008 1.226721,-0.299139 1.265134,0.325689 -0.02558,0.628509 -0.314101,1.25416 -0.279646,1.9057 -0.07482,0.544043 0.05418,1.155133 -0.186476,1.652391 -0.197455,0.275121 -0.599638,0.355105 -0.892012,0.208283 z m -2.808766,-0.358124 c -0.605767,-0.328664 -0.4133176,-1.155655 -0.5083256,-1.73063 0.078762,-0.66567 0.013203,-1.510085 0.5705316,-1.976886 0.545037,-0.380109 1.286917,0.270803 1.029164,0.868384 -0.274913,0.755214 -0.09475,1.580345 -0.08893,2.34609 -0.104009,0.451702 -0.587146,0.691508 -1.002445,0.493042 z"
57
69
id="path4"
appview/pages/templates/fragments/dolly/silhouette.html  (-95)
···
1
-
{{ define "fragments/dolly/silhouette" }}
2
-
<svg
3
-
version="1.1"
4
-
id="svg1"
5
-
width="25"
6
-
height="25"
7
-
viewBox="0 0 25 25"
8
-
sodipodi:docname="tangled_dolly_face_only_black_on_trans.svg"
9
-
inkscape:export-filename="tangled_dolly_silhouette_black_on_trans.svg"
10
-
inkscape:export-xdpi="96"
11
-
inkscape:export-ydpi="96"
12
-
inkscape:version="1.4 (e7c3feb100, 2024-10-09)"
13
-
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
14
-
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
15
-
xmlns="http://www.w3.org/2000/svg"
16
-
xmlns:svg="http://www.w3.org/2000/svg"
17
-
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
18
-
xmlns:cc="http://creativecommons.org/ns#">
19
-
<style>
20
-
.dolly {
21
-
color: #000000;
22
-
}
23
-
24
-
@media (prefers-color-scheme: dark) {
25
-
.dolly {
26
-
color: #ffffff;
27
-
}
28
-
}
29
-
</style>
30
-
<sodipodi:namedview
31
-
id="namedview1"
32
-
pagecolor="#ffffff"
33
-
bordercolor="#000000"
34
-
borderopacity="0.25"
35
-
inkscape:showpageshadow="2"
36
-
inkscape:pageopacity="0.0"
37
-
inkscape:pagecheckerboard="true"
38
-
inkscape:deskcolor="#d5d5d5"
39
-
inkscape:zoom="64"
40
-
inkscape:cx="4.96875"
41
-
inkscape:cy="13.429688"
42
-
inkscape:window-width="3840"
43
-
inkscape:window-height="2160"
44
-
inkscape:window-x="0"
45
-
inkscape:window-y="0"
46
-
inkscape:window-maximized="0"
47
-
inkscape:current-layer="g1"
48
-
borderlayer="true">
49
-
<inkscape:page
50
-
x="0"
51
-
y="0"
52
-
width="25"
53
-
height="25"
54
-
id="page2"
55
-
margin="0"
56
-
bleed="0" />
57
-
</sodipodi:namedview>
58
-
<g
59
-
inkscape:groupmode="layer"
60
-
inkscape:label="Image"
61
-
id="g1"
62
-
transform="translate(-0.42924038,-0.87777209)">
63
-
<path
64
-
class="dolly"
65
-
fill="currentColor"
66
-
style="stroke-width:0.111183"
67
-
d="m 16.775491,24.987061 c -0.78517,-0.0064 -1.384202,-0.234614 -2.033994,-0.631295 -0.931792,-0.490188 -1.643475,-1.31368 -2.152014,-2.221647 C 11.781409,23.136647 10.701392,23.744942 9.4922931,24.0886 8.9774725,24.238111 8.0757679,24.389777 6.5811304,23.84827 4.4270703,23.124679 2.8580086,20.883331 3.0363279,18.599583 3.0037061,17.652919 3.3488675,16.723769 3.8381157,15.925061 2.5329485,15.224503 1.4686756,14.048584 1.0611184,12.606459 0.81344502,11.816973 0.82385989,10.966486 0.91519098,10.154906 1.2422711,8.2387903 2.6795811,6.5725716 4.5299585,5.9732484 5.2685364,4.290122 6.8802592,3.0349975 8.706276,2.7794663 c 1.2124148,-0.1688264 2.46744,0.084987 3.52811,0.7011837 1.545426,-1.7139736 4.237779,-2.2205077 6.293579,-1.1676231 1.568222,0.7488935 2.689625,2.3113526 2.961888,4.0151464 1.492195,0.5977882 2.749007,1.8168898 3.242225,3.3644951 0.329805,0.9581836 0.340709,2.0135956 0.127128,2.9974286 -0.381606,1.535184 -1.465322,2.842146 -2.868035,3.556463 0.0034,0.273204 0.901506,2.243045 0.751284,3.729647 -0.03281,1.858525 -1.211631,3.619894 -2.846433,4.475452 -0.953967,0.556812 -2.084452,0.546309 -3.120531,0.535398 z m -4.470079,-5.349839 c 1.322246,-0.147248 2.189053,-1.300106 2.862307,-2.338363 0.318287,-0.472954 0.561404,-1.002348 0.803,-1.505815 0.313265,0.287151 0.578698,0.828085 1.074141,0.956909 0.521892,0.162542 1.133743,0.03052 1.45325,-0.443554 0.611414,-1.140449 0.31004,-2.516537 -0.04602,-3.698347 C 18.232844,11.92927 17.945151,11.232927 17.397785,10.751793 17.514522,9.9283111 17.026575,9.0919791 16.332883,8.6609491 15.741721,9.1323278 14.842258,9.1294949 14.271975,8.6252369 13.178927,9.7400102 12.177239,9.7029996 11.209704,8.8195135 10.992255,8.6209543 10.577326,10.031484 9.1211947,9.2324497 8.2846288,9.9333947 7.6359672,10.607693 7.0611981,11.578553 6.5026891,12.62523 5.9177873,13.554793 5.867393,14.69141 c -0.024234,0.66432 0.4948601,1.360337 1.1982269,1.306329 0.702996,0.06277 1.1815208,-0.629091 1.7138087,-0.916491 0.079382,0.927141 0.1688108,1.923227 0.4821259,2.828358 0.3596254,1.171275 1.6262605,1.915695 2.8251855,1.745211 0.08481,-0.0066 0.218672,-0.01769 0.218672,-0.0176 z"
68
-
id="path7"
69
-
sodipodi:nodetypes="sccccccccccccccccccsscccccccccscccccccsc" />
70
-
</g>
71
-
<metadata
72
-
id="metadata1">
73
-
<rdf:RDF>
74
-
<cc:Work
75
-
rdf:about="">
76
-
<cc:license
77
-
rdf:resource="http://creativecommons.org/licenses/by/4.0/" />
78
-
</cc:Work>
79
-
<cc:License
80
-
rdf:about="http://creativecommons.org/licenses/by/4.0/">
81
-
<cc:permits
82
-
rdf:resource="http://creativecommons.org/ns#Reproduction" />
83
-
<cc:permits
84
-
rdf:resource="http://creativecommons.org/ns#Distribution" />
85
-
<cc:requires
86
-
rdf:resource="http://creativecommons.org/ns#Notice" />
87
-
<cc:requires
88
-
rdf:resource="http://creativecommons.org/ns#Attribution" />
89
-
<cc:permits
90
-
rdf:resource="http://creativecommons.org/ns#DerivativeWorks" />
91
-
</cc:License>
92
-
</rdf:RDF>
93
-
</metadata>
94
-
</svg>
95
-
{{ end }}
appview/pages/templates/fragments/logotype.html  (+1, -1)
···
  {{ define "fragments/logotype" }}
  <span class="flex items-center gap-2">
- {{ template "fragments/dolly/logo" "size-16 text-black dark:text-white" }}
+ {{ template "fragments/dolly/logo" (dict "Classes" "size-16 text-black dark:text-white") }}
  <span class="font-bold text-4xl not-italic">tangled</span>
  <span class="font-normal not-italic text-xs rounded bg-gray-100 dark:bg-gray-700 px-1">
  alpha
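Note: the logo fragment now takes a named-argument map instead of a bare class string, which is why `class="{{ . }}"` in the logo template becomes `class="{{ .Classes }}"`. The `dict` helper itself is not part of this diff; a minimal sketch of how such a FuncMap helper is commonly registered (assumed implementation, the repo's real one may differ):

```go
package pages

import (
	"fmt"
	"html/template"
)

// dict builds a map from alternating key/value arguments so a template can
// pass named parameters to a fragment, e.g. (dict "Classes" "size-16").
// Sketch of the assumed helper, not the repo's actual implementation.
func dict(pairs ...any) (map[string]any, error) {
	if len(pairs)%2 != 0 {
		return nil, fmt.Errorf("dict: odd number of arguments")
	}
	m := make(map[string]any, len(pairs)/2)
	for i := 0; i < len(pairs); i += 2 {
		key, ok := pairs[i].(string)
		if !ok {
			return nil, fmt.Errorf("dict: key %v is not a string", pairs[i])
		}
		m[key] = pairs[i+1]
	}
	return m, nil
}

var pageFuncs = template.FuncMap{"dict": dict}
```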
appview/pages/templates/fragments/logotypeSmall.html  (+1, -1)
···
  {{ define "fragments/logotypeSmall" }}
  <span class="flex items-center gap-2">
- {{ template "fragments/dolly/logo" "size-8 text-black dark:text-white" }}
+ {{ template "fragments/dolly/logo" (dict "Classes" "size-8 text-black dark:text-white")}}
  <span class="font-bold text-xl not-italic">tangled</span>
  <span class="font-normal not-italic text-xs rounded bg-gray-100 dark:bg-gray-700 px-1">
  alpha
appview/pages/templates/fragments/starBtn-oob.html  (+5)
appview/pages/templates/fragments/starBtn.html  (+1, -3)
···
  {{ define "fragments/starBtn" }}
+ {{/* NOTE: this fragment is always replaced with hx-swap-oob */}}
  <button
  id="starBtn"
  class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group"
···
  {{ end }}

  hx-trigger="click"
- hx-target="this"
- hx-swap="outerHTML"
- hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'
  hx-disabled-elt="#starBtn"
  >
  {{ if .IsStarred }}
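Note: the button no longer swaps itself; the refreshed markup is pushed by the server as an htmx out-of-band swap (the new starBtn-oob.html wrapper above). A self-contained sketch of the server side of that pattern, with inline HTML standing in for the real template render:

```go
package main

import (
	"fmt"
	"html"
	"net/http"
)

// starBtnOOB writes a refreshed star button marked for an out-of-band swap,
// so every #starBtn whose data-star-subject-at matches is replaced regardless
// of which element triggered the request. Sketch only; the real handler
// renders the "fragments/starBtn" template instead of inline HTML.
func starBtnOOB(w http.ResponseWriter, subjectAt string, starred bool) {
	label := "star"
	if starred {
		label = "unstar"
	}
	fmt.Fprintf(w,
		`<button id="starBtn" hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="%s"]' data-star-subject-at="%s">%s</button>`,
		html.EscapeString(subjectAt), html.EscapeString(subjectAt), label)
}

func main() {
	http.HandleFunc("/star", func(w http.ResponseWriter, r *http.Request) {
		starBtnOOB(w, r.FormValue("subject"), true)
	})
	_ = http.ListenAndServe(":8080", nil)
}
```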
appview/pages/templates/fragments/tinyAvatarList.html  (+22)
···
+ {{ define "fragments/tinyAvatarList" }}
+ {{ $all := .all }}
+ {{ $classes := .classes }}
+ {{ $ps := take $all 5 }}
+ <div class="inline-flex items-center -space-x-3">
+ {{ $c := "z-50 z-40 z-30 z-20 z-10" }}
+ {{ range $i, $p := $ps }}
+ <img
+ src="{{ tinyAvatar . }}"
+ alt=""
+ class="rounded-full size-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0 {{ $classes }}"
+ />
+ {{ end }}
+
+ {{ if gt (len $all) 5 }}
+ <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm">
+ +{{ sub (len $all) 5 }}
+ </span>
+ {{ end }}
+ </div>
+ {{ end }}
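Note: the fragment caps the strip at five avatars via `take` and shows the overflow count via `sub`; the `$c` literal presumably just keeps the z-10..z-50 classes from being purged by Tailwind's scanner. The helper functions are not part of this diff; a minimal sketch of the assumed FuncMap entries (element type chosen for illustration only):

```go
package pages

import "html/template"

// avatarFuncs sketches the assumed "take" and "sub" helpers used by
// fragments/tinyAvatarList; their real definitions are outside this diff.
func avatarFuncs() template.FuncMap {
	return template.FuncMap{
		// take returns at most n leading elements of a slice.
		"take": func(items []string, n int) []string {
			if n > len(items) {
				n = len(items)
			}
			return items[:n]
		},
		// sub subtracts two ints, e.g. the "+N more" overflow count.
		"sub": func(a, b int) int { return a - b },
	}
}
```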
appview/pages/templates/knots/index.html  (+1, -1)
···
  {{ define "docsButton" }}
  <a
  class="btn flex items-center gap-2"
- href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+ href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide">
  {{ i "book" "size-4" }}
  docs
  </a>
appview/pages/templates/layouts/base.html  (+4)
···
  <script defer src="/static/htmx-ext-ws.min.js"></script>
  <script defer src="/static/actor-typeahead.js" type="module"></script>

+ <link rel="icon" href="/static/logos/dolly.ico" sizes="48x48"/>
+ <link rel="icon" href="/static/logos/dolly.svg" sizes="any" type="image/svg+xml"/>
+ <link rel="apple-touch-icon" href="/static/logos/dolly.png"/>
+
  <!-- preconnect to image cdn -->
  <link rel="preconnect" href="https://avatar.tangled.sh" />
  <link rel="preconnect" href="https://camo.tangled.sh" />
appview/pages/templates/layouts/fragments/topbar.html  (+1, -5)
···
  <div class="flex justify-between p-0 items-center">
  <div id="left-items">
  <a href="/" hx-boost="true" class="text-2xl no-underline hover:no-underline flex items-center gap-2">
- {{ template "fragments/dolly/logo" "size-8 text-black dark:text-white" }}
- <span class="font-bold text-xl not-italic hidden md:inline">tangled</span>
- <span class="font-normal not-italic text-xs rounded bg-gray-100 dark:bg-gray-700 px-1 hidden md:inline">
- alpha
- </span>
+ {{ template "fragments/logotypeSmall" }}
  </a>
  </div>
appview/pages/templates/repo/commit.html  (+34, -9)
···
25
25
</div>
26
26
27
27
<div class="flex flex-wrap items-center space-x-2">
28
-
<p class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-300">
29
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
30
-
31
-
{{ if $did }}
32
-
{{ template "user/fragments/picHandleLink" $did }}
33
-
{{ else }}
34
-
<a href="mailto:{{ $commit.Author.Email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $commit.Author.Name }}</a>
35
-
{{ end }}
28
+
<p class="flex flex-wrap items-center gap-1 text-sm text-gray-500 dark:text-gray-300">
29
+
{{ template "attribution" . }}
36
30
37
31
<span class="px-1 select-none before:content-['\00B7']"></span>
38
-
{{ template "repo/fragments/time" $commit.Author.When }}
32
+
{{ template "repo/fragments/time" $commit.Committer.When }}
39
33
<span class="px-1 select-none before:content-['\00B7']"></span>
40
34
41
35
<a href="/{{ $repo }}/commit/{{ $commit.This }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ slice $commit.This 0 8 }}</a>
···
78
72
79
73
</section>
80
74
{{end}}
75
+
76
+
{{ define "attribution" }}
77
+
{{ $commit := .Diff.Commit }}
78
+
{{ $showCommitter := true }}
79
+
{{ if eq $commit.Author.Email $commit.Committer.Email }}
80
+
{{ $showCommitter = false }}
81
+
{{ end }}
82
+
83
+
{{ if $showCommitter }}
84
+
authored by {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid) }}
85
+
{{ range $commit.CoAuthors }}
86
+
{{ template "attributedUser" (list .Email .Name $.EmailToDid) }}
87
+
{{ end }}
88
+
and committed by {{ template "attributedUser" (list $commit.Committer.Email $commit.Committer.Name $.EmailToDid) }}
89
+
{{ else }}
90
+
{{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid )}}
91
+
{{ end }}
92
+
{{ end }}
93
+
94
+
{{ define "attributedUser" }}
95
+
{{ $email := index . 0 }}
96
+
{{ $name := index . 1 }}
97
+
{{ $map := index . 2 }}
98
+
{{ $did := index $map $email }}
99
+
100
+
{{ if $did }}
101
+
{{ template "user/fragments/picHandleLink" $did }}
102
+
{{ else }}
103
+
<a href="mailto:{{ $email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $name }}</a>
104
+
{{ end }}
105
+
{{ end }}
81
106
82
107
{{ define "topbarLayout" }}
83
108
<header class="col-span-full" style="z-index: 20;">
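Note: the new "attribution" block walks `$commit.CoAuthors` in addition to the author and committer. How co-authors are extracted is not shown in this diff; a common approach is parsing `Co-authored-by:` trailers from the commit message, sketched here under that assumption:

```go
package git

import (
	"regexp"
	"strings"
)

// CoAuthor mirrors the Name/Email pair the attribution template reads.
// Sketch only: the real CoAuthors field may be populated elsewhere.
type CoAuthor struct {
	Name  string
	Email string
}

var coAuthorRe = regexp.MustCompile(`(?mi)^Co-authored-by:\s*(.+?)\s*<([^>]+)>\s*$`)

// parseCoAuthors extracts Co-authored-by trailers from a commit message,
// one per matching line, in the order they appear.
func parseCoAuthors(message string) []CoAuthor {
	var out []CoAuthor
	for _, m := range coAuthorRe.FindAllStringSubmatch(message, -1) {
		out = append(out, CoAuthor{Name: strings.TrimSpace(m[1]), Email: m[2]})
	}
	return out
}
```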
appview/pages/templates/repo/empty.html  (+1, -1)
···
  {{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }}
  {{ $knot := .RepoInfo.Knot }}
  {{ if eq $knot "knot1.tangled.sh" }}
- {{ $knot = "tangled.sh" }}
+ {{ $knot = "tangled.org" }}
  {{ end }}
  <div class="w-full flex place-content-center">
  <div class="py-6 w-fit flex flex-col gap-4">
appview/pages/templates/repo/fragments/backlinks.html  (+6, -6)
···
14
14
<div class="flex gap-2 items-center">
15
15
{{ if .State.IsClosed }}
16
16
<span class="text-gray-500 dark:text-gray-400">
17
-
{{ i "ban" "w-4 h-4" }}
17
+
{{ i "ban" "size-3" }}
18
18
</span>
19
19
{{ else if eq .Kind.String "issues" }}
20
20
<span class="text-green-600 dark:text-green-500">
21
-
{{ i "circle-dot" "w-4 h-4" }}
21
+
{{ i "circle-dot" "size-3" }}
22
22
</span>
23
23
{{ else if .State.IsOpen }}
24
24
<span class="text-green-600 dark:text-green-500">
25
-
{{ i "git-pull-request" "w-4 h-4" }}
25
+
{{ i "git-pull-request" "size-3" }}
26
26
</span>
27
27
{{ else if .State.IsMerged }}
28
28
<span class="text-purple-600 dark:text-purple-500">
29
-
{{ i "git-merge" "w-4 h-4" }}
29
+
{{ i "git-merge" "size-3" }}
30
30
</span>
31
31
{{ else }}
32
32
<span class="text-gray-600 dark:text-gray-300">
33
-
{{ i "git-pull-request-closed" "w-4 h-4" }}
33
+
{{ i "git-pull-request-closed" "size-3" }}
34
34
</span>
35
35
{{ end }}
36
-
<a href="{{ . }}"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a>
36
+
<a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a>
37
37
</div>
38
38
{{ if not (eq $.RepoInfo.FullName $repoUrl) }}
39
39
<div>
appview/pages/templates/repo/fragments/diff.html  (+1, -1)
···
  {{ else }}
  {{ range $idx, $hunk := $diff }}
  {{ with $hunk }}
- <details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
+ <details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
  <summary class="list-none cursor-pointer sticky top-0">
  <div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between">
  <div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
appview/pages/templates/repo/fragments/participants.html  (+1, -16)
···
6
6
<span class="font-bold text-gray-500 dark:text-gray-400 capitalize">Participants</span>
7
7
<span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 ml-1">{{ len $all }}</span>
8
8
</div>
9
-
<div class="flex items-center -space-x-3 mt-2">
10
-
{{ $c := "z-50 z-40 z-30 z-20 z-10" }}
11
-
{{ range $i, $p := $ps }}
12
-
<img
13
-
src="{{ tinyAvatar . }}"
14
-
alt=""
15
-
class="rounded-full h-8 w-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0"
16
-
/>
17
-
{{ end }}
18
-
19
-
{{ if gt (len $all) 5 }}
20
-
<span class="pl-4 text-gray-500 dark:text-gray-400 text-sm">
21
-
+{{ sub (len $all) 5 }}
22
-
</span>
23
-
{{ end }}
24
-
</div>
9
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "w-8 h-8") }}
25
10
</div>
26
11
{{ end }}
appview/pages/templates/repo/fragments/splitDiff.html  (+35, -35)
···
3
3
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}}
4
4
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
5
5
{{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
6
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
6
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
7
7
{{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}}
8
8
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}}
9
9
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
10
10
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
11
11
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
12
12
<div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700">
13
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
13
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
14
14
{{- range .LeftLines -}}
15
15
{{- if .IsEmpty -}}
16
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
17
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
18
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
19
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
20
-
</div>
16
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
17
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
18
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
19
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
20
+
</span>
21
21
{{- else if eq .Op.String "-" -}}
22
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
24
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
25
-
<div class="px-2">{{ .Content }}</div>
26
-
</div>
22
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
24
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
25
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
26
+
</span>
27
27
{{- else if eq .Op.String " " -}}
28
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
30
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
31
-
<div class="px-2">{{ .Content }}</div>
32
-
</div>
28
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
30
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
31
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
32
+
</span>
33
33
{{- end -}}
34
34
{{- end -}}
35
-
{{- end -}}</div></div></pre>
35
+
{{- end -}}</div></div></div>
36
36
37
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
37
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
38
38
{{- range .RightLines -}}
39
39
{{- if .IsEmpty -}}
40
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
41
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
42
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
43
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
44
-
</div>
40
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
41
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
42
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
43
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
44
+
</span>
45
45
{{- else if eq .Op.String "+" -}}
46
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
48
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
49
-
<div class="px-2" >{{ .Content }}</div>
50
-
</div>
46
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span>
48
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
49
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
50
+
</span>
51
51
{{- else if eq .Op.String " " -}}
52
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
54
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
55
-
<div class="px-2">{{ .Content }}</div>
56
-
</div>
52
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span>
54
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
55
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
56
+
</span>
57
57
{{- end -}}
58
58
{{- end -}}
59
-
{{- end -}}</div></div></pre>
59
+
{{- end -}}</div></div></div>
60
60
</div>
61
61
{{ end }}
appview/pages/templates/repo/fragments/unifiedDiff.html  (+21, -22)
···
1
1
{{ define "repo/fragments/unifiedDiff" }}
2
2
{{ $name := .Id }}
3
-
<pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
3
+
<div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
4
4
{{- $oldStart := .OldPosition -}}
5
5
{{- $newStart := .NewPosition -}}
6
6
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}}
7
7
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
8
8
{{- $lineNrSepStyle1 := "" -}}
9
9
{{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
10
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
10
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
11
11
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}}
12
12
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
13
13
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
14
14
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
15
15
{{- range .Lines -}}
16
16
{{- if eq .Op.String "+" -}}
17
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div>
19
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div>
20
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
21
-
<div class="px-2">{{ .Line }}</div>
22
-
</div>
17
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span>
19
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span>
20
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
21
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
22
+
</span>
23
23
{{- $newStart = add64 $newStart 1 -}}
24
24
{{- end -}}
25
25
{{- if eq .Op.String "-" -}}
26
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div>
28
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div>
29
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
30
-
<div class="px-2">{{ .Line }}</div>
31
-
</div>
26
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span>
28
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span>
29
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
30
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
31
+
</span>
32
32
{{- $oldStart = add64 $oldStart 1 -}}
33
33
{{- end -}}
34
34
{{- if eq .Op.String " " -}}
35
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div>
37
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div>
38
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
39
-
<div class="px-2">{{ .Line }}</div>
40
-
</div>
35
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span>
37
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span>
38
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
39
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
40
+
</span>
41
41
{{- $newStart = add64 $newStart 1 -}}
42
42
{{- $oldStart = add64 $oldStart 1 -}}
43
43
{{- end -}}
44
44
{{- end -}}
45
-
{{- end -}}</div></div></pre>
45
+
{{- end -}}</div></div></div>
46
46
{{ end }}
47
-
appview/pages/templates/repo/index.html  (+31, -9)
···
14
14
{{ end }}
15
15
<div class="flex items-center justify-between pb-5">
16
16
{{ block "branchSelector" . }}{{ end }}
17
-
<div class="flex md:hidden items-center gap-2">
17
+
<div class="flex md:hidden items-center gap-3">
18
18
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold">
19
19
{{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }}
20
20
</a>
···
47
47
<div class="px-4 py-2 border-b border-gray-200 dark:border-gray-600 flex items-center gap-4 flex-wrap">
48
48
{{ range $value := .Languages }}
49
49
<div
50
-
class="flex flex-grow items-center gap-2 text-xs align-items-center justify-center"
50
+
class="flex items-center gap-2 text-xs align-items-center justify-center"
51
51
>
52
52
{{ template "repo/fragments/colorBall" (dict "color" (langColor $value.Name)) }}
53
53
<div>{{ or $value.Name "Other" }}
···
66
66
67
67
{{ define "branchSelector" }}
68
68
<div class="flex gap-2 items-center justify-between w-full">
69
-
<div class="flex gap-2 items-center">
69
+
<div class="flex gap-2 items-stretch">
70
70
<select
71
71
onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)"
72
72
class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700"
···
228
228
<span
229
229
class="mx-1 before:content-['·'] before:select-none"
230
230
></span>
231
-
<span>
232
-
{{ $did := index $.EmailToDid .Author.Email }}
233
-
<a href="{{ if $did }}/{{ resolve $did }}{{ else }}mailto:{{ .Author.Email }}{{ end }}"
234
-
class="text-gray-500 dark:text-gray-400 no-underline hover:underline"
235
-
>{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ .Author.Name }}{{ end }}</a>
236
-
</span>
231
+
{{ template "attribution" (list . $.EmailToDid) }}
237
232
<div class="inline-block px-1 select-none after:content-['·']"></div>
238
233
{{ template "repo/fragments/time" .Committer.When }}
239
234
···
259
254
{{ end }}
260
255
</div>
261
256
</div>
257
+
{{ end }}
258
+
259
+
{{ define "attribution" }}
260
+
{{ $commit := index . 0 }}
261
+
{{ $map := index . 1 }}
262
+
<span class="flex items-center">
263
+
{{ $author := index $map $commit.Author.Email }}
264
+
{{ $coauthors := $commit.CoAuthors }}
265
+
{{ $all := list }}
266
+
267
+
{{ if $author }}
268
+
{{ $all = append $all $author }}
269
+
{{ end }}
270
+
{{ range $coauthors }}
271
+
{{ $co := index $map .Email }}
272
+
{{ if $co }}
273
+
{{ $all = append $all $co }}
274
+
{{ end }}
275
+
{{ end }}
276
+
277
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }}
278
+
<a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
279
+
class="no-underline hover:underline">
280
+
{{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }}
281
+
{{ if $coauthors }} +{{ length $coauthors }}{{ end }}
282
+
</a>
283
+
</span>
262
284
{{ end }}
263
285
264
286
{{ define "branchList" }}
appview/pages/templates/repo/log.html  (+40, -23)
···
17
17
<div class="hidden md:flex md:flex-col divide-y divide-gray-200 dark:divide-gray-700">
18
18
{{ $grid := "grid grid-cols-14 gap-4" }}
19
19
<div class="{{ $grid }}">
20
-
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2">Author</div>
20
+
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Author</div>
21
21
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Commit</div>
22
22
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-6">Message</div>
23
-
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-1"></div>
24
23
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2 justify-self-end">Date</div>
25
24
</div>
26
25
{{ range $index, $commit := .Commits }}
27
26
{{ $messageParts := splitN $commit.Message "\n\n" 2 }}
28
27
<div class="{{ $grid }} py-3">
29
-
<div class="align-top truncate col-span-2">
30
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
31
-
{{ if $did }}
32
-
{{ template "user/fragments/picHandleLink" $did }}
33
-
{{ else }}
34
-
<a href="mailto:{{ $commit.Author.Email }}" class="text-gray-700 dark:text-gray-300 no-underline hover:underline">{{ $commit.Author.Name }}</a>
35
-
{{ end }}
28
+
<div class="align-top col-span-3">
29
+
{{ template "attribution" (list $commit $.EmailToDid) }}
36
30
</div>
37
31
<div class="align-top font-mono flex items-start col-span-3">
38
32
{{ $verified := $.VerifiedCommits.IsVerified $commit.Hash.String }}
···
61
55
<div class="align-top col-span-6">
62
56
<div>
63
57
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ $commit.Hash.String }}" class="dark:text-white no-underline hover:underline">{{ index $messageParts 0 }}</a>
58
+
64
59
{{ if gt (len $messageParts) 1 }}
65
60
<button class="py-1/2 px-1 bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 rounded" hx-on:click="this.parentElement.nextElementSibling.classList.toggle('hidden')">{{ i "ellipsis" "w-3 h-3" }}</button>
66
61
{{ end }}
···
72
67
</span>
73
68
{{ end }}
74
69
{{ end }}
70
+
71
+
<!-- ci status -->
72
+
<span class="text-xs">
73
+
{{ $pipeline := index $.Pipelines .Hash.String }}
74
+
{{ if and $pipeline (gt (len $pipeline.Statuses) 0) }}
75
+
{{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }}
76
+
{{ end }}
77
+
</span>
75
78
</div>
76
79
77
80
{{ if gt (len $messageParts) 1 }}
78
81
<p class="hidden mt-1 text-sm text-gray-600 dark:text-gray-400">{{ nl2br (index $messageParts 1) }}</p>
79
82
{{ end }}
80
-
</div>
81
-
<div class="align-top col-span-1">
82
-
<!-- ci status -->
83
-
{{ $pipeline := index $.Pipelines .Hash.String }}
84
-
{{ if and $pipeline (gt (len $pipeline.Statuses) 0) }}
85
-
{{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }}
86
-
{{ end }}
87
83
</div>
88
84
<div class="align-top justify-self-end text-gray-500 dark:text-gray-400 col-span-2">{{ template "repo/fragments/shortTimeAgo" $commit.Committer.When }}</div>
89
85
</div>
···
152
148
</a>
153
149
</span>
154
150
<span class="mx-2 before:content-['·'] before:select-none"></span>
155
-
<span>
156
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
157
-
<a href="{{ if $did }}/{{ $did }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
158
-
class="text-gray-500 dark:text-gray-400 no-underline hover:underline">
159
-
{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ $commit.Author.Name }}{{ end }}
160
-
</a>
161
-
</span>
151
+
{{ template "attribution" (list $commit $.EmailToDid) }}
162
152
<div class="inline-block px-1 select-none after:content-['·']"></div>
163
153
<span>{{ template "repo/fragments/shortTime" $commit.Committer.When }}</span>
164
154
···
176
166
</div>
177
167
</section>
178
168
169
+
{{ end }}
170
+
171
+
{{ define "attribution" }}
172
+
{{ $commit := index . 0 }}
173
+
{{ $map := index . 1 }}
174
+
<span class="flex items-center gap-1">
175
+
{{ $author := index $map $commit.Author.Email }}
176
+
{{ $coauthors := $commit.CoAuthors }}
177
+
{{ $all := list }}
178
+
179
+
{{ if $author }}
180
+
{{ $all = append $all $author }}
181
+
{{ end }}
182
+
{{ range $coauthors }}
183
+
{{ $co := index $map .Email }}
184
+
{{ if $co }}
185
+
{{ $all = append $all $co }}
186
+
{{ end }}
187
+
{{ end }}
188
+
189
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }}
190
+
<a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
191
+
class="no-underline hover:underline">
192
+
{{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }}
193
+
{{ if $coauthors }} +{{ length $coauthors }}{{ end }}
194
+
</a>
195
+
</span>
179
196
{{ end }}
180
197
181
198
{{ define "repoAfter" }}
appview/pages/templates/repo/pipelines/pipelines.html  (+1, -1)
···
  </p>
  <p>
  <span class="{{ $bullet }}">2</span>Configure your CI/CD
- <a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
+ <a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
  </p>
  <p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
  </div>
appview/pages/templates/repo/settings/pipelines.html  (+1, -1)
···
  <p class="text-gray-500 dark:text-gray-400">
  Choose a spindle to execute your workflows on. Only repository owners
  can configure spindles. Spindles can be selfhosted,
- <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+ <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
  click to learn more.
  </a>
  </p>
appview/pages/templates/spindles/index.html  (+1, -1)
···
  {{ define "docsButton" }}
  <a
  class="btn flex items-center gap-2"
- href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+ href="https://docs.tangled.org/spindles.html#self-hosting-guide">
  {{ i "book" "size-4" }}
  docs
  </a>
appview/pages/templates/strings/string.html  (+1, -1)
···
  <span class="select-none">/</span>
  <a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a>
  </div>
- <div class="flex gap-2 text-base">
+ <div class="flex gap-2 items-stretch text-base">
  {{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }}
  <a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group"
  hx-boost="true"
appview/pages/templates/user/followers.html  (+3, -1)
···
  "FollowersCount" .FollowersCount
  "FollowingCount" .FollowingCount) }}
  {{ else }}
- <p class="px-6 dark:text-white">This user does not have any followers yet.</p>
+ <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
+ <span>This user does not have any followers yet.</span>
+ </div>
  {{ end }}
  </div>
  {{ end }}
appview/pages/templates/user/following.html  (+3, -1)
···
19
19
"FollowersCount" .FollowersCount
20
20
"FollowingCount" .FollowingCount) }}
21
21
{{ else }}
22
-
<p class="px-6 dark:text-white">This user does not follow anyone yet.</p>
22
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
23
+
<span>This user does not follow anyone yet.</span>
24
+
</div>
23
25
{{ end }}
24
26
</div>
25
27
{{ end }}
appview/pages/templates/user/fragments/followCard.html  (+2, -2)
···
6
6
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" />
7
7
</div>
8
8
9
-
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full">
9
+
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0">
10
10
<div class="flex-1 min-h-0 justify-around flex flex-col">
11
11
<a href="/{{ $userIdent }}">
12
12
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span>
13
13
</a>
14
14
{{ with .Profile }}
15
-
<p class="text-sm pb-2 md:pb-2">{{.Description}}</p>
15
+
<p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p>
16
16
{{ end }}
17
17
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full">
18
18
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
appview/pages/templates/user/overview.html  (+10, -2)
···
16
16
<p class="text-sm font-bold px-2 pb-4 dark:text-white">ACTIVITY</p>
17
17
<div class="flex flex-col gap-4 relative">
18
18
{{ if .ProfileTimeline.IsEmpty }}
19
-
<p class="dark:text-white">This user does not have any activity yet.</p>
19
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
20
+
<span class="flex items-center gap-2">
21
+
This user does not have any activity yet.
22
+
</span>
23
+
</div>
20
24
{{ end }}
21
25
22
26
{{ with .ProfileTimeline }}
···
254
258
{{ template "user/fragments/repoCard" (list $ . false) }}
255
259
</div>
256
260
{{ else }}
257
-
<p class="dark:text-white">This user does not have any pinned repos.</p>
261
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
262
+
<span class="flex items-center gap-2">
263
+
This user does not have any pinned repos.
264
+
</span>
265
+
</div>
258
266
{{ end }}
259
267
</div>
260
268
</div>
appview/pages/templates/user/repos.html  (+3, -1)
···
13
13
{{ template "user/fragments/repoCard" (list $ . false) }}
14
14
</div>
15
15
{{ else }}
16
-
<p class="px-6 dark:text-white">This user does not have any repos yet.</p>
16
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
17
+
<span>This user does not have any repos yet.</span>
18
+
</div>
17
19
{{ end }}
18
20
</div>
19
21
{{ end }}
appview/pages/templates/user/signup.html  (+9, -6)
···
43
43
page to complete your registration.
44
44
</span>
45
45
<div class="w-full mt-4 text-center">
46
-
<div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}"></div>
46
+
<div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}" data-size="flexible"></div>
47
47
</div>
48
48
<button class="btn text-base w-full my-2 mt-6" type="submit" id="signup-button" tabindex="7" >
49
49
<span>join now</span>
50
50
</button>
51
+
<p class="text-sm text-gray-500">
52
+
Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>.
53
+
</p>
54
+
55
+
<p id="signup-msg" class="error w-full"></p>
56
+
<p class="text-sm text-gray-500 pt-4">
57
+
By signing up, you agree to our <a href="/terms" class="underline">Terms of Service</a> and <a href="/privacy" class="underline">Privacy Policy</a>.
58
+
</p>
51
59
</form>
52
-
<p class="text-sm text-gray-500">
53
-
Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>.
54
-
</p>
55
-
56
-
<p id="signup-msg" class="error w-full"></p>
57
60
</main>
58
61
</body>
59
62
</html>
appview/pages/templates/user/starred.html  (+3, -1)
···
13
13
{{ template "user/fragments/repoCard" (list $ . true) }}
14
14
</div>
15
15
{{ else }}
16
-
<p class="px-6 dark:text-white">This user does not have any starred repos yet.</p>
16
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
17
+
<span>This user does not have any starred repos yet.</span>
18
+
</div>
17
19
{{ end }}
18
20
</div>
19
21
{{ end }}
appview/pages/templates/user/strings.html  (+3, -1)
···
13
13
{{ template "singleString" (list $ .) }}
14
14
</div>
15
15
{{ else }}
16
-
<p class="px-6 dark:text-white">This user does not have any strings yet.</p>
16
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
17
+
<span>This user does not have any strings yet.</span>
18
+
</div>
17
19
{{ end }}
18
20
</div>
19
21
{{ end }}
appview/pipelines/pipelines.go  (+12, -11)
···
16
16
"tangled.org/core/appview/reporesolver"
17
17
"tangled.org/core/eventconsumer"
18
18
"tangled.org/core/idresolver"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/rbac"
20
21
spindlemodel "tangled.org/core/spindle/models"
21
22
···
81
82
ps, err := db.GetPipelineStatuses(
82
83
p.db,
83
84
30,
84
-
db.FilterEq("repo_owner", f.Did),
85
-
db.FilterEq("repo_name", f.Name),
86
-
db.FilterEq("knot", f.Knot),
85
+
orm.FilterEq("repo_owner", f.Did),
86
+
orm.FilterEq("repo_name", f.Name),
87
+
orm.FilterEq("knot", f.Knot),
87
88
)
88
89
if err != nil {
89
90
l.Error("failed to query db", "err", err)
···
122
123
ps, err := db.GetPipelineStatuses(
123
124
p.db,
124
125
1,
125
-
db.FilterEq("repo_owner", f.Did),
126
-
db.FilterEq("repo_name", f.Name),
127
-
db.FilterEq("knot", f.Knot),
128
-
db.FilterEq("id", pipelineId),
126
+
orm.FilterEq("repo_owner", f.Did),
127
+
orm.FilterEq("repo_name", f.Name),
128
+
orm.FilterEq("knot", f.Knot),
129
+
orm.FilterEq("id", pipelineId),
129
130
)
130
131
if err != nil {
131
132
l.Error("failed to query db", "err", err)
···
189
190
ps, err := db.GetPipelineStatuses(
190
191
p.db,
191
192
1,
192
-
db.FilterEq("repo_owner", f.Did),
193
-
db.FilterEq("repo_name", f.Name),
194
-
db.FilterEq("knot", f.Knot),
195
-
db.FilterEq("id", pipelineId),
193
+
orm.FilterEq("repo_owner", f.Did),
194
+
orm.FilterEq("repo_name", f.Name),
195
+
orm.FilterEq("knot", f.Knot),
196
+
orm.FilterEq("id", pipelineId),
196
197
)
197
198
if err != nil || len(ps) != 1 {
198
199
l.Error("pipeline query failed", "err", err, "count", len(ps))
appview/pulls/opengraph.go  (+3, -2)
···
13
13
"tangled.org/core/appview/db"
14
14
"tangled.org/core/appview/models"
15
15
"tangled.org/core/appview/ogcard"
16
+
"tangled.org/core/orm"
16
17
"tangled.org/core/patchutil"
17
18
"tangled.org/core/types"
18
19
)
···
241
242
dollyX := dollyBounds.Min.X + (dollyBounds.Dx() / 2) - (dollySize / 2)
242
243
dollyY := statsY + iconBaselineOffset - dollySize/2 + 25
243
244
dollyColor := color.RGBA{180, 180, 180, 255} // light gray
244
-
err = dollyArea.DrawDollySilhouette(dollyX, dollyY, dollySize, dollyColor)
245
+
err = dollyArea.DrawDolly(dollyX, dollyY, dollySize, dollyColor)
245
246
if err != nil {
246
247
log.Printf("dolly silhouette not available (this is ok): %v", err)
247
248
}
···
276
277
}
277
278
278
279
// Get comment count from database
279
-
comments, err := db.GetPullComments(s.db, db.FilterEq("pull_id", pull.ID))
280
+
comments, err := db.GetPullComments(s.db, orm.FilterEq("pull_id", pull.ID))
280
281
if err != nil {
281
282
log.Printf("failed to get pull comments: %v", err)
282
283
}
appview/pulls/pulls.go  (+104, -83)
···
19
19
"tangled.org/core/appview/config"
20
20
"tangled.org/core/appview/db"
21
21
pulls_indexer "tangled.org/core/appview/indexer/pulls"
22
+
"tangled.org/core/appview/mentions"
22
23
"tangled.org/core/appview/models"
23
24
"tangled.org/core/appview/notify"
24
25
"tangled.org/core/appview/oauth"
25
26
"tangled.org/core/appview/pages"
26
27
"tangled.org/core/appview/pages/markup"
27
28
"tangled.org/core/appview/pages/repoinfo"
28
-
"tangled.org/core/appview/refresolver"
29
29
"tangled.org/core/appview/reporesolver"
30
30
"tangled.org/core/appview/validator"
31
31
"tangled.org/core/appview/xrpcclient"
32
32
"tangled.org/core/idresolver"
33
+
"tangled.org/core/orm"
33
34
"tangled.org/core/patchutil"
34
35
"tangled.org/core/rbac"
35
36
"tangled.org/core/tid"
···
44
45
)
45
46
46
47
type Pulls struct {
47
-
oauth *oauth.OAuth
48
-
repoResolver *reporesolver.RepoResolver
49
-
pages *pages.Pages
50
-
idResolver *idresolver.Resolver
51
-
refResolver *refresolver.Resolver
52
-
db *db.DB
53
-
config *config.Config
54
-
notifier notify.Notifier
55
-
enforcer *rbac.Enforcer
56
-
logger *slog.Logger
57
-
validator *validator.Validator
58
-
indexer *pulls_indexer.Indexer
48
+
oauth *oauth.OAuth
49
+
repoResolver *reporesolver.RepoResolver
50
+
pages *pages.Pages
51
+
idResolver *idresolver.Resolver
52
+
mentionsResolver *mentions.Resolver
53
+
db *db.DB
54
+
config *config.Config
55
+
notifier notify.Notifier
56
+
enforcer *rbac.Enforcer
57
+
logger *slog.Logger
58
+
validator *validator.Validator
59
+
indexer *pulls_indexer.Indexer
59
60
}
60
61
61
62
func New(
···
63
64
repoResolver *reporesolver.RepoResolver,
64
65
pages *pages.Pages,
65
66
resolver *idresolver.Resolver,
66
-
refResolver *refresolver.Resolver,
67
+
mentionsResolver *mentions.Resolver,
67
68
db *db.DB,
68
69
config *config.Config,
69
70
notifier notify.Notifier,
···
73
74
logger *slog.Logger,
74
75
) *Pulls {
75
76
return &Pulls{
76
-
oauth: oauth,
77
-
repoResolver: repoResolver,
78
-
pages: pages,
79
-
idResolver: resolver,
80
-
refResolver: refResolver,
81
-
db: db,
82
-
config: config,
83
-
notifier: notifier,
84
-
enforcer: enforcer,
85
-
logger: logger,
86
-
validator: validator,
87
-
indexer: indexer,
77
+
oauth: oauth,
78
+
repoResolver: repoResolver,
79
+
pages: pages,
80
+
idResolver: resolver,
81
+
mentionsResolver: mentionsResolver,
82
+
db: db,
83
+
config: config,
84
+
notifier: notifier,
85
+
enforcer: enforcer,
86
+
logger: logger,
87
+
validator: validator,
88
+
indexer: indexer,
88
89
}
89
90
}
90
91
···
190
191
ps, err := db.GetPipelineStatuses(
191
192
s.db,
192
193
len(shas),
193
-
db.FilterEq("repo_owner", f.Did),
194
-
db.FilterEq("repo_name", f.Name),
195
-
db.FilterEq("knot", f.Knot),
196
-
db.FilterIn("sha", shas),
194
+
orm.FilterEq("repo_owner", f.Did),
195
+
orm.FilterEq("repo_name", f.Name),
196
+
orm.FilterEq("knot", f.Knot),
197
+
orm.FilterIn("sha", shas),
197
198
)
198
199
if err != nil {
199
200
log.Printf("failed to fetch pipeline statuses: %s", err)
···
217
218
218
219
labelDefs, err := db.GetLabelDefinitions(
219
220
s.db,
220
-
db.FilterIn("at_uri", f.Labels),
221
-
db.FilterContains("scope", tangled.RepoPullNSID),
221
+
orm.FilterIn("at_uri", f.Labels),
222
+
orm.FilterContains("scope", tangled.RepoPullNSID),
222
223
)
223
224
if err != nil {
224
225
log.Println("failed to fetch labels", err)
···
597
598
598
599
pulls, err := db.GetPulls(
599
600
s.db,
600
-
db.FilterIn("id", ids),
601
+
orm.FilterIn("id", ids),
601
602
)
602
603
if err != nil {
603
604
log.Println("failed to get pulls", err)
···
648
649
ps, err := db.GetPipelineStatuses(
649
650
s.db,
650
651
len(shas),
651
-
db.FilterEq("repo_owner", f.Did),
652
-
db.FilterEq("repo_name", f.Name),
653
-
db.FilterEq("knot", f.Knot),
654
-
db.FilterIn("sha", shas),
652
+
orm.FilterEq("repo_owner", f.Did),
653
+
orm.FilterEq("repo_name", f.Name),
654
+
orm.FilterEq("knot", f.Knot),
655
+
orm.FilterIn("sha", shas),
655
656
)
656
657
if err != nil {
657
658
log.Printf("failed to fetch pipeline statuses: %s", err)
···
664
665
665
666
labelDefs, err := db.GetLabelDefinitions(
666
667
s.db,
667
-
db.FilterIn("at_uri", f.Labels),
668
-
db.FilterContains("scope", tangled.RepoPullNSID),
668
+
orm.FilterIn("at_uri", f.Labels),
669
+
orm.FilterContains("scope", tangled.RepoPullNSID),
669
670
)
670
671
if err != nil {
671
672
log.Println("failed to fetch labels", err)
···
729
730
return
730
731
}
731
732
732
-
mentions, references := s.refResolver.Resolve(r.Context(), body)
733
+
mentions, references := s.mentionsResolver.Resolve(r.Context(), body)
733
734
734
735
// Start a transaction
735
736
tx, err := s.db.BeginTx(r.Context(), nil)
···
1205
1206
}
1206
1207
}
1207
1208
1208
-
mentions, references := s.refResolver.Resolve(r.Context(), body)
1209
+
mentions, references := s.mentionsResolver.Resolve(r.Context(), body)
1209
1210
1210
1211
rkey := tid.TID()
1211
1212
initialSubmission := models.PullSubmission{
···
1240
1241
return
1241
1242
}
1242
1243
1244
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
1245
+
if err != nil {
1246
+
log.Println("failed to upload patch", err)
1247
+
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1248
+
return
1249
+
}
1250
+
1243
1251
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
1244
1252
Collection: tangled.RepoPullNSID,
1245
1253
Repo: user.Did,
···
1251
1259
Repo: string(repo.RepoAt()),
1252
1260
Branch: targetBranch,
1253
1261
},
1254
-
Patch: patch,
1262
+
PatchBlob: blob.Blob,
1255
1263
Source: recordPullSource,
1256
1264
CreatedAt: time.Now().Format(time.RFC3339),
1257
1265
},
···
1327
1335
// apply all record creations at once
1328
1336
var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
1329
1337
for _, p := range stack {
1338
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch()))
1339
+
if err != nil {
1340
+
log.Println("failed to upload patch blob", err)
1341
+
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1342
+
return
1343
+
}
1344
+
1330
1345
record := p.AsRecord()
1331
-
write := comatproto.RepoApplyWrites_Input_Writes_Elem{
1346
+
record.PatchBlob = blob.Blob
1347
+
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
1332
1348
RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
1333
1349
Collection: tangled.RepoPullNSID,
1334
1350
Rkey: &p.Rkey,
···
1336
1352
Val: &record,
1337
1353
},
1338
1354
},
1339
-
}
1340
-
writes = append(writes, &write)
1355
+
})
1341
1356
}
1342
1357
_, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{
1343
1358
Repo: user.Did,
···
1365
1380
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1366
1381
return
1367
1382
}
1383
+
1368
1384
}
1369
1385
1370
1386
if err = tx.Commit(); err != nil {
1371
1387
log.Println("failed to create pull request", err)
1372
1388
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1373
1389
return
1390
+
}
1391
+
1392
+
// notify about each pull
1393
+
//
1394
+
// this is performed after tx.Commit, because it could result in a locked DB otherwise
1395
+
for _, p := range stack {
1396
+
s.notifier.NewPull(r.Context(), p)
1374
1397
}
1375
1398
1376
1399
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
···
1498
1521
// fork repo
1499
1522
repo, err := db.GetRepo(
1500
1523
s.db,
1501
-
db.FilterEq("did", forkOwnerDid),
1502
-
db.FilterEq("name", forkName),
1524
+
orm.FilterEq("did", forkOwnerDid),
1525
+
orm.FilterEq("name", forkName),
1503
1526
)
1504
1527
if err != nil {
1505
1528
log.Println("failed to get repo", "did", forkOwnerDid, "name", forkName, "err", err)
···
1862
1885
return
1863
1886
}
1864
1887
1865
-
var recordPullSource *tangled.RepoPull_Source
1866
-
if pull.IsBranchBased() {
1867
-
recordPullSource = &tangled.RepoPull_Source{
1868
-
Branch: pull.PullSource.Branch,
1869
-
Sha: sourceRev,
1870
-
}
1871
-
}
1872
-
if pull.IsForkBased() {
1873
-
repoAt := pull.PullSource.RepoAt.String()
1874
-
recordPullSource = &tangled.RepoPull_Source{
1875
-
Branch: pull.PullSource.Branch,
1876
-
Repo: &repoAt,
1877
-
Sha: sourceRev,
1878
-
}
1888
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
1889
+
if err != nil {
1890
+
log.Println("failed to upload patch blob", err)
1891
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
1892
+
return
1879
1893
}
1894
+
record := pull.AsRecord()
1895
+
record.PatchBlob = blob.Blob
1896
+
record.CreatedAt = time.Now().Format(time.RFC3339)
1880
1897
1881
1898
_, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
1882
1899
Collection: tangled.RepoPullNSID,
···
1884
1901
Rkey: pull.Rkey,
1885
1902
SwapRecord: ex.Cid,
1886
1903
Record: &lexutil.LexiconTypeDecoder{
1887
-
Val: &tangled.RepoPull{
1888
-
Title: pull.Title,
1889
-
Target: &tangled.RepoPull_Target{
1890
-
Repo: string(repo.RepoAt()),
1891
-
Branch: pull.TargetBranch,
1892
-
},
1893
-
Patch: patch, // new patch
1894
-
Source: recordPullSource,
1895
-
CreatedAt: time.Now().Format(time.RFC3339),
1896
-
},
1904
+
Val: &record,
1897
1905
},
1898
1906
})
1899
1907
if err != nil {
···
1979
1987
}
1980
1988
defer tx.Rollback()
1981
1989
1990
+
client, err := s.oauth.AuthorizedClient(r)
1991
+
if err != nil {
1992
+
log.Println("failed to authorize client")
1993
+
s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
1994
+
return
1995
+
}
1996
+
1982
1997
// pds updates to make
1983
1998
var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
1984
1999
···
2012
2027
return
2013
2028
}
2014
2029
2030
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
2031
+
if err != nil {
2032
+
log.Println("failed to upload patch blob", err)
2033
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
2034
+
return
2035
+
}
2015
2036
record := p.AsRecord()
2037
+
record.PatchBlob = blob.Blob
2016
2038
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
2017
2039
RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
2018
2040
Collection: tangled.RepoPullNSID,
···
2047
2069
return
2048
2070
}
2049
2071
2072
+
blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
2073
+
if err != nil {
2074
+
log.Println("failed to upload patch blob", err)
2075
+
s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
2076
+
return
2077
+
}
2050
2078
record := np.AsRecord()
2051
-
2079
+
record.PatchBlob = blob.Blob
2052
2080
writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
2053
2081
RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{
2054
2082
Collection: tangled.RepoPullNSID,
···
2066
2094
tx,
2067
2095
p.ParentChangeId,
2068
2096
// these should be enough filters to be unique per-stack
2069
-
db.FilterEq("repo_at", p.RepoAt.String()),
2070
-
db.FilterEq("owner_did", p.OwnerDid),
2071
-
db.FilterEq("change_id", p.ChangeId),
2097
+
orm.FilterEq("repo_at", p.RepoAt.String()),
2098
+
orm.FilterEq("owner_did", p.OwnerDid),
2099
+
orm.FilterEq("change_id", p.ChangeId),
2072
2100
)
2073
2101
2074
2102
if err != nil {
···
2082
2110
if err != nil {
2083
2111
log.Println("failed to resubmit pull", err)
2084
2112
s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.")
2085
-
return
2086
-
}
2087
-
2088
-
client, err := s.oauth.AuthorizedClient(r)
2089
-
if err != nil {
2090
-
log.Println("failed to authorize client")
2091
-
s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
2092
2113
return
2093
2114
}
2094
2115
···
2397
2418
body := fp.Body
2398
2419
rkey := tid.TID()
2399
2420
2400
-
mentions, references := s.refResolver.Resolve(ctx, body)
2421
+
mentions, references := s.mentionsResolver.Resolve(ctx, body)
2401
2422
2402
2423
initialSubmission := models.PullSubmission{
2403
2424
Patch: fp.Raw,
+3
-1
appview/refresolver/resolver.go
appview/mentions/resolver.go
+3
-1
appview/refresolver/resolver.go
appview/mentions/resolver.go
···
1
-
package refresolver
1
+
package mentions
2
2
3
3
import (
4
4
"context"
···
35
35
36
36
func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) {
37
37
l := r.logger.With("method", "Resolve")
38
+
38
39
rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source)
39
40
l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs)
41
+
40
42
idents := r.idResolver.ResolveIdents(ctx, rawMentions)
41
43
var mentions []syntax.DID
42
44
for _, ident := range idents {
+1
appview/repo/archive.go
+1
appview/repo/archive.go
···
18
18
l := rp.logger.With("handler", "DownloadArchive")
19
19
ref := chi.URLParam(r, "ref")
20
20
ref, _ = url.PathUnescape(ref)
21
+
ref = strings.TrimSuffix(ref, ".tar.gz")
21
22
f, err := rp.repoResolver.Resolve(r)
22
23
if err != nil {
23
24
l.Error("failed to get repo and knot", "err", err)
+10
-9
appview/repo/artifact.go
+10
-9
appview/repo/artifact.go
···
15
15
"tangled.org/core/appview/models"
16
16
"tangled.org/core/appview/pages"
17
17
"tangled.org/core/appview/xrpcclient"
18
+
"tangled.org/core/orm"
18
19
"tangled.org/core/tid"
19
20
"tangled.org/core/types"
20
21
···
155
156
156
157
artifacts, err := db.GetArtifact(
157
158
rp.db,
158
-
db.FilterEq("repo_at", f.RepoAt()),
159
-
db.FilterEq("tag", tag.Tag.Hash[:]),
160
-
db.FilterEq("name", filename),
159
+
orm.FilterEq("repo_at", f.RepoAt()),
160
+
orm.FilterEq("tag", tag.Tag.Hash[:]),
161
+
orm.FilterEq("name", filename),
161
162
)
162
163
if err != nil {
163
164
log.Println("failed to get artifacts", err)
···
234
235
235
236
artifacts, err := db.GetArtifact(
236
237
rp.db,
237
-
db.FilterEq("repo_at", f.RepoAt()),
238
-
db.FilterEq("tag", tag[:]),
239
-
db.FilterEq("name", filename),
238
+
orm.FilterEq("repo_at", f.RepoAt()),
239
+
orm.FilterEq("tag", tag[:]),
240
+
orm.FilterEq("name", filename),
240
241
)
241
242
if err != nil {
242
243
log.Println("failed to get artifacts", err)
···
276
277
defer tx.Rollback()
277
278
278
279
err = db.DeleteArtifact(tx,
279
-
db.FilterEq("repo_at", f.RepoAt()),
280
-
db.FilterEq("tag", artifact.Tag[:]),
281
-
db.FilterEq("name", filename),
280
+
orm.FilterEq("repo_at", f.RepoAt()),
281
+
orm.FilterEq("tag", artifact.Tag[:]),
282
+
orm.FilterEq("name", filename),
282
283
)
283
284
if err != nil {
284
285
log.Println("failed to remove artifact record from db", err)
+3
-2
appview/repo/feed.go
+3
-2
appview/repo/feed.go
···
11
11
"tangled.org/core/appview/db"
12
12
"tangled.org/core/appview/models"
13
13
"tangled.org/core/appview/pagination"
14
+
"tangled.org/core/orm"
14
15
15
16
"github.com/bluesky-social/indigo/atproto/identity"
16
17
"github.com/bluesky-social/indigo/atproto/syntax"
···
20
21
func (rp *Repo) getRepoFeed(ctx context.Context, repo *models.Repo, ownerSlashRepo string) (*feeds.Feed, error) {
21
22
const feedLimitPerType = 100
22
23
23
-
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", repo.RepoAt()))
24
+
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, orm.FilterEq("repo_at", repo.RepoAt()))
24
25
if err != nil {
25
26
return nil, err
26
27
}
···
28
29
issues, err := db.GetIssuesPaginated(
29
30
rp.db,
30
31
pagination.Page{Limit: feedLimitPerType},
31
-
db.FilterEq("repo_at", repo.RepoAt()),
32
+
orm.FilterEq("repo_at", repo.RepoAt()),
32
33
)
33
34
if err != nil {
34
35
return nil, err
+4
-3
appview/repo/index.go
+4
-3
appview/repo/index.go
···
23
23
"tangled.org/core/appview/models"
24
24
"tangled.org/core/appview/pages"
25
25
"tangled.org/core/appview/xrpcclient"
26
+
"tangled.org/core/orm"
26
27
"tangled.org/core/types"
27
28
28
29
"github.com/go-chi/chi/v5"
···
122
123
l.Error("failed to get email to did map", "err", err)
123
124
}
124
125
125
-
vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, commitsTrunc)
126
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, commitsTrunc)
126
127
if err != nil {
127
128
l.Error("failed to GetVerifiedObjectCommits", "err", err)
128
129
}
···
171
172
// first attempt to fetch from db
172
173
langs, err := db.GetRepoLanguages(
173
174
rp.db,
174
-
db.FilterEq("repo_at", repo.RepoAt()),
175
-
db.FilterEq("ref", currentRef),
175
+
orm.FilterEq("repo_at", repo.RepoAt()),
176
+
orm.FilterEq("ref", currentRef),
176
177
)
177
178
178
179
if err != nil || langs == nil {
+2
-2
appview/repo/log.go
+2
-2
appview/repo/log.go
···
116
116
l.Error("failed to fetch email to did mapping", "err", err)
117
117
}
118
118
119
-
vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, xrpcResp.Commits)
119
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, xrpcResp.Commits)
120
120
if err != nil {
121
121
l.Error("failed to GetVerifiedObjectCommits", "err", err)
122
122
}
···
192
192
l.Error("failed to get email to did mapping", "err", err)
193
193
}
194
194
195
-
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.NiceDiff{*result.Diff})
195
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.Commit{result.Diff.Commit})
196
196
if err != nil {
197
197
l.Error("failed to GetVerifiedCommits", "err", err)
198
198
}
+4
-3
appview/repo/opengraph.go
+4
-3
appview/repo/opengraph.go
···
16
16
"tangled.org/core/appview/db"
17
17
"tangled.org/core/appview/models"
18
18
"tangled.org/core/appview/ogcard"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/types"
20
21
)
21
22
···
236
237
dollyX := dollyBounds.Min.X + (dollyBounds.Dx() / 2) - (dollySize / 2)
237
238
dollyY := statsY + iconBaselineOffset - dollySize/2 + 25
238
239
dollyColor := color.RGBA{180, 180, 180, 255} // light gray
239
-
err = dollyArea.DrawDollySilhouette(dollyX, dollyY, dollySize, dollyColor)
240
+
err = dollyArea.DrawDolly(dollyX, dollyY, dollySize, dollyColor)
240
241
if err != nil {
241
242
log.Printf("dolly silhouette not available (this is ok): %v", err)
242
243
}
···
338
339
var languageStats []types.RepoLanguageDetails
339
340
langs, err := db.GetRepoLanguages(
340
341
rp.db,
341
-
db.FilterEq("repo_at", f.RepoAt()),
342
-
db.FilterEq("is_default_ref", 1),
342
+
orm.FilterEq("repo_at", f.RepoAt()),
343
+
orm.FilterEq("is_default_ref", 1),
343
344
)
344
345
if err != nil {
345
346
log.Printf("failed to get language stats from db: %v", err)
+17
-16
appview/repo/repo.go
+17
-16
appview/repo/repo.go
···
24
24
xrpcclient "tangled.org/core/appview/xrpcclient"
25
25
"tangled.org/core/eventconsumer"
26
26
"tangled.org/core/idresolver"
27
+
"tangled.org/core/orm"
27
28
"tangled.org/core/rbac"
28
29
"tangled.org/core/tid"
29
30
"tangled.org/core/xrpc/serviceauth"
···
345
346
// get form values
346
347
labelId := r.FormValue("label-id")
347
348
348
-
label, err := db.GetLabelDefinition(rp.db, db.FilterEq("id", labelId))
349
+
label, err := db.GetLabelDefinition(rp.db, orm.FilterEq("id", labelId))
349
350
if err != nil {
350
351
fail("Failed to find label definition.", err)
351
352
return
···
409
410
410
411
err = db.UnsubscribeLabel(
411
412
tx,
412
-
db.FilterEq("repo_at", f.RepoAt()),
413
-
db.FilterEq("label_at", removedAt),
413
+
orm.FilterEq("repo_at", f.RepoAt()),
414
+
orm.FilterEq("label_at", removedAt),
414
415
)
415
416
if err != nil {
416
417
fail("Failed to unsubscribe label.", err)
417
418
return
418
419
}
419
420
420
-
err = db.DeleteLabelDefinition(tx, db.FilterEq("id", label.Id))
421
+
err = db.DeleteLabelDefinition(tx, orm.FilterEq("id", label.Id))
421
422
if err != nil {
422
423
fail("Failed to delete label definition.", err)
423
424
return
···
456
457
}
457
458
458
459
labelAts := r.Form["label"]
459
-
_, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts))
460
+
_, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts))
460
461
if err != nil {
461
462
fail("Failed to subscribe to label.", err)
462
463
return
···
542
543
}
543
544
544
545
labelAts := r.Form["label"]
545
-
_, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts))
546
+
_, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts))
546
547
if err != nil {
547
548
fail("Failed to unsubscribe to label.", err)
548
549
return
···
582
583
583
584
err = db.UnsubscribeLabel(
584
585
rp.db,
585
-
db.FilterEq("repo_at", f.RepoAt()),
586
-
db.FilterIn("label_at", labelAts),
586
+
orm.FilterEq("repo_at", f.RepoAt()),
587
+
orm.FilterIn("label_at", labelAts),
587
588
)
588
589
if err != nil {
589
590
fail("Failed to unsubscribe label.", err)
···
612
613
613
614
labelDefs, err := db.GetLabelDefinitions(
614
615
rp.db,
615
-
db.FilterIn("at_uri", f.Labels),
616
-
db.FilterContains("scope", subject.Collection().String()),
616
+
orm.FilterIn("at_uri", f.Labels),
617
+
orm.FilterContains("scope", subject.Collection().String()),
617
618
)
618
619
if err != nil {
619
620
l.Error("failed to fetch label defs", "err", err)
···
625
626
defs[l.AtUri().String()] = &l
626
627
}
627
628
628
-
states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject))
629
+
states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject))
629
630
if err != nil {
630
631
l.Error("failed to build label state", "err", err)
631
632
return
···
660
661
661
662
labelDefs, err := db.GetLabelDefinitions(
662
663
rp.db,
663
-
db.FilterIn("at_uri", f.Labels),
664
-
db.FilterContains("scope", subject.Collection().String()),
664
+
orm.FilterIn("at_uri", f.Labels),
665
+
orm.FilterContains("scope", subject.Collection().String()),
665
666
)
666
667
if err != nil {
667
668
l.Error("failed to fetch labels", "err", err)
···
673
674
defs[l.AtUri().String()] = &l
674
675
}
675
676
676
-
states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject))
677
+
states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject))
677
678
if err != nil {
678
679
l.Error("failed to build label state", "err", err)
679
680
return
···
1046
1047
// in the user's account.
1047
1048
existingRepo, err := db.GetRepo(
1048
1049
rp.db,
1049
-
db.FilterEq("did", user.Did),
1050
-
db.FilterEq("name", forkName),
1050
+
orm.FilterEq("did", user.Did),
1051
+
orm.FilterEq("name", forkName),
1051
1052
)
1052
1053
if err != nil {
1053
1054
if !errors.Is(err, sql.ErrNoRows) {
+16
-17
appview/repo/repo_util.go
+16
-17
appview/repo/repo_util.go
···
1
1
package repo
2
2
3
3
import (
4
+
"maps"
4
5
"slices"
5
6
"sort"
6
7
"strings"
7
8
8
9
"tangled.org/core/appview/db"
9
10
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
10
12
"tangled.org/core/types"
11
-
12
-
"github.com/go-git/go-git/v5/plumbing/object"
13
13
)
14
14
15
15
func sortFiles(files []types.NiceTree) {
···
42
42
})
43
43
}
44
44
45
-
func uniqueEmails(commits []*object.Commit) []string {
45
+
func uniqueEmails(commits []types.Commit) []string {
46
46
emails := make(map[string]struct{})
47
47
for _, commit := range commits {
48
-
if commit.Author.Email != "" {
49
-
emails[commit.Author.Email] = struct{}{}
50
-
}
51
-
if commit.Committer.Email != "" {
52
-
emails[commit.Committer.Email] = struct{}{}
48
+
emails[commit.Author.Email] = struct{}{}
49
+
emails[commit.Committer.Email] = struct{}{}
50
+
for _, c := range commit.CoAuthors() {
51
+
emails[c.Email] = struct{}{}
53
52
}
54
53
}
55
-
var uniqueEmails []string
56
-
for email := range emails {
57
-
uniqueEmails = append(uniqueEmails, email)
58
-
}
59
-
return uniqueEmails
54
+
55
+
// delete empty emails if any, from the set
56
+
delete(emails, "")
57
+
58
+
return slices.Collect(maps.Keys(emails))
60
59
}
61
60
62
61
func balanceIndexItems(commitCount, branchCount, tagCount, fileCount int) (commitsTrunc int, branchesTrunc int, tagsTrunc int) {
···
104
103
ps, err := db.GetPipelineStatuses(
105
104
d,
106
105
len(shas),
107
-
db.FilterEq("repo_owner", repo.Did),
108
-
db.FilterEq("repo_name", repo.Name),
109
-
db.FilterEq("knot", repo.Knot),
110
-
db.FilterIn("sha", shas),
106
+
orm.FilterEq("repo_owner", repo.Did),
107
+
orm.FilterEq("repo_name", repo.Name),
108
+
orm.FilterEq("knot", repo.Knot),
109
+
orm.FilterIn("sha", shas),
111
110
)
112
111
if err != nil {
113
112
return nil, err
+3
-2
appview/repo/settings.go
+3
-2
appview/repo/settings.go
···
14
14
"tangled.org/core/appview/oauth"
15
15
"tangled.org/core/appview/pages"
16
16
xrpcclient "tangled.org/core/appview/xrpcclient"
17
+
"tangled.org/core/orm"
17
18
"tangled.org/core/types"
18
19
19
20
comatproto "github.com/bluesky-social/indigo/api/atproto"
···
210
211
return
211
212
}
212
213
213
-
defaultLabels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs))
214
+
defaultLabels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs))
214
215
if err != nil {
215
216
l.Error("failed to fetch labels", "err", err)
216
217
rp.pages.Error503(w)
217
218
return
218
219
}
219
220
220
-
labels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", f.Labels))
221
+
labels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", f.Labels))
221
222
if err != nil {
222
223
l.Error("failed to fetch labels", "err", err)
223
224
rp.pages.Error503(w)
+26
-1
appview/reporesolver/resolver.go
+26
-1
appview/reporesolver/resolver.go
···
63
63
}
64
64
65
65
// get dir/ref
66
-
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
66
+
currentDir := extractCurrentDir(r.URL.EscapedPath())
67
67
ref := chi.URLParam(r, "ref")
68
68
69
69
repoAt := repo.RepoAt()
···
130
130
}
131
131
132
132
return repoInfo
133
+
}
134
+
135
+
// extractCurrentDir gets the current directory for markdown link resolution.
136
+
// for blob paths, returns the parent dir. for tree paths, returns the path itself.
137
+
//
138
+
// /@user/repo/blob/main/docs/README.md => docs
139
+
// /@user/repo/tree/main/docs => docs
140
+
func extractCurrentDir(fullPath string) string {
141
+
fullPath = strings.TrimPrefix(fullPath, "/")
142
+
143
+
blobPattern := regexp.MustCompile(`blob/[^/]+/(.*)$`)
144
+
if matches := blobPattern.FindStringSubmatch(fullPath); len(matches) > 1 {
145
+
return path.Dir(matches[1])
146
+
}
147
+
148
+
treePattern := regexp.MustCompile(`tree/[^/]+/(.*)$`)
149
+
if matches := treePattern.FindStringSubmatch(fullPath); len(matches) > 1 {
150
+
dir := strings.TrimSuffix(matches[1], "/")
151
+
if dir == "" {
152
+
return "."
153
+
}
154
+
return dir
155
+
}
156
+
157
+
return "."
133
158
}
134
159
135
160
// extractPathAfterRef gets the actual repository path
+22
appview/reporesolver/resolver_test.go
+22
appview/reporesolver/resolver_test.go
···
1
+
package reporesolver
2
+
3
+
import "testing"
4
+
5
+
func TestExtractCurrentDir(t *testing.T) {
6
+
tests := []struct {
7
+
path string
8
+
want string
9
+
}{
10
+
{"/@user/repo/blob/main/docs/README.md", "docs"},
11
+
{"/@user/repo/blob/main/README.md", "."},
12
+
{"/@user/repo/tree/main/docs", "docs"},
13
+
{"/@user/repo/tree/main/docs/", "docs"},
14
+
{"/@user/repo/tree/main", "."},
15
+
}
16
+
17
+
for _, tt := range tests {
18
+
if got := extractCurrentDir(tt.path); got != tt.want {
19
+
t.Errorf("extractCurrentDir(%q) = %q, want %q", tt.path, got, tt.want)
20
+
}
21
+
}
22
+
}
+5
-4
appview/serververify/verify.go
+5
-4
appview/serververify/verify.go
···
9
9
"tangled.org/core/api/tangled"
10
10
"tangled.org/core/appview/db"
11
11
"tangled.org/core/appview/xrpcclient"
12
+
"tangled.org/core/orm"
12
13
"tangled.org/core/rbac"
13
14
)
14
15
···
76
77
// mark this spindle as verified in the db
77
78
rowId, err := db.VerifySpindle(
78
79
tx,
79
-
db.FilterEq("owner", owner),
80
-
db.FilterEq("instance", instance),
80
+
orm.FilterEq("owner", owner),
81
+
orm.FilterEq("instance", instance),
81
82
)
82
83
if err != nil {
83
84
return 0, fmt.Errorf("failed to write to DB: %w", err)
···
115
116
// mark as registered
116
117
err = db.MarkRegistered(
117
118
tx,
118
-
db.FilterEq("did", owner),
119
-
db.FilterEq("domain", domain),
119
+
orm.FilterEq("did", owner),
120
+
orm.FilterEq("domain", domain),
120
121
)
121
122
if err != nil {
122
123
return fmt.Errorf("failed to register domain: %w", err)
+25
-29
appview/spindles/spindles.go
+25
-29
appview/spindles/spindles.go
···
20
20
"tangled.org/core/appview/serververify"
21
21
"tangled.org/core/appview/xrpcclient"
22
22
"tangled.org/core/idresolver"
23
+
"tangled.org/core/orm"
23
24
"tangled.org/core/rbac"
24
25
"tangled.org/core/tid"
25
26
···
71
72
user := s.OAuth.GetUser(r)
72
73
all, err := db.GetSpindles(
73
74
s.Db,
74
-
db.FilterEq("owner", user.Did),
75
+
orm.FilterEq("owner", user.Did),
75
76
)
76
77
if err != nil {
77
78
s.Logger.Error("failed to fetch spindles", "err", err)
···
101
102
102
103
spindles, err := db.GetSpindles(
103
104
s.Db,
104
-
db.FilterEq("instance", instance),
105
-
db.FilterEq("owner", user.Did),
106
-
db.FilterIsNot("verified", "null"),
105
+
orm.FilterEq("instance", instance),
106
+
orm.FilterEq("owner", user.Did),
107
+
orm.FilterIsNot("verified", "null"),
107
108
)
108
109
if err != nil || len(spindles) != 1 {
109
110
l.Error("failed to get spindle", "err", err, "len(spindles)", len(spindles))
···
123
124
repos, err := db.GetRepos(
124
125
s.Db,
125
126
0,
126
-
db.FilterEq("spindle", instance),
127
+
orm.FilterEq("spindle", instance),
127
128
)
128
129
if err != nil {
129
130
l.Error("failed to get spindle repos", "err", err)
···
290
291
291
292
spindles, err := db.GetSpindles(
292
293
s.Db,
293
-
db.FilterEq("owner", user.Did),
294
-
db.FilterEq("instance", instance),
294
+
orm.FilterEq("owner", user.Did),
295
+
orm.FilterEq("instance", instance),
295
296
)
296
297
if err != nil || len(spindles) != 1 {
297
298
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
319
320
// remove spindle members first
320
321
err = db.RemoveSpindleMember(
321
322
tx,
322
-
db.FilterEq("did", user.Did),
323
-
db.FilterEq("instance", instance),
323
+
orm.FilterEq("did", user.Did),
324
+
orm.FilterEq("instance", instance),
324
325
)
325
326
if err != nil {
326
327
l.Error("failed to remove spindle members", "err", err)
···
330
331
331
332
err = db.DeleteSpindle(
332
333
tx,
333
-
db.FilterEq("owner", user.Did),
334
-
db.FilterEq("instance", instance),
334
+
orm.FilterEq("owner", user.Did),
335
+
orm.FilterEq("instance", instance),
335
336
)
336
337
if err != nil {
337
338
l.Error("failed to delete spindle", "err", err)
···
410
411
411
412
spindles, err := db.GetSpindles(
412
413
s.Db,
413
-
db.FilterEq("owner", user.Did),
414
-
db.FilterEq("instance", instance),
414
+
orm.FilterEq("owner", user.Did),
415
+
orm.FilterEq("instance", instance),
415
416
)
416
417
if err != nil || len(spindles) != 1 {
417
418
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
453
454
454
455
verifiedSpindle, err := db.GetSpindles(
455
456
s.Db,
456
-
db.FilterEq("id", rowId),
457
+
orm.FilterEq("id", rowId),
457
458
)
458
459
if err != nil || len(verifiedSpindle) != 1 {
459
460
l.Error("failed get new spindle", "err", err)
···
486
487
487
488
spindles, err := db.GetSpindles(
488
489
s.Db,
489
-
db.FilterEq("owner", user.Did),
490
-
db.FilterEq("instance", instance),
490
+
orm.FilterEq("owner", user.Did),
491
+
orm.FilterEq("instance", instance),
491
492
)
492
493
if err != nil || len(spindles) != 1 {
493
494
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
622
623
623
624
spindles, err := db.GetSpindles(
624
625
s.Db,
625
-
db.FilterEq("owner", user.Did),
626
-
db.FilterEq("instance", instance),
626
+
orm.FilterEq("owner", user.Did),
627
+
orm.FilterEq("instance", instance),
627
628
)
628
629
if err != nil || len(spindles) != 1 {
629
630
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
652
653
s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
653
654
return
654
655
}
655
-
if memberId.Handle.IsInvalidHandle() {
656
-
l.Error("failed to resolve member identity to handle")
657
-
s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.")
658
-
return
659
-
}
660
656
661
657
tx, err := s.Db.Begin()
662
658
if err != nil {
···
672
668
// get the record from the DB first:
673
669
members, err := db.GetSpindleMembers(
674
670
s.Db,
675
-
db.FilterEq("did", user.Did),
676
-
db.FilterEq("instance", instance),
677
-
db.FilterEq("subject", memberId.DID),
671
+
orm.FilterEq("did", user.Did),
672
+
orm.FilterEq("instance", instance),
673
+
orm.FilterEq("subject", memberId.DID),
678
674
)
679
675
if err != nil || len(members) != 1 {
680
676
l.Error("failed to get member", "err", err)
···
685
681
// remove from db
686
682
if err = db.RemoveSpindleMember(
687
683
tx,
688
-
db.FilterEq("did", user.Did),
689
-
db.FilterEq("instance", instance),
690
-
db.FilterEq("subject", memberId.DID),
684
+
orm.FilterEq("did", user.Did),
685
+
orm.FilterEq("instance", instance),
686
+
orm.FilterEq("subject", memberId.DID),
691
687
); err != nil {
692
688
l.Error("failed to remove spindle member", "err", err)
693
689
fail()
+6
-5
appview/state/gfi.go
+6
-5
appview/state/gfi.go
···
11
11
"tangled.org/core/appview/pages"
12
12
"tangled.org/core/appview/pagination"
13
13
"tangled.org/core/consts"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···
20
21
21
22
goodFirstIssueLabel := s.config.Label.GoodFirstIssue
22
23
23
-
gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel))
24
+
gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel))
24
25
if err != nil {
25
26
log.Println("failed to get gfi label def", err)
26
27
s.pages.Error500(w)
27
28
return
28
29
}
29
30
30
-
repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel))
31
+
repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel))
31
32
if err != nil {
32
33
log.Println("failed to get repo labels", err)
33
34
s.pages.Error503(w)
···
55
56
pagination.Page{
56
57
Limit: 500,
57
58
},
58
-
db.FilterIn("repo_at", repoUris),
59
-
db.FilterEq("open", 1),
59
+
orm.FilterIn("repo_at", repoUris),
60
+
orm.FilterEq("open", 1),
60
61
)
61
62
if err != nil {
62
63
log.Println("failed to get issues", err)
···
132
133
}
133
134
134
135
if len(uriList) > 0 {
135
-
allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList))
136
+
allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList))
136
137
if err != nil {
137
138
log.Println("failed to fetch labels", err)
138
139
}
+17
appview/state/git_http.go
+17
appview/state/git_http.go
···
25
25
26
26
}
27
27
28
+
func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
29
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
30
+
if !ok {
31
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
32
+
return
33
+
}
34
+
repo := r.Context().Value("repo").(*models.Repo)
35
+
36
+
scheme := "https"
37
+
if s.config.Core.Dev {
38
+
scheme = "http"
39
+
}
40
+
41
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42
+
s.proxyRequest(w, r, targetURL)
43
+
}
44
+
28
45
func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
29
46
user, ok := r.Context().Value("resolvedId").(identity.Identity)
30
47
if !ok {
+6
-5
appview/state/knotstream.go
+6
-5
appview/state/knotstream.go
···
16
16
ec "tangled.org/core/eventconsumer"
17
17
"tangled.org/core/eventconsumer/cursor"
18
18
"tangled.org/core/log"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/rbac"
20
21
"tangled.org/core/workflow"
21
22
···
30
31
31
32
knots, err := db.GetRegistrations(
32
33
d,
33
-
db.FilterIsNot("registered", "null"),
34
+
orm.FilterIsNot("registered", "null"),
34
35
)
35
36
if err != nil {
36
37
return nil, err
···
143
144
repos, err := db.GetRepos(
144
145
d,
145
146
0,
146
-
db.FilterEq("did", record.RepoDid),
147
-
db.FilterEq("name", record.RepoName),
147
+
orm.FilterEq("did", record.RepoDid),
148
+
orm.FilterEq("name", record.RepoName),
148
149
)
149
150
if err != nil {
150
151
return fmt.Errorf("failed to look for repo in DB (%s/%s): %w", record.RepoDid, record.RepoName, err)
···
209
210
repos, err := db.GetRepos(
210
211
d,
211
212
0,
212
-
db.FilterEq("did", record.TriggerMetadata.Repo.Did),
213
-
db.FilterEq("name", record.TriggerMetadata.Repo.Repo),
213
+
orm.FilterEq("did", record.TriggerMetadata.Repo.Did),
214
+
orm.FilterEq("name", record.TriggerMetadata.Repo.Repo),
214
215
)
215
216
if err != nil {
216
217
return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
+29
appview/state/manifest.go
+29
appview/state/manifest.go
···
1
+
package state
2
+
3
+
import (
4
+
"encoding/json"
5
+
"net/http"
6
+
)
7
+
8
+
// https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Manifest
9
+
// https://www.w3.org/TR/appmanifest/
10
+
var manifestData = map[string]any{
11
+
"name": "tangled",
12
+
"description": "tightly-knit social coding.",
13
+
"icons": []map[string]string{
14
+
{
15
+
"src": "/static/logos/dolly.svg",
16
+
"sizes": "144x144",
17
+
},
18
+
},
19
+
"start_url": "/",
20
+
"id": "https://tangled.org",
21
+
"display": "standalone",
22
+
"background_color": "#111827",
23
+
"theme_color": "#111827",
24
+
}
25
+
26
+
func (p *State) WebAppManifest(w http.ResponseWriter, r *http.Request) {
27
+
w.Header().Set("Content-Type", "application/manifest+json")
28
+
json.NewEncoder(w).Encode(manifestData)
29
+
}
+19
-16
appview/state/profile.go
+19
-16
appview/state/profile.go
···
19
19
"tangled.org/core/appview/db"
20
20
"tangled.org/core/appview/models"
21
21
"tangled.org/core/appview/pages"
22
+
"tangled.org/core/orm"
22
23
)
23
24
24
25
func (s *State) Profile(w http.ResponseWriter, r *http.Request) {
···
56
57
return nil, fmt.Errorf("failed to get profile: %w", err)
57
58
}
58
59
59
-
repoCount, err := db.CountRepos(s.db, db.FilterEq("did", did))
60
+
repoCount, err := db.CountRepos(s.db, orm.FilterEq("did", did))
60
61
if err != nil {
61
62
return nil, fmt.Errorf("failed to get repo count: %w", err)
62
63
}
63
64
64
-
stringCount, err := db.CountStrings(s.db, db.FilterEq("did", did))
65
+
stringCount, err := db.CountStrings(s.db, orm.FilterEq("did", did))
65
66
if err != nil {
66
67
return nil, fmt.Errorf("failed to get string count: %w", err)
67
68
}
68
69
69
-
starredCount, err := db.CountStars(s.db, db.FilterEq("did", did))
70
+
starredCount, err := db.CountStars(s.db, orm.FilterEq("did", did))
70
71
if err != nil {
71
72
return nil, fmt.Errorf("failed to get starred repo count: %w", err)
72
73
}
···
86
87
startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
87
88
punchcard, err := db.MakePunchcard(
88
89
s.db,
89
-
db.FilterEq("did", did),
90
-
db.FilterGte("date", startOfYear.Format(time.DateOnly)),
91
-
db.FilterLte("date", now.Format(time.DateOnly)),
90
+
orm.FilterEq("did", did),
91
+
orm.FilterGte("date", startOfYear.Format(time.DateOnly)),
92
+
orm.FilterLte("date", now.Format(time.DateOnly)),
92
93
)
93
94
if err != nil {
94
95
return nil, fmt.Errorf("failed to get punchcard for %s: %w", did, err)
···
123
124
repos, err := db.GetRepos(
124
125
s.db,
125
126
0,
126
-
db.FilterEq("did", profile.UserDid),
127
+
orm.FilterEq("did", profile.UserDid),
127
128
)
128
129
if err != nil {
129
130
l.Error("failed to fetch repos", "err", err)
···
162
163
}
163
164
164
165
// populate commit counts in the timeline, using the punchcard
165
-
currentMonth := time.Now().Month()
166
+
now := time.Now()
166
167
for _, p := range profile.Punchcard.Punches {
167
-
idx := currentMonth - p.Date.Month()
168
-
if int(idx) < len(timeline.ByMonth) {
169
-
timeline.ByMonth[idx].Commits += p.Count
168
+
years := now.Year() - p.Date.Year()
169
+
months := int(now.Month() - p.Date.Month())
170
+
monthsAgo := years*12 + months
171
+
if monthsAgo >= 0 && monthsAgo < len(timeline.ByMonth) {
172
+
timeline.ByMonth[monthsAgo].Commits += p.Count
170
173
}
171
174
}
172
175
···
193
196
repos, err := db.GetRepos(
194
197
s.db,
195
198
0,
196
-
db.FilterEq("did", profile.UserDid),
199
+
orm.FilterEq("did", profile.UserDid),
197
200
)
198
201
if err != nil {
199
202
l.Error("failed to get repos", "err", err)
···
219
222
}
220
223
l = l.With("profileDid", profile.UserDid)
221
224
222
-
stars, err := db.GetRepoStars(s.db, 0, db.FilterEq("did", profile.UserDid))
225
+
stars, err := db.GetRepoStars(s.db, 0, orm.FilterEq("did", profile.UserDid))
223
226
if err != nil {
224
227
l.Error("failed to get stars", "err", err)
225
228
s.pages.Error500(w)
···
248
251
}
249
252
l = l.With("profileDid", profile.UserDid)
250
253
251
-
strings, err := db.GetStrings(s.db, 0, db.FilterEq("did", profile.UserDid))
254
+
strings, err := db.GetStrings(s.db, 0, orm.FilterEq("did", profile.UserDid))
252
255
if err != nil {
253
256
l.Error("failed to get strings", "err", err)
254
257
s.pages.Error500(w)
···
300
303
followDids = append(followDids, extractDid(follow))
301
304
}
302
305
303
-
profiles, err := db.GetProfiles(s.db, db.FilterIn("did", followDids))
306
+
profiles, err := db.GetProfiles(s.db, orm.FilterIn("did", followDids))
304
307
if err != nil {
305
308
l.Error("failed to get profiles", "followDids", followDids, "err", err)
306
309
return ¶ms, err
···
735
738
log.Printf("getting profile data for %s: %s", user.Did, err)
736
739
}
737
740
738
-
repos, err := db.GetRepos(s.db, 0, db.FilterEq("did", user.Did))
741
+
repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Did))
739
742
if err != nil {
740
743
log.Printf("getting repos for %s: %s", user.Did, err)
741
744
}
+6
-5
appview/state/router.go
+6
-5
appview/state/router.go
···
32
32
s.pages,
33
33
)
34
34
35
-
router.Get("/favicon.svg", s.Favicon)
36
-
router.Get("/favicon.ico", s.Favicon)
37
-
router.Get("/pwa-manifest.json", s.PWAManifest)
35
+
router.Get("/pwa-manifest.json", s.WebAppManifest)
38
36
router.Get("/robots.txt", s.RobotsTxt)
39
37
40
38
userRouter := s.UserRouter(&middleware)
···
101
99
102
100
// These routes get proxied to the knot
103
101
r.Get("/info/refs", s.InfoRefs)
102
+
r.Post("/git-upload-archive", s.UploadArchive)
104
103
r.Post("/git-upload-pack", s.UploadPack)
105
104
r.Post("/git-receive-pack", s.ReceivePack)
106
105
···
108
107
})
109
108
110
109
r.NotFound(func(w http.ResponseWriter, r *http.Request) {
110
+
w.WriteHeader(http.StatusNotFound)
111
111
s.pages.Error404(w)
112
112
})
113
113
···
182
182
r.Get("/brand", s.Brand)
183
183
184
184
r.NotFound(func(w http.ResponseWriter, r *http.Request) {
185
+
w.WriteHeader(http.StatusNotFound)
185
186
s.pages.Error404(w)
186
187
})
187
188
return r
···
267
268
s.enforcer,
268
269
s.pages,
269
270
s.idResolver,
270
-
s.refResolver,
271
+
s.mentionsResolver,
271
272
s.db,
272
273
s.config,
273
274
s.notifier,
···
284
285
s.repoResolver,
285
286
s.pages,
286
287
s.idResolver,
287
-
s.refResolver,
288
+
s.mentionsResolver,
288
289
s.db,
289
290
s.config,
290
291
s.notifier,
+2
-1
appview/state/spindlestream.go
+2
-1
appview/state/spindlestream.go
···
17
17
ec "tangled.org/core/eventconsumer"
18
18
"tangled.org/core/eventconsumer/cursor"
19
19
"tangled.org/core/log"
20
+
"tangled.org/core/orm"
20
21
"tangled.org/core/rbac"
21
22
spindle "tangled.org/core/spindle/models"
22
23
)
···
27
28
28
29
spindles, err := db.GetSpindles(
29
30
d,
30
-
db.FilterIsNot("verified", "null"),
31
+
orm.FilterIsNot("verified", "null"),
31
32
)
32
33
if err != nil {
33
34
return nil, err
+28
-63
appview/state/state.go
+28
-63
appview/state/state.go
···
15
15
"tangled.org/core/appview/config"
16
16
"tangled.org/core/appview/db"
17
17
"tangled.org/core/appview/indexer"
18
+
"tangled.org/core/appview/mentions"
18
19
"tangled.org/core/appview/models"
19
20
"tangled.org/core/appview/notify"
20
21
dbnotify "tangled.org/core/appview/notify/db"
21
22
phnotify "tangled.org/core/appview/notify/posthog"
22
23
"tangled.org/core/appview/oauth"
23
24
"tangled.org/core/appview/pages"
24
-
"tangled.org/core/appview/refresolver"
25
25
"tangled.org/core/appview/reporesolver"
26
26
"tangled.org/core/appview/validator"
27
27
xrpcclient "tangled.org/core/appview/xrpcclient"
···
30
30
"tangled.org/core/jetstream"
31
31
"tangled.org/core/log"
32
32
tlog "tangled.org/core/log"
33
+
"tangled.org/core/orm"
33
34
"tangled.org/core/rbac"
34
35
"tangled.org/core/tid"
35
36
···
43
44
)
44
45
45
46
type State struct {
46
-
db *db.DB
47
-
notifier notify.Notifier
48
-
indexer *indexer.Indexer
49
-
oauth *oauth.OAuth
50
-
enforcer *rbac.Enforcer
51
-
pages *pages.Pages
52
-
idResolver *idresolver.Resolver
53
-
refResolver *refresolver.Resolver
54
-
posthog posthog.Client
55
-
jc *jetstream.JetstreamClient
56
-
config *config.Config
57
-
repoResolver *reporesolver.RepoResolver
58
-
knotstream *eventconsumer.Consumer
59
-
spindlestream *eventconsumer.Consumer
60
-
logger *slog.Logger
61
-
validator *validator.Validator
47
+
db *db.DB
48
+
notifier notify.Notifier
49
+
indexer *indexer.Indexer
50
+
oauth *oauth.OAuth
51
+
enforcer *rbac.Enforcer
52
+
pages *pages.Pages
53
+
idResolver *idresolver.Resolver
54
+
mentionsResolver *mentions.Resolver
55
+
posthog posthog.Client
56
+
jc *jetstream.JetstreamClient
57
+
config *config.Config
58
+
repoResolver *reporesolver.RepoResolver
59
+
knotstream *eventconsumer.Consumer
60
+
spindlestream *eventconsumer.Consumer
61
+
logger *slog.Logger
62
+
validator *validator.Validator
62
63
}
63
64
64
65
func Make(ctx context.Context, config *config.Config) (*State, error) {
···
100
101
101
102
repoResolver := reporesolver.New(config, enforcer, d)
102
103
103
-
refResolver := refresolver.New(config, res, d, log.SubLogger(logger, "refResolver"))
104
+
mentionsResolver := mentions.New(config, res, d, log.SubLogger(logger, "mentionsResolver"))
104
105
105
106
wrapper := db.DbWrapper{Execer: d}
106
107
jc, err := jetstream.NewJetstreamClient(
···
182
183
enforcer,
183
184
pages,
184
185
res,
185
-
refResolver,
186
+
mentionsResolver,
186
187
posthog,
187
188
jc,
188
189
config,
···
201
202
return s.db.Close()
202
203
}
203
204
204
-
func (s *State) Favicon(w http.ResponseWriter, r *http.Request) {
205
-
w.Header().Set("Content-Type", "image/svg+xml")
206
-
w.Header().Set("Cache-Control", "public, max-age=31536000") // one year
207
-
w.Header().Set("ETag", `"favicon-svg-v1"`)
208
-
209
-
if match := r.Header.Get("If-None-Match"); match == `"favicon-svg-v1"` {
210
-
w.WriteHeader(http.StatusNotModified)
211
-
return
212
-
}
213
-
214
-
s.pages.Favicon(w)
215
-
}
216
-
217
205
func (s *State) RobotsTxt(w http.ResponseWriter, r *http.Request) {
218
206
w.Header().Set("Content-Type", "text/plain")
219
207
w.Header().Set("Cache-Control", "public, max-age=86400") // one day
···
224
212
w.Write([]byte(robotsTxt))
225
213
}
226
214
227
-
// https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Manifest
228
-
const manifestJson = `{
229
-
"name": "tangled",
230
-
"description": "tightly-knit social coding.",
231
-
"icons": [
232
-
{
233
-
"src": "/favicon.svg",
234
-
"sizes": "144x144"
235
-
}
236
-
],
237
-
"start_url": "/",
238
-
"id": "org.tangled",
239
-
240
-
"display": "standalone",
241
-
"background_color": "#111827",
242
-
"theme_color": "#111827"
243
-
}`
244
-
245
-
func (p *State) PWAManifest(w http.ResponseWriter, r *http.Request) {
246
-
w.Header().Set("Content-Type", "application/json")
247
-
w.Write([]byte(manifestJson))
248
-
}
249
-
250
215
func (s *State) TermsOfService(w http.ResponseWriter, r *http.Request) {
251
216
user := s.oauth.GetUser(r)
252
217
s.pages.TermsOfService(w, pages.TermsOfServiceParams{
···
299
264
return
300
265
}
301
266
302
-
gfiLabel, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", s.config.Label.GoodFirstIssue))
267
+
gfiLabel, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", s.config.Label.GoodFirstIssue))
303
268
if err != nil {
304
269
// non-fatal
305
270
}
···
323
288
324
289
regs, err := db.GetRegistrations(
325
290
s.db,
326
-
db.FilterEq("did", user.Did),
327
-
db.FilterEq("needs_upgrade", 1),
291
+
orm.FilterEq("did", user.Did),
292
+
orm.FilterEq("needs_upgrade", 1),
328
293
)
329
294
if err != nil {
330
295
l.Error("non-fatal: failed to get registrations", "err", err)
···
332
297
333
298
spindles, err := db.GetSpindles(
334
299
s.db,
335
-
db.FilterEq("owner", user.Did),
336
-
db.FilterEq("needs_upgrade", 1),
300
+
orm.FilterEq("owner", user.Did),
301
+
orm.FilterEq("needs_upgrade", 1),
337
302
)
338
303
if err != nil {
339
304
l.Error("non-fatal: failed to get spindles", "err", err)
···
514
479
// Check for existing repos
515
480
existingRepo, err := db.GetRepo(
516
481
s.db,
517
-
db.FilterEq("did", user.Did),
518
-
db.FilterEq("name", repoName),
482
+
orm.FilterEq("did", user.Did),
483
+
orm.FilterEq("name", repoName),
519
484
)
520
485
if err == nil && existingRepo != nil {
521
486
l.Info("repo exists")
···
675
640
}
676
641
677
642
func BackfillDefaultDefs(e db.Execer, r *idresolver.Resolver, defaults []string) error {
678
-
defaultLabels, err := db.GetLabelDefinitions(e, db.FilterIn("at_uri", defaults))
643
+
defaultLabels, err := db.GetLabelDefinitions(e, orm.FilterIn("at_uri", defaults))
679
644
if err != nil {
680
645
return err
681
646
}
+7
-6
appview/strings/strings.go
+7
-6
appview/strings/strings.go
···
17
17
"tangled.org/core/appview/pages"
18
18
"tangled.org/core/appview/pages/markup"
19
19
"tangled.org/core/idresolver"
20
+
"tangled.org/core/orm"
20
21
"tangled.org/core/tid"
21
22
22
23
"github.com/bluesky-social/indigo/api/atproto"
···
108
109
strings, err := db.GetStrings(
109
110
s.Db,
110
111
0,
111
-
db.FilterEq("did", id.DID),
112
-
db.FilterEq("rkey", rkey),
112
+
orm.FilterEq("did", id.DID),
113
+
orm.FilterEq("rkey", rkey),
113
114
)
114
115
if err != nil {
115
116
l.Error("failed to fetch string", "err", err)
···
199
200
all, err := db.GetStrings(
200
201
s.Db,
201
202
0,
202
-
db.FilterEq("did", id.DID),
203
-
db.FilterEq("rkey", rkey),
203
+
orm.FilterEq("did", id.DID),
204
+
orm.FilterEq("rkey", rkey),
204
205
)
205
206
if err != nil {
206
207
l.Error("failed to fetch string", "err", err)
···
408
409
409
410
if err := db.DeleteString(
410
411
s.Db,
411
-
db.FilterEq("did", user.Did),
412
-
db.FilterEq("rkey", rkey),
412
+
orm.FilterEq("did", user.Did),
413
+
orm.FilterEq("rkey", rkey),
413
414
); err != nil {
414
415
fail("Failed to delete string.", err)
415
416
return
+2
-1
appview/validator/issue.go
+2
-1
appview/validator/issue.go
···
6
6
7
7
"tangled.org/core/appview/db"
8
8
"tangled.org/core/appview/models"
9
+
"tangled.org/core/orm"
9
10
)
10
11
11
12
func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
12
13
// if comments have parents, only ingest ones that are 1 level deep
13
14
if comment.ReplyTo != nil {
14
-
parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo))
15
+
parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo))
15
16
if err != nil {
16
17
return fmt.Errorf("failed to fetch parent comment: %w", err)
17
18
}
+182
cmd/dolly/main.go
+182
cmd/dolly/main.go
···
1
+
package main
2
+
3
+
import (
4
+
"bytes"
5
+
"flag"
6
+
"fmt"
7
+
"image"
8
+
"image/color"
9
+
"image/png"
10
+
"os"
11
+
"path/filepath"
12
+
"strconv"
13
+
"strings"
14
+
"text/template"
15
+
16
+
"github.com/srwiley/oksvg"
17
+
"github.com/srwiley/rasterx"
18
+
"golang.org/x/image/draw"
19
+
"tangled.org/core/appview/pages"
20
+
"tangled.org/core/ico"
21
+
)
22
+
23
+
func main() {
24
+
var (
25
+
size string
26
+
fillColor string
27
+
output string
28
+
)
29
+
30
+
flag.StringVar(&size, "size", "512x512", "Output size in format WIDTHxHEIGHT (e.g., 512x512)")
31
+
flag.StringVar(&fillColor, "color", "#000000", "Fill color in hex format (e.g., #FF5733)")
32
+
flag.StringVar(&output, "output", "dolly.svg", "Output file path (format detected from extension: .svg, .png, or .ico)")
33
+
flag.Parse()
34
+
35
+
width, height, err := parseSize(size)
36
+
if err != nil {
37
+
fmt.Fprintf(os.Stderr, "Error parsing size: %v\n", err)
38
+
os.Exit(1)
39
+
}
40
+
41
+
// Detect format from file extension
42
+
ext := strings.ToLower(filepath.Ext(output))
43
+
format := strings.TrimPrefix(ext, ".")
44
+
45
+
if format != "svg" && format != "png" && format != "ico" {
46
+
fmt.Fprintf(os.Stderr, "Invalid file extension: %s. Must be .svg, .png, or .ico\n", ext)
47
+
os.Exit(1)
48
+
}
49
+
50
+
if fillColor != "currentColor" && !isValidHexColor(fillColor) {
51
+
fmt.Fprintf(os.Stderr, "Invalid color format: %s. Use hex format like #FF5733\n", fillColor)
52
+
os.Exit(1)
53
+
}
54
+
55
+
svgData, err := dolly(fillColor)
56
+
if err != nil {
57
+
fmt.Fprintf(os.Stderr, "Error generating SVG: %v\n", err)
58
+
os.Exit(1)
59
+
}
60
+
61
+
// Create output directory if it doesn't exist
62
+
dir := filepath.Dir(output)
63
+
if dir != "" && dir != "." {
64
+
if err := os.MkdirAll(dir, 0755); err != nil {
65
+
fmt.Fprintf(os.Stderr, "Error creating output directory: %v\n", err)
66
+
os.Exit(1)
67
+
}
68
+
}
69
+
70
+
switch format {
71
+
case "svg":
72
+
err = saveSVG(svgData, output, width, height)
73
+
case "png":
74
+
err = savePNG(svgData, output, width, height)
75
+
case "ico":
76
+
err = saveICO(svgData, output, width, height)
77
+
}
78
+
79
+
if err != nil {
80
+
fmt.Fprintf(os.Stderr, "Error saving file: %v\n", err)
81
+
os.Exit(1)
82
+
}
83
+
84
+
fmt.Printf("Successfully generated %s (%dx%d)\n", output, width, height)
85
+
}
86
+
87
+
func dolly(hexColor string) ([]byte, error) {
88
+
tpl, err := template.New("dolly").
89
+
ParseFS(pages.Files, "templates/fragments/dolly/logo.html")
90
+
if err != nil {
91
+
return nil, err
92
+
}
93
+
94
+
var svgData bytes.Buffer
95
+
if err := tpl.ExecuteTemplate(&svgData, "fragments/dolly/logo", pages.DollyParams{
96
+
FillColor: hexColor,
97
+
}); err != nil {
98
+
return nil, err
99
+
}
100
+
101
+
return svgData.Bytes(), nil
102
+
}
103
+
104
+
func svgToImage(svgData []byte, w, h int) (image.Image, error) {
105
+
icon, err := oksvg.ReadIconStream(bytes.NewReader(svgData))
106
+
if err != nil {
107
+
return nil, fmt.Errorf("error parsing SVG: %v", err)
108
+
}
109
+
110
+
icon.SetTarget(0, 0, float64(w), float64(h))
111
+
rgba := image.NewRGBA(image.Rect(0, 0, w, h))
112
+
draw.Draw(rgba, rgba.Bounds(), &image.Uniform{color.Transparent}, image.Point{}, draw.Src)
113
+
scanner := rasterx.NewScannerGV(w, h, rgba, rgba.Bounds())
114
+
raster := rasterx.NewDasher(w, h, scanner)
115
+
icon.Draw(raster, 1.0)
116
+
117
+
return rgba, nil
118
+
}
119
+
120
+
func parseSize(size string) (int, int, error) {
121
+
parts := strings.Split(size, "x")
122
+
if len(parts) != 2 {
123
+
return 0, 0, fmt.Errorf("invalid size format, use WIDTHxHEIGHT")
124
+
}
125
+
126
+
width, err := strconv.Atoi(parts[0])
127
+
if err != nil {
128
+
return 0, 0, fmt.Errorf("invalid width: %v", err)
129
+
}
130
+
131
+
height, err := strconv.Atoi(parts[1])
132
+
if err != nil {
133
+
return 0, 0, fmt.Errorf("invalid height: %v", err)
134
+
}
135
+
136
+
if width <= 0 || height <= 0 {
137
+
return 0, 0, fmt.Errorf("width and height must be positive")
138
+
}
139
+
140
+
return width, height, nil
141
+
}
142
+
143
+
func isValidHexColor(hex string) bool {
144
+
if len(hex) != 7 || hex[0] != '#' {
145
+
return false
146
+
}
147
+
_, err := strconv.ParseUint(hex[1:], 16, 32)
148
+
return err == nil
149
+
}
150
+
151
+
func saveSVG(svgData []byte, filepath string, _, _ int) error {
152
+
return os.WriteFile(filepath, svgData, 0644)
153
+
}
154
+
155
+
func savePNG(svgData []byte, filepath string, width, height int) error {
156
+
img, err := svgToImage(svgData, width, height)
157
+
if err != nil {
158
+
return err
159
+
}
160
+
161
+
f, err := os.Create(filepath)
162
+
if err != nil {
163
+
return err
164
+
}
165
+
defer f.Close()
166
+
167
+
return png.Encode(f, img)
168
+
}
169
+
170
+
func saveICO(svgData []byte, filepath string, width, height int) error {
171
+
img, err := svgToImage(svgData, width, height)
172
+
if err != nil {
173
+
return err
174
+
}
175
+
176
+
icoData, err := ico.ImageToIco(img)
177
+
if err != nil {
178
+
return err
179
+
}
180
+
181
+
return os.WriteFile(filepath, icoData, 0644)
182
+
}
+1
-34
crypto/verify.go
+1
-34
crypto/verify.go
···
5
5
"crypto/sha256"
6
6
"encoding/base64"
7
7
"fmt"
8
-
"strings"
9
8
10
9
"github.com/hiddeco/sshsig"
11
10
"golang.org/x/crypto/ssh"
12
-
"tangled.org/core/types"
13
11
)
14
12
15
13
func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···
28
26
// multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
29
27
// to sha-512 for all key types anyway.
30
28
err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
31
-
return err, err == nil
32
-
}
33
29
34
-
// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
35
-
// essentially the git cat-file output but without the gpgsig header.
36
-
//
37
-
// Caveats: signature verification will fail on commits with more than one parent,
38
-
// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
39
-
// and we are unable to reconstruct the payload correctly.
40
-
//
41
-
// Ideally this should directly operate on an *object.Commit.
42
-
func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
43
-
signature := commit.Commit.PGPSignature
44
-
45
-
author := bytes.NewBuffer([]byte{})
46
-
committer := bytes.NewBuffer([]byte{})
47
-
commit.Commit.Author.Encode(author)
48
-
commit.Commit.Committer.Encode(committer)
49
-
50
-
payload := strings.Builder{}
51
-
52
-
fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
53
-
if commit.Commit.Parent != "" {
54
-
fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
55
-
}
56
-
fmt.Fprintf(&payload, "author %s\n", author.String())
57
-
fmt.Fprintf(&payload, "committer %s\n", committer.String())
58
-
if commit.Commit.ChangedId != "" {
59
-
fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
60
-
}
61
-
fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
62
-
63
-
return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
30
+
return err, err == nil
64
31
}
65
32
66
33
// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+1527
docs/DOCS.md
···
1
+
---
2
+
title: Tangled docs
3
+
author: The Tangled Contributors
4
+
date: Sun, 21 Dec 2025
5
+
abstract: |
6
+
Tangled is a decentralized code hosting and collaboration
7
+
platform. Every component of Tangled is open-source and
8
+
self-hostable. [tangled.org](https://tangled.org) also
9
+
provides hosting and CI services that are free to use.
10
+
11
+
There are several models for decentralized code
12
+
collaboration platforms, ranging from ActivityPub’s
13
+
(Forgejo) federated model, to Radicle’s entirely P2P model.
14
+
Our approach attempts to be the best of both worlds by
15
+
adopting the AT Protocol—a protocol for building decentralized
16
+
social applications with a central identity.
17
+
18
+
Central to this approach is the idea of “knots”. Knots are
19
+
lightweight, headless servers that enable users to host Git
20
+
repositories with ease. Knots are designed for either single
21
+
or multi-tenant use, which is perfect for self-hosting on a
22
+
Raspberry Pi at home, or larger “community” servers. By
23
+
default, Tangled provides managed knots where you can host
24
+
your repositories for free.
25
+
26
+
The appview at tangled.org acts as a consolidated "view"
27
+
into the whole network, allowing users to access, clone and
28
+
contribute to repositories hosted across different knots
29
+
seamlessly.
30
+
---
31
+
32
+
# Quick start guide
33
+
34
+
## Login or sign up
35
+
36
+
You can [login](https://tangled.org) by using your AT Protocol
37
+
account. If you are unclear on what that means, simply head
38
+
to the [signup](https://tangled.org/signup) page and create
39
+
an account. By doing so, you will be choosing Tangled as
40
+
your account provider (you will be granted a handle of the
41
+
form `user.tngl.sh`).
42
+
43
+
In the AT Protocol network, users are free to choose their account
44
+
provider (known as a "Personal Data Service", or PDS), and
45
+
login to applications that support AT accounts.
46
+
47
+
You can think of it as "one account for all of the atmosphere"!
48
+
49
+
If you already have an AT account (you may have one if you
50
+
signed up to Bluesky, for example), you can login with the
51
+
same handle on Tangled (so just use `user.bsky.social` on
52
+
the login page).
53
+
54
+
## Add an SSH key
55
+
56
+
Once you are logged in, you can start creating repositories
57
+
and pushing code. Tangled supports pushing git repositories
58
+
over SSH.
59
+
60
+
First, you'll need to generate an SSH key if you don't
61
+
already have one:
62
+
63
+
```bash
64
+
ssh-keygen -t ed25519 -C "foo@bar.com"
65
+
```
66
+
67
+
When prompted, save the key to the default location
68
+
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
69
+
70
+
Copy your public key to your clipboard:
71
+
72
+
```bash
73
+
# on X11
74
+
cat ~/.ssh/id_ed25519.pub | xclip -sel c
75
+
76
+
# on wayland
77
+
cat ~/.ssh/id_ed25519.pub | wl-copy
78
+
79
+
# on macos
80
+
cat ~/.ssh/id_ed25519.pub | pbcopy
81
+
```
82
+
83
+
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
84
+
paste your public key, give it a descriptive name, and hit
85
+
save.
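
If you manage several keys and want to be sure which one you just pasted, you can print a key's fingerprint locally; this is plain OpenSSH and nothing Tangled-specific:

```bash
# show the size, fingerprint, and comment of the public key
ssh-keygen -lf ~/.ssh/id_ed25519.pub
```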
86
+
87
+
## Create a repository
88
+
89
+
Once your SSH key is added, create your first repository:
90
+
91
+
1. Hit the green `+` icon on the topbar, and select
92
+
repository
93
+
2. Enter a repository name
94
+
3. Add a description
95
+
4. Choose a knotserver to host this repository on
96
+
5. Hit create
97
+
98
+
Knots are self-hostable, lightweight Git servers that can
99
+
host your repository. Unlike traditional code forges, your
100
+
code can live on any server. Read the [Knots](TODO) section
101
+
for more.
102
+
103
+
## Configure SSH
104
+
105
+
To ensure Git uses the correct SSH key and connects smoothly
106
+
to Tangled, add this configuration to your `~/.ssh/config`
107
+
file:
108
+
109
+
```
110
+
Host tangled.org
111
+
Hostname tangled.org
112
+
User git
113
+
IdentityFile ~/.ssh/id_ed25519
114
+
AddressFamily inet
115
+
```
116
+
117
+
This tells SSH to use your specific key when connecting to
118
+
Tangled and prevents authentication issues if you have
119
+
multiple SSH keys.
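
To confirm that SSH actually offers this key when talking to Tangled, a verbose test connection helps; the `-v` output is standard OpenSSH behaviour and shows which identity file gets offered, regardless of what the server does with the session afterwards:

```bash
# look for a line like "Offering public key: ~/.ssh/id_ed25519"
ssh -vT git@tangled.org
```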
120
+
121
+
Note that this configuration only works for knotservers that
122
+
are hosted by tangled.org. If you use a custom knot, refer
123
+
to the [Knots](TODO) section.
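
For a self-hosted knot the same kind of block works; only the hostname changes (a sketch, assuming a knot at `knot.example.com` as used later in this document):

```
Host knot.example.com
    User git
    IdentityFile ~/.ssh/id_ed25519
```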
124
+
125
+
## Push your first repository
126
+
127
+
Initialize a new Git repository:
128
+
129
+
```bash
130
+
mkdir my-project
131
+
cd my-project
132
+
133
+
git init
134
+
echo "# My Project" > README.md
135
+
```
136
+
137
+
Add some content and push!
138
+
139
+
```bash
140
+
git add README.md
141
+
git commit -m "Initial commit"
142
+
git remote add origin git@tangled.org:user.tngl.sh/my-project
143
+
git push -u origin main
144
+
```
145
+
146
+
That's it! Your code is now hosted on Tangled.
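
If you want to confirm what actually landed on the server, `git ls-remote` asks the remote for its refs over the same SSH transport (standard Git, nothing Tangled-specific):

```bash
# should list refs/heads/main with the commit you just pushed
git ls-remote origin
```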
147
+
148
+
## Migrating an existing repository
149
+
150
+
Moving your repositories from GitHub, GitLab, Bitbucket, or
151
+
any other Git forge to Tangled is straightforward. You'll
152
+
simply change your repository's remote URL. At the moment,
153
+
Tangled does not have any tooling to migrate data such as
154
+
GitHub issues or pull requests.
155
+
156
+
First, create a new repository on tangled.org as described
157
+
in the [Quick Start Guide](#create-a-repository).
158
+
159
+
Navigate to your existing local repository:
160
+
161
+
```bash
162
+
cd /path/to/your/existing/repo
163
+
```
164
+
165
+
You can inspect your existing Git remote like so:
166
+
167
+
```bash
168
+
git remote -v
169
+
```
170
+
171
+
You'll see something like:
172
+
173
+
```
174
+
origin git@github.com:username/my-project (fetch)
175
+
origin git@github.com:username/my-project (push)
176
+
```
177
+
178
+
Update the remote URL to point to tangled:
179
+
180
+
```bash
181
+
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
182
+
```
183
+
184
+
Verify the change:
185
+
186
+
```bash
187
+
git remote -v
188
+
```
189
+
190
+
You should now see:
191
+
192
+
```
193
+
origin git@tangled.org:user.tngl.sh/my-project (fetch)
194
+
origin git@tangled.org:user.tngl.sh/my-project (push)
195
+
```
196
+
197
+
Push all your branches and tags to Tangled:
198
+
199
+
```bash
200
+
git push -u origin --all
201
+
git push -u origin --tags
202
+
```
203
+
204
+
Your repository is now migrated to Tangled! All commit
205
+
history, branches, and tags have been preserved.
206
+
207
+
## Mirroring a repository to Tangled
208
+
209
+
If you want to maintain your repository on multiple forges
210
+
simultaneously, for example, keeping your primary repository
211
+
on GitHub while mirroring to Tangled for backup or
212
+
redundancy, you can do so by adding multiple remotes.
213
+
214
+
You can configure your local repository to push to both
215
+
Tangled and, say, GitHub. You may already have the following
216
+
setup:
217
+
218
+
```
219
+
$ git remote -v
220
+
origin git@github.com:username/my-project (fetch)
221
+
origin git@github.com:username/my-project (push)
222
+
```
223
+
224
+
Now add Tangled as an additional push URL to the same
225
+
remote:
226
+
227
+
```bash
228
+
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
229
+
```
230
+
231
+
You also need to re-add the original URL as a push
232
+
destination (Git replaces the push URL when you use `--add`
233
+
the first time):
234
+
235
+
```bash
236
+
git remote set-url --add --push origin git@github.com:username/my-project
237
+
```
238
+
239
+
Verify your configuration:
240
+
241
+
```
242
+
$ git remote -v
243
+
origin git@github.com:username/my-project (fetch)
244
+
origin git@tangled.org:user.tngl.sh/my-project (push)
245
+
origin git@github.com:username/my-project (push)
246
+
```
247
+
248
+
Notice that there's one fetch URL (the primary remote) and
249
+
two push URLs. Now, whenever you push, Git will
250
+
automatically push to both remotes:
251
+
252
+
```bash
253
+
git push origin main
254
+
```
255
+
256
+
This single command pushes your `main` branch to both GitHub
257
+
and Tangled simultaneously.
258
+
259
+
To push all branches and tags:
260
+
261
+
```bash
262
+
git push origin --all
263
+
git push origin --tags
264
+
```
265
+
266
+
If you prefer more control over which remote you push to,
267
+
you can maintain separate remotes:
268
+
269
+
```bash
270
+
git remote add github git@github.com:username/my-project
271
+
git remote add tangled git@tangled.org:username/my-project
272
+
```
273
+
274
+
Then push to each explicitly:
275
+
276
+
```bash
277
+
git push github main
278
+
git push tangled main
279
+
```
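
If you push to both regularly, a small loop over the remote
names created above saves some typing (plain shell, nothing
Tangled-specific):

```bash
for remote in github tangled; do
  git push "$remote" main
done
```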

# Knot self-hosting guide

So you want to run your own knot server? Great! Here are a few prerequisites:

1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
2. A (sub)domain name. People generally use `knot.example.com`.
3. A valid SSL certificate for your domain.

## NixOS

Refer to the [knot
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
for a full list of options. Sample configurations:

- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
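
As a rough sketch, a minimal configuration just enables the
service and sets the owner DID; everything else is optional
or has defaults (this snippet is illustrative, not a
complete deployment -- consult the module above for the full
option set):

```nix
{
  services.tangled.knot = {
    enable = true;
    server = {
      owner = "did:plc:foobar"; # your DID
    };
  };
}
```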

## Docker

Refer to
[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
Note that this is community maintained.

## Manual setup

First, clone this repository:

```
git clone https://tangled.org/@tangled.org/core
```

Then, build the `knot` CLI. This is the knot administration
and operation tool. For the purpose of this guide, we're
only concerned with these subcommands:

* `knot server`: the main knot server process, typically
  run as a supervised service
* `knot guard`: handles role-based access control for git
  over SSH (you'll never have to run this yourself)
* `knot keys`: fetches SSH keys associated with your knot;
  we'll use this to generate the SSH
  `AuthorizedKeysCommand`

```
cd core
export CGO_ENABLED=1
go build -o knot ./cmd/knot
```

Next, move the `knot` binary to a location owned by `root` --
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:

```
sudo mv knot /usr/local/bin/knot
sudo chown root:root /usr/local/bin/knot
```

This is necessary because SSH `AuthorizedKeysCommand` requires [really
specific permissions](https://stackoverflow.com/a/27638306). The
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
retrieve a user's public SSH keys dynamically for authentication. Let's
set that up.

```
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
Match User git
    AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
    AuthorizedKeysCommandUser nobody
EOF
```

Then, reload `sshd`:

```
sudo systemctl reload ssh
```

Next, create the `git` user. We'll use the `git` user's home directory
to store repositories:

```
sudo adduser git
```

Create `/home/git/.knot.env` with the following, updating the values as
necessary. `KNOT_SERVER_OWNER` should be set to your DID; you can find
your DID on the [Settings](https://tangled.org/settings) page.

```
KNOT_REPO_SCAN_PATH=/home/git
KNOT_SERVER_HOSTNAME=knot.example.com
APPVIEW_ENDPOINT=https://tangled.org
KNOT_SERVER_OWNER=did:plc:foobar
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
```

If you run a Linux distribution that uses systemd, you can use the provided
service file to run the server. Copy
[`knotserver.service`](/systemd/knotserver.service)
to `/etc/systemd/system/`. Then, run:

```
systemctl enable knotserver
systemctl start knotserver
```

The last step is to configure a reverse proxy like Nginx or Caddy to front your
knot. Here's an example configuration for Nginx:

```
server {
    listen 80;
    listen [::]:80;
    server_name knot.example.com;

    location / {
        proxy_pass http://localhost:5555;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # wss endpoint for git events
    location /events {
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header Host $http_host;
        proxy_set_header Upgrade websocket;
        proxy_set_header Connection Upgrade;
        proxy_pass http://localhost:5555;
    }

    # additional config for SSL/TLS goes here.
}
```
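
If you prefer Caddy, which provisions TLS certificates
automatically and handles the websocket upgrade on `/events`
without extra configuration, the equivalent would look
something like this:

```
knot.example.com {
    reverse_proxy localhost:5555
}
```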

Remember to use Let's Encrypt or similar to procure a certificate for your
knot domain.

You should now have a running knot server! You can finalize
your registration by hitting the `verify` button on the
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
a record on your PDS to announce the existence of the knot.

### Custom paths

(This section applies to manual setup only. Docker users should edit the mounts
in `docker-compose.yml` instead.)

Right now, the database and repositories of your knot live in `/home/git`. You
can move these paths if you'd like to store them in another folder. Be careful
when adjusting these paths:

* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
  any possible side effects. Remember to restart it once you're done.
* Make backups before moving in case something goes wrong.
* Make sure the `git` user can read and write from the new paths.

#### Database

As an example, let's say the current database is at `/home/git/knotserver.db`,
and we want to move it to `/home/git/database/knotserver.db`.

Copy the current database to the new location. Make sure to copy the `.db-shm`
and `.db-wal` files if they exist.

```
mkdir /home/git/database
cp /home/git/knotserver.db* /home/git/database
```

In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
the new file path (_not_ the directory):

```
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
```

#### Repositories

As an example, let's say the repositories are currently in `/home/git`, and we
want to move them into `/home/git/repositories`.

Create the new folder, then move the existing repositories (if there are any):

```
mkdir /home/git/repositories
# move all DIDs into the new folder; these will vary for you!
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
```

In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
to the new directory:

```
KNOT_REPO_SCAN_PATH=/home/git/repositories
```

Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
repository path:

```
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
Match User git
    AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
    AuthorizedKeysCommandUser nobody
EOF
```

Make sure to restart your SSH server!

#### MOTD (message of the day)

To configure the MOTD ("Welcome to this knot!" by default), edit the
`/home/git/motd` file:

```
printf "Hi from this knot!\n" > /home/git/motd
```

Note that you should add a newline at the end if setting a non-empty message,
since the knot won't do this for you.

# Spindles

## Pipelines

Spindle workflows allow you to write CI/CD pipelines in a
simple format. They're located in the `.tangled/workflows`
directory at the root of your repository, and are defined
using YAML.

The fields are:

- [Trigger](#trigger): A **required** field that defines
  when a workflow should be triggered.
- [Engine](#engine): A **required** field that defines which
  engine a workflow should run on.
- [Clone options](#clone-options): An **optional** field
  that defines how the repository should be cloned.
- [Dependencies](#dependencies): An **optional** field that
  allows you to list dependencies you may need.
- [Environment](#environment): An **optional** field that
  allows you to define environment variables.
- [Steps](#steps): An **optional** field that allows you to
  define what steps should run in the workflow.

### Trigger

The first thing to add to a workflow is the trigger, which
defines when a workflow runs. This is defined using a `when`
field, which takes a list of conditions. Each condition
has the following fields:

- `event`: This is a **required** field that defines when
  your workflow should run. It's a list that can take one or
  more of the following values:
  - `push`: The workflow should run every time a commit is
    pushed to the repository.
  - `pull_request`: The workflow should run every time a
    pull request is made or updated.
  - `manual`: The workflow can be triggered manually.
- `branch`: Defines which branches the workflow should run
  for. If used with the `push` event, commits to the
  branch(es) listed here will trigger the workflow. If used
  with the `pull_request` event, updates to pull requests
  targeting the branch(es) listed here will trigger the
  workflow. This field has no effect with the `manual`
  event. Supports glob patterns using `*` and `**` (e.g.,
  `main`, `develop`, `release-*`). Either `branch` or `tag`
  (or both) must be specified for `push` events.
- `tag`: Defines which tags the workflow should run for.
  Only used with the `push` event: when tags matching the
  pattern(s) listed here are pushed, the workflow will
  trigger. This field has no effect with `pull_request` or
  `manual` events. Supports glob patterns using `*` and `**`
  (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
  `tag` (or both) must be specified for `push` events.

For example, if you'd like to define a workflow that runs
when commits are pushed to the `main` and `develop`
branches, or when pull requests that target the `main`
branch are updated, or manually, you can do so with:

```yaml
when:
  - event: ["push", "manual"]
    branch: ["main", "develop"]
  - event: ["pull_request"]
    branch: ["main"]
```

You can also trigger workflows on tag pushes. For instance,
to run a deployment workflow when tags matching `v*` are
pushed:

```yaml
when:
  - event: ["push"]
    tag: ["v*"]
```

You can even combine branch and tag patterns in a single
constraint (the workflow triggers if either matches):

```yaml
when:
  - event: ["push"]
    branch: ["main", "release-*"]
    tag: ["v*", "stable"]
```

### Engine

Next is the engine on which the workflow should run, defined
using the **required** `engine` field. The currently
supported engines are:

- `nixery`: This uses an instance of
  [Nixery](https://nixery.dev) to run steps, which allows
  you to add [dependencies](#dependencies) from
  [Nixpkgs](https://github.com/NixOS/nixpkgs). You can
  search for packages on https://search.nixos.org, and
  there's a pretty good chance the package(s) you're looking
  for will be there.

Example:

```yaml
engine: "nixery"
```

### Clone options

When a workflow starts, the first step is to clone the
repository. You can customize this behavior using the
**optional** `clone` field. It has the following fields:

- `skip`: Setting this to `true` will skip cloning the
  repository. This can be useful if your workflow is doing
  something that doesn't require anything from the
  repository itself. This is `false` by default.
- `depth`: This sets the number of commits, or the "clone
  depth", to fetch from the repository. For example, if you
  set this to 2, the last 2 commits will be fetched. By
  default, the depth is set to 1, meaning only the most
  recent commit will be fetched, which is the commit that
  triggered the workflow.
- `submodules`: If you use [Git
  submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules)
  in your repository, setting this field to `true` will
  recursively fetch all submodules. This is `false` by
  default.

The default settings are:

```yaml
clone:
  skip: false
  depth: 1
  submodules: false
```

### Dependencies

Usually when you're running a workflow, you'll need
additional dependencies. The `dependencies` field lets you
define which dependencies to get, and from where. It's a
key-value map, with the key being the registry to fetch
dependencies from, and the value being the list of
dependencies to fetch.

Say you want to fetch Node.js and Go from `nixpkgs`, plus a
package called `my_pkg` from your own registry at
`https://tangled.org/@example.com/my_pkg`. You can define
those dependencies like so:

```yaml
dependencies:
  # nixpkgs
  nixpkgs:
    - nodejs
    - go
  # custom registry
  git+https://tangled.org/@example.com/my_pkg:
    - my_pkg
```

Now these dependencies are available to use in your
workflow!

### Environment

The `environment` field allows you to define environment
variables that will be available throughout the entire
workflow. **Do not put secrets here; these environment
variables are visible to anyone viewing the repository. You
can add secrets for pipelines in your repository's
settings.**

Example:

```yaml
environment:
  GOOS: "linux"
  GOARCH: "arm64"
  NODE_ENV: "production"
  MY_ENV_VAR: "MY_ENV_VALUE"
```

### Steps

The `steps` field allows you to define what steps should run
in the workflow. It's a list of step objects, each with the
following fields:

- `name`: This field allows you to give your step a name.
  This name is visible in your workflow runs, and is used to
  describe what the step is doing.
- `command`: This field allows you to define a command to
  run in that step. The step is run in a Bash shell, and the
  logs from the command will be visible in the pipelines
  page on the Tangled website. The
  [dependencies](#dependencies) you added will be available
  to use here.
- `environment`: Similar to the global
  [environment](#environment) config, this **optional**
  field is a key-value map that allows you to set
  environment variables for the step. **Do not put secrets
  here; these environment variables are visible to anyone
  viewing the repository. You can add secrets for pipelines
  in your repository's settings.**

Example:

```yaml
steps:
  - name: "Build backend"
    command: "go build"
    environment:
      GOOS: "darwin"
      GOARCH: "arm64"
  - name: "Build frontend"
    command: "npm run build"
    environment:
      NODE_ENV: "production"
```

### Complete workflow

```yaml
# .tangled/workflows/build.yml

when:
  - event: ["push", "manual"]
    branch: ["main", "develop"]
  - event: ["pull_request"]
    branch: ["main"]

engine: "nixery"

# using the default values
clone:
  skip: false
  depth: 1
  submodules: false

dependencies:
  # nixpkgs
  nixpkgs:
    - nodejs
    - go
  # custom registry
  git+https://tangled.org/@example.com/my_pkg:
    - my_pkg

environment:
  GOOS: "linux"
  GOARCH: "arm64"
  NODE_ENV: "production"
  MY_ENV_VAR: "MY_ENV_VALUE"

steps:
  - name: "Build backend"
    command: "go build"
    environment:
      GOOS: "darwin"
      GOARCH: "arm64"
  - name: "Build frontend"
    command: "npm run build"
    environment:
      NODE_ENV: "production"
```

If you want another example of a workflow, you can look at
the one [Tangled uses to build the
project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).

## Self-hosting guide

### Prerequisites

* Go
* Docker (the only supported backend currently)

### Configuration

Spindle is configured using environment variables. The following environment variables are available:

* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
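
As a reference, a complete environment might look like the
following; the values are placeholders, and only the two
required variables strictly need to be set:

```bash
export SPINDLE_SERVER_HOSTNAME=spindle.example.com
export SPINDLE_SERVER_OWNER=did:plc:foobar
export SPINDLE_SERVER_LISTEN_ADDR=0.0.0.0:6555
export SPINDLE_SERVER_DB_PATH=/var/lib/spindle/spindle.db
export SPINDLE_PIPELINES_LOG_DIR=/var/log/spindle
```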

### Running spindle

1. **Set the environment variables.** For example:

   ```shell
   export SPINDLE_SERVER_HOSTNAME="your-hostname"
   export SPINDLE_SERVER_OWNER="your-did"
   ```

2. **Build the Spindle binary.**

   ```shell
   cd core
   go mod download
   go build -o cmd/spindle/spindle cmd/spindle/main.go
   ```

3. **Create the log directory.**

   ```shell
   sudo mkdir -p /var/log/spindle
   sudo chown $USER:$USER -R /var/log/spindle
   ```

4. **Run the Spindle binary.**

   ```shell
   ./cmd/spindle/spindle
   ```

Spindle will now start, connect to the Jetstream server, and begin processing pipelines.

## Architecture

Spindle is a small CI runner service. Here's a high-level overview of how it operates:

* It listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
  [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
* When a new repo record comes through (typically when you add a spindle to a
  repo from the settings), spindle resolves the underlying knot and
  subscribes to repo events (see
  [`sh.tangled.pipeline`](/lexicons/pipeline.json)).
* The spindle engine then handles execution of the pipeline, with results and
  logs beamed onto the spindle event stream over WebSocket.
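
If you want to peek at the same firehose a spindle consumes,
Jetstream supports filtering by collection via the
`wantedCollections` query parameter. A quick sketch, assuming
you have `websocat` (or any other WebSocket client) installed:

```bash
websocat 'wss://jetstream1.us-west.bsky.network/subscribe?wantedCollections=sh.tangled.repo'
```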

### The engine

At present, the only supported backend is Docker (and Podman, if Docker
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
executes each step in the pipeline in a fresh container, with state persisted
across steps within the `/tangled/workspace` directory.

The base image for the container is constructed on the fly using
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
used packages.

The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).

## Secrets with OpenBao

This section covers setting up spindle to use OpenBao for secrets
management via OpenBao Proxy instead of the default SQLite backend.

### Overview

Spindle now uses OpenBao Proxy for secrets management. The proxy handles
authentication automatically using AppRole credentials, while spindle
connects to the local proxy instead of directly to the OpenBao server.

This approach provides better security, automatic token renewal, and
simplified application code.

### Installation

Install OpenBao from Nixpkgs:

```bash
nix shell nixpkgs#openbao # for a local server
```

### Setup

The setup process is documented for both local development and production.

#### Local development

Start OpenBao in dev mode:

```bash
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
```

This starts OpenBao on `http://localhost:8200` with a root token.

Set up the environment for the bao CLI:

```bash
export BAO_ADDR=http://localhost:8200
export BAO_TOKEN=root
```

#### Production

You would typically use a systemd service with a
configuration file. Refer to
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
for how this can be achieved using Nix.

Then, initialize the bao server:

```bash
bao operator init -key-shares=1 -key-threshold=1
```

This will print out an unseal key and a root token. Save them
somewhere safe (like a password manager). Then unseal the vault
to begin setting it up:

```bash
bao operator unseal <unseal_key>
```

All steps below remain the same across both dev and
production setups.

#### Configure the OpenBao server

Create the spindle KV mount:

```bash
bao secrets enable -path=spindle -version=2 kv
```

Next, set up AppRole authentication and a policy. Create a
policy file `spindle-policy.hcl`:

```hcl
# Full access to spindle KV v2 data
path "spindle/data/*" {
  capabilities = ["create", "read", "update", "delete"]
}

# Access to metadata for listing and management
path "spindle/metadata/*" {
  capabilities = ["list", "read", "delete", "update"]
}

# Allow listing at root level
path "spindle/" {
  capabilities = ["list"]
}

# Required for connection testing and health checks
path "auth/token/lookup-self" {
  capabilities = ["read"]
}
```

Apply the policy and create an AppRole:

```bash
bao policy write spindle-policy spindle-policy.hcl
bao auth enable approle
bao write auth/approle/role/spindle \
  token_policies="spindle-policy" \
  token_ttl=1h \
  token_max_ttl=4h \
  bind_secret_id=true \
  secret_id_ttl=0 \
  secret_id_num_uses=0
```

Get the credentials:

```bash
# Get role ID (static)
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)

# Generate secret ID
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)

echo "Role ID: $ROLE_ID"
echo "Secret ID: $SECRET_ID"
```

#### Create the proxy configuration

Create the credential files:

```bash
# Create directory for OpenBao files
mkdir -p /tmp/openbao

# Save credentials
echo "$ROLE_ID" > /tmp/openbao/role-id
echo "$SECRET_ID" > /tmp/openbao/secret-id
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
```

Create a proxy configuration file `/tmp/openbao/proxy.hcl`:

```hcl
# OpenBao server connection
vault {
  address = "http://localhost:8200"
}

# Auto-Auth using AppRole
auto_auth {
  method "approle" {
    mount_path = "auth/approle"
    config = {
      role_id_file_path   = "/tmp/openbao/role-id"
      secret_id_file_path = "/tmp/openbao/secret-id"
    }
  }

  # Optional: write token to file for debugging
  sink "file" {
    config = {
      path = "/tmp/openbao/token"
      mode = 0640
    }
  }
}

# Proxy listener for spindle
listener "tcp" {
  address     = "127.0.0.1:8201"
  tls_disable = true
}

# Enable API proxy with auto-auth token
api_proxy {
  use_auto_auth_token = true
}

# Enable response caching
cache {
  use_auto_auth_token = true
}

# Logging
log_level = "info"
```

#### Start the proxy

Start OpenBao Proxy:

```bash
bao proxy -config=/tmp/openbao/proxy.hcl
```

The proxy will authenticate with OpenBao and start listening on
`127.0.0.1:8201`.

#### Configure spindle

Set these environment variables for spindle:

```bash
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
```

On startup, spindle will now connect to the local proxy,
which handles all authentication automatically.

### Production setup for proxy

For production, you'll want to run the proxy as a service.
Place your production configuration in
`/etc/openbao/proxy.hcl`, with proper TLS settings for the
vault connection.
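
A sketch of a unit file is below. The unit name matches the
`journalctl -u openbao-proxy` example used in the
troubleshooting section; the binary path and the service
user are assumptions you should adapt to your system:

```ini
# /etc/systemd/system/openbao-proxy.service (illustrative)
[Unit]
Description=OpenBao Proxy for spindle secrets
Wants=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/bin/bao proxy -config=/etc/openbao/proxy.hcl
Restart=on-failure
User=openbao
Group=openbao

[Install]
WantedBy=multi-user.target
```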

### Verifying setup

Test the proxy directly:

```bash
# Check proxy health
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health

# Test token lookup through proxy
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
```

Test OpenBao operations through the server:

```bash
# List all secrets
bao kv list spindle/

# Add a test secret via the spindle API, then check it exists
bao kv list spindle/repos/

# Get a specific secret
bao kv get spindle/repos/your_repo_path/SECRET_NAME
```

### How it works

- Spindle connects to OpenBao Proxy on localhost (typically
  port 8200 or 8201).
- The proxy authenticates with OpenBao using AppRole
  credentials.
- All spindle requests go through the proxy, which injects
  authentication tokens.
- Secrets are stored at
  `spindle/repos/{sanitized_repo_path}/{secret_key}`.
- Repository paths like `did:plc:alice/myrepo` become
  `did_plc_alice_myrepo` (see the sketch below).
- The proxy handles all token renewal automatically.
- Spindle no longer manages tokens or authentication
  directly.
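
The sanitization is nothing more than replacing characters
that are unsafe in a KV path. An equivalent shell sketch
(illustrative, not spindle's actual code):

```bash
repo="did:plc:alice/myrepo"
echo "$repo" | tr ':/' '__'   # -> did_plc_alice_myrepo
```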

### Troubleshooting

**Connection refused**: Check that the OpenBao Proxy is
running and listening on the configured address.

**403 errors**: Verify the AppRole credentials are correct
and the policy has the necessary permissions.

**404 route errors**: The spindle KV mount probably doesn't
exist—run the mount creation step again.

**Proxy authentication failures**: Check the proxy logs and
verify the role-id and secret-id files are readable and
contain valid credentials.

**Secret not found after writing**: This can indicate policy
permission issues. Verify the policy includes both
`spindle/data/*` and `spindle/metadata/*` paths with
appropriate capabilities.

Check proxy logs:

```bash
# If running as a systemd service
journalctl -u openbao-proxy -f

# If running directly, check the console output
```

Test AppRole authentication manually:

```bash
bao write auth/approle/login \
  role_id="$(cat /tmp/openbao/role-id)" \
  secret_id="$(cat /tmp/openbao/secret-id)"
```

# Migrating knots and spindles

Sometimes, non-backwards-compatible changes are made to the
knot/spindle XRPC APIs. If you host a knot or a spindle, you
will need to follow this guide to upgrade. Typically, this
only requires you to deploy the newest version.

This section is laid out in reverse-chronological order:
newer migration guides are listed first, and older guides
are further down the page.

## Upgrading from v1.8.x

After v1.8.2, the HTTP API for knots and spindles has been
deprecated and replaced with XRPC. Repositories on outdated
knots will not be viewable from the appview. Upgrading is
straightforward, however.

For knots:

- Upgrade to the latest tag (v1.9.0 or above)
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
  hit the "retry" button to verify your knot

For spindles:

- Upgrade to the latest tag (v1.9.0 or above)
- Head to the [spindle
  dashboard](https://tangled.org/settings/spindles) and hit the
  "retry" button to verify your spindle

## Upgrading from v1.7.x

After v1.7.0, knot secrets have been deprecated. You no
longer need a secret from the appview to run a knot. All
authorized commands to knots are managed via [Inter-Service
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
Knots will be read-only until upgraded.

Upgrading is quite easy. In essence:

- `KNOT_SERVER_SECRET` is no more; you can remove this
  environment variable entirely (see the example below)
- `KNOT_SERVER_OWNER` is now required on boot; set this to
  your DID. You can find your DID on the
  [settings](https://tangled.org/settings) page.
- Restart your knot once you have replaced the environment
  variable
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
  hit the "retry" button to verify your knot. This simply
  writes a `sh.tangled.knot` record to your PDS.
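
For a manual setup, the change to `/home/git/.knot.env`
amounts to something like this (the DID is a placeholder):

```diff
-KNOT_SERVER_SECRET=...
+KNOT_SERVER_OWNER=did:plc:foobar
```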

If you use the nix module, simply bump the flake to the
latest revision, and change your config block like so:

```diff
 services.tangled.knot = {
   enable = true;
   server = {
-    secretFile = /path/to/secret;
+    owner = "did:plc:foo";
   };
 };
```

# Hacking on Tangled

We highly recommend [installing
Nix](https://nixos.org/download/) (the package manager)
before working on the codebase. The Nix flake provides a lot
of helpers to get started and, most importantly, builds and
dev shells are entirely deterministic.

To set up your dev environment:

```bash
nix develop
```

Non-Nix users can look at the `devShell` attribute in the
`flake.nix` file to determine the necessary dependencies.

## Running the appview

The Nix flake also exposes a few `app` attributes (run `nix
flake show` to see a full list of what the flake provides);
one of the apps runs the appview with the `air`
live-reloader:

```bash
TANGLED_DEV=true nix run .#watch-appview

# TANGLED_DB_PATH might be of interest to point to
# different sqlite DBs

# in a separate shell, you can live-reload tailwind
nix run .#watch-tailwind
```

To authenticate with the appview, you will need Redis and
OAuth JWKs to be set up:

```
# OAuth JWKs should already be set up by the Nix devshell:
echo $TANGLED_OAUTH_CLIENT_SECRET
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc

echo $TANGLED_OAUTH_CLIENT_KID
1761667908

# if not, you can set it up yourself:
goat key generate -t P-256
Key Type: P-256 / secp256r1 / ES256 private key
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR

# the secret key from above
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."

# Run Redis in a new shell to store OAuth sessions
redis-server
```

## Running knots and spindles

An end-to-end knot setup requires setting up a machine with
`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
quite cumbersome. So the Nix flake provides a
`nixosConfiguration` to do so.

<details>
<summary><strong>macOS users will have to set up a Nix builder first</strong></summary>

In order to build Tangled's dev VM on macOS, you will
first need to set up a Linux Nix builder. The recommended
way to do so is to run a [`darwin.linux-builder`
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
and to register it in `nix.conf` as a builder for Linux
with the same architecture as your Mac (`linux-aarch64` if
you are using Apple Silicon).

> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
> the Tangled repo so that it doesn't conflict with the other VM. For example,
> you can do
>
> ```shell
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
> ```
>
> to store the builder VM in a temporary dir.
>
> You should read and follow [all the other
> instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
> avoid subtle problems.

Alternatively, you can use any other method to set up a
Linux machine with Nix installed that you can `sudo ssh`
into (in other words, the root user on your Mac has to be able
to ssh into the Linux machine without entering a password)
and that has the same architecture as your Mac. See the
[remote builder
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
for how to register such a builder in `nix.conf`.

> WARNING: If you'd like to use
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
> ssh` works can be tricky. It seems to be [possible with
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).

</details>

To begin, grab your DID from http://localhost:3000/settings.
Then, set `TANGLED_VM_KNOT_OWNER` and
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
lightweight NixOS VM like so:

```bash
nix run --impure .#vm

# type `poweroff` at the shell to exit the VM
```

This starts a knot on port 6444 and a spindle on port 6555,
with `ssh` exposed on port 2222.

Once the services are running, head to
http://localhost:3000/settings/knots and hit "Verify". It should
verify the ownership of the services instantly if everything
went smoothly.

You can push repositories to this VM with this ssh config
block on your main machine:

```bash
Host nixos-shell
    Hostname localhost
    Port 2222
    User git
    IdentityFile ~/.ssh/my_tangled_key
```

Set up a remote called `local-dev` on a git repo:

```bash
git remote add local-dev git@nixos-shell:user/repo
git push local-dev main
```

The above VM should already be running a spindle on
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
hit "Verify". You can then configure each repository to use
this spindle and run CI jobs.

Of interest when debugging spindles:

```
# Service logs from journald:
journalctl -xeu spindle

# CI job logs from disk:
ls /var/log/spindle

# Debugging the spindle database:
sqlite3 /var/lib/spindle/spindle.db

# litecli has a nicer REPL interface:
litecli /var/lib/spindle/spindle.db
```

If for any reason you wish to disable either one of the
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
`services.tangled.spindle.enable` (or
`services.tangled.knot.enable`) to `false`.

# Contribution guide

## Commit guidelines

We follow a commit style similar to the Go project. Please keep commits:

* **atomic**: each commit should represent one logical change
* **descriptive**: the commit message should clearly describe what the
  change does and why it's needed

### Message format

```
<service/top-level directory>/<affected package/directory>: <short summary of change>

Optional longer description can go here, if necessary. Explain what the
change does and why, especially if not obvious. Reference relevant
issues or PRs when applicable. These can be links for now since we don't
auto-link issues/PRs yet.
```

Here are some examples:

```
appview/state: fix token expiry check in middleware

The previous check did not account for clock drift, leading to premature
token invalidation.
```

```
knotserver/git/service: improve error checking in upload-pack
```

### General notes

- PRs get merged "as-is" (fast-forward)—like applying a patch-series
  using `git am`. At present, there is no squashing—so please author
  your commits as they would appear on `master`, following the above
  guidelines.
- If there is a lot of nesting, for example "appview:
  pages/templates/repo/fragments: ...", these can be truncated down to
  just "appview: repo/fragments: ...". If the change affects a lot of
  subdirectories, you may abbreviate to just the top-level names, e.g.
  "appview: ..." or "knotserver: ...".
- Keep commits lowercased with no trailing period.
- Use the imperative mood in the summary line (e.g., "fix bug" not
  "fixed bug" or "fixes bug").
- Try to keep the summary line under 72 characters, but we aren't too
  fussed about this.
- Follow the same formatting for PR titles if filled manually.
- Don't include unrelated changes in the same commit.
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
  before submitting if necessary.

## Code formatting

We use a variety of tools to format our code, and multiplex them with
[`treefmt`](https://treefmt.com). All you need to do to format your changes
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).

## Proposals for bigger changes

Small fixes like typos, minor bugs, or trivial refactors can be
submitted directly as PRs.

For larger changes—especially those introducing new features, significant
refactoring, or altering system behavior—please open a proposal first. This
helps us evaluate the scope, design, and potential impact before implementation.

Create a new issue titled:

```
proposal: <affected scope>: <summary of change>
```

In the description, explain:

- What the change is
- Why it's needed
- How you plan to implement it (roughly)
- Any open questions or tradeoffs

We'll use the issue thread to discuss and refine the idea before moving
forward.

## Developer Certificate of Origin (DCO)

We require all contributors to certify that they have the right to
submit the code they're contributing. To do this, we follow the
[Developer Certificate of Origin
(DCO)](https://developercertificate.org/).

By signing your commits, you're stating that the contribution is your
own work, or that you have the right to submit it under the project's
license. This helps us keep things clean and legally sound.

To sign your commit, just add the `-s` flag when committing:

```sh
git commit -s -m "your commit message"
```

This appends a line like:

```
Signed-off-by: Your Name <your.email@example.com>
```

We won't merge commits if they aren't signed off. If you forget, you can
amend the last commit like this:

```sh
git commit --amend -s
```

If you're submitting a PR with multiple commits, make sure each one is
signed.

For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
to make it sign off commits in the tangled repo:

```shell
# Safety check, should say "No matching config key..."
jj config list templates.commit_trailers
# The command below may need to be adjusted if the command above returned something.
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
```

Refer to the [jujutsu
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
for more information.
136
-
137
-
### custom paths
138
-
139
-
(This section applies to manual setup only. Docker users should edit the mounts
140
-
in `docker-compose.yml` instead.)
141
-
142
-
Right now, the database and repositories of your knot lives in `/home/git`. You
143
-
can move these paths if you'd like to store them in another folder. Be careful
144
-
when adjusting these paths:
145
-
146
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147
-
any possible side effects. Remember to restart it once you're done.
148
-
* Make backups before moving in case something goes wrong.
149
-
* Make sure the `git` user can read and write from the new paths.
150
-
151
-
#### database
152
-
153
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
154
-
and we want to move it to `/home/git/database/knotserver.db`.
155
-
156
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
157
-
and `.db-wal` files if they exist.
158
-
159
-
```
160
-
mkdir /home/git/database
161
-
cp /home/git/knotserver.db* /home/git/database
162
-
```
163
-
164
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165
-
the new file path (_not_ the directory):
166
-
167
-
```
168
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169
-
```
170
-
171
-
#### repositories
172
-
173
-
As an example, let's say the repositories are currently in `/home/git`, and we
174
-
want to move them into `/home/git/repositories`.
175
-
176
-
Create the new folder, then move the existing repositories (if there are any):
177
-
178
-
```
179
-
mkdir /home/git/repositories
180
-
# move all DIDs into the new folder; these will vary for you!
181
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182
-
```
183
-
184
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185
-
to the new directory:
186
-
187
-
```
188
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
189
-
```
190
-
191
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192
-
repository path:
193
-
194
-
```
195
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196
-
Match User git
197
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198
-
AuthorizedKeysCommandUser nobody
199
-
EOF
200
-
```
201
-
202
-
Make sure to restart your SSH server!
203
-
204
-
#### MOTD (message of the day)
205
-
206
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
207
-
`/home/git/motd` file:
208
-
209
-
```
210
-
printf "Hi from this knot!\n" > /home/git/motd
211
-
```
212
-
213
-
Note that you should add a newline at the end if setting a non-empty message
214
-
since the knot won't do this for you.
+6 docs/logo.html
-59 docs/migrations.md
···
-# Migrations
-
-This document is laid out in reverse-chronological order.
-Newer migration guides are listed first, and older guides
-are further down the page.
-
-## Upgrading from v1.8.x
-
-After v1.8.2, the HTTP API for knot and spindles have been
-deprecated and replaced with XRPC. Repositories on outdated
-knots will not be viewable from the appview. Upgrading is
-straightforward however.
-
-For knots:
-
-- Upgrade to latest tag (v1.9.0 or above)
-- Head to the [knot dashboard](https://tangled.org/settings/knots) and
-  hit the "retry" button to verify your knot
-
-For spindles:
-
-- Upgrade to latest tag (v1.9.0 or above)
-- Head to the [spindle
-  dashboard](https://tangled.org/settings/spindles) and hit the
-  "retry" button to verify your spindle
-
-## Upgrading from v1.7.x
-
-After v1.7.0, knot secrets have been deprecated. You no
-longer need a secret from the appview to run a knot. All
-authorized commands to knots are managed via [Inter-Service
-Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
-Knots will be read-only until upgraded.
-
-Upgrading is quite easy, in essence:
-
-- `KNOT_SERVER_SECRET` is no more, you can remove this
-  environment variable entirely
-- `KNOT_SERVER_OWNER` is now required on boot, set this to
-  your DID. You can find your DID in the
-  [settings](https://tangled.org/settings) page.
-- Restart your knot once you have replaced the environment
-  variable
-- Head to the [knot dashboard](https://tangled.org/settings/knots) and
-  hit the "retry" button to verify your knot. This simply
-  writes a `sh.tangled.knot` record to your PDS.
-
-If you use the nix module, simply bump the flake to the
-latest revision, and change your config block like so:
-
-```diff
-services.tangled.knot = {
-  enable = true;
-  server = {
--   secretFile = /path/to/secret;
-+   owner = "did:plc:foo";
-  };
-};
-```
+3 docs/mode.html
+7 docs/search.html
···
+<form action="https://google.com/search" role="search" aria-label="Sitewide" class="w-full">
+  <input type="hidden" name="q" value="+[inurl:https://docs.tangled.org]">
+  <label>
+    <span style="display:none;">Search</span>
+    <input type="text" name="q" placeholder="Search docs ..." class="w-full font-normal">
+  </label>
+</form>
-25 docs/spindle/architecture.md
···
-# spindle architecture
-
-Spindle is a small CI runner service. Here's a high level overview of how it operates:
-
-* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
-  [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
-* when a new repo record comes through (typically when you add a spindle to a
-  repo from the settings), spindle then resolves the underlying knot and
-  subscribes to repo events (see:
-  [`sh.tangled.pipeline`](/lexicons/pipeline.json)).
-* the spindle engine then handles execution of the pipeline, with results and
-  logs beamed on the spindle event stream over wss
-
-### the engine
-
-At present, the only supported backend is Docker (and Podman, if Docker
-compatibility is enabled, so that `/run/docker.sock` is created). Spindle
-executes each step in the pipeline in a fresh container, with state persisted
-across steps within the `/tangled/workspace` directory.
-
-The base image for the container is constructed on the fly using
-[Nixery](https://nixery.dev), which is handy for caching layers for frequently
-used packages.
-
-The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52 docs/spindle/hosting.md
···
-# spindle self-hosting guide
-
-## prerequisites
-
-* Go
-* Docker (the only supported backend currently)
-
-## configuration
-
-Spindle is configured using environment variables. The following environment variables are available:
-
-* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
-* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
-* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
-* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
-* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
-* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
-* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
-* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
-* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
-
-## running spindle
-
-1. **Set the environment variables.** For example:
-
-   ```shell
-   export SPINDLE_SERVER_HOSTNAME="your-hostname"
-   export SPINDLE_SERVER_OWNER="your-did"
-   ```
-
-2. **Build the Spindle binary.**
-
-   ```shell
-   cd core
-   go mod download
-   go build -o cmd/spindle/spindle cmd/spindle/main.go
-   ```
-
-3. **Create the log directory.**
-
-   ```shell
-   sudo mkdir -p /var/log/spindle
-   sudo chown $USER:$USER -R /var/log/spindle
-   ```
-
-4. **Run the Spindle binary.**
-
-   ```shell
-   ./cmd/spindle/spindle
-   ```
-
-Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285 docs/spindle/openbao.md
···
1
-
# spindle secrets with openbao
2
-
3
-
This document covers setting up Spindle to use OpenBao for secrets
4
-
management via OpenBao Proxy instead of the default SQLite backend.
5
-
6
-
## overview
7
-
8
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9
-
authentication automatically using AppRole credentials, while Spindle
10
-
connects to the local proxy instead of directly to the OpenBao server.
11
-
12
-
This approach provides better security, automatic token renewal, and
13
-
simplified application code.
14
-
15
-
## installation
16
-
17
-
Install OpenBao from nixpkgs:
18
-
19
-
```bash
20
-
nix shell nixpkgs#openbao # for a local server
21
-
```
22
-
23
-
## setup
24
-
25
-
The setup process can is documented for both local development and production.
26
-
27
-
### local development
28
-
29
-
Start OpenBao in dev mode:
30
-
31
-
```bash
32
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33
-
```
34
-
35
-
This starts OpenBao on `http://localhost:8201` with a root token.
36
-
37
-
Set up environment for bao CLI:
38
-
39
-
```bash
40
-
export BAO_ADDR=http://localhost:8200
41
-
export BAO_TOKEN=root
42
-
```
43
-
44
-
### production
45
-
46
-
You would typically use a systemd service with a configuration file. Refer to
47
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48
-
achieved using Nix.
49
-
50
-
Then, initialize the bao server:
51
-
```bash
52
-
bao operator init -key-shares=1 -key-threshold=1
53
-
```
54
-
55
-
This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56
-
```bash
57
-
bao operator unseal <unseal_key>
58
-
```
59
-
60
-
All steps below remain the same across both dev and production setups.
61
-
62
-
### configure openbao server
63
-
64
-
Create the spindle KV mount:
65
-
66
-
```bash
67
-
bao secrets enable -path=spindle -version=2 kv
68
-
```
69
-
70
-
Set up AppRole authentication and policy:
71
-
72
-
Create a policy file `spindle-policy.hcl`:
73
-
74
-
```hcl
75
-
# Full access to spindle KV v2 data
76
-
path "spindle/data/*" {
77
-
capabilities = ["create", "read", "update", "delete"]
78
-
}
79
-
80
-
# Access to metadata for listing and management
81
-
path "spindle/metadata/*" {
82
-
capabilities = ["list", "read", "delete", "update"]
83
-
}
84
-
85
-
# Allow listing at root level
86
-
path "spindle/" {
87
-
capabilities = ["list"]
88
-
}
89
-
90
-
# Required for connection testing and health checks
91
-
path "auth/token/lookup-self" {
92
-
capabilities = ["read"]
93
-
}
94
-
```
95
-
96
-
Apply the policy and create an AppRole:
97
-
98
-
```bash
99
-
bao policy write spindle-policy spindle-policy.hcl
100
-
bao auth enable approle
101
-
bao write auth/approle/role/spindle \
102
-
token_policies="spindle-policy" \
103
-
token_ttl=1h \
104
-
token_max_ttl=4h \
105
-
bind_secret_id=true \
106
-
secret_id_ttl=0 \
107
-
secret_id_num_uses=0
108
-
```
109
-
110
-
Get the credentials:
111
-
112
-
```bash
113
-
# Get role ID (static)
114
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115
-
116
-
# Generate secret ID
117
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118
-
119
-
echo "Role ID: $ROLE_ID"
120
-
echo "Secret ID: $SECRET_ID"
121
-
```
122
-
123
-
### create proxy configuration
124
-
125
-
Create the credential files:
126
-
127
-
```bash
128
-
# Create directory for OpenBao files
129
-
mkdir -p /tmp/openbao
130
-
131
-
# Save credentials
132
-
echo "$ROLE_ID" > /tmp/openbao/role-id
133
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
134
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135
-
```
136
-
137
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138
-
139
-
```hcl
140
-
# OpenBao server connection
141
-
vault {
142
-
address = "http://localhost:8200"
143
-
}
144
-
145
-
# Auto-Auth using AppRole
146
-
auto_auth {
147
-
method "approle" {
148
-
mount_path = "auth/approle"
149
-
config = {
150
-
role_id_file_path = "/tmp/openbao/role-id"
151
-
secret_id_file_path = "/tmp/openbao/secret-id"
152
-
}
153
-
}
154
-
155
-
# Optional: write token to file for debugging
156
-
sink "file" {
157
-
config = {
158
-
path = "/tmp/openbao/token"
159
-
mode = 0640
160
-
}
161
-
}
162
-
}
163
-
164
-
# Proxy listener for Spindle
165
-
listener "tcp" {
166
-
address = "127.0.0.1:8201"
167
-
tls_disable = true
168
-
}
169
-
170
-
# Enable API proxy with auto-auth token
171
-
api_proxy {
172
-
use_auto_auth_token = true
173
-
}
174
-
175
-
# Enable response caching
176
-
cache {
177
-
use_auto_auth_token = true
178
-
}
179
-
180
-
# Logging
181
-
log_level = "info"
182
-
```
183
-
184
-
### start the proxy
185
-
186
-
Start OpenBao Proxy:
187
-
188
-
```bash
189
-
bao proxy -config=/tmp/openbao/proxy.hcl
190
-
```
191
-
192
-
The proxy will authenticate with OpenBao and start listening on
193
-
`127.0.0.1:8201`.
194
-
195
-
### configure spindle
196
-
197
-
Set these environment variables for Spindle:
198
-
199
-
```bash
200
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203
-
```
204
-
205
-
Start Spindle:
206
-
207
-
Spindle will now connect to the local proxy, which handles all
208
-
authentication automatically.
209
-
210
-
## production setup for proxy
211
-
212
-
For production, you'll want to run the proxy as a service:
213
-
214
-
Place your production configuration in `/etc/openbao/proxy.hcl` with
215
-
proper TLS settings for the vault connection.
216
-
217
-
## verifying setup
218
-
219
-
Test the proxy directly:
220
-
221
-
```bash
222
-
# Check proxy health
223
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224
-
225
-
# Test token lookup through proxy
226
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227
-
```
228
-
229
-
Test OpenBao operations through the server:
230
-
231
-
```bash
232
-
# List all secrets
233
-
bao kv list spindle/
234
-
235
-
# Add a test secret via Spindle API, then check it exists
236
-
bao kv list spindle/repos/
237
-
238
-
# Get a specific secret
239
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
240
-
```
241
-
242
-
## how it works
243
-
244
-
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245
-
- The proxy authenticates with OpenBao using AppRole credentials
246
-
- All Spindle requests go through the proxy, which injects authentication tokens
247
-
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248
-
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249
-
- The proxy handles all token renewal automatically
250
-
- Spindle no longer manages tokens or authentication directly
251
-
252
-
## troubleshooting
253
-
254
-
**Connection refused**: Check that the OpenBao Proxy is running and
255
-
listening on the configured address.
256
-
257
-
**403 errors**: Verify the AppRole credentials are correct and the policy
258
-
has the necessary permissions.
259
-
260
-
**404 route errors**: The spindle KV mount probably doesn't exist - run
261
-
the mount creation step again.
262
-
263
-
**Proxy authentication failures**: Check the proxy logs and verify the
264
-
role-id and secret-id files are readable and contain valid credentials.
265
-
266
-
**Secret not found after writing**: This can indicate policy permission
267
-
issues. Verify the policy includes both `spindle/data/*` and
268
-
`spindle/metadata/*` paths with appropriate capabilities.
269
-
270
-
Check proxy logs:
271
-
272
-
```bash
273
-
# If running as systemd service
274
-
journalctl -u openbao-proxy -f
275
-
276
-
# If running directly, check the console output
277
-
```
278
-
279
-
Test AppRole authentication manually:
280
-
281
-
```bash
282
-
bao write auth/approle/login \
283
-
role_id="$(cat /tmp/openbao/role-id)" \
284
-
secret_id="$(cat /tmp/openbao/secret-id)"
285
-
```
-183 docs/spindle/pipeline.md
···
1
-
# spindle pipelines
2
-
3
-
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4
-
5
-
The fields are:
6
-
7
-
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8
-
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9
-
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10
-
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11
-
- [Environment](#environment): An **optional** field that allows you to define environment variables.
12
-
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13
-
14
-
## Trigger
15
-
16
-
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17
-
18
-
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19
-
- `push`: The workflow should run every time a commit is pushed to the repository.
20
-
- `pull_request`: The workflow should run every time a pull request is made or updated.
21
-
- `manual`: The workflow can be triggered manually.
22
-
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23
-
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24
-
25
-
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26
-
27
-
```yaml
28
-
when:
29
-
- event: ["push", "manual"]
30
-
branch: ["main", "develop"]
31
-
- event: ["pull_request"]
32
-
branch: ["main"]
33
-
```
34
-
35
-
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36
-
37
-
```yaml
38
-
when:
39
-
- event: ["push"]
40
-
tag: ["v*"]
41
-
```
42
-
43
-
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44
-
45
-
```yaml
46
-
when:
47
-
- event: ["push"]
48
-
branch: ["main", "release-*"]
49
-
tag: ["v*", "stable"]
50
-
```
51
-
52
-
## Engine
53
-
54
-
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55
-
56
-
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57
-
58
-
Example:
59
-
60
-
```yaml
61
-
engine: "nixery"
62
-
```
63
-
64
-
## Clone options
65
-
66
-
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67
-
68
-
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69
-
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70
-
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71
-
72
-
The default settings are:
73
-
74
-
```yaml
75
-
clone:
76
-
skip: false
77
-
depth: 1
78
-
submodules: false
79
-
```
80
-
81
-
## Dependencies
82
-
83
-
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84
-
85
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86
-
87
-
```yaml
88
-
dependencies:
89
-
# nixpkgs
90
-
nixpkgs:
91
-
- nodejs
92
-
- go
93
-
# custom registry
94
-
git+https://tangled.org/@example.com/my_pkg:
95
-
- my_pkg
96
-
```
97
-
98
-
Now these dependencies are available to use in your workflow!
99
-
100
-
## Environment
101
-
102
-
The `environment` field allows you define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103
-
104
-
Example:
105
-
106
-
```yaml
107
-
environment:
108
-
GOOS: "linux"
109
-
GOARCH: "arm64"
110
-
NODE_ENV: "production"
111
-
MY_ENV_VAR: "MY_ENV_VALUE"
112
-
```
113
-
114
-
## Steps
115
-
116
-
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117
-
118
-
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119
-
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120
-
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121
-
122
-
Example:
123
-
124
-
```yaml
125
-
steps:
126
-
- name: "Build backend"
127
-
command: "go build"
128
-
environment:
129
-
GOOS: "darwin"
130
-
GOARCH: "arm64"
131
-
- name: "Build frontend"
132
-
command: "npm run build"
133
-
environment:
134
-
NODE_ENV: "production"
135
-
```
136
-
137
-
## Complete workflow
138
-
139
-
```yaml
140
-
# .tangled/workflows/build.yml
141
-
142
-
when:
143
-
- event: ["push", "manual"]
144
-
branch: ["main", "develop"]
145
-
- event: ["pull_request"]
146
-
branch: ["main"]
147
-
148
-
engine: "nixery"
149
-
150
-
# using the default values
151
-
clone:
152
-
skip: false
153
-
depth: 1
154
-
submodules: false
155
-
156
-
dependencies:
157
-
# nixpkgs
158
-
nixpkgs:
159
-
- nodejs
160
-
- go
161
-
# custom registry
162
-
git+https://tangled.org/@example.com/my_pkg:
163
-
- my_pkg
164
-
165
-
environment:
166
-
GOOS: "linux"
167
-
GOARCH: "arm64"
168
-
NODE_ENV: "production"
169
-
MY_ENV_VAR: "MY_ENV_VALUE"
170
-
171
-
steps:
172
-
- name: "Build backend"
173
-
command: "go build"
174
-
environment:
175
-
GOOS: "darwin"
176
-
GOARCH: "arm64"
177
-
- name: "Build frontend"
178
-
command: "npm run build"
179
-
environment:
180
-
NODE_ENV: "production"
181
-
```
182
-
183
-
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101 docs/styles.css
···
1
+
svg {
2
+
width: 16px;
3
+
height: 16px;
4
+
}
5
+
6
+
:root {
7
+
--syntax-alert: #d20f39;
8
+
--syntax-annotation: #fe640b;
9
+
--syntax-attribute: #df8e1d;
10
+
--syntax-basen: #40a02b;
11
+
--syntax-builtin: #1e66f5;
12
+
--syntax-controlflow: #8839ef;
13
+
--syntax-char: #04a5e5;
14
+
--syntax-constant: #fe640b;
15
+
--syntax-comment: #9ca0b0;
16
+
--syntax-commentvar: #7c7f93;
17
+
--syntax-documentation: #9ca0b0;
18
+
--syntax-datatype: #df8e1d;
19
+
--syntax-decval: #40a02b;
20
+
--syntax-error: #d20f39;
21
+
--syntax-extension: #4c4f69;
22
+
--syntax-float: #40a02b;
23
+
--syntax-function: #1e66f5;
24
+
--syntax-import: #40a02b;
25
+
--syntax-information: #04a5e5;
26
+
--syntax-keyword: #8839ef;
27
+
--syntax-operator: #179299;
28
+
--syntax-other: #8839ef;
29
+
--syntax-preprocessor: #ea76cb;
30
+
--syntax-specialchar: #04a5e5;
31
+
--syntax-specialstring: #ea76cb;
32
+
--syntax-string: #40a02b;
33
+
--syntax-variable: #8839ef;
34
+
--syntax-verbatimstring: #40a02b;
35
+
--syntax-warning: #df8e1d;
36
+
}
37
+
38
+
@media (prefers-color-scheme: dark) {
39
+
:root {
40
+
--syntax-alert: #f38ba8;
41
+
--syntax-annotation: #fab387;
42
+
--syntax-attribute: #f9e2af;
43
+
--syntax-basen: #a6e3a1;
44
+
--syntax-builtin: #89b4fa;
45
+
--syntax-controlflow: #cba6f7;
46
+
--syntax-char: #89dceb;
47
+
--syntax-constant: #fab387;
48
+
--syntax-comment: #6c7086;
49
+
--syntax-commentvar: #585b70;
50
+
--syntax-documentation: #6c7086;
51
+
--syntax-datatype: #f9e2af;
52
+
--syntax-decval: #a6e3a1;
53
+
--syntax-error: #f38ba8;
54
+
--syntax-extension: #cdd6f4;
55
+
--syntax-float: #a6e3a1;
56
+
--syntax-function: #89b4fa;
57
+
--syntax-import: #a6e3a1;
58
+
--syntax-information: #89dceb;
59
+
--syntax-keyword: #cba6f7;
60
+
--syntax-operator: #94e2d5;
61
+
--syntax-other: #cba6f7;
62
+
--syntax-preprocessor: #f5c2e7;
63
+
--syntax-specialchar: #89dceb;
64
+
--syntax-specialstring: #f5c2e7;
65
+
--syntax-string: #a6e3a1;
66
+
--syntax-variable: #cba6f7;
67
+
--syntax-verbatimstring: #a6e3a1;
68
+
--syntax-warning: #f9e2af;
69
+
}
70
+
}
71
+
72
+
/* pandoc syntax highlighting classes */
73
+
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
74
+
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
75
+
code span.at { color: var(--syntax-attribute); } /* attribute */
76
+
code span.bn { color: var(--syntax-basen); } /* basen */
77
+
code span.bu { color: var(--syntax-builtin); } /* builtin */
78
+
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
79
+
code span.ch { color: var(--syntax-char); } /* char */
80
+
code span.cn { color: var(--syntax-constant); } /* constant */
81
+
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
82
+
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
83
+
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
84
+
code span.dt { color: var(--syntax-datatype); } /* datatype */
85
+
code span.dv { color: var(--syntax-decval); } /* decval */
86
+
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
87
+
code span.ex { color: var(--syntax-extension); } /* extension */
88
+
code span.fl { color: var(--syntax-float); } /* float */
89
+
code span.fu { color: var(--syntax-function); } /* function */
90
+
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
91
+
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
92
+
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
93
+
code span.op { color: var(--syntax-operator); } /* operator */
94
+
code span.ot { color: var(--syntax-other); } /* other */
95
+
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
96
+
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
97
+
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
98
+
code span.st { color: var(--syntax-string); } /* string */
99
+
code span.va { color: var(--syntax-variable); } /* variable */
100
+
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
101
+
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+158 docs/template.html
···
1
+
<!DOCTYPE html>
2
+
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<meta name="generator" content="pandoc" />
6
+
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
7
+
$for(author-meta)$
8
+
<meta name="author" content="$author-meta$" />
9
+
$endfor$
10
+
11
+
$if(date-meta)$
12
+
<meta name="dcterms.date" content="$date-meta$" />
13
+
$endif$
14
+
15
+
$if(keywords)$
16
+
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
17
+
$endif$
18
+
19
+
$if(description-meta)$
20
+
<meta name="description" content="$description-meta$" />
21
+
$endif$
22
+
23
+
<title>$pagetitle$</title>
24
+
25
+
<style>
26
+
$styles.css()$
27
+
</style>
28
+
29
+
$for(css)$
30
+
<link rel="stylesheet" href="$css$" />
31
+
$endfor$
32
+
33
+
$for(header-includes)$
34
+
$header-includes$
35
+
$endfor$
36
+
37
+
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
38
+
39
+
</head>
40
+
<body class="bg-white dark:bg-gray-900 flex flex-col min-h-svh">
41
+
$for(include-before)$
42
+
$include-before$
43
+
$endfor$
44
+
45
+
$if(toc)$
46
+
<!-- mobile TOC trigger -->
47
+
<div class="md:hidden px-6 py-4 border-b border-gray-200 dark:border-gray-700">
48
+
<button
49
+
type="button"
50
+
popovertarget="mobile-toc-popover"
51
+
popovertargetaction="toggle"
52
+
class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white"
53
+
>
54
+
${ menu.svg() }
55
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
56
+
</button>
57
+
</div>
58
+
59
+
<div
60
+
id="mobile-toc-popover"
61
+
popover
62
+
class="mobile-toc-popover
63
+
bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700
64
+
h-full overflow-y-auto shadow-sm
65
+
px-6 py-4 fixed inset-x-0 top-0 w-fit max-w-4/5 m-0"
66
+
>
67
+
<div class="flex flex-col min-h-full">
68
+
<div class="flex-1 space-y-4">
69
+
<button
70
+
type="button"
71
+
popovertarget="mobile-toc-popover"
72
+
popovertargetaction="toggle"
73
+
class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white mb-4">
74
+
${ x.svg() }
75
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
76
+
</button>
77
+
${ logo.html() }
78
+
${ search.html() }
79
+
${ table-of-contents:toc.html() }
80
+
</div>
81
+
${ single-page:mode.html() }
82
+
</div>
83
+
</div>
84
+
85
+
<!-- desktop sidebar toc -->
86
+
<nav
87
+
id="$idprefix$TOC"
88
+
role="doc-toc"
89
+
class="hidden md:flex md:flex-col gap-4 fixed left-0 top-0 w-80 h-screen
90
+
bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700
91
+
p-4 z-50 overflow-y-auto">
92
+
${ logo.html() }
93
+
${ search.html() }
94
+
<div class="flex-1">
95
+
$if(toc-title)$
96
+
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
97
+
$endif$
98
+
${ table-of-contents:toc.html() }
99
+
</div>
100
+
${ single-page:mode.html() }
101
+
</nav>
102
+
$endif$
103
+
104
+
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
105
+
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
106
+
$if(top)$
107
+
$-- only print title block if this is NOT the top page
108
+
$else$
109
+
$if(title)$
110
+
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
111
+
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
112
+
$if(subtitle)$
113
+
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
114
+
$endif$
115
+
$for(author)$
116
+
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
117
+
$endfor$
118
+
$if(date)$
119
+
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
120
+
$endif$
121
+
$endif$
122
+
</header>
123
+
$endif$
124
+
125
+
$if(abstract)$
126
+
<article class="prose dark:prose-invert max-w-none">
127
+
$abstract$
128
+
</article>
129
+
$endif$
130
+
131
+
<article class="prose dark:prose-invert max-w-none">
132
+
$body$
133
+
</article>
134
+
</main>
135
+
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800">
136
+
<div class="max-w-4xl mx-auto px-8 py-4">
137
+
<div class="flex justify-between gap-4">
138
+
<span class="flex-1">
139
+
$if(previous.url)$
140
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
141
+
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
142
+
$endif$
143
+
</span>
144
+
<span class="flex-1 text-right">
145
+
$if(next.url)$
146
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
147
+
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
148
+
$endif$
149
+
</span>
150
+
</div>
151
+
</div>
152
+
</nav>
153
+
</div>
154
+
$for(include-after)$
155
+
$include-after$
156
+
$endfor$
157
+
</body>
158
+
</html>
+4 docs/toc.html
+9 -9 flake.lock
···
35
35
"systems": "systems"
36
36
},
37
37
"locked": {
38
-
"lastModified": 1694529238,
39
-
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
38
+
"lastModified": 1731533236,
39
+
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
40
40
"owner": "numtide",
41
41
"repo": "flake-utils",
42
-
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
42
+
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
43
43
"type": "github"
44
44
},
45
45
"original": {
···
56
56
]
57
57
},
58
58
"locked": {
59
-
"lastModified": 1754078208,
60
-
"narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=",
59
+
"lastModified": 1763982521,
60
+
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
61
61
"owner": "nix-community",
62
62
"repo": "gomod2nix",
63
-
"rev": "7f963246a71626c7fc70b431a315c4388a0c95cf",
63
+
"rev": "02e63a239d6eabd595db56852535992c898eba72",
64
64
"type": "github"
65
65
},
66
66
"original": {
···
150
150
},
151
151
"nixpkgs": {
152
152
"locked": {
153
-
"lastModified": 1751984180,
154
-
"narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=",
153
+
"lastModified": 1766070988,
154
+
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
155
155
"owner": "nixos",
156
156
"repo": "nixpkgs",
157
-
"rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0",
157
+
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
158
158
"type": "github"
159
159
},
160
160
"original": {
+21 -5 flake.nix
···
76
76
};
77
77
buildGoApplication =
78
78
(self.callPackage "${gomod2nix}/builder" {
79
-
gomod2nix = gomod2nix.legacyPackages.${pkgs.system}.gomod2nix;
79
+
gomod2nix = gomod2nix.legacyPackages.${pkgs.stdenv.hostPlatform.system}.gomod2nix;
80
80
}).buildGoApplication;
81
81
modules = ./nix/gomod2nix.toml;
82
82
sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix {
83
-
inherit (pkgs) gcc;
84
83
inherit sqlite-lib-src;
85
84
};
86
85
lexgen = self.callPackage ./nix/pkgs/lexgen.nix {inherit indigo;};
···
89
88
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
90
89
};
91
90
appview = self.callPackage ./nix/pkgs/appview.nix {};
91
+
docs = self.callPackage ./nix/pkgs/docs.nix {
92
+
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
93
+
};
92
94
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
93
95
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
94
96
knot = self.callPackage ./nix/pkgs/knot.nix {};
97
+
dolly = self.callPackage ./nix/pkgs/dolly.nix {};
95
98
});
96
99
in {
97
100
overlays.default = final: prev: {
98
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
101
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs dolly;
99
102
};
100
103
101
104
packages = forAllSystems (system: let
···
104
107
staticPackages = mkPackageSet pkgs.pkgsStatic;
105
108
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
106
109
in {
107
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
110
+
inherit
111
+
(packages)
112
+
appview
113
+
appview-static-files
114
+
lexgen
115
+
goat
116
+
spindle
117
+
knot
118
+
knot-unwrapped
119
+
sqlite-lib
120
+
docs
121
+
dolly
122
+
;
108
123
109
124
pkgsStatic-appview = staticPackages.appview;
110
125
pkgsStatic-knot = staticPackages.knot;
111
126
pkgsStatic-knot-unwrapped = staticPackages.knot-unwrapped;
112
127
pkgsStatic-spindle = staticPackages.spindle;
113
128
pkgsStatic-sqlite-lib = staticPackages.sqlite-lib;
129
+
pkgsStatic-dolly = staticPackages.dolly;
114
130
115
131
pkgsCross-gnu64-pkgsStatic-appview = crossPackages.appview;
116
132
pkgsCross-gnu64-pkgsStatic-knot = crossPackages.knot;
117
133
pkgsCross-gnu64-pkgsStatic-knot-unwrapped = crossPackages.knot-unwrapped;
118
134
pkgsCross-gnu64-pkgsStatic-spindle = crossPackages.spindle;
135
+
pkgsCross-gnu64-pkgsStatic-dolly = crossPackages.dolly;
119
136
120
137
treefmt-wrapper = pkgs.treefmt.withConfig {
121
138
settings.formatter = {
···
156
173
nativeBuildInputs = [
157
174
pkgs.go
158
175
pkgs.air
159
-
pkgs.tilt
160
176
pkgs.gopls
161
177
pkgs.httpie
162
178
pkgs.litecli
+3 -4 go.mod
···
1
1
module tangled.org/core
2
2
3
-
go 1.24.4
3
+
go 1.25.0
4
4
5
5
require (
6
6
github.com/Blank-Xu/sql-adapter v1.1.1
···
44
44
github.com/stretchr/testify v1.10.0
45
45
github.com/urfave/cli/v3 v3.3.3
46
46
github.com/whyrusleeping/cbor-gen v0.3.1
47
-
github.com/wyatt915/goldmark-treeblood v0.0.1
48
47
github.com/yuin/goldmark v1.7.13
48
+
github.com/yuin/goldmark-emoji v1.0.6
49
49
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
50
50
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab
51
51
golang.org/x/crypto v0.40.0
52
52
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
53
53
golang.org/x/image v0.31.0
54
54
golang.org/x/net v0.42.0
55
-
golang.org/x/sync v0.17.0
56
55
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
57
56
gopkg.in/yaml.v3 v3.0.1
58
57
)
···
190
189
github.com/vmihailenco/go-tinylfu v0.2.2 // indirect
191
190
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
192
191
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
193
-
github.com/wyatt915/treeblood v0.1.16 // indirect
194
192
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
195
193
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
196
194
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
···
205
203
go.uber.org/atomic v1.11.0 // indirect
206
204
go.uber.org/multierr v1.11.0 // indirect
207
205
go.uber.org/zap v1.27.0 // indirect
206
+
golang.org/x/sync v0.17.0 // indirect
208
207
golang.org/x/sys v0.34.0 // indirect
209
208
golang.org/x/text v0.29.0 // indirect
210
209
golang.org/x/time v0.12.0 // indirect
+2 -4 go.sum
···
495
495
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
496
496
github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0=
497
497
github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
498
-
github.com/wyatt915/goldmark-treeblood v0.0.1 h1:6vLJcjFrHgE4ASu2ga4hqIQmbvQLU37v53jlHZ3pqDs=
499
-
github.com/wyatt915/goldmark-treeblood v0.0.1/go.mod h1:SmcJp5EBaV17rroNlgNQFydYwy0+fv85CUr/ZaCz208=
500
-
github.com/wyatt915/treeblood v0.1.16 h1:byxNbWZhnPDxdTp7W5kQhCeaY8RBVmojTFz1tEHgg8Y=
501
-
github.com/wyatt915/treeblood v0.1.16/go.mod h1:i7+yhhmzdDP17/97pIsOSffw74EK/xk+qJ0029cSXUY=
502
498
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
503
499
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
504
500
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
···
509
505
github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
510
506
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
511
507
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
508
+
github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs=
509
+
github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA=
512
510
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
513
511
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
514
512
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+4 -4 hook/hook.go
···
 		},
 		Commands: []*cli.Command{
 			{
-				Name:   "post-recieve",
-				Usage:  "sends a post-recieve hook to the knot (waits for stdin)",
-				Action: postRecieve,
+				Name:   "post-receive",
+				Usage:  "sends a post-receive hook to the knot (waits for stdin)",
+				Action: postReceive,
 			},
 		},
 	}
 }
 
-func postRecieve(ctx context.Context, cmd *cli.Command) error {
+func postReceive(ctx context.Context, cmd *cli.Command) error {
 	gitDir := cmd.String("git-dir")
 	userDid := cmd.String("user-did")
 	userHandle := cmd.String("user-handle")
+1 -1 hook/setup.go
···
 	option_var="GIT_PUSH_OPTION_$i"
 	push_options+=(-push-option "${!option_var}")
 done
-%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve
+%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive
 `, executablePath, config.internalApi)
 
 	return os.WriteFile(hookPath, []byte(hookContent), 0755)
+88 ico/ico.go
···
1
+
package ico
2
+
3
+
import (
4
+
"bytes"
5
+
"encoding/binary"
6
+
"fmt"
7
+
"image"
8
+
"image/png"
9
+
)
10
+
11
+
type IconDir struct {
12
+
Reserved uint16 // must be 0
13
+
Type uint16 // 1 for ICO, 2 for CUR
14
+
Count uint16 // number of images
15
+
}
16
+
17
+
type IconDirEntry struct {
18
+
Width uint8 // 0 means 256
19
+
Height uint8 // 0 means 256
20
+
ColorCount uint8
21
+
Reserved uint8 // must be 0
22
+
ColorPlanes uint16 // 0 or 1
23
+
BitsPerPixel uint16
24
+
SizeInBytes uint32
25
+
Offset uint32
26
+
}
27
+
28
+
func ImageToIco(img image.Image) ([]byte, error) {
29
+
// encode image as png
30
+
var pngBuf bytes.Buffer
31
+
if err := png.Encode(&pngBuf, img); err != nil {
32
+
return nil, fmt.Errorf("failed to encode PNG: %w", err)
33
+
}
34
+
pngData := pngBuf.Bytes()
35
+
36
+
// get image dimensions
37
+
bounds := img.Bounds()
38
+
width := bounds.Dx()
39
+
height := bounds.Dy()
40
+
41
+
// prepare output buffer
42
+
var icoBuf bytes.Buffer
43
+
44
+
iconDir := IconDir{
45
+
Reserved: 0,
46
+
Type: 1, // ICO format
47
+
Count: 1, // One image
48
+
}
49
+
50
+
w := uint8(width)
51
+
h := uint8(height)
52
+
53
+
// width/height of 256 should be stored as 0
54
+
if width == 256 {
55
+
w = 0
56
+
}
57
+
if height == 256 {
58
+
h = 0
59
+
}
60
+
61
+
iconDirEntry := IconDirEntry{
62
+
Width: w,
63
+
Height: h,
64
+
ColorCount: 0, // 0 for PNG (32-bit)
65
+
Reserved: 0,
66
+
ColorPlanes: 1,
67
+
BitsPerPixel: 32, // PNG with alpha
68
+
SizeInBytes: uint32(len(pngData)),
69
+
Offset: 6 + 16, // Size of ICONDIR + ICONDIRENTRY
70
+
}
71
+
72
+
// write IconDir
73
+
if err := binary.Write(&icoBuf, binary.LittleEndian, iconDir); err != nil {
74
+
return nil, fmt.Errorf("failed to write ICONDIR: %w", err)
75
+
}
76
+
77
+
// write IconDirEntry
78
+
if err := binary.Write(&icoBuf, binary.LittleEndian, iconDirEntry); err != nil {
79
+
return nil, fmt.Errorf("failed to write ICONDIRENTRY: %w", err)
80
+
}
81
+
82
+
// write PNG data directly
83
+
if _, err := icoBuf.Write(pngData); err != nil {
84
+
return nil, fmt.Errorf("failed to write PNG data: %w", err)
85
+
}
86
+
87
+
return icoBuf.Bytes(), nil
88
+
}
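
For orientation, here is a minimal sketch of how the new `ico` package could be exercised. Only `ico.ImageToIco` and the `tangled.org/core` module path come from this change; the input and output filenames are hypothetical.

```go
// Hypothetical caller: turn an existing PNG into a single-image favicon.ico
// using the ImageToIco helper added above.
package main

import (
	"image/png"
	"os"

	"tangled.org/core/ico"
)

func main() {
	f, err := os.Open("logo.png") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	img, err := png.Decode(f)
	if err != nil {
		panic(err)
	}

	data, err := ico.ImageToIco(img)
	if err != nil {
		panic(err)
	}

	// the ICO container embeds the PNG bytes directly, so this is lossless
	if err := os.WriteFile("favicon.ico", data, 0o644); err != nil {
		panic(err)
	}
}
```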
+2 -1 input.css
···
 }
 
 .prose a.mention {
-  @apply no-underline hover:underline;
+  @apply no-underline hover:underline font-bold;
 }
 
 .prose li {
···
   @apply py-1 text-gray-900 dark:text-gray-100;
   }
 }
+
 }
 
 /* Background */
+24 -7 knotserver/db/init.go → knotserver/db/db.go
···
1
1
package db
2
2
3
3
import (
4
+
"context"
4
5
"database/sql"
6
+
"log/slog"
5
7
"strings"
6
8
7
9
_ "github.com/mattn/go-sqlite3"
10
+
"tangled.org/core/log"
8
11
)
9
12
10
13
type DB struct {
11
-
db *sql.DB
14
+
db *sql.DB
15
+
logger *slog.Logger
12
16
}
13
17
14
-
func Setup(dbPath string) (*DB, error) {
18
+
func Setup(ctx context.Context, dbPath string) (*DB, error) {
15
19
// https://github.com/mattn/go-sqlite3#connection-string
16
20
opts := []string{
17
21
"_foreign_keys=1",
···
20
24
"_auto_vacuum=incremental",
21
25
}
22
26
27
+
logger := log.FromContext(ctx)
28
+
logger = log.SubLogger(logger, "db")
29
+
23
30
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
24
31
if err != nil {
25
32
return nil, err
26
33
}
27
34
28
-
// NOTE: If any other migration is added here, you MUST
29
-
// copy the pattern in appview: use a single sql.Conn
30
-
// for every migration.
35
+
conn, err := db.Conn(ctx)
36
+
if err != nil {
37
+
return nil, err
38
+
}
39
+
defer conn.Close()
31
40
32
-
_, err = db.Exec(`
41
+
_, err = conn.ExecContext(ctx, `
33
42
create table if not exists known_dids (
34
43
did text primary key
35
44
);
···
55
64
created integer not null default (strftime('%s', 'now')),
56
65
primary key (rkey, nsid)
57
66
);
67
+
68
+
create table if not exists migrations (
69
+
id integer primary key autoincrement,
70
+
name text unique
71
+
);
58
72
`)
59
73
if err != nil {
60
74
return nil, err
61
75
}
62
76
63
-
return &DB{db: db}, nil
77
+
return &DB{
78
+
db: db,
79
+
logger: logger,
80
+
}, nil
64
81
}
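
The new `migrations` table is only created in this hunk; nothing here consumes it yet. As a sketch of one conventional way such a table is used (the helper name and signature are illustrative, not taken from the patch; the single-`sql.Conn` shape follows the note in the comment this change removes):

```go
// Sketch only: run a named migration at most once, recording it in the
// migrations table created above so reruns are skipped.
package db

import (
	"context"
	"database/sql"
)

func runMigration(ctx context.Context, conn *sql.Conn, name string, fn func(*sql.Tx) error) error {
	var applied int
	if err := conn.QueryRowContext(ctx,
		`select count(1) from migrations where name = ?`, name).Scan(&applied); err != nil {
		return err
	}
	if applied > 0 {
		return nil // already ran
	}

	tx, err := conn.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	if err := fn(tx); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx, `insert into migrations (name) values (?)`, name); err != nil {
		return err
	}
	return tx.Commit()
}
```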
+47 knotserver/git.go
···
56
56
}
57
57
}
58
58
59
+
func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) {
60
+
did := chi.URLParam(r, "did")
61
+
name := chi.URLParam(r, "name")
62
+
repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name))
63
+
if err != nil {
64
+
gitError(w, err.Error(), http.StatusInternalServerError)
65
+
h.l.Error("git: failed to secure join repo path", "handler", "UploadPack", "error", err)
66
+
return
67
+
}
68
+
69
+
const expectedContentType = "application/x-git-upload-archive-request"
70
+
contentType := r.Header.Get("Content-Type")
71
+
if contentType != expectedContentType {
72
+
gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType)
73
+
}
74
+
75
+
var bodyReader io.ReadCloser = r.Body
76
+
if r.Header.Get("Content-Encoding") == "gzip" {
77
+
gzipReader, err := gzip.NewReader(r.Body)
78
+
if err != nil {
79
+
gitError(w, err.Error(), http.StatusInternalServerError)
80
+
h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err)
81
+
return
82
+
}
83
+
defer gzipReader.Close()
84
+
bodyReader = gzipReader
85
+
}
86
+
87
+
w.Header().Set("Content-Type", "application/x-git-upload-archive-result")
88
+
89
+
h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo)
90
+
91
+
cmd := service.ServiceCommand{
92
+
GitProtocol: r.Header.Get("Git-Protocol"),
93
+
Dir: repo,
94
+
Stdout: w,
95
+
Stdin: bodyReader,
96
+
}
97
+
98
+
w.WriteHeader(http.StatusOK)
99
+
100
+
if err := cmd.UploadArchive(); err != nil {
101
+
h.l.Error("git: failed to execute git-upload-pack", "handler", "UploadPack", "error", err)
102
+
return
103
+
}
104
+
}
105
+
59
106
func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) {
60
107
did := chi.URLParam(r, "did")
61
108
name := chi.URLParam(r, "name")
+1 -17 knotserver/git/diff.go
···
 		nd.Diff = append(nd.Diff, ndiff)
 	}
 
-	nd.Stat.FilesChanged = len(diffs)
-	nd.Commit.This = c.Hash.String()
-	nd.Commit.PGPSignature = c.PGPSignature
-	nd.Commit.Committer = c.Committer
-	nd.Commit.Tree = c.TreeHash.String()
-
-	if parent.Hash.IsZero() {
-		nd.Commit.Parent = ""
-	} else {
-		nd.Commit.Parent = parent.Hash.String()
-	}
-	nd.Commit.Author = c.Author
-	nd.Commit.Message = c.Message
-
-	if v, ok := c.ExtraHeaders["change-id"]; ok {
-		nd.Commit.ChangedId = string(v)
-	}
+	nd.Commit.FromGoGitCommit(c)
 
 	return &nd, nil
 }
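
The deleted block above is the only place in this diff where the copied fields are visible; the new `types.Commit.FromGoGitCommit` helper presumably centralises that copying. A sketch of one plausible shape, reconstructed purely from the removed lines (the go-git import path and the parent handling are assumptions, not shown in the patch):

```go
// Sketch only, not the actual implementation from this change.
package types

import "github.com/go-git/go-git/v5/plumbing/object"

// FromGoGitCommit copies the commit metadata that diff.go previously
// assembled field by field into a types.Commit.
func (c *Commit) FromGoGitCommit(gc *object.Commit) {
	c.This = gc.Hash.String()
	c.PGPSignature = gc.PGPSignature
	c.Committer = gc.Committer
	c.Tree = gc.TreeHash.String()
	c.Author = gc.Author
	c.Message = gc.Message

	// the old code took the parent from a separately resolved commit;
	// using the first parent hash here is an assumption
	if len(gc.ParentHashes) > 0 {
		c.Parent = gc.ParentHashes[0].String()
	}

	if v, ok := gc.ExtraHeaders["change-id"]; ok {
		c.ChangedId = string(v)
	}
}
```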
+13 -1 knotserver/git/service/service.go
···
 	return c.RunService(cmd)
 }
 
+func (c *ServiceCommand) UploadArchive() error {
+	cmd := exec.Command("git", []string{
+		"upload-archive",
+		".",
+	}...)
+
+	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+	cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol))
+	cmd.Dir = c.Dir
+
+	return c.RunService(cmd)
+}
+
 func (c *ServiceCommand) UploadPack() error {
 	cmd := exec.Command("git", []string{
-		"-c", "uploadpack.allowFilter=true",
 		"upload-pack",
 		"--stateless-rpc",
 		".",
+1 knotserver/router.go
+1 -1 knotserver/server.go
+6 -1 knotserver/xrpc/repo_log.go
···
 		return
 	}
 
+	tcommits := make([]types.Commit, len(commits))
+	for i, c := range commits {
+		tcommits[i].FromGoGitCommit(c)
+	}
+
 	// Create response using existing types.RepoLogResponse
 	response := types.RepoLogResponse{
-		Commits: commits,
+		Commits: tcommits,
 		Ref:     ref,
 		Page:    (offset / limit) + 1,
 		PerPage: limit,
+10 -2 lexicons/pulls/pull.json
···
   "required": [
     "target",
     "title",
-    "patch",
+    "patchBlob",
     "createdAt"
   ],
   "properties": {
···
     "type": "string"
   },
   "patch": {
-    "type": "string"
+    "type": "string",
+    "description": "(deprecated) use patchBlob instead"
+  },
+  "patchBlob": {
+    "type": "blob",
+    "accept": [
+      "text/x-patch"
+    ],
+    "description": "patch content"
   },
   "source": {
     "type": "ref",
+3 -30 nix/gomod2nix.toml
···
165
165
[mod."github.com/davecgh/go-spew"]
166
166
version = "v1.1.2-0.20180830191138-d8f796af33cc"
167
167
hash = "sha256-fV9oI51xjHdOmEx6+dlq7Ku2Ag+m/bmbzPo6A4Y74qc="
168
-
[mod."github.com/decred/dcrd/dcrec/secp256k1/v4"]
169
-
version = "v4.4.0"
170
-
hash = "sha256-qrhEIwhDll3cxoVpMbm1NQ9/HTI42S7ms8Buzlo5HCg="
171
168
[mod."github.com/dgraph-io/ristretto"]
172
169
version = "v0.2.0"
173
170
hash = "sha256-bnpxX+oO/Qf7IJevA0gsbloVoqRx+5bh7RQ9d9eLNYw="
···
373
370
[mod."github.com/klauspost/cpuid/v2"]
374
371
version = "v2.3.0"
375
372
hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc="
376
-
[mod."github.com/lestrrat-go/blackmagic"]
377
-
version = "v1.0.4"
378
-
hash = "sha256-HmWOpwoPDNMwLdOi7onNn3Sb+ZsAa3Ai3gVBbXmQ0e8="
379
-
[mod."github.com/lestrrat-go/httpcc"]
380
-
version = "v1.0.1"
381
-
hash = "sha256-SMRSwJpqDIs/xL0l2e8vP0W65qtCHX2wigcOeqPJmos="
382
-
[mod."github.com/lestrrat-go/httprc"]
383
-
version = "v1.0.6"
384
-
hash = "sha256-mfZzePEhrmyyu/avEBd2MsDXyto8dq5+fyu5lA8GUWM="
385
-
[mod."github.com/lestrrat-go/iter"]
386
-
version = "v1.0.2"
387
-
hash = "sha256-30tErRf7Qu/NOAt1YURXY/XJSA6sCr6hYQfO8QqHrtw="
388
-
[mod."github.com/lestrrat-go/jwx/v2"]
389
-
version = "v2.1.6"
390
-
hash = "sha256-0LszXRZIba+X8AOrs3T4uanAUafBdlVB8/MpUNEFpbc="
391
-
[mod."github.com/lestrrat-go/option"]
392
-
version = "v1.0.1"
393
-
hash = "sha256-jVcIYYVsxElIS/l2akEw32vdEPR8+anR6oeT1FoYULI="
394
373
[mod."github.com/lucasb-eyer/go-colorful"]
395
374
version = "v1.2.0"
396
375
hash = "sha256-Gg9dDJFCTaHrKHRR1SrJgZ8fWieJkybljybkI9x0gyE="
···
511
490
[mod."github.com/ryanuber/go-glob"]
512
491
version = "v1.0.0"
513
492
hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY="
514
-
[mod."github.com/segmentio/asm"]
515
-
version = "v1.2.0"
516
-
hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs="
517
493
[mod."github.com/sergi/go-diff"]
518
494
version = "v1.1.0"
519
495
hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY="
···
548
524
[mod."github.com/whyrusleeping/cbor-gen"]
549
525
version = "v0.3.1"
550
526
hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
551
-
[mod."github.com/wyatt915/goldmark-treeblood"]
552
-
version = "v0.0.1"
553
-
hash = "sha256-hAVFaktO02MiiqZFffr8ZlvFEfwxw4Y84OZ2t7e5G7g="
554
-
[mod."github.com/wyatt915/treeblood"]
555
-
version = "v0.1.16"
556
-
hash = "sha256-T68sa+iVx0qY7dDjXEAJvRWQEGXYIpUsf9tcWwO1tIw="
557
527
[mod."github.com/xo/terminfo"]
558
528
version = "v0.0.0-20220910002029-abceb7e1c41e"
559
529
hash = "sha256-GyCDxxMQhXA3Pi/TsWXpA8cX5akEoZV7CFx4RO3rARU="
560
530
[mod."github.com/yuin/goldmark"]
561
531
version = "v1.7.13"
562
532
hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE="
533
+
[mod."github.com/yuin/goldmark-emoji"]
534
+
version = "v1.0.6"
535
+
hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY="
563
536
[mod."github.com/yuin/goldmark-highlighting/v2"]
564
537
version = "v2.0.0-20230729083705-37449abec8cc"
565
538
hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+6
-1
nix/pkgs/appview-static-files.nix
+6
-1
nix/pkgs/appview-static-files.nix
···
8
8
actor-typeahead-src,
9
9
sqlite-lib,
10
10
tailwindcss,
11
+
dolly,
11
12
src,
12
13
}:
13
14
runCommandLocal "appview-static-files" {
···
17
18
(allow file-read* (subpath "/System/Library/OpenSSL"))
18
19
'';
19
20
} ''
20
-
mkdir -p $out/{fonts,icons} && cd $out
21
+
mkdir -p $out/{fonts,icons,logos} && cd $out
21
22
cp -f ${htmx-src} htmx.min.js
22
23
cp -f ${htmx-ws-src} htmx-ext-ws.min.js
23
24
cp -rf ${lucide-src}/*.svg icons/
···
26
27
cp -f ${inter-fonts-src}/InterVariable*.ttf fonts/
27
28
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 fonts/
28
29
cp -f ${actor-typeahead-src}/actor-typeahead.js .
30
+
31
+
${dolly}/bin/dolly -output logos/dolly.png -size 180x180
32
+
${dolly}/bin/dolly -output logos/dolly.ico -size 48x48
33
+
${dolly}/bin/dolly -output logos/dolly.svg
29
34
# tailwindcss -c $src/tailwind.config.js -i $src/input.css -o tw.css won't work
30
35
# for whatever reason (produces broken css), so we are doing this instead
31
36
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/tw.css
+57
nix/pkgs/docs.nix
+57
nix/pkgs/docs.nix
···
1
+
{
2
+
pandoc,
3
+
tailwindcss,
4
+
runCommandLocal,
5
+
inter-fonts-src,
6
+
ibm-plex-mono-src,
7
+
lucide-src,
8
+
dolly,
9
+
src,
10
+
}:
11
+
runCommandLocal "docs" {} ''
12
+
mkdir -p working
13
+
14
+
# copy templates, themes, styles, filters to working directory
15
+
cp ${src}/docs/*.html working/
16
+
cp ${src}/docs/*.theme working/
17
+
cp ${src}/docs/*.css working/
18
+
19
+
# icons
20
+
cp -rf ${lucide-src}/*.svg working/
21
+
22
+
# logo
23
+
${dolly}/bin/dolly -output working/dolly.svg -color currentColor
24
+
25
+
# content - chunked
26
+
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
27
+
-o $out/ \
28
+
-t chunkedhtml \
29
+
--variable toc \
30
+
--variable-json single-page=false \
31
+
--toc-depth=2 \
32
+
--css=stylesheet.css \
33
+
--chunk-template="%i.html" \
34
+
--highlight-style=working/highlight.theme \
35
+
--template=working/template.html
36
+
37
+
# content - single page
38
+
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
39
+
-o $out/single-page.html \
40
+
--toc \
41
+
--variable toc \
42
+
--variable single-page \
43
+
--toc-depth=2 \
44
+
--css=stylesheet.css \
45
+
--highlight-style=working/highlight.theme \
46
+
--template=working/template.html
47
+
48
+
# fonts
49
+
mkdir -p $out/static/fonts
50
+
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
51
+
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
52
+
cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
53
+
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/
54
+
55
+
# styles
56
+
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
57
+
''
+21
nix/pkgs/dolly.nix
+21
nix/pkgs/dolly.nix
···
1
+
{
2
+
buildGoApplication,
3
+
modules,
4
+
src,
5
+
}:
6
+
buildGoApplication {
7
+
pname = "dolly";
8
+
version = "0.1.0";
9
+
inherit src modules;
10
+
11
+
# patch the static dir
12
+
postUnpack = ''
13
+
pushd source
14
+
mkdir -p appview/pages/static
15
+
touch appview/pages/static/x
16
+
popd
17
+
'';
18
+
19
+
doCheck = false;
20
+
subPackages = ["cmd/dolly"];
21
+
}
+7
-5
nix/pkgs/sqlite-lib.nix
+7
-5
nix/pkgs/sqlite-lib.nix
···
1
1
{
2
-
gcc,
3
2
stdenv,
4
3
sqlite-lib-src,
5
4
}:
6
5
stdenv.mkDerivation {
7
6
name = "sqlite-lib";
8
7
src = sqlite-lib-src;
9
-
nativeBuildInputs = [gcc];
8
+
10
9
buildPhase = ''
11
-
gcc -c sqlite3.c
12
-
ar rcs libsqlite3.a sqlite3.o
13
-
ranlib libsqlite3.a
10
+
$CC -c sqlite3.c
11
+
$AR rcs libsqlite3.a sqlite3.o
12
+
$RANLIB libsqlite3.a
13
+
'';
14
+
15
+
installPhase = ''
14
16
mkdir -p $out/include $out/lib
15
17
cp *.h $out/include
16
18
cp libsqlite3.a $out/lib
+1
-1
nix/vm.nix
+1
-1
nix/vm.nix
···
8
8
var = builtins.getEnv name;
9
9
in
10
10
if var == ""
11
-
then throw "\$${name} must be defined, see docs/hacking.md for more details"
11
+
then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
12
12
else var;
13
13
envVarOr = name: default: let
14
14
var = builtins.getEnv name;
+122
orm/orm.go
+122
orm/orm.go
···
1
+
package orm
2
+
3
+
import (
4
+
"context"
5
+
"database/sql"
6
+
"fmt"
7
+
"log/slog"
8
+
"reflect"
9
+
"strings"
10
+
)
11
+
12
+
type migrationFn = func(*sql.Tx) error
13
+
14
+
func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
15
+
logger = logger.With("migration", name)
16
+
17
+
tx, err := c.BeginTx(context.Background(), nil)
18
+
if err != nil {
19
+
return err
20
+
}
21
+
defer tx.Rollback()
22
+
23
+
var exists bool
24
+
err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
25
+
if err != nil {
26
+
return err
27
+
}
28
+
29
+
if !exists {
30
+
// run migration
31
+
err = migrationFn(tx)
32
+
if err != nil {
33
+
logger.Error("failed to run migration", "err", err)
34
+
return err
35
+
}
36
+
37
+
// mark migration as complete
38
+
_, err = tx.Exec("insert into migrations (name) values (?)", name)
39
+
if err != nil {
40
+
logger.Error("failed to mark migration as complete", "err", err)
41
+
return err
42
+
}
43
+
44
+
// commit the transaction
45
+
if err := tx.Commit(); err != nil {
46
+
return err
47
+
}
48
+
49
+
logger.Info("migration applied successfully")
50
+
} else {
51
+
logger.Warn("skipped migration, already applied")
52
+
}
53
+
54
+
return nil
55
+
}
56
+
57
+
type Filter struct {
58
+
Key string
59
+
arg any
60
+
Cmp string
61
+
}
62
+
63
+
func newFilter(key, cmp string, arg any) Filter {
64
+
return Filter{
65
+
Key: key,
66
+
arg: arg,
67
+
Cmp: cmp,
68
+
}
69
+
}
70
+
71
+
func FilterEq(key string, arg any) Filter { return newFilter(key, "=", arg) }
72
+
func FilterNotEq(key string, arg any) Filter { return newFilter(key, "<>", arg) }
73
+
func FilterGte(key string, arg any) Filter { return newFilter(key, ">=", arg) }
74
+
func FilterLte(key string, arg any) Filter { return newFilter(key, "<=", arg) }
75
+
func FilterIs(key string, arg any) Filter { return newFilter(key, "is", arg) }
76
+
func FilterIsNot(key string, arg any) Filter { return newFilter(key, "is not", arg) }
77
+
func FilterIn(key string, arg any) Filter { return newFilter(key, "in", arg) }
78
+
func FilterLike(key string, arg any) Filter { return newFilter(key, "like", arg) }
79
+
func FilterNotLike(key string, arg any) Filter { return newFilter(key, "not like", arg) }
80
+
func FilterContains(key string, arg any) Filter {
81
+
return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
82
+
}
83
+
84
+
func (f Filter) Condition() string {
85
+
rv := reflect.ValueOf(f.arg)
86
+
kind := rv.Kind()
87
+
88
+
// if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
89
+
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
90
+
if rv.Len() == 0 {
91
+
// always false
92
+
return "1 = 0"
93
+
}
94
+
95
+
placeholders := make([]string, rv.Len())
96
+
for i := range placeholders {
97
+
placeholders[i] = "?"
98
+
}
99
+
100
+
return fmt.Sprintf("%s %s (%s)", f.Key, f.Cmp, strings.Join(placeholders, ", "))
101
+
}
102
+
103
+
return fmt.Sprintf("%s %s ?", f.Key, f.Cmp)
104
+
}
105
+
106
+
func (f Filter) Arg() []any {
107
+
rv := reflect.ValueOf(f.arg)
108
+
kind := rv.Kind()
109
+
if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
110
+
if rv.Len() == 0 {
111
+
return nil
112
+
}
113
+
114
+
out := make([]any, rv.Len())
115
+
for i := range rv.Len() {
116
+
out[i] = rv.Index(i).Interface()
117
+
}
118
+
return out
119
+
}
120
+
121
+
return []any{f.arg}
122
+
}
-1
patchutil/patchutil.go
-1
patchutil/patchutil.go
+3
-3
readme.md
+3
-3
readme.md
···
10
10
11
11
## docs
12
12
13
-
* [knot hosting guide](/docs/knot-hosting.md)
14
-
* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
15
-
* [hacking on tangled](/docs/hacking.md)
13
+
- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
14
+
- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
15
+
- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)
16
16
17
17
## security
18
18
+31
sets/gen.go
+31
sets/gen.go
···
1
+
package sets
2
+
3
+
import (
4
+
"math/rand"
5
+
"reflect"
6
+
"testing/quick"
7
+
)
8
+
9
+
func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value {
10
+
s := New[T]()
11
+
12
+
var zero T
13
+
itemType := reflect.TypeOf(zero)
14
+
15
+
for {
16
+
if s.Len() >= size {
17
+
break
18
+
}
19
+
20
+
item, ok := quick.Value(itemType, rand)
21
+
if !ok {
22
+
continue
23
+
}
24
+
25
+
if val, ok := item.Interface().(T); ok {
26
+
s.Insert(val)
27
+
}
28
+
}
29
+
30
+
return reflect.ValueOf(s)
31
+
}
+35
sets/readme.txt
+35
sets/readme.txt
···
1
+
sets
2
+
----
3
+
set datastructure for go with generics and iterators. the
4
+
api is supposed to mimic rust's std::collections::HashSet api.
5
+
6
+
s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4}))
7
+
s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6}))
8
+
9
+
union := sets.Collect(s1.Union(s2))
10
+
intersect := sets.Collect(s1.Intersection(s2))
11
+
diff := sets.Collect(s1.Difference(s2))
12
+
symdiff := sets.Collect(s1.SymmetricDifference(s2))
13
+
14
+
s1.Len() // 4
15
+
s1.Contains(1) // true
16
+
s1.IsEmpty() // false
17
+
s1.IsSubset(s2) // true
18
+
s1.IsSuperset(s2) // false
19
+
s1.IsDisjoint(s2) // false
20
+
21
+
if exists := s1.Insert(1); exists {
22
+
// already existed in set
23
+
}
24
+
25
+
if existed := s1.Remove(1); existed {
26
+
// existed in set, now removed
27
+
}
28
+
29
+
30
+
testing
31
+
-------
32
+
includes property-based tests using the wonderful
33
+
testing/quick module!
34
+
35
+
go test -v
+174
sets/set.go
+174
sets/set.go
···
1
+
package sets
2
+
3
+
import (
4
+
"iter"
5
+
"maps"
6
+
)
7
+
8
+
type Set[T comparable] struct {
9
+
data map[T]struct{}
10
+
}
11
+
12
+
func New[T comparable]() Set[T] {
13
+
return Set[T]{
14
+
data: make(map[T]struct{}),
15
+
}
16
+
}
17
+
18
+
func (s *Set[T]) Insert(item T) bool {
19
+
_, exists := s.data[item]
20
+
s.data[item] = struct{}{}
21
+
return !exists
22
+
}
23
+
24
+
func Singleton[T comparable](item T) Set[T] {
25
+
n := New[T]()
26
+
_ = n.Insert(item)
27
+
return n
28
+
}
29
+
30
+
func (s *Set[T]) Remove(item T) bool {
31
+
_, exists := s.data[item]
32
+
if exists {
33
+
delete(s.data, item)
34
+
}
35
+
return exists
36
+
}
37
+
38
+
func (s Set[T]) Contains(item T) bool {
39
+
_, exists := s.data[item]
40
+
return exists
41
+
}
42
+
43
+
func (s Set[T]) Len() int {
44
+
return len(s.data)
45
+
}
46
+
47
+
func (s Set[T]) IsEmpty() bool {
48
+
return len(s.data) == 0
49
+
}
50
+
51
+
func (s *Set[T]) Clear() {
52
+
s.data = make(map[T]struct{})
53
+
}
54
+
55
+
func (s Set[T]) All() iter.Seq[T] {
56
+
return func(yield func(T) bool) {
57
+
for item := range s.data {
58
+
if !yield(item) {
59
+
return
60
+
}
61
+
}
62
+
}
63
+
}
64
+
65
+
func (s Set[T]) Clone() Set[T] {
66
+
return Set[T]{
67
+
data: maps.Clone(s.data),
68
+
}
69
+
}
70
+
71
+
func (s Set[T]) Union(other Set[T]) iter.Seq[T] {
72
+
if s.Len() >= other.Len() {
73
+
return chain(s.All(), other.Difference(s))
74
+
} else {
75
+
return chain(other.All(), s.Difference(other))
76
+
}
77
+
}
78
+
79
+
func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] {
80
+
return func(yield func(T) bool) {
81
+
for _, seq := range seqs {
82
+
for item := range seq {
83
+
if !yield(item) {
84
+
return
85
+
}
86
+
}
87
+
}
88
+
}
89
+
}
90
+
91
+
func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] {
92
+
return func(yield func(T) bool) {
93
+
for item := range s.data {
94
+
if other.Contains(item) {
95
+
if !yield(item) {
96
+
return
97
+
}
98
+
}
99
+
}
100
+
}
101
+
}
102
+
103
+
func (s Set[T]) Difference(other Set[T]) iter.Seq[T] {
104
+
return func(yield func(T) bool) {
105
+
for item := range s.data {
106
+
if !other.Contains(item) {
107
+
if !yield(item) {
108
+
return
109
+
}
110
+
}
111
+
}
112
+
}
113
+
}
114
+
115
+
func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] {
116
+
return func(yield func(T) bool) {
117
+
for item := range s.data {
118
+
if !other.Contains(item) {
119
+
if !yield(item) {
120
+
return
121
+
}
122
+
}
123
+
}
124
+
for item := range other.data {
125
+
if !s.Contains(item) {
126
+
if !yield(item) {
127
+
return
128
+
}
129
+
}
130
+
}
131
+
}
132
+
}
133
+
134
+
func (s Set[T]) IsSubset(other Set[T]) bool {
135
+
for item := range s.data {
136
+
if !other.Contains(item) {
137
+
return false
138
+
}
139
+
}
140
+
return true
141
+
}
142
+
143
+
func (s Set[T]) IsSuperset(other Set[T]) bool {
144
+
return other.IsSubset(s)
145
+
}
146
+
147
+
func (s Set[T]) IsDisjoint(other Set[T]) bool {
148
+
for item := range s.data {
149
+
if other.Contains(item) {
150
+
return false
151
+
}
152
+
}
153
+
return true
154
+
}
155
+
156
+
func (s Set[T]) Equal(other Set[T]) bool {
157
+
if s.Len() != other.Len() {
158
+
return false
159
+
}
160
+
for item := range s.data {
161
+
if !other.Contains(item) {
162
+
return false
163
+
}
164
+
}
165
+
return true
166
+
}
167
+
168
+
func Collect[T comparable](seq iter.Seq[T]) Set[T] {
169
+
result := New[T]()
170
+
for item := range seq {
171
+
result.Insert(item)
172
+
}
173
+
return result
174
+
}
+411
sets/set_test.go
+411
sets/set_test.go
···
1
+
package sets
2
+
3
+
import (
4
+
"slices"
5
+
"testing"
6
+
"testing/quick"
7
+
)
8
+
9
+
func TestNew(t *testing.T) {
10
+
s := New[int]()
11
+
if s.Len() != 0 {
12
+
t.Errorf("New set should be empty, got length %d", s.Len())
13
+
}
14
+
if !s.IsEmpty() {
15
+
t.Error("New set should be empty")
16
+
}
17
+
}
18
+
19
+
func TestFromSlice(t *testing.T) {
20
+
s := Collect(slices.Values([]int{1, 2, 3, 2, 1}))
21
+
if s.Len() != 3 {
22
+
t.Errorf("Expected length 3, got %d", s.Len())
23
+
}
24
+
if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) {
25
+
t.Error("Set should contain all unique elements from slice")
26
+
}
27
+
}
28
+
29
+
func TestInsert(t *testing.T) {
30
+
s := New[string]()
31
+
32
+
if !s.Insert("hello") {
33
+
t.Error("First insert should return true")
34
+
}
35
+
if s.Insert("hello") {
36
+
t.Error("Duplicate insert should return false")
37
+
}
38
+
if s.Len() != 1 {
39
+
t.Errorf("Expected length 1, got %d", s.Len())
40
+
}
41
+
}
42
+
43
+
func TestRemove(t *testing.T) {
44
+
s := Collect(slices.Values([]int{1, 2, 3}))
45
+
46
+
if !s.Remove(2) {
47
+
t.Error("Remove existing element should return true")
48
+
}
49
+
if s.Remove(2) {
50
+
t.Error("Remove non-existing element should return false")
51
+
}
52
+
if s.Contains(2) {
53
+
t.Error("Element should be removed")
54
+
}
55
+
if s.Len() != 2 {
56
+
t.Errorf("Expected length 2, got %d", s.Len())
57
+
}
58
+
}
59
+
60
+
func TestContains(t *testing.T) {
61
+
s := Collect(slices.Values([]int{1, 2, 3}))
62
+
63
+
if !s.Contains(1) {
64
+
t.Error("Should contain 1")
65
+
}
66
+
if s.Contains(4) {
67
+
t.Error("Should not contain 4")
68
+
}
69
+
}
70
+
71
+
func TestClear(t *testing.T) {
72
+
s := Collect(slices.Values([]int{1, 2, 3}))
73
+
s.Clear()
74
+
75
+
if !s.IsEmpty() {
76
+
t.Error("Set should be empty after clear")
77
+
}
78
+
if s.Len() != 0 {
79
+
t.Errorf("Expected length 0, got %d", s.Len())
80
+
}
81
+
}
82
+
83
+
func TestIterator(t *testing.T) {
84
+
s := Collect(slices.Values([]int{1, 2, 3}))
85
+
var items []int
86
+
87
+
for item := range s.All() {
88
+
items = append(items, item)
89
+
}
90
+
91
+
slices.Sort(items)
92
+
expected := []int{1, 2, 3}
93
+
if !slices.Equal(items, expected) {
94
+
t.Errorf("Expected %v, got %v", expected, items)
95
+
}
96
+
}
97
+
98
+
func TestClone(t *testing.T) {
99
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
100
+
s2 := s1.Clone()
101
+
102
+
if !s1.Equal(s2) {
103
+
t.Error("Cloned set should be equal to original")
104
+
}
105
+
106
+
s2.Insert(4)
107
+
if s1.Contains(4) {
108
+
t.Error("Modifying clone should not affect original")
109
+
}
110
+
}
111
+
112
+
func TestUnion(t *testing.T) {
113
+
s1 := Collect(slices.Values([]int{1, 2}))
114
+
s2 := Collect(slices.Values([]int{2, 3}))
115
+
116
+
result := Collect(s1.Union(s2))
117
+
expected := Collect(slices.Values([]int{1, 2, 3}))
118
+
119
+
if !result.Equal(expected) {
120
+
t.Errorf("Expected %v, got %v", expected, result)
121
+
}
122
+
}
123
+
124
+
func TestIntersection(t *testing.T) {
125
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
126
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
127
+
128
+
expected := Collect(slices.Values([]int{2, 3}))
129
+
result := Collect(s1.Intersection(s2))
130
+
131
+
if !result.Equal(expected) {
132
+
t.Errorf("Expected %v, got %v", expected, result)
133
+
}
134
+
}
135
+
136
+
func TestDifference(t *testing.T) {
137
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
138
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
139
+
140
+
expected := Collect(slices.Values([]int{1}))
141
+
result := Collect(s1.Difference(s2))
142
+
143
+
if !result.Equal(expected) {
144
+
t.Errorf("Expected %v, got %v", expected, result)
145
+
}
146
+
}
147
+
148
+
func TestSymmetricDifference(t *testing.T) {
149
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
150
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
151
+
152
+
expected := Collect(slices.Values([]int{1, 4}))
153
+
result := Collect(s1.SymmetricDifference(s2))
154
+
155
+
if !result.Equal(expected) {
156
+
t.Errorf("Expected %v, got %v", expected, result)
157
+
}
158
+
}
159
+
160
+
func TestSymmetricDifferenceCommutativeProperty(t *testing.T) {
161
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
162
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
163
+
164
+
result1 := Collect(s1.SymmetricDifference(s2))
165
+
result2 := Collect(s2.SymmetricDifference(s1))
166
+
167
+
if !result1.Equal(result2) {
168
+
t.Errorf("Expected %v, got %v", result1, result2)
169
+
}
170
+
}
171
+
172
+
func TestIsSubset(t *testing.T) {
173
+
s1 := Collect(slices.Values([]int{1, 2}))
174
+
s2 := Collect(slices.Values([]int{1, 2, 3}))
175
+
176
+
if !s1.IsSubset(s2) {
177
+
t.Error("s1 should be subset of s2")
178
+
}
179
+
if s2.IsSubset(s1) {
180
+
t.Error("s2 should not be subset of s1")
181
+
}
182
+
}
183
+
184
+
func TestIsSuperset(t *testing.T) {
185
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
186
+
s2 := Collect(slices.Values([]int{1, 2}))
187
+
188
+
if !s1.IsSuperset(s2) {
189
+
t.Error("s1 should be superset of s2")
190
+
}
191
+
if s2.IsSuperset(s1) {
192
+
t.Error("s2 should not be superset of s1")
193
+
}
194
+
}
195
+
196
+
func TestIsDisjoint(t *testing.T) {
197
+
s1 := Collect(slices.Values([]int{1, 2}))
198
+
s2 := Collect(slices.Values([]int{3, 4}))
199
+
s3 := Collect(slices.Values([]int{2, 3}))
200
+
201
+
if !s1.IsDisjoint(s2) {
202
+
t.Error("s1 and s2 should be disjoint")
203
+
}
204
+
if s1.IsDisjoint(s3) {
205
+
t.Error("s1 and s3 should not be disjoint")
206
+
}
207
+
}
208
+
209
+
func TestEqual(t *testing.T) {
210
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
211
+
s2 := Collect(slices.Values([]int{3, 2, 1}))
212
+
s3 := Collect(slices.Values([]int{1, 2}))
213
+
214
+
if !s1.Equal(s2) {
215
+
t.Error("s1 and s2 should be equal")
216
+
}
217
+
if s1.Equal(s3) {
218
+
t.Error("s1 and s3 should not be equal")
219
+
}
220
+
}
221
+
222
+
func TestCollect(t *testing.T) {
223
+
s1 := Collect(slices.Values([]int{1, 2}))
224
+
s2 := Collect(slices.Values([]int{2, 3}))
225
+
226
+
unionSet := Collect(s1.Union(s2))
227
+
if unionSet.Len() != 3 {
228
+
t.Errorf("Expected union set length 3, got %d", unionSet.Len())
229
+
}
230
+
if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) {
231
+
t.Error("Union set should contain 1, 2, and 3")
232
+
}
233
+
234
+
diffSet := Collect(s1.Difference(s2))
235
+
if diffSet.Len() != 1 {
236
+
t.Errorf("Expected difference set length 1, got %d", diffSet.Len())
237
+
}
238
+
if !diffSet.Contains(1) {
239
+
t.Error("Difference set should contain 1")
240
+
}
241
+
}
242
+
243
+
func TestPropertySingleonLen(t *testing.T) {
244
+
f := func(item int) bool {
245
+
single := Singleton(item)
246
+
return single.Len() == 1
247
+
}
248
+
249
+
if err := quick.Check(f, nil); err != nil {
250
+
t.Error(err)
251
+
}
252
+
}
253
+
254
+
func TestPropertyInsertIdempotent(t *testing.T) {
255
+
f := func(s Set[int], item int) bool {
256
+
clone := s.Clone()
257
+
258
+
clone.Insert(item)
259
+
firstLen := clone.Len()
260
+
261
+
clone.Insert(item)
262
+
secondLen := clone.Len()
263
+
264
+
return firstLen == secondLen
265
+
}
266
+
267
+
if err := quick.Check(f, nil); err != nil {
268
+
t.Error(err)
269
+
}
270
+
}
271
+
272
+
func TestPropertyUnionCommutative(t *testing.T) {
273
+
f := func(s1 Set[int], s2 Set[int]) bool {
274
+
union1 := Collect(s1.Union(s2))
275
+
union2 := Collect(s2.Union(s1))
276
+
return union1.Equal(union2)
277
+
}
278
+
279
+
if err := quick.Check(f, nil); err != nil {
280
+
t.Error(err)
281
+
}
282
+
}
283
+
284
+
func TestPropertyIntersectionCommutative(t *testing.T) {
285
+
f := func(s1 Set[int], s2 Set[int]) bool {
286
+
inter1 := Collect(s1.Intersection(s2))
287
+
inter2 := Collect(s2.Intersection(s1))
288
+
return inter1.Equal(inter2)
289
+
}
290
+
291
+
if err := quick.Check(f, nil); err != nil {
292
+
t.Error(err)
293
+
}
294
+
}
295
+
296
+
func TestPropertyCloneEquals(t *testing.T) {
297
+
f := func(s Set[int]) bool {
298
+
clone := s.Clone()
299
+
return s.Equal(clone)
300
+
}
301
+
302
+
if err := quick.Check(f, nil); err != nil {
303
+
t.Error(err)
304
+
}
305
+
}
306
+
307
+
func TestPropertyIntersectionIsSubset(t *testing.T) {
308
+
f := func(s1 Set[int], s2 Set[int]) bool {
309
+
inter := Collect(s1.Intersection(s2))
310
+
return inter.IsSubset(s1) && inter.IsSubset(s2)
311
+
}
312
+
313
+
if err := quick.Check(f, nil); err != nil {
314
+
t.Error(err)
315
+
}
316
+
}
317
+
318
+
func TestPropertyUnionIsSuperset(t *testing.T) {
319
+
f := func(s1 Set[int], s2 Set[int]) bool {
320
+
union := Collect(s1.Union(s2))
321
+
return union.IsSuperset(s1) && union.IsSuperset(s2)
322
+
}
323
+
324
+
if err := quick.Check(f, nil); err != nil {
325
+
t.Error(err)
326
+
}
327
+
}
328
+
329
+
func TestPropertyDifferenceDisjoint(t *testing.T) {
330
+
f := func(s1 Set[int], s2 Set[int]) bool {
331
+
diff := Collect(s1.Difference(s2))
332
+
return diff.IsDisjoint(s2)
333
+
}
334
+
335
+
if err := quick.Check(f, nil); err != nil {
336
+
t.Error(err)
337
+
}
338
+
}
339
+
340
+
func TestPropertySymmetricDifferenceCommutative(t *testing.T) {
341
+
f := func(s1 Set[int], s2 Set[int]) bool {
342
+
symDiff1 := Collect(s1.SymmetricDifference(s2))
343
+
symDiff2 := Collect(s2.SymmetricDifference(s1))
344
+
return symDiff1.Equal(symDiff2)
345
+
}
346
+
347
+
if err := quick.Check(f, nil); err != nil {
348
+
t.Error(err)
349
+
}
350
+
}
351
+
352
+
func TestPropertyRemoveWorks(t *testing.T) {
353
+
f := func(s Set[int], item int) bool {
354
+
clone := s.Clone()
355
+
clone.Insert(item)
356
+
clone.Remove(item)
357
+
return !clone.Contains(item)
358
+
}
359
+
360
+
if err := quick.Check(f, nil); err != nil {
361
+
t.Error(err)
362
+
}
363
+
}
364
+
365
+
func TestPropertyClearEmpty(t *testing.T) {
366
+
f := func(s Set[int]) bool {
367
+
s.Clear()
368
+
return s.IsEmpty() && s.Len() == 0
369
+
}
370
+
371
+
if err := quick.Check(f, nil); err != nil {
372
+
t.Error(err)
373
+
}
374
+
}
375
+
376
+
func TestPropertyIsSubsetReflexive(t *testing.T) {
377
+
f := func(s Set[int]) bool {
378
+
return s.IsSubset(s)
379
+
}
380
+
381
+
if err := quick.Check(f, nil); err != nil {
382
+
t.Error(err)
383
+
}
384
+
}
385
+
386
+
func TestPropertyDeMorganUnion(t *testing.T) {
387
+
f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool {
388
+
// create a universe that contains both sets
389
+
u := universe.Clone()
390
+
for item := range s1.All() {
391
+
u.Insert(item)
392
+
}
393
+
for item := range s2.All() {
394
+
u.Insert(item)
395
+
}
396
+
397
+
// (A u B)' = A' n B'
398
+
union := Collect(s1.Union(s2))
399
+
complementUnion := Collect(u.Difference(union))
400
+
401
+
complementS1 := Collect(u.Difference(s1))
402
+
complementS2 := Collect(u.Difference(s2))
403
+
intersectionComplements := Collect(complementS1.Intersection(complementS2))
404
+
405
+
return complementUnion.Equal(intersectionComplements)
406
+
}
407
+
408
+
if err := quick.Check(f, nil); err != nil {
409
+
t.Error(err)
410
+
}
411
+
}
+1
spindle/db/repos.go
+1
spindle/db/repos.go
+22
-21
spindle/engine/engine.go
+22
-21
spindle/engine/engine.go
···
3
3
import (
4
4
"context"
5
5
"errors"
6
-
"fmt"
7
6
"log/slog"
7
+
"sync"
8
8
9
9
securejoin "github.com/cyphar/filepath-securejoin"
10
-
"golang.org/x/sync/errgroup"
11
10
"tangled.org/core/notifier"
12
11
"tangled.org/core/spindle/config"
13
12
"tangled.org/core/spindle/db"
···
31
30
}
32
31
}
33
32
34
-
eg, ctx := errgroup.WithContext(ctx)
33
+
var wg sync.WaitGroup
35
34
for eng, wfs := range pipeline.Workflows {
36
35
workflowTimeout := eng.WorkflowTimeout()
37
36
l.Info("using workflow timeout", "timeout", workflowTimeout)
38
37
39
38
for _, w := range wfs {
40
-
eg.Go(func() error {
39
+
wg.Add(1)
40
+
go func() {
41
+
defer wg.Done()
42
+
41
43
wid := models.WorkflowId{
42
44
PipelineId: pipelineId,
43
45
Name: w.Name,
···
45
47
46
48
err := db.StatusRunning(wid, n)
47
49
if err != nil {
48
-
return err
50
+
l.Error("failed to set workflow status to running", "wid", wid, "err", err)
51
+
return
49
52
}
50
53
51
54
err = eng.SetupWorkflow(ctx, wid, &w)
···
61
64
62
65
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
63
66
if dbErr != nil {
64
-
return dbErr
67
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
65
68
}
66
-
return err
69
+
return
67
70
}
68
71
defer eng.DestroyWorkflow(ctx, wid)
69
72
70
-
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
73
+
secretValues := make([]string, len(allSecrets))
74
+
for i, s := range allSecrets {
75
+
secretValues[i] = s.Value
76
+
}
77
+
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues)
71
78
if err != nil {
72
79
l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
73
80
wfLogger = nil
···
99
106
if errors.Is(err, ErrTimedOut) {
100
107
dbErr := db.StatusTimeout(wid, n)
101
108
if dbErr != nil {
102
-
return dbErr
109
+
l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr)
103
110
}
104
111
} else {
105
112
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
106
113
if dbErr != nil {
107
-
return dbErr
114
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
108
115
}
109
116
}
110
-
111
-
return fmt.Errorf("starting steps image: %w", err)
117
+
return
112
118
}
113
119
}
114
120
115
121
err = db.StatusSuccess(wid, n)
116
122
if err != nil {
117
-
return err
123
+
l.Error("failed to set workflow status to success", "wid", wid, "err", err)
118
124
}
119
-
120
-
return nil
121
-
})
125
+
}()
122
126
}
123
127
}
124
128
125
-
if err := eg.Wait(); err != nil {
126
-
l.Error("failed to run one or more workflows", "err", err)
127
-
} else {
128
-
l.Info("successfully ran full pipeline")
129
-
}
129
+
wg.Wait()
130
+
l.Info("all workflows completed")
130
131
}
+5
-3
spindle/engines/nixery/engine.go
+5
-3
spindle/engines/nixery/engine.go
···
294
294
workflowEnvs.AddEnv(s.Key, s.Value)
295
295
}
296
296
297
-
step := w.Steps[idx].(Step)
297
+
step := w.Steps[idx]
298
298
299
299
select {
300
300
case <-ctx.Done():
···
303
303
}
304
304
305
305
envs := append(EnvVars(nil), workflowEnvs...)
306
-
for k, v := range step.environment {
307
-
envs.AddEnv(k, v)
306
+
if nixStep, ok := step.(Step); ok {
307
+
for k, v := range nixStep.environment {
308
+
envs.AddEnv(k, v)
309
+
}
308
310
}
309
311
envs.AddEnv("HOME", homeDir)
310
312
+6
-1
spindle/models/logger.go
+6
-1
spindle/models/logger.go
···
12
12
type WorkflowLogger struct {
13
13
file *os.File
14
14
encoder *json.Encoder
15
+
mask *SecretMask
15
16
}
16
17
17
-
func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) {
18
+
func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) {
18
19
path := LogFilePath(baseDir, wid)
19
20
20
21
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
···
25
26
return &WorkflowLogger{
26
27
file: file,
27
28
encoder: json.NewEncoder(file),
29
+
mask: NewSecretMask(secretValues),
28
30
}, nil
29
31
}
30
32
···
62
64
63
65
func (w *dataWriter) Write(p []byte) (int, error) {
64
66
line := strings.TrimRight(string(p), "\r\n")
67
+
if w.logger.mask != nil {
68
+
line = w.logger.mask.Mask(line)
69
+
}
65
70
entry := NewDataLogLine(w.idx, line, w.stream)
66
71
if err := w.logger.encoder.Encode(entry); err != nil {
67
72
return 0, err
+51
spindle/models/secret_mask.go
+51
spindle/models/secret_mask.go
···
1
+
package models
2
+
3
+
import (
4
+
"encoding/base64"
5
+
"strings"
6
+
)
7
+
8
+
// SecretMask replaces secret values in strings with "***".
9
+
type SecretMask struct {
10
+
replacer *strings.Replacer
11
+
}
12
+
13
+
// NewSecretMask creates a mask for the given secret values.
14
+
// Also registers base64-encoded variants of each secret.
15
+
func NewSecretMask(values []string) *SecretMask {
16
+
var pairs []string
17
+
18
+
for _, value := range values {
19
+
if value == "" {
20
+
continue
21
+
}
22
+
23
+
pairs = append(pairs, value, "***")
24
+
25
+
b64 := base64.StdEncoding.EncodeToString([]byte(value))
26
+
if b64 != value {
27
+
pairs = append(pairs, b64, "***")
28
+
}
29
+
30
+
b64NoPad := strings.TrimRight(b64, "=")
31
+
if b64NoPad != b64 && b64NoPad != value {
32
+
pairs = append(pairs, b64NoPad, "***")
33
+
}
34
+
}
35
+
36
+
if len(pairs) == 0 {
37
+
return nil
38
+
}
39
+
40
+
return &SecretMask{
41
+
replacer: strings.NewReplacer(pairs...),
42
+
}
43
+
}
44
+
45
+
// Mask replaces all registered secret values with "***".
46
+
func (m *SecretMask) Mask(input string) string {
47
+
if m == nil || m.replacer == nil {
48
+
return input
49
+
}
50
+
return m.replacer.Replace(input)
51
+
}
+135
spindle/models/secret_mask_test.go
+135
spindle/models/secret_mask_test.go
···
1
+
package models
2
+
3
+
import (
4
+
"encoding/base64"
5
+
"testing"
6
+
)
7
+
8
+
func TestSecretMask_BasicMasking(t *testing.T) {
9
+
mask := NewSecretMask([]string{"mysecret123"})
10
+
11
+
input := "The password is mysecret123 in this log"
12
+
expected := "The password is *** in this log"
13
+
14
+
result := mask.Mask(input)
15
+
if result != expected {
16
+
t.Errorf("expected %q, got %q", expected, result)
17
+
}
18
+
}
19
+
20
+
func TestSecretMask_Base64Encoded(t *testing.T) {
21
+
secret := "mysecret123"
22
+
mask := NewSecretMask([]string{secret})
23
+
24
+
b64 := base64.StdEncoding.EncodeToString([]byte(secret))
25
+
input := "Encoded: " + b64
26
+
expected := "Encoded: ***"
27
+
28
+
result := mask.Mask(input)
29
+
if result != expected {
30
+
t.Errorf("expected %q, got %q", expected, result)
31
+
}
32
+
}
33
+
34
+
func TestSecretMask_Base64NoPadding(t *testing.T) {
35
+
// "test" encodes to "dGVzdA==" with padding
36
+
secret := "test"
37
+
mask := NewSecretMask([]string{secret})
38
+
39
+
b64NoPad := "dGVzdA" // base64 without padding
40
+
input := "Token: " + b64NoPad
41
+
expected := "Token: ***"
42
+
43
+
result := mask.Mask(input)
44
+
if result != expected {
45
+
t.Errorf("expected %q, got %q", expected, result)
46
+
}
47
+
}
48
+
49
+
func TestSecretMask_MultipleSecrets(t *testing.T) {
50
+
mask := NewSecretMask([]string{"password1", "apikey123"})
51
+
52
+
input := "Using password1 and apikey123 for auth"
53
+
expected := "Using *** and *** for auth"
54
+
55
+
result := mask.Mask(input)
56
+
if result != expected {
57
+
t.Errorf("expected %q, got %q", expected, result)
58
+
}
59
+
}
60
+
61
+
func TestSecretMask_MultipleOccurrences(t *testing.T) {
62
+
mask := NewSecretMask([]string{"secret"})
63
+
64
+
input := "secret appears twice: secret"
65
+
expected := "*** appears twice: ***"
66
+
67
+
result := mask.Mask(input)
68
+
if result != expected {
69
+
t.Errorf("expected %q, got %q", expected, result)
70
+
}
71
+
}
72
+
73
+
func TestSecretMask_ShortValues(t *testing.T) {
74
+
mask := NewSecretMask([]string{"abc", "xy", ""})
75
+
76
+
if mask == nil {
77
+
t.Fatal("expected non-nil mask")
78
+
}
79
+
80
+
input := "abc xy test"
81
+
expected := "*** *** test"
82
+
result := mask.Mask(input)
83
+
if result != expected {
84
+
t.Errorf("expected %q, got %q", expected, result)
85
+
}
86
+
}
87
+
88
+
func TestSecretMask_NilMask(t *testing.T) {
89
+
var mask *SecretMask
90
+
91
+
input := "some input text"
92
+
result := mask.Mask(input)
93
+
if result != input {
94
+
t.Errorf("expected %q, got %q", input, result)
95
+
}
96
+
}
97
+
98
+
func TestSecretMask_EmptyInput(t *testing.T) {
99
+
mask := NewSecretMask([]string{"secret"})
100
+
101
+
result := mask.Mask("")
102
+
if result != "" {
103
+
t.Errorf("expected empty string, got %q", result)
104
+
}
105
+
}
106
+
107
+
func TestSecretMask_NoMatch(t *testing.T) {
108
+
mask := NewSecretMask([]string{"secretvalue"})
109
+
110
+
input := "nothing to mask here"
111
+
result := mask.Mask(input)
112
+
if result != input {
113
+
t.Errorf("expected %q, got %q", input, result)
114
+
}
115
+
}
116
+
117
+
func TestSecretMask_EmptySecretsList(t *testing.T) {
118
+
mask := NewSecretMask([]string{})
119
+
120
+
if mask != nil {
121
+
t.Error("expected nil mask for empty secrets list")
122
+
}
123
+
}
124
+
125
+
func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
126
+
mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
127
+
128
+
input := "Using validpassword here"
129
+
expected := "Using *** here"
130
+
131
+
result := mask.Mask(input)
132
+
if result != expected {
133
+
t.Errorf("expected %q, got %q", expected, result)
134
+
}
135
+
}
+1
-1
spindle/motd
+1
-1
spindle/motd
+31
-13
spindle/server.go
+31
-13
spindle/server.go
···
8
8
"log/slog"
9
9
"maps"
10
10
"net/http"
11
+
"sync"
11
12
12
13
"github.com/go-chi/chi/v5"
13
14
"tangled.org/core/api/tangled"
···
30
31
)
31
32
32
33
//go:embed motd
33
-
var motd []byte
34
+
var defaultMotd []byte
34
35
35
36
const (
36
37
rbacDomain = "thisserver"
37
38
)
38
39
39
40
type Spindle struct {
40
-
jc *jetstream.JetstreamClient
41
-
db *db.DB
42
-
e *rbac.Enforcer
43
-
l *slog.Logger
44
-
n *notifier.Notifier
45
-
engs map[string]models.Engine
46
-
jq *queue.Queue
47
-
cfg *config.Config
48
-
ks *eventconsumer.Consumer
49
-
res *idresolver.Resolver
50
-
vault secrets.Manager
41
+
jc *jetstream.JetstreamClient
42
+
db *db.DB
43
+
e *rbac.Enforcer
44
+
l *slog.Logger
45
+
n *notifier.Notifier
46
+
engs map[string]models.Engine
47
+
jq *queue.Queue
48
+
cfg *config.Config
49
+
ks *eventconsumer.Consumer
50
+
res *idresolver.Resolver
51
+
vault secrets.Manager
52
+
motd []byte
53
+
motdMu sync.RWMutex
51
54
}
52
55
53
56
// New creates a new Spindle server with the provided configuration and engines.
···
128
131
cfg: cfg,
129
132
res: resolver,
130
133
vault: vault,
134
+
motd: defaultMotd,
131
135
}
132
136
133
137
err = e.AddSpindle(rbacDomain)
···
201
205
return s.e
202
206
}
203
207
208
+
// SetMotdContent sets custom MOTD content, replacing the embedded default.
209
+
func (s *Spindle) SetMotdContent(content []byte) {
210
+
s.motdMu.Lock()
211
+
defer s.motdMu.Unlock()
212
+
s.motd = content
213
+
}
214
+
215
+
// GetMotdContent returns the current MOTD content.
216
+
func (s *Spindle) GetMotdContent() []byte {
217
+
s.motdMu.RLock()
218
+
defer s.motdMu.RUnlock()
219
+
return s.motd
220
+
}
221
+
204
222
// Start starts the Spindle server (blocking).
205
223
func (s *Spindle) Start(ctx context.Context) error {
206
224
// starts a job queue runner in the background
···
246
264
mux := chi.NewRouter()
247
265
248
266
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
249
-
w.Write(motd)
267
+
w.Write(s.GetMotdContent())
250
268
})
251
269
mux.HandleFunc("/events", s.Events)
252
270
mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
+1
-1
tailwind.config.js
+1
-1
tailwind.config.js
···
2
2
const colors = require("tailwindcss/colors");
3
3
4
4
module.exports = {
5
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
5
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
6
6
darkMode: "media",
7
7
theme: {
8
8
container: {
+199
types/commit.go
+199
types/commit.go
···
1
+
package types
2
+
3
+
import (
4
+
"bytes"
5
+
"encoding/json"
6
+
"fmt"
7
+
"maps"
8
+
"regexp"
9
+
"strings"
10
+
11
+
"github.com/go-git/go-git/v5/plumbing"
12
+
"github.com/go-git/go-git/v5/plumbing/object"
13
+
)
14
+
15
+
type Commit struct {
16
+
// hash of the commit object.
17
+
Hash plumbing.Hash `json:"hash,omitempty"`
18
+
19
+
// author is the original author of the commit.
20
+
Author object.Signature `json:"author"`
21
+
22
+
// committer is the one performing the commit, might be different from author.
23
+
Committer object.Signature `json:"committer"`
24
+
25
+
// message is the commit message, contains arbitrary text.
26
+
Message string `json:"message"`
27
+
28
+
// treehash is the hash of the root tree of the commit.
29
+
Tree string `json:"tree"`
30
+
31
+
// parents are the hashes of the parent commits of the commit.
32
+
ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"`
33
+
34
+
// pgpsignature is the pgp signature of the commit.
35
+
PGPSignature string `json:"pgp_signature,omitempty"`
36
+
37
+
// mergetag is the embedded tag object when a merge commit is created by
38
+
// merging a signed tag.
39
+
MergeTag string `json:"merge_tag,omitempty"`
40
+
41
+
// changeid is a unique identifier for the change (e.g., gerrit change-id).
42
+
ChangeId string `json:"change_id,omitempty"`
43
+
44
+
// extraheaders contains additional headers not captured by other fields.
45
+
ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"`
46
+
47
+
// deprecated: kept for backwards compatibility with old json format.
48
+
This string `json:"this,omitempty"`
49
+
50
+
// deprecated: kept for backwards compatibility with old json format.
51
+
Parent string `json:"parent,omitempty"`
52
+
}
53
+
54
+
// types.Commit is an unify two commit structs:
55
+
// - git.object.Commit from
56
+
// - types.NiceDiff.commit
57
+
//
58
+
// to do this in backwards compatible fashion, we define the base struct
59
+
// to use the same fields as NiceDiff.Commit, and then we also unmarshal
60
+
// the struct fields from go-git structs, this custom unmarshal makes sense
61
+
// of both representations and unifies them to have maximal data in either
62
+
// form.
63
+
func (c *Commit) UnmarshalJSON(data []byte) error {
64
+
type Alias Commit
65
+
66
+
aux := &struct {
67
+
*object.Commit
68
+
*Alias
69
+
}{
70
+
Alias: (*Alias)(c),
71
+
}
72
+
73
+
if err := json.Unmarshal(data, aux); err != nil {
74
+
return err
75
+
}
76
+
77
+
c.FromGoGitCommit(aux.Commit)
78
+
79
+
return nil
80
+
}
81
+
82
+
// fill in as much of Commit as possible from the given go-git commit
83
+
func (c *Commit) FromGoGitCommit(gc *object.Commit) {
84
+
if gc == nil {
85
+
return
86
+
}
87
+
88
+
if c.Hash.IsZero() {
89
+
c.Hash = gc.Hash
90
+
}
91
+
if c.This == "" {
92
+
c.This = gc.Hash.String()
93
+
}
94
+
if isEmptySignature(c.Author) {
95
+
c.Author = gc.Author
96
+
}
97
+
if isEmptySignature(c.Committer) {
98
+
c.Committer = gc.Committer
99
+
}
100
+
if c.Message == "" {
101
+
c.Message = gc.Message
102
+
}
103
+
if c.Tree == "" {
104
+
c.Tree = gc.TreeHash.String()
105
+
}
106
+
if c.PGPSignature == "" {
107
+
c.PGPSignature = gc.PGPSignature
108
+
}
109
+
if c.MergeTag == "" {
110
+
c.MergeTag = gc.MergeTag
111
+
}
112
+
113
+
if len(c.ParentHashes) == 0 {
114
+
c.ParentHashes = gc.ParentHashes
115
+
}
116
+
if c.Parent == "" && len(gc.ParentHashes) > 0 {
117
+
c.Parent = gc.ParentHashes[0].String()
118
+
}
119
+
120
+
if len(c.ExtraHeaders) == 0 {
121
+
c.ExtraHeaders = make(map[string][]byte)
122
+
maps.Copy(c.ExtraHeaders, gc.ExtraHeaders)
123
+
}
124
+
125
+
if c.ChangeId == "" {
126
+
if v, ok := gc.ExtraHeaders["change-id"]; ok {
127
+
c.ChangeId = string(v)
128
+
}
129
+
}
130
+
}
131
+
132
+
func isEmptySignature(s object.Signature) bool {
133
+
return s.Email == "" && s.Name == "" && s.When.IsZero()
134
+
}
135
+
136
+
// produce a verifiable payload from this commit's metadata
137
+
func (c *Commit) Payload() string {
138
+
author := bytes.NewBuffer([]byte{})
139
+
c.Author.Encode(author)
140
+
141
+
committer := bytes.NewBuffer([]byte{})
142
+
c.Committer.Encode(committer)
143
+
144
+
payload := strings.Builder{}
145
+
146
+
fmt.Fprintf(&payload, "tree %s\n", c.Tree)
147
+
148
+
if len(c.ParentHashes) > 0 {
149
+
for _, p := range c.ParentHashes {
150
+
fmt.Fprintf(&payload, "parent %s\n", p.String())
151
+
}
152
+
} else {
153
+
// present for backwards compatibility
154
+
fmt.Fprintf(&payload, "parent %s\n", c.Parent)
155
+
}
156
+
157
+
fmt.Fprintf(&payload, "author %s\n", author.String())
158
+
fmt.Fprintf(&payload, "committer %s\n", committer.String())
159
+
160
+
if c.ChangeId != "" {
161
+
fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId)
162
+
} else if v, ok := c.ExtraHeaders["change-id"]; ok {
163
+
fmt.Fprintf(&payload, "change-id %s\n", string(v))
164
+
}
165
+
166
+
fmt.Fprintf(&payload, "\n%s", c.Message)
167
+
168
+
return payload.String()
169
+
}
170
+
171
+
var (
172
+
coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`)
173
+
)
174
+
175
+
func (commit Commit) CoAuthors() []object.Signature {
176
+
var coAuthors []object.Signature
177
+
seen := make(map[string]bool)
178
+
matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
179
+
180
+
for _, match := range matches {
181
+
if len(match) >= 3 {
182
+
name := strings.TrimSpace(match[1])
183
+
email := strings.TrimSpace(match[2])
184
+
185
+
if seen[email] {
186
+
continue
187
+
}
188
+
seen[email] = true
189
+
190
+
coAuthors = append(coAuthors, object.Signature{
191
+
Name: name,
192
+
Email: email,
193
+
When: commit.Committer.When,
194
+
})
195
+
}
196
+
}
197
+
198
+
return coAuthors
199
+
}
+5
-12
types/diff.go
+5
-12
types/diff.go
···
2
2
3
3
import (
4
4
"github.com/bluekeyes/go-gitdiff/gitdiff"
5
-
"github.com/go-git/go-git/v5/plumbing/object"
6
5
)
7
6
8
7
type DiffOpts struct {
···
43
42
44
43
// A nicer git diff representation.
45
44
type NiceDiff struct {
46
-
Commit struct {
47
-
Message string `json:"message"`
48
-
Author object.Signature `json:"author"`
49
-
This string `json:"this"`
50
-
Parent string `json:"parent"`
51
-
PGPSignature string `json:"pgp_signature"`
52
-
Committer object.Signature `json:"committer"`
53
-
Tree string `json:"tree"`
54
-
ChangedId string `json:"change_id"`
55
-
} `json:"commit"`
56
-
Stat struct {
45
+
Commit Commit `json:"commit"`
46
+
Stat struct {
57
47
FilesChanged int `json:"files_changed"`
58
48
Insertions int `json:"insertions"`
59
49
Deletions int `json:"deletions"`
···
84
74
85
75
// used by html elements as a unique ID for hrefs
86
76
func (d *Diff) Id() string {
77
+
if d.IsDelete {
78
+
return d.Name.Old
79
+
}
87
80
return d.Name.New
88
81
}
89
82
+112
types/diff_test.go
+112
types/diff_test.go
···
1
+
package types
2
+
3
+
import "testing"
4
+
5
+
func TestDiffId(t *testing.T) {
6
+
tests := []struct {
7
+
name string
8
+
diff Diff
9
+
expected string
10
+
}{
11
+
{
12
+
name: "regular file uses new name",
13
+
diff: Diff{
14
+
Name: struct {
15
+
Old string `json:"old"`
16
+
New string `json:"new"`
17
+
}{Old: "", New: "src/main.go"},
18
+
},
19
+
expected: "src/main.go",
20
+
},
21
+
{
22
+
name: "new file uses new name",
23
+
diff: Diff{
24
+
Name: struct {
25
+
Old string `json:"old"`
26
+
New string `json:"new"`
27
+
}{Old: "", New: "src/new.go"},
28
+
IsNew: true,
29
+
},
30
+
expected: "src/new.go",
31
+
},
32
+
{
33
+
name: "deleted file uses old name",
34
+
diff: Diff{
35
+
Name: struct {
36
+
Old string `json:"old"`
37
+
New string `json:"new"`
38
+
}{Old: "src/deleted.go", New: ""},
39
+
IsDelete: true,
40
+
},
41
+
expected: "src/deleted.go",
42
+
},
43
+
{
44
+
name: "renamed file uses new name",
45
+
diff: Diff{
46
+
Name: struct {
47
+
Old string `json:"old"`
48
+
New string `json:"new"`
49
+
}{Old: "src/old.go", New: "src/renamed.go"},
50
+
IsRename: true,
51
+
},
52
+
expected: "src/renamed.go",
53
+
},
54
+
}
55
+
56
+
for _, tt := range tests {
57
+
t.Run(tt.name, func(t *testing.T) {
58
+
if got := tt.diff.Id(); got != tt.expected {
59
+
t.Errorf("Diff.Id() = %q, want %q", got, tt.expected)
60
+
}
61
+
})
62
+
}
63
+
}
64
+
65
+
func TestChangedFilesMatchesDiffId(t *testing.T) {
66
+
// ChangedFiles() must return values matching each Diff's Id()
67
+
// so that sidebar links point to the correct anchors.
68
+
// Tests existing, deleted, new, and renamed files.
69
+
nd := NiceDiff{
70
+
Diff: []Diff{
71
+
{
72
+
Name: struct {
73
+
Old string `json:"old"`
74
+
New string `json:"new"`
75
+
}{Old: "", New: "src/modified.go"},
76
+
},
77
+
{
78
+
Name: struct {
79
+
Old string `json:"old"`
80
+
New string `json:"new"`
81
+
}{Old: "src/deleted.go", New: ""},
82
+
IsDelete: true,
83
+
},
84
+
{
85
+
Name: struct {
86
+
Old string `json:"old"`
87
+
New string `json:"new"`
88
+
}{Old: "", New: "src/new.go"},
89
+
IsNew: true,
90
+
},
91
+
{
92
+
Name: struct {
93
+
Old string `json:"old"`
94
+
New string `json:"new"`
95
+
}{Old: "src/old.go", New: "src/renamed.go"},
96
+
IsRename: true,
97
+
},
98
+
},
99
+
}
100
+
101
+
changedFiles := nd.ChangedFiles()
102
+
103
+
if len(changedFiles) != len(nd.Diff) {
104
+
t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff))
105
+
}
106
+
107
+
for i, diff := range nd.Diff {
108
+
if changedFiles[i] != diff.Id() {
109
+
t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id())
110
+
}
111
+
}
112
+
}
+17
-17
types/repo.go
+17
-17
types/repo.go
···
8
8
)
9
9
10
10
type RepoIndexResponse struct {
11
-
IsEmpty bool `json:"is_empty"`
12
-
Ref string `json:"ref,omitempty"`
13
-
Readme string `json:"readme,omitempty"`
14
-
ReadmeFileName string `json:"readme_file_name,omitempty"`
15
-
Commits []*object.Commit `json:"commits,omitempty"`
16
-
Description string `json:"description,omitempty"`
17
-
Files []NiceTree `json:"files,omitempty"`
18
-
Branches []Branch `json:"branches,omitempty"`
19
-
Tags []*TagReference `json:"tags,omitempty"`
20
-
TotalCommits int `json:"total_commits,omitempty"`
11
+
IsEmpty bool `json:"is_empty"`
12
+
Ref string `json:"ref,omitempty"`
13
+
Readme string `json:"readme,omitempty"`
14
+
ReadmeFileName string `json:"readme_file_name,omitempty"`
15
+
Commits []Commit `json:"commits,omitempty"`
16
+
Description string `json:"description,omitempty"`
17
+
Files []NiceTree `json:"files,omitempty"`
18
+
Branches []Branch `json:"branches,omitempty"`
19
+
Tags []*TagReference `json:"tags,omitempty"`
20
+
TotalCommits int `json:"total_commits,omitempty"`
21
21
}
22
22
23
23
type RepoLogResponse struct {
24
-
Commits []*object.Commit `json:"commits,omitempty"`
25
-
Ref string `json:"ref,omitempty"`
26
-
Description string `json:"description,omitempty"`
27
-
Log bool `json:"log,omitempty"`
28
-
Total int `json:"total,omitempty"`
29
-
Page int `json:"page,omitempty"`
30
-
PerPage int `json:"per_page,omitempty"`
24
+
Commits []Commit `json:"commits,omitempty"`
25
+
Ref string `json:"ref,omitempty"`
26
+
Description string `json:"description,omitempty"`
27
+
Log bool `json:"log,omitempty"`
28
+
Total int `json:"total,omitempty"`
29
+
Page int `json:"page,omitempty"`
30
+
PerPage int `json:"per_page,omitempty"`
31
31
}
32
32
33
33
type RepoCommitResponse struct {