forked from tangled.org/core
Monorepo for Tangled

Compare changes

Changed files
+4635 -2654
api
appview
crypto
docs
hook
knotserver
lexicons
pulls
nix
orm
patchutil
sets
spindle
types
+79 -20
api/tangled/cbor_gen.go
··· 7934 7934 } 7935 7935 7936 7936 cw := cbg.NewCborWriter(w) 7937 - fieldCount := 9 7937 + fieldCount := 10 7938 7938 7939 7939 if t.Body == nil { 7940 7940 fieldCount-- 7941 7941 } 7942 7942 7943 7943 if t.Mentions == nil { 7944 + fieldCount-- 7945 + } 7946 + 7947 + if t.Patch == nil { 7944 7948 fieldCount-- 7945 7949 } 7946 7950 ··· 8008 8012 } 8009 8013 8010 8014 // t.Patch (string) (string) 8011 - if len("patch") > 1000000 { 8012 - return xerrors.Errorf("Value in field \"patch\" was too long") 8013 - } 8015 + if t.Patch != nil { 8014 8016 8015 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil { 8016 - return err 8017 - } 8018 - if _, err := cw.WriteString(string("patch")); err != nil { 8019 - return err 8020 - } 8017 + if len("patch") > 1000000 { 8018 + return xerrors.Errorf("Value in field \"patch\" was too long") 8019 + } 8021 8020 8022 - if len(t.Patch) > 1000000 { 8023 - return xerrors.Errorf("Value in field t.Patch was too long") 8024 - } 8021 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil { 8022 + return err 8023 + } 8024 + if _, err := cw.WriteString(string("patch")); err != nil { 8025 + return err 8026 + } 8027 + 8028 + if t.Patch == nil { 8029 + if _, err := cw.Write(cbg.CborNull); err != nil { 8030 + return err 8031 + } 8032 + } else { 8033 + if len(*t.Patch) > 1000000 { 8034 + return xerrors.Errorf("Value in field t.Patch was too long") 8035 + } 8025 8036 8026 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil { 8027 - return err 8028 - } 8029 - if _, err := cw.WriteString(string(t.Patch)); err != nil { 8030 - return err 8037 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil { 8038 + return err 8039 + } 8040 + if _, err := cw.WriteString(string(*t.Patch)); err != nil { 8041 + return err 8042 + } 8043 + } 8031 8044 } 8032 8045 8033 8046 // t.Title (string) (string) ··· 8147 8160 return err 8148 8161 } 8149 8162 8163 + // t.PatchBlob (util.LexBlob) (struct) 8164 + if len("patchBlob") > 1000000 { 8165 + return xerrors.Errorf("Value in field \"patchBlob\" was too long") 8166 + } 8167 + 8168 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil { 8169 + return err 8170 + } 8171 + if _, err := cw.WriteString(string("patchBlob")); err != nil { 8172 + return err 8173 + } 8174 + 8175 + if err := t.PatchBlob.MarshalCBOR(cw); err != nil { 8176 + return err 8177 + } 8178 + 8150 8179 // t.References ([]string) (slice) 8151 8180 if t.References != nil { 8152 8181 ··· 8262 8291 case "patch": 8263 8292 8264 8293 { 8265 - sval, err := cbg.ReadStringWithMax(cr, 1000000) 8294 + b, err := cr.ReadByte() 8266 8295 if err != nil { 8267 8296 return err 8268 8297 } 8298 + if b != cbg.CborNull[0] { 8299 + if err := cr.UnreadByte(); err != nil { 8300 + return err 8301 + } 8269 8302 8270 - t.Patch = string(sval) 8303 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8304 + if err != nil { 8305 + return err 8306 + } 8307 + 8308 + t.Patch = (*string)(&sval) 8309 + } 8271 8310 } 8272 8311 // t.Title (string) (string) 8273 8312 case "title": ··· 8370 8409 } 8371 8410 8372 8411 t.CreatedAt = string(sval) 8412 + } 8413 + // t.PatchBlob (util.LexBlob) (struct) 8414 + case "patchBlob": 8415 + 8416 + { 8417 + 8418 + b, err := cr.ReadByte() 8419 + if err != nil { 8420 + return err 8421 + } 8422 + if b != cbg.CborNull[0] { 8423 + if err := cr.UnreadByte(); err != nil { 8424 + return err 8425 + } 8426 + t.PatchBlob = 
new(util.LexBlob) 8427 + if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil { 8428 + return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err) 8429 + } 8430 + } 8431 + 8373 8432 } 8374 8433 // t.References ([]string) (slice) 8375 8434 case "references":
+12 -9
api/tangled/repopull.go
···
17 17   } //
18 18   // RECORDTYPE: RepoPull
19 19   type RepoPull struct {
20    -     LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
21    -     Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22    -     CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23    -     Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
24    -     Patch string `json:"patch" cborgen:"patch"`
25    -     References []string `json:"references,omitempty" cborgen:"references,omitempty"`
26    -     Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
27    -     Target *RepoPull_Target `json:"target" cborgen:"target"`
28    -     Title string `json:"title" cborgen:"title"`
   20 +     LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
   21 +     Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
   22 +     CreatedAt string `json:"createdAt" cborgen:"createdAt"`
   23 +     Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
   24 +     // patch: (deprecated) use patchBlob instead
   25 +     Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
   26 +     // patchBlob: patch content
   27 +     PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"`
   28 +     References []string `json:"references,omitempty" cborgen:"references,omitempty"`
   29 +     Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
   30 +     Target *RepoPull_Target `json:"target" cborgen:"target"`
   31 +     Title string `json:"title" cborgen:"title"`
29 32   }
30 33
31 34   // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
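The schema change above is the heart of this comparison: sh.tangled.repo.pull now carries its patch as a blob reference (patchBlob), and the inline patch string becomes an optional, deprecated field. A minimal sketch of building a record against the new shape, assuming util resolves to indigo's lex/util package (which is what the generated code conventionally imports) and that the patch bytes have already been uploaded as a blob (for example via com.atproto.repo.uploadBlob); the helper name and parameters here are illustrative, not part of this change:

package example

import (
	lexutil "github.com/bluesky-social/indigo/lex/util"

	"tangled.org/core/api/tangled"
)

// newPullRecord builds a sh.tangled.repo.pull record using the new patchBlob
// field; blob is the LexBlob reference returned when the patch was uploaded.
func newPullRecord(title, createdAt string, target *tangled.RepoPull_Target, blob *lexutil.LexBlob) *tangled.RepoPull {
	return &tangled.RepoPull{
		Title:     title,
		CreatedAt: createdAt,
		Target:    target,
		PatchBlob: blob, // preferred location for the patch content
		Patch:     nil,  // deprecated inline patch left unset for new records
	}
}

Old records that still carry an inline patch keep decoding: the regenerated CBOR code above treats patch as optional on both the marshal and unmarshal paths.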
+6 -45
appview/commitverify/verify.go
··· 3 3 import ( 4 4 "log" 5 5 6 - "github.com/go-git/go-git/v5/plumbing/object" 7 6 "tangled.org/core/appview/db" 8 7 "tangled.org/core/appview/models" 9 8 "tangled.org/core/crypto" ··· 35 34 return "" 36 35 } 37 36 38 - func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) { 39 - ndCommits := []types.NiceDiff{} 40 - for _, commit := range commits { 41 - ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit)) 42 - } 43 - return GetVerifiedCommits(e, emailToDid, ndCommits) 44 - } 45 - 46 - func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) { 37 + func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) { 47 38 vcs := VerifiedCommits{} 48 39 49 40 didPubkeyCache := make(map[string][]models.PublicKey) 50 41 51 42 for _, commit := range ndCommits { 52 - c := commit.Commit 53 - 54 - committerEmail := c.Committer.Email 43 + committerEmail := commit.Committer.Email 55 44 if did, exists := emailToDid[committerEmail]; exists { 56 45 // check if we've already fetched public keys for this did 57 46 pubKeys, ok := didPubkeyCache[did] ··· 67 56 } 68 57 69 58 // try to verify with any associated pubkeys 59 + payload := commit.Payload() 60 + signature := commit.PGPSignature 70 61 for _, pk := range pubKeys { 71 - if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok { 62 + if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok { 72 63 73 64 fp, err := crypto.SSHFingerprint(pk.Key) 74 65 if err != nil { 75 66 log.Println("error computing ssh fingerprint:", err) 76 67 } 77 68 78 - vc := verifiedCommit{fingerprint: fp, hash: c.This} 69 + vc := verifiedCommit{fingerprint: fp, hash: commit.This} 79 70 vcs[vc] = struct{}{} 80 71 break 81 72 } ··· 86 77 87 78 return vcs, nil 88 79 } 89 - 90 - // ObjectCommitToNiceDiff is a compatibility function to convert a 91 - // commit object into a NiceDiff structure. 92 - func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff { 93 - var niceDiff types.NiceDiff 94 - 95 - // set commit information 96 - niceDiff.Commit.Message = c.Message 97 - niceDiff.Commit.Author = c.Author 98 - niceDiff.Commit.This = c.Hash.String() 99 - niceDiff.Commit.Committer = c.Committer 100 - niceDiff.Commit.Tree = c.TreeHash.String() 101 - niceDiff.Commit.PGPSignature = c.PGPSignature 102 - 103 - changeId, ok := c.ExtraHeaders["change-id"] 104 - if ok { 105 - niceDiff.Commit.ChangedId = string(changeId) 106 - } 107 - 108 - // set parent hash if available 109 - if len(c.ParentHashes) > 0 { 110 - niceDiff.Commit.Parent = c.ParentHashes[0].String() 111 - } 112 - 113 - // XXX: Stats and Diff fields are typically populated 114 - // after fetching the actual diff information, which isn't 115 - // directly available in the commit object itself. 116 - 117 - return niceDiff 118 - }
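commitverify now checks a commit's signature directly against its signed payload with crypto.VerifySignature, instead of round-tripping go-git object.Commit values through the removed NiceDiff shim. A compact restatement of the new key-matching step, wrapped in a hypothetical helper (verifyWithKeys) that uses only the calls visible in the diff:

package example

import (
	"log"

	"tangled.org/core/appview/models"
	"tangled.org/core/crypto"
	"tangled.org/core/types"
)

// verifyWithKeys returns the fingerprint of the first public key that verifies
// the commit's signed payload, mirroring the loop in GetVerifiedCommits above.
func verifyWithKeys(commit types.Commit, pubKeys []models.PublicKey) (string, bool) {
	payload := commit.Payload()      // canonical signed payload of the commit
	signature := commit.PGPSignature // detached signature recorded on the commit
	for _, pk := range pubKeys {
		if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
			fp, err := crypto.SSHFingerprint(pk.Key)
			if err != nil {
				log.Println("error computing ssh fingerprint:", err)
			}
			return fp, true
		}
	}
	return "", false
}

Callers that previously handed []*object.Commit to the removed GetVerifiedObjectCommits now build []types.Commit themselves before calling GetVerifiedCommits.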
+3 -2
appview/db/artifact.go
··· 8 8 "github.com/go-git/go-git/v5/plumbing" 9 9 "github.com/ipfs/go-cid" 10 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 11 12 ) 12 13 13 14 func AddArtifact(e Execer, artifact models.Artifact) error { ··· 37 38 return err 38 39 } 39 40 40 - func GetArtifact(e Execer, filters ...filter) ([]models.Artifact, error) { 41 + func GetArtifact(e Execer, filters ...orm.Filter) ([]models.Artifact, error) { 41 42 var artifacts []models.Artifact 42 43 43 44 var conditions []string ··· 109 110 return artifacts, nil 110 111 } 111 112 112 - func DeleteArtifact(e Execer, filters ...filter) error { 113 + func DeleteArtifact(e Execer, filters ...orm.Filter) error { 113 114 var conditions []string 114 115 var args []any 115 116 for _, filter := range filters {
+4 -3
appview/db/collaborators.go
··· 6 6 "time" 7 7 8 8 "tangled.org/core/appview/models" 9 + "tangled.org/core/orm" 9 10 ) 10 11 11 12 func AddCollaborator(e Execer, c models.Collaborator) error { ··· 16 17 return err 17 18 } 18 19 19 - func DeleteCollaborator(e Execer, filters ...filter) error { 20 + func DeleteCollaborator(e Execer, filters ...orm.Filter) error { 20 21 var conditions []string 21 22 var args []any 22 23 for _, filter := range filters { ··· 58 59 return nil, nil 59 60 } 60 61 61 - return GetRepos(e, 0, FilterIn("at_uri", repoAts)) 62 + return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 62 63 } 63 64 64 - func GetCollaborators(e Execer, filters ...filter) ([]models.Collaborator, error) { 65 + func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) { 65 66 var collaborators []models.Collaborator 66 67 var conditions []string 67 68 var args []any
+24 -137
appview/db/db.go
··· 3 3 import ( 4 4 "context" 5 5 "database/sql" 6 - "fmt" 7 6 "log/slog" 8 - "reflect" 9 7 "strings" 10 8 11 9 _ "github.com/mattn/go-sqlite3" 12 10 "tangled.org/core/log" 11 + "tangled.org/core/orm" 13 12 ) 14 13 15 14 type DB struct { ··· 584 583 } 585 584 586 585 // run migrations 587 - runMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error { 586 + orm.RunMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error { 588 587 tx.Exec(` 589 588 alter table repos add column description text check (length(description) <= 200); 590 589 `) 591 590 return nil 592 591 }) 593 592 594 - runMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error { 593 + orm.RunMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error { 595 594 // add unconstrained column 596 595 _, err := tx.Exec(` 597 596 alter table public_keys ··· 614 613 return nil 615 614 }) 616 615 617 - runMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error { 616 + orm.RunMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error { 618 617 _, err := tx.Exec(` 619 618 alter table comments drop column comment_at; 620 619 alter table comments add column rkey text; ··· 622 621 return err 623 622 }) 624 623 625 - runMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error { 624 + orm.RunMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error { 626 625 _, err := tx.Exec(` 627 626 alter table comments add column deleted text; -- timestamp 628 627 alter table comments add column edited text; -- timestamp ··· 630 629 return err 631 630 }) 632 631 633 - runMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error { 632 + orm.RunMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error { 634 633 _, err := tx.Exec(` 635 634 alter table pulls add column source_branch text; 636 635 alter table pulls add column source_repo_at text; ··· 639 638 return err 640 639 }) 641 640 642 - runMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error { 641 + orm.RunMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error { 643 642 _, err := tx.Exec(` 644 643 alter table repos add column source text; 645 644 `) ··· 651 650 // 652 651 // [0]: https://sqlite.org/pragma.html#pragma_foreign_keys 653 652 conn.ExecContext(ctx, "pragma foreign_keys = off;") 654 - runMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error { 653 + orm.RunMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error { 655 654 _, err := tx.Exec(` 656 655 create table pulls_new ( 657 656 -- identifiers ··· 708 707 }) 709 708 conn.ExecContext(ctx, "pragma foreign_keys = on;") 710 709 711 - runMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error { 710 + orm.RunMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error { 712 711 tx.Exec(` 713 712 alter table repos add column spindle text; 714 713 `) ··· 718 717 // drop all knot secrets, add unique constraint to knots 719 718 // 720 719 // knots will henceforth use service auth for signed requests 721 - runMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error { 720 + orm.RunMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error { 722 721 _, err := tx.Exec(` 723 722 create table registrations_new ( 724 723 id integer primary key autoincrement, ··· 741 740 }) 742 741 743 742 // 
recreate and add rkey + created columns with default constraint 744 - runMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error { 743 + orm.RunMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error { 745 744 // create new table 746 745 // - repo_at instead of repo integer 747 746 // - rkey field ··· 795 794 return err 796 795 }) 797 796 798 - runMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error { 797 + orm.RunMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error { 799 798 _, err := tx.Exec(` 800 799 alter table issues add column rkey text not null default ''; 801 800 ··· 807 806 }) 808 807 809 808 // repurpose the read-only column to "needs-upgrade" 810 - runMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error { 809 + orm.RunMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error { 811 810 _, err := tx.Exec(` 812 811 alter table registrations rename column read_only to needs_upgrade; 813 812 `) ··· 815 814 }) 816 815 817 816 // require all knots to upgrade after the release of total xrpc 818 - runMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error { 817 + orm.RunMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error { 819 818 _, err := tx.Exec(` 820 819 update registrations set needs_upgrade = 1; 821 820 `) ··· 823 822 }) 824 823 825 824 // require all knots to upgrade after the release of total xrpc 826 - runMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error { 825 + orm.RunMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error { 827 826 _, err := tx.Exec(` 828 827 alter table spindles add column needs_upgrade integer not null default 0; 829 828 `) ··· 841 840 // 842 841 // disable foreign-keys for the next migration 843 842 conn.ExecContext(ctx, "pragma foreign_keys = off;") 844 - runMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error { 843 + orm.RunMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error { 845 844 _, err := tx.Exec(` 846 845 create table if not exists issues_new ( 847 846 -- identifiers ··· 911 910 // - new columns 912 911 // * column "reply_to" which can be any other comment 913 912 // * column "at-uri" which is a generated column 914 - runMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error { 913 + orm.RunMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error { 915 914 _, err := tx.Exec(` 916 915 create table if not exists issue_comments ( 917 916 -- identifiers ··· 971 970 // 972 971 // disable foreign-keys for the next migration 973 972 conn.ExecContext(ctx, "pragma foreign_keys = off;") 974 - runMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error { 973 + orm.RunMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error { 975 974 _, err := tx.Exec(` 976 975 create table if not exists pulls_new ( 977 976 -- identifiers ··· 1052 1051 // 1053 1052 // disable foreign-keys for the next migration 1054 1053 conn.ExecContext(ctx, "pragma foreign_keys = off;") 1055 - runMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error { 1054 + orm.RunMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error { 1056 1055 _, err := tx.Exec(` 1057 1056 create table if not exists pull_submissions_new ( 1058 1057 -- identifiers ··· 1106 1105 
1107 1106 // knots may report the combined patch for a comparison, we can store that on the appview side 1108 1107 // (but not on the pds record), because calculating the combined patch requires a git index 1109 - runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error { 1108 + orm.RunMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error { 1110 1109 _, err := tx.Exec(` 1111 1110 alter table pull_submissions add column combined text; 1112 1111 `) 1113 1112 return err 1114 1113 }) 1115 1114 1116 - runMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error { 1115 + orm.RunMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error { 1117 1116 _, err := tx.Exec(` 1118 1117 alter table profile add column pronouns text; 1119 1118 `) 1120 1119 return err 1121 1120 }) 1122 1121 1123 - runMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error { 1122 + orm.RunMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error { 1124 1123 _, err := tx.Exec(` 1125 1124 alter table repos add column website text; 1126 1125 alter table repos add column topics text; ··· 1128 1127 return err 1129 1128 }) 1130 1129 1131 - runMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error { 1130 + orm.RunMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error { 1132 1131 _, err := tx.Exec(` 1133 1132 alter table notification_preferences add column user_mentioned integer not null default 1; 1134 1133 `) ··· 1136 1135 }) 1137 1136 1138 1137 // remove the foreign key constraints from stars. 1139 - runMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error { 1138 + orm.RunMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error { 1140 1139 _, err := tx.Exec(` 1141 1140 create table stars_new ( 1142 1141 id integer primary key autoincrement, ··· 1180 1179 }, nil 1181 1180 } 1182 1181 1183 - type migrationFn = func(*sql.Tx) error 1184 - 1185 - func runMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error { 1186 - logger = logger.With("migration", name) 1187 - 1188 - tx, err := c.BeginTx(context.Background(), nil) 1189 - if err != nil { 1190 - return err 1191 - } 1192 - defer tx.Rollback() 1193 - 1194 - var exists bool 1195 - err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists) 1196 - if err != nil { 1197 - return err 1198 - } 1199 - 1200 - if !exists { 1201 - // run migration 1202 - err = migrationFn(tx) 1203 - if err != nil { 1204 - logger.Error("failed to run migration", "err", err) 1205 - return err 1206 - } 1207 - 1208 - // mark migration as complete 1209 - _, err = tx.Exec("insert into migrations (name) values (?)", name) 1210 - if err != nil { 1211 - logger.Error("failed to mark migration as complete", "err", err) 1212 - return err 1213 - } 1214 - 1215 - // commit the transaction 1216 - if err := tx.Commit(); err != nil { 1217 - return err 1218 - } 1219 - 1220 - logger.Info("migration applied successfully") 1221 - } else { 1222 - logger.Warn("skipped migration, already applied") 1223 - } 1224 - 1225 - return nil 1226 - } 1227 - 1228 1182 func (d *DB) Close() error { 1229 1183 return d.DB.Close() 1230 1184 } 1231 - 1232 - type filter struct { 1233 - key string 1234 - arg any 1235 - cmp string 1236 - } 1237 - 1238 - func newFilter(key, cmp string, arg any) filter { 1239 - return filter{ 1240 - key: key, 1241 - arg: arg, 1242 - cmp: cmp, 1243 - } 1244 - } 
1245 - 1246 - func FilterEq(key string, arg any) filter { return newFilter(key, "=", arg) } 1247 - func FilterNotEq(key string, arg any) filter { return newFilter(key, "<>", arg) } 1248 - func FilterGte(key string, arg any) filter { return newFilter(key, ">=", arg) } 1249 - func FilterLte(key string, arg any) filter { return newFilter(key, "<=", arg) } 1250 - func FilterIs(key string, arg any) filter { return newFilter(key, "is", arg) } 1251 - func FilterIsNot(key string, arg any) filter { return newFilter(key, "is not", arg) } 1252 - func FilterIn(key string, arg any) filter { return newFilter(key, "in", arg) } 1253 - func FilterLike(key string, arg any) filter { return newFilter(key, "like", arg) } 1254 - func FilterNotLike(key string, arg any) filter { return newFilter(key, "not like", arg) } 1255 - func FilterContains(key string, arg any) filter { 1256 - return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg)) 1257 - } 1258 - 1259 - func (f filter) Condition() string { 1260 - rv := reflect.ValueOf(f.arg) 1261 - kind := rv.Kind() 1262 - 1263 - // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)` 1264 - if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 1265 - if rv.Len() == 0 { 1266 - // always false 1267 - return "1 = 0" 1268 - } 1269 - 1270 - placeholders := make([]string, rv.Len()) 1271 - for i := range placeholders { 1272 - placeholders[i] = "?" 1273 - } 1274 - 1275 - return fmt.Sprintf("%s %s (%s)", f.key, f.cmp, strings.Join(placeholders, ", ")) 1276 - } 1277 - 1278 - return fmt.Sprintf("%s %s ?", f.key, f.cmp) 1279 - } 1280 - 1281 - func (f filter) Arg() []any { 1282 - rv := reflect.ValueOf(f.arg) 1283 - kind := rv.Kind() 1284 - if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 1285 - if rv.Len() == 0 { 1286 - return nil 1287 - } 1288 - 1289 - out := make([]any, rv.Len()) 1290 - for i := range rv.Len() { 1291 - out[i] = rv.Index(i).Interface() 1292 - } 1293 - return out 1294 - } 1295 - 1296 - return []any{f.arg} 1297 - }
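Most of this comparison is mechanical: the unexported filter type and runMigration helper removed from appview/db above reappear as the exported orm.Filter (with a public Key field plus Condition() and Arg()) and orm.RunMigration in the new orm package, and every call site is renamed accordingly. A small sketch of the pattern the rewritten db helpers keep using, with a hypothetical whereClause wrapper and the semantics of the removed code (assuming the orm package carries them over unchanged):

package example

import (
	"strings"

	"tangled.org/core/orm"
)

// whereClause compiles orm filters into a SQL where-clause and its argument
// list, mirroring the loop at the top of each rewritten appview/db helper.
func whereClause(filters ...orm.Filter) (string, []any) {
	var conditions []string
	var args []any
	for _, f := range filters {
		conditions = append(conditions, f.Condition()) // e.g. "did = ?" or "at_uri in (?, ?)"
		args = append(args, f.Arg()...)                // slice filters expand to one arg per placeholder
	}
	if len(conditions) == 0 {
		return "", nil
	}
	return " where " + strings.Join(conditions, " and "), args
}

So a call like GetRepos(e, 0, orm.FilterEq("did", did), orm.FilterIn("at_uri", uris)) still ends up running a query suffixed with " where did = ? and at_uri in (?, ?)" (for a two-element uris). Migrations move the same way: orm.RunMigration(conn, logger, name, func(tx *sql.Tx) error { ... }) keeps the removed runMigration's behaviour of recording applied names in the migrations table and skipping ones that are already present.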
+6 -3
appview/db/follow.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 13 func AddFollow(e Execer, follow *models.Follow) error { ··· 134 135 return result, nil 135 136 } 136 137 137 - func GetFollows(e Execer, limit int, filters ...filter) ([]models.Follow, error) { 138 + func GetFollows(e Execer, limit int, filters ...orm.Filter) ([]models.Follow, error) { 138 139 var follows []models.Follow 139 140 140 141 var conditions []string ··· 166 167 if err != nil { 167 168 return nil, err 168 169 } 170 + defer rows.Close() 171 + 169 172 for rows.Next() { 170 173 var follow models.Follow 171 174 var followedAt string ··· 191 194 } 192 195 193 196 func GetFollowers(e Execer, did string) ([]models.Follow, error) { 194 - return GetFollows(e, 0, FilterEq("subject_did", did)) 197 + return GetFollows(e, 0, orm.FilterEq("subject_did", did)) 195 198 } 196 199 197 200 func GetFollowing(e Execer, did string) ([]models.Follow, error) { 198 - return GetFollows(e, 0, FilterEq("user_did", did)) 201 + return GetFollows(e, 0, orm.FilterEq("user_did", did)) 199 202 } 200 203 201 204 func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
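Besides the orm rename, follow.go and several other helpers in this changeset (issues.go, language.go, profile.go, registration.go, repos.go, star.go) gain a deferred rows.Close() right after the query's error check, so the *sql.Rows handle is released even when a scan fails partway through. Inside each helper the shape is now, schematically:

	rows, err := e.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // previously left open if a Scan below returned early

	for rows.Next() {
		// ... scan the row into its model and append it to the result ...
	}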
+22 -20
appview/db/issues.go
··· 13 13 "tangled.org/core/api/tangled" 14 14 "tangled.org/core/appview/models" 15 15 "tangled.org/core/appview/pagination" 16 + "tangled.org/core/orm" 16 17 ) 17 18 18 19 func PutIssue(tx *sql.Tx, issue *models.Issue) error { ··· 27 28 28 29 issues, err := GetIssues( 29 30 tx, 30 - FilterEq("did", issue.Did), 31 - FilterEq("rkey", issue.Rkey), 31 + orm.FilterEq("did", issue.Did), 32 + orm.FilterEq("rkey", issue.Rkey), 32 33 ) 33 34 switch { 34 35 case err != nil: ··· 98 99 return nil 99 100 } 100 101 101 - func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]models.Issue, error) { 102 + func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) { 102 103 issueMap := make(map[string]*models.Issue) // at-uri -> issue 103 104 104 105 var conditions []string ··· 114 115 whereClause = " where " + strings.Join(conditions, " and ") 115 116 } 116 117 117 - pLower := FilterGte("row_num", page.Offset+1) 118 - pUpper := FilterLte("row_num", page.Offset+page.Limit) 118 + pLower := orm.FilterGte("row_num", page.Offset+1) 119 + pUpper := orm.FilterLte("row_num", page.Offset+page.Limit) 119 120 120 121 pageClause := "" 121 122 if page.Limit > 0 { ··· 205 206 repoAts = append(repoAts, string(issue.RepoAt)) 206 207 } 207 208 208 - repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts)) 209 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 209 210 if err != nil { 210 211 return nil, fmt.Errorf("failed to build repo mappings: %w", err) 211 212 } ··· 228 229 // collect comments 229 230 issueAts := slices.Collect(maps.Keys(issueMap)) 230 231 231 - comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts)) 232 + comments, err := GetIssueComments(e, orm.FilterIn("issue_at", issueAts)) 232 233 if err != nil { 233 234 return nil, fmt.Errorf("failed to query comments: %w", err) 234 235 } ··· 240 241 } 241 242 242 243 // collect allLabels for each issue 243 - allLabels, err := GetLabels(e, FilterIn("subject", issueAts)) 244 + allLabels, err := GetLabels(e, orm.FilterIn("subject", issueAts)) 244 245 if err != nil { 245 246 return nil, fmt.Errorf("failed to query labels: %w", err) 246 247 } ··· 251 252 } 252 253 253 254 // collect references for each issue 254 - allReferencs, err := GetReferencesAll(e, FilterIn("from_at", issueAts)) 255 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", issueAts)) 255 256 if err != nil { 256 257 return nil, fmt.Errorf("failed to query reference_links: %w", err) 257 258 } ··· 277 278 issues, err := GetIssuesPaginated( 278 279 e, 279 280 pagination.Page{}, 280 - FilterEq("repo_at", repoAt), 281 - FilterEq("issue_id", issueId), 281 + orm.FilterEq("repo_at", repoAt), 282 + orm.FilterEq("issue_id", issueId), 282 283 ) 283 284 if err != nil { 284 285 return nil, err ··· 290 291 return &issues[0], nil 291 292 } 292 293 293 - func GetIssues(e Execer, filters ...filter) ([]models.Issue, error) { 294 + func GetIssues(e Execer, filters ...orm.Filter) ([]models.Issue, error) { 294 295 return GetIssuesPaginated(e, pagination.Page{}, filters...) 
295 296 } 296 297 ··· 298 299 func GetIssueIDs(e Execer, opts models.IssueSearchOptions) ([]int64, error) { 299 300 var ids []int64 300 301 301 - var filters []filter 302 + var filters []orm.Filter 302 303 openValue := 0 303 304 if opts.IsOpen { 304 305 openValue = 1 305 306 } 306 - filters = append(filters, FilterEq("open", openValue)) 307 + filters = append(filters, orm.FilterEq("open", openValue)) 307 308 if opts.RepoAt != "" { 308 - filters = append(filters, FilterEq("repo_at", opts.RepoAt)) 309 + filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt)) 309 310 } 310 311 311 312 var conditions []string ··· 397 398 return id, nil 398 399 } 399 400 400 - func DeleteIssueComments(e Execer, filters ...filter) error { 401 + func DeleteIssueComments(e Execer, filters ...orm.Filter) error { 401 402 var conditions []string 402 403 var args []any 403 404 for _, filter := range filters { ··· 416 417 return err 417 418 } 418 419 419 - func GetIssueComments(e Execer, filters ...filter) ([]models.IssueComment, error) { 420 + func GetIssueComments(e Execer, filters ...orm.Filter) ([]models.IssueComment, error) { 420 421 commentMap := make(map[string]*models.IssueComment) 421 422 422 423 var conditions []string ··· 451 452 if err != nil { 452 453 return nil, err 453 454 } 455 + defer rows.Close() 454 456 455 457 for rows.Next() { 456 458 var comment models.IssueComment ··· 506 508 507 509 // collect references for each comments 508 510 commentAts := slices.Collect(maps.Keys(commentMap)) 509 - allReferencs, err := GetReferencesAll(e, FilterIn("from_at", commentAts)) 511 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts)) 510 512 if err != nil { 511 513 return nil, fmt.Errorf("failed to query reference_links: %w", err) 512 514 } ··· 548 550 return nil 549 551 } 550 552 551 - func CloseIssues(e Execer, filters ...filter) error { 553 + func CloseIssues(e Execer, filters ...orm.Filter) error { 552 554 var conditions []string 553 555 var args []any 554 556 for _, filter := range filters { ··· 566 568 return err 567 569 } 568 570 569 - func ReopenIssues(e Execer, filters ...filter) error { 571 + func ReopenIssues(e Execer, filters ...orm.Filter) error { 570 572 var conditions []string 571 573 var args []any 572 574 for _, filter := range filters {
+8 -7
appview/db/label.go
··· 10 10 11 11 "github.com/bluesky-social/indigo/atproto/syntax" 12 12 "tangled.org/core/appview/models" 13 + "tangled.org/core/orm" 13 14 ) 14 15 15 16 // no updating type for now ··· 59 60 return id, nil 60 61 } 61 62 62 - func DeleteLabelDefinition(e Execer, filters ...filter) error { 63 + func DeleteLabelDefinition(e Execer, filters ...orm.Filter) error { 63 64 var conditions []string 64 65 var args []any 65 66 for _, filter := range filters { ··· 75 76 return err 76 77 } 77 78 78 - func GetLabelDefinitions(e Execer, filters ...filter) ([]models.LabelDefinition, error) { 79 + func GetLabelDefinitions(e Execer, filters ...orm.Filter) ([]models.LabelDefinition, error) { 79 80 var labelDefinitions []models.LabelDefinition 80 81 var conditions []string 81 82 var args []any ··· 167 168 } 168 169 169 170 // helper to get exactly one label def 170 - func GetLabelDefinition(e Execer, filters ...filter) (*models.LabelDefinition, error) { 171 + func GetLabelDefinition(e Execer, filters ...orm.Filter) (*models.LabelDefinition, error) { 171 172 labels, err := GetLabelDefinitions(e, filters...) 172 173 if err != nil { 173 174 return nil, err ··· 227 228 return id, nil 228 229 } 229 230 230 - func GetLabelOps(e Execer, filters ...filter) ([]models.LabelOp, error) { 231 + func GetLabelOps(e Execer, filters ...orm.Filter) ([]models.LabelOp, error) { 231 232 var labelOps []models.LabelOp 232 233 var conditions []string 233 234 var args []any ··· 302 303 } 303 304 304 305 // get labels for a given list of subject URIs 305 - func GetLabels(e Execer, filters ...filter) (map[syntax.ATURI]models.LabelState, error) { 306 + func GetLabels(e Execer, filters ...orm.Filter) (map[syntax.ATURI]models.LabelState, error) { 306 307 ops, err := GetLabelOps(e, filters...) 307 308 if err != nil { 308 309 return nil, err ··· 322 323 } 323 324 labelAts := slices.Collect(maps.Keys(labelAtSet)) 324 325 325 - actx, err := NewLabelApplicationCtx(e, FilterIn("at_uri", labelAts)) 326 + actx, err := NewLabelApplicationCtx(e, orm.FilterIn("at_uri", labelAts)) 326 327 if err != nil { 327 328 return nil, err 328 329 } ··· 338 339 return results, nil 339 340 } 340 341 341 - func NewLabelApplicationCtx(e Execer, filters ...filter) (*models.LabelApplicationCtx, error) { 342 + func NewLabelApplicationCtx(e Execer, filters ...orm.Filter) (*models.LabelApplicationCtx, error) { 342 343 labels, err := GetLabelDefinitions(e, filters...) 343 344 if err != nil { 344 345 return nil, err
+6 -5
appview/db/language.go
··· 7 7 8 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetRepoLanguages(e Execer, filters ...filter) ([]models.RepoLanguage, error) { 13 + func GetRepoLanguages(e Execer, filters ...orm.Filter) ([]models.RepoLanguage, error) { 13 14 var conditions []string 14 15 var args []any 15 16 for _, filter := range filters { ··· 27 28 whereClause, 28 29 ) 29 30 rows, err := e.Query(query, args...) 30 - 31 31 if err != nil { 32 32 return nil, fmt.Errorf("failed to execute query: %w ", err) 33 33 } 34 + defer rows.Close() 34 35 35 36 var langs []models.RepoLanguage 36 37 for rows.Next() { ··· 85 86 return nil 86 87 } 87 88 88 - func DeleteRepoLanguages(e Execer, filters ...filter) error { 89 + func DeleteRepoLanguages(e Execer, filters ...orm.Filter) error { 89 90 var conditions []string 90 91 var args []any 91 92 for _, filter := range filters { ··· 107 108 func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error { 108 109 err := DeleteRepoLanguages( 109 110 tx, 110 - FilterEq("repo_at", repoAt), 111 - FilterEq("ref", ref), 111 + orm.FilterEq("repo_at", repoAt), 112 + orm.FilterEq("ref", ref), 112 113 ) 113 114 if err != nil { 114 115 return fmt.Errorf("failed to delete existing languages: %w", err)
+14 -13
appview/db/notifications.go
··· 11 11 "github.com/bluesky-social/indigo/atproto/syntax" 12 12 "tangled.org/core/appview/models" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func CreateNotification(e Execer, notification *models.Notification) error { ··· 44 45 } 45 46 46 47 // GetNotificationsPaginated retrieves notifications with filters and pagination 47 - func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...filter) ([]*models.Notification, error) { 48 + func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.Notification, error) { 48 49 var conditions []string 49 50 var args []any 50 51 ··· 113 114 } 114 115 115 116 // GetNotificationsWithEntities retrieves notifications with their related entities 116 - func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...filter) ([]*models.NotificationWithEntity, error) { 117 + func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.NotificationWithEntity, error) { 117 118 var conditions []string 118 119 var args []any 119 120 ··· 256 257 } 257 258 258 259 // GetNotifications retrieves notifications with filters 259 - func GetNotifications(e Execer, filters ...filter) ([]*models.Notification, error) { 260 + func GetNotifications(e Execer, filters ...orm.Filter) ([]*models.Notification, error) { 260 261 return GetNotificationsPaginated(e, pagination.FirstPage(), filters...) 261 262 } 262 263 263 - func CountNotifications(e Execer, filters ...filter) (int64, error) { 264 + func CountNotifications(e Execer, filters ...orm.Filter) (int64, error) { 264 265 var conditions []string 265 266 var args []any 266 267 for _, filter := range filters { ··· 285 286 } 286 287 287 288 func MarkNotificationRead(e Execer, notificationID int64, userDID string) error { 288 - idFilter := FilterEq("id", notificationID) 289 - recipientFilter := FilterEq("recipient_did", userDID) 289 + idFilter := orm.FilterEq("id", notificationID) 290 + recipientFilter := orm.FilterEq("recipient_did", userDID) 290 291 291 292 query := fmt.Sprintf(` 292 293 UPDATE notifications ··· 314 315 } 315 316 316 317 func MarkAllNotificationsRead(e Execer, userDID string) error { 317 - recipientFilter := FilterEq("recipient_did", userDID) 318 - readFilter := FilterEq("read", 0) 318 + recipientFilter := orm.FilterEq("recipient_did", userDID) 319 + readFilter := orm.FilterEq("read", 0) 319 320 320 321 query := fmt.Sprintf(` 321 322 UPDATE notifications ··· 334 335 } 335 336 336 337 func DeleteNotification(e Execer, notificationID int64, userDID string) error { 337 - idFilter := FilterEq("id", notificationID) 338 - recipientFilter := FilterEq("recipient_did", userDID) 338 + idFilter := orm.FilterEq("id", notificationID) 339 + recipientFilter := orm.FilterEq("recipient_did", userDID) 339 340 340 341 query := fmt.Sprintf(` 341 342 DELETE FROM notifications ··· 362 363 } 363 364 364 365 func GetNotificationPreference(e Execer, userDid string) (*models.NotificationPreferences, error) { 365 - prefs, err := GetNotificationPreferences(e, FilterEq("user_did", userDid)) 366 + prefs, err := GetNotificationPreferences(e, orm.FilterEq("user_did", userDid)) 366 367 if err != nil { 367 368 return nil, err 368 369 } ··· 375 376 return p, nil 376 377 } 377 378 378 - func GetNotificationPreferences(e Execer, filters ...filter) (map[syntax.DID]*models.NotificationPreferences, error) { 379 + func GetNotificationPreferences(e Execer, filters ...orm.Filter) 
(map[syntax.DID]*models.NotificationPreferences, error) { 379 380 prefsMap := make(map[syntax.DID]*models.NotificationPreferences) 380 381 381 382 var conditions []string ··· 483 484 484 485 func (d *DB) ClearOldNotifications(ctx context.Context, olderThan time.Duration) error { 485 486 cutoff := time.Now().Add(-olderThan) 486 - createdFilter := FilterLte("created", cutoff) 487 + createdFilter := orm.FilterLte("created", cutoff) 487 488 488 489 query := fmt.Sprintf(` 489 490 DELETE FROM notifications
+6 -5
appview/db/pipeline.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) { 13 + func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) { 13 14 var pipelines []models.Pipeline 14 15 15 16 var conditions []string ··· 168 169 169 170 // this is a mega query, but the most useful one: 170 171 // get N pipelines, for each one get the latest status of its N workflows 171 - func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) { 172 + func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) { 172 173 var conditions []string 173 174 var args []any 174 175 for _, filter := range filters { 175 - filter.key = "p." + filter.key // the table is aliased in the query to `p` 176 + filter.Key = "p." + filter.Key // the table is aliased in the query to `p` 176 177 conditions = append(conditions, filter.Condition()) 177 178 args = append(args, filter.Arg()...) 178 179 } ··· 264 265 conditions = nil 265 266 args = nil 266 267 for _, p := range pipelines { 267 - knotFilter := FilterEq("pipeline_knot", p.Knot) 268 - rkeyFilter := FilterEq("pipeline_rkey", p.Rkey) 268 + knotFilter := orm.FilterEq("pipeline_knot", p.Knot) 269 + rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey) 269 270 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition())) 270 271 args = append(args, p.Knot) 271 272 args = append(args, p.Rkey)
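One call site needs more than a rename: GetPipelineStatuses aliases the pipelines table as p, and because orm.Filter now exports its Key field, the incoming filters can be retargeted before they are compiled, exactly as the hunk above does:

	for _, f := range filters {
		f.Key = "p." + f.Key // the query aliases the pipelines table as `p`
		conditions = append(conditions, f.Condition())
		args = append(args, f.Arg()...)
	}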
+11 -5
appview/db/profile.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 const TimeframeMonths = 7 ··· 44 45 45 46 issues, err := GetIssues( 46 47 e, 47 - FilterEq("did", forDid), 48 - FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)), 48 + orm.FilterEq("did", forDid), 49 + orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)), 49 50 ) 50 51 if err != nil { 51 52 return nil, fmt.Errorf("error getting issues by owner did: %w", err) ··· 65 66 *items = append(*items, &issue) 66 67 } 67 68 68 - repos, err := GetRepos(e, 0, FilterEq("did", forDid)) 69 + repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid)) 69 70 if err != nil { 70 71 return nil, fmt.Errorf("error getting all repos by did: %w", err) 71 72 } ··· 199 200 return tx.Commit() 200 201 } 201 202 202 - func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) { 203 + func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) { 203 204 var conditions []string 204 205 var args []any 205 206 for _, filter := range filters { ··· 229 230 if err != nil { 230 231 return nil, err 231 232 } 233 + defer rows.Close() 232 234 233 235 profileMap := make(map[string]*models.Profile) 234 236 for rows.Next() { ··· 269 271 if err != nil { 270 272 return nil, err 271 273 } 274 + defer rows.Close() 275 + 272 276 idxs := make(map[string]int) 273 277 for did := range profileMap { 274 278 idxs[did] = 0 ··· 289 293 if err != nil { 290 294 return nil, err 291 295 } 296 + defer rows.Close() 297 + 292 298 idxs = make(map[string]int) 293 299 for did := range profileMap { 294 300 idxs[did] = 0 ··· 441 447 } 442 448 443 449 // ensure all pinned repos are either own repos or collaborating repos 444 - repos, err := GetRepos(e, 0, FilterEq("did", profile.Did)) 450 + repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did)) 445 451 if err != nil { 446 452 log.Printf("getting repos for %s: %s", profile.Did, err) 447 453 }
+21 -20
appview/db/pulls.go
··· 13 13 14 14 "github.com/bluesky-social/indigo/atproto/syntax" 15 15 "tangled.org/core/appview/models" 16 + "tangled.org/core/orm" 16 17 ) 17 18 18 19 func NewPull(tx *sql.Tx, pull *models.Pull) error { ··· 118 119 return pullId - 1, err 119 120 } 120 121 121 - func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) { 122 + func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) { 122 123 pulls := make(map[syntax.ATURI]*models.Pull) 123 124 124 125 var conditions []string ··· 229 230 for _, p := range pulls { 230 231 pullAts = append(pullAts, p.AtUri()) 231 232 } 232 - submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts)) 233 + submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts)) 233 234 if err != nil { 234 235 return nil, fmt.Errorf("failed to get submissions: %w", err) 235 236 } ··· 241 242 } 242 243 243 244 // collect allLabels for each issue 244 - allLabels, err := GetLabels(e, FilterIn("subject", pullAts)) 245 + allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts)) 245 246 if err != nil { 246 247 return nil, fmt.Errorf("failed to query labels: %w", err) 247 248 } ··· 258 259 sourceAts = append(sourceAts, *p.PullSource.RepoAt) 259 260 } 260 261 } 261 - sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts)) 262 + sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts)) 262 263 if err != nil && !errors.Is(err, sql.ErrNoRows) { 263 264 return nil, fmt.Errorf("failed to get source repos: %w", err) 264 265 } ··· 274 275 } 275 276 } 276 277 277 - allReferences, err := GetReferencesAll(e, FilterIn("from_at", pullAts)) 278 + allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts)) 278 279 if err != nil { 279 280 return nil, fmt.Errorf("failed to query reference_links: %w", err) 280 281 } ··· 295 296 return orderedByPullId, nil 296 297 } 297 298 298 - func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) { 299 + func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) { 299 300 return GetPullsWithLimit(e, 0, filters...) 
300 301 } 301 302 302 303 func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) { 303 304 var ids []int64 304 305 305 - var filters []filter 306 - filters = append(filters, FilterEq("state", opts.State)) 306 + var filters []orm.Filter 307 + filters = append(filters, orm.FilterEq("state", opts.State)) 307 308 if opts.RepoAt != "" { 308 - filters = append(filters, FilterEq("repo_at", opts.RepoAt)) 309 + filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt)) 309 310 } 310 311 311 312 var conditions []string ··· 361 362 } 362 363 363 364 func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) { 364 - pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId)) 365 + pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId)) 365 366 if err != nil { 366 367 return nil, err 367 368 } ··· 373 374 } 374 375 375 376 // mapping from pull -> pull submissions 376 - func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) { 377 + func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) { 377 378 var conditions []string 378 379 var args []any 379 380 for _, filter := range filters { ··· 448 449 449 450 // Get comments for all submissions using GetPullComments 450 451 submissionIds := slices.Collect(maps.Keys(submissionMap)) 451 - comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds)) 452 + comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds)) 452 453 if err != nil { 453 454 return nil, fmt.Errorf("failed to get pull comments: %w", err) 454 455 } ··· 474 475 return m, nil 475 476 } 476 477 477 - func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) { 478 + func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) { 478 479 var conditions []string 479 480 var args []any 480 481 for _, filter := range filters { ··· 542 543 543 544 // collect references for each comments 544 545 commentAts := slices.Collect(maps.Keys(commentMap)) 545 - allReferencs, err := GetReferencesAll(e, FilterIn("from_at", commentAts)) 546 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts)) 546 547 if err != nil { 547 548 return nil, fmt.Errorf("failed to query reference_links: %w", err) 548 549 } ··· 708 709 return err 709 710 } 710 711 711 - func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error { 712 + func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error { 712 713 var conditions []string 713 714 var args []any 714 715 ··· 732 733 733 734 // Only used when stacking to update contents in the event of a rebase (the interdiff should be empty). 
734 735 // otherwise submissions are immutable 735 - func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error { 736 + func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error { 736 737 var conditions []string 737 738 var args []any 738 739 ··· 790 791 func GetStack(e Execer, stackId string) (models.Stack, error) { 791 792 unorderedPulls, err := GetPulls( 792 793 e, 793 - FilterEq("stack_id", stackId), 794 - FilterNotEq("state", models.PullDeleted), 794 + orm.FilterEq("stack_id", stackId), 795 + orm.FilterNotEq("state", models.PullDeleted), 795 796 ) 796 797 if err != nil { 797 798 return nil, err ··· 835 836 func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) { 836 837 pulls, err := GetPulls( 837 838 e, 838 - FilterEq("stack_id", stackId), 839 - FilterEq("state", models.PullDeleted), 839 + orm.FilterEq("stack_id", stackId), 840 + orm.FilterEq("state", models.PullDeleted), 840 841 ) 841 842 if err != nil { 842 843 return nil, err
+2 -1
appview/db/punchcard.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 13 // this adds to the existing count ··· 20 21 return err 21 22 } 22 23 23 - func MakePunchcard(e Execer, filters ...filter) (*models.Punchcard, error) { 24 + func MakePunchcard(e Execer, filters ...orm.Filter) (*models.Punchcard, error) { 24 25 punchcard := &models.Punchcard{} 25 26 now := time.Now() 26 27 startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
+4 -3
appview/db/reference.go
··· 8 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 9 "tangled.org/core/api/tangled" 10 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 11 12 ) 12 13 13 14 // ValidateReferenceLinks resolves refLinks to Issue/PR/IssueComment/PullComment ATURIs. ··· 205 206 return err 206 207 } 207 208 208 - func GetReferencesAll(e Execer, filters ...filter) (map[syntax.ATURI][]syntax.ATURI, error) { 209 + func GetReferencesAll(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]syntax.ATURI, error) { 209 210 var ( 210 211 conditions []string 211 212 args []any ··· 347 348 if len(aturis) == 0 { 348 349 return nil, nil 349 350 } 350 - filter := FilterIn("c.at_uri", aturis) 351 + filter := orm.FilterIn("c.at_uri", aturis) 351 352 rows, err := e.Query( 352 353 fmt.Sprintf( 353 354 `select r.did, r.name, i.issue_id, c.id, i.title, i.open ··· 427 428 if len(aturis) == 0 { 428 429 return nil, nil 429 430 } 430 - filter := FilterIn("c.comment_at", aturis) 431 + filter := orm.FilterIn("c.comment_at", aturis) 431 432 rows, err := e.Query( 432 433 fmt.Sprintf( 433 434 `select r.did, r.name, p.pull_id, c.id, p.title, p.state
+5 -3
appview/db/registration.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetRegistrations(e Execer, filters ...filter) ([]models.Registration, error) { 13 + func GetRegistrations(e Execer, filters ...orm.Filter) ([]models.Registration, error) { 13 14 var registrations []models.Registration 14 15 15 16 var conditions []string ··· 37 38 if err != nil { 38 39 return nil, err 39 40 } 41 + defer rows.Close() 40 42 41 43 for rows.Next() { 42 44 var createdAt string ··· 69 71 return registrations, nil 70 72 } 71 73 72 - func MarkRegistered(e Execer, filters ...filter) error { 74 + func MarkRegistered(e Execer, filters ...orm.Filter) error { 73 75 var conditions []string 74 76 var args []any 75 77 for _, filter := range filters { ··· 94 96 return err 95 97 } 96 98 97 - func DeleteKnot(e Execer, filters ...filter) error { 99 + func DeleteKnot(e Execer, filters ...orm.Filter) error { 98 100 var conditions []string 99 101 var args []any 100 102 for _, filter := range filters {
+18 -6
appview/db/repos.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 - func GetRepos(e Execer, limit int, filters ...filter) ([]models.Repo, error) { 17 + func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) { 17 18 repoMap := make(map[syntax.ATURI]*models.Repo) 18 19 19 20 var conditions []string ··· 55 56 limitClause, 56 57 ) 57 58 rows, err := e.Query(repoQuery, args...) 58 - 59 59 if err != nil { 60 60 return nil, fmt.Errorf("failed to execute repo query: %w ", err) 61 61 } 62 + defer rows.Close() 62 63 63 64 for rows.Next() { 64 65 var repo models.Repo ··· 127 128 if err != nil { 128 129 return nil, fmt.Errorf("failed to execute labels query: %w ", err) 129 130 } 131 + defer rows.Close() 132 + 130 133 for rows.Next() { 131 134 var repoat, labelat string 132 135 if err := rows.Scan(&repoat, &labelat); err != nil { ··· 155 158 from repo_languages 156 159 where repo_at in (%s) 157 160 and is_default_ref = 1 161 + and language <> '' 158 162 ) 159 163 where rn = 1 160 164 `, ··· 164 168 if err != nil { 165 169 return nil, fmt.Errorf("failed to execute lang query: %w ", err) 166 170 } 171 + defer rows.Close() 172 + 167 173 for rows.Next() { 168 174 var repoat, lang string 169 175 if err := rows.Scan(&repoat, &lang); err != nil { ··· 190 196 if err != nil { 191 197 return nil, fmt.Errorf("failed to execute star-count query: %w ", err) 192 198 } 199 + defer rows.Close() 200 + 193 201 for rows.Next() { 194 202 var repoat string 195 203 var count int ··· 219 227 if err != nil { 220 228 return nil, fmt.Errorf("failed to execute issue-count query: %w ", err) 221 229 } 230 + defer rows.Close() 231 + 222 232 for rows.Next() { 223 233 var repoat string 224 234 var open, closed int ··· 260 270 if err != nil { 261 271 return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err) 262 272 } 273 + defer rows.Close() 274 + 263 275 for rows.Next() { 264 276 var repoat string 265 277 var open, merged, closed, deleted int ··· 294 306 } 295 307 296 308 // helper to get exactly one repo 297 - func GetRepo(e Execer, filters ...filter) (*models.Repo, error) { 309 + func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) { 298 310 repos, err := GetRepos(e, 0, filters...) 299 311 if err != nil { 300 312 return nil, err ··· 311 323 return &repos[0], nil 312 324 } 313 325 314 - func CountRepos(e Execer, filters ...filter) (int64, error) { 326 + func CountRepos(e Execer, filters ...orm.Filter) (int64, error) { 315 327 var conditions []string 316 328 var args []any 317 329 for _, filter := range filters { ··· 542 554 return err 543 555 } 544 556 545 - func UnsubscribeLabel(e Execer, filters ...filter) error { 557 + func UnsubscribeLabel(e Execer, filters ...orm.Filter) error { 546 558 var conditions []string 547 559 var args []any 548 560 for _, filter := range filters { ··· 560 572 return err 561 573 } 562 574 563 - func GetRepoLabels(e Execer, filters ...filter) ([]models.RepoLabel, error) { 575 + func GetRepoLabels(e Execer, filters ...orm.Filter) ([]models.RepoLabel, error) { 564 576 var conditions []string 565 577 var args []any 566 578 for _, filter := range filters {
+6 -5
appview/db/spindle.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetSpindles(e Execer, filters ...filter) ([]models.Spindle, error) { 13 + func GetSpindles(e Execer, filters ...orm.Filter) ([]models.Spindle, error) { 13 14 var spindles []models.Spindle 14 15 15 16 var conditions []string ··· 91 92 return err 92 93 } 93 94 94 - func VerifySpindle(e Execer, filters ...filter) (int64, error) { 95 + func VerifySpindle(e Execer, filters ...orm.Filter) (int64, error) { 95 96 var conditions []string 96 97 var args []any 97 98 for _, filter := range filters { ··· 114 115 return res.RowsAffected() 115 116 } 116 117 117 - func DeleteSpindle(e Execer, filters ...filter) error { 118 + func DeleteSpindle(e Execer, filters ...orm.Filter) error { 118 119 var conditions []string 119 120 var args []any 120 121 for _, filter := range filters { ··· 144 145 return err 145 146 } 146 147 147 - func RemoveSpindleMember(e Execer, filters ...filter) error { 148 + func RemoveSpindleMember(e Execer, filters ...orm.Filter) error { 148 149 var conditions []string 149 150 var args []any 150 151 for _, filter := range filters { ··· 163 164 return err 164 165 } 165 166 166 - func GetSpindleMembers(e Execer, filters ...filter) ([]models.SpindleMember, error) { 167 + func GetSpindleMembers(e Execer, filters ...orm.Filter) ([]models.SpindleMember, error) { 167 168 var members []models.SpindleMember 168 169 169 170 var conditions []string
+6 -4
appview/db/star.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func AddStar(e Execer, star *models.Star) error { ··· 133 134 134 135 // GetRepoStars return a list of stars each holding target repository. 135 136 // If there isn't known repo with starred at-uri, those stars will be ignored. 136 - func GetRepoStars(e Execer, limit int, filters ...filter) ([]models.RepoStar, error) { 137 + func GetRepoStars(e Execer, limit int, filters ...orm.Filter) ([]models.RepoStar, error) { 137 138 var conditions []string 138 139 var args []any 139 140 for _, filter := range filters { ··· 164 165 if err != nil { 165 166 return nil, err 166 167 } 168 + defer rows.Close() 167 169 168 170 starMap := make(map[string][]models.Star) 169 171 for rows.Next() { ··· 195 197 return nil, nil 196 198 } 197 199 198 - repos, err := GetRepos(e, 0, FilterIn("at_uri", args)) 200 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args)) 199 201 if err != nil { 200 202 return nil, err 201 203 } ··· 225 227 return repoStars, nil 226 228 } 227 229 228 - func CountStars(e Execer, filters ...filter) (int64, error) { 230 + func CountStars(e Execer, filters ...orm.Filter) (int64, error) { 229 231 var conditions []string 230 232 var args []any 231 233 for _, filter := range filters { ··· 298 300 } 299 301 300 302 // get full repo data 301 - repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris)) 303 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris)) 302 304 if err != nil { 303 305 return nil, err 304 306 }
+4 -3
appview/db/strings.go
··· 8 8 "time" 9 9 10 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 11 12 ) 12 13 13 14 func AddString(e Execer, s models.String) error { ··· 44 45 return err 45 46 } 46 47 47 - func GetStrings(e Execer, limit int, filters ...filter) ([]models.String, error) { 48 + func GetStrings(e Execer, limit int, filters ...orm.Filter) ([]models.String, error) { 48 49 var all []models.String 49 50 50 51 var conditions []string ··· 127 128 return all, nil 128 129 } 129 130 130 - func CountStrings(e Execer, filters ...filter) (int64, error) { 131 + func CountStrings(e Execer, filters ...orm.Filter) (int64, error) { 131 132 var conditions []string 132 133 var args []any 133 134 for _, filter := range filters { ··· 151 152 return count, nil 152 153 } 153 154 154 - func DeleteString(e Execer, filters ...filter) error { 155 + func DeleteString(e Execer, filters ...orm.Filter) error { 155 156 var conditions []string 156 157 var args []any 157 158 for _, filter := range filters {
+9 -8
appview/db/timeline.go
··· 5 5 6 6 "github.com/bluesky-social/indigo/atproto/syntax" 7 7 "tangled.org/core/appview/models" 8 + "tangled.org/core/orm" 8 9 ) 9 10 10 11 // TODO: this gathers heterogenous events from different sources and aggregates ··· 84 85 } 85 86 86 87 func getTimelineRepos(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 87 - filters := make([]filter, 0) 88 + filters := make([]orm.Filter, 0) 88 89 if userIsFollowing != nil { 89 - filters = append(filters, FilterIn("did", userIsFollowing)) 90 + filters = append(filters, orm.FilterIn("did", userIsFollowing)) 90 91 } 91 92 92 93 repos, err := GetRepos(e, limit, filters...) ··· 104 105 105 106 var origRepos []models.Repo 106 107 if args != nil { 107 - origRepos, err = GetRepos(e, 0, FilterIn("at_uri", args)) 108 + origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args)) 108 109 } 109 110 if err != nil { 110 111 return nil, err ··· 144 145 } 145 146 146 147 func getTimelineStars(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 147 - filters := make([]filter, 0) 148 + filters := make([]orm.Filter, 0) 148 149 if userIsFollowing != nil { 149 - filters = append(filters, FilterIn("did", userIsFollowing)) 150 + filters = append(filters, orm.FilterIn("did", userIsFollowing)) 150 151 } 151 152 152 153 stars, err := GetRepoStars(e, limit, filters...) ··· 180 181 } 181 182 182 183 func getTimelineFollows(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 183 - filters := make([]filter, 0) 184 + filters := make([]orm.Filter, 0) 184 185 if userIsFollowing != nil { 185 - filters = append(filters, FilterIn("user_did", userIsFollowing)) 186 + filters = append(filters, orm.FilterIn("user_did", userIsFollowing)) 186 187 } 187 188 188 189 follows, err := GetFollows(e, limit, filters...) ··· 199 200 return nil, nil 200 201 } 201 202 202 - profiles, err := GetProfiles(e, FilterIn("did", subjects)) 203 + profiles, err := GetProfiles(e, orm.FilterIn("did", subjects)) 203 204 if err != nil { 204 205 return nil, err 205 206 }
+25 -24
appview/ingester.go
··· 21 21 "tangled.org/core/appview/serververify" 22 22 "tangled.org/core/appview/validator" 23 23 "tangled.org/core/idresolver" 24 + "tangled.org/core/orm" 24 25 "tangled.org/core/rbac" 25 26 ) 26 27 ··· 253 254 254 255 err = db.AddArtifact(i.Db, artifact) 255 256 case jmodels.CommitOperationDelete: 256 - err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey)) 257 + err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey)) 257 258 } 258 259 259 260 if err != nil { ··· 350 351 351 352 err = db.UpsertProfile(tx, &profile) 352 353 case jmodels.CommitOperationDelete: 353 - err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey)) 354 + err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey)) 354 355 } 355 356 356 357 if err != nil { ··· 424 425 // get record from db first 425 426 members, err := db.GetSpindleMembers( 426 427 ddb, 427 - db.FilterEq("did", did), 428 - db.FilterEq("rkey", rkey), 428 + orm.FilterEq("did", did), 429 + orm.FilterEq("rkey", rkey), 429 430 ) 430 431 if err != nil || len(members) != 1 { 431 432 return fmt.Errorf("failed to get member: %w, len(members) = %d", err, len(members)) ··· 440 441 // remove record by rkey && update enforcer 441 442 if err = db.RemoveSpindleMember( 442 443 tx, 443 - db.FilterEq("did", did), 444 - db.FilterEq("rkey", rkey), 444 + orm.FilterEq("did", did), 445 + orm.FilterEq("rkey", rkey), 445 446 ); err != nil { 446 447 return fmt.Errorf("failed to remove from db: %w", err) 447 448 } ··· 523 524 // get record from db first 524 525 spindles, err := db.GetSpindles( 525 526 ddb, 526 - db.FilterEq("owner", did), 527 - db.FilterEq("instance", instance), 527 + orm.FilterEq("owner", did), 528 + orm.FilterEq("instance", instance), 528 529 ) 529 530 if err != nil || len(spindles) != 1 { 530 531 return fmt.Errorf("failed to get spindles: %w, len(spindles) = %d", err, len(spindles)) ··· 543 544 // remove spindle members first 544 545 err = db.RemoveSpindleMember( 545 546 tx, 546 - db.FilterEq("owner", did), 547 - db.FilterEq("instance", instance), 547 + orm.FilterEq("owner", did), 548 + orm.FilterEq("instance", instance), 548 549 ) 549 550 if err != nil { 550 551 return err ··· 552 553 553 554 err = db.DeleteSpindle( 554 555 tx, 555 - db.FilterEq("owner", did), 556 - db.FilterEq("instance", instance), 556 + orm.FilterEq("owner", did), 557 + orm.FilterEq("instance", instance), 557 558 ) 558 559 if err != nil { 559 560 return err ··· 621 622 case jmodels.CommitOperationDelete: 622 623 if err := db.DeleteString( 623 624 ddb, 624 - db.FilterEq("did", did), 625 - db.FilterEq("rkey", rkey), 625 + orm.FilterEq("did", did), 626 + orm.FilterEq("rkey", rkey), 626 627 ); err != nil { 627 628 l.Error("failed to delete", "err", err) 628 629 return fmt.Errorf("failed to delete string record: %w", err) ··· 740 741 // get record from db first 741 742 registrations, err := db.GetRegistrations( 742 743 ddb, 743 - db.FilterEq("domain", domain), 744 - db.FilterEq("did", did), 744 + orm.FilterEq("domain", domain), 745 + orm.FilterEq("did", did), 745 746 ) 746 747 if err != nil { 747 748 return fmt.Errorf("failed to get registration: %w", err) ··· 762 763 763 764 err = db.DeleteKnot( 764 765 tx, 765 - db.FilterEq("did", did), 766 - db.FilterEq("domain", domain), 766 + orm.FilterEq("did", did), 767 + orm.FilterEq("domain", domain), 767 768 ) 768 769 if err != nil { 769 770 return err ··· 915 916 case jmodels.CommitOperationDelete: 916 917 if 
err := db.DeleteIssueComments( 917 918 ddb, 918 - db.FilterEq("did", did), 919 - db.FilterEq("rkey", rkey), 919 + orm.FilterEq("did", did), 920 + orm.FilterEq("rkey", rkey), 920 921 ); err != nil { 921 922 return fmt.Errorf("failed to delete issue comment record: %w", err) 922 923 } ··· 969 970 case jmodels.CommitOperationDelete: 970 971 if err := db.DeleteLabelDefinition( 971 972 ddb, 972 - db.FilterEq("did", did), 973 - db.FilterEq("rkey", rkey), 973 + orm.FilterEq("did", did), 974 + orm.FilterEq("rkey", rkey), 974 975 ); err != nil { 975 976 return fmt.Errorf("failed to delete labeldef record: %w", err) 976 977 } ··· 1010 1011 var repo *models.Repo 1011 1012 switch collection { 1012 1013 case tangled.RepoIssueNSID: 1013 - i, err := db.GetIssues(ddb, db.FilterEq("at_uri", subject)) 1014 + i, err := db.GetIssues(ddb, orm.FilterEq("at_uri", subject)) 1014 1015 if err != nil || len(i) != 1 { 1015 1016 return fmt.Errorf("failed to find subject: %w || subject count %d", err, len(i)) 1016 1017 } ··· 1019 1020 return fmt.Errorf("unsupport label subject: %s", collection) 1020 1021 } 1021 1022 1022 - actx, err := db.NewLabelApplicationCtx(ddb, db.FilterIn("at_uri", repo.Labels)) 1023 + actx, err := db.NewLabelApplicationCtx(ddb, orm.FilterIn("at_uri", repo.Labels)) 1023 1024 if err != nil { 1024 1025 return fmt.Errorf("failed to build label application ctx: %w", err) 1025 1026 }
+46 -45
appview/issues/issues.go
··· 19 19 "tangled.org/core/appview/config" 20 20 "tangled.org/core/appview/db" 21 21 issues_indexer "tangled.org/core/appview/indexer/issues" 22 + "tangled.org/core/appview/mentions" 22 23 "tangled.org/core/appview/models" 23 24 "tangled.org/core/appview/notify" 24 25 "tangled.org/core/appview/oauth" 25 26 "tangled.org/core/appview/pages" 26 27 "tangled.org/core/appview/pages/repoinfo" 27 28 "tangled.org/core/appview/pagination" 28 - "tangled.org/core/appview/refresolver" 29 29 "tangled.org/core/appview/reporesolver" 30 30 "tangled.org/core/appview/validator" 31 31 "tangled.org/core/idresolver" 32 + "tangled.org/core/orm" 32 33 "tangled.org/core/rbac" 33 34 "tangled.org/core/tid" 34 35 ) 35 36 36 37 type Issues struct { 37 - oauth *oauth.OAuth 38 - repoResolver *reporesolver.RepoResolver 39 - enforcer *rbac.Enforcer 40 - pages *pages.Pages 41 - idResolver *idresolver.Resolver 42 - refResolver *refresolver.Resolver 43 - db *db.DB 44 - config *config.Config 45 - notifier notify.Notifier 46 - logger *slog.Logger 47 - validator *validator.Validator 48 - indexer *issues_indexer.Indexer 38 + oauth *oauth.OAuth 39 + repoResolver *reporesolver.RepoResolver 40 + enforcer *rbac.Enforcer 41 + pages *pages.Pages 42 + idResolver *idresolver.Resolver 43 + mentionsResolver *mentions.Resolver 44 + db *db.DB 45 + config *config.Config 46 + notifier notify.Notifier 47 + logger *slog.Logger 48 + validator *validator.Validator 49 + indexer *issues_indexer.Indexer 49 50 } 50 51 51 52 func New( ··· 54 55 enforcer *rbac.Enforcer, 55 56 pages *pages.Pages, 56 57 idResolver *idresolver.Resolver, 57 - refResolver *refresolver.Resolver, 58 + mentionsResolver *mentions.Resolver, 58 59 db *db.DB, 59 60 config *config.Config, 60 61 notifier notify.Notifier, ··· 63 64 logger *slog.Logger, 64 65 ) *Issues { 65 66 return &Issues{ 66 - oauth: oauth, 67 - repoResolver: repoResolver, 68 - enforcer: enforcer, 69 - pages: pages, 70 - idResolver: idResolver, 71 - refResolver: refResolver, 72 - db: db, 73 - config: config, 74 - notifier: notifier, 75 - logger: logger, 76 - validator: validator, 77 - indexer: indexer, 67 + oauth: oauth, 68 + repoResolver: repoResolver, 69 + enforcer: enforcer, 70 + pages: pages, 71 + idResolver: idResolver, 72 + mentionsResolver: mentionsResolver, 73 + db: db, 74 + config: config, 75 + notifier: notifier, 76 + logger: logger, 77 + validator: validator, 78 + indexer: indexer, 78 79 } 79 80 } 80 81 ··· 113 114 114 115 labelDefs, err := db.GetLabelDefinitions( 115 116 rp.db, 116 - db.FilterIn("at_uri", f.Labels), 117 - db.FilterContains("scope", tangled.RepoIssueNSID), 117 + orm.FilterIn("at_uri", f.Labels), 118 + orm.FilterContains("scope", tangled.RepoIssueNSID), 118 119 ) 119 120 if err != nil { 120 121 l.Error("failed to fetch labels", "err", err) ··· 163 164 newIssue := issue 164 165 newIssue.Title = r.FormValue("title") 165 166 newIssue.Body = r.FormValue("body") 166 - newIssue.Mentions, newIssue.References = rp.refResolver.Resolve(r.Context(), newIssue.Body) 167 + newIssue.Mentions, newIssue.References = rp.mentionsResolver.Resolve(r.Context(), newIssue.Body) 167 168 168 169 if err := rp.validator.ValidateIssue(newIssue); err != nil { 169 170 l.Error("validation error", "err", err) ··· 314 315 if isIssueOwner || isRepoOwner || isCollaborator { 315 316 err = db.CloseIssues( 316 317 rp.db, 317 - db.FilterEq("id", issue.Id), 318 + orm.FilterEq("id", issue.Id), 318 319 ) 319 320 if err != nil { 320 321 l.Error("failed to close issue", "err", err) ··· 361 362 if isCollaborator || isRepoOwner || 
isIssueOwner { 362 363 err := db.ReopenIssues( 363 364 rp.db, 364 - db.FilterEq("id", issue.Id), 365 + orm.FilterEq("id", issue.Id), 365 366 ) 366 367 if err != nil { 367 368 l.Error("failed to reopen issue", "err", err) ··· 412 413 replyTo = &replyToUri 413 414 } 414 415 415 - mentions, references := rp.refResolver.Resolve(r.Context(), body) 416 + mentions, references := rp.mentionsResolver.Resolve(r.Context(), body) 416 417 417 418 comment := models.IssueComment{ 418 419 Did: user.Did, ··· 506 507 commentId := chi.URLParam(r, "commentId") 507 508 comments, err := db.GetIssueComments( 508 509 rp.db, 509 - db.FilterEq("id", commentId), 510 + orm.FilterEq("id", commentId), 510 511 ) 511 512 if err != nil { 512 513 l.Error("failed to fetch comment", "id", commentId) ··· 542 543 commentId := chi.URLParam(r, "commentId") 543 544 comments, err := db.GetIssueComments( 544 545 rp.db, 545 - db.FilterEq("id", commentId), 546 + orm.FilterEq("id", commentId), 546 547 ) 547 548 if err != nil { 548 549 l.Error("failed to fetch comment", "id", commentId) ··· 584 585 newComment := comment 585 586 newComment.Body = newBody 586 587 newComment.Edited = &now 587 - newComment.Mentions, newComment.References = rp.refResolver.Resolve(r.Context(), newBody) 588 + newComment.Mentions, newComment.References = rp.mentionsResolver.Resolve(r.Context(), newBody) 588 589 589 590 record := newComment.AsRecord() 590 591 ··· 652 653 commentId := chi.URLParam(r, "commentId") 653 654 comments, err := db.GetIssueComments( 654 655 rp.db, 655 - db.FilterEq("id", commentId), 656 + orm.FilterEq("id", commentId), 656 657 ) 657 658 if err != nil { 658 659 l.Error("failed to fetch comment", "id", commentId) ··· 688 689 commentId := chi.URLParam(r, "commentId") 689 690 comments, err := db.GetIssueComments( 690 691 rp.db, 691 - db.FilterEq("id", commentId), 692 + orm.FilterEq("id", commentId), 692 693 ) 693 694 if err != nil { 694 695 l.Error("failed to fetch comment", "id", commentId) ··· 724 725 commentId := chi.URLParam(r, "commentId") 725 726 comments, err := db.GetIssueComments( 726 727 rp.db, 727 - db.FilterEq("id", commentId), 728 + orm.FilterEq("id", commentId), 728 729 ) 729 730 if err != nil { 730 731 l.Error("failed to fetch comment", "id", commentId) ··· 751 752 752 753 // optimistic deletion 753 754 deleted := time.Now() 754 - err = db.DeleteIssueComments(rp.db, db.FilterEq("id", comment.Id)) 755 + err = db.DeleteIssueComments(rp.db, orm.FilterEq("id", comment.Id)) 755 756 if err != nil { 756 757 l.Error("failed to delete comment", "err", err) 757 758 rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment") ··· 840 841 841 842 issues, err = db.GetIssues( 842 843 rp.db, 843 - db.FilterIn("id", res.Hits), 844 + orm.FilterIn("id", res.Hits), 844 845 ) 845 846 if err != nil { 846 847 l.Error("failed to get issues", "err", err) ··· 856 857 issues, err = db.GetIssuesPaginated( 857 858 rp.db, 858 859 page, 859 - db.FilterEq("repo_at", f.RepoAt()), 860 - db.FilterEq("open", openInt), 860 + orm.FilterEq("repo_at", f.RepoAt()), 861 + orm.FilterEq("open", openInt), 861 862 ) 862 863 if err != nil { 863 864 l.Error("failed to get issues", "err", err) ··· 868 869 869 870 labelDefs, err := db.GetLabelDefinitions( 870 871 rp.db, 871 - db.FilterIn("at_uri", f.Labels), 872 - db.FilterContains("scope", tangled.RepoIssueNSID), 872 + orm.FilterIn("at_uri", f.Labels), 873 + orm.FilterContains("scope", tangled.RepoIssueNSID), 873 874 ) 874 875 if err != nil { 875 876 l.Error("failed to fetch labels", "err", 
err) ··· 912 913 }) 913 914 case http.MethodPost: 914 915 body := r.FormValue("body") 915 - mentions, references := rp.refResolver.Resolve(r.Context(), body) 916 + mentions, references := rp.mentionsResolver.Resolve(r.Context(), body) 916 917 917 918 issue := &models.Issue{ 918 919 RepoAt: f.RepoAt(),
+19 -18
appview/knots/knots.go
··· 21 21 "tangled.org/core/appview/xrpcclient" 22 22 "tangled.org/core/eventconsumer" 23 23 "tangled.org/core/idresolver" 24 + "tangled.org/core/orm" 24 25 "tangled.org/core/rbac" 25 26 "tangled.org/core/tid" 26 27 ··· 72 73 user := k.OAuth.GetUser(r) 73 74 registrations, err := db.GetRegistrations( 74 75 k.Db, 75 - db.FilterEq("did", user.Did), 76 + orm.FilterEq("did", user.Did), 76 77 ) 77 78 if err != nil { 78 79 k.Logger.Error("failed to fetch knot registrations", "err", err) ··· 102 103 103 104 registrations, err := db.GetRegistrations( 104 105 k.Db, 105 - db.FilterEq("did", user.Did), 106 - db.FilterEq("domain", domain), 106 + orm.FilterEq("did", user.Did), 107 + orm.FilterEq("domain", domain), 107 108 ) 108 109 if err != nil { 109 110 l.Error("failed to get registrations", "err", err) ··· 127 128 repos, err := db.GetRepos( 128 129 k.Db, 129 130 0, 130 - db.FilterEq("knot", domain), 131 + orm.FilterEq("knot", domain), 131 132 ) 132 133 if err != nil { 133 134 l.Error("failed to get knot repos", "err", err) ··· 293 294 // get record from db first 294 295 registrations, err := db.GetRegistrations( 295 296 k.Db, 296 - db.FilterEq("did", user.Did), 297 - db.FilterEq("domain", domain), 297 + orm.FilterEq("did", user.Did), 298 + orm.FilterEq("domain", domain), 298 299 ) 299 300 if err != nil { 300 301 l.Error("failed to get registration", "err", err) ··· 321 322 322 323 err = db.DeleteKnot( 323 324 tx, 324 - db.FilterEq("did", user.Did), 325 - db.FilterEq("domain", domain), 325 + orm.FilterEq("did", user.Did), 326 + orm.FilterEq("domain", domain), 326 327 ) 327 328 if err != nil { 328 329 l.Error("failed to delete registration", "err", err) ··· 402 403 // get record from db first 403 404 registrations, err := db.GetRegistrations( 404 405 k.Db, 405 - db.FilterEq("did", user.Did), 406 - db.FilterEq("domain", domain), 406 + orm.FilterEq("did", user.Did), 407 + orm.FilterEq("domain", domain), 407 408 ) 408 409 if err != nil { 409 410 l.Error("failed to get registration", "err", err) ··· 493 494 // Get updated registration to show 494 495 registrations, err = db.GetRegistrations( 495 496 k.Db, 496 - db.FilterEq("did", user.Did), 497 - db.FilterEq("domain", domain), 497 + orm.FilterEq("did", user.Did), 498 + orm.FilterEq("domain", domain), 498 499 ) 499 500 if err != nil { 500 501 l.Error("failed to get registration", "err", err) ··· 529 530 530 531 registrations, err := db.GetRegistrations( 531 532 k.Db, 532 - db.FilterEq("did", user.Did), 533 - db.FilterEq("domain", domain), 534 - db.FilterIsNot("registered", "null"), 533 + orm.FilterEq("did", user.Did), 534 + orm.FilterEq("domain", domain), 535 + orm.FilterIsNot("registered", "null"), 535 536 ) 536 537 if err != nil { 537 538 l.Error("failed to get registration", "err", err) ··· 637 638 638 639 registrations, err := db.GetRegistrations( 639 640 k.Db, 640 - db.FilterEq("did", user.Did), 641 - db.FilterEq("domain", domain), 642 - db.FilterIsNot("registered", "null"), 641 + orm.FilterEq("did", user.Did), 642 + orm.FilterEq("domain", domain), 643 + orm.FilterIsNot("registered", "null"), 643 644 ) 644 645 if err != nil { 645 646 l.Error("failed to get registration", "err", err)
+5 -4
appview/labels/labels.go
··· 16 16 "tangled.org/core/appview/oauth" 17 17 "tangled.org/core/appview/pages" 18 18 "tangled.org/core/appview/validator" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 "tangled.org/core/tid" 21 22 ··· 88 89 repoAt := r.Form.Get("repo") 89 90 subjectUri := r.Form.Get("subject") 90 91 91 - repo, err := db.GetRepo(l.db, db.FilterEq("at_uri", repoAt)) 92 + repo, err := db.GetRepo(l.db, orm.FilterEq("at_uri", repoAt)) 92 93 if err != nil { 93 94 fail("Failed to get repository.", err) 94 95 return 95 96 } 96 97 97 98 // find all the labels that this repo subscribes to 98 - repoLabels, err := db.GetRepoLabels(l.db, db.FilterEq("repo_at", repoAt)) 99 + repoLabels, err := db.GetRepoLabels(l.db, orm.FilterEq("repo_at", repoAt)) 99 100 if err != nil { 100 101 fail("Failed to get labels for this repository.", err) 101 102 return ··· 106 107 labelAts = append(labelAts, rl.LabelAt.String()) 107 108 } 108 109 109 - actx, err := db.NewLabelApplicationCtx(l.db, db.FilterIn("at_uri", labelAts)) 110 + actx, err := db.NewLabelApplicationCtx(l.db, orm.FilterIn("at_uri", labelAts)) 110 111 if err != nil { 111 112 fail("Invalid form data.", err) 112 113 return 113 114 } 114 115 115 116 // calculate the start state by applying already known labels 116 - existingOps, err := db.GetLabelOps(l.db, db.FilterEq("subject", subjectUri)) 117 + existingOps, err := db.GetLabelOps(l.db, orm.FilterEq("subject", subjectUri)) 117 118 if err != nil { 118 119 fail("Invalid form data.", err) 119 120 return
+67
appview/mentions/resolver.go
··· 1 + package mentions 2 + 3 + import ( 4 + "context" 5 + "log/slog" 6 + 7 + "github.com/bluesky-social/indigo/atproto/syntax" 8 + "tangled.org/core/appview/config" 9 + "tangled.org/core/appview/db" 10 + "tangled.org/core/appview/models" 11 + "tangled.org/core/appview/pages/markup" 12 + "tangled.org/core/idresolver" 13 + ) 14 + 15 + type Resolver struct { 16 + config *config.Config 17 + idResolver *idresolver.Resolver 18 + execer db.Execer 19 + logger *slog.Logger 20 + } 21 + 22 + func New( 23 + config *config.Config, 24 + idResolver *idresolver.Resolver, 25 + execer db.Execer, 26 + logger *slog.Logger, 27 + ) *Resolver { 28 + return &Resolver{ 29 + config, 30 + idResolver, 31 + execer, 32 + logger, 33 + } 34 + } 35 + 36 + func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) { 37 + l := r.logger.With("method", "Resolve") 38 + 39 + rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source) 40 + l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs) 41 + 42 + idents := r.idResolver.ResolveIdents(ctx, rawMentions) 43 + var mentions []syntax.DID 44 + for _, ident := range idents { 45 + if ident != nil && !ident.Handle.IsInvalidHandle() { 46 + mentions = append(mentions, ident.DID) 47 + } 48 + } 49 + l.Debug("found mentions", "mentions", mentions) 50 + 51 + var resolvedRefs []models.ReferenceLink 52 + for _, rawRef := range rawRefs { 53 + ident, err := r.idResolver.ResolveIdent(ctx, rawRef.Handle) 54 + if err != nil || ident == nil || ident.Handle.IsInvalidHandle() { 55 + continue 56 + } 57 + rawRef.Handle = string(ident.DID) 58 + resolvedRefs = append(resolvedRefs, rawRef) 59 + } 60 + aturiRefs, err := db.ValidateReferenceLinks(r.execer, resolvedRefs) 61 + if err != nil { 62 + l.Error("failed running query", "err", err) 63 + } 64 + l.Debug("found references", "refs", aturiRefs) 65 + 66 + return mentions, aturiRefs 67 + }
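The constructor and `Resolve` signature above are exactly what the issues handlers earlier in this diff call into. A condensed illustration of the wiring, where the surrounding dependencies (config, identity resolver, DB execer, logger) stand in for whatever the appview already constructs at startup:

```go
// Illustration of the call pattern used by the issues handlers; nothing here
// is new API beyond what resolver.go above defines, and the caller-supplied
// dependencies are placeholders for the appview's existing ones.
package example

import (
	"context"
	"log/slog"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/appview/config"
	"tangled.org/core/appview/db"
	"tangled.org/core/appview/mentions"
	"tangled.org/core/idresolver"
)

func resolveBody(
	ctx context.Context,
	cfg *config.Config,
	ids *idresolver.Resolver,
	execer db.Execer,
	logger *slog.Logger,
	body string,
) ([]syntax.DID, []syntax.ATURI) {
	r := mentions.New(cfg, ids, execer, logger)
	// Returns the DIDs mentioned via @handle plus any issue/PR references
	// that validate against the database.
	return r.Resolve(ctx, body)
}
```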
+3 -2
appview/middleware/middleware.go
··· 18 18 "tangled.org/core/appview/pagination" 19 19 "tangled.org/core/appview/reporesolver" 20 20 "tangled.org/core/idresolver" 21 + "tangled.org/core/orm" 21 22 "tangled.org/core/rbac" 22 23 ) 23 24 ··· 217 218 218 219 repo, err := db.GetRepo( 219 220 mw.db, 220 - db.FilterEq("did", id.DID.String()), 221 - db.FilterEq("name", repoName), 221 + orm.FilterEq("did", id.DID.String()), 222 + orm.FilterEq("name", repoName), 222 223 ) 223 224 if err != nil { 224 225 log.Println("failed to resolve repo", "err", err)
+1 -1
appview/models/pull.go
··· 83 83 Repo *Repo 84 84 } 85 85 86 + // NOTE: This method does not include patch blob in returned atproto record 86 87 func (p Pull) AsRecord() tangled.RepoPull { 87 88 var source *tangled.RepoPull_Source 88 89 if p.PullSource != nil { ··· 113 114 Repo: p.RepoAt.String(), 114 115 Branch: p.TargetBranch, 115 116 }, 116 - Patch: p.LatestPatch(), 117 117 Source: source, 118 118 } 119 119 return record
+5 -4
appview/notifications/notifications.go
··· 11 11 "tangled.org/core/appview/oauth" 12 12 "tangled.org/core/appview/pages" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 type Notifications struct { ··· 53 54 54 55 total, err := db.CountNotifications( 55 56 n.db, 56 - db.FilterEq("recipient_did", user.Did), 57 + orm.FilterEq("recipient_did", user.Did), 57 58 ) 58 59 if err != nil { 59 60 l.Error("failed to get total notifications", "err", err) ··· 64 65 notifications, err := db.GetNotificationsWithEntities( 65 66 n.db, 66 67 page, 67 - db.FilterEq("recipient_did", user.Did), 68 + orm.FilterEq("recipient_did", user.Did), 68 69 ) 69 70 if err != nil { 70 71 l.Error("failed to get notifications", "err", err) ··· 96 97 97 98 count, err := db.CountNotifications( 98 99 n.db, 99 - db.FilterEq("recipient_did", user.Did), 100 - db.FilterEq("read", 0), 100 + orm.FilterEq("recipient_did", user.Did), 101 + orm.FilterEq("read", 0), 101 102 ) 102 103 if err != nil { 103 104 http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+77 -66
appview/notify/db/db.go
··· 3 3 import ( 4 4 "context" 5 5 "log" 6 - "maps" 7 6 "slices" 8 7 9 8 "github.com/bluesky-social/indigo/atproto/syntax" ··· 12 11 "tangled.org/core/appview/models" 13 12 "tangled.org/core/appview/notify" 14 13 "tangled.org/core/idresolver" 14 + "tangled.org/core/orm" 15 + "tangled.org/core/sets" 15 16 ) 16 17 17 18 const ( 18 - maxMentions = 5 19 + maxMentions = 8 19 20 ) 20 21 21 22 type databaseNotifier struct { ··· 42 43 return 43 44 } 44 45 var err error 45 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt))) 46 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt))) 46 47 if err != nil { 47 48 log.Printf("NewStar: failed to get repos: %v", err) 48 49 return 49 50 } 50 51 51 52 actorDid := syntax.DID(star.Did) 52 - recipients := []syntax.DID{syntax.DID(repo.Did)} 53 + recipients := sets.Singleton(syntax.DID(repo.Did)) 53 54 eventType := models.NotificationTypeRepoStarred 54 55 entityType := "repo" 55 56 entityId := star.RepoAt.String() ··· 74 75 } 75 76 76 77 func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 77 - 78 - // build the recipients list 79 - // - owner of the repo 80 - // - collaborators in the repo 81 - var recipients []syntax.DID 82 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 83 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt())) 78 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 84 79 if err != nil { 85 80 log.Printf("failed to fetch collaborators: %v", err) 86 81 return 87 82 } 83 + 84 + // build the recipients list 85 + // - owner of the repo 86 + // - collaborators in the repo 87 + // - remove users already mentioned 88 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 88 89 for _, c := range collaborators { 89 - recipients = append(recipients, c.SubjectDid) 90 + recipients.Insert(c.SubjectDid) 91 + } 92 + for _, m := range mentions { 93 + recipients.Remove(m) 90 94 } 91 95 92 96 actorDid := syntax.DID(issue.Did) ··· 108 112 ) 109 113 n.notifyEvent( 110 114 actorDid, 111 - mentions, 115 + sets.Collect(slices.Values(mentions)), 112 116 models.NotificationTypeUserMentioned, 113 117 entityType, 114 118 entityId, ··· 119 123 } 120 124 121 125 func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 122 - issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt)) 126 + issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt)) 123 127 if err != nil { 124 128 log.Printf("NewIssueComment: failed to get issues: %v", err) 125 129 return ··· 130 134 } 131 135 issue := issues[0] 132 136 133 - var recipients []syntax.DID 134 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 137 + // built the recipients list: 138 + // - the owner of the repo 139 + // - | if the comment is a reply -> everybody on that thread 140 + // | if the comment is a top level -> just the issue owner 141 + // - remove mentioned users from the recipients list 142 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 135 143 136 144 if comment.IsReply() { 137 145 // if this comment is a reply, then notify everybody in that thread 138 146 parentAtUri := *comment.ReplyTo 139 - allThreads := issue.CommentList() 140 147 141 148 // find the parent thread, and add all DIDs from here to the recipient list 142 - for _, t := range allThreads { 149 + for _, t := range issue.CommentList() { 143 150 if 
t.Self.AtUri().String() == parentAtUri { 144 - recipients = append(recipients, t.Participants()...) 151 + for _, p := range t.Participants() { 152 + recipients.Insert(p) 153 + } 145 154 } 146 155 } 147 156 } else { 148 157 // not a reply, notify just the issue author 149 - recipients = append(recipients, syntax.DID(issue.Did)) 158 + recipients.Insert(syntax.DID(issue.Did)) 159 + } 160 + 161 + for _, m := range mentions { 162 + recipients.Remove(m) 150 163 } 151 164 152 165 actorDid := syntax.DID(comment.Did) ··· 168 181 ) 169 182 n.notifyEvent( 170 183 actorDid, 171 - mentions, 184 + sets.Collect(slices.Values(mentions)), 172 185 models.NotificationTypeUserMentioned, 173 186 entityType, 174 187 entityId, ··· 184 197 185 198 func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) { 186 199 actorDid := syntax.DID(follow.UserDid) 187 - recipients := []syntax.DID{syntax.DID(follow.SubjectDid)} 200 + recipients := sets.Singleton(syntax.DID(follow.SubjectDid)) 188 201 eventType := models.NotificationTypeFollowed 189 202 entityType := "follow" 190 203 entityId := follow.UserDid ··· 207 220 } 208 221 209 222 func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) { 210 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt))) 223 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 211 224 if err != nil { 212 225 log.Printf("NewPull: failed to get repos: %v", err) 213 226 return 214 227 } 215 - 216 - // build the recipients list 217 - // - owner of the repo 218 - // - collaborators in the repo 219 - var recipients []syntax.DID 220 - recipients = append(recipients, syntax.DID(repo.Did)) 221 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt())) 228 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 222 229 if err != nil { 223 230 log.Printf("failed to fetch collaborators: %v", err) 224 231 return 225 232 } 233 + 234 + // build the recipients list 235 + // - owner of the repo 236 + // - collaborators in the repo 237 + recipients := sets.Singleton(syntax.DID(repo.Did)) 226 238 for _, c := range collaborators { 227 - recipients = append(recipients, c.SubjectDid) 239 + recipients.Insert(c.SubjectDid) 228 240 } 229 241 230 242 actorDid := syntax.DID(pull.OwnerDid) ··· 258 270 return 259 271 } 260 272 261 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt)) 273 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt)) 262 274 if err != nil { 263 275 log.Printf("NewPullComment: failed to get repos: %v", err) 264 276 return ··· 267 279 // build up the recipients list: 268 280 // - repo owner 269 281 // - all pull participants 270 - var recipients []syntax.DID 271 - recipients = append(recipients, syntax.DID(repo.Did)) 282 + // - remove those already mentioned 283 + recipients := sets.Singleton(syntax.DID(repo.Did)) 272 284 for _, p := range pull.Participants() { 273 - recipients = append(recipients, syntax.DID(p)) 285 + recipients.Insert(syntax.DID(p)) 286 + } 287 + for _, m := range mentions { 288 + recipients.Remove(m) 274 289 } 275 290 276 291 actorDid := syntax.DID(comment.OwnerDid) ··· 294 309 ) 295 310 n.notifyEvent( 296 311 actorDid, 297 - mentions, 312 + sets.Collect(slices.Values(mentions)), 298 313 models.NotificationTypeUserMentioned, 299 314 entityType, 300 315 entityId, ··· 321 336 } 322 337 323 338 func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 324 - 
// build up the recipients list: 325 - // - repo owner 326 - // - repo collaborators 327 - // - all issue participants 328 - var recipients []syntax.DID 329 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 330 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt())) 339 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 331 340 if err != nil { 332 341 log.Printf("failed to fetch collaborators: %v", err) 333 342 return 334 343 } 344 + 345 + // build up the recipients list: 346 + // - repo owner 347 + // - repo collaborators 348 + // - all issue participants 349 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 335 350 for _, c := range collaborators { 336 - recipients = append(recipients, c.SubjectDid) 351 + recipients.Insert(c.SubjectDid) 337 352 } 338 353 for _, p := range issue.Participants() { 339 - recipients = append(recipients, syntax.DID(p)) 354 + recipients.Insert(syntax.DID(p)) 340 355 } 341 356 342 357 entityType := "pull" ··· 366 381 367 382 func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 368 383 // Get repo details 369 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt))) 384 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 370 385 if err != nil { 371 386 log.Printf("NewPullState: failed to get repos: %v", err) 372 387 return 373 388 } 374 389 375 - // build up the recipients list: 376 - // - repo owner 377 - // - all pull participants 378 - var recipients []syntax.DID 379 - recipients = append(recipients, syntax.DID(repo.Did)) 380 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt())) 390 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 381 391 if err != nil { 382 392 log.Printf("failed to fetch collaborators: %v", err) 383 393 return 384 394 } 395 + 396 + // build up the recipients list: 397 + // - repo owner 398 + // - all pull participants 399 + recipients := sets.Singleton(syntax.DID(repo.Did)) 385 400 for _, c := range collaborators { 386 - recipients = append(recipients, c.SubjectDid) 401 + recipients.Insert(c.SubjectDid) 387 402 } 388 403 for _, p := range pull.Participants() { 389 - recipients = append(recipients, syntax.DID(p)) 404 + recipients.Insert(syntax.DID(p)) 390 405 } 391 406 392 407 entityType := "pull" ··· 422 437 423 438 func (n *databaseNotifier) notifyEvent( 424 439 actorDid syntax.DID, 425 - recipients []syntax.DID, 440 + recipients sets.Set[syntax.DID], 426 441 eventType models.NotificationType, 427 442 entityType string, 428 443 entityId string, ··· 430 445 issueId *int64, 431 446 pullId *int64, 432 447 ) { 433 - if eventType == models.NotificationTypeUserMentioned && len(recipients) > maxMentions { 434 - recipients = recipients[:maxMentions] 448 + // if the user is attempting to mention >maxMentions users, this is probably spam, do not mention anybody 449 + if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions { 450 + return 435 451 } 436 - recipientSet := make(map[syntax.DID]struct{}) 437 - for _, did := range recipients { 438 - // everybody except actor themselves 439 - if did != actorDid { 440 - recipientSet[did] = struct{}{} 441 - } 442 - } 452 + 453 + recipients.Remove(actorDid) 443 454 444 455 prefMap, err := db.GetNotificationPreferences( 445 456 n.db, 446 - db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))), 457 + 
orm.FilterIn("user_did", slices.Collect(recipients.All())), 447 458 ) 448 459 if err != nil { 449 460 // failed to get prefs for users ··· 459 470 defer tx.Rollback() 460 471 461 472 // filter based on preferences 462 - for recipientDid := range recipientSet { 473 + for recipientDid := range recipients.All() { 463 474 prefs, ok := prefMap[recipientDid] 464 475 if !ok { 465 476 prefs = models.DefaultNotificationPreferences(recipientDid)
-1
appview/notify/merged_notifier.go
··· 39 39 v.Call(in) 40 40 }(n) 41 41 } 42 - wg.Wait() 43 42 } 44 43 45 44 func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) {
+3 -2
appview/oauth/handler.go
··· 16 16 "tangled.org/core/api/tangled" 17 17 "tangled.org/core/appview/db" 18 18 "tangled.org/core/consts" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/tid" 20 21 ) 21 22 ··· 97 98 // and create an sh.tangled.spindle.member record with that 98 99 spindleMembers, err := db.GetSpindleMembers( 99 100 o.Db, 100 - db.FilterEq("instance", "spindle.tangled.sh"), 101 - db.FilterEq("subject", did), 101 + orm.FilterEq("instance", "spindle.tangled.sh"), 102 + orm.FilterEq("subject", did), 102 103 ) 103 104 if err != nil { 104 105 l.Error("failed to get spindle members", "err", err)
+7 -2
appview/pages/funcmap.go
··· 25 25 "github.com/dustin/go-humanize" 26 26 "github.com/go-enry/go-enry/v2" 27 27 "github.com/yuin/goldmark" 28 + emoji "github.com/yuin/goldmark-emoji" 28 29 "tangled.org/core/appview/filetree" 29 30 "tangled.org/core/appview/models" 30 31 "tangled.org/core/appview/pages/markup" ··· 162 163 } 163 164 return pairs, nil 164 165 }, 165 - "append": func(s []string, values ...string) []string { 166 + "append": func(s []any, values ...any) []any { 166 167 s = append(s, values...) 167 168 return s 168 169 }, ··· 261 262 }, 262 263 "description": func(text string) template.HTML { 263 264 p.rctx.RendererType = markup.RendererTypeDefault 264 - htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New()) 265 + htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New( 266 + goldmark.WithExtensions( 267 + emoji.Emoji, 268 + ), 269 + )) 265 270 sanitized := p.rctx.SanitizeDescription(htmlString) 266 271 return template.HTML(sanitized) 267 272 },
+13 -3
appview/pages/markup/extension/atlink.go
··· 35 35 return KindAt 36 36 } 37 37 38 - var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`) 38 + var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`) 39 + var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`) 39 40 40 41 type atParser struct{} 41 42 ··· 55 56 if m == nil { 56 57 return nil 57 58 } 59 + 60 + // Check for all links in the markdown to see if the handle found is inside one 61 + linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1) 62 + for _, linkMatch := range linksIndexes { 63 + if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] { 64 + return nil 65 + } 66 + } 67 + 58 68 atSegment := text.NewSegment(segment.Start, segment.Start+m[1]) 59 69 block.Advance(m[1]) 60 70 node := &AtNode{} ··· 87 97 88 98 func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) { 89 99 if entering { 90 - w.WriteString(`<a href="/@`) 100 + w.WriteString(`<a href="/`) 91 101 w.WriteString(n.(*AtNode).Handle) 92 - w.WriteString(`" class="mention font-bold">`) 102 + w.WriteString(`" class="mention">`) 93 103 } else { 94 104 w.WriteString("</a>") 95 105 }
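The new `atRegexp` only fires on dotted, domain-shaped handles, and the `markdownLinkRegexp` guard skips mentions that sit inside an existing markdown link. The test file later in this diff exercises the rendering; a quick standalone check of the pattern itself (copied verbatim from the hunk above) behaves like this:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the updated atlink.go: handles must look like dotted domain
// names, so bare "@name" and e-mail addresses no longer become mentions.
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)

func main() {
	for _, s := range []string{
		"hi @alice.tngl.sh",        // true: dotted handle
		"ping @bob",                // false: no dot, not domain-shaped
		"contact test@example.com", // false: '@' not preceded by start, space, or '('
	} {
		fmt.Printf("%-28q %v\n", s, atRegexp.MatchString(s))
	}
}
```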
+2 -2
appview/pages/markup/markdown.go
··· 12 12 13 13 chromahtml "github.com/alecthomas/chroma/v2/formatters/html" 14 14 "github.com/alecthomas/chroma/v2/styles" 15 - treeblood "github.com/wyatt915/goldmark-treeblood" 16 15 "github.com/yuin/goldmark" 16 + "github.com/yuin/goldmark-emoji" 17 17 highlighting "github.com/yuin/goldmark-highlighting/v2" 18 18 "github.com/yuin/goldmark/ast" 19 19 "github.com/yuin/goldmark/extension" ··· 65 65 extension.NewFootnote( 66 66 extension.WithFootnoteIDPrefix([]byte("footnote")), 67 67 ), 68 - treeblood.MathML(), 69 68 callout.CalloutExtention, 70 69 textension.AtExt, 70 + emoji.Emoji, 71 71 ), 72 72 goldmark.WithParserOptions( 73 73 parser.WithAutoHeadingID(),
+121
appview/pages/markup/markdown_test.go
··· 1 + package markup 2 + 3 + import ( 4 + "bytes" 5 + "testing" 6 + ) 7 + 8 + func TestAtExtension_Rendering(t *testing.T) { 9 + tests := []struct { 10 + name string 11 + markdown string 12 + expected string 13 + }{ 14 + { 15 + name: "renders simple at mention", 16 + markdown: "Hello @user.tngl.sh!", 17 + expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`, 18 + }, 19 + { 20 + name: "renders multiple at mentions", 21 + markdown: "Hi @alice.tngl.sh and @bob.example.com", 22 + expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`, 23 + }, 24 + { 25 + name: "renders at mention in parentheses", 26 + markdown: "Check this out (@user.tngl.sh)", 27 + expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`, 28 + }, 29 + { 30 + name: "does not render email", 31 + markdown: "Contact me at test@example.com", 32 + expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`, 33 + }, 34 + { 35 + name: "renders at mention with hyphen", 36 + markdown: "Follow @user-name.tngl.sh", 37 + expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`, 38 + }, 39 + { 40 + name: "renders at mention with numbers", 41 + markdown: "@user123.test456.social", 42 + expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`, 43 + }, 44 + { 45 + name: "at mention at start of line", 46 + markdown: "@user.tngl.sh is cool", 47 + expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`, 48 + }, 49 + } 50 + 51 + for _, tt := range tests { 52 + t.Run(tt.name, func(t *testing.T) { 53 + md := NewMarkdown() 54 + 55 + var buf bytes.Buffer 56 + if err := md.Convert([]byte(tt.markdown), &buf); err != nil { 57 + t.Fatalf("failed to convert markdown: %v", err) 58 + } 59 + 60 + result := buf.String() 61 + if result != tt.expected+"\n" { 62 + t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result) 63 + } 64 + }) 65 + } 66 + } 67 + 68 + func TestAtExtension_WithOtherMarkdown(t *testing.T) { 69 + tests := []struct { 70 + name string 71 + markdown string 72 + contains string 73 + }{ 74 + { 75 + name: "at mention with bold", 76 + markdown: "**Hello @user.tngl.sh**", 77 + contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`, 78 + }, 79 + { 80 + name: "at mention with italic", 81 + markdown: "*Check @user.tngl.sh*", 82 + contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`, 83 + }, 84 + { 85 + name: "at mention in list", 86 + markdown: "- Item 1\n- @user.tngl.sh\n- Item 3", 87 + contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`, 88 + }, 89 + { 90 + name: "at mention in link", 91 + markdown: "[@regnault.dev](https://regnault.dev)", 92 + contains: `<a href="https://regnault.dev">@regnault.dev</a>`, 93 + }, 94 + { 95 + name: "at mention in link again", 96 + markdown: "[check out @regnault.dev](https://regnault.dev)", 97 + contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`, 98 + }, 99 + { 100 + name: "at mention in link again, multiline", 101 + markdown: "[\ncheck out @regnault.dev](https://regnault.dev)", 102 + contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>", 103 + }, 104 + } 105 + 106 + for _, tt := range tests { 107 + t.Run(tt.name, func(t *testing.T) { 108 + md := NewMarkdown() 109 + 110 + var buf bytes.Buffer 111 + 
if err := md.Convert([]byte(tt.markdown), &buf); err != nil { 112 + t.Fatalf("failed to convert markdown: %v", err) 113 + } 114 + 115 + result := buf.String() 116 + if !bytes.Contains([]byte(result), []byte(tt.contains)) { 117 + t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result) 118 + } 119 + }) 120 + } 121 + }
+2 -3
appview/pages/pages.go
··· 31 31 "github.com/bluesky-social/indigo/atproto/identity" 32 32 "github.com/bluesky-social/indigo/atproto/syntax" 33 33 "github.com/go-git/go-git/v5/plumbing" 34 - "github.com/go-git/go-git/v5/plumbing/object" 35 34 ) 36 35 37 36 //go:embed templates/* static legal ··· 641 640 } 642 641 643 642 func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error { 644 - return p.executePlain("fragments/starBtn", w, params) 643 + return p.executePlain("fragments/starBtn-oob", w, params) 645 644 } 646 645 647 646 type RepoIndexParams struct { ··· 649 648 RepoInfo repoinfo.RepoInfo 650 649 Active string 651 650 TagMap map[string][]string 652 - CommitsTrunc []*object.Commit 651 + CommitsTrunc []types.Commit 653 652 TagsTrunc []*types.TagReference 654 653 BranchesTrunc []types.Branch 655 654 // ForkInfo *types.ForkInfo
-46
appview/pages/repoinfo/repoinfo.go
··· 1 1 package repoinfo 2 2 3 3 import ( 4 - "encoding/json" 5 4 "fmt" 6 5 "path" 7 6 "slices" ··· 118 117 func (r RolesInRepo) IsPushAllowed() bool { 119 118 return slices.Contains(r.Roles, "repo:push") 120 119 } 121 - 122 - // PrimaryLanguage returns the first (most used) language from a list, or empty string if none 123 - func PrimaryLanguage(languages []interface{}) string { 124 - if len(languages) == 0 { 125 - return "" 126 - } 127 - 128 - // Languages are already sorted by percentage in descending order 129 - // Just get the first one 130 - if firstLang, ok := languages[0].(map[string]interface{}); ok { 131 - if name, ok := firstLang["Name"].(string); ok { 132 - return name 133 - } 134 - } 135 - 136 - return "" 137 - } 138 - 139 - // StructuredData generates Schema.org JSON-LD structured data for the repository 140 - func (r RepoInfo) StructuredData(primaryLanguage string) string { 141 - data := map[string]interface{}{ 142 - "@context": "https://schema.org", 143 - "@type": "SoftwareSourceCode", 144 - "name": r.Name, 145 - "description": r.Description, 146 - "codeRepository": "https://tangled.org/" + r.FullName(), 147 - "url": "https://tangled.org/" + r.FullName(), 148 - "author": map[string]interface{}{ 149 - "@type": "Person", 150 - "name": r.owner(), 151 - "url": "https://tangled.org/" + r.owner(), 152 - }, 153 - } 154 - 155 - // Add programming language if available 156 - if primaryLanguage != "" { 157 - data["programmingLanguage"] = primaryLanguage 158 - } 159 - 160 - jsonBytes, err := json.Marshal(data) 161 - if err != nil { 162 - return "{}" 163 - } 164 - return string(jsonBytes) 165 - }
+1 -1
appview/pages/templates/banner.html
··· 30 30 <div class="mx-6"> 31 31 These services may not be fully accessible until upgraded. 32 32 <a class="underline text-red-800 dark:text-red-200" 33 - href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md"> 33 + href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles"> 34 34 Click to read the upgrade guide</a>. 35 35 </div> 36 36 </details>
-22
appview/pages/templates/fragments/breadcrumb.html
··· 1 - {{ define "fragments/breadcrumb" }} 2 - {{ $items := . }} 3 - {{ if gt (len $items) 0 }} 4 - <script type="application/ld+json"> 5 - { 6 - "@context": "https://schema.org", 7 - "@type": "BreadcrumbList", 8 - "itemListElement": [ 9 - {{ range $idx, $item := $items }} 10 - {{ if gt $idx 0 }},{{ end }} 11 - { 12 - "@type": "ListItem", 13 - "position": {{ add $idx 1 }}, 14 - "name": "{{ index $item 0 }}", 15 - "item": "{{ index $item 1 }}" 16 - } 17 - {{ end }} 18 - ] 19 - } 20 - </script> 21 - {{ end }} 22 - {{ end }}
-44
appview/pages/templates/fragments/dolly/logo.svg
··· 1 - <svg 2 - version="1.1" 3 - id="svg1" 4 - width="25" 5 - height="25" 6 - viewBox="0 0 25 25" 7 - sodipodi:docname="tangled_dolly_face_only.png" 8 - xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" 9 - xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" 10 - xmlns:xlink="http://www.w3.org/1999/xlink" 11 - xmlns="http://www.w3.org/2000/svg" 12 - xmlns:svg="http://www.w3.org/2000/svg"> 13 - <title>Dolly</title> 14 - <defs 15 - id="defs1" /> 16 - <sodipodi:namedview 17 - id="namedview1" 18 - pagecolor="#ffffff" 19 - bordercolor="#000000" 20 - borderopacity="0.25" 21 - inkscape:showpageshadow="2" 22 - inkscape:pageopacity="0.0" 23 - inkscape:pagecheckerboard="true" 24 - inkscape:deskcolor="#d5d5d5"> 25 - <inkscape:page 26 - x="0" 27 - y="0" 28 - width="25" 29 - height="25" 30 - id="page2" 31 - margin="0" 32 - bleed="0" /> 33 - </sodipodi:namedview> 34 - <g 35 - inkscape:groupmode="layer" 36 - inkscape:label="Image" 37 - id="g1"> 38 - <path 39 - fill="currentColor" 40 - style="stroke-width:0.111183" 41 - d="m 16.348974,24.09935 -0.06485,-0.03766 -0.202005,-0.0106 -0.202008,-0.01048 -0.275736,-0.02601 -0.275734,-0.02602 v -0.02649 -0.02648 l -0.204577,-0.04019 -0.204578,-0.04019 -0.167616,-0.08035 -0.167617,-0.08035 -0.0014,-0.04137 -0.0014,-0.04137 -0.266473,-0.143735 -0.266475,-0.143735 -0.276098,-0.20335 -0.2761,-0.203347 -0.262064,-0.251949 -0.262064,-0.25195 -0.22095,-0.284628 -0.220948,-0.284629 -0.170253,-0.284631 -0.170252,-0.284628 -0.01341,-0.0144 -0.0134,-0.0144 -0.141982,0.161297 -0.14198,0.1613 -0.22313,0.21426 -0.223132,0.214264 -0.186025,0.146053 -0.186023,0.14605 -0.252501,0.163342 -0.252502,0.163342 -0.249014,0.115348 -0.249013,0.115336 0.0053,0.03241 0.0053,0.03241 -0.1716725,0.04599 -0.171669,0.046 -0.3379966,0.101058 -0.3379972,0.101058 -0.1778925,0.04506 -0.1778935,0.04508 -0.3913655,0.02601 -0.3913643,0.02603 -0.3557868,-0.03514 -0.3557863,-0.03514 -0.037426,-0.03029 -0.037427,-0.03029 -0.076924,0.02011 -0.076924,0.02011 -0.050508,-0.05051 -0.050405,-0.05056 L 6.6604532,23.110188 6.451745,23.063961 6.1546135,22.960559 5.8574835,22.857156 5.5319879,22.694039 5.2064938,22.530922 4.8793922,22.302961 4.5522905,22.075005 4.247598,21.786585 3.9429055,21.49817 3.7185335,21.208777 3.4941628,20.919385 3.3669822,20.705914 3.239803,20.492443 3.1335213,20.278969 3.0272397,20.065499 2.9015252,19.7275 2.7758105,19.389504 2.6925225,18.998139 2.6092345,18.606774 2.6096814,17.91299 2.6101284,17.219208 2.6744634,16.90029 2.7387984,16.581374 2.8474286,16.242088 2.9560588,15.9028 3.1137374,15.583492 3.2714148,15.264182 3.3415068,15.150766 3.4115988,15.03735 3.3127798,14.96945 3.2139618,14.90157 3.0360685,14.800239 2.8581753,14.698908 2.5913347,14.503228 2.3244955,14.307547 2.0621238,14.055599 1.7997507,13.803651 1.6111953,13.56878 1.4226411,13.333906 1.2632237,13.087474 1.1038089,12.841042 0.97442,12.575195 0.8450307,12.30935 0.724603,11.971351 0.6041766,11.633356 0.52150365,11.241991 0.4388285,10.850626 0.44091592,10.156842 0.44300333,9.4630594 0.54235911,9.0369608 0.6417149,8.6108622 0.7741173,8.2694368 0.9065196,7.9280115 1.0736303,7.6214262 1.2407515,7.3148397 1.45931,7.0191718 1.6778685,6.7235039 1.9300326,6.4611321 2.1821966,6.1987592 2.4134579,6.0137228 2.6447193,5.8286865 2.8759792,5.6776409 3.1072406,5.526594 3.4282004,5.3713977 3.7491603,5.2162016 3.9263009,5.1508695 4.1034416,5.0855373 4.2813348,4.7481598 4.4592292,4.4107823 4.6718,4.108422 4.8843733,3.8060618 5.198353,3.4805372 5.5123313,3.155014 5.7685095,2.9596425 6.0246877,2.7642722 
6.329187,2.5851365 6.6336863,2.406002 6.9497657,2.2751596 7.2658453,2.1443184 7.4756394,2.0772947 7.6854348,2.01027 8.0825241,1.931086 8.4796139,1.851902 l 0.5870477,0.00291 0.5870469,0.00291 0.4447315,0.092455 0.444734,0.092455 0.302419,0.1105495 0.302417,0.1105495 0.329929,0.1646046 0.32993,0.1646033 0.239329,-0.2316919 0.239329,-0.2316919 0.160103,-0.1256767 0.160105,-0.1256767 0.160102,-0.1021909 0.160105,-0.1021899 0.142315,-0.082328 0.142314,-0.082328 0.231262,-0.1090091 0.231259,-0.1090091 0.26684,-0.098743 0.266839,-0.098743 0.320208,-0.073514 0.320209,-0.073527 0.355787,-0.041833 0.355785,-0.041834 0.426942,0.023827 0.426945,0.023828 0.355785,0.071179 0.355788,0.0711791 0.284627,0.09267 0.284629,0.09267 0.28514,0.1310267 0.28514,0.1310255 0.238179,0.1446969 0.238174,0.1446979 0.259413,0.1955332 0.259413,0.1955319 0.290757,0.296774 0.290758,0.2967753 0.151736,0.1941581 0.151734,0.1941594 0.135326,0.2149951 0.135327,0.2149952 0.154755,0.3202073 0.154758,0.3202085 0.09409,0.2677358 0.09409,0.267737 0.06948,0.3319087 0.06948,0.3319099 0.01111,0.00808 0.01111,0.00808 0.444734,0.2173653 0.444734,0.2173665 0.309499,0.2161102 0.309497,0.2161101 0.309694,0.2930023 0.309694,0.2930037 0.18752,0.2348726 0.187524,0.2348727 0.166516,0.2574092 0.166519,0.2574108 0.15273,0.3260252 0.152734,0.3260262 0.08972,0.2668403 0.08971,0.2668391 0.08295,0.3913655 0.08295,0.3913652 -6.21e-4,0.6582049 -6.21e-4,0.658204 -0.06362,0.315725 -0.06362,0.315725 -0.09046,0.289112 -0.09046,0.289112 -0.122759,0.281358 -0.12276,0.281356 -0.146626,0.252323 -0.146629,0.252322 -0.190443,0.258668 -0.190448,0.258671 -0.254911,0.268356 -0.254911,0.268355 -0.286872,0.223127 -0.286874,0.223127 -0.320203,0.187693 -0.320209,0.187693 -0.04347,0.03519 -0.04347,0.03521 0.0564,0.12989 0.0564,0.129892 0.08728,0.213472 0.08728,0.213471 0.189755,0.729363 0.189753,0.729362 0.0652,0.302417 0.0652,0.302419 -0.0018,0.675994 -0.0018,0.675995 -0.0801,0.373573 -0.08009,0.373577 -0.09,0.266839 -0.09,0.26684 -0.190389,0.391364 -0.19039,0.391366 -0.223169,0.320207 -0.223167,0.320209 -0.303585,0.315294 -0.303584,0.315291 -0.284631,0.220665 -0.284629,0.220663 -0.220128,0.132359 -0.220127,0.132358 -0.242395,0.106698 -0.242394,0.106699 -0.08895,0.04734 -0.08895,0.04733 -0.249052,0.07247 -0.24905,0.07247 -0.322042,0.0574 -0.322044,0.0574 -0.282794,-0.003 -0.282795,-0.003 -0.07115,-0.0031 -0.07115,-0.0031 -0.177894,-0.0033 -0.177893,-0.0033 -0.124528,0.02555 -0.124528,0.02555 z m -4.470079,-5.349839 0.214838,-0.01739 0.206601,-0.06782 0.206602,-0.06782 0.244389,-0.117874 0.244393,-0.11786 0.274473,-0.206822 0.27447,-0.20682 0.229308,-0.257201 0.229306,-0.2572 0.219161,-0.28463 0.219159,-0.284629 0.188541,-0.284628 0.188543,-0.28463 0.214594,-0.373574 0.214593,-0.373577 0.133861,-0.312006 0.133865,-0.312007 0.02861,-0.01769 0.02861,-0.01769 0.197275,0.26212 0.197278,0.262119 0.163613,0.150814 0.163614,0.150814 0.201914,0.09276 0.201914,0.09276 0.302417,0.01421 0.302418,0.01421 0.213472,-0.08025 0.213471,-0.08025 0.200606,-0.204641 0.200606,-0.204642 0.09242,-0.278887 0.09241,-0.278888 0.05765,-0.302418 0.05764,-0.302416 L 18.41327,13.768114 18.39502,13.34117 18.31849,12.915185 18.24196,12.4892 18.15595,12.168033 18.06994,11.846867 17.928869,11.444534 17.787801,11.042201 17.621278,10.73296 17.454757,10.423723 17.337388,10.263619 17.220021,10.103516 17.095645,9.9837986 16.971268,9.8640816 16.990048,9.6813736 17.008828,9.4986654 16.947568,9.249616 16.886308,9.0005655 16.752419,8.7159355 16.618521,8.4313217 16.435707,8.2294676 16.252892,8.0276114 
16.079629,7.9004245 15.906366,7.773238 l -0.20429,0.1230127 -0.204289,0.1230121 -0.26702,0.059413 -0.267022,0.059413 -0.205761,-0.021508 -0.205766,-0.021508 -0.23495,-0.08844 -0.234953,-0.08844 -0.118429,-0.090334 -0.118428,-0.090333 h -0.03944 -0.03944 L 13.711268,7.8540732 13.655958,7.9706205 13.497227,8.1520709 13.338499,8.3335203 13.168394,8.4419112 12.998289,8.550301 12.777045,8.624223 12.5558,8.698155 H 12.275611 11.995429 L 11.799973,8.6309015 11.604513,8.5636472 11.491311,8.5051061 11.37811,8.446565 11.138172,8.2254579 10.898231,8.0043497 l -0.09565,-0.084618 -0.09565,-0.084613 -0.218822,0.198024 -0.218822,0.1980231 -0.165392,0.078387 -0.1653925,0.078387 -0.177894,0.047948 -0.177892,0.047948 L 9.3635263,8.4842631 9.144328,8.4846889 8.9195029,8.4147138 8.6946778,8.3447386 8.5931214,8.4414036 8.491565,8.5380686 8.3707618,8.7019598 8.2499597,8.8658478 8.0802403,8.9290726 7.9105231,8.9922974 7.7952769,9.0780061 7.6800299,9.1637148 7.5706169,9.2778257 7.4612038,9.3919481 7.1059768,9.9205267 6.7507497,10.449105 l -0.2159851,0.449834 -0.2159839,0.449834 -0.2216572,0.462522 -0.2216559,0.462523 -0.1459343,0.337996 -0.1459342,0.337998 -0.055483,0.220042 -0.055483,0.220041 -0.015885,0.206903 -0.015872,0.206901 0.034307,0.242939 0.034307,0.24294 0.096281,0.196632 0.096281,0.196634 0.143607,0.125222 0.1436071,0.125222 0.1873143,0.08737 0.1873141,0.08737 0.2752084,0.002 0.2752084,0.002 0.2312297,-0.09773 0.231231,-0.09772 0.1067615,-0.07603 0.1067614,-0.07603 0.3679062,-0.29377 0.3679065,-0.293771 0.026804,0.01656 0.026804,0.01656 0.023626,0.466819 0.023626,0.466815 0.088326,0.513195 0.088326,0.513193 0.08897,0.364413 0.08897,0.364411 0.1315362,0.302418 0.1315352,0.302418 0.1051964,0.160105 0.1051954,0.160103 0.1104741,0.11877 0.1104731,0.118769 0.2846284,0.205644 0.2846305,0.205642 0.144448,0.07312 0.144448,0.07312 0.214787,0.05566 0.214787,0.05566 0.245601,0.03075 0.245602,0.03075 0.204577,-0.0125 0.204578,-0.0125 z m 0.686342,-3.497495 -0.11281,-0.06077 -0.106155,-0.134033 -0.106155,-0.134031 -0.04406,-0.18371 -0.04406,-0.183707 0.02417,-0.553937 0.02417,-0.553936 0.03513,-0.426945 0.03513,-0.426942 0.07225,-0.373576 0.07225,-0.373575 0.05417,-0.211338 0.05417,-0.211339 0.0674,-0.132112 0.0674,-0.132112 0.132437,-0.10916 0.132437,-0.109161 0.187436,-0.04195 0.187438,-0.04195 0.170366,0.06469 0.170364,0.06469 0.114312,0.124073 0.114313,0.124086 0.04139,0.18495 0.04139,0.184951 -0.111218,0.459845 -0.111219,0.459844 -0.03383,0.26584 -0.03382,0.265841 -0.03986,0.818307 -0.03986,0.818309 -0.0378,0.15162 -0.03779,0.151621 -0.11089,0.110562 -0.110891,0.110561 -0.114489,0.04913 -0.114489,0.04913 -0.187932,-0.0016 -0.187929,-0.0016 z m -2.8087655,-0.358124 -0.146445,-0.06848 -0.088025,-0.119502 -0.088024,-0.119502 -0.038581,-0.106736 -0.038581,-0.106736 -0.02237,-0.134956 -0.02239,-0.134957 -0.031955,-0.46988 -0.031955,-0.469881 0.036203,-0.444733 0.036203,-0.444731 0.048862,-0.215257 0.048862,-0.215255 0.076082,-0.203349 0.076081,-0.203348 0.0936,-0.111244 0.0936,-0.111245 0.143787,-0.06531 0.1437865,-0.06532 h 0.142315 0.142314 l 0.142314,0.06588 0.142316,0.06588 0.093,0.102325 0.093,0.102325 0.04042,0.120942 0.04042,0.120942 v 0.152479 0.152477 l -0.03347,0.08804 -0.03347,0.08805 -0.05693,0.275653 -0.05693,0.275651 2.11e-4,0.430246 2.12e-4,0.430243 0.04294,0.392646 0.04295,0.392647 -0.09189,0.200702 -0.09189,0.200702 -0.148688,0.0984 -0.148687,0.0984 -0.20136,0.01212 -0.2013595,0.01212 z" 42 - id="path4" /> 43 - </g> 44 - </svg>
+5
appview/pages/templates/fragments/starBtn-oob.html
··· 1 + {{ define "fragments/starBtn-oob" }} 2 + <div hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'> 3 + {{ template "fragments/starBtn" . }} 4 + </div> 5 + {{ end }}
+1 -3
appview/pages/templates/fragments/starBtn.html
··· 1 1 {{ define "fragments/starBtn" }} 2 + {{/* NOTE: this fragment is always replaced with hx-swap-oob */}} 2 3 <button 3 4 id="starBtn" 4 5 class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group" ··· 10 11 {{ end }} 11 12 12 13 hx-trigger="click" 13 - hx-target="this" 14 - hx-swap="outerHTML" 15 - hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]' 16 14 hx-disabled-elt="#starBtn" 17 15 > 18 16 {{ if .IsStarred }}
+22
appview/pages/templates/fragments/tinyAvatarList.html
··· 1 + {{ define "fragments/tinyAvatarList" }} 2 + {{ $all := .all }} 3 + {{ $classes := .classes }} 4 + {{ $ps := take $all 5 }} 5 + <div class="inline-flex items-center -space-x-3"> 6 + {{ $c := "z-50 z-40 z-30 z-20 z-10" }} 7 + {{ range $i, $p := $ps }} 8 + <img 9 + src="{{ tinyAvatar . }}" 10 + alt="" 11 + class="rounded-full size-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0 {{ $classes }}" 12 + /> 13 + {{ end }} 14 + 15 + {{ if gt (len $all) 5 }} 16 + <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm"> 17 + +{{ sub (len $all) 5 }} 18 + </span> 19 + {{ end }} 20 + </div> 21 + {{ end }} 22 +
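`fragments/tinyAvatarList` leans on a few template helpers (`take`, `sub`, `dict`, `tinyAvatar`) whose implementations are not part of this compare. A rough sketch of what such a func map could look like, purely for orientation; the names match the calls above, but the bodies and the avatar URL shape are guesses:

```go
package pages

import (
	"fmt"
	"html/template"
)

// Illustrative helpers only; the appview's real funcmap lives elsewhere.
func avatarListFuncs() template.FuncMap {
	return template.FuncMap{
		// take returns at most n leading elements of a slice.
		"take": func(xs []string, n int) []string {
			if n > len(xs) {
				n = len(xs)
			}
			return xs[:n]
		},
		// sub is integer subtraction, used here for the z-index countdown.
		"sub": func(a, b int) int { return a - b },
		// dict turns alternating key/value arguments into a map, so a
		// fragment can be invoked with named parameters like "all" and "classes".
		"dict": func(kv ...any) (map[string]any, error) {
			if len(kv)%2 != 0 {
				return nil, fmt.Errorf("dict: odd number of arguments")
			}
			m := make(map[string]any, len(kv)/2)
			for i := 0; i < len(kv); i += 2 {
				key, ok := kv[i].(string)
				if !ok {
					return nil, fmt.Errorf("dict: key %d is not a string", i)
				}
				m[key] = kv[i+1]
			}
			return m, nil
		},
		// tinyAvatar would normally build an avatar URL for a did/handle;
		// this URL shape is a placeholder, not the real CDN scheme.
		"tinyAvatar": func(ident string) string {
			return "https://avatar.tangled.sh/" + ident
		},
	}
}
```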
+2 -27
appview/pages/templates/goodfirstissues/index.html
··· 1 1 {{ define "title" }}good first issues{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - <meta name="description" content="Discover beginner-friendly good first issues across open source projects on Tangled. Perfect for new contributors looking to get started with open source development." /> 5 - <meta name="keywords" content="good first issues, beginner issues, open source contribution, first time contributor, beginner friendly, open source projects" /> 6 - 7 4 <meta property="og:title" content="good first issues · tangled" /> 8 - <meta property="og:type" content="website" /> 5 + <meta property="og:type" content="object" /> 9 6 <meta property="og:url" content="https://tangled.org/goodfirstissues" /> 10 - <meta property="og:description" content="Find beginner-friendly issues across all repositories to get started with open source contributions on Tangled." /> 11 - 12 - <meta name="twitter:card" content="summary" /> 13 - <meta name="twitter:title" content="good first issues · tangled" /> 14 - <meta name="twitter:description" content="Find beginner-friendly issues to get started with open source contributions." /> 15 - 16 - <!-- structured data for good first issues page --> 17 - <script type="application/ld+json"> 18 - { 19 - "@context": "https://schema.org", 20 - "@type": "CollectionPage", 21 - "name": "Good First Issues", 22 - "description": "A curated collection of beginner-friendly issues across open source projects", 23 - "url": "https://tangled.org/goodfirstissues", 24 - "isPartOf": { 25 - "@type": "WebSite", 26 - "name": "Tangled", 27 - "url": "https://tangled.org" 28 - } 29 - } 30 - </script> 7 + <meta property="og:description" content="Find good first issues to contribute to open source projects" /> 31 8 {{ end }} 32 - 33 - {{ define "canonical" }}https://tangled.org/goodfirstissues{{ end }} 34 9 35 10 {{ define "content" }} 36 11 <div class="grid grid-cols-10">
+1 -1
appview/pages/templates/knots/index.html
··· 105 105 {{ define "docsButton" }} 106 106 <a 107 107 class="btn flex items-center gap-2" 108 - href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 108 + href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide"> 109 109 {{ i "book" "size-4" }} 110 110 docs 111 111 </a>
+2 -26
appview/pages/templates/layouts/base.html
··· 4 4 <head> 5 5 <meta charset="UTF-8" /> 6 6 <meta name="viewport" content="width=device-width, initial-scale=1.0"/> 7 - <meta name="description" content="tightly-knit social coding"/> 8 - <meta name="keywords" content="git hosting, social coding, version control, pull requests, CI/CD, code collaboration, open source, decentralized"/> 7 + <meta name="description" content="Social coding, but for real this time!"/> 9 8 <meta name="htmx-config" content='{"includeIndicatorStyles": false}'> 10 - <meta name="author" content="Tangled"/> 11 - 12 - <!-- Canonical URL --> 13 - <link rel="canonical" href="{{ block "canonical" . }}https://tangled.org{{ .Request.URL.Path }}{{ end }}" /> 14 9 15 10 <script defer src="/static/htmx.min.js"></script> 16 11 <script defer src="/static/htmx-ext-ws.min.js"></script> ··· 20 15 <link rel="preconnect" href="https://avatar.tangled.sh" /> 21 16 <link rel="preconnect" href="https://camo.tangled.sh" /> 22 17 23 - <!-- RSS Feed Discovery --> 24 - {{ block "rss" . }}{{ end }} 25 - 26 18 <!-- pwa manifest --> 27 19 <link rel="manifest" href="/pwa-manifest.json" /> 28 20 ··· 30 22 <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin /> 31 23 32 24 <link rel="stylesheet" href="/static/tw.css?{{ cssContentHash }}" type="text/css" /> 33 - <title>{{ block "title" . }}{{ end }}</title> 34 - 35 - <!-- Structured Data --> 36 - {{ block "structuredData" . }} 37 - <script type="application/ld+json"> 38 - { 39 - "@context": "https://schema.org", 40 - "@type": "Organization", 41 - "name": "Tangled", 42 - "url": "https://tangled.org", 43 - "logo": "https://tangled.org/favicon.svg", 44 - "description": "tightly-knit social coding", 45 - "sameAs": [] 46 - } 47 - </script> 48 - {{ end }} 49 - 25 + <title>{{ block "title" . }}{{ end }} · tangled</title> 50 26 {{ block "extrameta" . }}{{ end }} 51 27 </head> 52 28 <body class="min-h-screen flex flex-col gap-4 bg-slate-100 dark:bg-gray-900 dark:text-white transition-colors duration-200">
+2 -2
appview/pages/templates/layouts/fragments/footer.html
··· 26 26 <div class="flex flex-col gap-1"> 27 27 <div class="{{ $headerStyle }}">resources</div> 28 28 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 29 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 29 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 30 30 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 31 31 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 32 32 </div> ··· 73 73 <div class="flex flex-col gap-1"> 74 74 <div class="{{ $headerStyle }}">resources</div> 75 75 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 76 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 76 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 77 77 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 78 78 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 79 79 </div>
+1 -20
appview/pages/templates/layouts/profilebase.html
··· 10 10 <meta property="og:image" content="{{ $avatarUrl }}" /> 11 11 <meta property="og:image:width" content="512" /> 12 12 <meta property="og:image:height" content="512" /> 13 - 13 + 14 14 <meta name="twitter:card" content="summary" /> 15 15 <meta name="twitter:title" content="{{ $handle }}" /> 16 16 <meta name="twitter:description" content="{{ or .Card.Profile.Description $handle }}" /> 17 17 <meta name="twitter:image" content="{{ $avatarUrl }}" /> 18 - 19 - <!-- structured data for user profile --> 20 - <script type="application/ld+json"> 21 - { 22 - "@context": "https://schema.org", 23 - "@type": "Person", 24 - "name": "{{ or .Card.Profile.DisplayName .Card.UserHandle .Card.UserDid }}", 25 - "url": "https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}", 26 - "image": "{{ $avatarUrl }}", 27 - "description": "{{ .Card.Profile.Description }}"{{ if .Card.UserHandle }}, 28 - "identifier": "{{ .Card.UserHandle }}"{{ end }} 29 - } 30 - </script> 31 - {{ end }} 32 - 33 - {{ define "canonical" }}https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}{{ end }} 34 - 35 - {{ define "rss" }} 36 - <link rel="alternate" type="application/atom+xml" title="{{ or .Card.UserHandle .Card.UserDid }} Activity Feed" href="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}/feed.atom" /> 37 18 {{ end }} 38 19 39 20 {{ define "content" }}
+34 -9
appview/pages/templates/repo/commit.html
··· 25 25 </div> 26 26 27 27 <div class="flex flex-wrap items-center space-x-2"> 28 - <p class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-300"> 29 - {{ $did := index $.EmailToDid $commit.Author.Email }} 30 - 31 - {{ if $did }} 32 - {{ template "user/fragments/picHandleLink" $did }} 33 - {{ else }} 34 - <a href="mailto:{{ $commit.Author.Email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $commit.Author.Name }}</a> 35 - {{ end }} 28 + <p class="flex flex-wrap items-center gap-1 text-sm text-gray-500 dark:text-gray-300"> 29 + {{ template "attribution" . }} 36 30 37 31 <span class="px-1 select-none before:content-['\00B7']"></span> 38 - {{ template "repo/fragments/time" $commit.Author.When }} 32 + {{ template "repo/fragments/time" $commit.Committer.When }} 39 33 <span class="px-1 select-none before:content-['\00B7']"></span> 40 34 41 35 <a href="/{{ $repo }}/commit/{{ $commit.This }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ slice $commit.This 0 8 }}</a> ··· 78 72 79 73 </section> 80 74 {{end}} 75 + 76 + {{ define "attribution" }} 77 + {{ $commit := .Diff.Commit }} 78 + {{ $showCommitter := true }} 79 + {{ if eq $commit.Author.Email $commit.Committer.Email }} 80 + {{ $showCommitter = false }} 81 + {{ end }} 82 + 83 + {{ if $showCommitter }} 84 + authored by {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid) }} 85 + {{ range $commit.CoAuthors }} 86 + {{ template "attributedUser" (list .Email .Name $.EmailToDid) }} 87 + {{ end }} 88 + and committed by {{ template "attributedUser" (list $commit.Committer.Email $commit.Committer.Name $.EmailToDid) }} 89 + {{ else }} 90 + {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid )}} 91 + {{ end }} 92 + {{ end }} 93 + 94 + {{ define "attributedUser" }} 95 + {{ $email := index . 0 }} 96 + {{ $name := index . 1 }} 97 + {{ $map := index . 2 }} 98 + {{ $did := index $map $email }} 99 + 100 + {{ if $did }} 101 + {{ template "user/fragments/picHandleLink" $did }} 102 + {{ else }} 103 + <a href="mailto:{{ $email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $name }}</a> 104 + {{ end }} 105 + {{ end }} 81 106 82 107 {{ define "topbarLayout" }} 83 108 <header class="col-span-full" style="z-index: 20;">
+1 -1
appview/pages/templates/repo/empty.html
··· 26 26 {{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }} 27 27 {{ $knot := .RepoInfo.Knot }} 28 28 {{ if eq $knot "knot1.tangled.sh" }} 29 - {{ $knot = "tangled.sh" }} 29 + {{ $knot = "tangled.org" }} 30 30 {{ end }} 31 31 <div class="w-full flex place-content-center"> 32 32 <div class="py-6 w-fit flex flex-col gap-4">
+6 -6
appview/pages/templates/repo/fragments/backlinks.html
··· 14 14 <div class="flex gap-2 items-center"> 15 15 {{ if .State.IsClosed }} 16 16 <span class="text-gray-500 dark:text-gray-400"> 17 - {{ i "ban" "w-4 h-4" }} 17 + {{ i "ban" "size-3" }} 18 18 </span> 19 19 {{ else if eq .Kind.String "issues" }} 20 20 <span class="text-green-600 dark:text-green-500"> 21 - {{ i "circle-dot" "w-4 h-4" }} 21 + {{ i "circle-dot" "size-3" }} 22 22 </span> 23 23 {{ else if .State.IsOpen }} 24 24 <span class="text-green-600 dark:text-green-500"> 25 - {{ i "git-pull-request" "w-4 h-4" }} 25 + {{ i "git-pull-request" "size-3" }} 26 26 </span> 27 27 {{ else if .State.IsMerged }} 28 28 <span class="text-purple-600 dark:text-purple-500"> 29 - {{ i "git-merge" "w-4 h-4" }} 29 + {{ i "git-merge" "size-3" }} 30 30 </span> 31 31 {{ else }} 32 32 <span class="text-gray-600 dark:text-gray-300"> 33 - {{ i "git-pull-request-closed" "w-4 h-4" }} 33 + {{ i "git-pull-request-closed" "size-3" }} 34 34 </span> 35 35 {{ end }} 36 - <a href="{{ . }}"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a> 36 + <a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a> 37 37 </div> 38 38 {{ if not (eq $.RepoInfo.FullName $repoUrl) }} 39 39 <div>
+1 -1
appview/pages/templates/repo/fragments/diff.html
··· 17 17 {{ else }} 18 18 {{ range $idx, $hunk := $diff }} 19 19 {{ with $hunk }} 20 - <details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 20 + <details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 21 21 <summary class="list-none cursor-pointer sticky top-0"> 22 22 <div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between"> 23 23 <div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
+1 -16
appview/pages/templates/repo/fragments/participants.html
··· 6 6 <span class="font-bold text-gray-500 dark:text-gray-400 capitalize">Participants</span> 7 7 <span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 ml-1">{{ len $all }}</span> 8 8 </div> 9 - <div class="flex items-center -space-x-3 mt-2"> 10 - {{ $c := "z-50 z-40 z-30 z-20 z-10" }} 11 - {{ range $i, $p := $ps }} 12 - <img 13 - src="{{ tinyAvatar . }}" 14 - alt="" 15 - class="rounded-full h-8 w-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0" 16 - /> 17 - {{ end }} 18 - 19 - {{ if gt (len $all) 5 }} 20 - <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm"> 21 - +{{ sub (len $all) 5 }} 22 - </span> 23 - {{ end }} 24 - </div> 9 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "w-8 h-8") }} 25 10 </div> 26 11 {{ end }}
+35 -35
appview/pages/templates/repo/fragments/splitDiff.html
··· 3 3 {{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}} 4 4 {{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}} 5 5 {{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}} 6 - {{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 6 + {{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 7 7 {{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}} 8 8 {{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}} 9 9 {{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}} 10 10 {{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}} 11 11 {{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}} 12 12 <div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700"> 13 - <pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 13 + <div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 14 14 {{- range .LeftLines -}} 15 15 {{- if .IsEmpty -}} 16 - <div class="{{ $emptyStyle }} {{ $containerStyle }}"> 17 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div> 18 - <div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div> 19 - <div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div> 20 - </div> 16 + <span class="{{ $emptyStyle }} {{ $containerStyle }}"> 17 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span> 18 + <span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span> 19 + <span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span> 20 + </span> 21 21 {{- else if eq .Op.String "-" -}} 22 - <div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 23 - <div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div> 24 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 25 - <div class="px-2">{{ .Content }}</div> 26 - </div> 22 + <span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 23 + <span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span> 24 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 25 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 26 + </span> 27 27 {{- else if eq .Op.String " " -}} 28 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 29 - <div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div> 30 - <div class="{{ $opStyle 
}}">{{ .Op.String }}</div> 31 - <div class="px-2">{{ .Content }}</div> 32 - </div> 28 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 29 + <span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span> 30 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 31 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 32 + </span> 33 33 {{- end -}} 34 34 {{- end -}} 35 - {{- end -}}</div></div></pre> 35 + {{- end -}}</div></div></div> 36 36 37 - <pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 37 + <div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 38 38 {{- range .RightLines -}} 39 39 {{- if .IsEmpty -}} 40 - <div class="{{ $emptyStyle }} {{ $containerStyle }}"> 41 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div> 42 - <div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div> 43 - <div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div> 44 - </div> 40 + <span class="{{ $emptyStyle }} {{ $containerStyle }}"> 41 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span> 42 + <span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span> 43 + <span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span> 44 + </span> 45 45 {{- else if eq .Op.String "+" -}} 46 - <div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 47 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div> 48 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 49 - <div class="px-2" >{{ .Content }}</div> 50 - </div> 46 + <span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 47 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span> 48 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 49 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 50 + </span> 51 51 {{- else if eq .Op.String " " -}} 52 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 53 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div> 54 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 55 - <div class="px-2">{{ .Content }}</div> 56 - </div> 52 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 53 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span> 54 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 55 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 56 + </span> 57 57 {{- end -}} 58 58 {{- end -}} 59 - {{- end -}}</div></div></pre> 59 + {{- 
end -}}</div></div></div> 60 60 </div> 61 61 {{ end }}
+21 -22
appview/pages/templates/repo/fragments/unifiedDiff.html
··· 1 1 {{ define "repo/fragments/unifiedDiff" }} 2 2 {{ $name := .Id }} 3 - <pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 3 + <div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 4 4 {{- $oldStart := .OldPosition -}} 5 5 {{- $newStart := .NewPosition -}} 6 6 {{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}} 7 7 {{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}} 8 8 {{- $lineNrSepStyle1 := "" -}} 9 9 {{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}} 10 - {{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 10 + {{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 11 11 {{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}} 12 12 {{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}} 13 13 {{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}} 14 14 {{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}} 15 15 {{- range .Lines -}} 16 16 {{- if eq .Op.String "+" -}} 17 - <div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}"> 18 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div> 19 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div> 20 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 21 - <div class="px-2">{{ .Line }}</div> 22 - </div> 17 + <span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}"> 18 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span> 19 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span> 20 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 21 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 22 + </span> 23 23 {{- $newStart = add64 $newStart 1 -}} 24 24 {{- end -}} 25 25 {{- if eq .Op.String "-" -}} 26 - <div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}"> 27 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div> 28 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div> 29 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 30 - <div class="px-2">{{ .Line }}</div> 31 - </div> 26 + <span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}"> 27 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span> 28 + <span 
class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span> 29 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 30 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 31 + </span> 32 32 {{- $oldStart = add64 $oldStart 1 -}} 33 33 {{- end -}} 34 34 {{- if eq .Op.String " " -}} 35 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}"> 36 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div> 37 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div> 38 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 39 - <div class="px-2">{{ .Line }}</div> 40 - </div> 35 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}"> 36 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span> 37 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span> 38 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 39 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 40 + </span> 41 41 {{- $newStart = add64 $newStart 1 -}} 42 42 {{- $oldStart = add64 $oldStart 1 -}} 43 43 {{- end -}} 44 44 {{- end -}} 45 - {{- end -}}</div></div></pre> 45 + {{- end -}}</div></div></div> 46 46 {{ end }} 47 -
+30 -44
appview/pages/templates/repo/index.html
··· 5 5 {{ template "repo/fragments/meta" . }} 6 6 7 7 {{ template "repo/fragments/og" (dict "RepoInfo" .RepoInfo) }} 8 - 9 - <!-- Structured Data for Repository --> 10 - <script type="application/ld+json"> 11 - { 12 - "@context": "https://schema.org", 13 - "@type": "SoftwareSourceCode", 14 - "name": "{{ .RepoInfo.Name }}", 15 - "description": "{{ .RepoInfo.Description }}", 16 - "codeRepository": "https://tangled.org/{{ .RepoInfo.FullName }}", 17 - "programmingLanguage": {{ if .Languages }}{{ range $idx, $lang := .Languages }}{{ if eq $idx 0 }}"{{ $lang.Name }}"{{ end }}{{ end }}{{ else }}"Unknown"{{ end }}, 18 - "url": "https://tangled.org/{{ .RepoInfo.FullName }}", 19 - "author": { 20 - "@type": "Person", 21 - "name": "{{ .RepoInfo.OwnerWithAt }}", 22 - "url": "https://tangled.org/{{ .RepoInfo.OwnerWithAt }}" 23 - }{{ if .RepoInfo.Source }}, 24 - "isBasedOn": { 25 - "@type": "SoftwareSourceCode", 26 - "name": "{{ .RepoInfo.Source.Name }}", 27 - "url": "https://tangled.org/{{ didOrHandle .RepoInfo.Source.Did .RepoInfo.SourceHandle }}/{{ .RepoInfo.Source.Name }}" 28 - }{{ end }} 29 - } 30 - </script> 31 - 32 - <!-- Breadcrumb Navigation --> 33 - {{ template "fragments/breadcrumb" (list 34 - (list "Home" "https://tangled.org") 35 - (list .RepoInfo.OwnerWithAt (printf "https://tangled.org/%s" .RepoInfo.OwnerWithAt)) 36 - (list .RepoInfo.Name (printf "https://tangled.org/%s" .RepoInfo.FullName)) 37 - ) }} 38 - {{ end }} 39 - 40 - {{ define "canonical" }}https://tangled.org/{{ .RepoInfo.FullName }}{{ end }} 41 - 42 - {{ define "rss" }} 43 - <link rel="alternate" type="application/atom+xml" title="{{ .RepoInfo.FullName }} Activity Feed" href="https://tangled.org/{{ .RepoInfo.FullName }}/feed.atom" /> 44 8 {{ end }} 45 9 46 10 {{ define "repoContent" }} ··· 50 14 {{ end }} 51 15 <div class="flex items-center justify-between pb-5"> 52 16 {{ block "branchSelector" . }}{{ end }} 53 - <div class="flex md:hidden items-center gap-2"> 17 + <div class="flex md:hidden items-center gap-3"> 54 18 <a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold"> 55 19 {{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }} 56 20 </a> ··· 102 66 103 67 {{ define "branchSelector" }} 104 68 <div class="flex gap-2 items-center justify-between w-full"> 105 - <div class="flex gap-2 items-center"> 69 + <div class="flex gap-2 items-stretch"> 106 70 <select 107 71 onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)" 108 72 class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700" ··· 264 228 <span 265 229 class="mx-1 before:content-['ยท'] before:select-none" 266 230 ></span> 267 - <span> 268 - {{ $did := index $.EmailToDid .Author.Email }} 269 - <a href="{{ if $did }}/{{ resolve $did }}{{ else }}mailto:{{ .Author.Email }}{{ end }}" 270 - class="text-gray-500 dark:text-gray-400 no-underline hover:underline" 271 - >{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ .Author.Name }}{{ end }}</a> 272 - </span> 231 + {{ template "attribution" (list . $.EmailToDid) }} 273 232 <div class="inline-block px-1 select-none after:content-['ยท']"></div> 274 233 {{ template "repo/fragments/time" .Committer.When }} 275 234 ··· 295 254 {{ end }} 296 255 </div> 297 256 </div> 257 + {{ end }} 258 + 259 + {{ define "attribution" }} 260 + {{ $commit := index . 0 }} 261 + {{ $map := index . 
1 }} 262 + <span class="flex items-center"> 263 + {{ $author := index $map $commit.Author.Email }} 264 + {{ $coauthors := $commit.CoAuthors }} 265 + {{ $all := list }} 266 + 267 + {{ if $author }} 268 + {{ $all = append $all $author }} 269 + {{ end }} 270 + {{ range $coauthors }} 271 + {{ $co := index $map .Email }} 272 + {{ if $co }} 273 + {{ $all = append $all $co }} 274 + {{ end }} 275 + {{ end }} 276 + 277 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }} 278 + <a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 279 + class="no-underline hover:underline"> 280 + {{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }} 281 + {{ if $coauthors }} +{{ length $coauthors }}{{ end }} 282 + </a> 283 + </span> 298 284 {{ end }} 299 285 300 286 {{ define "branchList" }}
+19
appview/pages/templates/repo/issues/fragments/og.html
··· 1 + {{ define "repo/issues/fragments/og" }} 2 + {{ $title := printf "%s #%d" .Issue.Title .Issue.IssueId }} 3 + {{ $description := or .Issue.Body .RepoInfo.Description }} 4 + {{ $url := printf "https://tangled.org/%s/issues/%d" .RepoInfo.FullName .Issue.IssueId }} 5 + {{ $imageUrl := printf "https://tangled.org/%s/issues/%d/opengraph" .RepoInfo.FullName .Issue.IssueId }} 6 + 7 + <meta property="og:title" content="{{ unescapeHtml $title }}" /> 8 + <meta property="og:type" content="object" /> 9 + <meta property="og:url" content="{{ $url }}" /> 10 + <meta property="og:description" content="{{ $description }}" /> 11 + <meta property="og:image" content="{{ $imageUrl }}" /> 12 + <meta property="og:image:width" content="1200" /> 13 + <meta property="og:image:height" content="600" /> 14 + 15 + <meta name="twitter:card" content="summary_large_image" /> 16 + <meta name="twitter:title" content="{{ unescapeHtml $title }}" /> 17 + <meta name="twitter:description" content="{{ $description }}" /> 18 + <meta name="twitter:image" content="{{ $imageUrl }}" /> 19 + {{ end }}
+40 -23
appview/pages/templates/repo/log.html
··· 17 17 <div class="hidden md:flex md:flex-col divide-y divide-gray-200 dark:divide-gray-700"> 18 18 {{ $grid := "grid grid-cols-14 gap-4" }} 19 19 <div class="{{ $grid }}"> 20 - <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2">Author</div> 20 + <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Author</div> 21 21 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Commit</div> 22 22 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-6">Message</div> 23 - <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-1"></div> 24 23 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2 justify-self-end">Date</div> 25 24 </div> 26 25 {{ range $index, $commit := .Commits }} 27 26 {{ $messageParts := splitN $commit.Message "\n\n" 2 }} 28 27 <div class="{{ $grid }} py-3"> 29 - <div class="align-top truncate col-span-2"> 30 - {{ $did := index $.EmailToDid $commit.Author.Email }} 31 - {{ if $did }} 32 - {{ template "user/fragments/picHandleLink" $did }} 33 - {{ else }} 34 - <a href="mailto:{{ $commit.Author.Email }}" class="text-gray-700 dark:text-gray-300 no-underline hover:underline">{{ $commit.Author.Name }}</a> 35 - {{ end }} 28 + <div class="align-top col-span-3"> 29 + {{ template "attribution" (list $commit $.EmailToDid) }} 36 30 </div> 37 31 <div class="align-top font-mono flex items-start col-span-3"> 38 32 {{ $verified := $.VerifiedCommits.IsVerified $commit.Hash.String }} ··· 61 55 <div class="align-top col-span-6"> 62 56 <div> 63 57 <a href="/{{ $.RepoInfo.FullName }}/commit/{{ $commit.Hash.String }}" class="dark:text-white no-underline hover:underline">{{ index $messageParts 0 }}</a> 58 + 64 59 {{ if gt (len $messageParts) 1 }} 65 60 <button class="py-1/2 px-1 bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 rounded" hx-on:click="this.parentElement.nextElementSibling.classList.toggle('hidden')">{{ i "ellipsis" "w-3 h-3" }}</button> 66 61 {{ end }} ··· 72 67 </span> 73 68 {{ end }} 74 69 {{ end }} 70 + 71 + <!-- ci status --> 72 + <span class="text-xs"> 73 + {{ $pipeline := index $.Pipelines .Hash.String }} 74 + {{ if and $pipeline (gt (len $pipeline.Statuses) 0) }} 75 + {{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }} 76 + {{ end }} 77 + </span> 75 78 </div> 76 79 77 80 {{ if gt (len $messageParts) 1 }} 78 81 <p class="hidden mt-1 text-sm text-gray-600 dark:text-gray-400">{{ nl2br (index $messageParts 1) }}</p> 79 82 {{ end }} 80 - </div> 81 - <div class="align-top col-span-1"> 82 - <!-- ci status --> 83 - {{ $pipeline := index $.Pipelines .Hash.String }} 84 - {{ if and $pipeline (gt (len $pipeline.Statuses) 0) }} 85 - {{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }} 86 - {{ end }} 87 83 </div> 88 84 <div class="align-top justify-self-end text-gray-500 dark:text-gray-400 col-span-2">{{ template "repo/fragments/shortTimeAgo" $commit.Committer.When }}</div> 89 85 </div> ··· 152 148 </a> 153 149 </span> 154 150 <span class="mx-2 before:content-['ยท'] before:select-none"></span> 155 - <span> 156 - {{ $did := index $.EmailToDid $commit.Author.Email }} 157 - <a href="{{ if $did }}/{{ $did }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 158 - class="text-gray-500 
dark:text-gray-400 no-underline hover:underline"> 159 - {{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ $commit.Author.Name }}{{ end }} 160 - </a> 161 - </span> 151 + {{ template "attribution" (list $commit $.EmailToDid) }} 162 152 <div class="inline-block px-1 select-none after:content-['ยท']"></div> 163 153 <span>{{ template "repo/fragments/shortTime" $commit.Committer.When }}</span> 164 154 ··· 176 166 </div> 177 167 </section> 178 168 169 + {{ end }} 170 + 171 + {{ define "attribution" }} 172 + {{ $commit := index . 0 }} 173 + {{ $map := index . 1 }} 174 + <span class="flex items-center gap-1"> 175 + {{ $author := index $map $commit.Author.Email }} 176 + {{ $coauthors := $commit.CoAuthors }} 177 + {{ $all := list }} 178 + 179 + {{ if $author }} 180 + {{ $all = append $all $author }} 181 + {{ end }} 182 + {{ range $coauthors }} 183 + {{ $co := index $map .Email }} 184 + {{ if $co }} 185 + {{ $all = append $all $co }} 186 + {{ end }} 187 + {{ end }} 188 + 189 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }} 190 + <a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 191 + class="no-underline hover:underline"> 192 + {{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }} 193 + {{ if $coauthors }} +{{ length $coauthors }}{{ end }} 194 + </a> 195 + </span> 179 196 {{ end }} 180 197 181 198 {{ define "repoAfter" }}
+1 -1
appview/pages/templates/repo/pipelines/pipelines.html
··· 23 23 </p> 24 24 <p> 25 25 <span class="{{ $bullet }}">2</span>Configure your CI/CD 26 - <a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>. 26 + <a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>. 27 27 </p> 28 28 <p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p> 29 29 </div>
+16 -16
appview/pages/templates/repo/pulls/fragments/og.html
··· 1 - {{ define "pulls/fragments/og" }} 2 - {{ $title := printf "%s #%d" .Pull.Title .Pull.PullId }} 3 - {{ $description := or .Pull.Body .RepoInfo.Description }} 4 - {{ $url := printf "https://tangled.org/%s/pulls/%d" .RepoInfo.FullName .Pull.PullId }} 5 - {{ $imageUrl := printf "https://tangled.org/%s/pulls/%d/opengraph" .RepoInfo.FullName .Pull.PullId }} 1 + {{ define "repo/pulls/fragments/og" }} 2 + {{ $title := printf "%s #%d" .Pull.Title .Pull.PullId }} 3 + {{ $description := or .Pull.Body .RepoInfo.Description }} 4 + {{ $url := printf "https://tangled.org/%s/pulls/%d" .RepoInfo.FullName .Pull.PullId }} 5 + {{ $imageUrl := printf "https://tangled.org/%s/pulls/%d/opengraph" .RepoInfo.FullName .Pull.PullId }} 6 6 7 - <meta property="og:title" content="{{ unescapeHtml $title }}" /> 8 - <meta property="og:type" content="object" /> 9 - <meta property="og:url" content="{{ $url }}" /> 10 - <meta property="og:description" content="{{ $description }}" /> 11 - <meta property="og:image" content="{{ $imageUrl }}" /> 12 - <meta property="og:image:width" content="1200" /> 13 - <meta property="og:image:height" content="600" /> 7 + <meta property="og:title" content="{{ unescapeHtml $title }}" /> 8 + <meta property="og:type" content="object" /> 9 + <meta property="og:url" content="{{ $url }}" /> 10 + <meta property="og:description" content="{{ $description }}" /> 11 + <meta property="og:image" content="{{ $imageUrl }}" /> 12 + <meta property="og:image:width" content="1200" /> 13 + <meta property="og:image:height" content="600" /> 14 14 15 - <meta name="twitter:card" content="summary_large_image" /> 16 - <meta name="twitter:title" content="{{ unescapeHtml $title }}" /> 17 - <meta name="twitter:description" content="{{ $description }}" /> 18 - <meta name="twitter:image" content="{{ $imageUrl }}" /> 15 + <meta name="twitter:card" content="summary_large_image" /> 16 + <meta name="twitter:title" content="{{ unescapeHtml $title }}" /> 17 + <meta name="twitter:description" content="{{ $description }}" /> 18 + <meta name="twitter:image" content="{{ $imageUrl }}" /> 19 19 {{ end }}
+1 -1
appview/pages/templates/repo/settings/pipelines.html
··· 22 22 <p class="text-gray-500 dark:text-gray-400"> 23 23 Choose a spindle to execute your workflows on. Only repository owners 24 24 can configure spindles. Spindles can be selfhosted, 25 - <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 25 + <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 26 26 click to learn more. 27 27 </a> 28 28 </p>
+1 -1
appview/pages/templates/spindles/index.html
··· 102 102 {{ define "docsButton" }} 103 103 <a 104 104 class="btn flex items-center gap-2" 105 - href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 105 + href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 106 106 {{ i "book" "size-4" }} 107 107 docs 108 108 </a>
+1 -1
appview/pages/templates/strings/string.html
··· 17 17 <span class="select-none">/</span> 18 18 <a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a> 19 19 </div> 20 - <div class="flex gap-2 text-base"> 20 + <div class="flex gap-2 items-stretch text-base"> 21 21 {{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }} 22 22 <a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group" 23 23 hx-boost="true"
+3 -49
appview/pages/templates/timeline/home.html
··· 1 1 {{ define "title" }}tangled &middot; tightly-knit social coding{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - {{ $desc := "Collaborate on code with decentralized git hosting, modern contribution and review workflows, and lightweight CI/CD pipelines." }} 5 - {{ $title = "tangled ยท tightly-knit social coding" }} 6 - 7 - <meta name="description" content="{{ $desc }}" /> 8 - <meta property="og:title" content="{{ $title }}" /> 9 - <meta property="og:type" content="website" /> 4 + <meta property="og:title" content="timeline ยท tangled" /> 5 + <meta property="og:type" content="object" /> 10 6 <meta property="og:url" content="https://tangled.org" /> 11 - <meta property="og:description" content="Decentralized git hosting with improved pull requests and lightweight CI/CD. Host repositories on your own infrastructure." /> 12 - <meta property="og:image" content="https://assets.tangled.network/tangled_og.png" /> 13 - <meta property="og:image:width" content="1200" /> 14 - <meta property="og:image:height" content="630" /> 15 - 16 - <meta name="twitter:card" content="summary_large_image" /> 17 - <meta name="twitter:title" content="{{ $title }}" /> 18 - <meta name="twitter:description" content="{{ $desc }}" /> 19 - <meta name="twitter:image" content="https://assets.tangled.network/tangled_og.png" /> 20 - 21 - <!-- Enhanced Structured Data for Homepage --> 22 - <script type="application/ld+json"> 23 - { 24 - "@context": "https://schema.org", 25 - "@type": "WebSite", 26 - "name": "Tangled", 27 - "alternateName": "Tangled", 28 - "url": "https://tangled.org", 29 - "description": "{{ $desc }}", 30 - "potentialAction": { 31 - "@type": "SearchAction", 32 - "target": "https://tangled.org/?q={search_term_string}", 33 - "query-input": "required name=search_term_string" 34 - } 35 - } 36 - </script> 37 - <script type="application/ld+json"> 38 - { 39 - "@context": "https://schema.org", 40 - "@type": "SoftwareApplication", 41 - "name": "Tangled", 42 - "applicationCategory": "DeveloperTool", 43 - "offers": { 44 - "@type": "Offer", 45 - "price": "0", 46 - "priceCurrency": "USD" 47 - }, 48 - "operatingSystem": "Web", 49 - "description": "{{ $desc }}" 50 - } 51 - </script> 7 + <meta property="og:description" content="tightly-knit social coding" /> 52 8 {{ end }} 53 - 54 - {{ define "canonical" }}https://tangled.org{{ end }} 55 9 56 10 57 11 {{ define "content" }}
+2 -2
appview/pages/templates/user/fragments/followCard.html
··· 6 6 <img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" /> 7 7 </div> 8 8 9 - <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full"> 9 + <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0"> 10 10 <div class="flex-1 min-h-0 justify-around flex flex-col"> 11 11 <a href="/{{ $userIdent }}"> 12 12 <span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span> 13 13 </a> 14 14 {{ with .Profile }} 15 - <p class="text-sm pb-2 md:pb-2">{{.Description}}</p> 15 + <p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p> 16 16 {{ end }} 17 17 <div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full"> 18 18 <span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+9 -6
appview/pages/templates/user/signup.html
··· 43 43 page to complete your registration. 44 44 </span> 45 45 <div class="w-full mt-4 text-center"> 46 - <div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}"></div> 46 + <div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}" data-size="flexible"></div> 47 47 </div> 48 48 <button class="btn text-base w-full my-2 mt-6" type="submit" id="signup-button" tabindex="7" > 49 49 <span>join now</span> 50 50 </button> 51 + <p class="text-sm text-gray-500"> 52 + Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>. 53 + </p> 54 + 55 + <p id="signup-msg" class="error w-full"></p> 56 + <p class="text-sm text-gray-500 pt-4"> 57 + By signing up, you agree to our <a href="/terms" class="underline">Terms of Service</a> and <a href="/privacy" class="underline">Privacy Policy</a>. 58 + </p> 51 59 </form> 52 - <p class="text-sm text-gray-500"> 53 - Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>. 54 - </p> 55 - 56 - <p id="signup-msg" class="error w-full"></p> 57 60 </main> 58 61 </body> 59 62 </html>
+12 -11
appview/pipelines/pipelines.go
··· 16 16 "tangled.org/core/appview/reporesolver" 17 17 "tangled.org/core/eventconsumer" 18 18 "tangled.org/core/idresolver" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 spindlemodel "tangled.org/core/spindle/models" 21 22 ··· 81 82 ps, err := db.GetPipelineStatuses( 82 83 p.db, 83 84 30, 84 - db.FilterEq("repo_owner", f.Did), 85 - db.FilterEq("repo_name", f.Name), 86 - db.FilterEq("knot", f.Knot), 85 + orm.FilterEq("repo_owner", f.Did), 86 + orm.FilterEq("repo_name", f.Name), 87 + orm.FilterEq("knot", f.Knot), 87 88 ) 88 89 if err != nil { 89 90 l.Error("failed to query db", "err", err) ··· 122 123 ps, err := db.GetPipelineStatuses( 123 124 p.db, 124 125 1, 125 - db.FilterEq("repo_owner", f.Did), 126 - db.FilterEq("repo_name", f.Name), 127 - db.FilterEq("knot", f.Knot), 128 - db.FilterEq("id", pipelineId), 126 + orm.FilterEq("repo_owner", f.Did), 127 + orm.FilterEq("repo_name", f.Name), 128 + orm.FilterEq("knot", f.Knot), 129 + orm.FilterEq("id", pipelineId), 129 130 ) 130 131 if err != nil { 131 132 l.Error("failed to query db", "err", err) ··· 189 190 ps, err := db.GetPipelineStatuses( 190 191 p.db, 191 192 1, 192 - db.FilterEq("repo_owner", f.Did), 193 - db.FilterEq("repo_name", f.Name), 194 - db.FilterEq("knot", f.Knot), 195 - db.FilterEq("id", pipelineId), 193 + orm.FilterEq("repo_owner", f.Did), 194 + orm.FilterEq("repo_name", f.Name), 195 + orm.FilterEq("knot", f.Knot), 196 + orm.FilterEq("id", pipelineId), 196 197 ) 197 198 if err != nil || len(ps) != 1 { 198 199 l.Error("pipeline query failed", "err", err, "count", len(ps))
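These call sites switch from the old `db.Filter*` helpers to constructors in the new `tangled.org/core/orm` package. The orm package itself is not shown in this compare, so the following is only a hedged sketch of the shape such filter constructors might take; names follow the calls above, internals are assumptions:

```go
package orm

// Illustrative sketch only — not the real tangled.org/core/orm implementation.

import (
	"fmt"
	"strings"
)

// Filter is one WHERE-clause fragment plus its bind arguments.
type Filter struct {
	clause string
	args   []any
}

// FilterEq matches rows where column = value.
func FilterEq(column string, value any) Filter {
	return Filter{clause: column + " = ?", args: []any{value}}
}

// FilterIn matches rows where column is any of the given values.
func FilterIn[T any](column string, values []T) Filter {
	if len(values) == 0 {
		// an empty IN () never matches; keep the SQL valid
		return Filter{clause: "1 = 0"}
	}
	placeholders := strings.TrimSuffix(strings.Repeat("?, ", len(values)), ", ")
	args := make([]any, len(values))
	for i, v := range values {
		args[i] = v
	}
	return Filter{clause: fmt.Sprintf("%s IN (%s)", column, placeholders), args: args}
}
```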
+2 -1
appview/pulls/opengraph.go
··· 13 13 "tangled.org/core/appview/db" 14 14 "tangled.org/core/appview/models" 15 15 "tangled.org/core/appview/ogcard" 16 + "tangled.org/core/orm" 16 17 "tangled.org/core/patchutil" 17 18 "tangled.org/core/types" 18 19 ) ··· 276 277 } 277 278 278 279 // Get comment count from database 279 - comments, err := db.GetPullComments(s.db, db.FilterEq("pull_id", pull.ID)) 280 + comments, err := db.GetPullComments(s.db, orm.FilterEq("pull_id", pull.ID)) 280 281 if err != nil { 281 282 log.Printf("failed to get pull comments: %v", err) 282 283 }
+104 -83
appview/pulls/pulls.go
··· 19 19 "tangled.org/core/appview/config" 20 20 "tangled.org/core/appview/db" 21 21 pulls_indexer "tangled.org/core/appview/indexer/pulls" 22 + "tangled.org/core/appview/mentions" 22 23 "tangled.org/core/appview/models" 23 24 "tangled.org/core/appview/notify" 24 25 "tangled.org/core/appview/oauth" 25 26 "tangled.org/core/appview/pages" 26 27 "tangled.org/core/appview/pages/markup" 27 28 "tangled.org/core/appview/pages/repoinfo" 28 - "tangled.org/core/appview/refresolver" 29 29 "tangled.org/core/appview/reporesolver" 30 30 "tangled.org/core/appview/validator" 31 31 "tangled.org/core/appview/xrpcclient" 32 32 "tangled.org/core/idresolver" 33 + "tangled.org/core/orm" 33 34 "tangled.org/core/patchutil" 34 35 "tangled.org/core/rbac" 35 36 "tangled.org/core/tid" ··· 44 45 ) 45 46 46 47 type Pulls struct { 47 - oauth *oauth.OAuth 48 - repoResolver *reporesolver.RepoResolver 49 - pages *pages.Pages 50 - idResolver *idresolver.Resolver 51 - refResolver *refresolver.Resolver 52 - db *db.DB 53 - config *config.Config 54 - notifier notify.Notifier 55 - enforcer *rbac.Enforcer 56 - logger *slog.Logger 57 - validator *validator.Validator 58 - indexer *pulls_indexer.Indexer 48 + oauth *oauth.OAuth 49 + repoResolver *reporesolver.RepoResolver 50 + pages *pages.Pages 51 + idResolver *idresolver.Resolver 52 + mentionsResolver *mentions.Resolver 53 + db *db.DB 54 + config *config.Config 55 + notifier notify.Notifier 56 + enforcer *rbac.Enforcer 57 + logger *slog.Logger 58 + validator *validator.Validator 59 + indexer *pulls_indexer.Indexer 59 60 } 60 61 61 62 func New( ··· 63 64 repoResolver *reporesolver.RepoResolver, 64 65 pages *pages.Pages, 65 66 resolver *idresolver.Resolver, 66 - refResolver *refresolver.Resolver, 67 + mentionsResolver *mentions.Resolver, 67 68 db *db.DB, 68 69 config *config.Config, 69 70 notifier notify.Notifier, ··· 73 74 logger *slog.Logger, 74 75 ) *Pulls { 75 76 return &Pulls{ 76 - oauth: oauth, 77 - repoResolver: repoResolver, 78 - pages: pages, 79 - idResolver: resolver, 80 - refResolver: refResolver, 81 - db: db, 82 - config: config, 83 - notifier: notifier, 84 - enforcer: enforcer, 85 - logger: logger, 86 - validator: validator, 87 - indexer: indexer, 77 + oauth: oauth, 78 + repoResolver: repoResolver, 79 + pages: pages, 80 + idResolver: resolver, 81 + mentionsResolver: mentionsResolver, 82 + db: db, 83 + config: config, 84 + notifier: notifier, 85 + enforcer: enforcer, 86 + logger: logger, 87 + validator: validator, 88 + indexer: indexer, 88 89 } 89 90 } 90 91 ··· 190 191 ps, err := db.GetPipelineStatuses( 191 192 s.db, 192 193 len(shas), 193 - db.FilterEq("repo_owner", f.Did), 194 - db.FilterEq("repo_name", f.Name), 195 - db.FilterEq("knot", f.Knot), 196 - db.FilterIn("sha", shas), 194 + orm.FilterEq("repo_owner", f.Did), 195 + orm.FilterEq("repo_name", f.Name), 196 + orm.FilterEq("knot", f.Knot), 197 + orm.FilterIn("sha", shas), 197 198 ) 198 199 if err != nil { 199 200 log.Printf("failed to fetch pipeline statuses: %s", err) ··· 217 218 218 219 labelDefs, err := db.GetLabelDefinitions( 219 220 s.db, 220 - db.FilterIn("at_uri", f.Labels), 221 - db.FilterContains("scope", tangled.RepoPullNSID), 221 + orm.FilterIn("at_uri", f.Labels), 222 + orm.FilterContains("scope", tangled.RepoPullNSID), 222 223 ) 223 224 if err != nil { 224 225 log.Println("failed to fetch labels", err) ··· 597 598 598 599 pulls, err := db.GetPulls( 599 600 s.db, 600 - db.FilterIn("id", ids), 601 + orm.FilterIn("id", ids), 601 602 ) 602 603 if err != nil { 603 604 log.Println("failed to get pulls", 
err) ··· 648 649 ps, err := db.GetPipelineStatuses( 649 650 s.db, 650 651 len(shas), 651 - db.FilterEq("repo_owner", f.Did), 652 - db.FilterEq("repo_name", f.Name), 653 - db.FilterEq("knot", f.Knot), 654 - db.FilterIn("sha", shas), 652 + orm.FilterEq("repo_owner", f.Did), 653 + orm.FilterEq("repo_name", f.Name), 654 + orm.FilterEq("knot", f.Knot), 655 + orm.FilterIn("sha", shas), 655 656 ) 656 657 if err != nil { 657 658 log.Printf("failed to fetch pipeline statuses: %s", err) ··· 664 665 665 666 labelDefs, err := db.GetLabelDefinitions( 666 667 s.db, 667 - db.FilterIn("at_uri", f.Labels), 668 - db.FilterContains("scope", tangled.RepoPullNSID), 668 + orm.FilterIn("at_uri", f.Labels), 669 + orm.FilterContains("scope", tangled.RepoPullNSID), 669 670 ) 670 671 if err != nil { 671 672 log.Println("failed to fetch labels", err) ··· 729 730 return 730 731 } 731 732 732 - mentions, references := s.refResolver.Resolve(r.Context(), body) 733 + mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 733 734 734 735 // Start a transaction 735 736 tx, err := s.db.BeginTx(r.Context(), nil) ··· 1205 1206 } 1206 1207 } 1207 1208 1208 - mentions, references := s.refResolver.Resolve(r.Context(), body) 1209 + mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 1209 1210 1210 1211 rkey := tid.TID() 1211 1212 initialSubmission := models.PullSubmission{ ··· 1240 1241 return 1241 1242 } 1242 1243 1244 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 1245 + if err != nil { 1246 + log.Println("failed to upload patch", err) 1247 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1248 + return 1249 + } 1250 + 1243 1251 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1244 1252 Collection: tangled.RepoPullNSID, 1245 1253 Repo: user.Did, ··· 1251 1259 Repo: string(repo.RepoAt()), 1252 1260 Branch: targetBranch, 1253 1261 }, 1254 - Patch: patch, 1262 + PatchBlob: blob.Blob, 1255 1263 Source: recordPullSource, 1256 1264 CreatedAt: time.Now().Format(time.RFC3339), 1257 1265 }, ··· 1327 1335 // apply all record creations at once 1328 1336 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1329 1337 for _, p := range stack { 1338 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch())) 1339 + if err != nil { 1340 + log.Println("failed to upload patch blob", err) 1341 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1342 + return 1343 + } 1344 + 1330 1345 record := p.AsRecord() 1331 - write := comatproto.RepoApplyWrites_Input_Writes_Elem{ 1346 + record.PatchBlob = blob.Blob 1347 + writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 1332 1348 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 1333 1349 Collection: tangled.RepoPullNSID, 1334 1350 Rkey: &p.Rkey, ··· 1336 1352 Val: &record, 1337 1353 }, 1338 1354 }, 1339 - } 1340 - writes = append(writes, &write) 1355 + }) 1341 1356 } 1342 1357 _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{ 1343 1358 Repo: user.Did, ··· 1365 1380 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1366 1381 return 1367 1382 } 1383 + 1368 1384 } 1369 1385 1370 1386 if err = tx.Commit(); err != nil { 1371 1387 log.Println("failed to create pull request", err) 1372 1388 s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1373 1389 return 1390 + } 1391 + 1392 + // notify about each pull 1393 + // 1394 + // this is performed after tx.Commit, because it could result in a locked DB otherwise 1395 + for _, p := range stack { 1396 + s.notifier.NewPull(r.Context(), p) 1374 1397 } 1375 1398 1376 1399 ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) ··· 1498 1521 // fork repo 1499 1522 repo, err := db.GetRepo( 1500 1523 s.db, 1501 - db.FilterEq("did", forkOwnerDid), 1502 - db.FilterEq("name", forkName), 1524 + orm.FilterEq("did", forkOwnerDid), 1525 + orm.FilterEq("name", forkName), 1503 1526 ) 1504 1527 if err != nil { 1505 1528 log.Println("failed to get repo", "did", forkOwnerDid, "name", forkName, "err", err) ··· 1862 1885 return 1863 1886 } 1864 1887 1865 - var recordPullSource *tangled.RepoPull_Source 1866 - if pull.IsBranchBased() { 1867 - recordPullSource = &tangled.RepoPull_Source{ 1868 - Branch: pull.PullSource.Branch, 1869 - Sha: sourceRev, 1870 - } 1871 - } 1872 - if pull.IsForkBased() { 1873 - repoAt := pull.PullSource.RepoAt.String() 1874 - recordPullSource = &tangled.RepoPull_Source{ 1875 - Branch: pull.PullSource.Branch, 1876 - Repo: &repoAt, 1877 - Sha: sourceRev, 1878 - } 1888 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 1889 + if err != nil { 1890 + log.Println("failed to upload patch blob", err) 1891 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.") 1892 + return 1879 1893 } 1894 + record := pull.AsRecord() 1895 + record.PatchBlob = blob.Blob 1896 + record.CreatedAt = time.Now().Format(time.RFC3339) 1880 1897 1881 1898 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1882 1899 Collection: tangled.RepoPullNSID, ··· 1884 1901 Rkey: pull.Rkey, 1885 1902 SwapRecord: ex.Cid, 1886 1903 Record: &lexutil.LexiconTypeDecoder{ 1887 - Val: &tangled.RepoPull{ 1888 - Title: pull.Title, 1889 - Target: &tangled.RepoPull_Target{ 1890 - Repo: string(repo.RepoAt()), 1891 - Branch: pull.TargetBranch, 1892 - }, 1893 - Patch: patch, // new patch 1894 - Source: recordPullSource, 1895 - CreatedAt: time.Now().Format(time.RFC3339), 1896 - }, 1904 + Val: &record, 1897 1905 }, 1898 1906 }) 1899 1907 if err != nil { ··· 1979 1987 } 1980 1988 defer tx.Rollback() 1981 1989 1990 + client, err := s.oauth.AuthorizedClient(r) 1991 + if err != nil { 1992 + log.Println("failed to authorize client") 1993 + s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 1994 + return 1995 + } 1996 + 1982 1997 // pds updates to make 1983 1998 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1984 1999 ··· 2012 2027 return 2013 2028 } 2014 2029 2030 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 2031 + if err != nil { 2032 + log.Println("failed to upload patch blob", err) 2033 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 2034 + return 2035 + } 2015 2036 record := p.AsRecord() 2037 + record.PatchBlob = blob.Blob 2016 2038 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2017 2039 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 2018 2040 Collection: tangled.RepoPullNSID, ··· 2047 2069 return 2048 2070 } 2049 2071 2072 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 2073 + if err != nil { 2074 + log.Println("failed to upload patch blob", err) 2075 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.") 2076 + return 2077 + } 2050 2078 record := np.AsRecord() 2051 - 2079 + record.PatchBlob = blob.Blob 2052 2080 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2053 2081 RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{ 2054 2082 Collection: tangled.RepoPullNSID, ··· 2066 2094 tx, 2067 2095 p.ParentChangeId, 2068 2096 // these should be enough filters to be unique per-stack 2069 - db.FilterEq("repo_at", p.RepoAt.String()), 2070 - db.FilterEq("owner_did", p.OwnerDid), 2071 - db.FilterEq("change_id", p.ChangeId), 2097 + orm.FilterEq("repo_at", p.RepoAt.String()), 2098 + orm.FilterEq("owner_did", p.OwnerDid), 2099 + orm.FilterEq("change_id", p.ChangeId), 2072 2100 ) 2073 2101 2074 2102 if err != nil { ··· 2082 2110 if err != nil { 2083 2111 log.Println("failed to resubmit pull", err) 2084 2112 s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.") 2085 - return 2086 - } 2087 - 2088 - client, err := s.oauth.AuthorizedClient(r) 2089 - if err != nil { 2090 - log.Println("failed to authorize client") 2091 - s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 2092 2113 return 2093 2114 } 2094 2115 ··· 2397 2418 body := fp.Body 2398 2419 rkey := tid.TID() 2399 2420 2400 - mentions, references := s.refResolver.Resolve(ctx, body) 2421 + mentions, references := s.mentionsResolver.Resolve(ctx, body) 2401 2422 2402 2423 initialSubmission := models.PullSubmission{ 2403 2424 Patch: fp.Raw,
-65
appview/refresolver/resolver.go
··· 1 - package refresolver 2 - 3 - import ( 4 - "context" 5 - "log/slog" 6 - 7 - "github.com/bluesky-social/indigo/atproto/syntax" 8 - "tangled.org/core/appview/config" 9 - "tangled.org/core/appview/db" 10 - "tangled.org/core/appview/models" 11 - "tangled.org/core/appview/pages/markup" 12 - "tangled.org/core/idresolver" 13 - ) 14 - 15 - type Resolver struct { 16 - config *config.Config 17 - idResolver *idresolver.Resolver 18 - execer db.Execer 19 - logger *slog.Logger 20 - } 21 - 22 - func New( 23 - config *config.Config, 24 - idResolver *idresolver.Resolver, 25 - execer db.Execer, 26 - logger *slog.Logger, 27 - ) *Resolver { 28 - return &Resolver{ 29 - config, 30 - idResolver, 31 - execer, 32 - logger, 33 - } 34 - } 35 - 36 - func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) { 37 - l := r.logger.With("method", "Resolve") 38 - rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source) 39 - l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs) 40 - idents := r.idResolver.ResolveIdents(ctx, rawMentions) 41 - var mentions []syntax.DID 42 - for _, ident := range idents { 43 - if ident != nil && !ident.Handle.IsInvalidHandle() { 44 - mentions = append(mentions, ident.DID) 45 - } 46 - } 47 - l.Debug("found mentions", "mentions", mentions) 48 - 49 - var resolvedRefs []models.ReferenceLink 50 - for _, rawRef := range rawRefs { 51 - ident, err := r.idResolver.ResolveIdent(ctx, rawRef.Handle) 52 - if err != nil || ident == nil || ident.Handle.IsInvalidHandle() { 53 - continue 54 - } 55 - rawRef.Handle = string(ident.DID) 56 - resolvedRefs = append(resolvedRefs, rawRef) 57 - } 58 - aturiRefs, err := db.ValidateReferenceLinks(r.execer, resolvedRefs) 59 - if err != nil { 60 - l.Error("failed running query", "err", err) 61 - } 62 - l.Debug("found references", "refs", aturiRefs) 63 - 64 - return mentions, aturiRefs 65 - }
+1
appview/repo/archive.go
··· 18 18 l := rp.logger.With("handler", "DownloadArchive") 19 19 ref := chi.URLParam(r, "ref") 20 20 ref, _ = url.PathUnescape(ref) 21 + ref = strings.TrimSuffix(ref, ".tar.gz") 21 22 f, err := rp.repoResolver.Resolve(r) 22 23 if err != nil { 23 24 l.Error("failed to get repo and knot", "err", err)
+10 -9
appview/repo/artifact.go
··· 15 15 "tangled.org/core/appview/models" 16 16 "tangled.org/core/appview/pages" 17 17 "tangled.org/core/appview/xrpcclient" 18 + "tangled.org/core/orm" 18 19 "tangled.org/core/tid" 19 20 "tangled.org/core/types" 20 21 ··· 155 156 156 157 artifacts, err := db.GetArtifact( 157 158 rp.db, 158 - db.FilterEq("repo_at", f.RepoAt()), 159 - db.FilterEq("tag", tag.Tag.Hash[:]), 160 - db.FilterEq("name", filename), 159 + orm.FilterEq("repo_at", f.RepoAt()), 160 + orm.FilterEq("tag", tag.Tag.Hash[:]), 161 + orm.FilterEq("name", filename), 161 162 ) 162 163 if err != nil { 163 164 log.Println("failed to get artifacts", err) ··· 234 235 235 236 artifacts, err := db.GetArtifact( 236 237 rp.db, 237 - db.FilterEq("repo_at", f.RepoAt()), 238 - db.FilterEq("tag", tag[:]), 239 - db.FilterEq("name", filename), 238 + orm.FilterEq("repo_at", f.RepoAt()), 239 + orm.FilterEq("tag", tag[:]), 240 + orm.FilterEq("name", filename), 240 241 ) 241 242 if err != nil { 242 243 log.Println("failed to get artifacts", err) ··· 276 277 defer tx.Rollback() 277 278 278 279 err = db.DeleteArtifact(tx, 279 - db.FilterEq("repo_at", f.RepoAt()), 280 - db.FilterEq("tag", artifact.Tag[:]), 281 - db.FilterEq("name", filename), 280 + orm.FilterEq("repo_at", f.RepoAt()), 281 + orm.FilterEq("tag", artifact.Tag[:]), 282 + orm.FilterEq("name", filename), 282 283 ) 283 284 if err != nil { 284 285 log.Println("failed to remove artifact record from db", err)
+3 -2
appview/repo/feed.go
··· 11 11 "tangled.org/core/appview/db" 12 12 "tangled.org/core/appview/models" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 15 16 "github.com/bluesky-social/indigo/atproto/identity" 16 17 "github.com/bluesky-social/indigo/atproto/syntax" ··· 20 21 func (rp *Repo) getRepoFeed(ctx context.Context, repo *models.Repo, ownerSlashRepo string) (*feeds.Feed, error) { 21 22 const feedLimitPerType = 100 22 23 23 - pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", repo.RepoAt())) 24 + pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, orm.FilterEq("repo_at", repo.RepoAt())) 24 25 if err != nil { 25 26 return nil, err 26 27 } ··· 28 29 issues, err := db.GetIssuesPaginated( 29 30 rp.db, 30 31 pagination.Page{Limit: feedLimitPerType}, 31 - db.FilterEq("repo_at", repo.RepoAt()), 32 + orm.FilterEq("repo_at", repo.RepoAt()), 32 33 ) 33 34 if err != nil { 34 35 return nil, err
+4 -3
appview/repo/index.go
··· 23 23 "tangled.org/core/appview/models" 24 24 "tangled.org/core/appview/pages" 25 25 "tangled.org/core/appview/xrpcclient" 26 + "tangled.org/core/orm" 26 27 "tangled.org/core/types" 27 28 28 29 "github.com/go-chi/chi/v5" ··· 122 123 l.Error("failed to get email to did map", "err", err) 123 124 } 124 125 125 - vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, commitsTrunc) 126 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, commitsTrunc) 126 127 if err != nil { 127 128 l.Error("failed to GetVerifiedObjectCommits", "err", err) 128 129 } ··· 171 172 // first attempt to fetch from db 172 173 langs, err := db.GetRepoLanguages( 173 174 rp.db, 174 - db.FilterEq("repo_at", repo.RepoAt()), 175 - db.FilterEq("ref", currentRef), 175 + orm.FilterEq("repo_at", repo.RepoAt()), 176 + orm.FilterEq("ref", currentRef), 176 177 ) 177 178 178 179 if err != nil || langs == nil {
+2 -2
appview/repo/log.go
··· 116 116 l.Error("failed to fetch email to did mapping", "err", err) 117 117 } 118 118 119 - vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, xrpcResp.Commits) 119 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, xrpcResp.Commits) 120 120 if err != nil { 121 121 l.Error("failed to GetVerifiedObjectCommits", "err", err) 122 122 } ··· 192 192 l.Error("failed to get email to did mapping", "err", err) 193 193 } 194 194 195 - vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.NiceDiff{*result.Diff}) 195 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.Commit{result.Diff.Commit}) 196 196 if err != nil { 197 197 l.Error("failed to GetVerifiedCommits", "err", err) 198 198 }
+3 -2
appview/repo/opengraph.go
··· 16 16 "tangled.org/core/appview/db" 17 17 "tangled.org/core/appview/models" 18 18 "tangled.org/core/appview/ogcard" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/types" 20 21 ) 21 22 ··· 338 339 var languageStats []types.RepoLanguageDetails 339 340 langs, err := db.GetRepoLanguages( 340 341 rp.db, 341 - db.FilterEq("repo_at", f.RepoAt()), 342 - db.FilterEq("is_default_ref", 1), 342 + orm.FilterEq("repo_at", f.RepoAt()), 343 + orm.FilterEq("is_default_ref", 1), 343 344 ) 344 345 if err != nil { 345 346 log.Printf("failed to get language stats from db: %v", err)
+17 -16
appview/repo/repo.go
··· 24 24 xrpcclient "tangled.org/core/appview/xrpcclient" 25 25 "tangled.org/core/eventconsumer" 26 26 "tangled.org/core/idresolver" 27 + "tangled.org/core/orm" 27 28 "tangled.org/core/rbac" 28 29 "tangled.org/core/tid" 29 30 "tangled.org/core/xrpc/serviceauth" ··· 345 346 // get form values 346 347 labelId := r.FormValue("label-id") 347 348 348 - label, err := db.GetLabelDefinition(rp.db, db.FilterEq("id", labelId)) 349 + label, err := db.GetLabelDefinition(rp.db, orm.FilterEq("id", labelId)) 349 350 if err != nil { 350 351 fail("Failed to find label definition.", err) 351 352 return ··· 409 410 410 411 err = db.UnsubscribeLabel( 411 412 tx, 412 - db.FilterEq("repo_at", f.RepoAt()), 413 - db.FilterEq("label_at", removedAt), 413 + orm.FilterEq("repo_at", f.RepoAt()), 414 + orm.FilterEq("label_at", removedAt), 414 415 ) 415 416 if err != nil { 416 417 fail("Failed to unsubscribe label.", err) 417 418 return 418 419 } 419 420 420 - err = db.DeleteLabelDefinition(tx, db.FilterEq("id", label.Id)) 421 + err = db.DeleteLabelDefinition(tx, orm.FilterEq("id", label.Id)) 421 422 if err != nil { 422 423 fail("Failed to delete label definition.", err) 423 424 return ··· 456 457 } 457 458 458 459 labelAts := r.Form["label"] 459 - _, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts)) 460 + _, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts)) 460 461 if err != nil { 461 462 fail("Failed to subscribe to label.", err) 462 463 return ··· 542 543 } 543 544 544 545 labelAts := r.Form["label"] 545 - _, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts)) 546 + _, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts)) 546 547 if err != nil { 547 548 fail("Failed to unsubscribe to label.", err) 548 549 return ··· 582 583 583 584 err = db.UnsubscribeLabel( 584 585 rp.db, 585 - db.FilterEq("repo_at", f.RepoAt()), 586 - db.FilterIn("label_at", labelAts), 586 + orm.FilterEq("repo_at", f.RepoAt()), 587 + orm.FilterIn("label_at", labelAts), 587 588 ) 588 589 if err != nil { 589 590 fail("Failed to unsubscribe label.", err) ··· 612 613 613 614 labelDefs, err := db.GetLabelDefinitions( 614 615 rp.db, 615 - db.FilterIn("at_uri", f.Labels), 616 - db.FilterContains("scope", subject.Collection().String()), 616 + orm.FilterIn("at_uri", f.Labels), 617 + orm.FilterContains("scope", subject.Collection().String()), 617 618 ) 618 619 if err != nil { 619 620 l.Error("failed to fetch label defs", "err", err) ··· 625 626 defs[l.AtUri().String()] = &l 626 627 } 627 628 628 - states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject)) 629 + states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject)) 629 630 if err != nil { 630 631 l.Error("failed to build label state", "err", err) 631 632 return ··· 660 661 661 662 labelDefs, err := db.GetLabelDefinitions( 662 663 rp.db, 663 - db.FilterIn("at_uri", f.Labels), 664 - db.FilterContains("scope", subject.Collection().String()), 664 + orm.FilterIn("at_uri", f.Labels), 665 + orm.FilterContains("scope", subject.Collection().String()), 665 666 ) 666 667 if err != nil { 667 668 l.Error("failed to fetch labels", "err", err) ··· 673 674 defs[l.AtUri().String()] = &l 674 675 } 675 676 676 - states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject)) 677 + states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject)) 677 678 if err != nil { 678 679 l.Error("failed to build label state", "err", err) 679 680 return ··· 1036 1037 // in the user's account. 
1037 1038 existingRepo, err := db.GetRepo( 1038 1039 rp.db, 1039 - db.FilterEq("did", user.Did), 1040 - db.FilterEq("name", forkName), 1040 + orm.FilterEq("did", user.Did), 1041 + orm.FilterEq("name", forkName), 1041 1042 ) 1042 1043 if err != nil { 1043 1044 if !errors.Is(err, sql.ErrNoRows) {
+16 -17
appview/repo/repo_util.go
··· 1 1 package repo 2 2 3 3 import ( 4 + "maps" 4 5 "slices" 5 6 "sort" 6 7 "strings" 7 8 8 9 "tangled.org/core/appview/db" 9 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 10 12 "tangled.org/core/types" 11 - 12 - "github.com/go-git/go-git/v5/plumbing/object" 13 13 ) 14 14 15 15 func sortFiles(files []types.NiceTree) { ··· 42 42 }) 43 43 } 44 44 45 - func uniqueEmails(commits []*object.Commit) []string { 45 + func uniqueEmails(commits []types.Commit) []string { 46 46 emails := make(map[string]struct{}) 47 47 for _, commit := range commits { 48 - if commit.Author.Email != "" { 49 - emails[commit.Author.Email] = struct{}{} 50 - } 51 - if commit.Committer.Email != "" { 52 - emails[commit.Committer.Email] = struct{}{} 48 + emails[commit.Author.Email] = struct{}{} 49 + emails[commit.Committer.Email] = struct{}{} 50 + for _, c := range commit.CoAuthors() { 51 + emails[c.Email] = struct{}{} 53 52 } 54 53 } 55 - var uniqueEmails []string 56 - for email := range emails { 57 - uniqueEmails = append(uniqueEmails, email) 58 - } 59 - return uniqueEmails 54 + 55 + // delete empty emails if any, from the set 56 + delete(emails, "") 57 + 58 + return slices.Collect(maps.Keys(emails)) 60 59 } 61 60 62 61 func balanceIndexItems(commitCount, branchCount, tagCount, fileCount int) (commitsTrunc int, branchesTrunc int, tagsTrunc int) { ··· 104 103 ps, err := db.GetPipelineStatuses( 105 104 d, 106 105 len(shas), 107 - db.FilterEq("repo_owner", repo.Did), 108 - db.FilterEq("repo_name", repo.Name), 109 - db.FilterEq("knot", repo.Knot), 110 - db.FilterIn("sha", shas), 106 + orm.FilterEq("repo_owner", repo.Did), 107 + orm.FilterEq("repo_name", repo.Name), 108 + orm.FilterEq("knot", repo.Knot), 109 + orm.FilterIn("sha", shas), 111 110 ) 112 111 if err != nil { 113 112 return nil, err
+3 -2
appview/repo/settings.go
··· 14 14 "tangled.org/core/appview/oauth" 15 15 "tangled.org/core/appview/pages" 16 16 xrpcclient "tangled.org/core/appview/xrpcclient" 17 + "tangled.org/core/orm" 17 18 "tangled.org/core/types" 18 19 19 20 comatproto "github.com/bluesky-social/indigo/api/atproto" ··· 210 211 return 211 212 } 212 213 213 - defaultLabels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs)) 214 + defaultLabels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs)) 214 215 if err != nil { 215 216 l.Error("failed to fetch labels", "err", err) 216 217 rp.pages.Error503(w) 217 218 return 218 219 } 219 220 220 - labels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", f.Labels)) 221 + labels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", f.Labels)) 221 222 if err != nil { 222 223 l.Error("failed to fetch labels", "err", err) 223 224 rp.pages.Error503(w)
+2 -1
appview/repo/tags.go
··· 10 10 "tangled.org/core/appview/models" 11 11 "tangled.org/core/appview/pages" 12 12 xrpcclient "tangled.org/core/appview/xrpcclient" 13 + "tangled.org/core/orm" 13 14 "tangled.org/core/types" 14 15 15 16 indigoxrpc "github.com/bluesky-social/indigo/xrpc" ··· 44 45 rp.pages.Error503(w) 45 46 return 46 47 } 47 - artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt())) 48 + artifacts, err := db.GetArtifact(rp.db, orm.FilterEq("repo_at", f.RepoAt())) 48 49 if err != nil { 49 50 l.Error("failed grab artifacts", "err", err) 50 51 return
+5 -4
appview/serververify/verify.go
··· 9 9 "tangled.org/core/api/tangled" 10 10 "tangled.org/core/appview/db" 11 11 "tangled.org/core/appview/xrpcclient" 12 + "tangled.org/core/orm" 12 13 "tangled.org/core/rbac" 13 14 ) 14 15 ··· 76 77 // mark this spindle as verified in the db 77 78 rowId, err := db.VerifySpindle( 78 79 tx, 79 - db.FilterEq("owner", owner), 80 - db.FilterEq("instance", instance), 80 + orm.FilterEq("owner", owner), 81 + orm.FilterEq("instance", instance), 81 82 ) 82 83 if err != nil { 83 84 return 0, fmt.Errorf("failed to write to DB: %w", err) ··· 115 116 // mark as registered 116 117 err = db.MarkRegistered( 117 118 tx, 118 - db.FilterEq("did", owner), 119 - db.FilterEq("domain", domain), 119 + orm.FilterEq("did", owner), 120 + orm.FilterEq("domain", domain), 120 121 ) 121 122 if err != nil { 122 123 return fmt.Errorf("failed to register domain: %w", err)
+25 -24
appview/spindles/spindles.go
··· 20 20 "tangled.org/core/appview/serververify" 21 21 "tangled.org/core/appview/xrpcclient" 22 22 "tangled.org/core/idresolver" 23 + "tangled.org/core/orm" 23 24 "tangled.org/core/rbac" 24 25 "tangled.org/core/tid" 25 26 ··· 71 72 user := s.OAuth.GetUser(r) 72 73 all, err := db.GetSpindles( 73 74 s.Db, 74 - db.FilterEq("owner", user.Did), 75 + orm.FilterEq("owner", user.Did), 75 76 ) 76 77 if err != nil { 77 78 s.Logger.Error("failed to fetch spindles", "err", err) ··· 101 102 102 103 spindles, err := db.GetSpindles( 103 104 s.Db, 104 - db.FilterEq("instance", instance), 105 - db.FilterEq("owner", user.Did), 106 - db.FilterIsNot("verified", "null"), 105 + orm.FilterEq("instance", instance), 106 + orm.FilterEq("owner", user.Did), 107 + orm.FilterIsNot("verified", "null"), 107 108 ) 108 109 if err != nil || len(spindles) != 1 { 109 110 l.Error("failed to get spindle", "err", err, "len(spindles)", len(spindles)) ··· 123 124 repos, err := db.GetRepos( 124 125 s.Db, 125 126 0, 126 - db.FilterEq("spindle", instance), 127 + orm.FilterEq("spindle", instance), 127 128 ) 128 129 if err != nil { 129 130 l.Error("failed to get spindle repos", "err", err) ··· 290 291 291 292 spindles, err := db.GetSpindles( 292 293 s.Db, 293 - db.FilterEq("owner", user.Did), 294 - db.FilterEq("instance", instance), 294 + orm.FilterEq("owner", user.Did), 295 + orm.FilterEq("instance", instance), 295 296 ) 296 297 if err != nil || len(spindles) != 1 { 297 298 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 319 320 // remove spindle members first 320 321 err = db.RemoveSpindleMember( 321 322 tx, 322 - db.FilterEq("did", user.Did), 323 - db.FilterEq("instance", instance), 323 + orm.FilterEq("did", user.Did), 324 + orm.FilterEq("instance", instance), 324 325 ) 325 326 if err != nil { 326 327 l.Error("failed to remove spindle members", "err", err) ··· 330 331 331 332 err = db.DeleteSpindle( 332 333 tx, 333 - db.FilterEq("owner", user.Did), 334 - db.FilterEq("instance", instance), 334 + orm.FilterEq("owner", user.Did), 335 + orm.FilterEq("instance", instance), 335 336 ) 336 337 if err != nil { 337 338 l.Error("failed to delete spindle", "err", err) ··· 410 411 411 412 spindles, err := db.GetSpindles( 412 413 s.Db, 413 - db.FilterEq("owner", user.Did), 414 - db.FilterEq("instance", instance), 414 + orm.FilterEq("owner", user.Did), 415 + orm.FilterEq("instance", instance), 415 416 ) 416 417 if err != nil || len(spindles) != 1 { 417 418 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 453 454 454 455 verifiedSpindle, err := db.GetSpindles( 455 456 s.Db, 456 - db.FilterEq("id", rowId), 457 + orm.FilterEq("id", rowId), 457 458 ) 458 459 if err != nil || len(verifiedSpindle) != 1 { 459 460 l.Error("failed get new spindle", "err", err) ··· 486 487 487 488 spindles, err := db.GetSpindles( 488 489 s.Db, 489 - db.FilterEq("owner", user.Did), 490 - db.FilterEq("instance", instance), 490 + orm.FilterEq("owner", user.Did), 491 + orm.FilterEq("instance", instance), 491 492 ) 492 493 if err != nil || len(spindles) != 1 { 493 494 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 622 623 623 624 spindles, err := db.GetSpindles( 624 625 s.Db, 625 - db.FilterEq("owner", user.Did), 626 - db.FilterEq("instance", instance), 626 + orm.FilterEq("owner", user.Did), 627 + orm.FilterEq("instance", instance), 627 628 ) 628 629 if err != nil || len(spindles) != 1 { 629 630 l.Error("failed to retrieve instance", "err", err, "len(spindles)", 
len(spindles)) ··· 672 673 // get the record from the DB first: 673 674 members, err := db.GetSpindleMembers( 674 675 s.Db, 675 - db.FilterEq("did", user.Did), 676 - db.FilterEq("instance", instance), 677 - db.FilterEq("subject", memberId.DID), 676 + orm.FilterEq("did", user.Did), 677 + orm.FilterEq("instance", instance), 678 + orm.FilterEq("subject", memberId.DID), 678 679 ) 679 680 if err != nil || len(members) != 1 { 680 681 l.Error("failed to get member", "err", err) ··· 685 686 // remove from db 686 687 if err = db.RemoveSpindleMember( 687 688 tx, 688 - db.FilterEq("did", user.Did), 689 - db.FilterEq("instance", instance), 690 - db.FilterEq("subject", memberId.DID), 689 + orm.FilterEq("did", user.Did), 690 + orm.FilterEq("instance", instance), 691 + orm.FilterEq("subject", memberId.DID), 691 692 ); err != nil { 692 693 l.Error("failed to remove spindle member", "err", err) 693 694 fail()
+6 -5
appview/state/gfi.go
··· 11 11 "tangled.org/core/appview/pages" 12 12 "tangled.org/core/appview/pagination" 13 13 "tangled.org/core/consts" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) { ··· 20 21 21 22 goodFirstIssueLabel := s.config.Label.GoodFirstIssue 22 23 23 - gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel)) 24 + gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel)) 24 25 if err != nil { 25 26 log.Println("failed to get gfi label def", err) 26 27 s.pages.Error500(w) 27 28 return 28 29 } 29 30 30 - repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel)) 31 + repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel)) 31 32 if err != nil { 32 33 log.Println("failed to get repo labels", err) 33 34 s.pages.Error503(w) ··· 55 56 pagination.Page{ 56 57 Limit: 500, 57 58 }, 58 - db.FilterIn("repo_at", repoUris), 59 - db.FilterEq("open", 1), 59 + orm.FilterIn("repo_at", repoUris), 60 + orm.FilterEq("open", 1), 60 61 ) 61 62 if err != nil { 62 63 log.Println("failed to get issues", err) ··· 132 133 } 133 134 134 135 if len(uriList) > 0 { 135 - allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList)) 136 + allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList)) 136 137 if err != nil { 137 138 log.Println("failed to fetch labels", err) 138 139 }
+17
appview/state/git_http.go
··· 25 25 26 26 } 27 27 28 + func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) { 29 + user, ok := r.Context().Value("resolvedId").(identity.Identity) 30 + if !ok { 31 + http.Error(w, "failed to resolve user", http.StatusInternalServerError) 32 + return 33 + } 34 + repo := r.Context().Value("repo").(*models.Repo) 35 + 36 + scheme := "https" 37 + if s.config.Core.Dev { 38 + scheme = "http" 39 + } 40 + 41 + targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery) 42 + s.proxyRequest(w, r, targetURL) 43 + } 44 + 28 45 func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) { 29 46 user, ok := r.Context().Value("resolvedId").(identity.Identity) 30 47 if !ok {
+6 -5
appview/state/knotstream.go
··· 16 16 ec "tangled.org/core/eventconsumer" 17 17 "tangled.org/core/eventconsumer/cursor" 18 18 "tangled.org/core/log" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 "tangled.org/core/workflow" 21 22 ··· 30 31 31 32 knots, err := db.GetRegistrations( 32 33 d, 33 - db.FilterIsNot("registered", "null"), 34 + orm.FilterIsNot("registered", "null"), 34 35 ) 35 36 if err != nil { 36 37 return nil, err ··· 143 144 repos, err := db.GetRepos( 144 145 d, 145 146 0, 146 - db.FilterEq("did", record.RepoDid), 147 - db.FilterEq("name", record.RepoName), 147 + orm.FilterEq("did", record.RepoDid), 148 + orm.FilterEq("name", record.RepoName), 148 149 ) 149 150 if err != nil { 150 151 return fmt.Errorf("failed to look for repo in DB (%s/%s): %w", record.RepoDid, record.RepoName, err) ··· 209 210 repos, err := db.GetRepos( 210 211 d, 211 212 0, 212 - db.FilterEq("did", record.TriggerMetadata.Repo.Did), 213 - db.FilterEq("name", record.TriggerMetadata.Repo.Repo), 213 + orm.FilterEq("did", record.TriggerMetadata.Repo.Did), 214 + orm.FilterEq("name", record.TriggerMetadata.Repo.Repo), 214 215 ) 215 216 if err != nil { 216 217 return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
+13 -12
appview/state/profile.go
··· 19 19 "tangled.org/core/appview/db" 20 20 "tangled.org/core/appview/models" 21 21 "tangled.org/core/appview/pages" 22 + "tangled.org/core/orm" 22 23 ) 23 24 24 25 func (s *State) Profile(w http.ResponseWriter, r *http.Request) { ··· 56 57 return nil, fmt.Errorf("failed to get profile: %w", err) 57 58 } 58 59 59 - repoCount, err := db.CountRepos(s.db, db.FilterEq("did", did)) 60 + repoCount, err := db.CountRepos(s.db, orm.FilterEq("did", did)) 60 61 if err != nil { 61 62 return nil, fmt.Errorf("failed to get repo count: %w", err) 62 63 } 63 64 64 - stringCount, err := db.CountStrings(s.db, db.FilterEq("did", did)) 65 + stringCount, err := db.CountStrings(s.db, orm.FilterEq("did", did)) 65 66 if err != nil { 66 67 return nil, fmt.Errorf("failed to get string count: %w", err) 67 68 } 68 69 69 - starredCount, err := db.CountStars(s.db, db.FilterEq("did", did)) 70 + starredCount, err := db.CountStars(s.db, orm.FilterEq("did", did)) 70 71 if err != nil { 71 72 return nil, fmt.Errorf("failed to get starred repo count: %w", err) 72 73 } ··· 86 87 startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC) 87 88 punchcard, err := db.MakePunchcard( 88 89 s.db, 89 - db.FilterEq("did", did), 90 - db.FilterGte("date", startOfYear.Format(time.DateOnly)), 91 - db.FilterLte("date", now.Format(time.DateOnly)), 90 + orm.FilterEq("did", did), 91 + orm.FilterGte("date", startOfYear.Format(time.DateOnly)), 92 + orm.FilterLte("date", now.Format(time.DateOnly)), 92 93 ) 93 94 if err != nil { 94 95 return nil, fmt.Errorf("failed to get punchcard for %s: %w", did, err) ··· 123 124 repos, err := db.GetRepos( 124 125 s.db, 125 126 0, 126 - db.FilterEq("did", profile.UserDid), 127 + orm.FilterEq("did", profile.UserDid), 127 128 ) 128 129 if err != nil { 129 130 l.Error("failed to fetch repos", "err", err) ··· 193 194 repos, err := db.GetRepos( 194 195 s.db, 195 196 0, 196 - db.FilterEq("did", profile.UserDid), 197 + orm.FilterEq("did", profile.UserDid), 197 198 ) 198 199 if err != nil { 199 200 l.Error("failed to get repos", "err", err) ··· 219 220 } 220 221 l = l.With("profileDid", profile.UserDid) 221 222 222 - stars, err := db.GetRepoStars(s.db, 0, db.FilterEq("did", profile.UserDid)) 223 + stars, err := db.GetRepoStars(s.db, 0, orm.FilterEq("did", profile.UserDid)) 223 224 if err != nil { 224 225 l.Error("failed to get stars", "err", err) 225 226 s.pages.Error500(w) ··· 248 249 } 249 250 l = l.With("profileDid", profile.UserDid) 250 251 251 - strings, err := db.GetStrings(s.db, 0, db.FilterEq("did", profile.UserDid)) 252 + strings, err := db.GetStrings(s.db, 0, orm.FilterEq("did", profile.UserDid)) 252 253 if err != nil { 253 254 l.Error("failed to get strings", "err", err) 254 255 s.pages.Error500(w) ··· 300 301 followDids = append(followDids, extractDid(follow)) 301 302 } 302 303 303 - profiles, err := db.GetProfiles(s.db, db.FilterIn("did", followDids)) 304 + profiles, err := db.GetProfiles(s.db, orm.FilterIn("did", followDids)) 304 305 if err != nil { 305 306 l.Error("failed to get profiles", "followDids", followDids, "err", err) 306 307 return &params, err ··· 703 704 log.Printf("getting profile data for %s: %s", user.Did, err) 704 705 } 705 706 706 - repos, err := db.GetRepos(s.db, 0, db.FilterEq("did", user.Did)) 707 + repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Did)) 707 708 if err != nil { 708 709 log.Printf("getting repos for %s: %s", user.Did, err) 709 710 }
+3 -3
appview/state/router.go
··· 36 36 router.Get("/favicon.ico", s.Favicon) 37 37 router.Get("/pwa-manifest.json", s.PWAManifest) 38 38 router.Get("/robots.txt", s.RobotsTxt) 39 - router.Get("/sitemap.xml", s.Sitemap) 40 39 41 40 userRouter := s.UserRouter(&middleware) 42 41 standardRouter := s.StandardRouter(&middleware) ··· 102 101 103 102 // These routes get proxied to the knot 104 103 r.Get("/info/refs", s.InfoRefs) 104 + r.Post("/git-upload-archive", s.UploadArchive) 105 105 r.Post("/git-upload-pack", s.UploadPack) 106 106 r.Post("/git-receive-pack", s.ReceivePack) 107 107 ··· 267 267 s.enforcer, 268 268 s.pages, 269 269 s.idResolver, 270 - s.refResolver, 270 + s.mentionsResolver, 271 271 s.db, 272 272 s.config, 273 273 s.notifier, ··· 284 284 s.repoResolver, 285 285 s.pages, 286 286 s.idResolver, 287 - s.refResolver, 287 + s.mentionsResolver, 288 288 s.db, 289 289 s.config, 290 290 s.notifier,
+2 -1
appview/state/spindlestream.go
··· 17 17 ec "tangled.org/core/eventconsumer" 18 18 "tangled.org/core/eventconsumer/cursor" 19 19 "tangled.org/core/log" 20 + "tangled.org/core/orm" 20 21 "tangled.org/core/rbac" 21 22 spindle "tangled.org/core/spindle/models" 22 23 ) ··· 27 28 28 29 spindles, err := db.GetSpindles( 29 30 d, 30 - db.FilterIsNot("verified", "null"), 31 + orm.FilterIsNot("verified", "null"), 31 32 ) 32 33 if err != nil { 33 34 return nil, err
+28 -80
appview/state/state.go
··· 15 15 "tangled.org/core/appview/config" 16 16 "tangled.org/core/appview/db" 17 17 "tangled.org/core/appview/indexer" 18 + "tangled.org/core/appview/mentions" 18 19 "tangled.org/core/appview/models" 19 20 "tangled.org/core/appview/notify" 20 21 dbnotify "tangled.org/core/appview/notify/db" 21 22 phnotify "tangled.org/core/appview/notify/posthog" 22 23 "tangled.org/core/appview/oauth" 23 24 "tangled.org/core/appview/pages" 24 - "tangled.org/core/appview/refresolver" 25 25 "tangled.org/core/appview/reporesolver" 26 26 "tangled.org/core/appview/validator" 27 27 xrpcclient "tangled.org/core/appview/xrpcclient" ··· 30 30 "tangled.org/core/jetstream" 31 31 "tangled.org/core/log" 32 32 tlog "tangled.org/core/log" 33 + "tangled.org/core/orm" 33 34 "tangled.org/core/rbac" 34 35 "tangled.org/core/tid" 35 36 ··· 43 44 ) 44 45 45 46 type State struct { 46 - db *db.DB 47 - notifier notify.Notifier 48 - indexer *indexer.Indexer 49 - oauth *oauth.OAuth 50 - enforcer *rbac.Enforcer 51 - pages *pages.Pages 52 - idResolver *idresolver.Resolver 53 - refResolver *refresolver.Resolver 54 - posthog posthog.Client 55 - jc *jetstream.JetstreamClient 56 - config *config.Config 57 - repoResolver *reporesolver.RepoResolver 58 - knotstream *eventconsumer.Consumer 59 - spindlestream *eventconsumer.Consumer 60 - logger *slog.Logger 61 - validator *validator.Validator 47 + db *db.DB 48 + notifier notify.Notifier 49 + indexer *indexer.Indexer 50 + oauth *oauth.OAuth 51 + enforcer *rbac.Enforcer 52 + pages *pages.Pages 53 + idResolver *idresolver.Resolver 54 + mentionsResolver *mentions.Resolver 55 + posthog posthog.Client 56 + jc *jetstream.JetstreamClient 57 + config *config.Config 58 + repoResolver *reporesolver.RepoResolver 59 + knotstream *eventconsumer.Consumer 60 + spindlestream *eventconsumer.Consumer 61 + logger *slog.Logger 62 + validator *validator.Validator 62 63 } 63 64 64 65 func Make(ctx context.Context, config *config.Config) (*State, error) { ··· 100 101 101 102 repoResolver := reporesolver.New(config, enforcer, d) 102 103 103 - refResolver := refresolver.New(config, res, d, log.SubLogger(logger, "refResolver")) 104 + mentionsResolver := mentions.New(config, res, d, log.SubLogger(logger, "mentionsResolver")) 104 105 105 106 wrapper := db.DbWrapper{Execer: d} 106 107 jc, err := jetstream.NewJetstreamClient( ··· 182 183 enforcer, 183 184 pages, 184 185 res, 185 - refResolver, 186 + mentionsResolver, 186 187 posthog, 187 188 jc, 188 189 config, ··· 220 221 221 222 robotsTxt := `User-agent: * 222 223 Allow: / 223 - Disallow: /settings 224 - Disallow: /notifications 225 - Disallow: /login 226 - Disallow: /logout 227 - Disallow: /signup 228 - Disallow: /oauth 229 - Disallow: */settings$ 230 - Disallow: */settings/* 231 - 232 - Crawl-delay: 1 233 - 234 - Sitemap: https://tangled.org/sitemap.xml 235 224 ` 236 225 w.Write([]byte(robotsTxt)) 237 226 } 238 227 239 - func (s *State) Sitemap(w http.ResponseWriter, r *http.Request) { 240 - w.Header().Set("Content-Type", "application/xml; charset=utf-8") 241 - w.Header().Set("Cache-Control", "public, max-age=3600") 242 - 243 - // basic sitemap with static pages 244 - sitemap := `<?xml version="1.0" encoding="UTF-8"?> 245 - <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> 246 - <url> 247 - <loc>https://tangled.org</loc> 248 - <changefreq>daily</changefreq> 249 - <priority>1.0</priority> 250 - </url> 251 - <url> 252 - <loc>https://tangled.org/timeline</loc> 253 - <changefreq>hourly</changefreq> 254 - <priority>0.9</priority> 255 - </url> 256 - <url> 257 - 
<loc>https://tangled.org/goodfirstissues</loc> 258 - <changefreq>daily</changefreq> 259 - <priority>0.8</priority> 260 - </url> 261 - <url> 262 - <loc>https://tangled.org/terms</loc> 263 - <changefreq>monthly</changefreq> 264 - <priority>0.3</priority> 265 - </url> 266 - <url> 267 - <loc>https://tangled.org/privacy</loc> 268 - <changefreq>monthly</changefreq> 269 - <priority>0.3</priority> 270 - </url> 271 - <url> 272 - <loc>https://tangled.org/brand</loc> 273 - <changefreq>monthly</changefreq> 274 - <priority>0.5</priority> 275 - </url> 276 - </urlset>` 277 - w.Write([]byte(sitemap)) 278 - } 279 - 280 228 // https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Manifest 281 229 const manifestJson = `{ 282 230 "name": "tangled", ··· 352 300 return 353 301 } 354 302 355 - gfiLabel, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", s.config.Label.GoodFirstIssue)) 303 + gfiLabel, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", s.config.Label.GoodFirstIssue)) 356 304 if err != nil { 357 305 // non-fatal 358 306 } ··· 376 324 377 325 regs, err := db.GetRegistrations( 378 326 s.db, 379 - db.FilterEq("did", user.Did), 380 - db.FilterEq("needs_upgrade", 1), 327 + orm.FilterEq("did", user.Did), 328 + orm.FilterEq("needs_upgrade", 1), 381 329 ) 382 330 if err != nil { 383 331 l.Error("non-fatal: failed to get registrations", "err", err) ··· 385 333 386 334 spindles, err := db.GetSpindles( 387 335 s.db, 388 - db.FilterEq("owner", user.Did), 389 - db.FilterEq("needs_upgrade", 1), 336 + orm.FilterEq("owner", user.Did), 337 + orm.FilterEq("needs_upgrade", 1), 390 338 ) 391 339 if err != nil { 392 340 l.Error("non-fatal: failed to get spindles", "err", err) ··· 557 505 // Check for existing repos 558 506 existingRepo, err := db.GetRepo( 559 507 s.db, 560 - db.FilterEq("did", user.Did), 561 - db.FilterEq("name", repoName), 508 + orm.FilterEq("did", user.Did), 509 + orm.FilterEq("name", repoName), 562 510 ) 563 511 if err == nil && existingRepo != nil { 564 512 l.Info("repo exists") ··· 718 666 } 719 667 720 668 func BackfillDefaultDefs(e db.Execer, r *idresolver.Resolver, defaults []string) error { 721 - defaultLabels, err := db.GetLabelDefinitions(e, db.FilterIn("at_uri", defaults)) 669 + defaultLabels, err := db.GetLabelDefinitions(e, orm.FilterIn("at_uri", defaults)) 722 670 if err != nil { 723 671 return err 724 672 }
+7 -6
appview/strings/strings.go
··· 17 17 "tangled.org/core/appview/pages" 18 18 "tangled.org/core/appview/pages/markup" 19 19 "tangled.org/core/idresolver" 20 + "tangled.org/core/orm" 20 21 "tangled.org/core/tid" 21 22 22 23 "github.com/bluesky-social/indigo/api/atproto" ··· 108 109 strings, err := db.GetStrings( 109 110 s.Db, 110 111 0, 111 - db.FilterEq("did", id.DID), 112 - db.FilterEq("rkey", rkey), 112 + orm.FilterEq("did", id.DID), 113 + orm.FilterEq("rkey", rkey), 113 114 ) 114 115 if err != nil { 115 116 l.Error("failed to fetch string", "err", err) ··· 199 200 all, err := db.GetStrings( 200 201 s.Db, 201 202 0, 202 - db.FilterEq("did", id.DID), 203 - db.FilterEq("rkey", rkey), 203 + orm.FilterEq("did", id.DID), 204 + orm.FilterEq("rkey", rkey), 204 205 ) 205 206 if err != nil { 206 207 l.Error("failed to fetch string", "err", err) ··· 408 409 409 410 if err := db.DeleteString( 410 411 s.Db, 411 - db.FilterEq("did", user.Did), 412 - db.FilterEq("rkey", rkey), 412 + orm.FilterEq("did", user.Did), 413 + orm.FilterEq("rkey", rkey), 413 414 ); err != nil { 414 415 fail("Failed to delete string.", err) 415 416 return
+2 -1
appview/validator/issue.go
··· 6 6 7 7 "tangled.org/core/appview/db" 8 8 "tangled.org/core/appview/models" 9 + "tangled.org/core/orm" 9 10 ) 10 11 11 12 func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error { 12 13 // if comments have parents, only ingest ones that are 1 level deep 13 14 if comment.ReplyTo != nil { 14 - parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo)) 15 + parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo)) 15 16 if err != nil { 16 17 return fmt.Errorf("failed to fetch parent comment: %w", err) 17 18 }
+1 -34
crypto/verify.go
··· 5 5 "crypto/sha256" 6 6 "encoding/base64" 7 7 "fmt" 8 - "strings" 9 8 10 9 "github.com/hiddeco/sshsig" 11 10 "golang.org/x/crypto/ssh" 12 - "tangled.org/core/types" 13 11 ) 14 12 15 13 func VerifySignature(pubKey, signature, payload []byte) (error, bool) { ··· 28 26 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults 29 27 // to sha-512 for all key types anyway. 30 28 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git") 31 - return err, err == nil 32 - } 33 29 34 - // VerifyCommitSignature reconstructs the payload used to sign a commit. This is 35 - // essentially the git cat-file output but without the gpgsig header. 36 - // 37 - // Caveats: signature verification will fail on commits with more than one parent, 38 - // i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field 39 - // and we are unable to reconstruct the payload correctly. 40 - // 41 - // Ideally this should directly operate on an *object.Commit. 42 - func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) { 43 - signature := commit.Commit.PGPSignature 44 - 45 - author := bytes.NewBuffer([]byte{}) 46 - committer := bytes.NewBuffer([]byte{}) 47 - commit.Commit.Author.Encode(author) 48 - commit.Commit.Committer.Encode(committer) 49 - 50 - payload := strings.Builder{} 51 - 52 - fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree) 53 - if commit.Commit.Parent != "" { 54 - fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent) 55 - } 56 - fmt.Fprintf(&payload, "author %s\n", author.String()) 57 - fmt.Fprintf(&payload, "committer %s\n", committer.String()) 58 - if commit.Commit.ChangedId != "" { 59 - fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId) 60 - } 61 - fmt.Fprintf(&payload, "\n%s", commit.Commit.Message) 62 - 63 - return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String())) 30 + return err, err == nil 64 31 } 65 32 66 33 // SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+1529
docs/DOCS.md
··· 1 + ---
2 + title: Tangled docs
3 + author: The Tangled Contributors
4 + date: Sun, 21 Dec 2025
5 + ---
6 +
7 + # Introduction
8 +
9 + Tangled is a decentralized code hosting and collaboration
10 + platform. Every component of Tangled is open-source and
11 + self-hostable. [tangled.org](https://tangled.org) also
12 + provides hosting and CI services that are free to use.
13 +
14 + There are several models for decentralized code
15 + collaboration platforms, ranging from ActivityPub's
16 + (Forgejo) federated model, to Radicle's entirely P2P model.
17 + Our approach attempts to be the best of both worlds by
18 + adopting the AT Protocol, a protocol for building decentralized
19 + social applications with a central identity.
20 +
21 + Our approach to this is the idea of "knots". Knots are
22 + lightweight, headless servers that enable users to host Git
23 + repositories with ease. Knots are designed for either single
24 + or multi-tenant use, which is perfect for self-hosting on a
25 + Raspberry Pi at home, or larger "community" servers. By
26 + default, Tangled provides managed knots where you can host
27 + your repositories for free.
28 +
29 + The appview at tangled.org acts as a consolidated "view"
30 + into the whole network, allowing users to access, clone and
31 + contribute to repositories hosted across different knots
32 + seamlessly.
33 +
34 + # Quick start guide
35 +
36 + ## Login or sign up
37 +
38 + You can [login](https://tangled.org) by using your AT Protocol
39 + account. If you are unclear on what that means, simply head
40 + to the [signup](https://tangled.org/signup) page and create
41 + an account. By doing so, you will be choosing Tangled as
42 + your account provider (you will be granted a handle of the
43 + form `user.tngl.sh`).
44 +
45 + In the AT Protocol network, users are free to choose their account
46 + provider (known as a "Personal Data Service", or PDS), and
47 + login to applications that support AT accounts.
48 +
49 + You can think of it as "one account for all of the atmosphere"!
50 +
51 + If you already have an AT account (you may have one if you
52 + signed up to Bluesky, for example), you can login with the
53 + same handle on Tangled (so just use `user.bsky.social` on
54 + the login page).
55 +
56 + ## Add an SSH key
57 +
58 + Once you are logged in, you can start creating repositories
59 + and pushing code. Tangled supports pushing git repositories
60 + over SSH.
61 +
62 + First, you'll need to generate an SSH key if you don't
63 + already have one:
64 +
65 + ```bash
66 + ssh-keygen -t ed25519 -C "foo@bar.com"
67 + ```
68 +
69 + When prompted, save the key to the default location
70 + (`~/.ssh/id_ed25519`) and optionally set a passphrase.
71 +
72 + Copy your public key to your clipboard:
73 +
74 + ```bash
75 + # on X11
76 + cat ~/.ssh/id_ed25519.pub | xclip -sel c
77 +
78 + # on wayland
79 + cat ~/.ssh/id_ed25519.pub | wl-copy
80 +
81 + # on macos
82 + cat ~/.ssh/id_ed25519.pub | pbcopy
83 + ```
84 +
85 + Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
86 + paste your public key, give it a descriptive name, and hit
87 + save.
88 +
89 + ## Create a repository
90 +
91 + Once your SSH key is added, create your first repository:
92 +
93 + 1. Hit the green `+` icon on the topbar, and select
94 + repository
95 + 2. Enter a repository name
96 + 3. Add a description
97 + 4. Choose a knotserver to host this repository on
98 + 5. Hit create
99 +
100 + Knots are self-hostable, lightweight Git servers that can
101 + host your repository.
Unlike traditional code forges, your 102 + code can live on any server. Read the [Knots](TODO) section 103 + for more. 104 + 105 + ## Configure SSH 106 + 107 + To ensure Git uses the correct SSH key and connects smoothly 108 + to Tangled, add this configuration to your `~/.ssh/config` 109 + file: 110 + 111 + ``` 112 + Host tangled.org 113 + Hostname tangled.org 114 + User git 115 + IdentityFile ~/.ssh/id_ed25519 116 + AddressFamily inet 117 + ``` 118 + 119 + This tells SSH to use your specific key when connecting to 120 + Tangled and prevents authentication issues if you have 121 + multiple SSH keys. 122 + 123 + Note that this configuration only works for knotservers that 124 + are hosted by tangled.org. If you use a custom knot, refer 125 + to the [Knots](TODO) section. 126 + 127 + ## Push your first repository 128 + 129 + Initialize a new Git repository: 130 + 131 + ```bash 132 + mkdir my-project 133 + cd my-project 134 + 135 + git init 136 + echo "# My Project" > README.md 137 + ``` 138 + 139 + Add some content and push! 140 + 141 + ```bash 142 + git add README.md 143 + git commit -m "Initial commit" 144 + git remote add origin git@tangled.org:user.tngl.sh/my-project 145 + git push -u origin main 146 + ``` 147 + 148 + That's it! Your code is now hosted on Tangled. 149 + 150 + ## Migrating an existing repository 151 + 152 + Moving your repositories from GitHub, GitLab, Bitbucket, or 153 + any other Git forge to Tangled is straightforward. You'll 154 + simply change your repository's remote URL. At the moment, 155 + Tangled does not have any tooling to migrate data such as 156 + GitHub issues or pull requests. 157 + 158 + First, create a new repository on tangled.org as described 159 + in the [Quick Start Guide](#create-a-repository). 160 + 161 + Navigate to your existing local repository: 162 + 163 + ```bash 164 + cd /path/to/your/existing/repo 165 + ``` 166 + 167 + You can inspect your existing Git remote like so: 168 + 169 + ```bash 170 + git remote -v 171 + ``` 172 + 173 + You'll see something like: 174 + 175 + ``` 176 + origin git@github.com:username/my-project (fetch) 177 + origin git@github.com:username/my-project (push) 178 + ``` 179 + 180 + Update the remote URL to point to tangled: 181 + 182 + ```bash 183 + git remote set-url origin git@tangled.org:user.tngl.sh/my-project 184 + ``` 185 + 186 + Verify the change: 187 + 188 + ```bash 189 + git remote -v 190 + ``` 191 + 192 + You should now see: 193 + 194 + ``` 195 + origin git@tangled.org:user.tngl.sh/my-project (fetch) 196 + origin git@tangled.org:user.tngl.sh/my-project (push) 197 + ``` 198 + 199 + Push all your branches and tags to Tangled: 200 + 201 + ```bash 202 + git push -u origin --all 203 + git push -u origin --tags 204 + ``` 205 + 206 + Your repository is now migrated to Tangled! All commit 207 + history, branches, and tags have been preserved. 208 + 209 + ## Mirroring a repository to Tangled 210 + 211 + If you want to maintain your repository on multiple forges 212 + simultaneously, for example, keeping your primary repository 213 + on GitHub while mirroring to Tangled for backup or 214 + redundancy, you can do so by adding multiple remotes. 215 + 216 + You can configure your local repository to push to both 217 + Tangled and, say, GitHub. 
You may already have the following 218 + setup: 219 + 220 + ``` 221 + $ git remote -v 222 + origin git@github.com:username/my-project (fetch) 223 + origin git@github.com:username/my-project (push) 224 + ``` 225 + 226 + Now add Tangled as an additional push URL to the same 227 + remote: 228 + 229 + ```bash 230 + git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project 231 + ``` 232 + 233 + You also need to re-add the original URL as a push 234 + destination (Git replaces the push URL when you use `--add` 235 + the first time): 236 + 237 + ```bash 238 + git remote set-url --add --push origin git@github.com:username/my-project 239 + ``` 240 + 241 + Verify your configuration: 242 + 243 + ``` 244 + $ git remote -v 245 + origin git@github.com:username/repo (fetch) 246 + origin git@tangled.org:username/my-project (push) 247 + origin git@github.com:username/repo (push) 248 + ``` 249 + 250 + Notice that there's one fetch URL (the primary remote) and 251 + two push URLs. Now, whenever you push, Git will 252 + automatically push to both remotes: 253 + 254 + ```bash 255 + git push origin main 256 + ``` 257 + 258 + This single command pushes your `main` branch to both GitHub 259 + and Tangled simultaneously. 260 + 261 + To push all branches and tags: 262 + 263 + ```bash 264 + git push origin --all 265 + git push origin --tags 266 + ``` 267 + 268 + If you prefer more control over which remote you push to, 269 + you can maintain separate remotes: 270 + 271 + ```bash 272 + git remote add github git@github.com:username/my-project 273 + git remote add tangled git@tangled.org:username/my-project 274 + ``` 275 + 276 + Then push to each explicitly: 277 + 278 + ```bash 279 + git push github main 280 + git push tangled main 281 + ``` 282 + 283 + # Knot self-hosting guide 284 + 285 + So you want to run your own knot server? Great! Here are a few prerequisites: 286 + 287 + 1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind. 288 + 2. A (sub)domain name. People generally use `knot.example.com`. 289 + 3. A valid SSL certificate for your domain. 290 + 291 + ## NixOS 292 + 293 + Refer to the [knot 294 + module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix) 295 + for a full list of options. Sample configurations: 296 + 297 + - [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85) 298 + - [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25) 299 + 300 + ## Docker 301 + 302 + Refer to 303 + [@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker). 304 + Note that this is community maintained. 305 + 306 + ## Manual setup 307 + 308 + First, clone this repository: 309 + 310 + ``` 311 + git clone https://tangled.org/@tangled.org/core 312 + ``` 313 + 314 + Then, build the `knot` CLI. This is the knot administration 315 + and operation tool. 
For the purpose of this guide, we're 316 + only concerned with these subcommands: 317 + 318 + * `knot server`: the main knot server process, typically 319 + run as a supervised service 320 + * `knot guard`: handles role-based access control for git 321 + over SSH (you'll never have to run this yourself) 322 + * `knot keys`: fetches SSH keys associated with your knot; 323 + we'll use this to generate the SSH 324 + `AuthorizedKeysCommand` 325 + 326 + ``` 327 + cd core 328 + export CGO_ENABLED=1 329 + go build -o knot ./cmd/knot 330 + ``` 331 + 332 + Next, move the `knot` binary to a location owned by `root` -- 333 + `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 334 + 335 + ``` 336 + sudo mv knot /usr/local/bin/knot 337 + sudo chown root:root /usr/local/bin/knot 338 + ``` 339 + 340 + This is necessary because SSH `AuthorizedKeysCommand` requires [really 341 + specific permissions](https://stackoverflow.com/a/27638306). The 342 + `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 343 + retrieve a user's public SSH keys dynamically for authentication. Let's 344 + set that up. 345 + 346 + ``` 347 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 348 + Match User git 349 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 350 + AuthorizedKeysCommandUser nobody 351 + EOF 352 + ``` 353 + 354 + Then, reload `sshd`: 355 + 356 + ``` 357 + sudo systemctl reload ssh 358 + ``` 359 + 360 + Next, create the `git` user. We'll use the `git` user's home directory 361 + to store repositories: 362 + 363 + ``` 364 + sudo adduser git 365 + ``` 366 + 367 + Create `/home/git/.knot.env` with the following, updating the values as 368 + necessary. The `KNOT_SERVER_OWNER` should be set to your 369 + DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 370 + 371 + ``` 372 + KNOT_REPO_SCAN_PATH=/home/git 373 + KNOT_SERVER_HOSTNAME=knot.example.com 374 + APPVIEW_ENDPOINT=https://tangled.org 375 + KNOT_SERVER_OWNER=did:plc:foobar 376 + KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 377 + KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 378 + ``` 379 + 380 + If you run a Linux distribution that uses systemd, you can use the provided 381 + service file to run the server. Copy 382 + [`knotserver.service`](/systemd/knotserver.service) 383 + to `/etc/systemd/system/`. Then, run: 384 + 385 + ``` 386 + systemctl enable knotserver 387 + systemctl start knotserver 388 + ``` 389 + 390 + The last step is to configure a reverse proxy like Nginx or Caddy to front your 391 + knot. Here's an example configuration for Nginx: 392 + 393 + ``` 394 + server { 395 + listen 80; 396 + listen [::]:80; 397 + server_name knot.example.com; 398 + 399 + location / { 400 + proxy_pass http://localhost:5555; 401 + proxy_set_header Host $host; 402 + proxy_set_header X-Real-IP $remote_addr; 403 + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 404 + proxy_set_header X-Forwarded-Proto $scheme; 405 + } 406 + 407 + # wss endpoint for git events 408 + location /events { 409 + proxy_set_header X-Forwarded-For $remote_addr; 410 + proxy_set_header Host $http_host; 411 + proxy_set_header Upgrade websocket; 412 + proxy_set_header Connection Upgrade; 413 + proxy_pass http://localhost:5555; 414 + } 415 + # additional config for SSL/TLS go here. 416 + } 417 + 418 + ``` 419 + 420 + Remember to use Let's Encrypt or similar to procure a certificate for your 421 + knot domain. 422 + 423 + You should now have a running knot server! 
You can finalize 424 + your registration by hitting the `verify` button on the 425 + [/settings/knots](https://tangled.org/settings/knots) page. This simply creates 426 + a record on your PDS to announce the existence of the knot. 427 + 428 + ### Custom paths 429 + 430 + (This section applies to manual setup only. Docker users should edit the mounts 431 + in `docker-compose.yml` instead.) 432 + 433 + Right now, the database and repositories of your knot lives in `/home/git`. You 434 + can move these paths if you'd like to store them in another folder. Be careful 435 + when adjusting these paths: 436 + 437 + * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent 438 + any possible side effects. Remember to restart it once you're done. 439 + * Make backups before moving in case something goes wrong. 440 + * Make sure the `git` user can read and write from the new paths. 441 + 442 + #### Database 443 + 444 + As an example, let's say the current database is at `/home/git/knotserver.db`, 445 + and we want to move it to `/home/git/database/knotserver.db`. 446 + 447 + Copy the current database to the new location. Make sure to copy the `.db-shm` 448 + and `.db-wal` files if they exist. 449 + 450 + ``` 451 + mkdir /home/git/database 452 + cp /home/git/knotserver.db* /home/git/database 453 + ``` 454 + 455 + In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to 456 + the new file path (_not_ the directory): 457 + 458 + ``` 459 + KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db 460 + ``` 461 + 462 + #### Repositories 463 + 464 + As an example, let's say the repositories are currently in `/home/git`, and we 465 + want to move them into `/home/git/repositories`. 466 + 467 + Create the new folder, then move the existing repositories (if there are any): 468 + 469 + ``` 470 + mkdir /home/git/repositories 471 + # move all DIDs into the new folder; these will vary for you! 472 + mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories 473 + ``` 474 + 475 + In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 476 + to the new directory: 477 + 478 + ``` 479 + KNOT_REPO_SCAN_PATH=/home/git/repositories 480 + ``` 481 + 482 + Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 483 + repository path: 484 + 485 + ``` 486 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 487 + Match User git 488 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 489 + AuthorizedKeysCommandUser nobody 490 + EOF 491 + ``` 492 + 493 + Make sure to restart your SSH server! 494 + 495 + #### MOTD (message of the day) 496 + 497 + To configure the MOTD used ("Welcome to this knot!" by default), edit the 498 + `/home/git/motd` file: 499 + 500 + ``` 501 + printf "Hi from this knot!\n" > /home/git/motd 502 + ``` 503 + 504 + Note that you should add a newline at the end if setting a non-empty message 505 + since the knot won't do this for you. 506 + 507 + # Spindles 508 + 509 + ## Pipelines 510 + 511 + Spindle workflows allow you to write CI/CD pipelines in a 512 + simple format. They're located in the `.tangled/workflows` 513 + directory at the root of your repository, and are defined 514 + using YAML. 515 + 516 + The fields are: 517 + 518 + - [Trigger](#trigger): A **required** field that defines 519 + when a workflow should be triggered. 520 + - [Engine](#engine): A **required** field that defines which 521 + engine a workflow should run on. 
522 + - [Clone options](#clone-options): An **optional** field 523 + that defines how the repository should be cloned. 524 + - [Dependencies](#dependencies): An **optional** field that 525 + allows you to list dependencies you may need. 526 + - [Environment](#environment): An **optional** field that 527 + allows you to define environment variables. 528 + - [Steps](#steps): An **optional** field that allows you to 529 + define what steps should run in the workflow. 530 + 531 + ### Trigger 532 + 533 + The first thing to add to a workflow is the trigger, which 534 + defines when a workflow runs. This is defined using a `when` 535 + field, which takes in a list of conditions. Each condition 536 + has the following fields: 537 + 538 + - `event`: This is a **required** field that defines when 539 + your workflow should run. It's a list that can take one or 540 + more of the following values: 541 + - `push`: The workflow should run every time a commit is 542 + pushed to the repository. 543 + - `pull_request`: The workflow should run every time a 544 + pull request is made or updated. 545 + - `manual`: The workflow can be triggered manually. 546 + - `branch`: Defines which branches the workflow should run 547 + for. If used with the `push` event, commits to the 548 + branch(es) listed here will trigger the workflow. If used 549 + with the `pull_request` event, updates to pull requests 550 + targeting the branch(es) listed here will trigger the 551 + workflow. This field has no effect with the `manual` 552 + event. Supports glob patterns using `*` and `**` (e.g., 553 + `main`, `develop`, `release-*`). Either `branch` or `tag` 554 + (or both) must be specified for `push` events. 555 + - `tag`: Defines which tags the workflow should run for. 556 + Only used with the `push` event - when tags matching the 557 + pattern(s) listed here are pushed, the workflow will 558 + trigger. This field has no effect with `pull_request` or 559 + `manual` events. Supports glob patterns using `*` and `**` 560 + (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or 561 + `tag` (or both) must be specified for `push` events. 562 + 563 + For example, if you'd like to define a workflow that runs 564 + when commits are pushed to the `main` and `develop` 565 + branches, or when pull requests that target the `main` 566 + branch are updated, or manually, you can do so with: 567 + 568 + ```yaml 569 + when: 570 + - event: ["push", "manual"] 571 + branch: ["main", "develop"] 572 + - event: ["pull_request"] 573 + branch: ["main"] 574 + ``` 575 + 576 + You can also trigger workflows on tag pushes. For instance, 577 + to run a deployment workflow when tags matching `v*` are 578 + pushed: 579 + 580 + ```yaml 581 + when: 582 + - event: ["push"] 583 + tag: ["v*"] 584 + ``` 585 + 586 + You can even combine branch and tag patterns in a single 587 + constraint (the workflow triggers if either matches): 588 + 589 + ```yaml 590 + when: 591 + - event: ["push"] 592 + branch: ["main", "release-*"] 593 + tag: ["v*", "stable"] 594 + ``` 595 + 596 + ### Engine 597 + 598 + Next is the engine on which the workflow should run, defined 599 + using the **required** `engine` field. The currently 600 + supported engines are: 601 + 602 + - `nixery`: This uses an instance of 603 + [Nixery](https://nixery.dev) to run steps, which allows 604 + you to add [dependencies](#dependencies) from 605 + Nixpkgs (https://github.com/NixOS/nixpkgs). 
You can
606 + search for packages on https://search.nixos.org, and
607 + there's a pretty good chance the package(s) you're looking
608 + for will be there.
609 + 
610 + Example:
611 + 
612 + ```yaml
613 + engine: "nixery"
614 + ```
615 + 
616 + ### Clone options
617 + 
618 + When a workflow starts, the first step is to clone the
619 + repository. You can customize this behavior using the
620 + **optional** `clone` field. It has the following fields:
621 + 
622 + - `skip`: Setting this to `true` will skip cloning the
623 + repository. This can be useful if your workflow is doing
624 + something that doesn't require anything from the
625 + repository itself. This is `false` by default.
626 + - `depth`: This sets the number of commits, or the "clone
627 + depth", to fetch from the repository. For example, if you
628 + set this to 2, the last 2 commits will be fetched. By
629 + default, the depth is set to 1, meaning only the most
630 + recent commit will be fetched, which is the commit that
631 + triggered the workflow.
632 + - `submodules`: If you use Git submodules
633 + (https://git-scm.com/book/en/v2/Git-Tools-Submodules)
634 + in your repository, setting this field to `true` will
635 + recursively fetch all submodules. This is `false` by
636 + default.
637 + 
638 + The default settings are:
639 + 
640 + ```yaml
641 + clone:
642 + skip: false
643 + depth: 1
644 + submodules: false
645 + ```
646 + 
647 + ### Dependencies
648 + 
649 + Usually when you're running a workflow, you'll need
650 + additional dependencies. The `dependencies` field lets you
651 + define which dependencies to get, and from where. It's a
652 + key-value map, with the key being the registry to fetch
653 + dependencies from, and the value being the list of
654 + dependencies to fetch.
655 + 
656 + Say you want to fetch Node.js and Go from `nixpkgs`, and a
657 + package called `my_pkg` that you've made, served from your
658 + own registry at
659 + `https://tangled.org/@example.com/my_pkg`. You can define
660 + those dependencies like so:
661 + 
662 + ```yaml
663 + dependencies:
664 + # nixpkgs
665 + nixpkgs:
666 + - nodejs
667 + - go
668 + # custom registry
669 + git+https://tangled.org/@example.com/my_pkg:
670 + - my_pkg
671 + ```
672 + 
673 + Now these dependencies are available to use in your
674 + workflow!
675 + 
676 + ### Environment
677 + 
678 + The `environment` field allows you to define environment
679 + variables that will be available throughout the entire
680 + workflow. **Do not put secrets here; these environment
681 + variables are visible to anyone viewing the repository. You
682 + can add secrets for pipelines in your repository's
683 + settings.**
684 + 
685 + Example:
686 + 
687 + ```yaml
688 + environment:
689 + GOOS: "linux"
690 + GOARCH: "arm64"
691 + NODE_ENV: "production"
692 + MY_ENV_VAR: "MY_ENV_VALUE"
693 + ```
694 + 
695 + ### Steps
696 + 
697 + The `steps` field allows you to define what steps should run
698 + in the workflow. It's a list of step objects, each with the
699 + following fields:
700 + 
701 + - `name`: This field allows you to give your step a name.
702 + This name is visible in your workflow runs, and is used to
703 + describe what the step is doing.
704 + - `command`: This field allows you to define a command to
705 + run in that step. The step is run in a Bash shell, and the
706 + logs from the command will be visible in the pipelines
707 + page on the Tangled website. The
708 + [dependencies](#dependencies) you added will be available
709 + to use here.
710 + - `environment`: Similar to the global 711 + [environment](#environment) config, this **optional** 712 + field is a key-value map that allows you to set 713 + environment variables for the step. **Do not put secrets 714 + here, these environment variables are visible to anyone 715 + viewing the repository. You can add secrets for pipelines 716 + in your repository's settings.** 717 + 718 + Example: 719 + 720 + ```yaml 721 + steps: 722 + - name: "Build backend" 723 + command: "go build" 724 + environment: 725 + GOOS: "darwin" 726 + GOARCH: "arm64" 727 + - name: "Build frontend" 728 + command: "npm run build" 729 + environment: 730 + NODE_ENV: "production" 731 + ``` 732 + 733 + ### Complete workflow 734 + 735 + ```yaml 736 + # .tangled/workflows/build.yml 737 + 738 + when: 739 + - event: ["push", "manual"] 740 + branch: ["main", "develop"] 741 + - event: ["pull_request"] 742 + branch: ["main"] 743 + 744 + engine: "nixery" 745 + 746 + # using the default values 747 + clone: 748 + skip: false 749 + depth: 1 750 + submodules: false 751 + 752 + dependencies: 753 + # nixpkgs 754 + nixpkgs: 755 + - nodejs 756 + - go 757 + # custom registry 758 + git+https://tangled.org/@example.com/my_pkg: 759 + - my_pkg 760 + 761 + environment: 762 + GOOS: "linux" 763 + GOARCH: "arm64" 764 + NODE_ENV: "production" 765 + MY_ENV_VAR: "MY_ENV_VALUE" 766 + 767 + steps: 768 + - name: "Build backend" 769 + command: "go build" 770 + environment: 771 + GOOS: "darwin" 772 + GOARCH: "arm64" 773 + - name: "Build frontend" 774 + command: "npm run build" 775 + environment: 776 + NODE_ENV: "production" 777 + ``` 778 + 779 + If you want another example of a workflow, you can look at 780 + the one [Tangled uses to build the 781 + project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml). 782 + 783 + ## Self-hosting guide 784 + 785 + ### Prerequisites 786 + 787 + * Go 788 + * Docker (the only supported backend currently) 789 + 790 + ### Configuration 791 + 792 + Spindle is configured using environment variables. The following environment variables are available: 793 + 794 + * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 795 + * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 796 + * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 797 + * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 798 + * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 799 + * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 800 + * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 801 + * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 802 + * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 803 + 804 + ### Running spindle 805 + 806 + 1. **Set the environment variables.** For example: 807 + 808 + ```shell 809 + export SPINDLE_SERVER_HOSTNAME="your-hostname" 810 + export SPINDLE_SERVER_OWNER="your-did" 811 + ``` 812 + 813 + 2. **Build the Spindle binary.** 814 + 815 + ```shell 816 + cd core 817 + go mod download 818 + go build -o cmd/spindle/spindle cmd/spindle/main.go 819 + ``` 820 + 821 + 3. 
**Create the log directory.**
822 + 
823 + ```shell
824 + sudo mkdir -p /var/log/spindle
825 + sudo chown $USER:$USER -R /var/log/spindle
826 + ```
827 + 
828 + 4. **Run the Spindle binary.**
829 + 
830 + ```shell
831 + ./cmd/spindle/spindle
832 + ```
833 + 
834 + Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
835 + 
836 + ## Architecture
837 + 
838 + Spindle is a small CI runner service. Here's a high-level overview of how it operates:
839 + 
840 + * Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
841 + [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
842 + * When a new repo record comes through (typically when you add a spindle to a
843 + repo from the settings), spindle then resolves the underlying knot and
844 + subscribes to repo events (see:
845 + [`sh.tangled.pipeline`](/lexicons/pipeline.json)).
846 + * The spindle engine then handles execution of the pipeline, with results and
847 + logs beamed out on the spindle event stream over WebSocket.
848 + 
849 + ### The engine
850 + 
851 + At present, the only supported backend is Docker (and Podman, if Docker
852 + compatibility is enabled, so that `/run/docker.sock` is created). Spindle
853 + executes each step in the pipeline in a fresh container, with state persisted
854 + across steps within the `/tangled/workspace` directory.
855 + 
856 + The base image for the container is constructed on the fly using
857 + [Nixery](https://nixery.dev), which is handy for caching layers for frequently
858 + used packages.
859 + 
860 + The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
861 + 
862 + ## Secrets with openbao
863 + 
864 + This section covers setting up spindle to use OpenBao for secrets
865 + management via OpenBao Proxy instead of the default SQLite backend.
866 + 
867 + ### Overview
868 + 
869 + Spindle now uses OpenBao Proxy for secrets management. The proxy handles
870 + authentication automatically using AppRole credentials, while spindle
871 + connects to the local proxy instead of directly to the OpenBao server.
872 + 
873 + This approach provides better security, automatic token renewal, and
874 + simplified application code.
875 + 
876 + ### Installation
877 + 
878 + Install OpenBao from Nixpkgs:
879 + 
880 + ```bash
881 + nix shell nixpkgs#openbao # for a local server
882 + ```
883 + 
884 + ### Setup
885 + 
886 + The setup process is documented for both local development and production.
887 + 
888 + #### Local development
889 + 
890 + Start OpenBao in dev mode:
891 + 
892 + ```bash
893 + bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
894 + ```
895 + 
896 + This starts OpenBao on `http://localhost:8201` with a root token.
897 + 
898 + Set up the environment for the `bao` CLI:
899 + 
900 + ```bash
901 + export BAO_ADDR=http://localhost:8200
902 + export BAO_TOKEN=root
903 + ```
904 + 
905 + #### Production
906 + 
907 + You would typically use a systemd service with a
908 + configuration file. Refer to
909 + [@tangled.org/infra](https://tangled.org/@tangled.org/infra)
910 + for how this can be achieved using Nix.
911 + 
912 + Then, initialize the bao server:
913 + 
914 + ```bash
915 + bao operator init -key-shares=1 -key-threshold=1
916 + ```
917 + 
918 + This will print out an unseal key and a root token. Save them
919 + somewhere (like a password manager).
Then unseal the vault 920 + to begin setting it up: 921 + 922 + ```bash 923 + bao operator unseal <unseal_key> 924 + ``` 925 + 926 + All steps below remain the same across both dev and 927 + production setups. 928 + 929 + #### Configure openbao server 930 + 931 + Create the spindle KV mount: 932 + 933 + ```bash 934 + bao secrets enable -path=spindle -version=2 kv 935 + ``` 936 + 937 + Set up AppRole authentication and policy: 938 + 939 + Create a policy file `spindle-policy.hcl`: 940 + 941 + ```hcl 942 + # Full access to spindle KV v2 data 943 + path "spindle/data/*" { 944 + capabilities = ["create", "read", "update", "delete"] 945 + } 946 + 947 + # Access to metadata for listing and management 948 + path "spindle/metadata/*" { 949 + capabilities = ["list", "read", "delete", "update"] 950 + } 951 + 952 + # Allow listing at root level 953 + path "spindle/" { 954 + capabilities = ["list"] 955 + } 956 + 957 + # Required for connection testing and health checks 958 + path "auth/token/lookup-self" { 959 + capabilities = ["read"] 960 + } 961 + ``` 962 + 963 + Apply the policy and create an AppRole: 964 + 965 + ```bash 966 + bao policy write spindle-policy spindle-policy.hcl 967 + bao auth enable approle 968 + bao write auth/approle/role/spindle \ 969 + token_policies="spindle-policy" \ 970 + token_ttl=1h \ 971 + token_max_ttl=4h \ 972 + bind_secret_id=true \ 973 + secret_id_ttl=0 \ 974 + secret_id_num_uses=0 975 + ``` 976 + 977 + Get the credentials: 978 + 979 + ```bash 980 + # Get role ID (static) 981 + ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 982 + 983 + # Generate secret ID 984 + SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 985 + 986 + echo "Role ID: $ROLE_ID" 987 + echo "Secret ID: $SECRET_ID" 988 + ``` 989 + 990 + #### Create proxy configuration 991 + 992 + Create the credential files: 993 + 994 + ```bash 995 + # Create directory for OpenBao files 996 + mkdir -p /tmp/openbao 997 + 998 + # Save credentials 999 + echo "$ROLE_ID" > /tmp/openbao/role-id 1000 + echo "$SECRET_ID" > /tmp/openbao/secret-id 1001 + chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 1002 + ``` 1003 + 1004 + Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 1005 + 1006 + ```hcl 1007 + # OpenBao server connection 1008 + vault { 1009 + address = "http://localhost:8200" 1010 + } 1011 + 1012 + # Auto-Auth using AppRole 1013 + auto_auth { 1014 + method "approle" { 1015 + mount_path = "auth/approle" 1016 + config = { 1017 + role_id_file_path = "/tmp/openbao/role-id" 1018 + secret_id_file_path = "/tmp/openbao/secret-id" 1019 + } 1020 + } 1021 + 1022 + # Optional: write token to file for debugging 1023 + sink "file" { 1024 + config = { 1025 + path = "/tmp/openbao/token" 1026 + mode = 0640 1027 + } 1028 + } 1029 + } 1030 + 1031 + # Proxy listener for spindle 1032 + listener "tcp" { 1033 + address = "127.0.0.1:8201" 1034 + tls_disable = true 1035 + } 1036 + 1037 + # Enable API proxy with auto-auth token 1038 + api_proxy { 1039 + use_auto_auth_token = true 1040 + } 1041 + 1042 + # Enable response caching 1043 + cache { 1044 + use_auto_auth_token = true 1045 + } 1046 + 1047 + # Logging 1048 + log_level = "info" 1049 + ``` 1050 + 1051 + #### Start the proxy 1052 + 1053 + Start OpenBao Proxy: 1054 + 1055 + ```bash 1056 + bao proxy -config=/tmp/openbao/proxy.hcl 1057 + ``` 1058 + 1059 + The proxy will authenticate with OpenBao and start listening on 1060 + `127.0.0.1:8201`. 
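If you kept the optional file sink from the proxy configuration above, a quick way to confirm that auto-auth succeeded is to check that a token landed at the sink path:

```bash
# a client token should appear here once AppRole auth succeeds
cat /tmp/openbao/token
```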
1061 + 
1062 + #### Configure spindle
1063 + 
1064 + Set these environment variables for spindle:
1065 + 
1066 + ```bash
1067 + export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1068 + export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1069 + export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1070 + ```
1071 + 
1072 + On startup, spindle will now connect to the local proxy,
1073 + which handles all authentication automatically.
1074 + 
1075 + ### Production setup for proxy
1076 + 
1077 + For production, you'll want to run the proxy as a service.
1078 + 
1079 + Place your production configuration in
1080 + `/etc/openbao/proxy.hcl` with proper TLS settings for the
1081 + vault connection.
1082 + 
1083 + ### Verifying setup
1084 + 
1085 + Test the proxy directly:
1086 + 
1087 + ```bash
1088 + # Check proxy health
1089 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1090 + 
1091 + # Test token lookup through proxy
1092 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1093 + ```
1094 + 
1095 + Test OpenBao operations through the server:
1096 + 
1097 + ```bash
1098 + # List all secrets
1099 + bao kv list spindle/
1100 + 
1101 + # Add a test secret via the spindle API, then check it exists
1102 + bao kv list spindle/repos/
1103 + 
1104 + # Get a specific secret
1105 + bao kv get spindle/repos/your_repo_path/SECRET_NAME
1106 + ```
1107 + 
1108 + ### How it works
1109 + 
1110 + - Spindle connects to OpenBao Proxy on localhost (typically
1111 + port 8200 or 8201)
1112 + - The proxy authenticates with OpenBao using AppRole
1113 + credentials
1114 + - All spindle requests go through the proxy, which injects
1115 + authentication tokens
1116 + - Secrets are stored at
1117 + `spindle/repos/{sanitized_repo_path}/{secret_key}`
1118 + - Repository paths like `did:plc:alice/myrepo` become
1119 + `did_plc_alice_myrepo`
1120 + - The proxy handles all token renewal automatically
1121 + - Spindle no longer manages tokens or authentication
1122 + directly
1123 + 
1124 + ### Troubleshooting
1125 + 
1126 + **Connection refused**: Check that the OpenBao Proxy is
1127 + running and listening on the configured address.
1128 + 
1129 + **403 errors**: Verify the AppRole credentials are correct
1130 + and the policy has the necessary permissions.
1131 + 
1132 + **404 route errors**: The spindle KV mount probably doesn't
1133 + exist -- run the mount creation step again.
1134 + 
1135 + **Proxy authentication failures**: Check the proxy logs and
1136 + verify the role-id and secret-id files are readable and
1137 + contain valid credentials.
1138 + 
1139 + **Secret not found after writing**: This can indicate policy
1140 + permission issues. Verify the policy includes both
1141 + `spindle/data/*` and `spindle/metadata/*` paths with
1142 + appropriate capabilities.
1143 + 
1144 + Check proxy logs:
1145 + 
1146 + ```bash
1147 + # If running as systemd service
1148 + journalctl -u openbao-proxy -f
1149 + 
1150 + # If running directly, check the console output
1151 + ```
1152 + 
1153 + Test AppRole authentication manually:
1154 + 
1155 + ```bash
1156 + bao write auth/approle/login \
1157 + role_id="$(cat /tmp/openbao/role-id)" \
1158 + secret_id="$(cat /tmp/openbao/secret-id)"
1159 + ```
1160 + 
1161 + # Migrating knots and spindles
1162 + 
1163 + Sometimes, non-backwards compatible changes are made to the
1164 + knot/spindle XRPC APIs. If you host a knot or a spindle, you
1165 + will need to follow this guide to upgrade.
Typically, this 1166 + only requires you to deploy the newest version. 1167 + 1168 + This document is laid out in reverse-chronological order. 1169 + Newer migration guides are listed first, and older guides 1170 + are further down the page. 1171 + 1172 + ## Upgrading from v1.8.x 1173 + 1174 + After v1.8.2, the HTTP API for knots and spindles has been 1175 + deprecated and replaced with XRPC. Repositories on outdated 1176 + knots will not be viewable from the appview. Upgrading is 1177 + straightforward however. 1178 + 1179 + For knots: 1180 + 1181 + - Upgrade to the latest tag (v1.9.0 or above) 1182 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1183 + hit the "retry" button to verify your knot 1184 + 1185 + For spindles: 1186 + 1187 + - Upgrade to the latest tag (v1.9.0 or above) 1188 + - Head to the [spindle 1189 + dashboard](https://tangled.org/settings/spindles) and hit the 1190 + "retry" button to verify your spindle 1191 + 1192 + ## Upgrading from v1.7.x 1193 + 1194 + After v1.7.0, knot secrets have been deprecated. You no 1195 + longer need a secret from the appview to run a knot. All 1196 + authorized commands to knots are managed via [Inter-Service 1197 + Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 1198 + Knots will be read-only until upgraded. 1199 + 1200 + Upgrading is quite easy, in essence: 1201 + 1202 + - `KNOT_SERVER_SECRET` is no more, you can remove this 1203 + environment variable entirely 1204 + - `KNOT_SERVER_OWNER` is now required on boot, set this to 1205 + your DID. You can find your DID in the 1206 + [settings](https://tangled.org/settings) page. 1207 + - Restart your knot once you have replaced the environment 1208 + variable 1209 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1210 + hit the "retry" button to verify your knot. This simply 1211 + writes a `sh.tangled.knot` record to your PDS. 1212 + 1213 + If you use the nix module, simply bump the flake to the 1214 + latest revision, and change your config block like so: 1215 + 1216 + ```diff 1217 + services.tangled.knot = { 1218 + enable = true; 1219 + server = { 1220 + - secretFile = /path/to/secret; 1221 + + owner = "did:plc:foo"; 1222 + }; 1223 + }; 1224 + ``` 1225 + 1226 + # Hacking on Tangled 1227 + 1228 + We highly recommend [installing 1229 + Nix](https://nixos.org/download/) (the package manager) 1230 + before working on the codebase. The Nix flake provides a lot 1231 + of helpers to get started and most importantly, builds and 1232 + dev shells are entirely deterministic. 1233 + 1234 + To set up your dev environment: 1235 + 1236 + ```bash 1237 + nix develop 1238 + ``` 1239 + 1240 + Non-Nix users can look at the `devShell` attribute in the 1241 + `flake.nix` file to determine necessary dependencies. 
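Once you're inside the dev shell, the project's toolchain should be on your `PATH`; for example, the Go toolchain used to build the appview, knot, and spindle binaries:

```bash
# run inside `nix develop`; the Go compiler comes from the dev shell
go version
```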
1242 + 
1243 + ## Running the appview
1244 + 
1245 + The Nix flake also exposes a few `app` attributes (run `nix
1246 + flake show` to see a full list of what the flake provides);
1247 + one of the apps runs the appview with the `air`
1248 + live-reloader:
1249 + 
1250 + ```bash
1251 + TANGLED_DEV=true nix run .#watch-appview
1252 + 
1253 + # TANGLED_DB_PATH might be of interest to point to
1254 + # different sqlite DBs
1255 + 
1256 + # in a separate shell, you can live-reload tailwind
1257 + nix run .#watch-tailwind
1258 + ```
1259 + 
1260 + To authenticate with the appview, you will need Redis and
1261 + OAuth JWKs to be set up:
1262 + 
1263 + ```
1264 + # OAuth JWKs should already be set up by the Nix devshell:
1265 + echo $TANGLED_OAUTH_CLIENT_SECRET
1266 + z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1267 + 
1268 + echo $TANGLED_OAUTH_CLIENT_KID
1269 + 1761667908
1270 + 
1271 + # if not, you can set it up yourself:
1272 + goat key generate -t P-256
1273 + Key Type: P-256 / secp256r1 / ES256 private key
1274 + Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1275 + z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1276 + Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1277 + did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1278 + 
1279 + # the secret key from above
1280 + export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1281 + 
1282 + # Run Redis in a new shell to store OAuth sessions
1283 + redis-server
1284 + ```
1285 + 
1286 + ## Running knots and spindles
1287 + 
1288 + An end-to-end knot setup requires setting up a machine with
1289 + `sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1290 + quite cumbersome. So the Nix flake provides a
1291 + `nixosConfiguration` to do so.
1292 + 
1293 + <details>
1294 + <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1295 + 
1296 + In order to build Tangled's dev VM on macOS, you will
1297 + first need to set up a Linux Nix builder. The recommended
1298 + way to do so is to run a [`darwin.linux-builder`
1299 + VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1300 + and to register it in `nix.conf` as a builder for Linux
1301 + with the same architecture as your Mac (`aarch64-linux` if
1302 + you are using Apple Silicon).
1303 + 
1304 + > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1305 + > the Tangled repo so that it doesn't conflict with the other VM. For example,
1306 + > you can do
1307 + > 
1308 + > ```shell
1309 + > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1310 + > ```
1311 + > 
1312 + > to store the builder VM in a temporary dir.
1313 + > 
1314 + > You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
1315 + > avoid subtle problems.
1316 + 
1317 + Alternatively, you can use any other method to set up a
1318 + Linux machine with Nix installed that you can `sudo ssh`
1319 + into (in other words, the root user on your Mac has to be able
1320 + to ssh into the Linux machine without entering a password)
1321 + and that has the same architecture as your Mac. See
1322 + [remote builder
1323 + instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1324 + for how to register such a builder in `nix.conf`.
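Either way, the registration ends up as a `builders` entry in `nix.conf`. The values below are only placeholders (the user, host alias, key path, and system string all depend on how you set up your builder):

```
# /etc/nix/nix.conf -- illustrative values only
builders = ssh-ng://builder@linux-builder aarch64-linux /etc/nix/builder_ed25519 4
builders-use-substitutes = true
```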
1325 + 1326 + > WARNING: If you'd like to use 1327 + > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 1328 + > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 1329 + > ssh` works can be tricky. It seems to be [possible with 1330 + > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 1331 + 1332 + </details> 1333 + 1334 + To begin, grab your DID from http://localhost:3000/settings. 1335 + Then, set `TANGLED_VM_KNOT_OWNER` and 1336 + `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 1337 + lightweight NixOS VM like so: 1338 + 1339 + ```bash 1340 + nix run --impure .#vm 1341 + 1342 + # type `poweroff` at the shell to exit the VM 1343 + ``` 1344 + 1345 + This starts a knot on port 6444, a spindle on port 6555 1346 + with `ssh` exposed on port 2222. 1347 + 1348 + Once the services are running, head to 1349 + http://localhost:3000/settings/knots and hit "Verify". It should 1350 + verify the ownership of the services instantly if everything 1351 + went smoothly. 1352 + 1353 + You can push repositories to this VM with this ssh config 1354 + block on your main machine: 1355 + 1356 + ```bash 1357 + Host nixos-shell 1358 + Hostname localhost 1359 + Port 2222 1360 + User git 1361 + IdentityFile ~/.ssh/my_tangled_key 1362 + ``` 1363 + 1364 + Set up a remote called `local-dev` on a git repo: 1365 + 1366 + ```bash 1367 + git remote add local-dev git@nixos-shell:user/repo 1368 + git push local-dev main 1369 + ``` 1370 + 1371 + The above VM should already be running a spindle on 1372 + `localhost:6555`. Head to http://localhost:3000/settings/spindles and 1373 + hit "Verify". You can then configure each repository to use 1374 + this spindle and run CI jobs. 1375 + 1376 + Of interest when debugging spindles: 1377 + 1378 + ``` 1379 + # Service logs from journald: 1380 + journalctl -xeu spindle 1381 + 1382 + # CI job logs from disk: 1383 + ls /var/log/spindle 1384 + 1385 + # Debugging spindle database: 1386 + sqlite3 /var/lib/spindle/spindle.db 1387 + 1388 + # litecli has a nicer REPL interface: 1389 + litecli /var/lib/spindle/spindle.db 1390 + ``` 1391 + 1392 + If for any reason you wish to disable either one of the 1393 + services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 1394 + `services.tangled.spindle.enable` (or 1395 + `services.tangled.knot.enable`) to `false`. 1396 + 1397 + # Contribution guide 1398 + 1399 + ## Commit guidelines 1400 + 1401 + We follow a commit style similar to the Go project. Please keep commits: 1402 + 1403 + * **atomic**: each commit should represent one logical change 1404 + * **descriptive**: the commit message should clearly describe what the 1405 + change does and why it's needed 1406 + 1407 + ### Message format 1408 + 1409 + ``` 1410 + <service/top-level directory>/<affected package/directory>: <short summary of change> 1411 + 1412 + Optional longer description can go here, if necessary. Explain what the 1413 + change does and why, especially if not obvious. Reference relevant 1414 + issues or PRs when applicable. These can be links for now since we don't 1415 + auto-link issues/PRs yet. 1416 + ``` 1417 + 1418 + Here are some examples: 1419 + 1420 + ``` 1421 + appview/state: fix token expiry check in middleware 1422 + 1423 + The previous check did not account for clock drift, leading to premature 1424 + token invalidation. 
1425 + ```
1426 + 
1427 + ```
1428 + knotserver/git/service: improve error checking in upload-pack
1429 + ```
1430 + 
1431 + 
1432 + ### General notes
1433 + 
1434 + - PRs get merged "as-is" (fast-forward) -- like applying a patch-series
1435 + using `git am`. At present, there is no squashing -- so please author
1436 + your commits as they would appear on `master`, following the above
1437 + guidelines.
1438 + - If there is a lot of nesting, for example "appview:
1439 + pages/templates/repo/fragments: ...", these can be truncated down to
1440 + just "appview: repo/fragments: ...". If the change affects a lot of
1441 + subdirectories, you may abbreviate to just the top-level names, e.g.
1442 + "appview: ..." or "knotserver: ...".
1443 + - Keep commits lowercased with no trailing period.
1444 + - Use the imperative mood in the summary line (e.g., "fix bug" not
1445 + "fixed bug" or "fixes bug").
1446 + - Try to keep the summary line under 72 characters, but we aren't too
1447 + fussed about this.
1448 + - Follow the same formatting for PR titles if filled manually.
1449 + - Don't include unrelated changes in the same commit.
1450 + - Avoid noisy commit messages like "wip" or "final fix" -- rewrite history
1451 + before submitting if necessary.
1452 + 
1453 + ## Code formatting
1454 + 
1455 + We use a variety of tools to format our code, and multiplex them with
1456 + [`treefmt`](https://treefmt.com). All you need to do to format your changes
1457 + is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1458 + 
1459 + ## Proposals for bigger changes
1460 + 
1461 + Small fixes like typos, minor bugs, or trivial refactors can be
1462 + submitted directly as PRs.
1463 + 
1464 + For larger changes -- especially those introducing new features, significant
1465 + refactoring, or altering system behavior -- please open a proposal first. This
1466 + helps us evaluate the scope, design, and potential impact before implementation.
1467 + 
1468 + Create a new issue titled:
1469 + 
1470 + ```
1471 + proposal: <affected scope>: <summary of change>
1472 + ```
1473 + 
1474 + In the description, explain:
1475 + 
1476 + - What the change is
1477 + - Why it's needed
1478 + - How you plan to implement it (roughly)
1479 + - Any open questions or tradeoffs
1480 + 
1481 + We'll use the issue thread to discuss and refine the idea before moving
1482 + forward.
1483 + 
1484 + ## Developer Certificate of Origin (DCO)
1485 + 
1486 + We require all contributors to certify that they have the right to
1487 + submit the code they're contributing. To do this, we follow the
1488 + [Developer Certificate of Origin
1489 + (DCO)](https://developercertificate.org/).
1490 + 
1491 + By signing your commits, you're stating that the contribution is your
1492 + own work, or that you have the right to submit it under the project's
1493 + license. This helps us keep things clean and legally sound.
1494 + 
1495 + To sign your commit, just add the `-s` flag when committing:
1496 + 
1497 + ```sh
1498 + git commit -s -m "your commit message"
1499 + ```
1500 + 
1501 + This appends a line like:
1502 + 
1503 + ```
1504 + Signed-off-by: Your Name <your.email@example.com>
1505 + ```
1506 + 
1507 + We won't merge commits if they aren't signed off. If you forget, you can
1508 + amend the last commit like this:
1509 + 
1510 + ```sh
1511 + git commit --amend -s
1512 + ```
1513 + 
1514 + If you're submitting a PR with multiple commits, make sure each one is
1515 + signed.
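If several commits on your branch are missing the trailer, you don't have to amend them one at a time; assuming your branch is based on `master`, rebasing with `--signoff` adds the trailer to every rewritten commit:

```sh
# rewrites the commits on your branch, adding a Signed-off-by trailer to each
git rebase --signoff master
```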
1516 + 1517 + For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 1518 + to make it sign off commits in the tangled repo: 1519 + 1520 + ```shell 1521 + # Safety check, should say "No matching config key..." 1522 + jj config list templates.commit_trailers 1523 + # The command below may need to be adjusted if the command above returned something. 1524 + jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 1525 + ``` 1526 + 1527 + Refer to the [jujutsu 1528 + documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 1529 + for more information.
-136
docs/contributing.md
··· 1 - # tangled contributing guide 2 - 3 - ## commit guidelines 4 - 5 - We follow a commit style similar to the Go project. Please keep commits: 6 - 7 - * **atomic**: each commit should represent one logical change 8 - * **descriptive**: the commit message should clearly describe what the 9 - change does and why it's needed 10 - 11 - ### message format 12 - 13 - ``` 14 - <service/top-level directory>/<affected package/directory>: <short summary of change> 15 - 16 - 17 - Optional longer description can go here, if necessary. Explain what the 18 - change does and why, especially if not obvious. Reference relevant 19 - issues or PRs when applicable. These can be links for now since we don't 20 - auto-link issues/PRs yet. 21 - ``` 22 - 23 - Here are some examples: 24 - 25 - ``` 26 - appview/state: fix token expiry check in middleware 27 - 28 - The previous check did not account for clock drift, leading to premature 29 - token invalidation. 30 - ``` 31 - 32 - ``` 33 - knotserver/git/service: improve error checking in upload-pack 34 - ``` 35 - 36 - 37 - ### general notes 38 - 39 - - PRs get merged "as-is" (fast-forward) -- like applying a patch-series 40 - using `git am`. At present, there is no squashing -- so please author 41 - your commits as they would appear on `master`, following the above 42 - guidelines. 43 - - If there is a lot of nesting, for example "appview: 44 - pages/templates/repo/fragments: ...", these can be truncated down to 45 - just "appview: repo/fragments: ...". If the change affects a lot of 46 - subdirectories, you may abbreviate to just the top-level names, e.g. 47 - "appview: ..." or "knotserver: ...". 48 - - Keep commits lowercased with no trailing period. 49 - - Use the imperative mood in the summary line (e.g., "fix bug" not 50 - "fixed bug" or "fixes bug"). 51 - - Try to keep the summary line under 72 characters, but we aren't too 52 - fussed about this. 53 - - Follow the same formatting for PR titles if filled manually. 54 - - Don't include unrelated changes in the same commit. 55 - - Avoid noisy commit messages like "wip" or "final fix"โ€”rewrite history 56 - before submitting if necessary. 57 - 58 - ## code formatting 59 - 60 - We use a variety of tools to format our code, and multiplex them with 61 - [`treefmt`](https://treefmt.com): all you need to do to format your changes 62 - is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 63 - 64 - ## proposals for bigger changes 65 - 66 - Small fixes like typos, minor bugs, or trivial refactors can be 67 - submitted directly as PRs. 68 - 69 - For larger changesโ€”especially those introducing new features, significant 70 - refactoring, or altering system behaviorโ€”please open a proposal first. This 71 - helps us evaluate the scope, design, and potential impact before implementation. 72 - 73 - ### proposal format 74 - 75 - Create a new issue titled: 76 - 77 - ``` 78 - proposal: <affected scope>: <summary of change> 79 - ``` 80 - 81 - In the description, explain: 82 - 83 - - What the change is 84 - - Why it's needed 85 - - How you plan to implement it (roughly) 86 - - Any open questions or tradeoffs 87 - 88 - We'll use the issue thread to discuss and refine the idea before moving 89 - forward. 90 - 91 - ## developer certificate of origin (DCO) 92 - 93 - We require all contributors to certify that they have the right to 94 - submit the code they're contributing. To do this, we follow the 95 - [Developer Certificate of Origin 96 - (DCO)](https://developercertificate.org/). 
97 - 98 - By signing your commits, you're stating that the contribution is your 99 - own work, or that you have the right to submit it under the project's 100 - license. This helps us keep things clean and legally sound. 101 - 102 - To sign your commit, just add the `-s` flag when committing: 103 - 104 - ```sh 105 - git commit -s -m "your commit message" 106 - ``` 107 - 108 - This appends a line like: 109 - 110 - ``` 111 - Signed-off-by: Your Name <your.email@example.com> 112 - ``` 113 - 114 - We won't merge commits if they aren't signed off. If you forget, you can 115 - amend the last commit like this: 116 - 117 - ```sh 118 - git commit --amend -s 119 - ``` 120 - 121 - If you're submitting a PR with multiple commits, make sure each one is 122 - signed. 123 - 124 - For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 125 - to make it sign off commits in the tangled repo: 126 - 127 - ```shell 128 - # Safety check, should say "No matching config key..." 129 - jj config list templates.commit_trailers 130 - # The command below may need to be adjusted if the command above returned something. 131 - jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 132 - ``` 133 - 134 - Refer to the [jj 135 - documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 136 - for more information.
-172
docs/hacking.md
··· 1 - # hacking on tangled 2 - 3 - We highly recommend [installing 4 - nix](https://nixos.org/download/) (the package manager) 5 - before working on the codebase. The nix flake provides a lot 6 - of helpers to get started and most importantly, builds and 7 - dev shells are entirely deterministic. 8 - 9 - To set up your dev environment: 10 - 11 - ```bash 12 - nix develop 13 - ``` 14 - 15 - Non-nix users can look at the `devShell` attribute in the 16 - `flake.nix` file to determine necessary dependencies. 17 - 18 - ## running the appview 19 - 20 - The nix flake also exposes a few `app` attributes (run `nix 21 - flake show` to see a full list of what the flake provides), 22 - one of the apps runs the appview with the `air` 23 - live-reloader: 24 - 25 - ```bash 26 - TANGLED_DEV=true nix run .#watch-appview 27 - 28 - # TANGLED_DB_PATH might be of interest to point to 29 - # different sqlite DBs 30 - 31 - # in a separate shell, you can live-reload tailwind 32 - nix run .#watch-tailwind 33 - ``` 34 - 35 - To authenticate with the appview, you will need redis and 36 - OAUTH JWKs to be setup: 37 - 38 - ``` 39 - # oauth jwks should already be setup by the nix devshell: 40 - echo $TANGLED_OAUTH_CLIENT_SECRET 41 - z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc 42 - 43 - echo $TANGLED_OAUTH_CLIENT_KID 44 - 1761667908 45 - 46 - # if not, you can set it up yourself: 47 - goat key generate -t P-256 48 - Key Type: P-256 / secp256r1 / ES256 private key 49 - Secret Key (Multibase Syntax): save this securely (eg, add to password manager) 50 - z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL 51 - Public Key (DID Key Syntax): share or publish this (eg, in DID document) 52 - did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR 53 - 54 - # the secret key from above 55 - export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..." 56 - 57 - # run redis in at a new shell to store oauth sessions 58 - redis-server 59 - ``` 60 - 61 - ## running knots and spindles 62 - 63 - An end-to-end knot setup requires setting up a machine with 64 - `sshd`, `AuthorizedKeysCommand`, and git user, which is 65 - quite cumbersome. So the nix flake provides a 66 - `nixosConfiguration` to do so. 67 - 68 - <details> 69 - <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary> 70 - 71 - In order to build Tangled's dev VM on macOS, you will 72 - first need to set up a Linux Nix builder. The recommended 73 - way to do so is to run a [`darwin.linux-builder` 74 - VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) 75 - and to register it in `nix.conf` as a builder for Linux 76 - with the same architecture as your Mac (`linux-aarch64` if 77 - you are using Apple Silicon). 78 - 79 - > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside 80 - > the tangled repo so that it doesn't conflict with the other VM. For example, 81 - > you can do 82 - > 83 - > ```shell 84 - > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder 85 - > ``` 86 - > 87 - > to store the builder VM in a temporary dir. 88 - > 89 - > You should read and follow [all the other intructions][darwin builder vm] to 90 - > avoid subtle problems. 91 - 92 - Alternatively, you can use any other method to set up a 93 - Linux machine with `nix` installed that you can `sudo ssh` 94 - into (in other words, root user on your Mac has to be able 95 - to ssh into the Linux machine without entering a password) 96 - and that has the same architecture as your Mac. 
See 97 - [remote builder 98 - instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements) 99 - for how to register such a builder in `nix.conf`. 100 - 101 - > WARNING: If you'd like to use 102 - > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 103 - > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 104 - > ssh` works can be tricky. It seems to be [possible with 105 - > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 106 - 107 - </details> 108 - 109 - To begin, grab your DID from http://localhost:3000/settings. 110 - Then, set `TANGLED_VM_KNOT_OWNER` and 111 - `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 112 - lightweight NixOS VM like so: 113 - 114 - ```bash 115 - nix run --impure .#vm 116 - 117 - # type `poweroff` at the shell to exit the VM 118 - ``` 119 - 120 - This starts a knot on port 6444, a spindle on port 6555 121 - with `ssh` exposed on port 2222. 122 - 123 - Once the services are running, head to 124 - http://localhost:3000/settings/knots and hit verify. It should 125 - verify the ownership of the services instantly if everything 126 - went smoothly. 127 - 128 - You can push repositories to this VM with this ssh config 129 - block on your main machine: 130 - 131 - ```bash 132 - Host nixos-shell 133 - Hostname localhost 134 - Port 2222 135 - User git 136 - IdentityFile ~/.ssh/my_tangled_key 137 - ``` 138 - 139 - Set up a remote called `local-dev` on a git repo: 140 - 141 - ```bash 142 - git remote add local-dev git@nixos-shell:user/repo 143 - git push local-dev main 144 - ``` 145 - 146 - ### running a spindle 147 - 148 - The above VM should already be running a spindle on 149 - `localhost:6555`. Head to http://localhost:3000/settings/spindles and 150 - hit verify. You can then configure each repository to use 151 - this spindle and run CI jobs. 152 - 153 - Of interest when debugging spindles: 154 - 155 - ``` 156 - # service logs from journald: 157 - journalctl -xeu spindle 158 - 159 - # CI job logs from disk: 160 - ls /var/log/spindle 161 - 162 - # debugging spindle db: 163 - sqlite3 /var/lib/spindle/spindle.db 164 - 165 - # litecli has a nicer REPL interface: 166 - litecli /var/lib/spindle/spindle.db 167 - ``` 168 - 169 - If for any reason you wish to disable either one of the 170 - services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 171 - `services.tangled.spindle.enable` (or 172 - `services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
··· 1 + { 2 + "text-color": null, 3 + "background-color": null, 4 + "line-number-color": null, 5 + "line-number-background-color": null, 6 + "text-styles": { 7 + "Annotation": { 8 + "text-color": null, 9 + "background-color": null, 10 + "bold": false, 11 + "italic": true, 12 + "underline": false 13 + }, 14 + "ControlFlow": { 15 + "text-color": null, 16 + "background-color": null, 17 + "bold": true, 18 + "italic": false, 19 + "underline": false 20 + }, 21 + "Error": { 22 + "text-color": null, 23 + "background-color": null, 24 + "bold": true, 25 + "italic": false, 26 + "underline": false 27 + }, 28 + "Alert": { 29 + "text-color": null, 30 + "background-color": null, 31 + "bold": true, 32 + "italic": false, 33 + "underline": false 34 + }, 35 + "Preprocessor": { 36 + "text-color": null, 37 + "background-color": null, 38 + "bold": true, 39 + "italic": false, 40 + "underline": false 41 + }, 42 + "Information": { 43 + "text-color": null, 44 + "background-color": null, 45 + "bold": false, 46 + "italic": true, 47 + "underline": false 48 + }, 49 + "Warning": { 50 + "text-color": null, 51 + "background-color": null, 52 + "bold": false, 53 + "italic": true, 54 + "underline": false 55 + }, 56 + "Documentation": { 57 + "text-color": null, 58 + "background-color": null, 59 + "bold": false, 60 + "italic": true, 61 + "underline": false 62 + }, 63 + "DataType": { 64 + "text-color": "#8f4e8b", 65 + "background-color": null, 66 + "bold": false, 67 + "italic": false, 68 + "underline": false 69 + }, 70 + "Comment": { 71 + "text-color": null, 72 + "background-color": null, 73 + "bold": false, 74 + "italic": true, 75 + "underline": false 76 + }, 77 + "CommentVar": { 78 + "text-color": null, 79 + "background-color": null, 80 + "bold": false, 81 + "italic": true, 82 + "underline": false 83 + }, 84 + "Keyword": { 85 + "text-color": null, 86 + "background-color": null, 87 + "bold": true, 88 + "italic": false, 89 + "underline": false 90 + } 91 + } 92 + } 93 +
-214
docs/knot-hosting.md
··· 1 - # knot self-hosting guide 2 - 3 - So you want to run your own knot server? Great! Here are a few prerequisites: 4 - 5 - 1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind. 6 - 2. A (sub)domain name. People generally use `knot.example.com`. 7 - 3. A valid SSL certificate for your domain. 8 - 9 - There's a couple of ways to get started: 10 - * NixOS: refer to 11 - [flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix) 12 - * Docker: Documented at 13 - [@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker) 14 - (community maintained: support is not guaranteed!) 15 - * Manual: Documented below. 16 - 17 - ## manual setup 18 - 19 - First, clone this repository: 20 - 21 - ``` 22 - git clone https://tangled.org/@tangled.org/core 23 - ``` 24 - 25 - Then, build the `knot` CLI. This is the knot administration and operation tool. 26 - For the purpose of this guide, we're only concerned with these subcommands: 27 - 28 - * `knot server`: the main knot server process, typically run as a 29 - supervised service 30 - * `knot guard`: handles role-based access control for git over SSH 31 - (you'll never have to run this yourself) 32 - * `knot keys`: fetches SSH keys associated with your knot; we'll use 33 - this to generate the SSH `AuthorizedKeysCommand` 34 - 35 - ``` 36 - cd core 37 - export CGO_ENABLED=1 38 - go build -o knot ./cmd/knot 39 - ``` 40 - 41 - Next, move the `knot` binary to a location owned by `root` -- 42 - `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 43 - 44 - ``` 45 - sudo mv knot /usr/local/bin/knot 46 - sudo chown root:root /usr/local/bin/knot 47 - ``` 48 - 49 - This is necessary because SSH `AuthorizedKeysCommand` requires [really 50 - specific permissions](https://stackoverflow.com/a/27638306). The 51 - `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 52 - retrieve a user's public SSH keys dynamically for authentication. Let's 53 - set that up. 54 - 55 - ``` 56 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 57 - Match User git 58 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 59 - AuthorizedKeysCommandUser nobody 60 - EOF 61 - ``` 62 - 63 - Then, reload `sshd`: 64 - 65 - ``` 66 - sudo systemctl reload ssh 67 - ``` 68 - 69 - Next, create the `git` user. We'll use the `git` user's home directory 70 - to store repositories: 71 - 72 - ``` 73 - sudo adduser git 74 - ``` 75 - 76 - Create `/home/git/.knot.env` with the following, updating the values as 77 - necessary. The `KNOT_SERVER_OWNER` should be set to your 78 - DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 79 - 80 - ``` 81 - KNOT_REPO_SCAN_PATH=/home/git 82 - KNOT_SERVER_HOSTNAME=knot.example.com 83 - APPVIEW_ENDPOINT=https://tangled.sh 84 - KNOT_SERVER_OWNER=did:plc:foobar 85 - KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 86 - KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 87 - ``` 88 - 89 - If you run a Linux distribution that uses systemd, you can use the provided 90 - service file to run the server. Copy 91 - [`knotserver.service`](/systemd/knotserver.service) 92 - to `/etc/systemd/system/`. Then, run: 93 - 94 - ``` 95 - systemctl enable knotserver 96 - systemctl start knotserver 97 - ``` 98 - 99 - The last step is to configure a reverse proxy like Nginx or Caddy to front your 100 - knot. 
Here's an example configuration for Nginx: 101 - 102 - ``` 103 - server { 104 - listen 80; 105 - listen [::]:80; 106 - server_name knot.example.com; 107 - 108 - location / { 109 - proxy_pass http://localhost:5555; 110 - proxy_set_header Host $host; 111 - proxy_set_header X-Real-IP $remote_addr; 112 - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 113 - proxy_set_header X-Forwarded-Proto $scheme; 114 - } 115 - 116 - # wss endpoint for git events 117 - location /events { 118 - proxy_set_header X-Forwarded-For $remote_addr; 119 - proxy_set_header Host $http_host; 120 - proxy_set_header Upgrade websocket; 121 - proxy_set_header Connection Upgrade; 122 - proxy_pass http://localhost:5555; 123 - } 124 - # additional config for SSL/TLS go here. 125 - } 126 - 127 - ``` 128 - 129 - Remember to use Let's Encrypt or similar to procure a certificate for your 130 - knot domain. 131 - 132 - You should now have a running knot server! You can finalize 133 - your registration by hitting the `verify` button on the 134 - [/settings/knots](https://tangled.org/settings/knots) page. This simply creates 135 - a record on your PDS to announce the existence of the knot. 136 - 137 - ### custom paths 138 - 139 - (This section applies to manual setup only. Docker users should edit the mounts 140 - in `docker-compose.yml` instead.) 141 - 142 - Right now, the database and repositories of your knot lives in `/home/git`. You 143 - can move these paths if you'd like to store them in another folder. Be careful 144 - when adjusting these paths: 145 - 146 - * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent 147 - any possible side effects. Remember to restart it once you're done. 148 - * Make backups before moving in case something goes wrong. 149 - * Make sure the `git` user can read and write from the new paths. 150 - 151 - #### database 152 - 153 - As an example, let's say the current database is at `/home/git/knotserver.db`, 154 - and we want to move it to `/home/git/database/knotserver.db`. 155 - 156 - Copy the current database to the new location. Make sure to copy the `.db-shm` 157 - and `.db-wal` files if they exist. 158 - 159 - ``` 160 - mkdir /home/git/database 161 - cp /home/git/knotserver.db* /home/git/database 162 - ``` 163 - 164 - In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to 165 - the new file path (_not_ the directory): 166 - 167 - ``` 168 - KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db 169 - ``` 170 - 171 - #### repositories 172 - 173 - As an example, let's say the repositories are currently in `/home/git`, and we 174 - want to move them into `/home/git/repositories`. 175 - 176 - Create the new folder, then move the existing repositories (if there are any): 177 - 178 - ``` 179 - mkdir /home/git/repositories 180 - # move all DIDs into the new folder; these will vary for you! 181 - mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories 182 - ``` 183 - 184 - In the environment (e.g. 
`/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 185 - to the new directory: 186 - 187 - ``` 188 - KNOT_REPO_SCAN_PATH=/home/git/repositories 189 - ``` 190 - 191 - Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 192 - repository path: 193 - 194 - ``` 195 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 196 - Match User git 197 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 198 - AuthorizedKeysCommandUser nobody 199 - EOF 200 - ``` 201 - 202 - Make sure to restart your SSH server! 203 - 204 - #### MOTD (message of the day) 205 - 206 - To configure the MOTD used ("Welcome to this knot!" by default), edit the 207 - `/home/git/motd` file: 208 - 209 - ``` 210 - printf "Hi from this knot!\n" > /home/git/motd 211 - ``` 212 - 213 - Note that you should add a newline at the end if setting a non-empty message 214 - since the knot won't do this for you.
-59
docs/migrations.md
··· 1 - # Migrations 2 - 3 - This document is laid out in reverse-chronological order. 4 - Newer migration guides are listed first, and older guides 5 - are further down the page. 6 - 7 - ## Upgrading from v1.8.x 8 - 9 - After v1.8.2, the HTTP API for knot and spindles have been 10 - deprecated and replaced with XRPC. Repositories on outdated 11 - knots will not be viewable from the appview. Upgrading is 12 - straightforward however. 13 - 14 - For knots: 15 - 16 - - Upgrade to latest tag (v1.9.0 or above) 17 - - Head to the [knot dashboard](https://tangled.org/settings/knots) and 18 - hit the "retry" button to verify your knot 19 - 20 - For spindles: 21 - 22 - - Upgrade to latest tag (v1.9.0 or above) 23 - - Head to the [spindle 24 - dashboard](https://tangled.org/settings/spindles) and hit the 25 - "retry" button to verify your spindle 26 - 27 - ## Upgrading from v1.7.x 28 - 29 - After v1.7.0, knot secrets have been deprecated. You no 30 - longer need a secret from the appview to run a knot. All 31 - authorized commands to knots are managed via [Inter-Service 32 - Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 33 - Knots will be read-only until upgraded. 34 - 35 - Upgrading is quite easy, in essence: 36 - 37 - - `KNOT_SERVER_SECRET` is no more, you can remove this 38 - environment variable entirely 39 - - `KNOT_SERVER_OWNER` is now required on boot, set this to 40 - your DID. You can find your DID in the 41 - [settings](https://tangled.org/settings) page. 42 - - Restart your knot once you have replaced the environment 43 - variable 44 - - Head to the [knot dashboard](https://tangled.org/settings/knots) and 45 - hit the "retry" button to verify your knot. This simply 46 - writes a `sh.tangled.knot` record to your PDS. 47 - 48 - If you use the nix module, simply bump the flake to the 49 - latest revision, and change your config block like so: 50 - 51 - ```diff 52 - services.tangled.knot = { 53 - enable = true; 54 - server = { 55 - - secretFile = /path/to/secret; 56 - + owner = "did:plc:foo"; 57 - }; 58 - }; 59 - ```
-25
docs/spindle/architecture.md
··· 1 - # spindle architecture 2 - 3 - Spindle is a small CI runner service. Here's a high level overview of how it operates: 4 - 5 - * listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and 6 - [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream. 7 - * when a new repo record comes through (typically when you add a spindle to a 8 - repo from the settings), spindle then resolves the underlying knot and 9 - subscribes to repo events (see: 10 - [`sh.tangled.pipeline`](/lexicons/pipeline.json)). 11 - * the spindle engine then handles execution of the pipeline, with results and 12 - logs beamed on the spindle event stream over wss 13 - 14 - ### the engine 15 - 16 - At present, the only supported backend is Docker (and Podman, if Docker 17 - compatibility is enabled, so that `/run/docker.sock` is created). Spindle 18 - executes each step in the pipeline in a fresh container, with state persisted 19 - across steps within the `/tangled/workspace` directory. 20 - 21 - The base image for the container is constructed on the fly using 22 - [Nixery](https://nixery.dev), which is handy for caching layers for frequently 23 - used packages. 24 - 25 - The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
··· 1 - # spindle self-hosting guide 2 - 3 - ## prerequisites 4 - 5 - * Go 6 - * Docker (the only supported backend currently) 7 - 8 - ## configuration 9 - 10 - Spindle is configured using environment variables. The following environment variables are available: 11 - 12 - * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 13 - * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 14 - * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 15 - * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 16 - * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 17 - * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 18 - * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 19 - * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 20 - * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 21 - 22 - ## running spindle 23 - 24 - 1. **Set the environment variables.** For example: 25 - 26 - ```shell 27 - export SPINDLE_SERVER_HOSTNAME="your-hostname" 28 - export SPINDLE_SERVER_OWNER="your-did" 29 - ``` 30 - 31 - 2. **Build the Spindle binary.** 32 - 33 - ```shell 34 - cd core 35 - go mod download 36 - go build -o cmd/spindle/spindle cmd/spindle/main.go 37 - ``` 38 - 39 - 3. **Create the log directory.** 40 - 41 - ```shell 42 - sudo mkdir -p /var/log/spindle 43 - sudo chown $USER:$USER -R /var/log/spindle 44 - ``` 45 - 46 - 4. **Run the Spindle binary.** 47 - 48 - ```shell 49 - ./cmd/spindle/spindle 50 - ``` 51 - 52 - Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
··· 1 - # spindle secrets with openbao 2 - 3 - This document covers setting up Spindle to use OpenBao for secrets 4 - management via OpenBao Proxy instead of the default SQLite backend. 5 - 6 - ## overview 7 - 8 - Spindle now uses OpenBao Proxy for secrets management. The proxy handles 9 - authentication automatically using AppRole credentials, while Spindle 10 - connects to the local proxy instead of directly to the OpenBao server. 11 - 12 - This approach provides better security, automatic token renewal, and 13 - simplified application code. 14 - 15 - ## installation 16 - 17 - Install OpenBao from nixpkgs: 18 - 19 - ```bash 20 - nix shell nixpkgs#openbao # for a local server 21 - ``` 22 - 23 - ## setup 24 - 25 - The setup process is documented for both local development and production. 26 - 27 - ### local development 28 - 29 - Start OpenBao in dev mode: 30 - 31 - ```bash 32 - bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201 33 - ``` 34 - 35 - This starts OpenBao on `http://localhost:8201` with a root token. 36 - 37 - Set up the environment for the bao CLI: 38 - 39 - ```bash 40 - export BAO_ADDR=http://localhost:8200 41 - export BAO_TOKEN=root 42 - ``` 43 - 44 - ### production 45 - 46 - You would typically use a systemd service with a configuration file. Refer to 47 - [@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be 48 - achieved using Nix. 49 - 50 - Then, initialize the bao server: 51 - ```bash 52 - bao operator init -key-shares=1 -key-threshold=1 53 - ``` 54 - 55 - This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up: 56 - ```bash 57 - bao operator unseal <unseal_key> 58 - ``` 59 - 60 - All steps below remain the same across both dev and production setups.
61 - 62 - ### configure openbao server 63 - 64 - Create the spindle KV mount: 65 - 66 - ```bash 67 - bao secrets enable -path=spindle -version=2 kv 68 - ``` 69 - 70 - Set up AppRole authentication and policy: 71 - 72 - Create a policy file `spindle-policy.hcl`: 73 - 74 - ```hcl 75 - # Full access to spindle KV v2 data 76 - path "spindle/data/*" { 77 - capabilities = ["create", "read", "update", "delete"] 78 - } 79 - 80 - # Access to metadata for listing and management 81 - path "spindle/metadata/*" { 82 - capabilities = ["list", "read", "delete", "update"] 83 - } 84 - 85 - # Allow listing at root level 86 - path "spindle/" { 87 - capabilities = ["list"] 88 - } 89 - 90 - # Required for connection testing and health checks 91 - path "auth/token/lookup-self" { 92 - capabilities = ["read"] 93 - } 94 - ``` 95 - 96 - Apply the policy and create an AppRole: 97 - 98 - ```bash 99 - bao policy write spindle-policy spindle-policy.hcl 100 - bao auth enable approle 101 - bao write auth/approle/role/spindle \ 102 - token_policies="spindle-policy" \ 103 - token_ttl=1h \ 104 - token_max_ttl=4h \ 105 - bind_secret_id=true \ 106 - secret_id_ttl=0 \ 107 - secret_id_num_uses=0 108 - ``` 109 - 110 - Get the credentials: 111 - 112 - ```bash 113 - # Get role ID (static) 114 - ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 115 - 116 - # Generate secret ID 117 - SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 118 - 119 - echo "Role ID: $ROLE_ID" 120 - echo "Secret ID: $SECRET_ID" 121 - ``` 122 - 123 - ### create proxy configuration 124 - 125 - Create the credential files: 126 - 127 - ```bash 128 - # Create directory for OpenBao files 129 - mkdir -p /tmp/openbao 130 - 131 - # Save credentials 132 - echo "$ROLE_ID" > /tmp/openbao/role-id 133 - echo "$SECRET_ID" > /tmp/openbao/secret-id 134 - chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 135 - ``` 136 - 137 - Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 138 - 139 - ```hcl 140 - # OpenBao server connection 141 - vault { 142 - address = "http://localhost:8200" 143 - } 144 - 145 - # Auto-Auth using AppRole 146 - auto_auth { 147 - method "approle" { 148 - mount_path = "auth/approle" 149 - config = { 150 - role_id_file_path = "/tmp/openbao/role-id" 151 - secret_id_file_path = "/tmp/openbao/secret-id" 152 - } 153 - } 154 - 155 - # Optional: write token to file for debugging 156 - sink "file" { 157 - config = { 158 - path = "/tmp/openbao/token" 159 - mode = 0640 160 - } 161 - } 162 - } 163 - 164 - # Proxy listener for Spindle 165 - listener "tcp" { 166 - address = "127.0.0.1:8201" 167 - tls_disable = true 168 - } 169 - 170 - # Enable API proxy with auto-auth token 171 - api_proxy { 172 - use_auto_auth_token = true 173 - } 174 - 175 - # Enable response caching 176 - cache { 177 - use_auto_auth_token = true 178 - } 179 - 180 - # Logging 181 - log_level = "info" 182 - ``` 183 - 184 - ### start the proxy 185 - 186 - Start OpenBao Proxy: 187 - 188 - ```bash 189 - bao proxy -config=/tmp/openbao/proxy.hcl 190 - ``` 191 - 192 - The proxy will authenticate with OpenBao and start listening on 193 - `127.0.0.1:8201`. 
194 - 195 - ### configure spindle 196 - 197 - Set these environment variables for Spindle: 198 - 199 - ```bash 200 - export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 201 - export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 202 - export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 203 - ``` 204 - 205 - Start Spindle: 206 - 207 - Spindle will now connect to the local proxy, which handles all 208 - authentication automatically. 209 - 210 - ## production setup for proxy 211 - 212 - For production, you'll want to run the proxy as a service: 213 - 214 - Place your production configuration in `/etc/openbao/proxy.hcl` with 215 - proper TLS settings for the vault connection. 216 - 217 - ## verifying setup 218 - 219 - Test the proxy directly: 220 - 221 - ```bash 222 - # Check proxy health 223 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 224 - 225 - # Test token lookup through proxy 226 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 227 - ``` 228 - 229 - Test OpenBao operations through the server: 230 - 231 - ```bash 232 - # List all secrets 233 - bao kv list spindle/ 234 - 235 - # Add a test secret via Spindle API, then check it exists 236 - bao kv list spindle/repos/ 237 - 238 - # Get a specific secret 239 - bao kv get spindle/repos/your_repo_path/SECRET_NAME 240 - ``` 241 - 242 - ## how it works 243 - 244 - - Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201) 245 - - The proxy authenticates with OpenBao using AppRole credentials 246 - - All Spindle requests go through the proxy, which injects authentication tokens 247 - - Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}` 248 - - Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo` 249 - - The proxy handles all token renewal automatically 250 - - Spindle no longer manages tokens or authentication directly 251 - 252 - ## troubleshooting 253 - 254 - **Connection refused**: Check that the OpenBao Proxy is running and 255 - listening on the configured address. 256 - 257 - **403 errors**: Verify the AppRole credentials are correct and the policy 258 - has the necessary permissions. 259 - 260 - **404 route errors**: The spindle KV mount probably doesn't exist - run 261 - the mount creation step again. 262 - 263 - **Proxy authentication failures**: Check the proxy logs and verify the 264 - role-id and secret-id files are readable and contain valid credentials. 265 - 266 - **Secret not found after writing**: This can indicate policy permission 267 - issues. Verify the policy includes both `spindle/data/*` and 268 - `spindle/metadata/*` paths with appropriate capabilities. 269 - 270 - Check proxy logs: 271 - 272 - ```bash 273 - # If running as systemd service 274 - journalctl -u openbao-proxy -f 275 - 276 - # If running directly, check the console output 277 - ``` 278 - 279 - Test AppRole authentication manually: 280 - 281 - ```bash 282 - bao write auth/approle/login \ 283 - role_id="$(cat /tmp/openbao/role-id)" \ 284 - secret_id="$(cat /tmp/openbao/secret-id)" 285 - ```
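The repository-path sanitization described under "how it works" can be pictured with a short sketch. This is illustrative only and assumes a plain character replacement; the actual Spindle implementation may differ:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeRepoPath is a hypothetical helper showing the mapping described
// above: "did:plc:alice/myrepo" becomes "did_plc_alice_myrepo".
func sanitizeRepoPath(repo string) string {
	return strings.NewReplacer(":", "_", "/", "_").Replace(repo)
}

func main() {
	fmt.Println("spindle/repos/" + sanitizeRepoPath("did:plc:alice/myrepo") + "/MY_SECRET")
	// prints: spindle/repos/did_plc_alice_myrepo/MY_SECRET
}
```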
-183
docs/spindle/pipeline.md
··· 1 - # spindle pipelines 2 - 3 - Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML. 4 - 5 - The fields are: 6 - 7 - - [Trigger](#trigger): A **required** field that defines when a workflow should be triggered. 8 - - [Engine](#engine): A **required** field that defines which engine a workflow should run on. 9 - - [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned. 10 - - [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need. 11 - - [Environment](#environment): An **optional** field that allows you to define environment variables. 12 - - [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow. 13 - 14 - ## Trigger 15 - 16 - The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields: 17 - 18 - - `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values: 19 - - `push`: The workflow should run every time a commit is pushed to the repository. 20 - - `pull_request`: The workflow should run every time a pull request is made or updated. 21 - - `manual`: The workflow can be triggered manually. 22 - - `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events. 23 - - `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events. 24 - 25 - For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with: 26 - 27 - ```yaml 28 - when: 29 - - event: ["push", "manual"] 30 - branch: ["main", "develop"] 31 - - event: ["pull_request"] 32 - branch: ["main"] 33 - ``` 34 - 35 - You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed: 36 - 37 - ```yaml 38 - when: 39 - - event: ["push"] 40 - tag: ["v*"] 41 - ``` 42 - 43 - You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches): 44 - 45 - ```yaml 46 - when: 47 - - event: ["push"] 48 - branch: ["main", "release-*"] 49 - tag: ["v*", "stable"] 50 - ``` 51 - 52 - ## Engine 53 - 54 - Next is the engine on which the workflow should run, defined using the **required** `engine` field. 
The currently supported engines are: 55 - 56 - - `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there. 57 - 58 - Example: 59 - 60 - ```yaml 61 - engine: "nixery" 62 - ``` 63 - 64 - ## Clone options 65 - 66 - When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields: 67 - 68 - - `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default. 69 - - `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow. 70 - - `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default. 71 - 72 - The default settings are: 73 - 74 - ```yaml 75 - clone: 76 - skip: false 77 - depth: 1 78 - submodules: false 79 - ``` 80 - 81 - ## Dependencies 82 - 83 - Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch. 84 - 85 - Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` that you've published to your own registry at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so: 86 - 87 - ```yaml 88 - dependencies: 89 - # nixpkgs 90 - nixpkgs: 91 - - nodejs 92 - - go 93 - # custom registry 94 - git+https://tangled.org/@example.com/my_pkg: 95 - - my_pkg 96 - ``` 97 - 98 - Now these dependencies are available to use in your workflow! 99 - 100 - ## Environment 101 - 102 - The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 103 - 104 - Example: 105 - 106 - ```yaml 107 - environment: 108 - GOOS: "linux" 109 - GOARCH: "arm64" 110 - NODE_ENV: "production" 111 - MY_ENV_VAR: "MY_ENV_VALUE" 112 - ``` 113 - 114 - ## Steps 115 - 116 - The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields: 117 - 118 - - `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing. 119 - - `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120 - - `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 121 - 122 - Example: 123 - 124 - ```yaml 125 - steps: 126 - - name: "Build backend" 127 - command: "go build" 128 - environment: 129 - GOOS: "darwin" 130 - GOARCH: "arm64" 131 - - name: "Build frontend" 132 - command: "npm run build" 133 - environment: 134 - NODE_ENV: "production" 135 - ``` 136 - 137 - ## Complete workflow 138 - 139 - ```yaml 140 - # .tangled/workflows/build.yml 141 - 142 - when: 143 - - event: ["push", "manual"] 144 - branch: ["main", "develop"] 145 - - event: ["pull_request"] 146 - branch: ["main"] 147 - 148 - engine: "nixery" 149 - 150 - # using the default values 151 - clone: 152 - skip: false 153 - depth: 1 154 - submodules: false 155 - 156 - dependencies: 157 - # nixpkgs 158 - nixpkgs: 159 - - nodejs 160 - - go 161 - # custom registry 162 - git+https://tangled.org/@example.com/my_pkg: 163 - - my_pkg 164 - 165 - environment: 166 - GOOS: "linux" 167 - GOARCH: "arm64" 168 - NODE_ENV: "production" 169 - MY_ENV_VAR: "MY_ENV_VALUE" 170 - 171 - steps: 172 - - name: "Build backend" 173 - command: "go build" 174 - environment: 175 - GOOS: "darwin" 176 - GOARCH: "arm64" 177 - - name: "Build frontend" 178 - command: "npm run build" 179 - environment: 180 - NODE_ENV: "production" 181 - ``` 182 - 183 - If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
··· 1 + svg { 2 + width: 16px; 3 + height: 16px; 4 + } 5 + 6 + :root { 7 + --syntax-alert: #d20f39; 8 + --syntax-annotation: #fe640b; 9 + --syntax-attribute: #df8e1d; 10 + --syntax-basen: #40a02b; 11 + --syntax-builtin: #1e66f5; 12 + --syntax-controlflow: #8839ef; 13 + --syntax-char: #04a5e5; 14 + --syntax-constant: #fe640b; 15 + --syntax-comment: #9ca0b0; 16 + --syntax-commentvar: #7c7f93; 17 + --syntax-documentation: #9ca0b0; 18 + --syntax-datatype: #df8e1d; 19 + --syntax-decval: #40a02b; 20 + --syntax-error: #d20f39; 21 + --syntax-extension: #4c4f69; 22 + --syntax-float: #40a02b; 23 + --syntax-function: #1e66f5; 24 + --syntax-import: #40a02b; 25 + --syntax-information: #04a5e5; 26 + --syntax-keyword: #8839ef; 27 + --syntax-operator: #179299; 28 + --syntax-other: #8839ef; 29 + --syntax-preprocessor: #ea76cb; 30 + --syntax-specialchar: #04a5e5; 31 + --syntax-specialstring: #ea76cb; 32 + --syntax-string: #40a02b; 33 + --syntax-variable: #8839ef; 34 + --syntax-verbatimstring: #40a02b; 35 + --syntax-warning: #df8e1d; 36 + } 37 + 38 + @media (prefers-color-scheme: dark) { 39 + :root { 40 + --syntax-alert: #f38ba8; 41 + --syntax-annotation: #fab387; 42 + --syntax-attribute: #f9e2af; 43 + --syntax-basen: #a6e3a1; 44 + --syntax-builtin: #89b4fa; 45 + --syntax-controlflow: #cba6f7; 46 + --syntax-char: #89dceb; 47 + --syntax-constant: #fab387; 48 + --syntax-comment: #6c7086; 49 + --syntax-commentvar: #585b70; 50 + --syntax-documentation: #6c7086; 51 + --syntax-datatype: #f9e2af; 52 + --syntax-decval: #a6e3a1; 53 + --syntax-error: #f38ba8; 54 + --syntax-extension: #cdd6f4; 55 + --syntax-float: #a6e3a1; 56 + --syntax-function: #89b4fa; 57 + --syntax-import: #a6e3a1; 58 + --syntax-information: #89dceb; 59 + --syntax-keyword: #cba6f7; 60 + --syntax-operator: #94e2d5; 61 + --syntax-other: #cba6f7; 62 + --syntax-preprocessor: #f5c2e7; 63 + --syntax-specialchar: #89dceb; 64 + --syntax-specialstring: #f5c2e7; 65 + --syntax-string: #a6e3a1; 66 + --syntax-variable: #cba6f7; 67 + --syntax-verbatimstring: #a6e3a1; 68 + --syntax-warning: #f9e2af; 69 + } 70 + } 71 + 72 + /* pandoc syntax highlighting classes */ 73 + code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */ 74 + code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */ 75 + code span.at { color: var(--syntax-attribute); } /* attribute */ 76 + code span.bn { color: var(--syntax-basen); } /* basen */ 77 + code span.bu { color: var(--syntax-builtin); } /* builtin */ 78 + code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */ 79 + code span.ch { color: var(--syntax-char); } /* char */ 80 + code span.cn { color: var(--syntax-constant); } /* constant */ 81 + code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */ 82 + code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */ 83 + code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */ 84 + code span.dt { color: var(--syntax-datatype); } /* datatype */ 85 + code span.dv { color: var(--syntax-decval); } /* decval */ 86 + code span.er { color: var(--syntax-error); font-weight: bold; } /* error */ 87 + code span.ex { color: var(--syntax-extension); } /* extension */ 88 + code span.fl { color: var(--syntax-float); } /* float */ 89 + code span.fu { color: var(--syntax-function); } /* function */ 90 + code span.im { color: var(--syntax-import); font-weight: bold; } /* import */ 91 + code span.in { 
color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */ 92 + code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */ 93 + code span.op { color: var(--syntax-operator); } /* operator */ 94 + code span.ot { color: var(--syntax-other); } /* other */ 95 + code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */ 96 + code span.sc { color: var(--syntax-specialchar); } /* specialchar */ 97 + code span.ss { color: var(--syntax-specialstring); } /* specialstring */ 98 + code span.st { color: var(--syntax-string); } /* string */ 99 + code span.va { color: var(--syntax-variable); } /* variable */ 100 + code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */ 101 + code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+117
docs/template.html
··· 1 + <!DOCTYPE html> 2 + <html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$> 3 + <head> 4 + <meta charset="utf-8" /> 5 + <meta name="generator" content="pandoc" /> 6 + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> 7 + $for(author-meta)$ 8 + <meta name="author" content="$author-meta$" /> 9 + $endfor$ 10 + 11 + $if(date-meta)$ 12 + <meta name="dcterms.date" content="$date-meta$" /> 13 + $endif$ 14 + 15 + $if(keywords)$ 16 + <meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" /> 17 + $endif$ 18 + 19 + $if(description-meta)$ 20 + <meta name="description" content="$description-meta$" /> 21 + $endif$ 22 + 23 + <title>$pagetitle$</title> 24 + 25 + <style> 26 + $styles.css()$ 27 + </style> 28 + 29 + $for(css)$ 30 + <link rel="stylesheet" href="$css$" /> 31 + $endfor$ 32 + 33 + $for(header-includes)$ 34 + $header-includes$ 35 + $endfor$ 36 + 37 + <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin /> 38 + 39 + </head> 40 + <body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen"> 41 + $for(include-before)$ 42 + $include-before$ 43 + $endfor$ 44 + 45 + $if(toc)$ 46 + <!-- mobile topbar toc --> 47 + <details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4"> 48 + <summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white"> 49 + $if(toc-title)$$toc-title$$else$Table of Contents$endif$ 50 + <span class="group-open:hidden inline">${ menu.svg() }</span> 51 + <span class="hidden group-open:inline">${ x.svg() }</span> 52 + </summary> 53 + ${ table-of-contents:toc.html() } 54 + </details> 55 + <!-- desktop sidebar toc --> 56 + <nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50"> 57 + $if(toc-title)$ 58 + <h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2> 59 + $endif$ 60 + ${ table-of-contents:toc.html() } 61 + </nav> 62 + $endif$ 63 + 64 + <div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col"> 65 + <main class="max-w-4xl w-full mx-auto p-6 flex-1"> 66 + $if(top)$ 67 + $-- only print title block if this is NOT the top page 68 + $else$ 69 + $if(title)$ 70 + <header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700"> 71 + <h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1> 72 + $if(subtitle)$ 73 + <p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p> 74 + $endif$ 75 + $for(author)$ 76 + <p class="text-sm text-gray-500 dark:text-gray-400">$author$</p> 77 + $endfor$ 78 + $if(date)$ 79 + <p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p> 80 + $endif$ 81 + $if(abstract)$ 82 + <div class="mt-6 p-4 bg-gray-50 rounded-lg"> 83 + <div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div> 84 + <div class="text-gray-700">$abstract$</div> 85 + </div> 86 + $endif$ 87 + $endif$ 88 + </header> 89 + $endif$ 90 + <article class="prose dark:prose-invert max-w-none"> 91 + $body$ 92 + </article> 93 + </main> 94 + <nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 "> 95 + <div class="max-w-4xl 
mx-auto px-8 py-4"> 96 + <div class="flex justify-between gap-4"> 97 + <span class="flex-1"> 98 + $if(previous.url)$ 99 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span> 100 + <a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a> 101 + $endif$ 102 + </span> 103 + <span class="flex-1 text-right"> 104 + $if(next.url)$ 105 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span> 106 + <a href="$next.url$" accesskey="n" rel="next">$next.title$</a> 107 + $endif$ 108 + </span> 109 + </div> 110 + </div> 111 + </nav> 112 + </div> 113 + $for(include-after)$ 114 + $include-after$ 115 + $endfor$ 116 + </body> 117 + </html>
+4
docs/toc.html
··· 1 + <div class="[&_ul]:space-y-6 [&_ul]:pl-0 [&_ul]:font-bold [&_ul_ul]:pl-4 [&_ul_ul]:font-normal [&_ul_ul]:space-y-2 [&_li]:space-y-2"> 2 + $table-of-contents$ 3 + </div> 4 +
+9 -9
flake.lock
··· 35 35 "systems": "systems" 36 36 }, 37 37 "locked": { 38 - "lastModified": 1694529238, 39 - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", 38 + "lastModified": 1731533236, 39 + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 40 40 "owner": "numtide", 41 41 "repo": "flake-utils", 42 - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", 42 + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 43 43 "type": "github" 44 44 }, 45 45 "original": { ··· 56 56 ] 57 57 }, 58 58 "locked": { 59 - "lastModified": 1754078208, 60 - "narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=", 59 + "lastModified": 1763982521, 60 + "narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=", 61 61 "owner": "nix-community", 62 62 "repo": "gomod2nix", 63 - "rev": "7f963246a71626c7fc70b431a315c4388a0c95cf", 63 + "rev": "02e63a239d6eabd595db56852535992c898eba72", 64 64 "type": "github" 65 65 }, 66 66 "original": { ··· 150 150 }, 151 151 "nixpkgs": { 152 152 "locked": { 153 - "lastModified": 1751984180, 154 - "narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=", 153 + "lastModified": 1766070988, 154 + "narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=", 155 155 "owner": "nixos", 156 156 "repo": "nixpkgs", 157 - "rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0", 157 + "rev": "c6245e83d836d0433170a16eb185cefe0572f8b8", 158 158 "type": "github" 159 159 }, 160 160 "original": {
+5 -4
flake.nix
··· 80 80 }).buildGoApplication; 81 81 modules = ./nix/gomod2nix.toml; 82 82 sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix { 83 - inherit (pkgs) gcc; 84 83 inherit sqlite-lib-src; 85 84 }; 86 85 lexgen = self.callPackage ./nix/pkgs/lexgen.nix {inherit indigo;}; ··· 89 88 inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src; 90 89 }; 91 90 appview = self.callPackage ./nix/pkgs/appview.nix {}; 91 + docs = self.callPackage ./nix/pkgs/docs.nix { 92 + inherit inter-fonts-src ibm-plex-mono-src lucide-src; 93 + }; 92 94 spindle = self.callPackage ./nix/pkgs/spindle.nix {}; 93 95 knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {}; 94 96 knot = self.callPackage ./nix/pkgs/knot.nix {}; 95 97 }); 96 98 in { 97 99 overlays.default = final: prev: { 98 - inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview; 100 + inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs; 99 101 }; 100 102 101 103 packages = forAllSystems (system: let ··· 104 106 staticPackages = mkPackageSet pkgs.pkgsStatic; 105 107 crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic; 106 108 in { 107 - inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib; 109 + inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs; 108 110 109 111 pkgsStatic-appview = staticPackages.appview; 110 112 pkgsStatic-knot = staticPackages.knot; ··· 156 158 nativeBuildInputs = [ 157 159 pkgs.go 158 160 pkgs.air 159 - pkgs.tilt 160 161 pkgs.gopls 161 162 pkgs.httpie 162 163 pkgs.litecli
+3 -4
go.mod
··· 1 1 module tangled.org/core 2 2 3 - go 1.24.4 3 + go 1.25.0 4 4 5 5 require ( 6 6 github.com/Blank-Xu/sql-adapter v1.1.1 ··· 44 44 github.com/stretchr/testify v1.10.0 45 45 github.com/urfave/cli/v3 v3.3.3 46 46 github.com/whyrusleeping/cbor-gen v0.3.1 47 - github.com/wyatt915/goldmark-treeblood v0.0.1 48 47 github.com/yuin/goldmark v1.7.13 48 + github.com/yuin/goldmark-emoji v1.0.6 49 49 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 50 50 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 51 51 golang.org/x/crypto v0.40.0 52 52 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b 53 53 golang.org/x/image v0.31.0 54 54 golang.org/x/net v0.42.0 55 - golang.org/x/sync v0.17.0 56 55 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 57 56 gopkg.in/yaml.v3 v3.0.1 58 57 ) ··· 190 189 github.com/vmihailenco/go-tinylfu v0.2.2 // indirect 191 190 github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect 192 191 github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 193 - github.com/wyatt915/treeblood v0.1.16 // indirect 194 192 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 195 193 gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect 196 194 gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect ··· 205 203 go.uber.org/atomic v1.11.0 // indirect 206 204 go.uber.org/multierr v1.11.0 // indirect 207 205 go.uber.org/zap v1.27.0 // indirect 206 + golang.org/x/sync v0.17.0 // indirect 208 207 golang.org/x/sys v0.34.0 // indirect 209 208 golang.org/x/text v0.29.0 // indirect 210 209 golang.org/x/time v0.12.0 // indirect
+2 -4
go.sum
··· 495 495 github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= 496 496 github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= 497 497 github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= 498 - github.com/wyatt915/goldmark-treeblood v0.0.1 h1:6vLJcjFrHgE4ASu2ga4hqIQmbvQLU37v53jlHZ3pqDs= 499 - github.com/wyatt915/goldmark-treeblood v0.0.1/go.mod h1:SmcJp5EBaV17rroNlgNQFydYwy0+fv85CUr/ZaCz208= 500 - github.com/wyatt915/treeblood v0.1.16 h1:byxNbWZhnPDxdTp7W5kQhCeaY8RBVmojTFz1tEHgg8Y= 501 - github.com/wyatt915/treeblood v0.1.16/go.mod h1:i7+yhhmzdDP17/97pIsOSffw74EK/xk+qJ0029cSXUY= 502 498 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= 503 499 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= 504 500 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= ··· 509 505 github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 510 506 github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= 511 507 github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= 508 + github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= 509 + github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= 512 510 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= 513 511 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= 514 512 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+4 -4
hook/hook.go
··· 48 48 }, 49 49 Commands: []*cli.Command{ 50 50 { 51 - Name: "post-recieve", 52 - Usage: "sends a post-recieve hook to the knot (waits for stdin)", 53 - Action: postRecieve, 51 + Name: "post-receive", 52 + Usage: "sends a post-receive hook to the knot (waits for stdin)", 53 + Action: postReceive, 54 54 }, 55 55 }, 56 56 } 57 57 } 58 58 59 - func postRecieve(ctx context.Context, cmd *cli.Command) error { 59 + func postReceive(ctx context.Context, cmd *cli.Command) error { 60 60 gitDir := cmd.String("git-dir") 61 61 userDid := cmd.String("user-did") 62 62 userHandle := cmd.String("user-handle")
+1 -1
hook/setup.go
··· 138 138 option_var="GIT_PUSH_OPTION_$i" 139 139 push_options+=(-push-option "${!option_var}") 140 140 done 141 - %s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve 141 + %s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive 142 142 `, executablePath, config.internalApi) 143 143 144 144 return os.WriteFile(hookPath, []byte(hookContent), 0755)
+1 -1
input.css
··· 162 162 } 163 163 164 164 .prose a.mention { 165 - @apply no-underline hover:underline; 165 + @apply no-underline hover:underline font-bold; 166 166 } 167 167 168 168 .prose li {
+81
knotserver/db/db.go
··· 1 + package db 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "log/slog" 7 + "strings" 8 + 9 + _ "github.com/mattn/go-sqlite3" 10 + "tangled.org/core/log" 11 + ) 12 + 13 + type DB struct { 14 + db *sql.DB 15 + logger *slog.Logger 16 + } 17 + 18 + func Setup(ctx context.Context, dbPath string) (*DB, error) { 19 + // https://github.com/mattn/go-sqlite3#connection-string 20 + opts := []string{ 21 + "_foreign_keys=1", 22 + "_journal_mode=WAL", 23 + "_synchronous=NORMAL", 24 + "_auto_vacuum=incremental", 25 + } 26 + 27 + logger := log.FromContext(ctx) 28 + logger = log.SubLogger(logger, "db") 29 + 30 + db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 31 + if err != nil { 32 + return nil, err 33 + } 34 + 35 + conn, err := db.Conn(ctx) 36 + if err != nil { 37 + return nil, err 38 + } 39 + defer conn.Close() 40 + 41 + _, err = conn.ExecContext(ctx, ` 42 + create table if not exists known_dids ( 43 + did text primary key 44 + ); 45 + 46 + create table if not exists public_keys ( 47 + id integer primary key autoincrement, 48 + did text not null, 49 + key text not null, 50 + created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 51 + unique(did, key), 52 + foreign key (did) references known_dids(did) on delete cascade 53 + ); 54 + 55 + create table if not exists _jetstream ( 56 + id integer primary key autoincrement, 57 + last_time_us integer not null 58 + ); 59 + 60 + create table if not exists events ( 61 + rkey text not null, 62 + nsid text not null, 63 + event text not null, -- json 64 + created integer not null default (strftime('%s', 'now')), 65 + primary key (rkey, nsid) 66 + ); 67 + 68 + create table if not exists migrations ( 69 + id integer primary key autoincrement, 70 + name text unique 71 + ); 72 + `) 73 + if err != nil { 74 + return nil, err 75 + } 76 + 77 + return &DB{ 78 + db: db, 79 + logger: logger, 80 + }, nil 81 + }
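The `events` table above is keyed by `(rkey, nsid)` and stores the raw record as JSON, so a natural way to handle replays of the same record is to overwrite the row. A hypothetical helper (not part of the package as shown; the method name and signature are illustrative) writing against this schema with SQLite's upsert:

```go
// saveEvent is a sketch of how a consumed record could be stored against the
// schema created in Setup, overwriting any previous event with the same key.
func (d *DB) saveEvent(ctx context.Context, rkey, nsid, eventJSON string) error {
	_, err := d.db.ExecContext(ctx, `
		insert into events (rkey, nsid, event)
		values (?, ?, ?)
		on conflict (rkey, nsid) do update set event = excluded.event
	`, rkey, nsid, eventJSON)
	return err
}
```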
-64
knotserver/db/init.go
··· 1 - package db 2 - 3 - import ( 4 - "database/sql" 5 - "strings" 6 - 7 - _ "github.com/mattn/go-sqlite3" 8 - ) 9 - 10 - type DB struct { 11 - db *sql.DB 12 - } 13 - 14 - func Setup(dbPath string) (*DB, error) { 15 - // https://github.com/mattn/go-sqlite3#connection-string 16 - opts := []string{ 17 - "_foreign_keys=1", 18 - "_journal_mode=WAL", 19 - "_synchronous=NORMAL", 20 - "_auto_vacuum=incremental", 21 - } 22 - 23 - db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 24 - if err != nil { 25 - return nil, err 26 - } 27 - 28 - // NOTE: If any other migration is added here, you MUST 29 - // copy the pattern in appview: use a single sql.Conn 30 - // for every migration. 31 - 32 - _, err = db.Exec(` 33 - create table if not exists known_dids ( 34 - did text primary key 35 - ); 36 - 37 - create table if not exists public_keys ( 38 - id integer primary key autoincrement, 39 - did text not null, 40 - key text not null, 41 - created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 42 - unique(did, key), 43 - foreign key (did) references known_dids(did) on delete cascade 44 - ); 45 - 46 - create table if not exists _jetstream ( 47 - id integer primary key autoincrement, 48 - last_time_us integer not null 49 - ); 50 - 51 - create table if not exists events ( 52 - rkey text not null, 53 - nsid text not null, 54 - event text not null, -- json 55 - created integer not null default (strftime('%s', 'now')), 56 - primary key (rkey, nsid) 57 - ); 58 - `) 59 - if err != nil { 60 - return nil, err 61 - } 62 - 63 - return &DB{db: db}, nil 64 - }
+1 -17
knotserver/git/diff.go
··· 77 77 nd.Diff = append(nd.Diff, ndiff) 78 78 } 79 79 80 - nd.Stat.FilesChanged = len(diffs) 81 - nd.Commit.This = c.Hash.String() 82 - nd.Commit.PGPSignature = c.PGPSignature 83 - nd.Commit.Committer = c.Committer 84 - nd.Commit.Tree = c.TreeHash.String() 85 - 86 - if parent.Hash.IsZero() { 87 - nd.Commit.Parent = "" 88 - } else { 89 - nd.Commit.Parent = parent.Hash.String() 90 - } 91 - nd.Commit.Author = c.Author 92 - nd.Commit.Message = c.Message 93 - 94 - if v, ok := c.ExtraHeaders["change-id"]; ok { 95 - nd.Commit.ChangedId = string(v) 96 - } 80 + nd.Commit.FromGoGitCommit(c) 97 81 98 82 return &nd, nil 99 83 }
+13 -1
knotserver/git/service/service.go
··· 95 95 return c.RunService(cmd) 96 96 } 97 97 98 + func (c *ServiceCommand) UploadArchive() error { 99 + cmd := exec.Command("git", []string{ 100 + "upload-archive", 101 + ".", 102 + }...) 103 + 104 + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 105 + cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol)) 106 + cmd.Dir = c.Dir 107 + 108 + return c.RunService(cmd) 109 + } 110 + 98 111 func (c *ServiceCommand) UploadPack() error { 99 112 cmd := exec.Command("git", []string{ 100 - "-c", "uploadpack.allowFilter=true", 101 113 "upload-pack", 102 114 "--stateless-rpc", 103 115 ".",
+47
knotserver/git.go
··· 56 56 } 57 57 } 58 58 59 + func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) { 60 + did := chi.URLParam(r, "did") 61 + name := chi.URLParam(r, "name") 62 + repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name)) 63 + if err != nil { 64 + gitError(w, err.Error(), http.StatusInternalServerError) 65 + h.l.Error("git: failed to secure join repo path", "handler", "UploadArchive", "error", err) 66 + return 67 + } 68 + 69 + const expectedContentType = "application/x-git-upload-archive-request" 70 + contentType := r.Header.Get("Content-Type") 71 + if contentType != expectedContentType { 72 + gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType) 73 + return 74 + } 75 + var bodyReader io.ReadCloser = r.Body 76 + if r.Header.Get("Content-Encoding") == "gzip" { 77 + gzipReader, err := gzip.NewReader(r.Body) 78 + if err != nil { 79 + gitError(w, err.Error(), http.StatusInternalServerError) 80 + h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err) 81 + return 82 + } 83 + defer gzipReader.Close() 84 + bodyReader = gzipReader 85 + } 86 + 87 + w.Header().Set("Content-Type", "application/x-git-upload-archive-result") 88 + 89 + h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo) 90 + 91 + cmd := service.ServiceCommand{ 92 + GitProtocol: r.Header.Get("Git-Protocol"), 93 + Dir: repo, 94 + Stdout: w, 95 + Stdin: bodyReader, 96 + } 97 + 98 + w.WriteHeader(http.StatusOK) 99 + 100 + if err := cmd.UploadArchive(); err != nil { 101 + h.l.Error("git: failed to execute git-upload-archive", "handler", "UploadArchive", "error", err) 102 + return 103 + } 104 + } 105 + 59 106 func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) { 60 107 did := chi.URLParam(r, "did") 61 108 name := chi.URLParam(r, "name")
+1
knotserver/router.go
··· 82 82 r.Route("/{name}", func(r chi.Router) { 83 83 // routes for git operations 84 84 r.Get("/info/refs", h.InfoRefs) 85 + r.Post("/git-upload-archive", h.UploadArchive) 85 86 r.Post("/git-upload-pack", h.UploadPack) 86 87 r.Post("/git-receive-pack", h.ReceivePack) 87 88 })
+1 -1
knotserver/server.go
··· 64 64 logger.Info("running in dev mode, signature verification is disabled") 65 65 } 66 66 67 - db, err := db.Setup(c.Server.DBPath) 67 + db, err := db.Setup(ctx, c.Server.DBPath) 68 68 if err != nil { 69 69 return fmt.Errorf("failed to load db: %w", err) 70 70 }
+6 -1
knotserver/xrpc/repo_log.go
··· 62 62 return 63 63 } 64 64 65 + tcommits := make([]types.Commit, len(commits)) 66 + for i, c := range commits { 67 + tcommits[i].FromGoGitCommit(c) 68 + } 69 + 65 70 // Create response using existing types.RepoLogResponse 66 71 response := types.RepoLogResponse{ 67 - Commits: commits, 72 + Commits: tcommits, 68 73 Ref: ref, 69 74 Page: (offset / limit) + 1, 70 75 PerPage: limit,
+8 -2
lexicons/pulls/pull.json
··· 12 12 "required": [ 13 13 "target", 14 14 "title", 15 - "patch", 15 + "patchBlob", 16 16 "createdAt" 17 17 ], 18 18 "properties": { ··· 27 27 "type": "string" 28 28 }, 29 29 "patch": { 30 - "type": "string" 30 + "type": "string", 31 + "description": "(deprecated) use patchBlob instead" 32 + }, 33 + "patchBlob": { 34 + "type": "blob", 35 + "accept": "text/x-patch", 36 + "description": "patch content" 31 37 }, 32 38 "source": { 33 39 "type": "ref",
+3 -30
nix/gomod2nix.toml
··· 165 165 [mod."github.com/davecgh/go-spew"] 166 166 version = "v1.1.2-0.20180830191138-d8f796af33cc" 167 167 hash = "sha256-fV9oI51xjHdOmEx6+dlq7Ku2Ag+m/bmbzPo6A4Y74qc=" 168 - [mod."github.com/decred/dcrd/dcrec/secp256k1/v4"] 169 - version = "v4.4.0" 170 - hash = "sha256-qrhEIwhDll3cxoVpMbm1NQ9/HTI42S7ms8Buzlo5HCg=" 171 168 [mod."github.com/dgraph-io/ristretto"] 172 169 version = "v0.2.0" 173 170 hash = "sha256-bnpxX+oO/Qf7IJevA0gsbloVoqRx+5bh7RQ9d9eLNYw=" ··· 373 370 [mod."github.com/klauspost/cpuid/v2"] 374 371 version = "v2.3.0" 375 372 hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc=" 376 - [mod."github.com/lestrrat-go/blackmagic"] 377 - version = "v1.0.4" 378 - hash = "sha256-HmWOpwoPDNMwLdOi7onNn3Sb+ZsAa3Ai3gVBbXmQ0e8=" 379 - [mod."github.com/lestrrat-go/httpcc"] 380 - version = "v1.0.1" 381 - hash = "sha256-SMRSwJpqDIs/xL0l2e8vP0W65qtCHX2wigcOeqPJmos=" 382 - [mod."github.com/lestrrat-go/httprc"] 383 - version = "v1.0.6" 384 - hash = "sha256-mfZzePEhrmyyu/avEBd2MsDXyto8dq5+fyu5lA8GUWM=" 385 - [mod."github.com/lestrrat-go/iter"] 386 - version = "v1.0.2" 387 - hash = "sha256-30tErRf7Qu/NOAt1YURXY/XJSA6sCr6hYQfO8QqHrtw=" 388 - [mod."github.com/lestrrat-go/jwx/v2"] 389 - version = "v2.1.6" 390 - hash = "sha256-0LszXRZIba+X8AOrs3T4uanAUafBdlVB8/MpUNEFpbc=" 391 - [mod."github.com/lestrrat-go/option"] 392 - version = "v1.0.1" 393 - hash = "sha256-jVcIYYVsxElIS/l2akEw32vdEPR8+anR6oeT1FoYULI=" 394 373 [mod."github.com/lucasb-eyer/go-colorful"] 395 374 version = "v1.2.0" 396 375 hash = "sha256-Gg9dDJFCTaHrKHRR1SrJgZ8fWieJkybljybkI9x0gyE=" ··· 511 490 [mod."github.com/ryanuber/go-glob"] 512 491 version = "v1.0.0" 513 492 hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY=" 514 - [mod."github.com/segmentio/asm"] 515 - version = "v1.2.0" 516 - hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs=" 517 493 [mod."github.com/sergi/go-diff"] 518 494 version = "v1.1.0" 519 495 hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY=" ··· 548 524 [mod."github.com/whyrusleeping/cbor-gen"] 549 525 version = "v0.3.1" 550 526 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc=" 551 - [mod."github.com/wyatt915/goldmark-treeblood"] 552 - version = "v0.0.1" 553 - hash = "sha256-hAVFaktO02MiiqZFffr8ZlvFEfwxw4Y84OZ2t7e5G7g=" 554 - [mod."github.com/wyatt915/treeblood"] 555 - version = "v0.1.16" 556 - hash = "sha256-T68sa+iVx0qY7dDjXEAJvRWQEGXYIpUsf9tcWwO1tIw=" 557 527 [mod."github.com/xo/terminfo"] 558 528 version = "v0.0.0-20220910002029-abceb7e1c41e" 559 529 hash = "sha256-GyCDxxMQhXA3Pi/TsWXpA8cX5akEoZV7CFx4RO3rARU=" 560 530 [mod."github.com/yuin/goldmark"] 561 531 version = "v1.7.13" 562 532 hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE=" 533 + [mod."github.com/yuin/goldmark-emoji"] 534 + version = "v1.0.6" 535 + hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY=" 563 536 [mod."github.com/yuin/goldmark-highlighting/v2"] 564 537 version = "v2.0.0-20230729083705-37449abec8cc" 565 538 hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+41
nix/pkgs/docs.nix
··· 1 + { 2 + pandoc, 3 + tailwindcss, 4 + runCommandLocal, 5 + inter-fonts-src, 6 + ibm-plex-mono-src, 7 + lucide-src, 8 + src, 9 + }: 10 + runCommandLocal "docs" {} '' 11 + mkdir -p working 12 + 13 + # copy templates, themes, styles, filters to working directory 14 + cp ${src}/docs/*.html working/ 15 + cp ${src}/docs/*.theme working/ 16 + cp ${src}/docs/*.css working/ 17 + 18 + # icons 19 + cp -rf ${lucide-src}/*.svg working/ 20 + 21 + # content 22 + ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \ 23 + -o $out/ \ 24 + -t chunkedhtml \ 25 + --variable toc \ 26 + --toc-depth=2 \ 27 + --css=stylesheet.css \ 28 + --chunk-template="%i.html" \ 29 + --highlight-style=working/highlight.theme \ 30 + --template=working/template.html 31 + 32 + # fonts 33 + mkdir -p $out/static/fonts 34 + cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/ 35 + cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/ 36 + cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/ 37 + cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/ 38 + 39 + # styles 40 + cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css 41 + ''
+7 -5
nix/pkgs/sqlite-lib.nix
··· 1 1 { 2 - gcc, 3 2 stdenv, 4 3 sqlite-lib-src, 5 4 }: 6 5 stdenv.mkDerivation { 7 6 name = "sqlite-lib"; 8 7 src = sqlite-lib-src; 9 - nativeBuildInputs = [gcc]; 8 + 10 9 buildPhase = '' 11 - gcc -c sqlite3.c 12 - ar rcs libsqlite3.a sqlite3.o 13 - ranlib libsqlite3.a 10 + $CC -c sqlite3.c 11 + $AR rcs libsqlite3.a sqlite3.o 12 + $RANLIB libsqlite3.a 13 + ''; 14 + 15 + installPhase = '' 14 16 mkdir -p $out/include $out/lib 15 17 cp *.h $out/include 16 18 cp libsqlite3.a $out/lib
+1 -1
nix/vm.nix
··· 8 8 var = builtins.getEnv name; 9 9 in 10 10 if var == "" 11 - then throw "\$${name} must be defined, see docs/hacking.md for more details" 11 + then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details" 12 12 else var; 13 13 envVarOr = name: default: let 14 14 var = builtins.getEnv name;
+122
orm/orm.go
··· 1 + package orm 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "fmt" 7 + "log/slog" 8 + "reflect" 9 + "strings" 10 + ) 11 + 12 + type migrationFn = func(*sql.Tx) error 13 + 14 + func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error { 15 + logger = logger.With("migration", name) 16 + 17 + tx, err := c.BeginTx(context.Background(), nil) 18 + if err != nil { 19 + return err 20 + } 21 + defer tx.Rollback() 22 + 23 + var exists bool 24 + err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists) 25 + if err != nil { 26 + return err 27 + } 28 + 29 + if !exists { 30 + // run migration 31 + err = migrationFn(tx) 32 + if err != nil { 33 + logger.Error("failed to run migration", "err", err) 34 + return err 35 + } 36 + 37 + // mark migration as complete 38 + _, err = tx.Exec("insert into migrations (name) values (?)", name) 39 + if err != nil { 40 + logger.Error("failed to mark migration as complete", "err", err) 41 + return err 42 + } 43 + 44 + // commit the transaction 45 + if err := tx.Commit(); err != nil { 46 + return err 47 + } 48 + 49 + logger.Info("migration applied successfully") 50 + } else { 51 + logger.Warn("skipped migration, already applied") 52 + } 53 + 54 + return nil 55 + } 56 + 57 + type Filter struct { 58 + Key string 59 + arg any 60 + Cmp string 61 + } 62 + 63 + func newFilter(key, cmp string, arg any) Filter { 64 + return Filter{ 65 + Key: key, 66 + arg: arg, 67 + Cmp: cmp, 68 + } 69 + } 70 + 71 + func FilterEq(key string, arg any) Filter { return newFilter(key, "=", arg) } 72 + func FilterNotEq(key string, arg any) Filter { return newFilter(key, "<>", arg) } 73 + func FilterGte(key string, arg any) Filter { return newFilter(key, ">=", arg) } 74 + func FilterLte(key string, arg any) Filter { return newFilter(key, "<=", arg) } 75 + func FilterIs(key string, arg any) Filter { return newFilter(key, "is", arg) } 76 + func FilterIsNot(key string, arg any) Filter { return newFilter(key, "is not", arg) } 77 + func FilterIn(key string, arg any) Filter { return newFilter(key, "in", arg) } 78 + func FilterLike(key string, arg any) Filter { return newFilter(key, "like", arg) } 79 + func FilterNotLike(key string, arg any) Filter { return newFilter(key, "not like", arg) } 80 + func FilterContains(key string, arg any) Filter { 81 + return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg)) 82 + } 83 + 84 + func (f Filter) Condition() string { 85 + rv := reflect.ValueOf(f.arg) 86 + kind := rv.Kind() 87 + 88 + // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)` 89 + if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 90 + if rv.Len() == 0 { 91 + // always false 92 + return "1 = 0" 93 + } 94 + 95 + placeholders := make([]string, rv.Len()) 96 + for i := range placeholders { 97 + placeholders[i] = "?" 98 + } 99 + 100 + return fmt.Sprintf("%s %s (%s)", f.Key, f.Cmp, strings.Join(placeholders, ", ")) 101 + } 102 + 103 + return fmt.Sprintf("%s %s ?", f.Key, f.Cmp) 104 + } 105 + 106 + func (f Filter) Arg() []any { 107 + rv := reflect.ValueOf(f.arg) 108 + kind := rv.Kind() 109 + if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 110 + if rv.Len() == 0 { 111 + return nil 112 + } 113 + 114 + out := make([]any, rv.Len()) 115 + for i := range rv.Len() { 116 + out[i] = rv.Index(i).Interface() 117 + } 118 + return out 119 + } 120 + 121 + return []any{f.arg} 122 + }
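The `Filter` helpers above compose into a `where` clause by concatenating `Condition()` strings and flattening the `Arg()` values into the bind list. A hypothetical caller, not part of the package; the table and columns are borrowed from the knotserver `events` schema earlier in this diff:

```go
package example

import (
	"context"
	"database/sql"
	"strings"

	"tangled.org/core/orm"
)

// queryEvents shows how Condition() expands a FilterIn into "(?, ?)" placeholders
// and how Arg() supplies the matching bind values.
func queryEvents(ctx context.Context, db *sql.DB, filters ...orm.Filter) (*sql.Rows, error) {
	var conds []string
	var args []any
	for _, f := range filters {
		conds = append(conds, f.Condition()) // e.g. `nsid = ?`, `rkey in (?, ?)`
		args = append(args, f.Arg()...)
	}
	q := "select rkey, nsid, event from events"
	if len(conds) > 0 {
		q += " where " + strings.Join(conds, " and ")
	}
	return db.QueryContext(ctx, q, args...)
}
```

For example, `orm.FilterEq("nsid", "sh.tangled.repo")` combined with `orm.FilterIn("rkey", []string{"a", "b"})` yields `... where nsid = ? and rkey in (?, ?)` with three bind arguments.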
-1
patchutil/patchutil.go
··· 296 296 } 297 297 298 298 nd := types.NiceDiff{} 299 - nd.Commit.Parent = targetBranch 300 299 301 300 for _, d := range diffs { 302 301 ndiff := types.Diff{}
+3 -3
readme.md
··· 10 10 11 11 ## docs 12 12 13 - * [knot hosting guide](/docs/knot-hosting.md) 14 - * [contributing guide](/docs/contributing.md) **please read before opening a PR!** 15 - * [hacking on tangled](/docs/hacking.md) 13 + - [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide) 14 + - [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!** 15 + - [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled) 16 16 17 17 ## security 18 18
+31
sets/gen.go
··· 1 + package sets 2 + 3 + import ( 4 + "math/rand" 5 + "reflect" 6 + "testing/quick" 7 + ) 8 + 9 + func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value { 10 + s := New[T]() 11 + 12 + var zero T 13 + itemType := reflect.TypeOf(zero) 14 + 15 + for { 16 + if s.Len() >= size { 17 + break 18 + } 19 + 20 + item, ok := quick.Value(itemType, rand) 21 + if !ok { 22 + continue 23 + } 24 + 25 + if val, ok := item.Interface().(T); ok { 26 + s.Insert(val) 27 + } 28 + } 29 + 30 + return reflect.ValueOf(s) 31 + }
+35
sets/readme.txt
··· 1 + sets 2 + ---- 3 + set datastructure for go with generics and iterators. the 4 + api is supposed to mimic rust's std::collections::HashSet api. 5 + 6 + s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4})) 7 + s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6})) 8 + 9 + union := sets.Collect(s1.Union(s2)) 10 + intersect := sets.Collect(s1.Intersection(s2)) 11 + diff := sets.Collect(s1.Difference(s2)) 12 + symdiff := sets.Collect(s1.SymmetricDifference(s2)) 13 + 14 + s1.Len() // 4 15 + s1.Contains(1) // true 16 + s1.IsEmpty() // false 17 + s1.IsSubset(s2) // true 18 + s1.IsSuperset(s2) // false 19 + s1.IsDisjoint(s2) // false 20 + 21 + if exists := s1.Insert(1); exists { 22 + // already existed in set 23 + } 24 + 25 + if existed := s1.Remove(1); existed { 26 + // existed in set, now removed 27 + } 28 + 29 + 30 + testing 31 + ------- 32 + includes property-based tests using the wonderful 33 + testing/quick module! 34 + 35 + go test -v
+174
sets/set.go
··· 1 + package sets 2 + 3 + import ( 4 + "iter" 5 + "maps" 6 + ) 7 + 8 + type Set[T comparable] struct { 9 + data map[T]struct{} 10 + } 11 + 12 + func New[T comparable]() Set[T] { 13 + return Set[T]{ 14 + data: make(map[T]struct{}), 15 + } 16 + } 17 + 18 + func (s *Set[T]) Insert(item T) bool { 19 + _, exists := s.data[item] 20 + s.data[item] = struct{}{} 21 + return !exists 22 + } 23 + 24 + func Singleton[T comparable](item T) Set[T] { 25 + n := New[T]() 26 + _ = n.Insert(item) 27 + return n 28 + } 29 + 30 + func (s *Set[T]) Remove(item T) bool { 31 + _, exists := s.data[item] 32 + if exists { 33 + delete(s.data, item) 34 + } 35 + return exists 36 + } 37 + 38 + func (s Set[T]) Contains(item T) bool { 39 + _, exists := s.data[item] 40 + return exists 41 + } 42 + 43 + func (s Set[T]) Len() int { 44 + return len(s.data) 45 + } 46 + 47 + func (s Set[T]) IsEmpty() bool { 48 + return len(s.data) == 0 49 + } 50 + 51 + func (s *Set[T]) Clear() { 52 + s.data = make(map[T]struct{}) 53 + } 54 + 55 + func (s Set[T]) All() iter.Seq[T] { 56 + return func(yield func(T) bool) { 57 + for item := range s.data { 58 + if !yield(item) { 59 + return 60 + } 61 + } 62 + } 63 + } 64 + 65 + func (s Set[T]) Clone() Set[T] { 66 + return Set[T]{ 67 + data: maps.Clone(s.data), 68 + } 69 + } 70 + 71 + func (s Set[T]) Union(other Set[T]) iter.Seq[T] { 72 + if s.Len() >= other.Len() { 73 + return chain(s.All(), other.Difference(s)) 74 + } else { 75 + return chain(other.All(), s.Difference(other)) 76 + } 77 + } 78 + 79 + func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] { 80 + return func(yield func(T) bool) { 81 + for _, seq := range seqs { 82 + for item := range seq { 83 + if !yield(item) { 84 + return 85 + } 86 + } 87 + } 88 + } 89 + } 90 + 91 + func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] { 92 + return func(yield func(T) bool) { 93 + for item := range s.data { 94 + if other.Contains(item) { 95 + if !yield(item) { 96 + return 97 + } 98 + } 99 + } 100 + } 101 + } 102 + 103 + func (s Set[T]) Difference(other Set[T]) iter.Seq[T] { 104 + return func(yield func(T) bool) { 105 + for item := range s.data { 106 + if !other.Contains(item) { 107 + if !yield(item) { 108 + return 109 + } 110 + } 111 + } 112 + } 113 + } 114 + 115 + func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] { 116 + return func(yield func(T) bool) { 117 + for item := range s.data { 118 + if !other.Contains(item) { 119 + if !yield(item) { 120 + return 121 + } 122 + } 123 + } 124 + for item := range other.data { 125 + if !s.Contains(item) { 126 + if !yield(item) { 127 + return 128 + } 129 + } 130 + } 131 + } 132 + } 133 + 134 + func (s Set[T]) IsSubset(other Set[T]) bool { 135 + for item := range s.data { 136 + if !other.Contains(item) { 137 + return false 138 + } 139 + } 140 + return true 141 + } 142 + 143 + func (s Set[T]) IsSuperset(other Set[T]) bool { 144 + return other.IsSubset(s) 145 + } 146 + 147 + func (s Set[T]) IsDisjoint(other Set[T]) bool { 148 + for item := range s.data { 149 + if other.Contains(item) { 150 + return false 151 + } 152 + } 153 + return true 154 + } 155 + 156 + func (s Set[T]) Equal(other Set[T]) bool { 157 + if s.Len() != other.Len() { 158 + return false 159 + } 160 + for item := range s.data { 161 + if !other.Contains(item) { 162 + return false 163 + } 164 + } 165 + return true 166 + } 167 + 168 + func Collect[T comparable](seq iter.Seq[T]) Set[T] { 169 + result := New[T]() 170 + for item := range seq { 171 + result.Insert(item) 172 + } 173 + return result 174 + }
+411
sets/set_test.go
··· 1 + package sets 2 + 3 + import ( 4 + "slices" 5 + "testing" 6 + "testing/quick" 7 + ) 8 + 9 + func TestNew(t *testing.T) { 10 + s := New[int]() 11 + if s.Len() != 0 { 12 + t.Errorf("New set should be empty, got length %d", s.Len()) 13 + } 14 + if !s.IsEmpty() { 15 + t.Error("New set should be empty") 16 + } 17 + } 18 + 19 + func TestFromSlice(t *testing.T) { 20 + s := Collect(slices.Values([]int{1, 2, 3, 2, 1})) 21 + if s.Len() != 3 { 22 + t.Errorf("Expected length 3, got %d", s.Len()) 23 + } 24 + if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) { 25 + t.Error("Set should contain all unique elements from slice") 26 + } 27 + } 28 + 29 + func TestInsert(t *testing.T) { 30 + s := New[string]() 31 + 32 + if !s.Insert("hello") { 33 + t.Error("First insert should return true") 34 + } 35 + if s.Insert("hello") { 36 + t.Error("Duplicate insert should return false") 37 + } 38 + if s.Len() != 1 { 39 + t.Errorf("Expected length 1, got %d", s.Len()) 40 + } 41 + } 42 + 43 + func TestRemove(t *testing.T) { 44 + s := Collect(slices.Values([]int{1, 2, 3})) 45 + 46 + if !s.Remove(2) { 47 + t.Error("Remove existing element should return true") 48 + } 49 + if s.Remove(2) { 50 + t.Error("Remove non-existing element should return false") 51 + } 52 + if s.Contains(2) { 53 + t.Error("Element should be removed") 54 + } 55 + if s.Len() != 2 { 56 + t.Errorf("Expected length 2, got %d", s.Len()) 57 + } 58 + } 59 + 60 + func TestContains(t *testing.T) { 61 + s := Collect(slices.Values([]int{1, 2, 3})) 62 + 63 + if !s.Contains(1) { 64 + t.Error("Should contain 1") 65 + } 66 + if s.Contains(4) { 67 + t.Error("Should not contain 4") 68 + } 69 + } 70 + 71 + func TestClear(t *testing.T) { 72 + s := Collect(slices.Values([]int{1, 2, 3})) 73 + s.Clear() 74 + 75 + if !s.IsEmpty() { 76 + t.Error("Set should be empty after clear") 77 + } 78 + if s.Len() != 0 { 79 + t.Errorf("Expected length 0, got %d", s.Len()) 80 + } 81 + } 82 + 83 + func TestIterator(t *testing.T) { 84 + s := Collect(slices.Values([]int{1, 2, 3})) 85 + var items []int 86 + 87 + for item := range s.All() { 88 + items = append(items, item) 89 + } 90 + 91 + slices.Sort(items) 92 + expected := []int{1, 2, 3} 93 + if !slices.Equal(items, expected) { 94 + t.Errorf("Expected %v, got %v", expected, items) 95 + } 96 + } 97 + 98 + func TestClone(t *testing.T) { 99 + s1 := Collect(slices.Values([]int{1, 2, 3})) 100 + s2 := s1.Clone() 101 + 102 + if !s1.Equal(s2) { 103 + t.Error("Cloned set should be equal to original") 104 + } 105 + 106 + s2.Insert(4) 107 + if s1.Contains(4) { 108 + t.Error("Modifying clone should not affect original") 109 + } 110 + } 111 + 112 + func TestUnion(t *testing.T) { 113 + s1 := Collect(slices.Values([]int{1, 2})) 114 + s2 := Collect(slices.Values([]int{2, 3})) 115 + 116 + result := Collect(s1.Union(s2)) 117 + expected := Collect(slices.Values([]int{1, 2, 3})) 118 + 119 + if !result.Equal(expected) { 120 + t.Errorf("Expected %v, got %v", expected, result) 121 + } 122 + } 123 + 124 + func TestIntersection(t *testing.T) { 125 + s1 := Collect(slices.Values([]int{1, 2, 3})) 126 + s2 := Collect(slices.Values([]int{2, 3, 4})) 127 + 128 + expected := Collect(slices.Values([]int{2, 3})) 129 + result := Collect(s1.Intersection(s2)) 130 + 131 + if !result.Equal(expected) { 132 + t.Errorf("Expected %v, got %v", expected, result) 133 + } 134 + } 135 + 136 + func TestDifference(t *testing.T) { 137 + s1 := Collect(slices.Values([]int{1, 2, 3})) 138 + s2 := Collect(slices.Values([]int{2, 3, 4})) 139 + 140 + expected := 
Collect(slices.Values([]int{1})) 141 + result := Collect(s1.Difference(s2)) 142 + 143 + if !result.Equal(expected) { 144 + t.Errorf("Expected %v, got %v", expected, result) 145 + } 146 + } 147 + 148 + func TestSymmetricDifference(t *testing.T) { 149 + s1 := Collect(slices.Values([]int{1, 2, 3})) 150 + s2 := Collect(slices.Values([]int{2, 3, 4})) 151 + 152 + expected := Collect(slices.Values([]int{1, 4})) 153 + result := Collect(s1.SymmetricDifference(s2)) 154 + 155 + if !result.Equal(expected) { 156 + t.Errorf("Expected %v, got %v", expected, result) 157 + } 158 + } 159 + 160 + func TestSymmetricDifferenceCommutativeProperty(t *testing.T) { 161 + s1 := Collect(slices.Values([]int{1, 2, 3})) 162 + s2 := Collect(slices.Values([]int{2, 3, 4})) 163 + 164 + result1 := Collect(s1.SymmetricDifference(s2)) 165 + result2 := Collect(s2.SymmetricDifference(s1)) 166 + 167 + if !result1.Equal(result2) { 168 + t.Errorf("Expected %v, got %v", result1, result2) 169 + } 170 + } 171 + 172 + func TestIsSubset(t *testing.T) { 173 + s1 := Collect(slices.Values([]int{1, 2})) 174 + s2 := Collect(slices.Values([]int{1, 2, 3})) 175 + 176 + if !s1.IsSubset(s2) { 177 + t.Error("s1 should be subset of s2") 178 + } 179 + if s2.IsSubset(s1) { 180 + t.Error("s2 should not be subset of s1") 181 + } 182 + } 183 + 184 + func TestIsSuperset(t *testing.T) { 185 + s1 := Collect(slices.Values([]int{1, 2, 3})) 186 + s2 := Collect(slices.Values([]int{1, 2})) 187 + 188 + if !s1.IsSuperset(s2) { 189 + t.Error("s1 should be superset of s2") 190 + } 191 + if s2.IsSuperset(s1) { 192 + t.Error("s2 should not be superset of s1") 193 + } 194 + } 195 + 196 + func TestIsDisjoint(t *testing.T) { 197 + s1 := Collect(slices.Values([]int{1, 2})) 198 + s2 := Collect(slices.Values([]int{3, 4})) 199 + s3 := Collect(slices.Values([]int{2, 3})) 200 + 201 + if !s1.IsDisjoint(s2) { 202 + t.Error("s1 and s2 should be disjoint") 203 + } 204 + if s1.IsDisjoint(s3) { 205 + t.Error("s1 and s3 should not be disjoint") 206 + } 207 + } 208 + 209 + func TestEqual(t *testing.T) { 210 + s1 := Collect(slices.Values([]int{1, 2, 3})) 211 + s2 := Collect(slices.Values([]int{3, 2, 1})) 212 + s3 := Collect(slices.Values([]int{1, 2})) 213 + 214 + if !s1.Equal(s2) { 215 + t.Error("s1 and s2 should be equal") 216 + } 217 + if s1.Equal(s3) { 218 + t.Error("s1 and s3 should not be equal") 219 + } 220 + } 221 + 222 + func TestCollect(t *testing.T) { 223 + s1 := Collect(slices.Values([]int{1, 2})) 224 + s2 := Collect(slices.Values([]int{2, 3})) 225 + 226 + unionSet := Collect(s1.Union(s2)) 227 + if unionSet.Len() != 3 { 228 + t.Errorf("Expected union set length 3, got %d", unionSet.Len()) 229 + } 230 + if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) { 231 + t.Error("Union set should contain 1, 2, and 3") 232 + } 233 + 234 + diffSet := Collect(s1.Difference(s2)) 235 + if diffSet.Len() != 1 { 236 + t.Errorf("Expected difference set length 1, got %d", diffSet.Len()) 237 + } 238 + if !diffSet.Contains(1) { 239 + t.Error("Difference set should contain 1") 240 + } 241 + } 242 + 243 + func TestPropertySingleonLen(t *testing.T) { 244 + f := func(item int) bool { 245 + single := Singleton(item) 246 + return single.Len() == 1 247 + } 248 + 249 + if err := quick.Check(f, nil); err != nil { 250 + t.Error(err) 251 + } 252 + } 253 + 254 + func TestPropertyInsertIdempotent(t *testing.T) { 255 + f := func(s Set[int], item int) bool { 256 + clone := s.Clone() 257 + 258 + clone.Insert(item) 259 + firstLen := clone.Len() 260 + 261 + clone.Insert(item) 262 + 
secondLen := clone.Len() 263 + 264 + return firstLen == secondLen 265 + } 266 + 267 + if err := quick.Check(f, nil); err != nil { 268 + t.Error(err) 269 + } 270 + } 271 + 272 + func TestPropertyUnionCommutative(t *testing.T) { 273 + f := func(s1 Set[int], s2 Set[int]) bool { 274 + union1 := Collect(s1.Union(s2)) 275 + union2 := Collect(s2.Union(s1)) 276 + return union1.Equal(union2) 277 + } 278 + 279 + if err := quick.Check(f, nil); err != nil { 280 + t.Error(err) 281 + } 282 + } 283 + 284 + func TestPropertyIntersectionCommutative(t *testing.T) { 285 + f := func(s1 Set[int], s2 Set[int]) bool { 286 + inter1 := Collect(s1.Intersection(s2)) 287 + inter2 := Collect(s2.Intersection(s1)) 288 + return inter1.Equal(inter2) 289 + } 290 + 291 + if err := quick.Check(f, nil); err != nil { 292 + t.Error(err) 293 + } 294 + } 295 + 296 + func TestPropertyCloneEquals(t *testing.T) { 297 + f := func(s Set[int]) bool { 298 + clone := s.Clone() 299 + return s.Equal(clone) 300 + } 301 + 302 + if err := quick.Check(f, nil); err != nil { 303 + t.Error(err) 304 + } 305 + } 306 + 307 + func TestPropertyIntersectionIsSubset(t *testing.T) { 308 + f := func(s1 Set[int], s2 Set[int]) bool { 309 + inter := Collect(s1.Intersection(s2)) 310 + return inter.IsSubset(s1) && inter.IsSubset(s2) 311 + } 312 + 313 + if err := quick.Check(f, nil); err != nil { 314 + t.Error(err) 315 + } 316 + } 317 + 318 + func TestPropertyUnionIsSuperset(t *testing.T) { 319 + f := func(s1 Set[int], s2 Set[int]) bool { 320 + union := Collect(s1.Union(s2)) 321 + return union.IsSuperset(s1) && union.IsSuperset(s2) 322 + } 323 + 324 + if err := quick.Check(f, nil); err != nil { 325 + t.Error(err) 326 + } 327 + } 328 + 329 + func TestPropertyDifferenceDisjoint(t *testing.T) { 330 + f := func(s1 Set[int], s2 Set[int]) bool { 331 + diff := Collect(s1.Difference(s2)) 332 + return diff.IsDisjoint(s2) 333 + } 334 + 335 + if err := quick.Check(f, nil); err != nil { 336 + t.Error(err) 337 + } 338 + } 339 + 340 + func TestPropertySymmetricDifferenceCommutative(t *testing.T) { 341 + f := func(s1 Set[int], s2 Set[int]) bool { 342 + symDiff1 := Collect(s1.SymmetricDifference(s2)) 343 + symDiff2 := Collect(s2.SymmetricDifference(s1)) 344 + return symDiff1.Equal(symDiff2) 345 + } 346 + 347 + if err := quick.Check(f, nil); err != nil { 348 + t.Error(err) 349 + } 350 + } 351 + 352 + func TestPropertyRemoveWorks(t *testing.T) { 353 + f := func(s Set[int], item int) bool { 354 + clone := s.Clone() 355 + clone.Insert(item) 356 + clone.Remove(item) 357 + return !clone.Contains(item) 358 + } 359 + 360 + if err := quick.Check(f, nil); err != nil { 361 + t.Error(err) 362 + } 363 + } 364 + 365 + func TestPropertyClearEmpty(t *testing.T) { 366 + f := func(s Set[int]) bool { 367 + s.Clear() 368 + return s.IsEmpty() && s.Len() == 0 369 + } 370 + 371 + if err := quick.Check(f, nil); err != nil { 372 + t.Error(err) 373 + } 374 + } 375 + 376 + func TestPropertyIsSubsetReflexive(t *testing.T) { 377 + f := func(s Set[int]) bool { 378 + return s.IsSubset(s) 379 + } 380 + 381 + if err := quick.Check(f, nil); err != nil { 382 + t.Error(err) 383 + } 384 + } 385 + 386 + func TestPropertyDeMorganUnion(t *testing.T) { 387 + f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool { 388 + // create a universe that contains both sets 389 + u := universe.Clone() 390 + for item := range s1.All() { 391 + u.Insert(item) 392 + } 393 + for item := range s2.All() { 394 + u.Insert(item) 395 + } 396 + 397 + // (A u B)' = A' n B' 398 + union := Collect(s1.Union(s2)) 399 + complementUnion := 
Collect(u.Difference(union)) 400 + 401 + complementS1 := Collect(u.Difference(s1)) 402 + complementS2 := Collect(u.Difference(s2)) 403 + intersectionComplements := Collect(complementS1.Intersection(complementS2)) 404 + 405 + return complementUnion.Equal(intersectionComplements) 406 + } 407 + 408 + if err := quick.Check(f, nil); err != nil { 409 + t.Error(err) 410 + } 411 + }
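The property tests above pass Set[int] values straight to quick.Check. Because the only field of Set is unexported, testing/quick cannot populate it by reflection and usually expects such a type to implement quick.Generator; whether a Generate method exists elsewhere in the package is not visible in this diff. A hypothetical sketch of what one could look like, if it were needed alongside the tests:

    package sets

    import (
        "math/rand"
        "reflect"
        "testing/quick"
    )

    // Generate lets testing/quick produce arbitrary Set values for the
    // property tests. Hypothetical sketch; not part of the diff above.
    func (Set[T]) Generate(r *rand.Rand, size int) reflect.Value {
        s := New[T]()
        n := r.Intn(size + 1)
        for i := 0; i < n; i++ {
            if v, ok := quick.Value(reflect.TypeFor[T](), r); ok {
                s.Insert(v.Interface().(T))
            }
        }
        return reflect.ValueOf(s)
    }

    var _ quick.Generator = Set[int]{}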
+1
spindle/db/repos.go
··· 16 16 if err != nil { 17 17 return nil, err 18 18 } 19 + defer rows.Close() 19 20 20 21 var knots []string 21 22 for rows.Next() {
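For reference, the deferred Close added here is one half of the standard database/sql iteration pattern; the other half is checking rows.Err() once the loop finishes. A generic sketch with placeholder names, not the actual spindle/db query:

    package example

    import "database/sql"

    // listKnots sketches the full rows-iteration pattern; the query text and
    // function name are placeholders rather than the real spindle code.
    func listKnots(d *sql.DB) ([]string, error) {
        rows, err := d.Query(`select knot from repos`)
        if err != nil {
            return nil, err
        }
        defer rows.Close() // release the connection even on early return

        var knots []string
        for rows.Next() {
            var k string
            if err := rows.Scan(&k); err != nil {
                return nil, err
            }
            knots = append(knots, k)
        }
        return knots, rows.Err() // surface any iteration error after the loop
    }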
+22 -21
spindle/engine/engine.go
··· 3 3 import ( 4 4 "context" 5 5 "errors" 6 - "fmt" 7 6 "log/slog" 7 + "sync" 8 8 9 9 securejoin "github.com/cyphar/filepath-securejoin" 10 - "golang.org/x/sync/errgroup" 11 10 "tangled.org/core/notifier" 12 11 "tangled.org/core/spindle/config" 13 12 "tangled.org/core/spindle/db" ··· 31 30 } 32 31 } 33 32 34 - eg, ctx := errgroup.WithContext(ctx) 33 + var wg sync.WaitGroup 35 34 for eng, wfs := range pipeline.Workflows { 36 35 workflowTimeout := eng.WorkflowTimeout() 37 36 l.Info("using workflow timeout", "timeout", workflowTimeout) 38 37 39 38 for _, w := range wfs { 40 - eg.Go(func() error { 39 + wg.Add(1) 40 + go func() { 41 + defer wg.Done() 42 + 41 43 wid := models.WorkflowId{ 42 44 PipelineId: pipelineId, 43 45 Name: w.Name, ··· 45 47 46 48 err := db.StatusRunning(wid, n) 47 49 if err != nil { 48 - return err 50 + l.Error("failed to set workflow status to running", "wid", wid, "err", err) 51 + return 49 52 } 50 53 51 54 err = eng.SetupWorkflow(ctx, wid, &w) ··· 61 64 62 65 dbErr := db.StatusFailed(wid, err.Error(), -1, n) 63 66 if dbErr != nil { 64 - return dbErr 67 + l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr) 65 68 } 66 - return err 69 + return 67 70 } 68 71 defer eng.DestroyWorkflow(ctx, wid) 69 72 70 - wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid) 73 + secretValues := make([]string, len(allSecrets)) 74 + for i, s := range allSecrets { 75 + secretValues[i] = s.Value 76 + } 77 + wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues) 71 78 if err != nil { 72 79 l.Warn("failed to setup step logger; logs will not be persisted", "error", err) 73 80 wfLogger = nil ··· 99 106 if errors.Is(err, ErrTimedOut) { 100 107 dbErr := db.StatusTimeout(wid, n) 101 108 if dbErr != nil { 102 - return dbErr 109 + l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr) 103 110 } 104 111 } else { 105 112 dbErr := db.StatusFailed(wid, err.Error(), -1, n) 106 113 if dbErr != nil { 107 - return dbErr 114 + l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr) 108 115 } 109 116 } 110 - 111 - return fmt.Errorf("starting steps image: %w", err) 117 + return 112 118 } 113 119 } 114 120 115 121 err = db.StatusSuccess(wid, n) 116 122 if err != nil { 117 - return err 123 + l.Error("failed to set workflow status to success", "wid", wid, "err", err) 118 124 } 119 - 120 - return nil 121 - }) 125 + }() 122 126 } 123 127 } 124 128 125 - if err := eg.Wait(); err != nil { 126 - l.Error("failed to run one or more workflows", "err", err) 127 - } else { 128 - l.Info("successfully ran full pipeline") 129 - } 129 + wg.Wait() 130 + l.Info("all workflows completed") 130 131 }
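The move from errgroup to sync.WaitGroup changes the failure semantics: errgroup.WithContext cancels the shared context on the first error, tearing down sibling workflows, whereas the WaitGroup version logs each failure and lets the remaining workflows run to completion. A minimal sketch of that fan-out pattern with placeholder names, not the engine code itself:

    package main

    import (
        "errors"
        "log/slog"
        "sync"
    )

    // run stands in for executing a single workflow.
    func run(name string) error {
        if name == "flaky" {
            return errors.New("boom")
        }
        return nil
    }

    func main() {
        workflows := []string{"build", "flaky", "test"}

        var wg sync.WaitGroup
        for _, w := range workflows {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if err := run(w); err != nil {
                    // log and move on; unlike errgroup.WithContext, one
                    // failure does not cancel the sibling workflows
                    slog.Error("workflow failed", "name", w, "err", err)
                }
            }()
        }
        wg.Wait()
    }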
+5 -3
spindle/engines/nixery/engine.go
··· 294 294 workflowEnvs.AddEnv(s.Key, s.Value) 295 295 } 296 296 297 - step := w.Steps[idx].(Step) 297 + step := w.Steps[idx] 298 298 299 299 select { 300 300 case <-ctx.Done(): ··· 303 303 } 304 304 305 305 envs := append(EnvVars(nil), workflowEnvs...) 306 - for k, v := range step.environment { 307 - envs.AddEnv(k, v) 306 + if nixStep, ok := step.(Step); ok { 307 + for k, v := range nixStep.environment { 308 + envs.AddEnv(k, v) 309 + } 308 310 } 309 311 envs.AddEnv("HOME", homeDir) 310 312
+6 -1
spindle/models/logger.go
··· 12 12 type WorkflowLogger struct { 13 13 file *os.File 14 14 encoder *json.Encoder 15 + mask *SecretMask 15 16 } 16 17 17 - func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) { 18 + func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) { 18 19 path := LogFilePath(baseDir, wid) 19 20 20 21 file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) ··· 25 26 return &WorkflowLogger{ 26 27 file: file, 27 28 encoder: json.NewEncoder(file), 29 + mask: NewSecretMask(secretValues), 28 30 }, nil 29 31 } 30 32 ··· 62 64 63 65 func (w *dataWriter) Write(p []byte) (int, error) { 64 66 line := strings.TrimRight(string(p), "\r\n") 67 + if w.logger.mask != nil { 68 + line = w.logger.mask.Mask(line) 69 + } 65 70 entry := NewDataLogLine(w.idx, line, w.stream) 66 71 if err := w.logger.encoder.Encode(entry); err != nil { 67 72 return 0, err
+51
spindle/models/secret_mask.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "strings" 6 + ) 7 + 8 + // SecretMask replaces secret values in strings with "***". 9 + type SecretMask struct { 10 + replacer *strings.Replacer 11 + } 12 + 13 + // NewSecretMask creates a mask for the given secret values. 14 + // Also registers base64-encoded variants of each secret. 15 + func NewSecretMask(values []string) *SecretMask { 16 + var pairs []string 17 + 18 + for _, value := range values { 19 + if value == "" { 20 + continue 21 + } 22 + 23 + pairs = append(pairs, value, "***") 24 + 25 + b64 := base64.StdEncoding.EncodeToString([]byte(value)) 26 + if b64 != value { 27 + pairs = append(pairs, b64, "***") 28 + } 29 + 30 + b64NoPad := strings.TrimRight(b64, "=") 31 + if b64NoPad != b64 && b64NoPad != value { 32 + pairs = append(pairs, b64NoPad, "***") 33 + } 34 + } 35 + 36 + if len(pairs) == 0 { 37 + return nil 38 + } 39 + 40 + return &SecretMask{ 41 + replacer: strings.NewReplacer(pairs...), 42 + } 43 + } 44 + 45 + // Mask replaces all registered secret values with "***". 46 + func (m *SecretMask) Mask(input string) string { 47 + if m == nil || m.replacer == nil { 48 + return input 49 + } 50 + return m.replacer.Replace(input) 51 + }
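A small usage sketch of the mask, showing that both the raw secret and its base64 form are scrubbed; the import path is assumed from the monorepo layout:

    package main

    import (
        "fmt"

        "tangled.org/core/spindle/models" // import path assumed
    )

    func main() {
        mask := models.NewSecretMask([]string{"hunter2"})

        fmt.Println(mask.Mask("token=hunter2"))      // token=***
        fmt.Println(mask.Mask("auth: aHVudGVyMg==")) // auth: *** (base64 variant)
    }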
+135
spindle/models/secret_mask_test.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "testing" 6 + ) 7 + 8 + func TestSecretMask_BasicMasking(t *testing.T) { 9 + mask := NewSecretMask([]string{"mysecret123"}) 10 + 11 + input := "The password is mysecret123 in this log" 12 + expected := "The password is *** in this log" 13 + 14 + result := mask.Mask(input) 15 + if result != expected { 16 + t.Errorf("expected %q, got %q", expected, result) 17 + } 18 + } 19 + 20 + func TestSecretMask_Base64Encoded(t *testing.T) { 21 + secret := "mysecret123" 22 + mask := NewSecretMask([]string{secret}) 23 + 24 + b64 := base64.StdEncoding.EncodeToString([]byte(secret)) 25 + input := "Encoded: " + b64 26 + expected := "Encoded: ***" 27 + 28 + result := mask.Mask(input) 29 + if result != expected { 30 + t.Errorf("expected %q, got %q", expected, result) 31 + } 32 + } 33 + 34 + func TestSecretMask_Base64NoPadding(t *testing.T) { 35 + // "test" encodes to "dGVzdA==" with padding 36 + secret := "test" 37 + mask := NewSecretMask([]string{secret}) 38 + 39 + b64NoPad := "dGVzdA" // base64 without padding 40 + input := "Token: " + b64NoPad 41 + expected := "Token: ***" 42 + 43 + result := mask.Mask(input) 44 + if result != expected { 45 + t.Errorf("expected %q, got %q", expected, result) 46 + } 47 + } 48 + 49 + func TestSecretMask_MultipleSecrets(t *testing.T) { 50 + mask := NewSecretMask([]string{"password1", "apikey123"}) 51 + 52 + input := "Using password1 and apikey123 for auth" 53 + expected := "Using *** and *** for auth" 54 + 55 + result := mask.Mask(input) 56 + if result != expected { 57 + t.Errorf("expected %q, got %q", expected, result) 58 + } 59 + } 60 + 61 + func TestSecretMask_MultipleOccurrences(t *testing.T) { 62 + mask := NewSecretMask([]string{"secret"}) 63 + 64 + input := "secret appears twice: secret" 65 + expected := "*** appears twice: ***" 66 + 67 + result := mask.Mask(input) 68 + if result != expected { 69 + t.Errorf("expected %q, got %q", expected, result) 70 + } 71 + } 72 + 73 + func TestSecretMask_ShortValues(t *testing.T) { 74 + mask := NewSecretMask([]string{"abc", "xy", ""}) 75 + 76 + if mask == nil { 77 + t.Fatal("expected non-nil mask") 78 + } 79 + 80 + input := "abc xy test" 81 + expected := "*** *** test" 82 + result := mask.Mask(input) 83 + if result != expected { 84 + t.Errorf("expected %q, got %q", expected, result) 85 + } 86 + } 87 + 88 + func TestSecretMask_NilMask(t *testing.T) { 89 + var mask *SecretMask 90 + 91 + input := "some input text" 92 + result := mask.Mask(input) 93 + if result != input { 94 + t.Errorf("expected %q, got %q", input, result) 95 + } 96 + } 97 + 98 + func TestSecretMask_EmptyInput(t *testing.T) { 99 + mask := NewSecretMask([]string{"secret"}) 100 + 101 + result := mask.Mask("") 102 + if result != "" { 103 + t.Errorf("expected empty string, got %q", result) 104 + } 105 + } 106 + 107 + func TestSecretMask_NoMatch(t *testing.T) { 108 + mask := NewSecretMask([]string{"secretvalue"}) 109 + 110 + input := "nothing to mask here" 111 + result := mask.Mask(input) 112 + if result != input { 113 + t.Errorf("expected %q, got %q", input, result) 114 + } 115 + } 116 + 117 + func TestSecretMask_EmptySecretsList(t *testing.T) { 118 + mask := NewSecretMask([]string{}) 119 + 120 + if mask != nil { 121 + t.Error("expected nil mask for empty secrets list") 122 + } 123 + } 124 + 125 + func TestSecretMask_EmptySecretsFiltered(t *testing.T) { 126 + mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"}) 127 + 128 + input := "Using validpassword here" 129 + expected := "Using *** here" 130 + 
131 + result := mask.Mask(input) 132 + if result != expected { 133 + t.Errorf("expected %q, got %q", expected, result) 134 + } 135 + }
+1 -1
spindle/motd
··· 20 20 ** 21 21 ******** 22 22 23 - This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle 23 + This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles 24 24 25 25 Most API routes are under /xrpc/
+1 -1
tailwind.config.js
··· 2 2 const colors = require("tailwindcss/colors"); 3 3 4 4 module.exports = { 5 - content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"], 5 + content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"], 6 6 darkMode: "media", 7 7 theme: { 8 8 container: {
+199
types/commit.go
··· 1 + package types 2 + 3 + import ( 4 + "bytes" 5 + "encoding/json" 6 + "fmt" 7 + "maps" 8 + "regexp" 9 + "strings" 10 + 11 + "github.com/go-git/go-git/v5/plumbing" 12 + "github.com/go-git/go-git/v5/plumbing/object" 13 + ) 14 + 15 + type Commit struct { 16 + // hash of the commit object. 17 + Hash plumbing.Hash `json:"hash,omitempty"` 18 + 19 + // author is the original author of the commit. 20 + Author object.Signature `json:"author"` 21 + 22 + // committer is the one performing the commit, might be different from author. 23 + Committer object.Signature `json:"committer"` 24 + 25 + // message is the commit message, contains arbitrary text. 26 + Message string `json:"message"` 27 + 28 + // treehash is the hash of the root tree of the commit. 29 + Tree string `json:"tree"` 30 + 31 + // parents are the hashes of the parent commits of the commit. 32 + ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"` 33 + 34 + // pgpsignature is the pgp signature of the commit. 35 + PGPSignature string `json:"pgp_signature,omitempty"` 36 + 37 + // mergetag is the embedded tag object when a merge commit is created by 38 + // merging a signed tag. 39 + MergeTag string `json:"merge_tag,omitempty"` 40 + 41 + // changeid is a unique identifier for the change (e.g., gerrit change-id). 42 + ChangeId string `json:"change_id,omitempty"` 43 + 44 + // extraheaders contains additional headers not captured by other fields. 45 + ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"` 46 + 47 + // deprecated: kept for backwards compatibility with old json format. 48 + This string `json:"this,omitempty"` 49 + 50 + // deprecated: kept for backwards compatibility with old json format. 51 + Parent string `json:"parent,omitempty"` 52 + } 53 + 54 + // types.Commit unifies two commit structs: 55 + // - object.Commit from go-git 56 + // - types.NiceDiff.Commit 57 + // 58 + // to do this in a backwards-compatible fashion, we define the base struct 59 + // to use the same fields as NiceDiff.Commit, and we also unmarshal 60 + // the struct fields from go-git structs; this custom unmarshal makes sense 61 + // of both representations and unifies them to have maximal data in either 62 + // form. 
63 + func (c *Commit) UnmarshalJSON(data []byte) error { 64 + type Alias Commit 65 + 66 + aux := &struct { 67 + *object.Commit 68 + *Alias 69 + }{ 70 + Alias: (*Alias)(c), 71 + } 72 + 73 + if err := json.Unmarshal(data, aux); err != nil { 74 + return err 75 + } 76 + 77 + c.FromGoGitCommit(aux.Commit) 78 + 79 + return nil 80 + } 81 + 82 + // fill in as much of Commit as possible from the given go-git commit 83 + func (c *Commit) FromGoGitCommit(gc *object.Commit) { 84 + if gc == nil { 85 + return 86 + } 87 + 88 + if c.Hash.IsZero() { 89 + c.Hash = gc.Hash 90 + } 91 + if c.This == "" { 92 + c.This = gc.Hash.String() 93 + } 94 + if isEmptySignature(c.Author) { 95 + c.Author = gc.Author 96 + } 97 + if isEmptySignature(c.Committer) { 98 + c.Committer = gc.Committer 99 + } 100 + if c.Message == "" { 101 + c.Message = gc.Message 102 + } 103 + if c.Tree == "" { 104 + c.Tree = gc.TreeHash.String() 105 + } 106 + if c.PGPSignature == "" { 107 + c.PGPSignature = gc.PGPSignature 108 + } 109 + if c.MergeTag == "" { 110 + c.MergeTag = gc.MergeTag 111 + } 112 + 113 + if len(c.ParentHashes) == 0 { 114 + c.ParentHashes = gc.ParentHashes 115 + } 116 + if c.Parent == "" && len(gc.ParentHashes) > 0 { 117 + c.Parent = gc.ParentHashes[0].String() 118 + } 119 + 120 + if len(c.ExtraHeaders) == 0 { 121 + c.ExtraHeaders = make(map[string][]byte) 122 + maps.Copy(c.ExtraHeaders, gc.ExtraHeaders) 123 + } 124 + 125 + if c.ChangeId == "" { 126 + if v, ok := gc.ExtraHeaders["change-id"]; ok { 127 + c.ChangeId = string(v) 128 + } 129 + } 130 + } 131 + 132 + func isEmptySignature(s object.Signature) bool { 133 + return s.Email == "" && s.Name == "" && s.When.IsZero() 134 + } 135 + 136 + // produce a verifiable payload from this commit's metadata 137 + func (c *Commit) Payload() string { 138 + author := bytes.NewBuffer([]byte{}) 139 + c.Author.Encode(author) 140 + 141 + committer := bytes.NewBuffer([]byte{}) 142 + c.Committer.Encode(committer) 143 + 144 + payload := strings.Builder{} 145 + 146 + fmt.Fprintf(&payload, "tree %s\n", c.Tree) 147 + 148 + if len(c.ParentHashes) > 0 { 149 + for _, p := range c.ParentHashes { 150 + fmt.Fprintf(&payload, "parent %s\n", p.String()) 151 + } 152 + } else { 153 + // present for backwards compatibility 154 + fmt.Fprintf(&payload, "parent %s\n", c.Parent) 155 + } 156 + 157 + fmt.Fprintf(&payload, "author %s\n", author.String()) 158 + fmt.Fprintf(&payload, "committer %s\n", committer.String()) 159 + 160 + if c.ChangeId != "" { 161 + fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId) 162 + } else if v, ok := c.ExtraHeaders["change-id"]; ok { 163 + fmt.Fprintf(&payload, "change-id %s\n", string(v)) 164 + } 165 + 166 + fmt.Fprintf(&payload, "\n%s", c.Message) 167 + 168 + return payload.String() 169 + } 170 + 171 + var ( 172 + coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`) 173 + ) 174 + 175 + func (commit Commit) CoAuthors() []object.Signature { 176 + var coAuthors []object.Signature 177 + seen := make(map[string]bool) 178 + matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1) 179 + 180 + for _, match := range matches { 181 + if len(match) >= 3 { 182 + name := strings.TrimSpace(match[1]) 183 + email := strings.TrimSpace(match[2]) 184 + 185 + if seen[email] { 186 + continue 187 + } 188 + seen[email] = true 189 + 190 + coAuthors = append(coAuthors, object.Signature{ 191 + Name: name, 192 + Email: email, 193 + When: commit.Committer.When, 194 + }) 195 + } 196 + } 197 + 198 + return coAuthors 199 + }
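A short sketch of how the new Commit type could be used: fill it from a go-git commit and pull co-authors out of the message trailer. The go-git commit here is constructed by hand purely for illustration, and the import path is assumed:

    package main

    import (
        "fmt"

        "github.com/go-git/go-git/v5/plumbing/object"

        "tangled.org/core/types" // import path assumed
    )

    func main() {
        gc := &object.Commit{
            Message: "fix parser\n\nCo-authored-by: Jane Doe <jane@example.com>\n",
        }

        var c types.Commit
        c.FromGoGitCommit(gc) // copies hash, signatures, message, tree, parents

        for _, co := range c.CoAuthors() {
            fmt.Printf("%s <%s>\n", co.Name, co.Email) // Jane Doe <jane@example.com>
        }
    }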
+5 -12
types/diff.go
··· 2 2 3 3 import ( 4 4 "github.com/bluekeyes/go-gitdiff/gitdiff" 5 - "github.com/go-git/go-git/v5/plumbing/object" 6 5 ) 7 6 8 7 type DiffOpts struct { ··· 43 42 44 43 // A nicer git diff representation. 45 44 type NiceDiff struct { 46 - Commit struct { 47 - Message string `json:"message"` 48 - Author object.Signature `json:"author"` 49 - This string `json:"this"` 50 - Parent string `json:"parent"` 51 - PGPSignature string `json:"pgp_signature"` 52 - Committer object.Signature `json:"committer"` 53 - Tree string `json:"tree"` 54 - ChangedId string `json:"change_id"` 55 - } `json:"commit"` 56 - Stat struct { 45 + Commit Commit `json:"commit"` 46 + Stat struct { 57 47 FilesChanged int `json:"files_changed"` 58 48 Insertions int `json:"insertions"` 59 49 Deletions int `json:"deletions"` ··· 84 74 85 75 // used by html elements as a unique ID for hrefs 86 76 func (d *Diff) Id() string { 77 + if d.IsDelete { 78 + return d.Name.Old 79 + } 87 80 return d.Name.New 88 81 } 89 82
+112
types/diff_test.go
··· 1 + package types 2 + 3 + import "testing" 4 + 5 + func TestDiffId(t *testing.T) { 6 + tests := []struct { 7 + name string 8 + diff Diff 9 + expected string 10 + }{ 11 + { 12 + name: "regular file uses new name", 13 + diff: Diff{ 14 + Name: struct { 15 + Old string `json:"old"` 16 + New string `json:"new"` 17 + }{Old: "", New: "src/main.go"}, 18 + }, 19 + expected: "src/main.go", 20 + }, 21 + { 22 + name: "new file uses new name", 23 + diff: Diff{ 24 + Name: struct { 25 + Old string `json:"old"` 26 + New string `json:"new"` 27 + }{Old: "", New: "src/new.go"}, 28 + IsNew: true, 29 + }, 30 + expected: "src/new.go", 31 + }, 32 + { 33 + name: "deleted file uses old name", 34 + diff: Diff{ 35 + Name: struct { 36 + Old string `json:"old"` 37 + New string `json:"new"` 38 + }{Old: "src/deleted.go", New: ""}, 39 + IsDelete: true, 40 + }, 41 + expected: "src/deleted.go", 42 + }, 43 + { 44 + name: "renamed file uses new name", 45 + diff: Diff{ 46 + Name: struct { 47 + Old string `json:"old"` 48 + New string `json:"new"` 49 + }{Old: "src/old.go", New: "src/renamed.go"}, 50 + IsRename: true, 51 + }, 52 + expected: "src/renamed.go", 53 + }, 54 + } 55 + 56 + for _, tt := range tests { 57 + t.Run(tt.name, func(t *testing.T) { 58 + if got := tt.diff.Id(); got != tt.expected { 59 + t.Errorf("Diff.Id() = %q, want %q", got, tt.expected) 60 + } 61 + }) 62 + } 63 + } 64 + 65 + func TestChangedFilesMatchesDiffId(t *testing.T) { 66 + // ChangedFiles() must return values matching each Diff's Id() 67 + // so that sidebar links point to the correct anchors. 68 + // Tests existing, deleted, new, and renamed files. 69 + nd := NiceDiff{ 70 + Diff: []Diff{ 71 + { 72 + Name: struct { 73 + Old string `json:"old"` 74 + New string `json:"new"` 75 + }{Old: "", New: "src/modified.go"}, 76 + }, 77 + { 78 + Name: struct { 79 + Old string `json:"old"` 80 + New string `json:"new"` 81 + }{Old: "src/deleted.go", New: ""}, 82 + IsDelete: true, 83 + }, 84 + { 85 + Name: struct { 86 + Old string `json:"old"` 87 + New string `json:"new"` 88 + }{Old: "", New: "src/new.go"}, 89 + IsNew: true, 90 + }, 91 + { 92 + Name: struct { 93 + Old string `json:"old"` 94 + New string `json:"new"` 95 + }{Old: "src/old.go", New: "src/renamed.go"}, 96 + IsRename: true, 97 + }, 98 + }, 99 + } 100 + 101 + changedFiles := nd.ChangedFiles() 102 + 103 + if len(changedFiles) != len(nd.Diff) { 104 + t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff)) 105 + } 106 + 107 + for i, diff := range nd.Diff { 108 + if changedFiles[i] != diff.Id() { 109 + t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id()) 110 + } 111 + } 112 + }
+17 -17
types/repo.go
··· 8 8 ) 9 9 10 10 type RepoIndexResponse struct { 11 - IsEmpty bool `json:"is_empty"` 12 - Ref string `json:"ref,omitempty"` 13 - Readme string `json:"readme,omitempty"` 14 - ReadmeFileName string `json:"readme_file_name,omitempty"` 15 - Commits []*object.Commit `json:"commits,omitempty"` 16 - Description string `json:"description,omitempty"` 17 - Files []NiceTree `json:"files,omitempty"` 18 - Branches []Branch `json:"branches,omitempty"` 19 - Tags []*TagReference `json:"tags,omitempty"` 20 - TotalCommits int `json:"total_commits,omitempty"` 11 + IsEmpty bool `json:"is_empty"` 12 + Ref string `json:"ref,omitempty"` 13 + Readme string `json:"readme,omitempty"` 14 + ReadmeFileName string `json:"readme_file_name,omitempty"` 15 + Commits []Commit `json:"commits,omitempty"` 16 + Description string `json:"description,omitempty"` 17 + Files []NiceTree `json:"files,omitempty"` 18 + Branches []Branch `json:"branches,omitempty"` 19 + Tags []*TagReference `json:"tags,omitempty"` 20 + TotalCommits int `json:"total_commits,omitempty"` 21 21 } 22 22 23 23 type RepoLogResponse struct { 24 - Commits []*object.Commit `json:"commits,omitempty"` 25 - Ref string `json:"ref,omitempty"` 26 - Description string `json:"description,omitempty"` 27 - Log bool `json:"log,omitempty"` 28 - Total int `json:"total,omitempty"` 29 - Page int `json:"page,omitempty"` 30 - PerPage int `json:"per_page,omitempty"` 24 + Commits []Commit `json:"commits,omitempty"` 25 + Ref string `json:"ref,omitempty"` 26 + Description string `json:"description,omitempty"` 27 + Log bool `json:"log,omitempty"` 28 + Total int `json:"total,omitempty"` 29 + Page int `json:"page,omitempty"` 30 + PerPage int `json:"per_page,omitempty"` 31 31 } 32 32 33 33 type RepoCommitResponse struct {
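Since these response structs now carry []Commit instead of []*object.Commit, call sites that still hold go-git commits presumably convert them; a hypothetical helper showing one way to do that (the knotserver-side changes are not part of this hunk):

    package example

    import (
        "github.com/go-git/go-git/v5/plumbing/object"

        "tangled.org/core/types" // import path assumed
    )

    // toTypesCommits converts go-git commits into the new response type;
    // hypothetical helper, not taken from this changeset.
    func toTypesCommits(in []*object.Commit) []types.Commit {
        out := make([]types.Commit, 0, len(in))
        for _, gc := range in {
            var c types.Commit
            c.FromGoGitCommit(gc)
            out = append(out, c)
        }
        return out
    }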