···7 "time"
89 "tangled.org/core/appview/models"
010)
1112-func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) {
13 var pipelines []models.Pipeline
1415 var conditions []string
···168169// this is a mega query, but the most useful one:
170// get N pipelines, for each one get the latest status of its N workflows
171-func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
172 var conditions []string
173 var args []any
174 for _, filter := range filters {
175- filter.key = "p." + filter.key // the table is aliased in the query to `p`
176 conditions = append(conditions, filter.Condition())
177 args = append(args, filter.Arg()...)
178 }
···264 conditions = nil
265 args = nil
266 for _, p := range pipelines {
267- knotFilter := FilterEq("pipeline_knot", p.Knot)
268- rkeyFilter := FilterEq("pipeline_rkey", p.Rkey)
269 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
270 args = append(args, p.Knot)
271 args = append(args, p.Rkey)
···7 "time"
89 "tangled.org/core/appview/models"
10+ "tangled.org/core/orm"
11)
1213+func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) {
14 var pipelines []models.Pipeline
1516 var conditions []string
···169170// this is a mega query, but the most useful one:
171// get N pipelines, for each one get the latest status of its N workflows
172+func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) {
173 var conditions []string
174 var args []any
175 for _, filter := range filters {
176+ filter.Key = "p." + filter.Key // the table is aliased in the query to `p`
177 conditions = append(conditions, filter.Condition())
178 args = append(args, filter.Arg()...)
179 }
···265 conditions = nil
266 args = nil
267 for _, p := range pipelines {
268+ knotFilter := orm.FilterEq("pipeline_knot", p.Knot)
269+ rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey)
270 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
271 args = append(args, p.Knot)
272 args = append(args, p.Rkey)
+32-16
appview/db/profile.go
···1112 "github.com/bluesky-social/indigo/atproto/syntax"
13 "tangled.org/core/appview/models"
014)
1516const TimeframeMonths = 7
···19 timeline := models.ProfileTimeline{
20 ByMonth: make([]models.ByMonth, TimeframeMonths),
21 }
22- currentMonth := time.Now().Month()
23 timeframe := fmt.Sprintf("-%d months", TimeframeMonths)
2425 pulls, err := GetPullsByOwnerDid(e, forDid, timeframe)
···2930 // group pulls by month
31 for _, pull := range pulls {
32- pullMonth := pull.Created.Month()
3334- if currentMonth-pullMonth >= TimeframeMonths {
35 // shouldn't happen; but times are weird
36 continue
37 }
3839- idx := currentMonth - pullMonth
40 items := &timeline.ByMonth[idx].PullEvents.Items
4142 *items = append(*items, &pull)
···4445 issues, err := GetIssues(
46 e,
47- FilterEq("did", forDid),
48- FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
49 )
50 if err != nil {
51 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
52 }
5354 for _, issue := range issues {
55- issueMonth := issue.Created.Month()
5657- if currentMonth-issueMonth >= TimeframeMonths {
58 // shouldn't happen; but times are weird
59 continue
60 }
6162- idx := currentMonth - issueMonth
63 items := &timeline.ByMonth[idx].IssueEvents.Items
6465 *items = append(*items, &issue)
66 }
6768- repos, err := GetRepos(e, 0, FilterEq("did", forDid))
69 if err != nil {
70 return nil, fmt.Errorf("error getting all repos by did: %w", err)
71 }
···76 if repo.Source != "" {
77 sourceRepo, err = GetRepoByAtUri(e, repo.Source)
78 if err != nil {
79- return nil, err
080 }
81 }
8283- repoMonth := repo.Created.Month()
8485- if currentMonth-repoMonth >= TimeframeMonths {
86 // shouldn't happen; but times are weird
87 continue
88 }
8990- idx := currentMonth - repoMonth
9192 items := &timeline.ByMonth[idx].RepoEvents
93 *items = append(*items, models.RepoEvent{
···97 }
9899 return &timeline, nil
000000100}
101102func UpsertProfile(tx *sql.Tx, profile *models.Profile) error {
···199 return tx.Commit()
200}
201202-func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) {
203 var conditions []string
204 var args []any
205 for _, filter := range filters {
···229 if err != nil {
230 return nil, err
231 }
0232233 profileMap := make(map[string]*models.Profile)
234 for rows.Next() {
···269 if err != nil {
270 return nil, err
271 }
00272 idxs := make(map[string]int)
273 for did := range profileMap {
274 idxs[did] = 0
···289 if err != nil {
290 return nil, err
291 }
00292 idxs = make(map[string]int)
293 for did := range profileMap {
294 idxs[did] = 0
···407 case models.VanityStatRepositoryCount:
408 query = `select count(id) from repos where did = ?`
409 args = append(args, did)
000410 }
411412 var result uint64
···441 }
442443 // ensure all pinned repos are either own repos or collaborating repos
444- repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
445 if err != nil {
446 log.Printf("getting repos for %s: %s", profile.Did, err)
447 }
···1112 "github.com/bluesky-social/indigo/atproto/syntax"
13 "tangled.org/core/appview/models"
14+ "tangled.org/core/orm"
15)
1617const TimeframeMonths = 7
···20 timeline := models.ProfileTimeline{
21 ByMonth: make([]models.ByMonth, TimeframeMonths),
22 }
23+ now := time.Now()
24 timeframe := fmt.Sprintf("-%d months", TimeframeMonths)
2526 pulls, err := GetPullsByOwnerDid(e, forDid, timeframe)
···3031 // group pulls by month
32 for _, pull := range pulls {
33+ monthsAgo := monthsBetween(pull.Created, now)
3435+ if monthsAgo >= TimeframeMonths {
36 // shouldn't happen; but times are weird
37 continue
38 }
3940+ idx := monthsAgo
41 items := &timeline.ByMonth[idx].PullEvents.Items
4243 *items = append(*items, &pull)
···4546 issues, err := GetIssues(
47 e,
48+ orm.FilterEq("did", forDid),
49+ orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
50 )
51 if err != nil {
52 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
53 }
5455 for _, issue := range issues {
56+ monthsAgo := monthsBetween(issue.Created, now)
5758+ if monthsAgo >= TimeframeMonths {
59 // shouldn't happen; but times are weird
60 continue
61 }
6263+ idx := monthsAgo
64 items := &timeline.ByMonth[idx].IssueEvents.Items
6566 *items = append(*items, &issue)
67 }
6869+ repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid))
70 if err != nil {
71 return nil, fmt.Errorf("error getting all repos by did: %w", err)
72 }
···77 if repo.Source != "" {
78 sourceRepo, err = GetRepoByAtUri(e, repo.Source)
79 if err != nil {
80+ // the source repo was not found, skip this bit
81+ log.Println("profile", "err", err)
82 }
83 }
8485+ monthsAgo := monthsBetween(repo.Created, now)
8687+ if monthsAgo >= TimeframeMonths {
88 // shouldn't happen; but times are weird
89 continue
90 }
9192+ idx := monthsAgo
9394 items := &timeline.ByMonth[idx].RepoEvents
95 *items = append(*items, models.RepoEvent{
···99 }
100101 return &timeline, nil
102+}
103+104+func monthsBetween(from, to time.Time) int {
105+ years := to.Year() - from.Year()
106+ months := int(to.Month() - from.Month())
107+ return years*12 + months
108}
109110func UpsertProfile(tx *sql.Tx, profile *models.Profile) error {
···207 return tx.Commit()
208}
209210+func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) {
211 var conditions []string
212 var args []any
213 for _, filter := range filters {
···237 if err != nil {
238 return nil, err
239 }
240+ defer rows.Close()
241242 profileMap := make(map[string]*models.Profile)
243 for rows.Next() {
···278 if err != nil {
279 return nil, err
280 }
281+ defer rows.Close()
282+283 idxs := make(map[string]int)
284 for did := range profileMap {
285 idxs[did] = 0
···300 if err != nil {
301 return nil, err
302 }
303+ defer rows.Close()
304+305 idxs = make(map[string]int)
306 for did := range profileMap {
307 idxs[did] = 0
···420 case models.VanityStatRepositoryCount:
421 query = `select count(id) from repos where did = ?`
422 args = append(args, did)
423+ case models.VanityStatStarCount:
424+ query = `select count(id) from stars where did = ?`
425+ args = append(args, did)
426 }
427428 var result uint64
···457 }
458459 // ensure all pinned repos are either own repos or collaborating repos
460+ repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did))
461 if err != nil {
462 log.Printf("getting repos for %s: %s", profile.Did, err)
463 }
+21-20
appview/db/pulls.go
···1314 "github.com/bluesky-social/indigo/atproto/syntax"
15 "tangled.org/core/appview/models"
016)
1718func NewPull(tx *sql.Tx, pull *models.Pull) error {
···118 return pullId - 1, err
119}
120121-func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) {
122 pulls := make(map[syntax.ATURI]*models.Pull)
123124 var conditions []string
···229 for _, p := range pulls {
230 pullAts = append(pullAts, p.AtUri())
231 }
232- submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts))
233 if err != nil {
234 return nil, fmt.Errorf("failed to get submissions: %w", err)
235 }
···241 }
242243 // collect allLabels for each issue
244- allLabels, err := GetLabels(e, FilterIn("subject", pullAts))
245 if err != nil {
246 return nil, fmt.Errorf("failed to query labels: %w", err)
247 }
···258 sourceAts = append(sourceAts, *p.PullSource.RepoAt)
259 }
260 }
261- sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts))
262 if err != nil && !errors.Is(err, sql.ErrNoRows) {
263 return nil, fmt.Errorf("failed to get source repos: %w", err)
264 }
···274 }
275 }
276277- allReferences, err := GetReferencesAll(e, FilterIn("from_at", pullAts))
278 if err != nil {
279 return nil, fmt.Errorf("failed to query reference_links: %w", err)
280 }
···295 return orderedByPullId, nil
296}
297298-func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) {
299 return GetPullsWithLimit(e, 0, filters...)
300}
301302func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
303 var ids []int64
304305- var filters []filter
306- filters = append(filters, FilterEq("state", opts.State))
307 if opts.RepoAt != "" {
308- filters = append(filters, FilterEq("repo_at", opts.RepoAt))
309 }
310311 var conditions []string
···361}
362363func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
364- pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId))
365 if err != nil {
366 return nil, err
367 }
···373}
374375// mapping from pull -> pull submissions
376-func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
377 var conditions []string
378 var args []any
379 for _, filter := range filters {
···448449 // Get comments for all submissions using GetPullComments
450 submissionIds := slices.Collect(maps.Keys(submissionMap))
451- comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds))
452 if err != nil {
453 return nil, fmt.Errorf("failed to get pull comments: %w", err)
454 }
···474 return m, nil
475}
476477-func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) {
478 var conditions []string
479 var args []any
480 for _, filter := range filters {
···542543 // collect references for each comments
544 commentAts := slices.Collect(maps.Keys(commentMap))
545- allReferencs, err := GetReferencesAll(e, FilterIn("from_at", commentAts))
546 if err != nil {
547 return nil, fmt.Errorf("failed to query reference_links: %w", err)
548 }
···708 return err
709}
710711-func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error {
712 var conditions []string
713 var args []any
714···732733// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
734// otherwise submissions are immutable
735-func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error {
736 var conditions []string
737 var args []any
738···790func GetStack(e Execer, stackId string) (models.Stack, error) {
791 unorderedPulls, err := GetPulls(
792 e,
793- FilterEq("stack_id", stackId),
794- FilterNotEq("state", models.PullDeleted),
795 )
796 if err != nil {
797 return nil, err
···835func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
836 pulls, err := GetPulls(
837 e,
838- FilterEq("stack_id", stackId),
839- FilterEq("state", models.PullDeleted),
840 )
841 if err != nil {
842 return nil, err
···1314 "github.com/bluesky-social/indigo/atproto/syntax"
15 "tangled.org/core/appview/models"
16+ "tangled.org/core/orm"
17)
1819func NewPull(tx *sql.Tx, pull *models.Pull) error {
···119 return pullId - 1, err
120}
121122+func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) {
123 pulls := make(map[syntax.ATURI]*models.Pull)
124125 var conditions []string
···230 for _, p := range pulls {
231 pullAts = append(pullAts, p.AtUri())
232 }
233+ submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts))
234 if err != nil {
235 return nil, fmt.Errorf("failed to get submissions: %w", err)
236 }
···242 }
243244 // collect allLabels for each issue
245+ allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts))
246 if err != nil {
247 return nil, fmt.Errorf("failed to query labels: %w", err)
248 }
···259 sourceAts = append(sourceAts, *p.PullSource.RepoAt)
260 }
261 }
262+ sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts))
263 if err != nil && !errors.Is(err, sql.ErrNoRows) {
264 return nil, fmt.Errorf("failed to get source repos: %w", err)
265 }
···275 }
276 }
277278+ allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts))
279 if err != nil {
280 return nil, fmt.Errorf("failed to query reference_links: %w", err)
281 }
···296 return orderedByPullId, nil
297}
298299+func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) {
300 return GetPullsWithLimit(e, 0, filters...)
301}
302303func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
304 var ids []int64
305306+ var filters []orm.Filter
307+ filters = append(filters, orm.FilterEq("state", opts.State))
308 if opts.RepoAt != "" {
309+ filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
310 }
311312 var conditions []string
···362}
363364func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
365+ pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId))
366 if err != nil {
367 return nil, err
368 }
···374}
375376// mapping from pull -> pull submissions
377+func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
378 var conditions []string
379 var args []any
380 for _, filter := range filters {
···449450 // Get comments for all submissions using GetPullComments
451 submissionIds := slices.Collect(maps.Keys(submissionMap))
452+ comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds))
453 if err != nil {
454 return nil, fmt.Errorf("failed to get pull comments: %w", err)
455 }
···475 return m, nil
476}
477478+func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) {
479 var conditions []string
480 var args []any
481 for _, filter := range filters {
···543544 // collect references for each comments
545 commentAts := slices.Collect(maps.Keys(commentMap))
546+ allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
547 if err != nil {
548 return nil, fmt.Errorf("failed to query reference_links: %w", err)
549 }
···709 return err
710}
711712+func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error {
713 var conditions []string
714 var args []any
715···733734// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
735// otherwise submissions are immutable
736+func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error {
737 var conditions []string
738 var args []any
739···791func GetStack(e Execer, stackId string) (models.Stack, error) {
792 unorderedPulls, err := GetPulls(
793 e,
794+ orm.FilterEq("stack_id", stackId),
795+ orm.FilterNotEq("state", models.PullDeleted),
796 )
797 if err != nil {
798 return nil, err
···836func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
837 pulls, err := GetPulls(
838 e,
839+ orm.FilterEq("stack_id", stackId),
840+ orm.FilterEq("state", models.PullDeleted),
841 )
842 if err != nil {
843 return nil, err
···83 Repo *Repo
84}
8586+// NOTE: This method does not include patch blob in returned atproto record
87func (p Pull) AsRecord() tangled.RepoPull {
88 var source *tangled.RepoPull_Source
89 if p.PullSource != nil {
···114 Repo: p.RepoAt.String(),
115 Branch: p.TargetBranch,
116 },
0117 Source: source,
118 }
119 return record
+5-4
appview/notifications/notifications.go
···11 "tangled.org/core/appview/oauth"
12 "tangled.org/core/appview/pages"
13 "tangled.org/core/appview/pagination"
014)
1516type Notifications struct {
···5354 total, err := db.CountNotifications(
55 n.db,
56- db.FilterEq("recipient_did", user.Did),
57 )
58 if err != nil {
59 l.Error("failed to get total notifications", "err", err)
···64 notifications, err := db.GetNotificationsWithEntities(
65 n.db,
66 page,
67- db.FilterEq("recipient_did", user.Did),
68 )
69 if err != nil {
70 l.Error("failed to get notifications", "err", err)
···9697 count, err := db.CountNotifications(
98 n.db,
99- db.FilterEq("recipient_did", user.Did),
100- db.FilterEq("read", 0),
101 )
102 if err != nil {
103 http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
···11 "tangled.org/core/appview/oauth"
12 "tangled.org/core/appview/pages"
13 "tangled.org/core/appview/pagination"
14+ "tangled.org/core/orm"
15)
1617type Notifications struct {
···5455 total, err := db.CountNotifications(
56 n.db,
57+ orm.FilterEq("recipient_did", user.Did),
58 )
59 if err != nil {
60 l.Error("failed to get total notifications", "err", err)
···65 notifications, err := db.GetNotificationsWithEntities(
66 n.db,
67 page,
68+ orm.FilterEq("recipient_did", user.Did),
69 )
70 if err != nil {
71 l.Error("failed to get notifications", "err", err)
···9798 count, err := db.CountNotifications(
99 n.db,
100+ orm.FilterEq("recipient_did", user.Did),
101+ orm.FilterEq("read", 0),
102 )
103 if err != nil {
104 http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+77-66
appview/notify/db/db.go
···3import (
4 "context"
5 "log"
6- "maps"
7 "slices"
89 "github.com/bluesky-social/indigo/atproto/syntax"
···12 "tangled.org/core/appview/models"
13 "tangled.org/core/appview/notify"
14 "tangled.org/core/idresolver"
0015)
1617const (
18- maxMentions = 5
19)
2021type databaseNotifier struct {
···42 return
43 }
44 var err error
45- repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt)))
46 if err != nil {
47 log.Printf("NewStar: failed to get repos: %v", err)
48 return
49 }
5051 actorDid := syntax.DID(star.Did)
52- recipients := []syntax.DID{syntax.DID(repo.Did)}
53 eventType := models.NotificationTypeRepoStarred
54 entityType := "repo"
55 entityId := star.RepoAt.String()
···74}
7576func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
77-78- // build the recipients list
79- // - owner of the repo
80- // - collaborators in the repo
81- var recipients []syntax.DID
82- recipients = append(recipients, syntax.DID(issue.Repo.Did))
83- collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
84 if err != nil {
85 log.Printf("failed to fetch collaborators: %v", err)
86 return
87 }
00000088 for _, c := range collaborators {
89- recipients = append(recipients, c.SubjectDid)
00090 }
9192 actorDid := syntax.DID(issue.Did)
···108 )
109 n.notifyEvent(
110 actorDid,
111- mentions,
112 models.NotificationTypeUserMentioned,
113 entityType,
114 entityId,
···119}
120121func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
122- issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt))
123 if err != nil {
124 log.Printf("NewIssueComment: failed to get issues: %v", err)
125 return
···130 }
131 issue := issues[0]
132133- var recipients []syntax.DID
134- recipients = append(recipients, syntax.DID(issue.Repo.Did))
0000135136 if comment.IsReply() {
137 // if this comment is a reply, then notify everybody in that thread
138 parentAtUri := *comment.ReplyTo
139- allThreads := issue.CommentList()
140141 // find the parent thread, and add all DIDs from here to the recipient list
142- for _, t := range allThreads {
143 if t.Self.AtUri().String() == parentAtUri {
144- recipients = append(recipients, t.Participants()...)
00145 }
146 }
147 } else {
148 // not a reply, notify just the issue author
149- recipients = append(recipients, syntax.DID(issue.Did))
0000150 }
151152 actorDid := syntax.DID(comment.Did)
···168 )
169 n.notifyEvent(
170 actorDid,
171- mentions,
172 models.NotificationTypeUserMentioned,
173 entityType,
174 entityId,
···184185func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {
186 actorDid := syntax.DID(follow.UserDid)
187- recipients := []syntax.DID{syntax.DID(follow.SubjectDid)}
188 eventType := models.NotificationTypeFollowed
189 entityType := "follow"
190 entityId := follow.UserDid
···207}
208209func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {
210- repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
211 if err != nil {
212 log.Printf("NewPull: failed to get repos: %v", err)
213 return
214 }
215-216- // build the recipients list
217- // - owner of the repo
218- // - collaborators in the repo
219- var recipients []syntax.DID
220- recipients = append(recipients, syntax.DID(repo.Did))
221- collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
222 if err != nil {
223 log.Printf("failed to fetch collaborators: %v", err)
224 return
225 }
00000226 for _, c := range collaborators {
227- recipients = append(recipients, c.SubjectDid)
228 }
229230 actorDid := syntax.DID(pull.OwnerDid)
···258 return
259 }
260261- repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt))
262 if err != nil {
263 log.Printf("NewPullComment: failed to get repos: %v", err)
264 return
···267 // build up the recipients list:
268 // - repo owner
269 // - all pull participants
270- var recipients []syntax.DID
271- recipients = append(recipients, syntax.DID(repo.Did))
272 for _, p := range pull.Participants() {
273- recipients = append(recipients, syntax.DID(p))
000274 }
275276 actorDid := syntax.DID(comment.OwnerDid)
···294 )
295 n.notifyEvent(
296 actorDid,
297- mentions,
298 models.NotificationTypeUserMentioned,
299 entityType,
300 entityId,
···321}
322323func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {
324- // build up the recipients list:
325- // - repo owner
326- // - repo collaborators
327- // - all issue participants
328- var recipients []syntax.DID
329- recipients = append(recipients, syntax.DID(issue.Repo.Did))
330- collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
331 if err != nil {
332 log.Printf("failed to fetch collaborators: %v", err)
333 return
334 }
000000335 for _, c := range collaborators {
336- recipients = append(recipients, c.SubjectDid)
337 }
338 for _, p := range issue.Participants() {
339- recipients = append(recipients, syntax.DID(p))
340 }
341342 entityType := "pull"
···366367func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
368 // Get repo details
369- repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
370 if err != nil {
371 log.Printf("NewPullState: failed to get repos: %v", err)
372 return
373 }
374375- // build up the recipients list:
376- // - repo owner
377- // - all pull participants
378- var recipients []syntax.DID
379- recipients = append(recipients, syntax.DID(repo.Did))
380- collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
381 if err != nil {
382 log.Printf("failed to fetch collaborators: %v", err)
383 return
384 }
00000385 for _, c := range collaborators {
386- recipients = append(recipients, c.SubjectDid)
387 }
388 for _, p := range pull.Participants() {
389- recipients = append(recipients, syntax.DID(p))
390 }
391392 entityType := "pull"
···422423func (n *databaseNotifier) notifyEvent(
424 actorDid syntax.DID,
425- recipients []syntax.DID,
426 eventType models.NotificationType,
427 entityType string,
428 entityId string,
···430 issueId *int64,
431 pullId *int64,
432) {
433- if eventType == models.NotificationTypeUserMentioned && len(recipients) > maxMentions {
434- recipients = recipients[:maxMentions]
0435 }
436- recipientSet := make(map[syntax.DID]struct{})
437- for _, did := range recipients {
438- // everybody except actor themselves
439- if did != actorDid {
440- recipientSet[did] = struct{}{}
441- }
442- }
443444 prefMap, err := db.GetNotificationPreferences(
445 n.db,
446- db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))),
447 )
448 if err != nil {
449 // failed to get prefs for users
···459 defer tx.Rollback()
460461 // filter based on preferences
462- for recipientDid := range recipientSet {
463 prefs, ok := prefMap[recipientDid]
464 if !ok {
465 prefs = models.DefaultNotificationPreferences(recipientDid)
···3import (
4 "context"
5 "log"
06 "slices"
78 "github.com/bluesky-social/indigo/atproto/syntax"
···11 "tangled.org/core/appview/models"
12 "tangled.org/core/appview/notify"
13 "tangled.org/core/idresolver"
14+ "tangled.org/core/orm"
15+ "tangled.org/core/sets"
16)
1718const (
19+ maxMentions = 8
20)
2122type databaseNotifier struct {
···43 return
44 }
45 var err error
46+ repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt)))
47 if err != nil {
48 log.Printf("NewStar: failed to get repos: %v", err)
49 return
50 }
5152 actorDid := syntax.DID(star.Did)
53+ recipients := sets.Singleton(syntax.DID(repo.Did))
54 eventType := models.NotificationTypeRepoStarred
55 entityType := "repo"
56 entityId := star.RepoAt.String()
···75}
7677func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
78+ collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
00000079 if err != nil {
80 log.Printf("failed to fetch collaborators: %v", err)
81 return
82 }
83+84+ // build the recipients list
85+ // - owner of the repo
86+ // - collaborators in the repo
87+ // - remove users already mentioned
88+ recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
89 for _, c := range collaborators {
90+ recipients.Insert(c.SubjectDid)
91+ }
92+ for _, m := range mentions {
93+ recipients.Remove(m)
94 }
9596 actorDid := syntax.DID(issue.Did)
···112 )
113 n.notifyEvent(
114 actorDid,
115+ sets.Collect(slices.Values(mentions)),
116 models.NotificationTypeUserMentioned,
117 entityType,
118 entityId,
···123}
124125func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
126+ issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt))
127 if err != nil {
128 log.Printf("NewIssueComment: failed to get issues: %v", err)
129 return
···134 }
135 issue := issues[0]
136137+ // build the recipients list:
138+ // - the owner of the repo
139+ // - | if the comment is a reply -> everybody on that thread
140+ // | if the comment is a top level -> just the issue owner
141+ // - remove mentioned users from the recipients list
142+ recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
143144 if comment.IsReply() {
145 // if this comment is a reply, then notify everybody in that thread
146 parentAtUri := *comment.ReplyTo
0147148 // find the parent thread, and add all DIDs from here to the recipient list
149+ for _, t := range issue.CommentList() {
150 if t.Self.AtUri().String() == parentAtUri {
151+ for _, p := range t.Participants() {
152+ recipients.Insert(p)
153+ }
154 }
155 }
156 } else {
157 // not a reply, notify just the issue author
158+ recipients.Insert(syntax.DID(issue.Did))
159+ }
160+161+ for _, m := range mentions {
162+ recipients.Remove(m)
163 }
164165 actorDid := syntax.DID(comment.Did)
···181 )
182 n.notifyEvent(
183 actorDid,
184+ sets.Collect(slices.Values(mentions)),
185 models.NotificationTypeUserMentioned,
186 entityType,
187 entityId,
···197198func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {
199 actorDid := syntax.DID(follow.UserDid)
200+ recipients := sets.Singleton(syntax.DID(follow.SubjectDid))
201 eventType := models.NotificationTypeFollowed
202 entityType := "follow"
203 entityId := follow.UserDid
···220}
221222func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {
223+ repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
224 if err != nil {
225 log.Printf("NewPull: failed to get repos: %v", err)
226 return
227 }
228+ collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
000000229 if err != nil {
230 log.Printf("failed to fetch collaborators: %v", err)
231 return
232 }
233+234+ // build the recipients list
235+ // - owner of the repo
236+ // - collaborators in the repo
237+ recipients := sets.Singleton(syntax.DID(repo.Did))
238 for _, c := range collaborators {
239+ recipients.Insert(c.SubjectDid)
240 }
241242 actorDid := syntax.DID(pull.OwnerDid)
···270 return
271 }
272273+ repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt))
274 if err != nil {
275 log.Printf("NewPullComment: failed to get repos: %v", err)
276 return
···279 // build up the recipients list:
280 // - repo owner
281 // - all pull participants
282+ // - remove those already mentioned
283+ recipients := sets.Singleton(syntax.DID(repo.Did))
284 for _, p := range pull.Participants() {
285+ recipients.Insert(syntax.DID(p))
286+ }
287+ for _, m := range mentions {
288+ recipients.Remove(m)
289 }
290291 actorDid := syntax.DID(comment.OwnerDid)
···309 )
310 n.notifyEvent(
311 actorDid,
312+ sets.Collect(slices.Values(mentions)),
313 models.NotificationTypeUserMentioned,
314 entityType,
315 entityId,
···336}
337338func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {
339+ collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
000000340 if err != nil {
341 log.Printf("failed to fetch collaborators: %v", err)
342 return
343 }
344+345+ // build up the recipients list:
346+ // - repo owner
347+ // - repo collaborators
348+ // - all issue participants
349+ recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
350 for _, c := range collaborators {
351+ recipients.Insert(c.SubjectDid)
352 }
353 for _, p := range issue.Participants() {
354+ recipients.Insert(syntax.DID(p))
355 }
356357 entityType := "pull"
···381382func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
383 // Get repo details
384+ repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
385 if err != nil {
386 log.Printf("NewPullState: failed to get repos: %v", err)
387 return
388 }
389390+ collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
00000391 if err != nil {
392 log.Printf("failed to fetch collaborators: %v", err)
393 return
394 }
395+396+ // build up the recipients list:
397+ // - repo owner
398+ // - all pull participants
399+ recipients := sets.Singleton(syntax.DID(repo.Did))
400 for _, c := range collaborators {
401+ recipients.Insert(c.SubjectDid)
402 }
403 for _, p := range pull.Participants() {
404+ recipients.Insert(syntax.DID(p))
405 }
406407 entityType := "pull"
···437438func (n *databaseNotifier) notifyEvent(
439 actorDid syntax.DID,
440+ recipients sets.Set[syntax.DID],
441 eventType models.NotificationType,
442 entityType string,
443 entityId string,
···445 issueId *int64,
446 pullId *int64,
447) {
448+ // if the user is attempting to mention >maxMentions users, this is probably spam, do not mention anybody
449+ if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions {
450+ return
451 }
452+453+ recipients.Remove(actorDid)
00000454455 prefMap, err := db.GetNotificationPreferences(
456 n.db,
457+ orm.FilterIn("user_did", slices.Collect(recipients.All())),
458 )
459 if err != nil {
460 // failed to get prefs for users
···470 defer tx.Rollback()
471472 // filter based on preferences
473+ for recipientDid := range recipients.All() {
474 prefs, ok := prefMap[recipientDid]
475 if !ok {
476 prefs = models.DefaultNotificationPreferences(recipientDid)
···30 <div class="mx-6">
31 These services may not be fully accessible until upgraded.
32 <a class="underline text-red-800 dark:text-red-200"
33- href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md">
34 Click to read the upgrade guide</a>.
35 </div>
36 </details>
···30 <div class="mx-6">
31 These services may not be fully accessible until upgraded.
32 <a class="underline text-red-800 dark:text-red-200"
33+ href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles">
34 Click to read the upgrade guide</a>.
35 </div>
36 </details>
···22 <p class="text-gray-500 dark:text-gray-400">
23 Choose a spindle to execute your workflows on. Only repository owners
24 can configure spindles. Spindles can be selfhosted,
25- <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
26 click to learn more.
27 </a>
28 </p>
···22 <p class="text-gray-500 dark:text-gray-400">
23 Choose a spindle to execute your workflows on. Only repository owners
24 can configure spindles. Spindles can be selfhosted,
25+ <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
26 click to learn more.
27 </a>
28 </p>
···11 "tangled.org/core/appview/pages"
12 "tangled.org/core/appview/pagination"
13 "tangled.org/core/consts"
014)
1516func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···2021 goodFirstIssueLabel := s.config.Label.GoodFirstIssue
2223- gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel))
24 if err != nil {
25 log.Println("failed to get gfi label def", err)
26 s.pages.Error500(w)
27 return
28 }
2930- repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel))
31 if err != nil {
32 log.Println("failed to get repo labels", err)
33 s.pages.Error503(w)
···55 pagination.Page{
56 Limit: 500,
57 },
58- db.FilterIn("repo_at", repoUris),
59- db.FilterEq("open", 1),
60 )
61 if err != nil {
62 log.Println("failed to get issues", err)
···132 }
133134 if len(uriList) > 0 {
135- allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList))
136 if err != nil {
137 log.Println("failed to fetch labels", err)
138 }
···11 "tangled.org/core/appview/pages"
12 "tangled.org/core/appview/pagination"
13 "tangled.org/core/consts"
14+ "tangled.org/core/orm"
15)
1617func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···2122 goodFirstIssueLabel := s.config.Label.GoodFirstIssue
2324+ gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel))
25 if err != nil {
26 log.Println("failed to get gfi label def", err)
27 s.pages.Error500(w)
28 return
29 }
3031+ repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel))
32 if err != nil {
33 log.Println("failed to get repo labels", err)
34 s.pages.Error503(w)
···56 pagination.Page{
57 Limit: 500,
58 },
59+ orm.FilterIn("repo_at", repoUris),
60+ orm.FilterEq("open", 1),
61 )
62 if err != nil {
63 log.Println("failed to get issues", err)
···133 }
134135 if len(uriList) > 0 {
136+ allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList))
137 if err != nil {
138 log.Println("failed to fetch labels", err)
139 }
+17
appview/state/git_http.go
···2526}
270000000000000000028func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
29 user, ok := r.Context().Value("resolvedId").(identity.Identity)
30 if !ok {
···2526}
2728+func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
29+ user, ok := r.Context().Value("resolvedId").(identity.Identity)
30+ if !ok {
31+ http.Error(w, "failed to resolve user", http.StatusInternalServerError)
32+ return
33+ }
34+ repo := r.Context().Value("repo").(*models.Repo)
35+36+ scheme := "https"
37+ if s.config.Core.Dev {
38+ scheme = "http"
39+ }
40+41+ targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42+ s.proxyRequest(w, r, targetURL)
43+}
44+45func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
46 user, ok := r.Context().Value("resolvedId").(identity.Identity)
47 if !ok {
···1+---
2+title: Tangled docs
3+author: The Tangled Contributors
4+date: Sun, 21 Dec 2025
5+---
6+7+# Introduction
8+9+Tangled is a decentralized code hosting and collaboration
10+platform. Every component of Tangled is open-source and
11+self-hostable. [tangled.org](https://tangled.org) also
12+provides hosting and CI services that are free to use.
13+14+There are several models for decentralized code
15+collaboration platforms, ranging from ActivityPub's
16+(Forgejo) federated model, to Radicle's entirely P2P model.
17+Our approach attempts to be the best of both worlds by
18+adopting the AT Protocol—a protocol for building decentralized
19+social applications with a central identity.
20+21+Our approach to this is the idea of "knots". Knots are
22+lightweight, headless servers that enable users to host Git
23+repositories with ease. Knots are designed for either single
24+or multi-tenant use which is perfect for self-hosting on a
25+Raspberry Pi at home, or larger "community" servers. By
26+default, Tangled provides managed knots where you can host
27+your repositories for free.
28+29+The appview at tangled.org acts as a consolidated "view"
30+into the whole network, allowing users to access, clone and
31+contribute to repositories hosted across different knots
32+seamlessly.
33+34+# Quick start guide
35+36+## Login or sign up
37+38+You can [login](https://tangled.org) by using your AT Protocol
39+account. If you are unclear on what that means, simply head
40+to the [signup](https://tangled.org/signup) page and create
41+an account. By doing so, you will be choosing Tangled as
42+your account provider (you will be granted a handle of the
43+form `user.tngl.sh`).
44+45+In the AT Protocol network, users are free to choose their account
46+provider (known as a "Personal Data Service", or PDS), and
47+login to applications that support AT accounts.
48+49+You can think of it as "one account for all of the atmosphere"!
50+51+If you already have an AT account (you may have one if you
52+signed up to Bluesky, for example), you can login with the
53+same handle on Tangled (so just use `user.bsky.social` on
54+the login page).
55+56+## Add an SSH key
57+58+Once you are logged in, you can start creating repositories
59+and pushing code. Tangled supports pushing git repositories
60+over SSH.
61+62+First, you'll need to generate an SSH key if you don't
63+already have one:
64+65+```bash
66+ssh-keygen -t ed25519 -C "foo@bar.com"
67+```
68+69+When prompted, save the key to the default location
70+(`~/.ssh/id_ed25519`) and optionally set a passphrase.
71+72+Copy your public key to your clipboard:
73+74+```bash
75+# on X11
76+cat ~/.ssh/id_ed25519.pub | xclip -sel c
77+78+# on wayland
79+cat ~/.ssh/id_ed25519.pub | wl-copy
80+81+# on macos
82+cat ~/.ssh/id_ed25519.pub | pbcopy
83+```
84+85+Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
86+paste your public key, give it a descriptive name, and hit
87+save.
88+89+## Create a repository
90+91+Once your SSH key is added, create your first repository:
92+93+1. Hit the green `+` icon on the topbar, and select
94+ repository
95+2. Enter a repository name
96+3. Add a description
97+4. Choose a knotserver to host this repository on
98+5. Hit create
99+100+Knots are self-hostable, lightweight Git servers that can
101+host your repository. Unlike traditional code forges, your
102+code can live on any server. Read the [Knots](TODO) section
103+for more.
104+105+## Configure SSH
106+107+To ensure Git uses the correct SSH key and connects smoothly
108+to Tangled, add this configuration to your `~/.ssh/config`
109+file:
110+111+```
112+Host tangled.org
113+ Hostname tangled.org
114+ User git
115+ IdentityFile ~/.ssh/id_ed25519
116+ AddressFamily inet
117+```
118+119+This tells SSH to use your specific key when connecting to
120+Tangled and prevents authentication issues if you have
121+multiple SSH keys.
122+123+Note that this configuration only works for knotservers that
124+are hosted by tangled.org. If you use a custom knot, refer
125+to the [Knots](TODO) section.
126+127+## Push your first repository
128+129+Initialize a new Git repository:
130+131+```bash
132+mkdir my-project
133+cd my-project
134+135+git init
136+echo "# My Project" > README.md
137+```
138+139+Add some content and push!
140+141+```bash
142+git add README.md
143+git commit -m "Initial commit"
144+git remote add origin git@tangled.org:user.tngl.sh/my-project
145+git push -u origin main
146+```
147+148+That's it! Your code is now hosted on Tangled.
149+150+## Migrating an existing repository
151+152+Moving your repositories from GitHub, GitLab, Bitbucket, or
153+any other Git forge to Tangled is straightforward. You'll
154+simply change your repository's remote URL. At the moment,
155+Tangled does not have any tooling to migrate data such as
156+GitHub issues or pull requests.
157+158+First, create a new repository on tangled.org as described
159+in the [Quick Start Guide](#create-a-repository).
160+161+Navigate to your existing local repository:
162+163+```bash
164+cd /path/to/your/existing/repo
165+```
166+167+You can inspect your existing Git remote like so:
168+169+```bash
170+git remote -v
171+```
172+173+You'll see something like:
174+175+```
176+origin git@github.com:username/my-project (fetch)
177+origin git@github.com:username/my-project (push)
178+```
179+180+Update the remote URL to point to tangled:
181+182+```bash
183+git remote set-url origin git@tangled.org:user.tngl.sh/my-project
184+```
185+186+Verify the change:
187+188+```bash
189+git remote -v
190+```
191+192+You should now see:
193+194+```
195+origin git@tangled.org:user.tngl.sh/my-project (fetch)
196+origin git@tangled.org:user.tngl.sh/my-project (push)
197+```
198+199+Push all your branches and tags to Tangled:
200+201+```bash
202+git push -u origin --all
203+git push -u origin --tags
204+```
205+206+Your repository is now migrated to Tangled! All commit
207+history, branches, and tags have been preserved.
208+209+## Mirroring a repository to Tangled
210+211+If you want to maintain your repository on multiple forges
212+simultaneously, for example, keeping your primary repository
213+on GitHub while mirroring to Tangled for backup or
214+redundancy, you can do so by adding multiple remotes.
215+216+You can configure your local repository to push to both
217+Tangled and, say, GitHub. You may already have the following
218+setup:
219+220+```
221+$ git remote -v
222+origin git@github.com:username/my-project (fetch)
223+origin git@github.com:username/my-project (push)
224+```
225+226+Now add Tangled as an additional push URL to the same
227+remote:
228+229+```bash
230+git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
231+```
232+233+You also need to re-add the original URL as a push
234+destination (Git replaces the push URL when you use `--add`
235+the first time):
236+237+```bash
238+git remote set-url --add --push origin git@github.com:username/my-project
239+```
240+241+Verify your configuration:
242+243+```
244+$ git remote -v
245+origin git@github.com:username/repo (fetch)
246+origin git@tangled.org:username/my-project (push)
247+origin git@github.com:username/repo (push)
248+```
249+250+Notice that there's one fetch URL (the primary remote) and
251+two push URLs. Now, whenever you push, Git will
252+automatically push to both remotes:
253+254+```bash
255+git push origin main
256+```
257+258+This single command pushes your `main` branch to both GitHub
259+and Tangled simultaneously.
260+261+To push all branches and tags:
262+263+```bash
264+git push origin --all
265+git push origin --tags
266+```
267+268+If you prefer more control over which remote you push to,
269+you can maintain separate remotes:
270+271+```bash
272+git remote add github git@github.com:username/my-project
273+git remote add tangled git@tangled.org:username/my-project
274+```
275+276+Then push to each explicitly:
277+278+```bash
279+git push github main
280+git push tangled main
281+```
282+283+# Knot self-hosting guide
284+285+So you want to run your own knot server? Great! Here are a few prerequisites:
286+287+1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
288+2. A (sub)domain name. People generally use `knot.example.com`.
289+3. A valid SSL certificate for your domain.
290+291+## NixOS
292+293+Refer to the [knot
294+module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
295+for a full list of options. Sample configurations:
296+297+- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
298+- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
299+300+## Docker
301+302+Refer to
303+[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
304+Note that this is community maintained.
305+306+## Manual setup
307+308+First, clone this repository:
309+310+```
311+git clone https://tangled.org/@tangled.org/core
312+```
313+314+Then, build the `knot` CLI. This is the knot administration
315+and operation tool. For the purpose of this guide, we're
316+only concerned with these subcommands:
317+318+ * `knot server`: the main knot server process, typically
319+ run as a supervised service
320+ * `knot guard`: handles role-based access control for git
321+ over SSH (you'll never have to run this yourself)
322+ * `knot keys`: fetches SSH keys associated with your knot;
323+ we'll use this to generate the SSH
324+ `AuthorizedKeysCommand`
325+326+```
327+cd core
328+export CGO_ENABLED=1
329+go build -o knot ./cmd/knot
330+```
331+332+Next, move the `knot` binary to a location owned by `root` --
333+`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
334+335+```
336+sudo mv knot /usr/local/bin/knot
337+sudo chown root:root /usr/local/bin/knot
338+```
339+340+This is necessary because SSH `AuthorizedKeysCommand` requires [really
341+specific permissions](https://stackoverflow.com/a/27638306). The
342+`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
343+retrieve a user's public SSH keys dynamically for authentication. Let's
344+set that up.
345+346+```
347+sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
348+Match User git
349+ AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
350+ AuthorizedKeysCommandUser nobody
351+EOF
352+```
353+354+Then, reload `sshd`:
355+356+```
357+sudo systemctl reload ssh
358+```
359+360+Next, create the `git` user. We'll use the `git` user's home directory
361+to store repositories:
362+363+```
364+sudo adduser git
365+```
366+367+Create `/home/git/.knot.env` with the following, updating the values as
368+necessary. The `KNOT_SERVER_OWNER` should be set to your
369+DID, you can find your DID in the [Settings](https://tangled.org/settings) page.
370+371+```
372+KNOT_REPO_SCAN_PATH=/home/git
373+KNOT_SERVER_HOSTNAME=knot.example.com
374+APPVIEW_ENDPOINT=https://tangled.org
375+KNOT_SERVER_OWNER=did:plc:foobar
376+KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
377+KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
378+```
379+380+If you run a Linux distribution that uses systemd, you can use the provided
381+service file to run the server. Copy
382+[`knotserver.service`](/systemd/knotserver.service)
383+to `/etc/systemd/system/`. Then, run:
384+385+```
386+systemctl enable knotserver
387+systemctl start knotserver
388+```
389+390+The last step is to configure a reverse proxy like Nginx or Caddy to front your
391+knot. Here's an example configuration for Nginx:
392+393+```
394+server {
395+ listen 80;
396+ listen [::]:80;
397+ server_name knot.example.com;
398+399+ location / {
400+ proxy_pass http://localhost:5555;
401+ proxy_set_header Host $host;
402+ proxy_set_header X-Real-IP $remote_addr;
403+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
404+ proxy_set_header X-Forwarded-Proto $scheme;
405+ }
406+407+ # wss endpoint for git events
408+ location /events {
409+ proxy_set_header X-Forwarded-For $remote_addr;
410+ proxy_set_header Host $http_host;
411+ proxy_set_header Upgrade websocket;
412+ proxy_set_header Connection Upgrade;
413+ proxy_pass http://localhost:5555;
414+ }
415+ # additional config for SSL/TLS go here.
416+}
417+418+```
419+420+Remember to use Let's Encrypt or similar to procure a certificate for your
421+knot domain.
422+423+You should now have a running knot server! You can finalize
424+your registration by hitting the `verify` button on the
425+[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
426+a record on your PDS to announce the existence of the knot.
427+428+### Custom paths
429+430+(This section applies to manual setup only. Docker users should edit the mounts
431+in `docker-compose.yml` instead.)
432+433+Right now, the database and repositories of your knot lives in `/home/git`. You
434+can move these paths if you'd like to store them in another folder. Be careful
435+when adjusting these paths:
436+437+* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
438+any possible side effects. Remember to restart it once you're done.
439+* Make backups before moving in case something goes wrong.
440+* Make sure the `git` user can read and write from the new paths.
441+442+#### Database
443+444+As an example, let's say the current database is at `/home/git/knotserver.db`,
445+and we want to move it to `/home/git/database/knotserver.db`.
446+447+Copy the current database to the new location. Make sure to copy the `.db-shm`
448+and `.db-wal` files if they exist.
449+450+```
451+mkdir /home/git/database
452+cp /home/git/knotserver.db* /home/git/database
453+```
454+455+In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
456+the new file path (_not_ the directory):
457+458+```
459+KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
460+```
461+462+#### Repositories
463+464+As an example, let's say the repositories are currently in `/home/git`, and we
465+want to move them into `/home/git/repositories`.
466+467+Create the new folder, then move the existing repositories (if there are any):
468+469+```
470+mkdir /home/git/repositories
471+# move all DIDs into the new folder; these will vary for you!
472+mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
473+```
474+475+In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
476+to the new directory:
477+478+```
479+KNOT_REPO_SCAN_PATH=/home/git/repositories
480+```
481+482+Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
483+repository path:
484+485+```
486+sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
487+Match User git
488+ AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
489+ AuthorizedKeysCommandUser nobody
490+EOF
491+```
492+493+Make sure to restart your SSH server!
494+495+#### MOTD (message of the day)
496+497+To configure the MOTD used ("Welcome to this knot!" by default), edit the
498+`/home/git/motd` file:
499+500+```
501+printf "Hi from this knot!\n" > /home/git/motd
502+```
503+504+Note that you should add a newline at the end if setting a non-empty message
505+since the knot won't do this for you.
506+507+# Spindles
508+509+## Pipelines
510+511+Spindle workflows allow you to write CI/CD pipelines in a
512+simple format. They're located in the `.tangled/workflows`
513+directory at the root of your repository, and are defined
514+using YAML.
515+516+The fields are:
517+518+- [Trigger](#trigger): A **required** field that defines
519+ when a workflow should be triggered.
520+- [Engine](#engine): A **required** field that defines which
521+ engine a workflow should run on.
522+- [Clone options](#clone-options): An **optional** field
523+ that defines how the repository should be cloned.
524+- [Dependencies](#dependencies): An **optional** field that
525+ allows you to list dependencies you may need.
526+- [Environment](#environment): An **optional** field that
527+ allows you to define environment variables.
528+- [Steps](#steps): An **optional** field that allows you to
529+ define what steps should run in the workflow.
530+531+### Trigger
532+533+The first thing to add to a workflow is the trigger, which
534+defines when a workflow runs. This is defined using a `when`
535+field, which takes in a list of conditions. Each condition
536+has the following fields:
537+538+- `event`: This is a **required** field that defines when
539+ your workflow should run. It's a list that can take one or
540+ more of the following values:
541+ - `push`: The workflow should run every time a commit is
542+ pushed to the repository.
543+ - `pull_request`: The workflow should run every time a
544+ pull request is made or updated.
545+ - `manual`: The workflow can be triggered manually.
546+- `branch`: Defines which branches the workflow should run
547+ for. If used with the `push` event, commits to the
548+ branch(es) listed here will trigger the workflow. If used
549+ with the `pull_request` event, updates to pull requests
550+ targeting the branch(es) listed here will trigger the
551+ workflow. This field has no effect with the `manual`
552+ event. Supports glob patterns using `*` and `**` (e.g.,
553+ `main`, `develop`, `release-*`). Either `branch` or `tag`
554+ (or both) must be specified for `push` events.
555+- `tag`: Defines which tags the workflow should run for.
556+ Only used with the `push` event - when tags matching the
557+ pattern(s) listed here are pushed, the workflow will
558+ trigger. This field has no effect with `pull_request` or
559+ `manual` events. Supports glob patterns using `*` and `**`
560+ (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
561+ `tag` (or both) must be specified for `push` events.
562+563+For example, if you'd like to define a workflow that runs
564+when commits are pushed to the `main` and `develop`
565+branches, or when pull requests that target the `main`
566+branch are updated, or manually, you can do so with:
567+568+```yaml
569+when:
570+ - event: ["push", "manual"]
571+ branch: ["main", "develop"]
572+ - event: ["pull_request"]
573+ branch: ["main"]
574+```
575+576+You can also trigger workflows on tag pushes. For instance,
577+to run a deployment workflow when tags matching `v*` are
578+pushed:
579+580+```yaml
581+when:
582+ - event: ["push"]
583+ tag: ["v*"]
584+```
585+586+You can even combine branch and tag patterns in a single
587+constraint (the workflow triggers if either matches):
588+589+```yaml
590+when:
591+ - event: ["push"]
592+ branch: ["main", "release-*"]
593+ tag: ["v*", "stable"]
594+```
595+596+### Engine
597+598+Next is the engine on which the workflow should run, defined
599+using the **required** `engine` field. The currently
600+supported engines are:
601+602+- `nixery`: This uses an instance of
603+ [Nixery](https://nixery.dev) to run steps, which allows
604+ you to add [dependencies](#dependencies) from
605+ Nixpkgs (https://github.com/NixOS/nixpkgs). You can
606+ search for packages on https://search.nixos.org, and
607+ there's a pretty good chance the package(s) you're looking
608+ for will be there.
609+610+Example:
611+612+```yaml
613+engine: "nixery"
614+```
615+616+### Clone options
617+618+When a workflow starts, the first step is to clone the
619+repository. You can customize this behavior using the
620+**optional** `clone` field. It has the following fields:
621+622+- `skip`: Setting this to `true` will skip cloning the
623+ repository. This can be useful if your workflow is doing
624+ something that doesn't require anything from the
625+ repository itself. This is `false` by default.
626+- `depth`: This sets the number of commits, or the "clone
627+ depth", to fetch from the repository. For example, if you
628+ set this to 2, the last 2 commits will be fetched. By
629+ default, the depth is set to 1, meaning only the most
630+ recent commit will be fetched, which is the commit that
631+ triggered the workflow.
632+- `submodules`: If you use Git submodules
633+ (https://git-scm.com/book/en/v2/Git-Tools-Submodules)
634+ in your repository, setting this field to `true` will
635+ recursively fetch all submodules. This is `false` by
636+ default.
637+638+The default settings are:
639+640+```yaml
641+clone:
642+ skip: false
643+ depth: 1
644+ submodules: false
645+```
646+647+### Dependencies
648+649+Usually when you're running a workflow, you'll need
650+additional dependencies. The `dependencies` field lets you
651+define which dependencies to get, and from where. It's a
652+key-value map, with the key being the registry to fetch
653+dependencies from, and the value being the list of
654+dependencies to fetch.
655+656+Say you want to fetch Node.js and Go from `nixpkgs`, and a
657+package called `my_pkg` you've made from your own registry
658+at your repository at
659+`https://tangled.org/@example.com/my_pkg`. You can define
660+those dependencies like so:
661+662+```yaml
663+dependencies:
664+ # nixpkgs
665+ nixpkgs:
666+ - nodejs
667+ - go
668+ # custom registry
669+ git+https://tangled.org/@example.com/my_pkg:
670+ - my_pkg
671+```
672+673+Now these dependencies are available to use in your
674+workflow!
675+676+### Environment
678+The `environment` field allows you to define environment
679+variables that will be available throughout the entire
680+workflow. **Do not put secrets here, these environment
681+variables are visible to anyone viewing the repository. You
682+can add secrets for pipelines in your repository's
683+settings.**
684+685+Example:
686+687+```yaml
688+environment:
689+ GOOS: "linux"
690+ GOARCH: "arm64"
691+ NODE_ENV: "production"
692+ MY_ENV_VAR: "MY_ENV_VALUE"
693+```
694+695+### Steps
696+697+The `steps` field allows you to define what steps should run
698+in the workflow. It's a list of step objects, each with the
699+following fields:
700+701+- `name`: This field allows you to give your step a name.
702+ This name is visible in your workflow runs, and is used to
703+ describe what the step is doing.
704+- `command`: This field allows you to define a command to
705+ run in that step. The step is run in a Bash shell, and the
706+ logs from the command will be visible in the pipelines
707+ page on the Tangled website. The
708+ [dependencies](#dependencies) you added will be available
709+ to use here.
710+- `environment`: Similar to the global
711+ [environment](#environment) config, this **optional**
712+ field is a key-value map that allows you to set
713+ environment variables for the step. **Do not put secrets
714+ here, these environment variables are visible to anyone
715+ viewing the repository. You can add secrets for pipelines
716+ in your repository's settings.**
717+718+Example:
719+720+```yaml
721+steps:
722+ - name: "Build backend"
723+ command: "go build"
724+ environment:
725+ GOOS: "darwin"
726+ GOARCH: "arm64"
727+ - name: "Build frontend"
728+ command: "npm run build"
729+ environment:
730+ NODE_ENV: "production"
731+```
732+733+### Complete workflow
734+735+```yaml
736+# .tangled/workflows/build.yml
737+738+when:
739+ - event: ["push", "manual"]
740+ branch: ["main", "develop"]
741+ - event: ["pull_request"]
742+ branch: ["main"]
743+744+engine: "nixery"
745+746+# using the default values
747+clone:
748+ skip: false
749+ depth: 1
750+ submodules: false
751+752+dependencies:
753+ # nixpkgs
754+ nixpkgs:
755+ - nodejs
756+ - go
757+ # custom registry
758+ git+https://tangled.org/@example.com/my_pkg:
759+ - my_pkg
760+761+environment:
762+ GOOS: "linux"
763+ GOARCH: "arm64"
764+ NODE_ENV: "production"
765+ MY_ENV_VAR: "MY_ENV_VALUE"
766+767+steps:
768+ - name: "Build backend"
769+ command: "go build"
770+ environment:
771+ GOOS: "darwin"
772+ GOARCH: "arm64"
773+ - name: "Build frontend"
774+ command: "npm run build"
775+ environment:
776+ NODE_ENV: "production"
777+```
778+779+If you want another example of a workflow, you can look at
780+the one [Tangled uses to build the
781+project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
782+783+## Self-hosting guide
784+785+### Prerequisites
786+787+* Go
788+* Docker (the only supported backend currently)
789+790+### Configuration
791+792+Spindle is configured using environment variables. The following environment variables are available:
793+794+* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
795+* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
796+* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
797+* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
798+* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
799+* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
800+* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
801+* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
802+* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
803+804+### Running spindle
805+806+1. **Set the environment variables.** For example:
807+808+ ```shell
809+ export SPINDLE_SERVER_HOSTNAME="your-hostname"
810+ export SPINDLE_SERVER_OWNER="your-did"
811+ ```
812+813+2. **Build the Spindle binary.**
814+815+ ```shell
816+ cd core
817+ go mod download
818+ go build -o cmd/spindle/spindle cmd/spindle/main.go
819+ ```
820+821+3. **Create the log directory.**
822+823+ ```shell
824+ sudo mkdir -p /var/log/spindle
825+ sudo chown $USER:$USER -R /var/log/spindle
826+ ```
827+828+4. **Run the Spindle binary.**
829+830+ ```shell
831+ ./cmd/spindle/spindle
832+ ```
833+834+Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
835+836+## Architecture
837+838+Spindle is a small CI runner service. Here's a high-level overview of how it operates:
839+840+* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
841+[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
842+* When a new repo record comes through (typically when you add a spindle to a
843+repo from the settings), spindle then resolves the underlying knot and
844+subscribes to repo events (see:
845+[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
846+* The spindle engine then handles execution of the pipeline, with results and
847+logs beamed on the spindle event stream over WebSocket
848+849+### The engine
850+851+At present, the only supported backend is Docker (and Podman, if Docker
852+compatibility is enabled, so that `/run/docker.sock` is created). spindle
853+executes each step in the pipeline in a fresh container, with state persisted
854+across steps within the `/tangled/workspace` directory.
855+856+The base image for the container is constructed on the fly using
857+[Nixery](https://nixery.dev), which is handy for caching layers for frequently
858+used packages.
859+860+The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
861+862+## Secrets with openbao
863+864+This document covers setting up spindle to use OpenBao for secrets
865+management via OpenBao Proxy instead of the default SQLite backend.
866+867+### Overview
868+869+Spindle now uses OpenBao Proxy for secrets management. The proxy handles
870+authentication automatically using AppRole credentials, while spindle
871+connects to the local proxy instead of directly to the OpenBao server.
872+873+This approach provides better security, automatic token renewal, and
874+simplified application code.
875+876+### Installation
877+878+Install OpenBao from Nixpkgs:
879+880+```bash
881+nix shell nixpkgs#openbao # for a local server
882+```
883+884+### Setup
885+886+The setup process is documented for both local development and production.
887+888+#### Local development
889+890+Start OpenBao in dev mode:
891+892+```bash
893+bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
894+```
895+896+This starts OpenBao on `http://localhost:8201` with a root token.
897+898+Set up environment for bao CLI:
899+900+```bash
901+export BAO_ADDR=http://localhost:8201
902+export BAO_TOKEN=root
903+```
904+905+#### Production
906+907+You would typically use a systemd service with a
908+configuration file. Refer to
909+[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
910+for how this can be achieved using Nix.
911+912+Then, initialize the bao server:
913+914+```bash
915+bao operator init -key-shares=1 -key-threshold=1
916+```
917+918+This will print out an unseal key and a root key. Save them
919+somewhere (like a password manager). Then unseal the vault
920+to begin setting it up:
921+922+```bash
923+bao operator unseal <unseal_key>
924+```
925+926+All steps below remain the same across both dev and
927+production setups.
928+929+#### Configure openbao server
930+931+Create the spindle KV mount:
932+933+```bash
934+bao secrets enable -path=spindle -version=2 kv
935+```
936+937+Set up AppRole authentication and policy:
938+939+Create a policy file `spindle-policy.hcl`:
940+941+```hcl
942+# Full access to spindle KV v2 data
943+path "spindle/data/*" {
944+ capabilities = ["create", "read", "update", "delete"]
945+}
946+947+# Access to metadata for listing and management
948+path "spindle/metadata/*" {
949+ capabilities = ["list", "read", "delete", "update"]
950+}
951+952+# Allow listing at root level
953+path "spindle/" {
954+ capabilities = ["list"]
955+}
956+957+# Required for connection testing and health checks
958+path "auth/token/lookup-self" {
959+ capabilities = ["read"]
960+}
961+```
962+963+Apply the policy and create an AppRole:
964+965+```bash
966+bao policy write spindle-policy spindle-policy.hcl
967+bao auth enable approle
968+bao write auth/approle/role/spindle \
969+ token_policies="spindle-policy" \
970+ token_ttl=1h \
971+ token_max_ttl=4h \
972+ bind_secret_id=true \
973+ secret_id_ttl=0 \
974+ secret_id_num_uses=0
975+```
976+977+Get the credentials:
978+979+```bash
980+# Get role ID (static)
981+ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
982+983+# Generate secret ID
984+SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
985+986+echo "Role ID: $ROLE_ID"
987+echo "Secret ID: $SECRET_ID"
988+```
989+990+#### Create proxy configuration
991+992+Create the credential files:
993+994+```bash
995+# Create directory for OpenBao files
996+mkdir -p /tmp/openbao
997+998+# Save credentials
999+echo "$ROLE_ID" > /tmp/openbao/role-id
1000+echo "$SECRET_ID" > /tmp/openbao/secret-id
1001+chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1002+```
1003+1004+Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1005+1006+```hcl
1007+# OpenBao server connection
1008+vault {
1009+ address = "http://localhost:8200"
1010+}
1011+1012+# Auto-Auth using AppRole
1013+auto_auth {
1014+ method "approle" {
1015+ mount_path = "auth/approle"
1016+ config = {
1017+ role_id_file_path = "/tmp/openbao/role-id"
1018+ secret_id_file_path = "/tmp/openbao/secret-id"
1019+ }
1020+ }
1021+1022+ # Optional: write token to file for debugging
1023+ sink "file" {
1024+ config = {
1025+ path = "/tmp/openbao/token"
1026+ mode = 0640
1027+ }
1028+ }
1029+}
1030+1031+# Proxy listener for spindle
1032+listener "tcp" {
1033+ address = "127.0.0.1:8201"
1034+ tls_disable = true
1035+}
1036+1037+# Enable API proxy with auto-auth token
1038+api_proxy {
1039+ use_auto_auth_token = true
1040+}
1041+1042+# Enable response caching
1043+cache {
1044+ use_auto_auth_token = true
1045+}
1046+1047+# Logging
1048+log_level = "info"
1049+```
1050+1051+#### Start the proxy
1052+1053+Start OpenBao Proxy:
1054+1055+```bash
1056+bao proxy -config=/tmp/openbao/proxy.hcl
1057+```
1058+1059+The proxy will authenticate with OpenBao and start listening on
1060+`127.0.0.1:8201`.
1061+1062+#### Configure spindle
1063+1064+Set these environment variables for spindle:
1065+1066+```bash
1067+export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1068+export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1069+export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1070+```
1071+1072+On startup, spindle will now connect to the local proxy,
1073+which handles all authentication automatically.
1074+1075+### Production setup for proxy
1076+1077+For production, you'll want to run the proxy as a service:
1078+1079+Place your production configuration in
1080+`/etc/openbao/proxy.hcl` with proper TLS settings for the
1081+vault connection.
1082+1083+### Verifying setup
1084+1085+Test the proxy directly:
1086+1087+```bash
1088+# Check proxy health
1089+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1090+1091+# Test token lookup through proxy
1092+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1093+```
1094+1095+Test OpenBao operations through the server:
1096+1097+```bash
1098+# List all secrets
1099+bao kv list spindle/
1100+1101+# Add a test secret via the spindle API, then check it exists
1102+bao kv list spindle/repos/
1103+1104+# Get a specific secret
1105+bao kv get spindle/repos/your_repo_path/SECRET_NAME
1106+```
1107+1108+### How it works
1109+1110+- Spindle connects to OpenBao Proxy on localhost (typically
1111+ port 8200 or 8201)
1112+- The proxy authenticates with OpenBao using AppRole
1113+ credentials
1114+- All spindle requests go through the proxy, which injects
1115+ authentication tokens
1116+- Secrets are stored at
1117+ `spindle/repos/{sanitized_repo_path}/{secret_key}`
1118+- Repository paths like `did:plc:alice/myrepo` become
1119+ `did_plc_alice_myrepo`
1120+- The proxy handles all token renewal automatically
1121+- Spindle no longer manages tokens or authentication
1122+ directly
1123+1124+### Troubleshooting
1125+1126+**Connection refused**: Check that the OpenBao Proxy is
1127+running and listening on the configured address.
1128+1129+**403 errors**: Verify the AppRole credentials are correct
1130+and the policy has the necessary permissions.
1131+1132+**404 route errors**: The spindle KV mount probably doesn't
1133+exist—run the mount creation step again.
1134+1135+**Proxy authentication failures**: Check the proxy logs and
1136+verify the role-id and secret-id files are readable and
1137+contain valid credentials.
1138+1139+**Secret not found after writing**: This can indicate policy
1140+permission issues. Verify the policy includes both
1141+`spindle/data/*` and `spindle/metadata/*` paths with
1142+appropriate capabilities.
1143+1144+Check proxy logs:
1145+1146+```bash
1147+# If running as systemd service
1148+journalctl -u openbao-proxy -f
1149+1150+# If running directly, check the console output
1151+```
1152+1153+Test AppRole authentication manually:
1154+1155+```bash
1156+bao write auth/approle/login \
1157+ role_id="$(cat /tmp/openbao/role-id)" \
1158+ secret_id="$(cat /tmp/openbao/secret-id)"
1159+```
1160+1161+# Migrating knots and spindles
1162+1163+Sometimes, non-backwards compatible changes are made to the
1164+knot/spindle XRPC APIs. If you host a knot or a spindle, you
1165+will need to follow this guide to upgrade. Typically, this
1166+only requires you to deploy the newest version.
1167+1168+This document is laid out in reverse-chronological order.
1169+Newer migration guides are listed first, and older guides
1170+are further down the page.
1171+1172+## Upgrading from v1.8.x
1173+1174+After v1.8.2, the HTTP API for knots and spindles has been
1175+deprecated and replaced with XRPC. Repositories on outdated
1176+knots will not be viewable from the appview. Upgrading is
1177+straightforward however.
1178+1179+For knots:
1180+1181+- Upgrade to the latest tag (v1.9.0 or above)
1182+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1183+ hit the "retry" button to verify your knot
1184+1185+For spindles:
1186+1187+- Upgrade to the latest tag (v1.9.0 or above)
1188+- Head to the [spindle
1189+ dashboard](https://tangled.org/settings/spindles) and hit the
1190+ "retry" button to verify your spindle
1191+1192+## Upgrading from v1.7.x
1193+1194+After v1.7.0, knot secrets have been deprecated. You no
1195+longer need a secret from the appview to run a knot. All
1196+authorized commands to knots are managed via [Inter-Service
1197+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
1198+Knots will be read-only until upgraded.
1199+1200+Upgrading is quite easy, in essence:
1201+1202+- `KNOT_SERVER_SECRET` is no more, you can remove this
1203+ environment variable entirely
1204+- `KNOT_SERVER_OWNER` is now required on boot, set this to
1205+ your DID. You can find your DID in the
1206+ [settings](https://tangled.org/settings) page.
1207+- Restart your knot once you have replaced the environment
1208+ variable
1209+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1210+ hit the "retry" button to verify your knot. This simply
1211+ writes a `sh.tangled.knot` record to your PDS.
1212+1213+If you use the nix module, simply bump the flake to the
1214+latest revision, and change your config block like so:
1215+1216+```diff
1217+ services.tangled.knot = {
1218+ enable = true;
1219+ server = {
1220+- secretFile = /path/to/secret;
1221++ owner = "did:plc:foo";
1222+ };
1223+ };
1224+```
1225+1226+# Hacking on Tangled
1227+1228+We highly recommend [installing
1229+Nix](https://nixos.org/download/) (the package manager)
1230+before working on the codebase. The Nix flake provides a lot
1231+of helpers to get started and most importantly, builds and
1232+dev shells are entirely deterministic.
1233+1234+To set up your dev environment:
1235+1236+```bash
1237+nix develop
1238+```
1239+1240+Non-Nix users can look at the `devShell` attribute in the
1241+`flake.nix` file to determine necessary dependencies.
1242+1243+## Running the appview
1244+1245+The Nix flake also exposes a few `app` attributes (run `nix
1246+flake show` to see a full list of what the flake provides),
1247+one of the apps runs the appview with the `air`
1248+live-reloader:
1249+1250+```bash
1251+TANGLED_DEV=true nix run .#watch-appview
1252+1253+# TANGLED_DB_PATH might be of interest to point to
1254+# different sqlite DBs
1255+1256+# in a separate shell, you can live-reload tailwind
1257+nix run .#watch-tailwind
1258+```
1259+1260+To authenticate with the appview, you will need Redis and
1261+OAuth JWKs to be set up:
1262+1263+```
1264+# OAuth JWKs should already be set up by the Nix devshell:
1265+echo $TANGLED_OAUTH_CLIENT_SECRET
1266+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1267+1268+echo $TANGLED_OAUTH_CLIENT_KID
1269+1761667908
1270+1271+# if not, you can set it up yourself:
1272+goat key generate -t P-256
1273+Key Type: P-256 / secp256r1 / ES256 private key
1274+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1275+ z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1276+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1277+ did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1278+1279+# the secret key from above
1280+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1281+1282+# Run Redis in a new shell to store OAuth sessions
1283+redis-server
1284+```
1285+1286+## Running knots and spindles
1287+1288+An end-to-end knot setup requires setting up a machine with
1289+`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1290+quite cumbersome. So the Nix flake provides a
1291+`nixosConfiguration` to do so.
1292+1293+<details>
1294+ <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1295+1296+ In order to build Tangled's dev VM on macOS, you will
1297+ first need to set up a Linux Nix builder. The recommended
1298+ way to do so is to run a [`darwin.linux-builder`
1299+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1300+ and to register it in `nix.conf` as a builder for Linux
1301+ with the same architecture as your Mac (`linux-aarch64` if
1302+ you are using Apple Silicon).
1303+1304+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1305+ > the Tangled repo so that it doesn't conflict with the other VM. For example,
1306+ > you can do
1307+ >
1308+ > ```shell
1309+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1310+ > ```
1311+ >
1312+ > to store the builder VM in a temporary dir.
1313+ >
1314+ > You should read and follow [all the other instructions][darwin builder vm] to
1315+ > avoid subtle problems.
1316+1317+ Alternatively, you can use any other method to set up a
1318+ Linux machine with Nix installed that you can `sudo ssh`
1319+ into (in other words, root user on your Mac has to be able
1320+ to ssh into the Linux machine without entering a password)
1321+ and that has the same architecture as your Mac. See
1322+ [remote builder
1323+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1324+ for how to register such a builder in `nix.conf`.
1325+1326+ > WARNING: If you'd like to use
1327+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1328+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1329+ > ssh` works can be tricky. It seems to be [possible with
1330+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1331+1332+</details>
1333+1334+To begin, grab your DID from http://localhost:3000/settings.
1335+Then, set `TANGLED_VM_KNOT_OWNER` and
1336+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1337+lightweight NixOS VM like so:
1338+1339+```bash
1340+nix run --impure .#vm
1341+1342+# type `poweroff` at the shell to exit the VM
1343+```
1344+1345+This starts a knot on port 6444, a spindle on port 6555
1346+with `ssh` exposed on port 2222.
1347+1348+Once the services are running, head to
1349+http://localhost:3000/settings/knots and hit "Verify". It should
1350+verify the ownership of the services instantly if everything
1351+went smoothly.
1352+1353+You can push repositories to this VM with this ssh config
1354+block on your main machine:
1355+1356+```bash
1357+Host nixos-shell
1358+ Hostname localhost
1359+ Port 2222
1360+ User git
1361+ IdentityFile ~/.ssh/my_tangled_key
1362+```
1363+1364+Set up a remote called `local-dev` on a git repo:
1365+1366+```bash
1367+git remote add local-dev git@nixos-shell:user/repo
1368+git push local-dev main
1369+```
1370+1371+The above VM should already be running a spindle on
1372+`localhost:6555`. Head to http://localhost:3000/settings/spindles and
1373+hit "Verify". You can then configure each repository to use
1374+this spindle and run CI jobs.
1375+1376+Of interest when debugging spindles:
1377+1378+```
1379+# Service logs from journald:
1380+journalctl -xeu spindle
1381+1382+# CI job logs from disk:
1383+ls /var/log/spindle
1384+1385+# Debugging spindle database:
1386+sqlite3 /var/lib/spindle/spindle.db
1387+1388+# litecli has a nicer REPL interface:
1389+litecli /var/lib/spindle/spindle.db
1390+```
1391+1392+If for any reason you wish to disable either one of the
1393+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1394+`services.tangled.spindle.enable` (or
1395+`services.tangled.knot.enable`) to `false`.
1396+1397+# Contribution guide
1398+1399+## Commit guidelines
1400+1401+We follow a commit style similar to the Go project. Please keep commits:
1402+1403+* **atomic**: each commit should represent one logical change
1404+* **descriptive**: the commit message should clearly describe what the
1405+change does and why it's needed
1406+1407+### Message format
1408+1409+```
1410+<service/top-level directory>/<affected package/directory>: <short summary of change>
1411+1412+Optional longer description can go here, if necessary. Explain what the
1413+change does and why, especially if not obvious. Reference relevant
1414+issues or PRs when applicable. These can be links for now since we don't
1415+auto-link issues/PRs yet.
1416+```
1417+1418+Here are some examples:
1419+1420+```
1421+appview/state: fix token expiry check in middleware
1422+1423+The previous check did not account for clock drift, leading to premature
1424+token invalidation.
1425+```
1426+1427+```
1428+knotserver/git/service: improve error checking in upload-pack
1429+```
1430+1431+1432+### General notes
1433+1434+- PRs get merged "as-is" (fast-forward)—like applying a patch-series
1435+using `git am`. At present, there is no squashing—so please author
1436+your commits as they would appear on `master`, following the above
1437+guidelines.
1438+- If there is a lot of nesting, for example "appview:
1439+pages/templates/repo/fragments: ...", these can be truncated down to
1440+just "appview: repo/fragments: ...". If the change affects a lot of
1441+subdirectories, you may abbreviate to just the top-level names, e.g.
1442+"appview: ..." or "knotserver: ...".
1443+- Keep commits lowercased with no trailing period.
1444+- Use the imperative mood in the summary line (e.g., "fix bug" not
1445+"fixed bug" or "fixes bug").
1446+- Try to keep the summary line under 72 characters, but we aren't too
1447+fussed about this.
1448+- Follow the same formatting for PR titles if filled manually.
1449+- Don't include unrelated changes in the same commit.
1450+- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
1451+before submitting if necessary.
1452+1453+## Code formatting
1454+1455+We use a variety of tools to format our code, and multiplex them with
1456+[`treefmt`](https://treefmt.com). All you need to do to format your changes
1457+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1458+1459+## Proposals for bigger changes
1460+1461+Small fixes like typos, minor bugs, or trivial refactors can be
1462+submitted directly as PRs.
1464+For larger changes—especially those introducing new features, significant
1465+refactoring, or altering system behavior—please open a proposal first. This
1466+helps us evaluate the scope, design, and potential impact before implementation.
1467+1468+Create a new issue titled:
1469+1470+```
1471+proposal: <affected scope>: <summary of change>
1472+```
1473+1474+In the description, explain:
1475+1476+- What the change is
1477+- Why it's needed
1478+- How you plan to implement it (roughly)
1479+- Any open questions or tradeoffs
1480+1481+We'll use the issue thread to discuss and refine the idea before moving
1482+forward.
1483+1484+## Developer Certificate of Origin (DCO)
1485+1486+We require all contributors to certify that they have the right to
1487+submit the code they're contributing. To do this, we follow the
1488+[Developer Certificate of Origin
1489+(DCO)](https://developercertificate.org/).
1490+1491+By signing your commits, you're stating that the contribution is your
1492+own work, or that you have the right to submit it under the project's
1493+license. This helps us keep things clean and legally sound.
1494+1495+To sign your commit, just add the `-s` flag when committing:
1496+1497+```sh
1498+git commit -s -m "your commit message"
1499+```
1500+1501+This appends a line like:
1502+1503+```
1504+Signed-off-by: Your Name <your.email@example.com>
1505+```
1506+1507+We won't merge commits if they aren't signed off. If you forget, you can
1508+amend the last commit like this:
1509+1510+```sh
1511+git commit --amend -s
1512+```
1513+1514+If you're submitting a PR with multiple commits, make sure each one is
1515+signed.
1516+1517+For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
1518+to make it sign off commits in the tangled repo:
1519+1520+```shell
1521+# Safety check, should say "No matching config key..."
1522+jj config list templates.commit_trailers
1523+# The command below may need to be adjusted if the command above returned something.
1524+jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
1525+```
1526+1527+Refer to the [jujutsu
1528+documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1529+for more information.
-136
docs/contributing.md
···1-# tangled contributing guide
2-3-## commit guidelines
4-5-We follow a commit style similar to the Go project. Please keep commits:
6-7-* **atomic**: each commit should represent one logical change
8-* **descriptive**: the commit message should clearly describe what the
9-change does and why it's needed
10-11-### message format
12-13-```
14-<service/top-level directory>/<affected package/directory>: <short summary of change>
15-16-17-Optional longer description can go here, if necessary. Explain what the
18-change does and why, especially if not obvious. Reference relevant
19-issues or PRs when applicable. These can be links for now since we don't
20-auto-link issues/PRs yet.
21-```
22-23-Here are some examples:
24-25-```
26-appview/state: fix token expiry check in middleware
27-28-The previous check did not account for clock drift, leading to premature
29-token invalidation.
30-```
31-32-```
33-knotserver/git/service: improve error checking in upload-pack
34-```
35-36-37-### general notes
38-39-- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40-using `git am`. At present, there is no squashing -- so please author
41-your commits as they would appear on `master`, following the above
42-guidelines.
43-- If there is a lot of nesting, for example "appview:
44-pages/templates/repo/fragments: ...", these can be truncated down to
45-just "appview: repo/fragments: ...". If the change affects a lot of
46-subdirectories, you may abbreviate to just the top-level names, e.g.
47-"appview: ..." or "knotserver: ...".
48-- Keep commits lowercased with no trailing period.
49-- Use the imperative mood in the summary line (e.g., "fix bug" not
50-"fixed bug" or "fixes bug").
51-- Try to keep the summary line under 72 characters, but we aren't too
52-fussed about this.
53-- Follow the same formatting for PR titles if filled manually.
54-- Don't include unrelated changes in the same commit.
55-- Avoid noisy commit messages like "wip" or "final fix"โrewrite history
56-before submitting if necessary.
57-58-## code formatting
59-60-We use a variety of tools to format our code, and multiplex them with
61-[`treefmt`](https://treefmt.com): all you need to do to format your changes
62-is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63-64-## proposals for bigger changes
65-66-Small fixes like typos, minor bugs, or trivial refactors can be
67-submitted directly as PRs.
68-69-For larger changesโespecially those introducing new features, significant
70-refactoring, or altering system behaviorโplease open a proposal first. This
71-helps us evaluate the scope, design, and potential impact before implementation.
72-73-### proposal format
74-75-Create a new issue titled:
76-77-```
78-proposal: <affected scope>: <summary of change>
79-```
80-81-In the description, explain:
82-83-- What the change is
84-- Why it's needed
85-- How you plan to implement it (roughly)
86-- Any open questions or tradeoffs
87-88-We'll use the issue thread to discuss and refine the idea before moving
89-forward.
90-91-## developer certificate of origin (DCO)
92-93-We require all contributors to certify that they have the right to
94-submit the code they're contributing. To do this, we follow the
95-[Developer Certificate of Origin
96-(DCO)](https://developercertificate.org/).
97-98-By signing your commits, you're stating that the contribution is your
99-own work, or that you have the right to submit it under the project's
100-license. This helps us keep things clean and legally sound.
101-102-To sign your commit, just add the `-s` flag when committing:
103-104-```sh
105-git commit -s -m "your commit message"
106-```
107-108-This appends a line like:
109-110-```
111-Signed-off-by: Your Name <your.email@example.com>
112-```
113-114-We won't merge commits if they aren't signed off. If you forget, you can
115-amend the last commit like this:
116-117-```sh
118-git commit --amend -s
119-```
120-121-If you're submitting a PR with multiple commits, make sure each one is
122-signed.
123-124-For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125-to make it sign off commits in the tangled repo:
126-127-```shell
128-# Safety check, should say "No matching config key..."
129-jj config list templates.commit_trailers
130-# The command below may need to be adjusted if the command above returned something.
131-jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132-```
133-134-Refer to the [jj
135-documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136-for more information.
···1-# hacking on tangled
2-3-We highly recommend [installing
4-nix](https://nixos.org/download/) (the package manager)
5-before working on the codebase. The nix flake provides a lot
6-of helpers to get started and most importantly, builds and
7-dev shells are entirely deterministic.
8-9-To set up your dev environment:
10-11-```bash
12-nix develop
13-```
14-15-Non-nix users can look at the `devShell` attribute in the
16-`flake.nix` file to determine necessary dependencies.
17-18-## running the appview
19-20-The nix flake also exposes a few `app` attributes (run `nix
21-flake show` to see a full list of what the flake provides),
22-one of the apps runs the appview with the `air`
23-live-reloader:
24-25-```bash
26-TANGLED_DEV=true nix run .#watch-appview
27-28-# TANGLED_DB_PATH might be of interest to point to
29-# different sqlite DBs
30-31-# in a separate shell, you can live-reload tailwind
32-nix run .#watch-tailwind
33-```
34-35-To authenticate with the appview, you will need redis and
36-OAUTH JWKs to be setup:
37-38-```
39-# oauth jwks should already be setup by the nix devshell:
40-echo $TANGLED_OAUTH_CLIENT_SECRET
41-z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42-43-echo $TANGLED_OAUTH_CLIENT_KID
44-1761667908
45-46-# if not, you can set it up yourself:
47-goat key generate -t P-256
48-Key Type: P-256 / secp256r1 / ES256 private key
49-Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50- z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51-Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52- did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53-54-# the secret key from above
55-export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56-57-# run redis in at a new shell to store oauth sessions
58-redis-server
59-```
60-61-## running knots and spindles
62-63-An end-to-end knot setup requires setting up a machine with
64-`sshd`, `AuthorizedKeysCommand`, and git user, which is
65-quite cumbersome. So the nix flake provides a
66-`nixosConfiguration` to do so.
67-68-<details>
69- <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
70-71- In order to build Tangled's dev VM on macOS, you will
72- first need to set up a Linux Nix builder. The recommended
73- way to do so is to run a [`darwin.linux-builder`
74- VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75- and to register it in `nix.conf` as a builder for Linux
76- with the same architecture as your Mac (`linux-aarch64` if
77- you are using Apple Silicon).
78-79- > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80- > the tangled repo so that it doesn't conflict with the other VM. For example,
81- > you can do
82- >
83- > ```shell
84- > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85- > ```
86- >
87- > to store the builder VM in a temporary dir.
88- >
89- > You should read and follow [all the other intructions][darwin builder vm] to
90- > avoid subtle problems.
91-92- Alternatively, you can use any other method to set up a
93- Linux machine with `nix` installed that you can `sudo ssh`
94- into (in other words, root user on your Mac has to be able
95- to ssh into the Linux machine without entering a password)
96- and that has the same architecture as your Mac. See
97- [remote builder
98- instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99- for how to register such a builder in `nix.conf`.
100-101- > WARNING: If you'd like to use
102- > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103- > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104- > ssh` works can be tricky. It seems to be [possible with
105- > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106-107-</details>
108-109-To begin, grab your DID from http://localhost:3000/settings.
110-Then, set `TANGLED_VM_KNOT_OWNER` and
111-`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112-lightweight NixOS VM like so:
113-114-```bash
115-nix run --impure .#vm
116-117-# type `poweroff` at the shell to exit the VM
118-```
119-120-This starts a knot on port 6444, a spindle on port 6555
121-with `ssh` exposed on port 2222.
122-123-Once the services are running, head to
124-http://localhost:3000/settings/knots and hit verify. It should
125-verify the ownership of the services instantly if everything
126-went smoothly.
127-128-You can push repositories to this VM with this ssh config
129-block on your main machine:
130-131-```bash
132-Host nixos-shell
133- Hostname localhost
134- Port 2222
135- User git
136- IdentityFile ~/.ssh/my_tangled_key
137-```
138-139-Set up a remote called `local-dev` on a git repo:
140-141-```bash
142-git remote add local-dev git@nixos-shell:user/repo
143-git push local-dev main
144-```
145-146-### running a spindle
147-148-The above VM should already be running a spindle on
149-`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150-hit verify. You can then configure each repository to use
151-this spindle and run CI jobs.
152-153-Of interest when debugging spindles:
154-155-```
156-# service logs from journald:
157-journalctl -xeu spindle
158-159-# CI job logs from disk:
160-ls /var/log/spindle
161-162-# debugging spindle db:
163-sqlite3 /var/lib/spindle/spindle.db
164-165-# litecli has a nicer REPL interface:
166-litecli /var/lib/spindle/spindle.db
167-```
168-169-If for any reason you wish to disable either one of the
170-services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171-`services.tangled.spindle.enable` (or
172-`services.tangled.knot.enable`) to `false`.
···1-# knot self-hosting guide
2-3-So you want to run your own knot server? Great! Here are a few prerequisites:
4-5-1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6-2. A (sub)domain name. People generally use `knot.example.com`.
7-3. A valid SSL certificate for your domain.
8-9-There's a couple of ways to get started:
10-* NixOS: refer to
11-[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12-* Docker: Documented at
13-[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14-(community maintained: support is not guaranteed!)
15-* Manual: Documented below.
16-17-## manual setup
18-19-First, clone this repository:
20-21-```
22-git clone https://tangled.org/@tangled.org/core
23-```
24-25-Then, build the `knot` CLI. This is the knot administration and operation tool.
26-For the purpose of this guide, we're only concerned with these subcommands:
27-28-* `knot server`: the main knot server process, typically run as a
29-supervised service
30-* `knot guard`: handles role-based access control for git over SSH
31-(you'll never have to run this yourself)
32-* `knot keys`: fetches SSH keys associated with your knot; we'll use
33-this to generate the SSH `AuthorizedKeysCommand`
34-35-```
36-cd core
37-export CGO_ENABLED=1
38-go build -o knot ./cmd/knot
39-```
40-41-Next, move the `knot` binary to a location owned by `root` --
42-`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43-44-```
45-sudo mv knot /usr/local/bin/knot
46-sudo chown root:root /usr/local/bin/knot
47-```
48-49-This is necessary because SSH `AuthorizedKeysCommand` requires [really
50-specific permissions](https://stackoverflow.com/a/27638306). The
51-`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52-retrieve a user's public SSH keys dynamically for authentication. Let's
53-set that up.
54-55-```
56-sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57-Match User git
58- AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59- AuthorizedKeysCommandUser nobody
60-EOF
61-```
62-63-Then, reload `sshd`:
64-65-```
66-sudo systemctl reload ssh
67-```
68-69-Next, create the `git` user. We'll use the `git` user's home directory
70-to store repositories:
71-72-```
73-sudo adduser git
74-```
75-76-Create `/home/git/.knot.env` with the following, updating the values as
77-necessary. The `KNOT_SERVER_OWNER` should be set to your
78-DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
79-80-```
81-KNOT_REPO_SCAN_PATH=/home/git
82-KNOT_SERVER_HOSTNAME=knot.example.com
83-APPVIEW_ENDPOINT=https://tangled.sh
84-KNOT_SERVER_OWNER=did:plc:foobar
85-KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86-KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87-```
88-89-If you run a Linux distribution that uses systemd, you can use the provided
90-service file to run the server. Copy
91-[`knotserver.service`](/systemd/knotserver.service)
92-to `/etc/systemd/system/`. Then, run:
93-94-```
95-systemctl enable knotserver
96-systemctl start knotserver
97-```
98-99-The last step is to configure a reverse proxy like Nginx or Caddy to front your
100-knot. Here's an example configuration for Nginx:
101-102-```
103-server {
104- listen 80;
105- listen [::]:80;
106- server_name knot.example.com;
107-108- location / {
109- proxy_pass http://localhost:5555;
110- proxy_set_header Host $host;
111- proxy_set_header X-Real-IP $remote_addr;
112- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113- proxy_set_header X-Forwarded-Proto $scheme;
114- }
115-116- # wss endpoint for git events
117- location /events {
118- proxy_set_header X-Forwarded-For $remote_addr;
119- proxy_set_header Host $http_host;
120- proxy_http_version 1.1;
 proxy_set_header Upgrade $http_upgrade;
121- proxy_set_header Connection "Upgrade";
122- proxy_pass http://localhost:5555;
123- }
124- # additional config for SSL/TLS go here.
125-}
126-127-```
128-129-Remember to use Let's Encrypt or similar to procure a certificate for your
130-knot domain.
131-132-You should now have a running knot server! You can finalize
133-your registration by hitting the `verify` button on the
134-[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135-a record on your PDS to announce the existence of the knot.
136-137-### custom paths
138-139-(This section applies to manual setup only. Docker users should edit the mounts
140-in `docker-compose.yml` instead.)
141-142-Right now, the database and repositories of your knot lives in `/home/git`. You
143-can move these paths if you'd like to store them in another folder. Be careful
144-when adjusting these paths:
145-146-* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147-any possible side effects. Remember to restart it once you're done.
148-* Make backups before moving in case something goes wrong.
149-* Make sure the `git` user can read and write from the new paths.
150-151-#### database
152-153-As an example, let's say the current database is at `/home/git/knotserver.db`,
154-and we want to move it to `/home/git/database/knotserver.db`.
155-156-Copy the current database to the new location. Make sure to copy the `.db-shm`
157-and `.db-wal` files if they exist.
158-159-```
160-mkdir /home/git/database
161-cp /home/git/knotserver.db* /home/git/database
162-```
163-164-In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165-the new file path (_not_ the directory):
166-167-```
168-KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169-```
170-171-#### repositories
172-173-As an example, let's say the repositories are currently in `/home/git`, and we
174-want to move them into `/home/git/repositories`.
175-176-Create the new folder, then move the existing repositories (if there are any):
177-178-```
179-mkdir /home/git/repositories
180-# move all DIDs into the new folder; these will vary for you!
181-mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182-```
183-184-In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185-to the new directory:
186-187-```
188-KNOT_REPO_SCAN_PATH=/home/git/repositories
189-```
190-191-Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192-repository path:
193-194-```
195-sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196-Match User git
197- AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198- AuthorizedKeysCommandUser nobody
199-EOF
200-```
201-202-Make sure to restart your SSH server!
203-204-#### MOTD (message of the day)
205-206-To configure the MOTD used ("Welcome to this knot!" by default), edit the
207-`/home/git/motd` file:
208-209-```
210-printf "Hi from this knot!\n" > /home/git/motd
211-```
212-213-Note that you should add a newline at the end if setting a non-empty message
214-since the knot won't do this for you.
···1-# Migrations
2-3-This document is laid out in reverse-chronological order.
4-Newer migration guides are listed first, and older guides
5-are further down the page.
6-7-## Upgrading from v1.8.x
8-9-After v1.8.2, the HTTP API for knot and spindles have been
10-deprecated and replaced with XRPC. Repositories on outdated
11-knots will not be viewable from the appview. Upgrading is
12-straightforward however.
13-14-For knots:
15-16-- Upgrade to latest tag (v1.9.0 or above)
17-- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18- hit the "retry" button to verify your knot
19-20-For spindles:
21-22-- Upgrade to latest tag (v1.9.0 or above)
23-- Head to the [spindle
24- dashboard](https://tangled.org/settings/spindles) and hit the
25- "retry" button to verify your spindle
26-27-## Upgrading from v1.7.x
28-29-After v1.7.0, knot secrets have been deprecated. You no
30-longer need a secret from the appview to run a knot. All
31-authorized commands to knots are managed via [Inter-Service
32-Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33-Knots will be read-only until upgraded.
34-35-Upgrading is quite easy, in essence:
36-37-- `KNOT_SERVER_SECRET` is no more, you can remove this
38- environment variable entirely
39-- `KNOT_SERVER_OWNER` is now required on boot, set this to
40- your DID. You can find your DID in the
41- [settings](https://tangled.org/settings) page.
42-- Restart your knot once you have replaced the environment
43- variable
44-- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45- hit the "retry" button to verify your knot. This simply
46- writes a `sh.tangled.knot` record to your PDS.
47-48-If you use the nix module, simply bump the flake to the
49-latest revision, and change your config block like so:
50-51-```diff
52- services.tangled.knot = {
53- enable = true;
54- server = {
55-- secretFile = /path/to/secret;
56-+ owner = "did:plc:foo";
57- };
58- };
59-```
···1-# spindle architecture
2-3-Spindle is a small CI runner service. Here's a high level overview of how it operates:
4-5-* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6-[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7-* when a new repo record comes through (typically when you add a spindle to a
8-repo from the settings), spindle then resolves the underlying knot and
9-subscribes to repo events (see:
10-[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11-* the spindle engine then handles execution of the pipeline, with results and
12-logs beamed on the spindle event stream over wss
13-14-### the engine
15-16-At present, the only supported backend is Docker (and Podman, if Docker
17-compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18-executes each step in the pipeline in a fresh container, with state persisted
19-across steps within the `/tangled/workspace` directory.
20-21-The base image for the container is constructed on the fly using
22-[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23-used packages.
24-25-The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
···0000000000000000000000000
-52
docs/spindle/hosting.md
···1-# spindle self-hosting guide
2-3-## prerequisites
4-5-* Go
6-* Docker (the only supported backend currently)
7-8-## configuration
9-10-Spindle is configured using environment variables. The following environment variables are available:
11-12-* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13-* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14-* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15-* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16-* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17-* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18-* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19-* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20-* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21-22-## running spindle
23-24-1. **Set the environment variables.** For example:
25-26- ```shell
27- export SPINDLE_SERVER_HOSTNAME="your-hostname"
28- export SPINDLE_SERVER_OWNER="your-did"
29- ```
30-31-2. **Build the Spindle binary.**
32-33- ```shell
34- cd core
35- go mod download
36- go build -o cmd/spindle/spindle cmd/spindle/main.go
37- ```
38-39-3. **Create the log directory.**
40-41- ```shell
42- sudo mkdir -p /var/log/spindle
43- sudo chown $USER:$USER -R /var/log/spindle
44- ```
45-46-4. **Run the Spindle binary.**
47-48- ```shell
49- ./cmd/spindle/spindle
50- ```
51-52-Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
···1-# spindle secrets with openbao
2-3-This document covers setting up Spindle to use OpenBao for secrets
4-management via OpenBao Proxy instead of the default SQLite backend.
5-6-## overview
7-8-Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9-authentication automatically using AppRole credentials, while Spindle
10-connects to the local proxy instead of directly to the OpenBao server.
11-12-This approach provides better security, automatic token renewal, and
13-simplified application code.
14-15-## installation
16-17-Install OpenBao from nixpkgs:
18-19-```bash
20-nix shell nixpkgs#openbao # for a local server
21-```
22-23-## setup
24-25-The setup process is documented for both local development and production.
26-27-### local development
28-29-Start OpenBao in dev mode:
30-31-```bash
32-bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33-```
34-35-This starts OpenBao on `http://localhost:8201` with a root token.
36-37-Set up environment for bao CLI:
38-39-```bash
40-export BAO_ADDR=http://localhost:8201
41-export BAO_TOKEN=root
42-```
43-44-### production
45-46-You would typically use a systemd service with a configuration file. Refer to
47-[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48-achieved using Nix.
49-50-Then, initialize the bao server:
51-```bash
52-bao operator init -key-shares=1 -key-threshold=1
53-```
54-55-This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56-```bash
57-bao operator unseal <unseal_key>
58-```
59-60-All steps below remain the same across both dev and production setups.
61-62-### configure openbao server
63-64-Create the spindle KV mount:
65-66-```bash
67-bao secrets enable -path=spindle -version=2 kv
68-```
69-70-Set up AppRole authentication and policy:
71-72-Create a policy file `spindle-policy.hcl`:
73-74-```hcl
75-# Full access to spindle KV v2 data
76-path "spindle/data/*" {
77- capabilities = ["create", "read", "update", "delete"]
78-}
79-80-# Access to metadata for listing and management
81-path "spindle/metadata/*" {
82- capabilities = ["list", "read", "delete", "update"]
83-}
84-85-# Allow listing at root level
86-path "spindle/" {
87- capabilities = ["list"]
88-}
89-90-# Required for connection testing and health checks
91-path "auth/token/lookup-self" {
92- capabilities = ["read"]
93-}
94-```
95-96-Apply the policy and create an AppRole:
97-98-```bash
99-bao policy write spindle-policy spindle-policy.hcl
100-bao auth enable approle
101-bao write auth/approle/role/spindle \
102- token_policies="spindle-policy" \
103- token_ttl=1h \
104- token_max_ttl=4h \
105- bind_secret_id=true \
106- secret_id_ttl=0 \
107- secret_id_num_uses=0
108-```
109-110-Get the credentials:
111-112-```bash
113-# Get role ID (static)
114-ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115-116-# Generate secret ID
117-SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118-119-echo "Role ID: $ROLE_ID"
120-echo "Secret ID: $SECRET_ID"
121-```
122-123-### create proxy configuration
124-125-Create the credential files:
126-127-```bash
128-# Create directory for OpenBao files
129-mkdir -p /tmp/openbao
130-131-# Save credentials
132-echo "$ROLE_ID" > /tmp/openbao/role-id
133-echo "$SECRET_ID" > /tmp/openbao/secret-id
134-chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135-```
136-137-Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138-139-```hcl
140-# OpenBao server connection
141-vault {
142- address = "http://localhost:8200"
143-}
144-145-# Auto-Auth using AppRole
146-auto_auth {
147- method "approle" {
148- mount_path = "auth/approle"
149- config = {
150- role_id_file_path = "/tmp/openbao/role-id"
151- secret_id_file_path = "/tmp/openbao/secret-id"
152- }
153- }
154-155- # Optional: write token to file for debugging
156- sink "file" {
157- config = {
158- path = "/tmp/openbao/token"
159- mode = 0640
160- }
161- }
162-}
163-164-# Proxy listener for Spindle
165-listener "tcp" {
166- address = "127.0.0.1:8201"
167- tls_disable = true
168-}
169-170-# Enable API proxy with auto-auth token
171-api_proxy {
172- use_auto_auth_token = true
173-}
174-175-# Enable response caching
176-cache {
177- use_auto_auth_token = true
178-}
179-180-# Logging
181-log_level = "info"
182-```
183-184-### start the proxy
185-186-Start OpenBao Proxy:
187-188-```bash
189-bao proxy -config=/tmp/openbao/proxy.hcl
190-```
191-192-The proxy will authenticate with OpenBao and start listening on
193-`127.0.0.1:8201`.
194-195-### configure spindle
196-197-Set these environment variables for Spindle:
198-199-```bash
200-export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201-export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202-export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203-```
204-205-Start Spindle:
206-207-Spindle will now connect to the local proxy, which handles all
208-authentication automatically.
209-210-## production setup for proxy
211-212-For production, you'll want to run the proxy as a service:
213-214-Place your production configuration in `/etc/openbao/proxy.hcl` with
215-proper TLS settings for the vault connection.
216-217-## verifying setup
218-219-Test the proxy directly:
220-221-```bash
222-# Check proxy health
223-curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224-225-# Test token lookup through proxy
226-curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227-```
228-229-Test OpenBao operations through the server:
230-231-```bash
232-# List all secrets
233-bao kv list spindle/
234-235-# Add a test secret via Spindle API, then check it exists
236-bao kv list spindle/repos/
237-238-# Get a specific secret
239-bao kv get spindle/repos/your_repo_path/SECRET_NAME
240-```
241-242-## how it works
243-244-- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245-- The proxy authenticates with OpenBao using AppRole credentials
246-- All Spindle requests go through the proxy, which injects authentication tokens
247-- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248-- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249-- The proxy handles all token renewal automatically
250-- Spindle no longer manages tokens or authentication directly
251-252-## troubleshooting
253-254-**Connection refused**: Check that the OpenBao Proxy is running and
255-listening on the configured address.
256-257-**403 errors**: Verify the AppRole credentials are correct and the policy
258-has the necessary permissions.
259-260-**404 route errors**: The spindle KV mount probably doesn't exist - run
261-the mount creation step again.
262-263-**Proxy authentication failures**: Check the proxy logs and verify the
264-role-id and secret-id files are readable and contain valid credentials.
265-266-**Secret not found after writing**: This can indicate policy permission
267-issues. Verify the policy includes both `spindle/data/*` and
268-`spindle/metadata/*` paths with appropriate capabilities.
269-270-Check proxy logs:
271-272-```bash
273-# If running as systemd service
274-journalctl -u openbao-proxy -f
275-276-# If running directly, check the console output
277-```
278-279-Test AppRole authentication manually:
280-281-```bash
282-bao write auth/approle/login \
283- role_id="$(cat /tmp/openbao/role-id)" \
284- secret_id="$(cat /tmp/openbao/secret-id)"
285-```
···1-# spindle pipelines
2-3-Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4-5-The fields are:
6-7-- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8-- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9-- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10-- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11-- [Environment](#environment): An **optional** field that allows you to define environment variables.
12-- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13-14-## Trigger
15-16-The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17-18-- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19- - `push`: The workflow should run every time a commit is pushed to the repository.
20- - `pull_request`: The workflow should run every time a pull request is made or updated.
21- - `manual`: The workflow can be triggered manually.
22-- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23-- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24-25-For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26-27-```yaml
28-when:
29- - event: ["push", "manual"]
30- branch: ["main", "develop"]
31- - event: ["pull_request"]
32- branch: ["main"]
33-```
34-35-You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36-37-```yaml
38-when:
39- - event: ["push"]
40- tag: ["v*"]
41-```
42-43-You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44-45-```yaml
46-when:
47- - event: ["push"]
48- branch: ["main", "release-*"]
49- tag: ["v*", "stable"]
50-```
51-52-## Engine
53-54-Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55-56-- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57-58-Example:
59-60-```yaml
61-engine: "nixery"
62-```
63-64-## Clone options
65-66-When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67-68-- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69-- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70-- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71-72-The default settings are:
73-74-```yaml
75-clone:
76- skip: false
77- depth: 1
78- submodules: false
79-```
80-81-## Dependencies
82-83-Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84-85-Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86-87-```yaml
88-dependencies:
89- # nixpkgs
90- nixpkgs:
91- - nodejs
92- - go
93- # custom registry
94- git+https://tangled.org/@example.com/my_pkg:
95- - my_pkg
96-```
97-98-Now these dependencies are available to use in your workflow!
99-100-## Environment
101-102-The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103-104-Example:
105-106-```yaml
107-environment:
108- GOOS: "linux"
109- GOARCH: "arm64"
110- NODE_ENV: "production"
111- MY_ENV_VAR: "MY_ENV_VALUE"
112-```
113-114-## Steps
115-116-The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117-118-- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119-- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120-- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121-122-Example:
123-124-```yaml
125-steps:
126- - name: "Build backend"
127- command: "go build"
128- environment:
129- GOOS: "darwin"
130- GOARCH: "arm64"
131- - name: "Build frontend"
132- command: "npm run build"
133- environment:
134- NODE_ENV: "production"
135-```
136-137-## Complete workflow
138-139-```yaml
140-# .tangled/workflows/build.yml
141-142-when:
143- - event: ["push", "manual"]
144- branch: ["main", "develop"]
145- - event: ["pull_request"]
146- branch: ["main"]
147-148-engine: "nixery"
149-150-# using the default values
151-clone:
152- skip: false
153- depth: 1
154- submodules: false
155-156-dependencies:
157- # nixpkgs
158- nixpkgs:
159- - nodejs
160- - go
161- # custom registry
162- git+https://tangled.org/@example.com/my_pkg:
163- - my_pkg
164-165-environment:
166- GOOS: "linux"
167- GOARCH: "arm64"
168- NODE_ENV: "production"
169- MY_ENV_VAR: "MY_ENV_VALUE"
170-171-steps:
172- - name: "Build backend"
173- command: "go build"
174- environment:
175- GOOS: "darwin"
176- GOARCH: "arm64"
177- - name: "Build frontend"
178- command: "npm run build"
179- environment:
180- NODE_ENV: "production"
181-```
182-183-If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
···1+package db
2+3+import (
4+ "context"
5+ "database/sql"
6+ "log/slog"
7+ "strings"
8+9+ _ "github.com/mattn/go-sqlite3"
10+ "tangled.org/core/log"
11+)
// DB wraps the SQLite handle together with a logger scoped to the
// database subsystem.
type DB struct {
	db     *sql.DB
	logger *slog.Logger
}
17+18+func Setup(ctx context.Context, dbPath string) (*DB, error) {
19+ // https://github.com/mattn/go-sqlite3#connection-string
20+ opts := []string{
21+ "_foreign_keys=1",
22+ "_journal_mode=WAL",
23+ "_synchronous=NORMAL",
24+ "_auto_vacuum=incremental",
25+ }
26+27+ logger := log.FromContext(ctx)
28+ logger = log.SubLogger(logger, "db")
29+30+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
31+ if err != nil {
32+ return nil, err
33+ }
34+35+ conn, err := db.Conn(ctx)
36+ if err != nil {
37+ return nil, err
38+ }
39+ defer conn.Close()
40+41+ _, err = conn.ExecContext(ctx, `
42+ create table if not exists known_dids (
43+ did text primary key
44+ );
45+46+ create table if not exists public_keys (
47+ id integer primary key autoincrement,
48+ did text not null,
49+ key text not null,
50+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
51+ unique(did, key),
52+ foreign key (did) references known_dids(did) on delete cascade
53+ );
54+55+ create table if not exists _jetstream (
56+ id integer primary key autoincrement,
57+ last_time_us integer not null
58+ );
59+60+ create table if not exists events (
61+ rkey text not null,
62+ nsid text not null,
63+ event text not null, -- json
64+ created integer not null default (strftime('%s', 'now')),
65+ primary key (rkey, nsid)
66+ );
67+68+ create table if not exists migrations (
69+ id integer primary key autoincrement,
70+ name text unique
71+ );
72+ `)
73+ if err != nil {
74+ return nil, err
75+ }
76+77+ return &DB{
78+ db: db,
79+ logger: logger,
80+ }, nil
81+}
-64
knotserver/db/init.go
···1-package db
2-3-import (
4- "database/sql"
5- "strings"
6-7- _ "github.com/mattn/go-sqlite3"
8-)
// DB wraps the underlying SQLite handle.
type DB struct {
	db *sql.DB
}
// Setup opens (or creates) the SQLite database at dbPath with foreign
// keys, WAL journaling, NORMAL synchronous mode and incremental
// auto-vacuum enabled, ensures the base schema exists, and returns a
// ready-to-use *DB.
func Setup(dbPath string) (*DB, error) {
	// https://github.com/mattn/go-sqlite3#connection-string
	opts := []string{
		"_foreign_keys=1",
		"_journal_mode=WAL",
		"_synchronous=NORMAL",
		"_auto_vacuum=incremental",
	}

	db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
	if err != nil {
		return nil, err
	}

	// NOTE: If any other migration is added here, you MUST
	// copy the pattern in appview: use a single sql.Conn
	// for every migration.

	// Idempotent: every statement is `create table if not exists`, so
	// running Setup against an existing database is safe.
	_, err = db.Exec(`
	create table if not exists known_dids (
		did text primary key
	);

	create table if not exists public_keys (
		id integer primary key autoincrement,
		did text not null,
		key text not null,
		created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
		unique(did, key),
		foreign key (did) references known_dids(did) on delete cascade
	);

	create table if not exists _jetstream (
		id integer primary key autoincrement,
		last_time_us integer not null
	);

	create table if not exists events (
		rkey text not null,
		nsid text not null,
		event text not null, -- json
		created integer not null default (strftime('%s', 'now')),
		primary key (rkey, nsid)
	);
	`)
	if err != nil {
		// NOTE(review): db is not closed on this path, so the pool
		// leaks if schema creation fails — consider db.Close() here.
		return nil, err
	}

	return &DB{db: db}, nil
}
···8 var = builtins.getEnv name;
9 in
10 if var == ""
11- then throw "\$${name} must be defined, see docs/hacking.md for more details"
12 else var;
13 envVarOr = name: default: let
14 var = builtins.getEnv name;
···92 jetstreamEndpoint = jetstream;
93 listenAddr = "0.0.0.0:6444";
94 };
95- environmentFile = "${config.services.tangled.knot.stateDir}/.env";
96 };
97 services.tangled.spindle = {
98 enable = true;
···8 var = builtins.getEnv name;
9 in
10 if var == ""
11+ then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
12 else var;
13 envVarOr = name: default: let
14 var = builtins.getEnv name;
···92 jetstreamEndpoint = jetstream;
93 listenAddr = "0.0.0.0:6444";
94 };
095 };
96 services.tangled.spindle = {
97 enable = true;
···1+package models
2+3+import (
4+ "encoding/base64"
5+ "testing"
6+)
7+8+func TestSecretMask_BasicMasking(t *testing.T) {
9+ mask := NewSecretMask([]string{"mysecret123"})
10+11+ input := "The password is mysecret123 in this log"
12+ expected := "The password is *** in this log"
13+14+ result := mask.Mask(input)
15+ if result != expected {
16+ t.Errorf("expected %q, got %q", expected, result)
17+ }
18+}
19+20+func TestSecretMask_Base64Encoded(t *testing.T) {
21+ secret := "mysecret123"
22+ mask := NewSecretMask([]string{secret})
23+24+ b64 := base64.StdEncoding.EncodeToString([]byte(secret))
25+ input := "Encoded: " + b64
26+ expected := "Encoded: ***"
27+28+ result := mask.Mask(input)
29+ if result != expected {
30+ t.Errorf("expected %q, got %q", expected, result)
31+ }
32+}
33+34+func TestSecretMask_Base64NoPadding(t *testing.T) {
35+ // "test" encodes to "dGVzdA==" with padding
36+ secret := "test"
37+ mask := NewSecretMask([]string{secret})
38+39+ b64NoPad := "dGVzdA" // base64 without padding
40+ input := "Token: " + b64NoPad
41+ expected := "Token: ***"
42+43+ result := mask.Mask(input)
44+ if result != expected {
45+ t.Errorf("expected %q, got %q", expected, result)
46+ }
47+}
48+49+func TestSecretMask_MultipleSecrets(t *testing.T) {
50+ mask := NewSecretMask([]string{"password1", "apikey123"})
51+52+ input := "Using password1 and apikey123 for auth"
53+ expected := "Using *** and *** for auth"
54+55+ result := mask.Mask(input)
56+ if result != expected {
57+ t.Errorf("expected %q, got %q", expected, result)
58+ }
59+}
60+61+func TestSecretMask_MultipleOccurrences(t *testing.T) {
62+ mask := NewSecretMask([]string{"secret"})
63+64+ input := "secret appears twice: secret"
65+ expected := "*** appears twice: ***"
66+67+ result := mask.Mask(input)
68+ if result != expected {
69+ t.Errorf("expected %q, got %q", expected, result)
70+ }
71+}
72+73+func TestSecretMask_ShortValues(t *testing.T) {
74+ mask := NewSecretMask([]string{"abc", "xy", ""})
75+76+ if mask == nil {
77+ t.Fatal("expected non-nil mask")
78+ }
79+80+ input := "abc xy test"
81+ expected := "*** *** test"
82+ result := mask.Mask(input)
83+ if result != expected {
84+ t.Errorf("expected %q, got %q", expected, result)
85+ }
86+}
87+88+func TestSecretMask_NilMask(t *testing.T) {
89+ var mask *SecretMask
90+91+ input := "some input text"
92+ result := mask.Mask(input)
93+ if result != input {
94+ t.Errorf("expected %q, got %q", input, result)
95+ }
96+}
97+98+func TestSecretMask_EmptyInput(t *testing.T) {
99+ mask := NewSecretMask([]string{"secret"})
100+101+ result := mask.Mask("")
102+ if result != "" {
103+ t.Errorf("expected empty string, got %q", result)
104+ }
105+}
106+107+func TestSecretMask_NoMatch(t *testing.T) {
108+ mask := NewSecretMask([]string{"secretvalue"})
109+110+ input := "nothing to mask here"
111+ result := mask.Mask(input)
112+ if result != input {
113+ t.Errorf("expected %q, got %q", input, result)
114+ }
115+}
116+117+func TestSecretMask_EmptySecretsList(t *testing.T) {
118+ mask := NewSecretMask([]string{})
119+120+ if mask != nil {
121+ t.Error("expected nil mask for empty secrets list")
122+ }
123+}
124+125+func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
126+ mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
127+128+ input := "Using validpassword here"
129+ expected := "Using *** here"
130+131+ result := mask.Mask(input)
132+ if result != expected {
133+ t.Errorf("expected %q, got %q", expected, result)
134+ }
135+}
+1-1
spindle/motd
···20 **
21 ********
2223-This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle
2425Most API routes are under /xrpc/
···20 **
21 ********
2223+This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles
2425Most API routes are under /xrpc/
+21-3
spindle/server.go
···8 "log/slog"
9 "maps"
10 "net/http"
01112 "github.com/go-chi/chi/v5"
13 "tangled.org/core/api/tangled"
···30)
3132//go:embed motd
33-var motd []byte
3435const (
36 rbacDomain = "thisserver"
···47 cfg *config.Config
48 ks *eventconsumer.Consumer
49 res *idresolver.Resolver
50- vault secrets.Manager
0051}
5253// New creates a new Spindle server with the provided configuration and engines.
···128 cfg: cfg,
129 res: resolver,
130 vault: vault,
0131 }
132133 err = e.AddSpindle(rbacDomain)
···201 return s.e
202}
20300000000000000204// Start starts the Spindle server (blocking).
205func (s *Spindle) Start(ctx context.Context) error {
206 // starts a job queue runner in the background
···246 mux := chi.NewRouter()
247248 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
249- w.Write(motd)
250 })
251 mux.HandleFunc("/events", s.Events)
252 mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
···8 "log/slog"
9 "maps"
10 "net/http"
11+ "sync"
1213 "github.com/go-chi/chi/v5"
14 "tangled.org/core/api/tangled"
···31)
3233//go:embed motd
34+var defaultMotd []byte
3536const (
37 rbacDomain = "thisserver"
···48 cfg *config.Config
49 ks *eventconsumer.Consumer
50 res *idresolver.Resolver
51+ vault secrets.Manager
52+ motd []byte
53+ motdMu sync.RWMutex
54}
5556// New creates a new Spindle server with the provided configuration and engines.
···131 cfg: cfg,
132 res: resolver,
133 vault: vault,
134+ motd: defaultMotd,
135 }
136137 err = e.AddSpindle(rbacDomain)
···205 return s.e
206}
207208+// SetMotdContent sets custom MOTD content, replacing the embedded default.
209+func (s *Spindle) SetMotdContent(content []byte) {
210+ s.motdMu.Lock()
211+ defer s.motdMu.Unlock()
212+ s.motd = content
213+}
214+215+// GetMotdContent returns the current MOTD content.
216+func (s *Spindle) GetMotdContent() []byte {
217+ s.motdMu.RLock()
218+ defer s.motdMu.RUnlock()
219+ return s.motd
220+}
221+222// Start starts the Spindle server (blocking).
223func (s *Spindle) Start(ctx context.Context) error {
224 // starts a job queue runner in the background
···264 mux := chi.NewRouter()
265266 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
267+ w.Write(s.GetMotdContent())
268 })
269 mux.HandleFunc("/events", s.Events)
270 mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
···174175func (commit Commit) CoAuthors() []object.Signature {
176 var coAuthors []object.Signature
177-178 matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
179180 for _, match := range matches {
181 if len(match) >= 3 {
182 name := strings.TrimSpace(match[1])
183 email := strings.TrimSpace(match[2])
00000184185 coAuthors = append(coAuthors, object.Signature{
186 Name: name,
···174175func (commit Commit) CoAuthors() []object.Signature {
176 var coAuthors []object.Signature
177+ seen := make(map[string]bool)
178 matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
179180 for _, match := range matches {
181 if len(match) >= 3 {
182 name := strings.TrimSpace(match[1])
183 email := strings.TrimSpace(match[2])
184+185+ if seen[email] {
186+ continue
187+ }
188+ seen[email] = true
189190 coAuthors = append(coAuthors, object.Signature{
191 Name: name,
+3
types/diff.go
···7475// used by html elements as a unique ID for hrefs
76func (d *Diff) Id() string {
00077 return d.Name.New
78}
79
···7475// used by html elements as a unique ID for hrefs
76func (d *Diff) Id() string {
77+ if d.IsDelete {
78+ return d.Name.Old
79+ }
80 return d.Name.New
81}
82