Monorepo for Tangled tangled.org

nix: add tap service for spindle

Signed-off-by: Seongmin Lee <git@boltless.me>

boltless.me 3d1607fb e6f933a9

verified
+2
appview/db/follow.go
```diff
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var follow models.Follow
 		var followedAt string
```
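The `defer rows.Close()` additions in this and the following db hunks all apply the same shape. A minimal sketch of the pattern against `database/sql`, with an illustrative table and function (not the appview's actual schema or helpers): close the result set as soon as the query is known to have succeeded, so every return path, including scan errors, releases the underlying connection.

```go
package db

import "database/sql"

type Follow struct {
	UserDid    string
	SubjectDid string
}

// getFollows is a hypothetical query helper showing the defer-Close pattern.
func getFollows(db *sql.DB, subject string) ([]Follow, error) {
	rows, err := db.Query(`select user_did, subject_did from follows where subject_did = ?`, subject)
	if err != nil {
		return nil, err
	}
	defer rows.Close() // released on every return path below

	var follows []Follow
	for rows.Next() {
		var f Follow
		if err := rows.Scan(&f.UserDid, &f.SubjectDid); err != nil {
			return nil, err
		}
		follows = append(follows, f)
	}
	// surface any error that terminated the iteration early
	return follows, rows.Err()
}
```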
+1
appview/db/issues.go
```diff
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
 
 	for rows.Next() {
 		var comment models.IssueComment
```
+1 -1
appview/db/language.go
```diff
 		whereClause,
 	)
 	rows, err := e.Query(query, args...)
-
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute query: %w", err)
 	}
+	defer rows.Close()
 
 	var langs []models.RepoLanguage
 	for rows.Next() {
```
+5
appview/db/profile.go
```diff
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
 
 	profileMap := make(map[string]*models.Profile)
 	for rows.Next() {
···
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
+
 	idxs := make(map[string]int)
 	for did := range profileMap {
 		idxs[did] = 0
···
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
+
 	idxs = make(map[string]int)
 	for did := range profileMap {
 		idxs[did] = 0
```
+1
appview/db/registration.go
```diff
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
 
 	for rows.Next() {
 		var createdAt string
```
+12 -1
appview/db/repos.go
```diff
 		limitClause,
 	)
 	rows, err := e.Query(repoQuery, args...)
-
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute repo query: %w", err)
 	}
+	defer rows.Close()
 
 	for rows.Next() {
 		var repo models.Repo
···
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute labels query: %w", err)
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var repoat, labelat string
 		if err := rows.Scan(&repoat, &labelat); err != nil {
···
 			from repo_languages
 			where repo_at in (%s)
 			and is_default_ref = 1
+			and language <> ''
 		)
 		where rn = 1
 	`,
···
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute lang query: %w", err)
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var repoat, lang string
 		if err := rows.Scan(&repoat, &lang); err != nil {
···
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute star-count query: %w", err)
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var repoat string
 		var count int
···
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute issue-count query: %w", err)
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var repoat string
 		var open, closed int
···
 	if err != nil {
 		return nil, fmt.Errorf("failed to execute pulls-count query: %w", err)
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var repoat string
 		var open, merged, closed, deleted int
```
+1
appview/db/star.go
```diff
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
 
 	starMap := make(map[string][]models.Star)
 	for rows.Next() {
```
-1
appview/notify/merged_notifier.go
```diff
 			v.Call(in)
 		}(n)
 	}
-	wg.Wait()
 }
 
 func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) {
```
+6 -1
appview/pages/funcmap.go
··· 25 25 "github.com/dustin/go-humanize" 26 26 "github.com/go-enry/go-enry/v2" 27 27 "github.com/yuin/goldmark" 28 + emoji "github.com/yuin/goldmark-emoji" 28 29 "tangled.org/core/appview/filetree" 29 30 "tangled.org/core/appview/models" 30 31 "tangled.org/core/appview/pages/markup" ··· 261 262 }, 262 263 "description": func(text string) template.HTML { 263 264 p.rctx.RendererType = markup.RendererTypeDefault 264 - htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New()) 265 + htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New( 266 + goldmark.WithExtensions( 267 + emoji.Emoji, 268 + ), 269 + )) 265 270 sanitized := p.rctx.SanitizeDescription(htmlString) 266 271 return template.HTML(sanitized) 267 272 },
+13 -3
appview/pages/markup/extension/atlink.go
```diff
 	return KindAt
 }
 
-var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
+var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
+var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
 
 type atParser struct{}
···
 	if m == nil {
 		return nil
 	}
+
+	// Check for all links in the markdown to see if the handle found is inside one
+	linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
+	for _, linkMatch := range linksIndexes {
+		if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
+			return nil
+		}
+	}
+
 	atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
 	block.Advance(m[1])
 	node := &AtNode{}
···
 
 func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
 	if entering {
-		w.WriteString(`<a href="/@`)
+		w.WriteString(`<a href="/`)
 		w.WriteString(n.(*AtNode).Handle)
-		w.WriteString(`" class="mention font-bold">`)
+		w.WriteString(`" class="mention">`)
 	} else {
 		w.WriteString("</a>")
 	}
```
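The tightened `atRegexp` only fires on domain-shaped handles preceded by a start-of-line, whitespace, or an opening paren; the new test file below exercises the rendered output, and this tiny standalone sketch (hypothetical inputs, regexp copied from the hunk) shows what the pattern itself accepts:

```go
package main

import (
	"fmt"
	"regexp"
)

// copied verbatim from the hunk above
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)

func main() {
	inputs := []string{
		"hi @user.tngl.sh",     // true: domain-shaped handle after whitespace
		"(@alice.example.com)", // true: opening paren is an accepted prefix
		"mail me at a@b.com",   // false: '@' is preceded by a word character
		"just @plainname",      // false: no dot-separated labels
	}
	for _, s := range inputs {
		fmt.Printf("%-24q %v\n", s, atRegexp.MatchString(s))
	}
}
```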
+2
appview/pages/markup/markdown.go
```diff
 	chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
 	"github.com/alecthomas/chroma/v2/styles"
 	"github.com/yuin/goldmark"
+	"github.com/yuin/goldmark-emoji"
 	highlighting "github.com/yuin/goldmark-highlighting/v2"
 	"github.com/yuin/goldmark/ast"
 	"github.com/yuin/goldmark/extension"
···
 		),
 		callout.CalloutExtention,
 		textension.AtExt,
+		emoji.Emoji,
 	),
 	goldmark.WithParserOptions(
 		parser.WithAutoHeadingID(),
```
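Both registrations wire the same goldmark-emoji extender into goldmark. A self-contained sketch, outside the appview's renderer plumbing, of what the extension does to shortcodes:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/yuin/goldmark"
	emoji "github.com/yuin/goldmark-emoji"
)

func main() {
	// same extension the two hunks above register
	md := goldmark.New(
		goldmark.WithExtensions(emoji.Emoji),
	)

	var buf bytes.Buffer
	if err := md.Convert([]byte("Shipped :tada:"), &buf); err != nil {
		log.Fatal(err)
	}
	// with the default settings this typically renders as: <p>Shipped 🎉</p>
	fmt.Println(buf.String())
}
```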
+121
appview/pages/markup/markdown_test.go
```go
package markup

import (
	"bytes"
	"testing"
)

func TestAtExtension_Rendering(t *testing.T) {
	tests := []struct {
		name     string
		markdown string
		expected string
	}{
		{
			name:     "renders simple at mention",
			markdown: "Hello @user.tngl.sh!",
			expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
		},
		{
			name:     "renders multiple at mentions",
			markdown: "Hi @alice.tngl.sh and @bob.example.com",
			expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
		},
		{
			name:     "renders at mention in parentheses",
			markdown: "Check this out (@user.tngl.sh)",
			expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
		},
		{
			name:     "does not render email",
			markdown: "Contact me at test@example.com",
			expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
		},
		{
			name:     "renders at mention with hyphen",
			markdown: "Follow @user-name.tngl.sh",
			expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
		},
		{
			name:     "renders at mention with numbers",
			markdown: "@user123.test456.social",
			expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
		},
		{
			name:     "at mention at start of line",
			markdown: "@user.tngl.sh is cool",
			expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			md := NewMarkdown()

			var buf bytes.Buffer
			if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
				t.Fatalf("failed to convert markdown: %v", err)
			}

			result := buf.String()
			if result != tt.expected+"\n" {
				t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
			}
		})
	}
}

func TestAtExtension_WithOtherMarkdown(t *testing.T) {
	tests := []struct {
		name     string
		markdown string
		contains string
	}{
		{
			name:     "at mention with bold",
			markdown: "**Hello @user.tngl.sh**",
			contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
		},
		{
			name:     "at mention with italic",
			markdown: "*Check @user.tngl.sh*",
			contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
		},
		{
			name:     "at mention in list",
			markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
			contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
		},
		{
			name:     "at mention in link",
			markdown: "[@regnault.dev](https://regnault.dev)",
			contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
		},
		{
			name:     "at mention in link again",
			markdown: "[check out @regnault.dev](https://regnault.dev)",
			contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
		},
		{
			name:     "at mention in link again, multiline",
			markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
			contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			md := NewMarkdown()

			var buf bytes.Buffer
			if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
				t.Fatalf("failed to convert markdown: %v", err)
			}

			result := buf.String()
			if !bytes.Contains([]byte(result), []byte(tt.contains)) {
				t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
			}
		})
	}
}
```
+1 -1
appview/pages/pages.go
```diff
 }
 
 func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error {
-	return p.executePlain("fragments/starBtn", w, params)
+	return p.executePlain("fragments/starBtn-oob", w, params)
 }
 
 type RepoIndexParams struct {
```
+1 -1
appview/pages/templates/banner.html
··· 30 30 <div class="mx-6"> 31 31 These services may not be fully accessible until upgraded. 32 32 <a class="underline text-red-800 dark:text-red-200" 33 - href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md"> 33 + href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles"> 34 34 Click to read the upgrade guide</a>. 35 35 </div> 36 36 </details>
+5
appview/pages/templates/fragments/starBtn-oob.html
··· 1 + {{ define "fragments/starBtn-oob" }} 2 + <div hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'> 3 + {{ template "fragments/starBtn" . }} 4 + </div> 5 + {{ end }}
+1 -3
appview/pages/templates/fragments/starBtn.html
··· 1 1 {{ define "fragments/starBtn" }} 2 + {{/* NOTE: this fragment is always replaced with hx-swap-oob */}} 2 3 <button 3 4 id="starBtn" 4 5 class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group" ··· 10 11 {{ end }} 11 12 12 13 hx-trigger="click" 13 - hx-target="this" 14 - hx-swap="outerHTML" 15 - hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]' 16 14 hx-disabled-elt="#starBtn" 17 15 > 18 16 {{ if .IsStarred }}
+1 -1
appview/pages/templates/knots/index.html
··· 105 105 {{ define "docsButton" }} 106 106 <a 107 107 class="btn flex items-center gap-2" 108 - href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 108 + href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide"> 109 109 {{ i "book" "size-4" }} 110 110 docs 111 111 </a>
+2 -2
appview/pages/templates/layouts/fragments/footer.html
··· 26 26 <div class="flex flex-col gap-1"> 27 27 <div class="{{ $headerStyle }}">resources</div> 28 28 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 29 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 29 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 30 30 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 31 31 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 32 32 </div> ··· 73 73 <div class="flex flex-col gap-1"> 74 74 <div class="{{ $headerStyle }}">resources</div> 75 75 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 76 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 76 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 77 77 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 78 78 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 79 79 </div>
+1 -1
appview/pages/templates/repo/empty.html
```diff
 {{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }}
 	{{ $knot := .RepoInfo.Knot }}
 	{{ if eq $knot "knot1.tangled.sh" }}
-		{{ $knot = "tangled.sh" }}
+		{{ $knot = "tangled.org" }}
 	{{ end }}
 	<div class="w-full flex place-content-center">
 		<div class="py-6 w-fit flex flex-col gap-4">
```
+6 -6
appview/pages/templates/repo/fragments/backlinks.html
··· 14 14 <div class="flex gap-2 items-center"> 15 15 {{ if .State.IsClosed }} 16 16 <span class="text-gray-500 dark:text-gray-400"> 17 - {{ i "ban" "w-4 h-4" }} 17 + {{ i "ban" "size-3" }} 18 18 </span> 19 19 {{ else if eq .Kind.String "issues" }} 20 20 <span class="text-green-600 dark:text-green-500"> 21 - {{ i "circle-dot" "w-4 h-4" }} 21 + {{ i "circle-dot" "size-3" }} 22 22 </span> 23 23 {{ else if .State.IsOpen }} 24 24 <span class="text-green-600 dark:text-green-500"> 25 - {{ i "git-pull-request" "w-4 h-4" }} 25 + {{ i "git-pull-request" "size-3" }} 26 26 </span> 27 27 {{ else if .State.IsMerged }} 28 28 <span class="text-purple-600 dark:text-purple-500"> 29 - {{ i "git-merge" "w-4 h-4" }} 29 + {{ i "git-merge" "size-3" }} 30 30 </span> 31 31 {{ else }} 32 32 <span class="text-gray-600 dark:text-gray-300"> 33 - {{ i "git-pull-request-closed" "w-4 h-4" }} 33 + {{ i "git-pull-request-closed" "size-3" }} 34 34 </span> 35 35 {{ end }} 36 - <a href="{{ . }}"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a> 36 + <a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a> 37 37 </div> 38 38 {{ if not (eq $.RepoInfo.FullName $repoUrl) }} 39 39 <div>
+1 -1
appview/pages/templates/repo/pipelines/pipelines.html
```diff
 </p>
 <p>
 	<span class="{{ $bullet }}">2</span>Configure your CI/CD
-	<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
+	<a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
 </p>
 <p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
```
+1 -1
appview/pages/templates/repo/settings/pipelines.html
··· 22 22 <p class="text-gray-500 dark:text-gray-400"> 23 23 Choose a spindle to execute your workflows on. Only repository owners 24 24 can configure spindles. Spindles can be selfhosted, 25 - <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 25 + <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 26 26 click to learn more. 27 27 </a> 28 28 </p>
+1 -1
appview/pages/templates/spindles/index.html
··· 102 102 {{ define "docsButton" }} 103 103 <a 104 104 class="btn flex items-center gap-2" 105 - href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 105 + href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 106 106 {{ i "book" "size-4" }} 107 107 docs 108 108 </a>
+1 -1
appview/pages/templates/strings/string.html
··· 17 17 <span class="select-none">/</span> 18 18 <a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a> 19 19 </div> 20 - <div class="flex gap-2 text-base"> 20 + <div class="flex gap-2 items-stretch text-base"> 21 21 {{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }} 22 22 <a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group" 23 23 hx-boost="true"
+2 -2
appview/pages/templates/user/fragments/followCard.html
··· 6 6 <img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" /> 7 7 </div> 8 8 9 - <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full"> 9 + <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0"> 10 10 <div class="flex-1 min-h-0 justify-around flex flex-col"> 11 11 <a href="/{{ $userIdent }}"> 12 12 <span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span> 13 13 </a> 14 14 {{ with .Profile }} 15 - <p class="text-sm pb-2 md:pb-2">{{.Description}}</p> 15 + <p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p> 16 16 {{ end }} 17 17 <div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full"> 18 18 <span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+8
appview/pulls/pulls.go
··· 1366 1366 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1367 1367 return 1368 1368 } 1369 + 1369 1370 } 1370 1371 1371 1372 if err = tx.Commit(); err != nil { 1372 1373 log.Println("failed to create pull request", err) 1373 1374 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1374 1375 return 1376 + } 1377 + 1378 + // notify about each pull 1379 + // 1380 + // this is performed after tx.Commit, because it could result in a locked DB otherwise 1381 + for _, p := range stack { 1382 + s.notifier.NewPull(r.Context(), p) 1375 1383 } 1376 1384 1377 1385 ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
+1529
docs/DOCS.md
··· 1 + --- 2 + title: Tangled docs 3 + author: The Tangled Contributors 4 + date: 21 Sun, Dec 2025 5 + --- 6 + 7 + # Introduction 8 + 9 + Tangled is a decentralized code hosting and collaboration 10 + platform. Every component of Tangled is open-source and 11 + self-hostable. [tangled.org](https://tangled.org) also 12 + provides hosting and CI services that are free to use. 13 + 14 + There are several models for decentralized code 15 + collaboration platforms, ranging from ActivityPub’s 16 + (Forgejo) federated model, to Radicle’s entirely P2P model. 17 + Our approach attempts to be the best of both worlds by 18 + adopting the AT Protocol—a protocol for building decentralized 19 + social applications with a central identity 20 + 21 + Our approach to this is the idea of “knots”. Knots are 22 + lightweight, headless servers that enable users to host Git 23 + repositories with ease. Knots are designed for either single 24 + or multi-tenant use which is perfect for self-hosting on a 25 + Raspberry Pi at home, or larger “community” servers. By 26 + default, Tangled provides managed knots where you can host 27 + your repositories for free. 28 + 29 + The appview at tangled.org acts as a consolidated "view" 30 + into the whole network, allowing users to access, clone and 31 + contribute to repositories hosted across different knots 32 + seamlessly. 33 + 34 + # Quick start guide 35 + 36 + ## Login or sign up 37 + 38 + You can [login](https://tangled.org) by using your AT Protocol 39 + account. If you are unclear on what that means, simply head 40 + to the [signup](https://tangled.org/signup) page and create 41 + an account. By doing so, you will be choosing Tangled as 42 + your account provider (you will be granted a handle of the 43 + form `user.tngl.sh`). 44 + 45 + In the AT Protocol network, users are free to choose their account 46 + provider (known as a "Personal Data Service", or PDS), and 47 + login to applications that support AT accounts. 48 + 49 + You can think of it as "one account for all of the atmosphere"! 50 + 51 + If you already have an AT account (you may have one if you 52 + signed up to Bluesky, for example), you can login with the 53 + same handle on Tangled (so just use `user.bsky.social` on 54 + the login page). 55 + 56 + ## Add an SSH key 57 + 58 + Once you are logged in, you can start creating repositories 59 + and pushing code. Tangled supports pushing git repositories 60 + over SSH. 61 + 62 + First, you'll need to generate an SSH key if you don't 63 + already have one: 64 + 65 + ```bash 66 + ssh-keygen -t ed25519 -C "foo@bar.com" 67 + ``` 68 + 69 + When prompted, save the key to the default location 70 + (`~/.ssh/id_ed25519`) and optionally set a passphrase. 71 + 72 + Copy your public key to your clipboard: 73 + 74 + ```bash 75 + # on X11 76 + cat ~/.ssh/id_ed25519.pub | xclip -sel c 77 + 78 + # on wayland 79 + cat ~/.ssh/id_ed25519.pub | wl-copy 80 + 81 + # on macos 82 + cat ~/.ssh/id_ed25519.pub | pbcopy 83 + ``` 84 + 85 + Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key', 86 + paste your public key, give it a descriptive name, and hit 87 + save. 88 + 89 + ## Create a repository 90 + 91 + Once your SSH key is added, create your first repository: 92 + 93 + 1. Hit the green `+` icon on the topbar, and select 94 + repository 95 + 2. Enter a repository name 96 + 3. Add a description 97 + 4. Choose a knotserver to host this repository on 98 + 5. Hit create 99 + 100 + Knots are self-hostable, lightweight Git servers that can 101 + host your repository. 
Unlike traditional code forges, your 102 + code can live on any server. Read the [Knots](TODO) section 103 + for more. 104 + 105 + ## Configure SSH 106 + 107 + To ensure Git uses the correct SSH key and connects smoothly 108 + to Tangled, add this configuration to your `~/.ssh/config` 109 + file: 110 + 111 + ``` 112 + Host tangled.org 113 + Hostname tangled.org 114 + User git 115 + IdentityFile ~/.ssh/id_ed25519 116 + AddressFamily inet 117 + ``` 118 + 119 + This tells SSH to use your specific key when connecting to 120 + Tangled and prevents authentication issues if you have 121 + multiple SSH keys. 122 + 123 + Note that this configuration only works for knotservers that 124 + are hosted by tangled.org. If you use a custom knot, refer 125 + to the [Knots](TODO) section. 126 + 127 + ## Push your first repository 128 + 129 + Initialize a new Git repository: 130 + 131 + ```bash 132 + mkdir my-project 133 + cd my-project 134 + 135 + git init 136 + echo "# My Project" > README.md 137 + ``` 138 + 139 + Add some content and push! 140 + 141 + ```bash 142 + git add README.md 143 + git commit -m "Initial commit" 144 + git remote add origin git@tangled.org:user.tngl.sh/my-project 145 + git push -u origin main 146 + ``` 147 + 148 + That's it! Your code is now hosted on Tangled. 149 + 150 + ## Migrating an existing repository 151 + 152 + Moving your repositories from GitHub, GitLab, Bitbucket, or 153 + any other Git forge to Tangled is straightforward. You'll 154 + simply change your repository's remote URL. At the moment, 155 + Tangled does not have any tooling to migrate data such as 156 + GitHub issues or pull requests. 157 + 158 + First, create a new repository on tangled.org as described 159 + in the [Quick Start Guide](#create-a-repository). 160 + 161 + Navigate to your existing local repository: 162 + 163 + ```bash 164 + cd /path/to/your/existing/repo 165 + ``` 166 + 167 + You can inspect your existing Git remote like so: 168 + 169 + ```bash 170 + git remote -v 171 + ``` 172 + 173 + You'll see something like: 174 + 175 + ``` 176 + origin git@github.com:username/my-project (fetch) 177 + origin git@github.com:username/my-project (push) 178 + ``` 179 + 180 + Update the remote URL to point to tangled: 181 + 182 + ```bash 183 + git remote set-url origin git@tangled.org:user.tngl.sh/my-project 184 + ``` 185 + 186 + Verify the change: 187 + 188 + ```bash 189 + git remote -v 190 + ``` 191 + 192 + You should now see: 193 + 194 + ``` 195 + origin git@tangled.org:user.tngl.sh/my-project (fetch) 196 + origin git@tangled.org:user.tngl.sh/my-project (push) 197 + ``` 198 + 199 + Push all your branches and tags to Tangled: 200 + 201 + ```bash 202 + git push -u origin --all 203 + git push -u origin --tags 204 + ``` 205 + 206 + Your repository is now migrated to Tangled! All commit 207 + history, branches, and tags have been preserved. 208 + 209 + ## Mirroring a repository to Tangled 210 + 211 + If you want to maintain your repository on multiple forges 212 + simultaneously, for example, keeping your primary repository 213 + on GitHub while mirroring to Tangled for backup or 214 + redundancy, you can do so by adding multiple remotes. 215 + 216 + You can configure your local repository to push to both 217 + Tangled and, say, GitHub. 
You may already have the following 218 + setup: 219 + 220 + ``` 221 + $ git remote -v 222 + origin git@github.com:username/my-project (fetch) 223 + origin git@github.com:username/my-project (push) 224 + ``` 225 + 226 + Now add Tangled as an additional push URL to the same 227 + remote: 228 + 229 + ```bash 230 + git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project 231 + ``` 232 + 233 + You also need to re-add the original URL as a push 234 + destination (Git replaces the push URL when you use `--add` 235 + the first time): 236 + 237 + ```bash 238 + git remote set-url --add --push origin git@github.com:username/my-project 239 + ``` 240 + 241 + Verify your configuration: 242 + 243 + ``` 244 + $ git remote -v 245 + origin git@github.com:username/repo (fetch) 246 + origin git@tangled.org:username/my-project (push) 247 + origin git@github.com:username/repo (push) 248 + ``` 249 + 250 + Notice that there's one fetch URL (the primary remote) and 251 + two push URLs. Now, whenever you push, Git will 252 + automatically push to both remotes: 253 + 254 + ```bash 255 + git push origin main 256 + ``` 257 + 258 + This single command pushes your `main` branch to both GitHub 259 + and Tangled simultaneously. 260 + 261 + To push all branches and tags: 262 + 263 + ```bash 264 + git push origin --all 265 + git push origin --tags 266 + ``` 267 + 268 + If you prefer more control over which remote you push to, 269 + you can maintain separate remotes: 270 + 271 + ```bash 272 + git remote add github git@github.com:username/my-project 273 + git remote add tangled git@tangled.org:username/my-project 274 + ``` 275 + 276 + Then push to each explicitly: 277 + 278 + ```bash 279 + git push github main 280 + git push tangled main 281 + ``` 282 + 283 + # Knot self-hosting guide 284 + 285 + So you want to run your own knot server? Great! Here are a few prerequisites: 286 + 287 + 1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind. 288 + 2. A (sub)domain name. People generally use `knot.example.com`. 289 + 3. A valid SSL certificate for your domain. 290 + 291 + ## NixOS 292 + 293 + Refer to the [knot 294 + module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix) 295 + for a full list of options. Sample configurations: 296 + 297 + - [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85) 298 + - [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25) 299 + 300 + ## Docker 301 + 302 + Refer to 303 + [@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker). 304 + Note that this is community maintained. 305 + 306 + ## Manual setup 307 + 308 + First, clone this repository: 309 + 310 + ``` 311 + git clone https://tangled.org/@tangled.org/core 312 + ``` 313 + 314 + Then, build the `knot` CLI. This is the knot administration 315 + and operation tool. 
For the purpose of this guide, we're 316 + only concerned with these subcommands: 317 + 318 + * `knot server`: the main knot server process, typically 319 + run as a supervised service 320 + * `knot guard`: handles role-based access control for git 321 + over SSH (you'll never have to run this yourself) 322 + * `knot keys`: fetches SSH keys associated with your knot; 323 + we'll use this to generate the SSH 324 + `AuthorizedKeysCommand` 325 + 326 + ``` 327 + cd core 328 + export CGO_ENABLED=1 329 + go build -o knot ./cmd/knot 330 + ``` 331 + 332 + Next, move the `knot` binary to a location owned by `root` -- 333 + `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 334 + 335 + ``` 336 + sudo mv knot /usr/local/bin/knot 337 + sudo chown root:root /usr/local/bin/knot 338 + ``` 339 + 340 + This is necessary because SSH `AuthorizedKeysCommand` requires [really 341 + specific permissions](https://stackoverflow.com/a/27638306). The 342 + `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 343 + retrieve a user's public SSH keys dynamically for authentication. Let's 344 + set that up. 345 + 346 + ``` 347 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 348 + Match User git 349 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 350 + AuthorizedKeysCommandUser nobody 351 + EOF 352 + ``` 353 + 354 + Then, reload `sshd`: 355 + 356 + ``` 357 + sudo systemctl reload ssh 358 + ``` 359 + 360 + Next, create the `git` user. We'll use the `git` user's home directory 361 + to store repositories: 362 + 363 + ``` 364 + sudo adduser git 365 + ``` 366 + 367 + Create `/home/git/.knot.env` with the following, updating the values as 368 + necessary. The `KNOT_SERVER_OWNER` should be set to your 369 + DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 370 + 371 + ``` 372 + KNOT_REPO_SCAN_PATH=/home/git 373 + KNOT_SERVER_HOSTNAME=knot.example.com 374 + APPVIEW_ENDPOINT=https://tangled.org 375 + KNOT_SERVER_OWNER=did:plc:foobar 376 + KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 377 + KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 378 + ``` 379 + 380 + If you run a Linux distribution that uses systemd, you can use the provided 381 + service file to run the server. Copy 382 + [`knotserver.service`](/systemd/knotserver.service) 383 + to `/etc/systemd/system/`. Then, run: 384 + 385 + ``` 386 + systemctl enable knotserver 387 + systemctl start knotserver 388 + ``` 389 + 390 + The last step is to configure a reverse proxy like Nginx or Caddy to front your 391 + knot. Here's an example configuration for Nginx: 392 + 393 + ``` 394 + server { 395 + listen 80; 396 + listen [::]:80; 397 + server_name knot.example.com; 398 + 399 + location / { 400 + proxy_pass http://localhost:5555; 401 + proxy_set_header Host $host; 402 + proxy_set_header X-Real-IP $remote_addr; 403 + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 404 + proxy_set_header X-Forwarded-Proto $scheme; 405 + } 406 + 407 + # wss endpoint for git events 408 + location /events { 409 + proxy_set_header X-Forwarded-For $remote_addr; 410 + proxy_set_header Host $http_host; 411 + proxy_set_header Upgrade websocket; 412 + proxy_set_header Connection Upgrade; 413 + proxy_pass http://localhost:5555; 414 + } 415 + # additional config for SSL/TLS go here. 416 + } 417 + 418 + ``` 419 + 420 + Remember to use Let's Encrypt or similar to procure a certificate for your 421 + knot domain. 422 + 423 + You should now have a running knot server! 
You can finalize 424 + your registration by hitting the `verify` button on the 425 + [/settings/knots](https://tangled.org/settings/knots) page. This simply creates 426 + a record on your PDS to announce the existence of the knot. 427 + 428 + ### Custom paths 429 + 430 + (This section applies to manual setup only. Docker users should edit the mounts 431 + in `docker-compose.yml` instead.) 432 + 433 + Right now, the database and repositories of your knot lives in `/home/git`. You 434 + can move these paths if you'd like to store them in another folder. Be careful 435 + when adjusting these paths: 436 + 437 + * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent 438 + any possible side effects. Remember to restart it once you're done. 439 + * Make backups before moving in case something goes wrong. 440 + * Make sure the `git` user can read and write from the new paths. 441 + 442 + #### Database 443 + 444 + As an example, let's say the current database is at `/home/git/knotserver.db`, 445 + and we want to move it to `/home/git/database/knotserver.db`. 446 + 447 + Copy the current database to the new location. Make sure to copy the `.db-shm` 448 + and `.db-wal` files if they exist. 449 + 450 + ``` 451 + mkdir /home/git/database 452 + cp /home/git/knotserver.db* /home/git/database 453 + ``` 454 + 455 + In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to 456 + the new file path (_not_ the directory): 457 + 458 + ``` 459 + KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db 460 + ``` 461 + 462 + #### Repositories 463 + 464 + As an example, let's say the repositories are currently in `/home/git`, and we 465 + want to move them into `/home/git/repositories`. 466 + 467 + Create the new folder, then move the existing repositories (if there are any): 468 + 469 + ``` 470 + mkdir /home/git/repositories 471 + # move all DIDs into the new folder; these will vary for you! 472 + mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories 473 + ``` 474 + 475 + In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 476 + to the new directory: 477 + 478 + ``` 479 + KNOT_REPO_SCAN_PATH=/home/git/repositories 480 + ``` 481 + 482 + Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 483 + repository path: 484 + 485 + ``` 486 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 487 + Match User git 488 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 489 + AuthorizedKeysCommandUser nobody 490 + EOF 491 + ``` 492 + 493 + Make sure to restart your SSH server! 494 + 495 + #### MOTD (message of the day) 496 + 497 + To configure the MOTD used ("Welcome to this knot!" by default), edit the 498 + `/home/git/motd` file: 499 + 500 + ``` 501 + printf "Hi from this knot!\n" > /home/git/motd 502 + ``` 503 + 504 + Note that you should add a newline at the end if setting a non-empty message 505 + since the knot won't do this for you. 506 + 507 + # Spindles 508 + 509 + ## Pipelines 510 + 511 + Spindle workflows allow you to write CI/CD pipelines in a 512 + simple format. They're located in the `.tangled/workflows` 513 + directory at the root of your repository, and are defined 514 + using YAML. 515 + 516 + The fields are: 517 + 518 + - [Trigger](#trigger): A **required** field that defines 519 + when a workflow should be triggered. 520 + - [Engine](#engine): A **required** field that defines which 521 + engine a workflow should run on. 
522 + - [Clone options](#clone-options): An **optional** field 523 + that defines how the repository should be cloned. 524 + - [Dependencies](#dependencies): An **optional** field that 525 + allows you to list dependencies you may need. 526 + - [Environment](#environment): An **optional** field that 527 + allows you to define environment variables. 528 + - [Steps](#steps): An **optional** field that allows you to 529 + define what steps should run in the workflow. 530 + 531 + ### Trigger 532 + 533 + The first thing to add to a workflow is the trigger, which 534 + defines when a workflow runs. This is defined using a `when` 535 + field, which takes in a list of conditions. Each condition 536 + has the following fields: 537 + 538 + - `event`: This is a **required** field that defines when 539 + your workflow should run. It's a list that can take one or 540 + more of the following values: 541 + - `push`: The workflow should run every time a commit is 542 + pushed to the repository. 543 + - `pull_request`: The workflow should run every time a 544 + pull request is made or updated. 545 + - `manual`: The workflow can be triggered manually. 546 + - `branch`: Defines which branches the workflow should run 547 + for. If used with the `push` event, commits to the 548 + branch(es) listed here will trigger the workflow. If used 549 + with the `pull_request` event, updates to pull requests 550 + targeting the branch(es) listed here will trigger the 551 + workflow. This field has no effect with the `manual` 552 + event. Supports glob patterns using `*` and `**` (e.g., 553 + `main`, `develop`, `release-*`). Either `branch` or `tag` 554 + (or both) must be specified for `push` events. 555 + - `tag`: Defines which tags the workflow should run for. 556 + Only used with the `push` event - when tags matching the 557 + pattern(s) listed here are pushed, the workflow will 558 + trigger. This field has no effect with `pull_request` or 559 + `manual` events. Supports glob patterns using `*` and `**` 560 + (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or 561 + `tag` (or both) must be specified for `push` events. 562 + 563 + For example, if you'd like to define a workflow that runs 564 + when commits are pushed to the `main` and `develop` 565 + branches, or when pull requests that target the `main` 566 + branch are updated, or manually, you can do so with: 567 + 568 + ```yaml 569 + when: 570 + - event: ["push", "manual"] 571 + branch: ["main", "develop"] 572 + - event: ["pull_request"] 573 + branch: ["main"] 574 + ``` 575 + 576 + You can also trigger workflows on tag pushes. For instance, 577 + to run a deployment workflow when tags matching `v*` are 578 + pushed: 579 + 580 + ```yaml 581 + when: 582 + - event: ["push"] 583 + tag: ["v*"] 584 + ``` 585 + 586 + You can even combine branch and tag patterns in a single 587 + constraint (the workflow triggers if either matches): 588 + 589 + ```yaml 590 + when: 591 + - event: ["push"] 592 + branch: ["main", "release-*"] 593 + tag: ["v*", "stable"] 594 + ``` 595 + 596 + ### Engine 597 + 598 + Next is the engine on which the workflow should run, defined 599 + using the **required** `engine` field. The currently 600 + supported engines are: 601 + 602 + - `nixery`: This uses an instance of 603 + [Nixery](https://nixery.dev) to run steps, which allows 604 + you to add [dependencies](#dependencies) from 605 + Nixpkgs (https://github.com/NixOS/nixpkgs). 
You can 606 + search for packages on https://search.nixos.org, and 607 + there's a pretty good chance the package(s) you're looking 608 + for will be there. 609 + 610 + Example: 611 + 612 + ```yaml 613 + engine: "nixery" 614 + ``` 615 + 616 + ### Clone options 617 + 618 + When a workflow starts, the first step is to clone the 619 + repository. You can customize this behavior using the 620 + **optional** `clone` field. It has the following fields: 621 + 622 + - `skip`: Setting this to `true` will skip cloning the 623 + repository. This can be useful if your workflow is doing 624 + something that doesn't require anything from the 625 + repository itself. This is `false` by default. 626 + - `depth`: This sets the number of commits, or the "clone 627 + depth", to fetch from the repository. For example, if you 628 + set this to 2, the last 2 commits will be fetched. By 629 + default, the depth is set to 1, meaning only the most 630 + recent commit will be fetched, which is the commit that 631 + triggered the workflow. 632 + - `submodules`: If you use Git submodules 633 + (https://git-scm.com/book/en/v2/Git-Tools-Submodules) 634 + in your repository, setting this field to `true` will 635 + recursively fetch all submodules. This is `false` by 636 + default. 637 + 638 + The default settings are: 639 + 640 + ```yaml 641 + clone: 642 + skip: false 643 + depth: 1 644 + submodules: false 645 + ``` 646 + 647 + ### Dependencies 648 + 649 + Usually when you're running a workflow, you'll need 650 + additional dependencies. The `dependencies` field lets you 651 + define which dependencies to get, and from where. It's a 652 + key-value map, with the key being the registry to fetch 653 + dependencies from, and the value being the list of 654 + dependencies to fetch. 655 + 656 + Say you want to fetch Node.js and Go from `nixpkgs`, and a 657 + package called `my_pkg` you've made from your own registry 658 + at your repository at 659 + `https://tangled.org/@example.com/my_pkg`. You can define 660 + those dependencies like so: 661 + 662 + ```yaml 663 + dependencies: 664 + # nixpkgs 665 + nixpkgs: 666 + - nodejs 667 + - go 668 + # custom registry 669 + git+https://tangled.org/@example.com/my_pkg: 670 + - my_pkg 671 + ``` 672 + 673 + Now these dependencies are available to use in your 674 + workflow! 675 + 676 + ### Environment 677 + 678 + The `environment` field allows you define environment 679 + variables that will be available throughout the entire 680 + workflow. **Do not put secrets here, these environment 681 + variables are visible to anyone viewing the repository. You 682 + can add secrets for pipelines in your repository's 683 + settings.** 684 + 685 + Example: 686 + 687 + ```yaml 688 + environment: 689 + GOOS: "linux" 690 + GOARCH: "arm64" 691 + NODE_ENV: "production" 692 + MY_ENV_VAR: "MY_ENV_VALUE" 693 + ``` 694 + 695 + ### Steps 696 + 697 + The `steps` field allows you to define what steps should run 698 + in the workflow. It's a list of step objects, each with the 699 + following fields: 700 + 701 + - `name`: This field allows you to give your step a name. 702 + This name is visible in your workflow runs, and is used to 703 + describe what the step is doing. 704 + - `command`: This field allows you to define a command to 705 + run in that step. The step is run in a Bash shell, and the 706 + logs from the command will be visible in the pipelines 707 + page on the Tangled website. The 708 + [dependencies](#dependencies) you added will be available 709 + to use here. 
710 + - `environment`: Similar to the global 711 + [environment](#environment) config, this **optional** 712 + field is a key-value map that allows you to set 713 + environment variables for the step. **Do not put secrets 714 + here, these environment variables are visible to anyone 715 + viewing the repository. You can add secrets for pipelines 716 + in your repository's settings.** 717 + 718 + Example: 719 + 720 + ```yaml 721 + steps: 722 + - name: "Build backend" 723 + command: "go build" 724 + environment: 725 + GOOS: "darwin" 726 + GOARCH: "arm64" 727 + - name: "Build frontend" 728 + command: "npm run build" 729 + environment: 730 + NODE_ENV: "production" 731 + ``` 732 + 733 + ### Complete workflow 734 + 735 + ```yaml 736 + # .tangled/workflows/build.yml 737 + 738 + when: 739 + - event: ["push", "manual"] 740 + branch: ["main", "develop"] 741 + - event: ["pull_request"] 742 + branch: ["main"] 743 + 744 + engine: "nixery" 745 + 746 + # using the default values 747 + clone: 748 + skip: false 749 + depth: 1 750 + submodules: false 751 + 752 + dependencies: 753 + # nixpkgs 754 + nixpkgs: 755 + - nodejs 756 + - go 757 + # custom registry 758 + git+https://tangled.org/@example.com/my_pkg: 759 + - my_pkg 760 + 761 + environment: 762 + GOOS: "linux" 763 + GOARCH: "arm64" 764 + NODE_ENV: "production" 765 + MY_ENV_VAR: "MY_ENV_VALUE" 766 + 767 + steps: 768 + - name: "Build backend" 769 + command: "go build" 770 + environment: 771 + GOOS: "darwin" 772 + GOARCH: "arm64" 773 + - name: "Build frontend" 774 + command: "npm run build" 775 + environment: 776 + NODE_ENV: "production" 777 + ``` 778 + 779 + If you want another example of a workflow, you can look at 780 + the one [Tangled uses to build the 781 + project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml). 782 + 783 + ## Self-hosting guide 784 + 785 + ### Prerequisites 786 + 787 + * Go 788 + * Docker (the only supported backend currently) 789 + 790 + ### Configuration 791 + 792 + Spindle is configured using environment variables. The following environment variables are available: 793 + 794 + * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 795 + * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 796 + * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 797 + * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 798 + * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 799 + * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 800 + * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 801 + * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 802 + * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 803 + 804 + ### Running spindle 805 + 806 + 1. **Set the environment variables.** For example: 807 + 808 + ```shell 809 + export SPINDLE_SERVER_HOSTNAME="your-hostname" 810 + export SPINDLE_SERVER_OWNER="your-did" 811 + ``` 812 + 813 + 2. **Build the Spindle binary.** 814 + 815 + ```shell 816 + cd core 817 + go mod download 818 + go build -o cmd/spindle/spindle cmd/spindle/main.go 819 + ``` 820 + 821 + 3. 
**Create the log directory.** 822 + 823 + ```shell 824 + sudo mkdir -p /var/log/spindle 825 + sudo chown $USER:$USER -R /var/log/spindle 826 + ``` 827 + 828 + 4. **Run the Spindle binary.** 829 + 830 + ```shell 831 + ./cmd/spindle/spindle 832 + ``` 833 + 834 + Spindle will now start, connect to the Jetstream server, and begin processing pipelines. 835 + 836 + ## Architecture 837 + 838 + Spindle is a small CI runner service. Here's a high-level overview of how it operates: 839 + 840 + * Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and 841 + [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream. 842 + * When a new repo record comes through (typically when you add a spindle to a 843 + repo from the settings), spindle then resolves the underlying knot and 844 + subscribes to repo events (see: 845 + [`sh.tangled.pipeline`](/lexicons/pipeline.json)). 846 + * The spindle engine then handles execution of the pipeline, with results and 847 + logs beamed on the spindle event stream over WebSocket 848 + 849 + ### The engine 850 + 851 + At present, the only supported backend is Docker (and Podman, if Docker 852 + compatibility is enabled, so that `/run/docker.sock` is created). spindle 853 + executes each step in the pipeline in a fresh container, with state persisted 854 + across steps within the `/tangled/workspace` directory. 855 + 856 + The base image for the container is constructed on the fly using 857 + [Nixery](https://nixery.dev), which is handy for caching layers for frequently 858 + used packages. 859 + 860 + The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines). 861 + 862 + ## Secrets with openbao 863 + 864 + This document covers setting up spindle to use OpenBao for secrets 865 + management via OpenBao Proxy instead of the default SQLite backend. 866 + 867 + ### Overview 868 + 869 + Spindle now uses OpenBao Proxy for secrets management. The proxy handles 870 + authentication automatically using AppRole credentials, while spindle 871 + connects to the local proxy instead of directly to the OpenBao server. 872 + 873 + This approach provides better security, automatic token renewal, and 874 + simplified application code. 875 + 876 + ### Installation 877 + 878 + Install OpenBao from Nixpkgs: 879 + 880 + ```bash 881 + nix shell nixpkgs#openbao # for a local server 882 + ``` 883 + 884 + ### Setup 885 + 886 + The setup process can is documented for both local development and production. 887 + 888 + #### Local development 889 + 890 + Start OpenBao in dev mode: 891 + 892 + ```bash 893 + bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201 894 + ``` 895 + 896 + This starts OpenBao on `http://localhost:8201` with a root token. 897 + 898 + Set up environment for bao CLI: 899 + 900 + ```bash 901 + export BAO_ADDR=http://localhost:8200 902 + export BAO_TOKEN=root 903 + ``` 904 + 905 + #### Production 906 + 907 + You would typically use a systemd service with a 908 + configuration file. Refer to 909 + [@tangled.org/infra](https://tangled.org/@tangled.org/infra) 910 + for how this can be achieved using Nix. 911 + 912 + Then, initialize the bao server: 913 + 914 + ```bash 915 + bao operator init -key-shares=1 -key-threshold=1 916 + ``` 917 + 918 + This will print out an unseal key and a root key. Save them 919 + somewhere (like a password manager). 
Then unseal the vault 920 + to begin setting it up: 921 + 922 + ```bash 923 + bao operator unseal <unseal_key> 924 + ``` 925 + 926 + All steps below remain the same across both dev and 927 + production setups. 928 + 929 + #### Configure openbao server 930 + 931 + Create the spindle KV mount: 932 + 933 + ```bash 934 + bao secrets enable -path=spindle -version=2 kv 935 + ``` 936 + 937 + Set up AppRole authentication and policy: 938 + 939 + Create a policy file `spindle-policy.hcl`: 940 + 941 + ```hcl 942 + # Full access to spindle KV v2 data 943 + path "spindle/data/*" { 944 + capabilities = ["create", "read", "update", "delete"] 945 + } 946 + 947 + # Access to metadata for listing and management 948 + path "spindle/metadata/*" { 949 + capabilities = ["list", "read", "delete", "update"] 950 + } 951 + 952 + # Allow listing at root level 953 + path "spindle/" { 954 + capabilities = ["list"] 955 + } 956 + 957 + # Required for connection testing and health checks 958 + path "auth/token/lookup-self" { 959 + capabilities = ["read"] 960 + } 961 + ``` 962 + 963 + Apply the policy and create an AppRole: 964 + 965 + ```bash 966 + bao policy write spindle-policy spindle-policy.hcl 967 + bao auth enable approle 968 + bao write auth/approle/role/spindle \ 969 + token_policies="spindle-policy" \ 970 + token_ttl=1h \ 971 + token_max_ttl=4h \ 972 + bind_secret_id=true \ 973 + secret_id_ttl=0 \ 974 + secret_id_num_uses=0 975 + ``` 976 + 977 + Get the credentials: 978 + 979 + ```bash 980 + # Get role ID (static) 981 + ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 982 + 983 + # Generate secret ID 984 + SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 985 + 986 + echo "Role ID: $ROLE_ID" 987 + echo "Secret ID: $SECRET_ID" 988 + ``` 989 + 990 + #### Create proxy configuration 991 + 992 + Create the credential files: 993 + 994 + ```bash 995 + # Create directory for OpenBao files 996 + mkdir -p /tmp/openbao 997 + 998 + # Save credentials 999 + echo "$ROLE_ID" > /tmp/openbao/role-id 1000 + echo "$SECRET_ID" > /tmp/openbao/secret-id 1001 + chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 1002 + ``` 1003 + 1004 + Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 1005 + 1006 + ```hcl 1007 + # OpenBao server connection 1008 + vault { 1009 + address = "http://localhost:8200" 1010 + } 1011 + 1012 + # Auto-Auth using AppRole 1013 + auto_auth { 1014 + method "approle" { 1015 + mount_path = "auth/approle" 1016 + config = { 1017 + role_id_file_path = "/tmp/openbao/role-id" 1018 + secret_id_file_path = "/tmp/openbao/secret-id" 1019 + } 1020 + } 1021 + 1022 + # Optional: write token to file for debugging 1023 + sink "file" { 1024 + config = { 1025 + path = "/tmp/openbao/token" 1026 + mode = 0640 1027 + } 1028 + } 1029 + } 1030 + 1031 + # Proxy listener for spindle 1032 + listener "tcp" { 1033 + address = "127.0.0.1:8201" 1034 + tls_disable = true 1035 + } 1036 + 1037 + # Enable API proxy with auto-auth token 1038 + api_proxy { 1039 + use_auto_auth_token = true 1040 + } 1041 + 1042 + # Enable response caching 1043 + cache { 1044 + use_auto_auth_token = true 1045 + } 1046 + 1047 + # Logging 1048 + log_level = "info" 1049 + ``` 1050 + 1051 + #### Start the proxy 1052 + 1053 + Start OpenBao Proxy: 1054 + 1055 + ```bash 1056 + bao proxy -config=/tmp/openbao/proxy.hcl 1057 + ``` 1058 + 1059 + The proxy will authenticate with OpenBao and start listening on 1060 + `127.0.0.1:8201`. 
1061 + 1062 + #### Configure spindle 1063 + 1064 + Set these environment variables for spindle: 1065 + 1066 + ```bash 1067 + export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 1068 + export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 1069 + export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 1070 + ``` 1071 + 1072 + On startup, spindle will now connect to the local proxy, 1073 + which handles all authentication automatically. 1074 + 1075 + ### Production setup for proxy 1076 + 1077 + For production, you'll want to run the proxy as a service: 1078 + 1079 + Place your production configuration in 1080 + `/etc/openbao/proxy.hcl` with proper TLS settings for the 1081 + vault connection. 1082 + 1083 + ### Verifying setup 1084 + 1085 + Test the proxy directly: 1086 + 1087 + ```bash 1088 + # Check proxy health 1089 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 1090 + 1091 + # Test token lookup through proxy 1092 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 1093 + ``` 1094 + 1095 + Test OpenBao operations through the server: 1096 + 1097 + ```bash 1098 + # List all secrets 1099 + bao kv list spindle/ 1100 + 1101 + # Add a test secret via the spindle API, then check it exists 1102 + bao kv list spindle/repos/ 1103 + 1104 + # Get a specific secret 1105 + bao kv get spindle/repos/your_repo_path/SECRET_NAME 1106 + ``` 1107 + 1108 + ### How it works 1109 + 1110 + - Spindle connects to OpenBao Proxy on localhost (typically 1111 + port 8200 or 8201) 1112 + - The proxy authenticates with OpenBao using AppRole 1113 + credentials 1114 + - All spindle requests go through the proxy, which injects 1115 + authentication tokens 1116 + - Secrets are stored at 1117 + `spindle/repos/{sanitized_repo_path}/{secret_key}` 1118 + - Repository paths like `did:plc:alice/myrepo` become 1119 + `did_plc_alice_myrepo` 1120 + - The proxy handles all token renewal automatically 1121 + - Spindle no longer manages tokens or authentication 1122 + directly 1123 + 1124 + ### Troubleshooting 1125 + 1126 + **Connection refused**: Check that the OpenBao Proxy is 1127 + running and listening on the configured address. 1128 + 1129 + **403 errors**: Verify the AppRole credentials are correct 1130 + and the policy has the necessary permissions. 1131 + 1132 + **404 route errors**: The spindle KV mount probably doesn't 1133 + exist—run the mount creation step again. 1134 + 1135 + **Proxy authentication failures**: Check the proxy logs and 1136 + verify the role-id and secret-id files are readable and 1137 + contain valid credentials. 1138 + 1139 + **Secret not found after writing**: This can indicate policy 1140 + permission issues. Verify the policy includes both 1141 + `spindle/data/*` and `spindle/metadata/*` paths with 1142 + appropriate capabilities. 1143 + 1144 + Check proxy logs: 1145 + 1146 + ```bash 1147 + # If running as systemd service 1148 + journalctl -u openbao-proxy -f 1149 + 1150 + # If running directly, check the console output 1151 + ``` 1152 + 1153 + Test AppRole authentication manually: 1154 + 1155 + ```bash 1156 + bao write auth/approle/login \ 1157 + role_id="$(cat /tmp/openbao/role-id)" \ 1158 + secret_id="$(cat /tmp/openbao/secret-id)" 1159 + ``` 1160 + 1161 + # Migrating knots and spindles 1162 + 1163 + Sometimes, non-backwards compatible changes are made to the 1164 + knot/spindle XRPC APIs. If you host a knot or a spindle, you 1165 + will need to follow this guide to upgrade. 
Typically, this 1166 + only requires you to deploy the newest version. 1167 + 1168 + This document is laid out in reverse-chronological order. 1169 + Newer migration guides are listed first, and older guides 1170 + are further down the page. 1171 + 1172 + ## Upgrading from v1.8.x 1173 + 1174 + After v1.8.2, the HTTP API for knots and spindles has been 1175 + deprecated and replaced with XRPC. Repositories on outdated 1176 + knots will not be viewable from the appview. Upgrading is 1177 + straightforward however. 1178 + 1179 + For knots: 1180 + 1181 + - Upgrade to the latest tag (v1.9.0 or above) 1182 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1183 + hit the "retry" button to verify your knot 1184 + 1185 + For spindles: 1186 + 1187 + - Upgrade to the latest tag (v1.9.0 or above) 1188 + - Head to the [spindle 1189 + dashboard](https://tangled.org/settings/spindles) and hit the 1190 + "retry" button to verify your spindle 1191 + 1192 + ## Upgrading from v1.7.x 1193 + 1194 + After v1.7.0, knot secrets have been deprecated. You no 1195 + longer need a secret from the appview to run a knot. All 1196 + authorized commands to knots are managed via [Inter-Service 1197 + Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 1198 + Knots will be read-only until upgraded. 1199 + 1200 + Upgrading is quite easy, in essence: 1201 + 1202 + - `KNOT_SERVER_SECRET` is no more, you can remove this 1203 + environment variable entirely 1204 + - `KNOT_SERVER_OWNER` is now required on boot, set this to 1205 + your DID. You can find your DID in the 1206 + [settings](https://tangled.org/settings) page. 1207 + - Restart your knot once you have replaced the environment 1208 + variable 1209 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1210 + hit the "retry" button to verify your knot. This simply 1211 + writes a `sh.tangled.knot` record to your PDS. 1212 + 1213 + If you use the nix module, simply bump the flake to the 1214 + latest revision, and change your config block like so: 1215 + 1216 + ```diff 1217 + services.tangled.knot = { 1218 + enable = true; 1219 + server = { 1220 + - secretFile = /path/to/secret; 1221 + + owner = "did:plc:foo"; 1222 + }; 1223 + }; 1224 + ``` 1225 + 1226 + # Hacking on Tangled 1227 + 1228 + We highly recommend [installing 1229 + Nix](https://nixos.org/download/) (the package manager) 1230 + before working on the codebase. The Nix flake provides a lot 1231 + of helpers to get started and most importantly, builds and 1232 + dev shells are entirely deterministic. 1233 + 1234 + To set up your dev environment: 1235 + 1236 + ```bash 1237 + nix develop 1238 + ``` 1239 + 1240 + Non-Nix users can look at the `devShell` attribute in the 1241 + `flake.nix` file to determine necessary dependencies. 
1242 +
1243 + ## Running the appview
1244 +
1245 + The Nix flake also exposes a few `app` attributes (run `nix
1246 + flake show` to see a full list of what the flake provides);
1247 + one of the apps runs the appview with the `air`
1248 + live-reloader:
1249 +
1250 + ```bash
1251 + TANGLED_DEV=true nix run .#watch-appview
1252 +
1253 + # TANGLED_DB_PATH might be of interest to point to
1254 + # different sqlite DBs
1255 +
1256 + # in a separate shell, you can live-reload tailwind
1257 + nix run .#watch-tailwind
1258 + ```
1259 +
1260 + To authenticate with the appview, you will need Redis and
1261 + OAuth JWKs to be set up:
1262 +
1263 + ```
1264 + # OAuth JWKs should already be set up by the Nix devshell:
1265 + echo $TANGLED_OAUTH_CLIENT_SECRET
1266 + z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1267 +
1268 + echo $TANGLED_OAUTH_CLIENT_KID
1269 + 1761667908
1270 +
1271 + # if not, you can set it up yourself:
1272 + goat key generate -t P-256
1273 + Key Type: P-256 / secp256r1 / ES256 private key
1274 + Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1275 + z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1276 + Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1277 + did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1278 +
1279 + # the secret key from above
1280 + export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1281 +
1282 + # Run Redis in a new shell to store OAuth sessions
1283 + redis-server
1284 + ```
1285 +
1286 + ## Running knots and spindles
1287 +
1288 + An end-to-end knot setup requires setting up a machine with
1289 + `sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1290 + quite cumbersome. So the Nix flake provides a
1291 + `nixosConfiguration` to do so.
1292 +
1293 + <details>
1294 + <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1295 +
1296 + In order to build Tangled's dev VM on macOS, you will
1297 + first need to set up a Linux Nix builder. The recommended
1298 + way to do so is to run a [`darwin.linux-builder`
1299 + VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1300 + and to register it in `nix.conf` as a builder for Linux
1301 + with the same architecture as your Mac (`aarch64-linux` if
1302 + you are using Apple Silicon).
1303 +
1304 + > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1305 + > the Tangled repo so that it doesn't conflict with the other VM. For example,
1306 + > you can do
1307 + >
1308 + > ```shell
1309 + > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1310 + > ```
1311 + >
1312 + > to store the builder VM in a temporary dir.
1313 + >
1314 + > You should read and follow [all the other instructions][darwin builder vm] to
1315 + > avoid subtle problems.
1316 +
1317 + Alternatively, you can use any other method to set up a
1318 + Linux machine with Nix installed that you can `sudo ssh`
1319 + into (in other words, root user on your Mac has to be able
1320 + to ssh into the Linux machine without entering a password)
1321 + and that has the same architecture as your Mac. See
1322 + [remote builder
1323 + instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1324 + for how to register such a builder in `nix.conf`.
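As a rough illustration, registering such a builder in `nix.conf` looks something like the sketch below; the URI, system, key path and job count are placeholder values, not tested configuration:

```bash
# append a builder entry to the system-wide nix.conf (values are illustrative)
sudo tee -a /etc/nix/nix.conf <<'EOF'
builders = ssh-ng://builder@linux-builder aarch64-linux /etc/nix/builder_ed25519 4
builders-use-substitutes = true
EOF
```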
1325 +
1326 + > WARNING: If you'd like to use
1327 + > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1328 + > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1329 + > ssh` works can be tricky. It seems to be [possible with
1330 + > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1331 +
1332 + </details>
1333 +
1334 + To begin, grab your DID from http://localhost:3000/settings.
1335 + Then, set `TANGLED_VM_KNOT_OWNER` and
1336 + `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1337 + lightweight NixOS VM like so:
1338 +
1339 + ```bash
1340 + nix run --impure .#vm
1341 +
1342 + # type `poweroff` at the shell to exit the VM
1343 + ```
1344 +
1345 + This starts a knot on port 6444 and a spindle on port 6555,
1346 + with `ssh` exposed on port 2222.
1347 +
1348 + Once the services are running, head to
1349 + http://localhost:3000/settings/knots and hit "Verify". It should
1350 + verify the ownership of the services instantly if everything
1351 + went smoothly.
1352 +
1353 + You can push repositories to this VM using the following ssh
1354 + config block on your main machine:
1355 +
1356 + ```bash
1357 + Host nixos-shell
1358 + Hostname localhost
1359 + Port 2222
1360 + User git
1361 + IdentityFile ~/.ssh/my_tangled_key
1362 + ```
1363 +
1364 + Set up a remote called `local-dev` on a git repo:
1365 +
1366 + ```bash
1367 + git remote add local-dev git@nixos-shell:user/repo
1368 + git push local-dev main
1369 + ```
1370 +
1371 + The above VM should already be running a spindle on
1372 + `localhost:6555`. Head to http://localhost:3000/settings/spindles and
1373 + hit "Verify". You can then configure each repository to use
1374 + this spindle and run CI jobs.
1375 +
1376 + Of interest when debugging spindles:
1377 +
1378 + ```
1379 + # Service logs from journald:
1380 + journalctl -xeu spindle
1381 +
1382 + # CI job logs from disk:
1383 + ls /var/log/spindle
1384 +
1385 + # Debugging spindle database:
1386 + sqlite3 /var/lib/spindle/spindle.db
1387 +
1388 + # litecli has a nicer REPL interface:
1389 + litecli /var/lib/spindle/spindle.db
1390 + ```
1391 +
1392 + If for any reason you wish to disable either one of the
1393 + services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1394 + `services.tangled.spindle.enable` (or
1395 + `services.tangled.knot.enable`) to `false`.
1396 +
1397 + # Contribution guide
1398 +
1399 + ## Commit guidelines
1400 +
1401 + We follow a commit style similar to the Go project. Please keep commits:
1402 +
1403 + * **atomic**: each commit should represent one logical change
1404 + * **descriptive**: the commit message should clearly describe what the
1405 + change does and why it's needed
1406 +
1407 + ### Message format
1408 +
1409 + ```
1410 + <service/top-level directory>/<affected package/directory>: <short summary of change>
1411 +
1412 + Optional longer description can go here, if necessary. Explain what the
1413 + change does and why, especially if not obvious. Reference relevant
1414 + issues or PRs when applicable. These can be links for now since we don't
1415 + auto-link issues/PRs yet.
1416 + ```
1417 +
1418 + Here are some examples:
1419 +
1420 + ```
1421 + appview/state: fix token expiry check in middleware
1422 +
1423 + The previous check did not account for clock drift, leading to premature
1424 + token invalidation.
1425 + ``` 1426 + 1427 + ``` 1428 + knotserver/git/service: improve error checking in upload-pack 1429 + ``` 1430 + 1431 + 1432 + ### General notes 1433 + 1434 + - PRs get merged "as-is" (fast-forward)—like applying a patch-series 1435 + using `git am`. At present, there is no squashing—so please author 1436 + your commits as they would appear on `master`, following the above 1437 + guidelines. 1438 + - If there is a lot of nesting, for example "appview: 1439 + pages/templates/repo/fragments: ...", these can be truncated down to 1440 + just "appview: repo/fragments: ...". If the change affects a lot of 1441 + subdirectories, you may abbreviate to just the top-level names, e.g. 1442 + "appview: ..." or "knotserver: ...". 1443 + - Keep commits lowercased with no trailing period. 1444 + - Use the imperative mood in the summary line (e.g., "fix bug" not 1445 + "fixed bug" or "fixes bug"). 1446 + - Try to keep the summary line under 72 characters, but we aren't too 1447 + fussed about this. 1448 + - Follow the same formatting for PR titles if filled manually. 1449 + - Don't include unrelated changes in the same commit. 1450 + - Avoid noisy commit messages like "wip" or "final fix"—rewrite history 1451 + before submitting if necessary. 1452 + 1453 + ## Code formatting 1454 + 1455 + We use a variety of tools to format our code, and multiplex them with 1456 + [`treefmt`](https://treefmt.com). All you need to do to format your changes 1457 + is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 1458 + 1459 + ## Proposals for bigger changes 1460 + 1461 + Small fixes like typos, minor bugs, or trivial refactors can be 1462 + submitted directly as PRs. 1463 + 1464 + For larger changes—especially those introducing new features, significant 1465 + refactoring, or altering system behavior—please open a proposal first. This 1466 + helps us evaluate the scope, design, and potential impact before implementation. 1467 + 1468 + Create a new issue titled: 1469 + 1470 + ``` 1471 + proposal: <affected scope>: <summary of change> 1472 + ``` 1473 + 1474 + In the description, explain: 1475 + 1476 + - What the change is 1477 + - Why it's needed 1478 + - How you plan to implement it (roughly) 1479 + - Any open questions or tradeoffs 1480 + 1481 + We'll use the issue thread to discuss and refine the idea before moving 1482 + forward. 1483 + 1484 + ## Developer Certificate of Origin (DCO) 1485 + 1486 + We require all contributors to certify that they have the right to 1487 + submit the code they're contributing. To do this, we follow the 1488 + [Developer Certificate of Origin 1489 + (DCO)](https://developercertificate.org/). 1490 + 1491 + By signing your commits, you're stating that the contribution is your 1492 + own work, or that you have the right to submit it under the project's 1493 + license. This helps us keep things clean and legally sound. 1494 + 1495 + To sign your commit, just add the `-s` flag when committing: 1496 + 1497 + ```sh 1498 + git commit -s -m "your commit message" 1499 + ``` 1500 + 1501 + This appends a line like: 1502 + 1503 + ``` 1504 + Signed-off-by: Your Name <your.email@example.com> 1505 + ``` 1506 + 1507 + We won't merge commits if they aren't signed off. If you forget, you can 1508 + amend the last commit like this: 1509 + 1510 + ```sh 1511 + git commit --amend -s 1512 + ``` 1513 + 1514 + If you're submitting a PR with multiple commits, make sure each one is 1515 + signed. 
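If you realize after the fact that some commits in your branch are missing the trailer, one way to add it in bulk is a rebase with sign-off (adjust the commit count to your series):

```sh
# rewrite the last 3 commits, appending a Signed-off-by trailer to each
git rebase --signoff HEAD~3
```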
1516 + 1517 + For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 1518 + to make it sign off commits in the tangled repo: 1519 + 1520 + ```shell 1521 + # Safety check, should say "No matching config key..." 1522 + jj config list templates.commit_trailers 1523 + # The command below may need to be adjusted if the command above returned something. 1524 + jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 1525 + ``` 1526 + 1527 + Refer to the [jujutsu 1528 + documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 1529 + for more information.
-136
docs/contributing.md
··· 1 - # tangled contributing guide 2 - 3 - ## commit guidelines 4 - 5 - We follow a commit style similar to the Go project. Please keep commits: 6 - 7 - * **atomic**: each commit should represent one logical change 8 - * **descriptive**: the commit message should clearly describe what the 9 - change does and why it's needed 10 - 11 - ### message format 12 - 13 - ``` 14 - <service/top-level directory>/<affected package/directory>: <short summary of change> 15 - 16 - 17 - Optional longer description can go here, if necessary. Explain what the 18 - change does and why, especially if not obvious. Reference relevant 19 - issues or PRs when applicable. These can be links for now since we don't 20 - auto-link issues/PRs yet. 21 - ``` 22 - 23 - Here are some examples: 24 - 25 - ``` 26 - appview/state: fix token expiry check in middleware 27 - 28 - The previous check did not account for clock drift, leading to premature 29 - token invalidation. 30 - ``` 31 - 32 - ``` 33 - knotserver/git/service: improve error checking in upload-pack 34 - ``` 35 - 36 - 37 - ### general notes 38 - 39 - - PRs get merged "as-is" (fast-forward) -- like applying a patch-series 40 - using `git am`. At present, there is no squashing -- so please author 41 - your commits as they would appear on `master`, following the above 42 - guidelines. 43 - - If there is a lot of nesting, for example "appview: 44 - pages/templates/repo/fragments: ...", these can be truncated down to 45 - just "appview: repo/fragments: ...". If the change affects a lot of 46 - subdirectories, you may abbreviate to just the top-level names, e.g. 47 - "appview: ..." or "knotserver: ...". 48 - - Keep commits lowercased with no trailing period. 49 - - Use the imperative mood in the summary line (e.g., "fix bug" not 50 - "fixed bug" or "fixes bug"). 51 - - Try to keep the summary line under 72 characters, but we aren't too 52 - fussed about this. 53 - - Follow the same formatting for PR titles if filled manually. 54 - - Don't include unrelated changes in the same commit. 55 - - Avoid noisy commit messages like "wip" or "final fix"—rewrite history 56 - before submitting if necessary. 57 - 58 - ## code formatting 59 - 60 - We use a variety of tools to format our code, and multiplex them with 61 - [`treefmt`](https://treefmt.com): all you need to do to format your changes 62 - is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 63 - 64 - ## proposals for bigger changes 65 - 66 - Small fixes like typos, minor bugs, or trivial refactors can be 67 - submitted directly as PRs. 68 - 69 - For larger changes—especially those introducing new features, significant 70 - refactoring, or altering system behavior—please open a proposal first. This 71 - helps us evaluate the scope, design, and potential impact before implementation. 72 - 73 - ### proposal format 74 - 75 - Create a new issue titled: 76 - 77 - ``` 78 - proposal: <affected scope>: <summary of change> 79 - ``` 80 - 81 - In the description, explain: 82 - 83 - - What the change is 84 - - Why it's needed 85 - - How you plan to implement it (roughly) 86 - - Any open questions or tradeoffs 87 - 88 - We'll use the issue thread to discuss and refine the idea before moving 89 - forward. 90 - 91 - ## developer certificate of origin (DCO) 92 - 93 - We require all contributors to certify that they have the right to 94 - submit the code they're contributing. To do this, we follow the 95 - [Developer Certificate of Origin 96 - (DCO)](https://developercertificate.org/). 
97 - 98 - By signing your commits, you're stating that the contribution is your 99 - own work, or that you have the right to submit it under the project's 100 - license. This helps us keep things clean and legally sound. 101 - 102 - To sign your commit, just add the `-s` flag when committing: 103 - 104 - ```sh 105 - git commit -s -m "your commit message" 106 - ``` 107 - 108 - This appends a line like: 109 - 110 - ``` 111 - Signed-off-by: Your Name <your.email@example.com> 112 - ``` 113 - 114 - We won't merge commits if they aren't signed off. If you forget, you can 115 - amend the last commit like this: 116 - 117 - ```sh 118 - git commit --amend -s 119 - ``` 120 - 121 - If you're submitting a PR with multiple commits, make sure each one is 122 - signed. 123 - 124 - For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 125 - to make it sign off commits in the tangled repo: 126 - 127 - ```shell 128 - # Safety check, should say "No matching config key..." 129 - jj config list templates.commit_trailers 130 - # The command below may need to be adjusted if the command above returned something. 131 - jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 132 - ``` 133 - 134 - Refer to the [jj 135 - documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 136 - for more information.
-172
docs/hacking.md
··· 1 - # hacking on tangled 2 - 3 - We highly recommend [installing 4 - nix](https://nixos.org/download/) (the package manager) 5 - before working on the codebase. The nix flake provides a lot 6 - of helpers to get started and most importantly, builds and 7 - dev shells are entirely deterministic. 8 - 9 - To set up your dev environment: 10 - 11 - ```bash 12 - nix develop 13 - ``` 14 - 15 - Non-nix users can look at the `devShell` attribute in the 16 - `flake.nix` file to determine necessary dependencies. 17 - 18 - ## running the appview 19 - 20 - The nix flake also exposes a few `app` attributes (run `nix 21 - flake show` to see a full list of what the flake provides), 22 - one of the apps runs the appview with the `air` 23 - live-reloader: 24 - 25 - ```bash 26 - TANGLED_DEV=true nix run .#watch-appview 27 - 28 - # TANGLED_DB_PATH might be of interest to point to 29 - # different sqlite DBs 30 - 31 - # in a separate shell, you can live-reload tailwind 32 - nix run .#watch-tailwind 33 - ``` 34 - 35 - To authenticate with the appview, you will need redis and 36 - OAUTH JWKs to be setup: 37 - 38 - ``` 39 - # oauth jwks should already be setup by the nix devshell: 40 - echo $TANGLED_OAUTH_CLIENT_SECRET 41 - z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc 42 - 43 - echo $TANGLED_OAUTH_CLIENT_KID 44 - 1761667908 45 - 46 - # if not, you can set it up yourself: 47 - goat key generate -t P-256 48 - Key Type: P-256 / secp256r1 / ES256 private key 49 - Secret Key (Multibase Syntax): save this securely (eg, add to password manager) 50 - z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL 51 - Public Key (DID Key Syntax): share or publish this (eg, in DID document) 52 - did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR 53 - 54 - # the secret key from above 55 - export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..." 56 - 57 - # run redis in at a new shell to store oauth sessions 58 - redis-server 59 - ``` 60 - 61 - ## running knots and spindles 62 - 63 - An end-to-end knot setup requires setting up a machine with 64 - `sshd`, `AuthorizedKeysCommand`, and git user, which is 65 - quite cumbersome. So the nix flake provides a 66 - `nixosConfiguration` to do so. 67 - 68 - <details> 69 - <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary> 70 - 71 - In order to build Tangled's dev VM on macOS, you will 72 - first need to set up a Linux Nix builder. The recommended 73 - way to do so is to run a [`darwin.linux-builder` 74 - VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) 75 - and to register it in `nix.conf` as a builder for Linux 76 - with the same architecture as your Mac (`linux-aarch64` if 77 - you are using Apple Silicon). 78 - 79 - > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside 80 - > the tangled repo so that it doesn't conflict with the other VM. For example, 81 - > you can do 82 - > 83 - > ```shell 84 - > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder 85 - > ``` 86 - > 87 - > to store the builder VM in a temporary dir. 88 - > 89 - > You should read and follow [all the other intructions][darwin builder vm] to 90 - > avoid subtle problems. 91 - 92 - Alternatively, you can use any other method to set up a 93 - Linux machine with `nix` installed that you can `sudo ssh` 94 - into (in other words, root user on your Mac has to be able 95 - to ssh into the Linux machine without entering a password) 96 - and that has the same architecture as your Mac. 
See 97 - [remote builder 98 - instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements) 99 - for how to register such a builder in `nix.conf`. 100 - 101 - > WARNING: If you'd like to use 102 - > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 103 - > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 104 - > ssh` works can be tricky. It seems to be [possible with 105 - > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 106 - 107 - </details> 108 - 109 - To begin, grab your DID from http://localhost:3000/settings. 110 - Then, set `TANGLED_VM_KNOT_OWNER` and 111 - `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 112 - lightweight NixOS VM like so: 113 - 114 - ```bash 115 - nix run --impure .#vm 116 - 117 - # type `poweroff` at the shell to exit the VM 118 - ``` 119 - 120 - This starts a knot on port 6444, a spindle on port 6555 121 - with `ssh` exposed on port 2222. 122 - 123 - Once the services are running, head to 124 - http://localhost:3000/settings/knots and hit verify. It should 125 - verify the ownership of the services instantly if everything 126 - went smoothly. 127 - 128 - You can push repositories to this VM with this ssh config 129 - block on your main machine: 130 - 131 - ```bash 132 - Host nixos-shell 133 - Hostname localhost 134 - Port 2222 135 - User git 136 - IdentityFile ~/.ssh/my_tangled_key 137 - ``` 138 - 139 - Set up a remote called `local-dev` on a git repo: 140 - 141 - ```bash 142 - git remote add local-dev git@nixos-shell:user/repo 143 - git push local-dev main 144 - ``` 145 - 146 - ### running a spindle 147 - 148 - The above VM should already be running a spindle on 149 - `localhost:6555`. Head to http://localhost:3000/settings/spindles and 150 - hit verify. You can then configure each repository to use 151 - this spindle and run CI jobs. 152 - 153 - Of interest when debugging spindles: 154 - 155 - ``` 156 - # service logs from journald: 157 - journalctl -xeu spindle 158 - 159 - # CI job logs from disk: 160 - ls /var/log/spindle 161 - 162 - # debugging spindle db: 163 - sqlite3 /var/lib/spindle/spindle.db 164 - 165 - # litecli has a nicer REPL interface: 166 - litecli /var/lib/spindle/spindle.db 167 - ``` 168 - 169 - If for any reason you wish to disable either one of the 170 - services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 171 - `services.tangled.spindle.enable` (or 172 - `services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
··· 1 + { 2 + "text-color": null, 3 + "background-color": null, 4 + "line-number-color": null, 5 + "line-number-background-color": null, 6 + "text-styles": { 7 + "Annotation": { 8 + "text-color": null, 9 + "background-color": null, 10 + "bold": false, 11 + "italic": true, 12 + "underline": false 13 + }, 14 + "ControlFlow": { 15 + "text-color": null, 16 + "background-color": null, 17 + "bold": true, 18 + "italic": false, 19 + "underline": false 20 + }, 21 + "Error": { 22 + "text-color": null, 23 + "background-color": null, 24 + "bold": true, 25 + "italic": false, 26 + "underline": false 27 + }, 28 + "Alert": { 29 + "text-color": null, 30 + "background-color": null, 31 + "bold": true, 32 + "italic": false, 33 + "underline": false 34 + }, 35 + "Preprocessor": { 36 + "text-color": null, 37 + "background-color": null, 38 + "bold": true, 39 + "italic": false, 40 + "underline": false 41 + }, 42 + "Information": { 43 + "text-color": null, 44 + "background-color": null, 45 + "bold": false, 46 + "italic": true, 47 + "underline": false 48 + }, 49 + "Warning": { 50 + "text-color": null, 51 + "background-color": null, 52 + "bold": false, 53 + "italic": true, 54 + "underline": false 55 + }, 56 + "Documentation": { 57 + "text-color": null, 58 + "background-color": null, 59 + "bold": false, 60 + "italic": true, 61 + "underline": false 62 + }, 63 + "DataType": { 64 + "text-color": "#8f4e8b", 65 + "background-color": null, 66 + "bold": false, 67 + "italic": false, 68 + "underline": false 69 + }, 70 + "Comment": { 71 + "text-color": null, 72 + "background-color": null, 73 + "bold": false, 74 + "italic": true, 75 + "underline": false 76 + }, 77 + "CommentVar": { 78 + "text-color": null, 79 + "background-color": null, 80 + "bold": false, 81 + "italic": true, 82 + "underline": false 83 + }, 84 + "Keyword": { 85 + "text-color": null, 86 + "background-color": null, 87 + "bold": true, 88 + "italic": false, 89 + "underline": false 90 + } 91 + } 92 + } 93 +
-214
docs/knot-hosting.md
··· 1 - # knot self-hosting guide 2 - 3 - So you want to run your own knot server? Great! Here are a few prerequisites: 4 - 5 - 1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind. 6 - 2. A (sub)domain name. People generally use `knot.example.com`. 7 - 3. A valid SSL certificate for your domain. 8 - 9 - There's a couple of ways to get started: 10 - * NixOS: refer to 11 - [flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix) 12 - * Docker: Documented at 13 - [@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker) 14 - (community maintained: support is not guaranteed!) 15 - * Manual: Documented below. 16 - 17 - ## manual setup 18 - 19 - First, clone this repository: 20 - 21 - ``` 22 - git clone https://tangled.org/@tangled.org/core 23 - ``` 24 - 25 - Then, build the `knot` CLI. This is the knot administration and operation tool. 26 - For the purpose of this guide, we're only concerned with these subcommands: 27 - 28 - * `knot server`: the main knot server process, typically run as a 29 - supervised service 30 - * `knot guard`: handles role-based access control for git over SSH 31 - (you'll never have to run this yourself) 32 - * `knot keys`: fetches SSH keys associated with your knot; we'll use 33 - this to generate the SSH `AuthorizedKeysCommand` 34 - 35 - ``` 36 - cd core 37 - export CGO_ENABLED=1 38 - go build -o knot ./cmd/knot 39 - ``` 40 - 41 - Next, move the `knot` binary to a location owned by `root` -- 42 - `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 43 - 44 - ``` 45 - sudo mv knot /usr/local/bin/knot 46 - sudo chown root:root /usr/local/bin/knot 47 - ``` 48 - 49 - This is necessary because SSH `AuthorizedKeysCommand` requires [really 50 - specific permissions](https://stackoverflow.com/a/27638306). The 51 - `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 52 - retrieve a user's public SSH keys dynamically for authentication. Let's 53 - set that up. 54 - 55 - ``` 56 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 57 - Match User git 58 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 59 - AuthorizedKeysCommandUser nobody 60 - EOF 61 - ``` 62 - 63 - Then, reload `sshd`: 64 - 65 - ``` 66 - sudo systemctl reload ssh 67 - ``` 68 - 69 - Next, create the `git` user. We'll use the `git` user's home directory 70 - to store repositories: 71 - 72 - ``` 73 - sudo adduser git 74 - ``` 75 - 76 - Create `/home/git/.knot.env` with the following, updating the values as 77 - necessary. The `KNOT_SERVER_OWNER` should be set to your 78 - DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 79 - 80 - ``` 81 - KNOT_REPO_SCAN_PATH=/home/git 82 - KNOT_SERVER_HOSTNAME=knot.example.com 83 - APPVIEW_ENDPOINT=https://tangled.sh 84 - KNOT_SERVER_OWNER=did:plc:foobar 85 - KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 86 - KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 87 - ``` 88 - 89 - If you run a Linux distribution that uses systemd, you can use the provided 90 - service file to run the server. Copy 91 - [`knotserver.service`](/systemd/knotserver.service) 92 - to `/etc/systemd/system/`. Then, run: 93 - 94 - ``` 95 - systemctl enable knotserver 96 - systemctl start knotserver 97 - ``` 98 - 99 - The last step is to configure a reverse proxy like Nginx or Caddy to front your 100 - knot. 
Here's an example configuration for Nginx: 101 - 102 - ``` 103 - server { 104 - listen 80; 105 - listen [::]:80; 106 - server_name knot.example.com; 107 - 108 - location / { 109 - proxy_pass http://localhost:5555; 110 - proxy_set_header Host $host; 111 - proxy_set_header X-Real-IP $remote_addr; 112 - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 113 - proxy_set_header X-Forwarded-Proto $scheme; 114 - } 115 - 116 - # wss endpoint for git events 117 - location /events { 118 - proxy_set_header X-Forwarded-For $remote_addr; 119 - proxy_set_header Host $http_host; 120 - proxy_set_header Upgrade websocket; 121 - proxy_set_header Connection Upgrade; 122 - proxy_pass http://localhost:5555; 123 - } 124 - # additional config for SSL/TLS go here. 125 - } 126 - 127 - ``` 128 - 129 - Remember to use Let's Encrypt or similar to procure a certificate for your 130 - knot domain. 131 - 132 - You should now have a running knot server! You can finalize 133 - your registration by hitting the `verify` button on the 134 - [/settings/knots](https://tangled.org/settings/knots) page. This simply creates 135 - a record on your PDS to announce the existence of the knot. 136 - 137 - ### custom paths 138 - 139 - (This section applies to manual setup only. Docker users should edit the mounts 140 - in `docker-compose.yml` instead.) 141 - 142 - Right now, the database and repositories of your knot lives in `/home/git`. You 143 - can move these paths if you'd like to store them in another folder. Be careful 144 - when adjusting these paths: 145 - 146 - * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent 147 - any possible side effects. Remember to restart it once you're done. 148 - * Make backups before moving in case something goes wrong. 149 - * Make sure the `git` user can read and write from the new paths. 150 - 151 - #### database 152 - 153 - As an example, let's say the current database is at `/home/git/knotserver.db`, 154 - and we want to move it to `/home/git/database/knotserver.db`. 155 - 156 - Copy the current database to the new location. Make sure to copy the `.db-shm` 157 - and `.db-wal` files if they exist. 158 - 159 - ``` 160 - mkdir /home/git/database 161 - cp /home/git/knotserver.db* /home/git/database 162 - ``` 163 - 164 - In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to 165 - the new file path (_not_ the directory): 166 - 167 - ``` 168 - KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db 169 - ``` 170 - 171 - #### repositories 172 - 173 - As an example, let's say the repositories are currently in `/home/git`, and we 174 - want to move them into `/home/git/repositories`. 175 - 176 - Create the new folder, then move the existing repositories (if there are any): 177 - 178 - ``` 179 - mkdir /home/git/repositories 180 - # move all DIDs into the new folder; these will vary for you! 181 - mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories 182 - ``` 183 - 184 - In the environment (e.g. 
`/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 185 - to the new directory: 186 - 187 - ``` 188 - KNOT_REPO_SCAN_PATH=/home/git/repositories 189 - ``` 190 - 191 - Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 192 - repository path: 193 - 194 - ``` 195 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 196 - Match User git 197 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 198 - AuthorizedKeysCommandUser nobody 199 - EOF 200 - ``` 201 - 202 - Make sure to restart your SSH server! 203 - 204 - #### MOTD (message of the day) 205 - 206 - To configure the MOTD used ("Welcome to this knot!" by default), edit the 207 - `/home/git/motd` file: 208 - 209 - ``` 210 - printf "Hi from this knot!\n" > /home/git/motd 211 - ``` 212 - 213 - Note that you should add a newline at the end if setting a non-empty message 214 - since the knot won't do this for you.
-59
docs/migrations.md
··· 1 - # Migrations 2 - 3 - This document is laid out in reverse-chronological order. 4 - Newer migration guides are listed first, and older guides 5 - are further down the page. 6 - 7 - ## Upgrading from v1.8.x 8 - 9 - After v1.8.2, the HTTP API for knot and spindles have been 10 - deprecated and replaced with XRPC. Repositories on outdated 11 - knots will not be viewable from the appview. Upgrading is 12 - straightforward however. 13 - 14 - For knots: 15 - 16 - - Upgrade to latest tag (v1.9.0 or above) 17 - - Head to the [knot dashboard](https://tangled.org/settings/knots) and 18 - hit the "retry" button to verify your knot 19 - 20 - For spindles: 21 - 22 - - Upgrade to latest tag (v1.9.0 or above) 23 - - Head to the [spindle 24 - dashboard](https://tangled.org/settings/spindles) and hit the 25 - "retry" button to verify your spindle 26 - 27 - ## Upgrading from v1.7.x 28 - 29 - After v1.7.0, knot secrets have been deprecated. You no 30 - longer need a secret from the appview to run a knot. All 31 - authorized commands to knots are managed via [Inter-Service 32 - Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 33 - Knots will be read-only until upgraded. 34 - 35 - Upgrading is quite easy, in essence: 36 - 37 - - `KNOT_SERVER_SECRET` is no more, you can remove this 38 - environment variable entirely 39 - - `KNOT_SERVER_OWNER` is now required on boot, set this to 40 - your DID. You can find your DID in the 41 - [settings](https://tangled.org/settings) page. 42 - - Restart your knot once you have replaced the environment 43 - variable 44 - - Head to the [knot dashboard](https://tangled.org/settings/knots) and 45 - hit the "retry" button to verify your knot. This simply 46 - writes a `sh.tangled.knot` record to your PDS. 47 - 48 - If you use the nix module, simply bump the flake to the 49 - latest revision, and change your config block like so: 50 - 51 - ```diff 52 - services.tangled.knot = { 53 - enable = true; 54 - server = { 55 - - secretFile = /path/to/secret; 56 - + owner = "did:plc:foo"; 57 - }; 58 - }; 59 - ```
-25
docs/spindle/architecture.md
··· 1 - # spindle architecture 2 - 3 - Spindle is a small CI runner service. Here's a high level overview of how it operates: 4 - 5 - * listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and 6 - [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream. 7 - * when a new repo record comes through (typically when you add a spindle to a 8 - repo from the settings), spindle then resolves the underlying knot and 9 - subscribes to repo events (see: 10 - [`sh.tangled.pipeline`](/lexicons/pipeline.json)). 11 - * the spindle engine then handles execution of the pipeline, with results and 12 - logs beamed on the spindle event stream over wss 13 - 14 - ### the engine 15 - 16 - At present, the only supported backend is Docker (and Podman, if Docker 17 - compatibility is enabled, so that `/run/docker.sock` is created). Spindle 18 - executes each step in the pipeline in a fresh container, with state persisted 19 - across steps within the `/tangled/workspace` directory. 20 - 21 - The base image for the container is constructed on the fly using 22 - [Nixery](https://nixery.dev), which is handy for caching layers for frequently 23 - used packages. 24 - 25 - The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
··· 1 - # spindle self-hosting guide 2 - 3 - ## prerequisites 4 - 5 - * Go 6 - * Docker (the only supported backend currently) 7 - 8 - ## configuration 9 - 10 - Spindle is configured using environment variables. The following environment variables are available: 11 - 12 - * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 13 - * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 14 - * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 15 - * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 16 - * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 17 - * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 18 - * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 19 - * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 20 - * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 21 - 22 - ## running spindle 23 - 24 - 1. **Set the environment variables.** For example: 25 - 26 - ```shell 27 - export SPINDLE_SERVER_HOSTNAME="your-hostname" 28 - export SPINDLE_SERVER_OWNER="your-did" 29 - ``` 30 - 31 - 2. **Build the Spindle binary.** 32 - 33 - ```shell 34 - cd core 35 - go mod download 36 - go build -o cmd/spindle/spindle cmd/spindle/main.go 37 - ``` 38 - 39 - 3. **Create the log directory.** 40 - 41 - ```shell 42 - sudo mkdir -p /var/log/spindle 43 - sudo chown $USER:$USER -R /var/log/spindle 44 - ``` 45 - 46 - 4. **Run the Spindle binary.** 47 - 48 - ```shell 49 - ./cmd/spindle/spindle 50 - ``` 51 - 52 - Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
··· 1 - # spindle secrets with openbao 2 - 3 - This document covers setting up Spindle to use OpenBao for secrets 4 - management via OpenBao Proxy instead of the default SQLite backend. 5 - 6 - ## overview 7 - 8 - Spindle now uses OpenBao Proxy for secrets management. The proxy handles 9 - authentication automatically using AppRole credentials, while Spindle 10 - connects to the local proxy instead of directly to the OpenBao server. 11 - 12 - This approach provides better security, automatic token renewal, and 13 - simplified application code. 14 - 15 - ## installation 16 - 17 - Install OpenBao from nixpkgs: 18 - 19 - ```bash 20 - nix shell nixpkgs#openbao # for a local server 21 - ``` 22 - 23 - ## setup 24 - 25 - The setup process can is documented for both local development and production. 26 - 27 - ### local development 28 - 29 - Start OpenBao in dev mode: 30 - 31 - ```bash 32 - bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201 33 - ``` 34 - 35 - This starts OpenBao on `http://localhost:8201` with a root token. 36 - 37 - Set up environment for bao CLI: 38 - 39 - ```bash 40 - export BAO_ADDR=http://localhost:8200 41 - export BAO_TOKEN=root 42 - ``` 43 - 44 - ### production 45 - 46 - You would typically use a systemd service with a configuration file. Refer to 47 - [@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be 48 - achieved using Nix. 49 - 50 - Then, initialize the bao server: 51 - ```bash 52 - bao operator init -key-shares=1 -key-threshold=1 53 - ``` 54 - 55 - This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up: 56 - ```bash 57 - bao operator unseal <unseal_key> 58 - ``` 59 - 60 - All steps below remain the same across both dev and production setups. 
61 - 62 - ### configure openbao server 63 - 64 - Create the spindle KV mount: 65 - 66 - ```bash 67 - bao secrets enable -path=spindle -version=2 kv 68 - ``` 69 - 70 - Set up AppRole authentication and policy: 71 - 72 - Create a policy file `spindle-policy.hcl`: 73 - 74 - ```hcl 75 - # Full access to spindle KV v2 data 76 - path "spindle/data/*" { 77 - capabilities = ["create", "read", "update", "delete"] 78 - } 79 - 80 - # Access to metadata for listing and management 81 - path "spindle/metadata/*" { 82 - capabilities = ["list", "read", "delete", "update"] 83 - } 84 - 85 - # Allow listing at root level 86 - path "spindle/" { 87 - capabilities = ["list"] 88 - } 89 - 90 - # Required for connection testing and health checks 91 - path "auth/token/lookup-self" { 92 - capabilities = ["read"] 93 - } 94 - ``` 95 - 96 - Apply the policy and create an AppRole: 97 - 98 - ```bash 99 - bao policy write spindle-policy spindle-policy.hcl 100 - bao auth enable approle 101 - bao write auth/approle/role/spindle \ 102 - token_policies="spindle-policy" \ 103 - token_ttl=1h \ 104 - token_max_ttl=4h \ 105 - bind_secret_id=true \ 106 - secret_id_ttl=0 \ 107 - secret_id_num_uses=0 108 - ``` 109 - 110 - Get the credentials: 111 - 112 - ```bash 113 - # Get role ID (static) 114 - ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 115 - 116 - # Generate secret ID 117 - SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 118 - 119 - echo "Role ID: $ROLE_ID" 120 - echo "Secret ID: $SECRET_ID" 121 - ``` 122 - 123 - ### create proxy configuration 124 - 125 - Create the credential files: 126 - 127 - ```bash 128 - # Create directory for OpenBao files 129 - mkdir -p /tmp/openbao 130 - 131 - # Save credentials 132 - echo "$ROLE_ID" > /tmp/openbao/role-id 133 - echo "$SECRET_ID" > /tmp/openbao/secret-id 134 - chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 135 - ``` 136 - 137 - Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 138 - 139 - ```hcl 140 - # OpenBao server connection 141 - vault { 142 - address = "http://localhost:8200" 143 - } 144 - 145 - # Auto-Auth using AppRole 146 - auto_auth { 147 - method "approle" { 148 - mount_path = "auth/approle" 149 - config = { 150 - role_id_file_path = "/tmp/openbao/role-id" 151 - secret_id_file_path = "/tmp/openbao/secret-id" 152 - } 153 - } 154 - 155 - # Optional: write token to file for debugging 156 - sink "file" { 157 - config = { 158 - path = "/tmp/openbao/token" 159 - mode = 0640 160 - } 161 - } 162 - } 163 - 164 - # Proxy listener for Spindle 165 - listener "tcp" { 166 - address = "127.0.0.1:8201" 167 - tls_disable = true 168 - } 169 - 170 - # Enable API proxy with auto-auth token 171 - api_proxy { 172 - use_auto_auth_token = true 173 - } 174 - 175 - # Enable response caching 176 - cache { 177 - use_auto_auth_token = true 178 - } 179 - 180 - # Logging 181 - log_level = "info" 182 - ``` 183 - 184 - ### start the proxy 185 - 186 - Start OpenBao Proxy: 187 - 188 - ```bash 189 - bao proxy -config=/tmp/openbao/proxy.hcl 190 - ``` 191 - 192 - The proxy will authenticate with OpenBao and start listening on 193 - `127.0.0.1:8201`. 
194 - 195 - ### configure spindle 196 - 197 - Set these environment variables for Spindle: 198 - 199 - ```bash 200 - export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 201 - export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 202 - export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 203 - ``` 204 - 205 - Start Spindle: 206 - 207 - Spindle will now connect to the local proxy, which handles all 208 - authentication automatically. 209 - 210 - ## production setup for proxy 211 - 212 - For production, you'll want to run the proxy as a service: 213 - 214 - Place your production configuration in `/etc/openbao/proxy.hcl` with 215 - proper TLS settings for the vault connection. 216 - 217 - ## verifying setup 218 - 219 - Test the proxy directly: 220 - 221 - ```bash 222 - # Check proxy health 223 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 224 - 225 - # Test token lookup through proxy 226 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 227 - ``` 228 - 229 - Test OpenBao operations through the server: 230 - 231 - ```bash 232 - # List all secrets 233 - bao kv list spindle/ 234 - 235 - # Add a test secret via Spindle API, then check it exists 236 - bao kv list spindle/repos/ 237 - 238 - # Get a specific secret 239 - bao kv get spindle/repos/your_repo_path/SECRET_NAME 240 - ``` 241 - 242 - ## how it works 243 - 244 - - Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201) 245 - - The proxy authenticates with OpenBao using AppRole credentials 246 - - All Spindle requests go through the proxy, which injects authentication tokens 247 - - Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}` 248 - - Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo` 249 - - The proxy handles all token renewal automatically 250 - - Spindle no longer manages tokens or authentication directly 251 - 252 - ## troubleshooting 253 - 254 - **Connection refused**: Check that the OpenBao Proxy is running and 255 - listening on the configured address. 256 - 257 - **403 errors**: Verify the AppRole credentials are correct and the policy 258 - has the necessary permissions. 259 - 260 - **404 route errors**: The spindle KV mount probably doesn't exist - run 261 - the mount creation step again. 262 - 263 - **Proxy authentication failures**: Check the proxy logs and verify the 264 - role-id and secret-id files are readable and contain valid credentials. 265 - 266 - **Secret not found after writing**: This can indicate policy permission 267 - issues. Verify the policy includes both `spindle/data/*` and 268 - `spindle/metadata/*` paths with appropriate capabilities. 269 - 270 - Check proxy logs: 271 - 272 - ```bash 273 - # If running as systemd service 274 - journalctl -u openbao-proxy -f 275 - 276 - # If running directly, check the console output 277 - ``` 278 - 279 - Test AppRole authentication manually: 280 - 281 - ```bash 282 - bao write auth/approle/login \ 283 - role_id="$(cat /tmp/openbao/role-id)" \ 284 - secret_id="$(cat /tmp/openbao/secret-id)" 285 - ```
-183
docs/spindle/pipeline.md
··· 1 - # spindle pipelines 2 - 3 - Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML. 4 - 5 - The fields are: 6 - 7 - - [Trigger](#trigger): A **required** field that defines when a workflow should be triggered. 8 - - [Engine](#engine): A **required** field that defines which engine a workflow should run on. 9 - - [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned. 10 - - [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need. 11 - - [Environment](#environment): An **optional** field that allows you to define environment variables. 12 - - [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow. 13 - 14 - ## Trigger 15 - 16 - The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields: 17 - 18 - - `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values: 19 - - `push`: The workflow should run every time a commit is pushed to the repository. 20 - - `pull_request`: The workflow should run every time a pull request is made or updated. 21 - - `manual`: The workflow can be triggered manually. 22 - - `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events. 23 - - `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events. 24 - 25 - For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with: 26 - 27 - ```yaml 28 - when: 29 - - event: ["push", "manual"] 30 - branch: ["main", "develop"] 31 - - event: ["pull_request"] 32 - branch: ["main"] 33 - ``` 34 - 35 - You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed: 36 - 37 - ```yaml 38 - when: 39 - - event: ["push"] 40 - tag: ["v*"] 41 - ``` 42 - 43 - You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches): 44 - 45 - ```yaml 46 - when: 47 - - event: ["push"] 48 - branch: ["main", "release-*"] 49 - tag: ["v*", "stable"] 50 - ``` 51 - 52 - ## Engine 53 - 54 - Next is the engine on which the workflow should run, defined using the **required** `engine` field. 
The currently supported engines are: 55 - 56 - - `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there. 57 - 58 - Example: 59 - 60 - ```yaml 61 - engine: "nixery" 62 - ``` 63 - 64 - ## Clone options 65 - 66 - When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields: 67 - 68 - - `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default. 69 - - `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow. 70 - - `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default. 71 - 72 - The default settings are: 73 - 74 - ```yaml 75 - clone: 76 - skip: false 77 - depth: 1 78 - submodules: false 79 - ``` 80 - 81 - ## Dependencies 82 - 83 - Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch. 84 - 85 - Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so: 86 - 87 - ```yaml 88 - dependencies: 89 - # nixpkgs 90 - nixpkgs: 91 - - nodejs 92 - - go 93 - # custom registry 94 - git+https://tangled.org/@example.com/my_pkg: 95 - - my_pkg 96 - ``` 97 - 98 - Now these dependencies are available to use in your workflow! 99 - 100 - ## Environment 101 - 102 - The `environment` field allows you define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 103 - 104 - Example: 105 - 106 - ```yaml 107 - environment: 108 - GOOS: "linux" 109 - GOARCH: "arm64" 110 - NODE_ENV: "production" 111 - MY_ENV_VAR: "MY_ENV_VALUE" 112 - ``` 113 - 114 - ## Steps 115 - 116 - The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields: 117 - 118 - - `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing. 119 - - `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here. 
120 - - `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 121 - 122 - Example: 123 - 124 - ```yaml 125 - steps: 126 - - name: "Build backend" 127 - command: "go build" 128 - environment: 129 - GOOS: "darwin" 130 - GOARCH: "arm64" 131 - - name: "Build frontend" 132 - command: "npm run build" 133 - environment: 134 - NODE_ENV: "production" 135 - ``` 136 - 137 - ## Complete workflow 138 - 139 - ```yaml 140 - # .tangled/workflows/build.yml 141 - 142 - when: 143 - - event: ["push", "manual"] 144 - branch: ["main", "develop"] 145 - - event: ["pull_request"] 146 - branch: ["main"] 147 - 148 - engine: "nixery" 149 - 150 - # using the default values 151 - clone: 152 - skip: false 153 - depth: 1 154 - submodules: false 155 - 156 - dependencies: 157 - # nixpkgs 158 - nixpkgs: 159 - - nodejs 160 - - go 161 - # custom registry 162 - git+https://tangled.org/@example.com/my_pkg: 163 - - my_pkg 164 - 165 - environment: 166 - GOOS: "linux" 167 - GOARCH: "arm64" 168 - NODE_ENV: "production" 169 - MY_ENV_VAR: "MY_ENV_VALUE" 170 - 171 - steps: 172 - - name: "Build backend" 173 - command: "go build" 174 - environment: 175 - GOOS: "darwin" 176 - GOARCH: "arm64" 177 - - name: "Build frontend" 178 - command: "npm run build" 179 - environment: 180 - NODE_ENV: "production" 181 - ``` 182 - 183 - If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
··· 1 + svg { 2 + width: 16px; 3 + height: 16px; 4 + } 5 + 6 + :root { 7 + --syntax-alert: #d20f39; 8 + --syntax-annotation: #fe640b; 9 + --syntax-attribute: #df8e1d; 10 + --syntax-basen: #40a02b; 11 + --syntax-builtin: #1e66f5; 12 + --syntax-controlflow: #8839ef; 13 + --syntax-char: #04a5e5; 14 + --syntax-constant: #fe640b; 15 + --syntax-comment: #9ca0b0; 16 + --syntax-commentvar: #7c7f93; 17 + --syntax-documentation: #9ca0b0; 18 + --syntax-datatype: #df8e1d; 19 + --syntax-decval: #40a02b; 20 + --syntax-error: #d20f39; 21 + --syntax-extension: #4c4f69; 22 + --syntax-float: #40a02b; 23 + --syntax-function: #1e66f5; 24 + --syntax-import: #40a02b; 25 + --syntax-information: #04a5e5; 26 + --syntax-keyword: #8839ef; 27 + --syntax-operator: #179299; 28 + --syntax-other: #8839ef; 29 + --syntax-preprocessor: #ea76cb; 30 + --syntax-specialchar: #04a5e5; 31 + --syntax-specialstring: #ea76cb; 32 + --syntax-string: #40a02b; 33 + --syntax-variable: #8839ef; 34 + --syntax-verbatimstring: #40a02b; 35 + --syntax-warning: #df8e1d; 36 + } 37 + 38 + @media (prefers-color-scheme: dark) { 39 + :root { 40 + --syntax-alert: #f38ba8; 41 + --syntax-annotation: #fab387; 42 + --syntax-attribute: #f9e2af; 43 + --syntax-basen: #a6e3a1; 44 + --syntax-builtin: #89b4fa; 45 + --syntax-controlflow: #cba6f7; 46 + --syntax-char: #89dceb; 47 + --syntax-constant: #fab387; 48 + --syntax-comment: #6c7086; 49 + --syntax-commentvar: #585b70; 50 + --syntax-documentation: #6c7086; 51 + --syntax-datatype: #f9e2af; 52 + --syntax-decval: #a6e3a1; 53 + --syntax-error: #f38ba8; 54 + --syntax-extension: #cdd6f4; 55 + --syntax-float: #a6e3a1; 56 + --syntax-function: #89b4fa; 57 + --syntax-import: #a6e3a1; 58 + --syntax-information: #89dceb; 59 + --syntax-keyword: #cba6f7; 60 + --syntax-operator: #94e2d5; 61 + --syntax-other: #cba6f7; 62 + --syntax-preprocessor: #f5c2e7; 63 + --syntax-specialchar: #89dceb; 64 + --syntax-specialstring: #f5c2e7; 65 + --syntax-string: #a6e3a1; 66 + --syntax-variable: #cba6f7; 67 + --syntax-verbatimstring: #a6e3a1; 68 + --syntax-warning: #f9e2af; 69 + } 70 + } 71 + 72 + /* pandoc syntax highlighting classes */ 73 + code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */ 74 + code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */ 75 + code span.at { color: var(--syntax-attribute); } /* attribute */ 76 + code span.bn { color: var(--syntax-basen); } /* basen */ 77 + code span.bu { color: var(--syntax-builtin); } /* builtin */ 78 + code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */ 79 + code span.ch { color: var(--syntax-char); } /* char */ 80 + code span.cn { color: var(--syntax-constant); } /* constant */ 81 + code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */ 82 + code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */ 83 + code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */ 84 + code span.dt { color: var(--syntax-datatype); } /* datatype */ 85 + code span.dv { color: var(--syntax-decval); } /* decval */ 86 + code span.er { color: var(--syntax-error); font-weight: bold; } /* error */ 87 + code span.ex { color: var(--syntax-extension); } /* extension */ 88 + code span.fl { color: var(--syntax-float); } /* float */ 89 + code span.fu { color: var(--syntax-function); } /* function */ 90 + code span.im { color: var(--syntax-import); font-weight: bold; } /* import */ 91 + code span.in { 
color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */ 92 + code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */ 93 + code span.op { color: var(--syntax-operator); } /* operator */ 94 + code span.ot { color: var(--syntax-other); } /* other */ 95 + code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */ 96 + code span.sc { color: var(--syntax-specialchar); } /* specialchar */ 97 + code span.ss { color: var(--syntax-specialstring); } /* specialstring */ 98 + code span.st { color: var(--syntax-string); } /* string */ 99 + code span.va { color: var(--syntax-variable); } /* variable */ 100 + code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */ 101 + code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+117
docs/template.html
··· 1 + <!DOCTYPE html> 2 + <html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$> 3 + <head> 4 + <meta charset="utf-8" /> 5 + <meta name="generator" content="pandoc" /> 6 + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> 7 + $for(author-meta)$ 8 + <meta name="author" content="$author-meta$" /> 9 + $endfor$ 10 + 11 + $if(date-meta)$ 12 + <meta name="dcterms.date" content="$date-meta$" /> 13 + $endif$ 14 + 15 + $if(keywords)$ 16 + <meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" /> 17 + $endif$ 18 + 19 + $if(description-meta)$ 20 + <meta name="description" content="$description-meta$" /> 21 + $endif$ 22 + 23 + <title>$pagetitle$</title> 24 + 25 + <style> 26 + $styles.css()$ 27 + </style> 28 + 29 + $for(css)$ 30 + <link rel="stylesheet" href="$css$" /> 31 + $endfor$ 32 + 33 + $for(header-includes)$ 34 + $header-includes$ 35 + $endfor$ 36 + 37 + <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin /> 38 + 39 + </head> 40 + <body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen"> 41 + $for(include-before)$ 42 + $include-before$ 43 + $endfor$ 44 + 45 + $if(toc)$ 46 + <!-- mobile topbar toc --> 47 + <details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4"> 48 + <summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white"> 49 + $if(toc-title)$$toc-title$$else$Table of Contents$endif$ 50 + <span class="group-open:hidden inline">${ menu.svg() }</span> 51 + <span class="hidden group-open:inline">${ x.svg() }</span> 52 + </summary> 53 + ${ table-of-contents:toc.html() } 54 + </details> 55 + <!-- desktop sidebar toc --> 56 + <nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50"> 57 + $if(toc-title)$ 58 + <h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2> 59 + $endif$ 60 + ${ table-of-contents:toc.html() } 61 + </nav> 62 + $endif$ 63 + 64 + <div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col"> 65 + <main class="max-w-4xl w-full mx-auto p-6 flex-1"> 66 + $if(top)$ 67 + $-- only print title block if this is NOT the top page 68 + $else$ 69 + $if(title)$ 70 + <header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700"> 71 + <h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1> 72 + $if(subtitle)$ 73 + <p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p> 74 + $endif$ 75 + $for(author)$ 76 + <p class="text-sm text-gray-500 dark:text-gray-400">$author$</p> 77 + $endfor$ 78 + $if(date)$ 79 + <p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p> 80 + $endif$ 81 + $if(abstract)$ 82 + <div class="mt-6 p-4 bg-gray-50 rounded-lg"> 83 + <div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div> 84 + <div class="text-gray-700">$abstract$</div> 85 + </div> 86 + $endif$ 87 + $endif$ 88 + </header> 89 + $endif$ 90 + <article class="prose dark:prose-invert max-w-none"> 91 + $body$ 92 + </article> 93 + </main> 94 + <nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 "> 95 + <div class="max-w-4xl 
mx-auto px-8 py-4"> 96 + <div class="flex justify-between gap-4"> 97 + <span class="flex-1"> 98 + $if(previous.url)$ 99 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span> 100 + <a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a> 101 + $endif$ 102 + </span> 103 + <span class="flex-1 text-right"> 104 + $if(next.url)$ 105 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span> 106 + <a href="$next.url$" accesskey="n" rel="next">$next.title$</a> 107 + $endif$ 108 + </span> 109 + </div> 110 + </div> 111 + </nav> 112 + </div> 113 + $for(include-after)$ 114 + $include-after$ 115 + $endfor$ 116 + </body> 117 + </html>
+4
docs/toc.html
··· 1 + <div class="[&_ul]:space-y-6 [&_ul]:pl-0 [&_ul]:font-bold [&_ul_ul]:pl-4 [&_ul_ul]:font-normal [&_ul_ul]:space-y-2 [&_li]:space-y-2"> 2 + $table-of-contents$ 3 + </div> 4 +
+9 -9
flake.lock
··· 35 35 "systems": "systems" 36 36 }, 37 37 "locked": { 38 - "lastModified": 1694529238, 39 - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", 38 + "lastModified": 1731533236, 39 + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 40 40 "owner": "numtide", 41 41 "repo": "flake-utils", 42 - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", 42 + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 43 43 "type": "github" 44 44 }, 45 45 "original": { ··· 56 56 ] 57 57 }, 58 58 "locked": { 59 - "lastModified": 1754078208, 60 - "narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=", 59 + "lastModified": 1763982521, 60 + "narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=", 61 61 "owner": "nix-community", 62 62 "repo": "gomod2nix", 63 - "rev": "7f963246a71626c7fc70b431a315c4388a0c95cf", 63 + "rev": "02e63a239d6eabd595db56852535992c898eba72", 64 64 "type": "github" 65 65 }, 66 66 "original": { ··· 150 150 }, 151 151 "nixpkgs": { 152 152 "locked": { 153 - "lastModified": 1765186076, 154 - "narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=", 153 + "lastModified": 1766070988, 154 + "narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=", 155 155 "owner": "nixos", 156 156 "repo": "nixpkgs", 157 - "rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8", 157 + "rev": "c6245e83d836d0433170a16eb185cefe0572f8b8", 158 158 "type": "github" 159 159 }, 160 160 "original": {
+34 -2
flake.nix
··· 88 88 inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src; 89 89 }; 90 90 appview = self.callPackage ./nix/pkgs/appview.nix {}; 91 + docs = self.callPackage ./nix/pkgs/docs.nix { 92 + inherit inter-fonts-src ibm-plex-mono-src lucide-src; 93 + }; 91 94 spindle = self.callPackage ./nix/pkgs/spindle.nix {}; 92 95 knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {}; 93 96 knot = self.callPackage ./nix/pkgs/knot.nix {}; 97 + did-method-plc = self.callPackage ./nix/pkgs/did-method-plc.nix {}; 98 + bluesky-jetstream = self.callPackage ./nix/pkgs/bluesky-jetstream.nix {}; 99 + bluesky-relay = self.callPackage ./nix/pkgs/bluesky-relay.nix {}; 100 + tap = self.callPackage ./nix/pkgs/tap.nix {}; 94 101 }); 95 102 in { 96 103 overlays.default = final: prev: { 97 - inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview; 104 + inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs did-method-plc bluesky-jetstream bluesky-relay tap; 98 105 }; 99 106 100 107 packages = forAllSystems (system: let ··· 103 110 staticPackages = mkPackageSet pkgs.pkgsStatic; 104 111 crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic; 105 112 in { 106 - inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib; 113 + inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs did-method-plc bluesky-jetstream bluesky-relay tap; 107 114 108 115 pkgsStatic-appview = staticPackages.appview; 109 116 pkgsStatic-knot = staticPackages.knot; ··· 302 309 imports = [./nix/modules/spindle.nix]; 303 310 304 311 services.tangled.spindle.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.spindle; 312 + services.tangled.spindle.tap-package = lib.mkDefault self.packages.${pkgs.system}.tap; 313 + }; 314 + nixosModules.did-method-plc = { 315 + lib, 316 + pkgs, 317 + ... 318 + }: { 319 + imports = [./nix/modules/did-method-plc.nix]; 320 + services.did-method-plc.package = lib.mkDefault self.packages.${pkgs.system}.did-method-plc; 321 + }; 322 + nixosModules.bluesky-relay = { 323 + lib, 324 + pkgs, 325 + ... 326 + }: { 327 + imports = [./nix/modules/bluesky-relay.nix]; 328 + services.bluesky-relay.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-relay; 329 + }; 330 + nixosModules.bluesky-jetstream = { 331 + lib, 332 + pkgs, 333 + ... 334 + }: { 335 + imports = [./nix/modules/bluesky-jetstream.nix]; 336 + services.bluesky-jetstream.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-jetstream; 305 337 }; 306 338 }; 307 339 }
+2 -1
go.mod
··· 45 45 github.com/urfave/cli/v3 v3.3.3 46 46 github.com/whyrusleeping/cbor-gen v0.3.1 47 47 github.com/yuin/goldmark v1.7.13 48 + github.com/yuin/goldmark-emoji v1.0.6 48 49 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 49 50 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 50 51 golang.org/x/crypto v0.40.0 51 52 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b 52 53 golang.org/x/image v0.31.0 53 54 golang.org/x/net v0.42.0 54 - golang.org/x/sync v0.17.0 55 55 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 56 56 gopkg.in/yaml.v3 v3.0.1 57 57 ) ··· 203 203 go.uber.org/atomic v1.11.0 // indirect 204 204 go.uber.org/multierr v1.11.0 // indirect 205 205 go.uber.org/zap v1.27.0 // indirect 206 + golang.org/x/sync v0.17.0 // indirect 206 207 golang.org/x/sys v0.34.0 // indirect 207 208 golang.org/x/text v0.29.0 // indirect 208 209 golang.org/x/time v0.12.0 // indirect
+2
go.sum
··· 505 505 github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 506 506 github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= 507 507 github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= 508 + github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= 509 + github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= 508 510 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= 509 511 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= 510 512 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+1 -1
input.css
··· 162 162 } 163 163 164 164 .prose a.mention { 165 - @apply no-underline hover:underline; 165 + @apply no-underline hover:underline font-bold; 166 166 } 167 167 168 168 .prose li {
+3
nix/gomod2nix.toml
··· 530 530 [mod."github.com/yuin/goldmark"] 531 531 version = "v1.7.13" 532 532 hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE=" 533 + [mod."github.com/yuin/goldmark-emoji"] 534 + version = "v1.0.6" 535 + hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY=" 533 536 [mod."github.com/yuin/goldmark-highlighting/v2"] 534 537 version = "v2.0.0-20230729083705-37449abec8cc" 535 538 hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+64
nix/modules/bluesky-jetstream.nix
··· 1 + { 2 + config, 3 + pkgs, 4 + lib, 5 + ... 6 + }: let 7 + cfg = config.services.bluesky-jetstream; 8 + in 9 + with lib; { 10 + options.services.bluesky-jetstream = { 11 + enable = mkEnableOption "jetstream server"; 12 + package = mkPackageOption pkgs "bluesky-jetstream" {}; 13 + 14 + # dataDir = mkOption { 15 + # type = types.str; 16 + # default = "/var/lib/jetstream"; 17 + # description = "directory to store data (pebbleDB)"; 18 + # }; 19 + livenessTtl = mkOption { 20 + type = types.int; 21 + default = 15; 22 + description = "time to restart when no event detected (seconds)"; 23 + }; 24 + websocketUrl = mkOption { 25 + type = types.str; 26 + default = "wss://bsky.network/xrpc/com.atproto.sync.subscribeRepos"; 27 + description = "full websocket path to the ATProto SubscribeRepos XRPC endpoint"; 28 + }; 29 + }; 30 + config = mkIf cfg.enable { 31 + systemd.services.bluesky-jetstream = { 32 + description = "bluesky jetstream"; 33 + after = ["network.target" "pds.service"]; 34 + wantedBy = ["multi-user.target"]; 35 + 36 + serviceConfig = { 37 + User = "jetstream"; 38 + Group = "jetstream"; 39 + StateDirectory = "jetstream"; 40 + StateDirectoryMode = "0755"; 41 + # preStart = '' 42 + # mkdir -p "${cfg.dataDir}" 43 + # chown -R jetstream:jetstream "${cfg.dataDir}" 44 + # ''; 45 + # WorkingDirectory = cfg.dataDir; 46 + Environment = [ 47 + "JETSTREAM_DATA_DIR=/var/lib/jetstream/data" 48 + "JETSTREAM_LIVENESS_TTL=${toString cfg.livenessTtl}s" 49 + "JETSTREAM_WS_URL=${cfg.websocketUrl}" 50 + ]; 51 + ExecStart = getExe cfg.package; 52 + Restart = "always"; 53 + RestartSec = 5; 54 + }; 55 + }; 56 + users = { 57 + users.jetstream = { 58 + group = "jetstream"; 59 + isSystemUser = true; 60 + }; 61 + groups.jetstream = {}; 62 + }; 63 + }; 64 + }
+48
nix/modules/bluesky-relay.nix
··· 1 + { 2 + config, 3 + pkgs, 4 + lib, 5 + ... 6 + }: let 7 + cfg = config.services.bluesky-relay; 8 + in 9 + with lib; { 10 + options.services.bluesky-relay = { 11 + enable = mkEnableOption "relay server"; 12 + package = mkPackageOption pkgs "bluesky-relay" {}; 13 + }; 14 + config = mkIf cfg.enable { 15 + systemd.services.bluesky-relay = { 16 + description = "bluesky relay"; 17 + after = ["network.target" "pds.service"]; 18 + wantedBy = ["multi-user.target"]; 19 + 20 + serviceConfig = { 21 + User = "relay"; 22 + Group = "relay"; 23 + StateDirectory = "relay"; 24 + StateDirectoryMode = "0755"; 25 + Environment = [ 26 + "RELAY_ADMIN_PASSWORD=password" 27 + "RELAY_PLC_HOST=https://plc.tngl.boltless.dev" 28 + "DATABASE_URL=sqlite:///var/lib/relay/relay.sqlite" 29 + "RELAY_IP_BIND=:2470" 30 + "RELAY_PERSIST_DIR=/var/lib/relay" 31 + "RELAY_DISABLE_REQUEST_CRAWL=0" 32 + "RELAY_INITIAL_SEQ_NUMBER=1" 33 + "RELAY_ALLOW_INSECURE_HOSTS=1" 34 + ]; 35 + ExecStart = "${getExe cfg.package} serve"; 36 + Restart = "always"; 37 + RestartSec = 5; 38 + }; 39 + }; 40 + users = { 41 + users.relay = { 42 + group = "relay"; 43 + isSystemUser = true; 44 + }; 45 + groups.relay = {}; 46 + }; 47 + }; 48 + }
+76
nix/modules/did-method-plc.nix
··· 1 + { 2 + config, 3 + pkgs, 4 + lib, 5 + ... 6 + }: let 7 + cfg = config.services.did-method-plc; 8 + in 9 + with lib; { 10 + options.services.did-method-plc = { 11 + enable = mkEnableOption "did-method-plc server"; 12 + package = mkPackageOption pkgs "did-method-plc" {}; 13 + }; 14 + config = mkIf cfg.enable { 15 + services.postgresql = { 16 + enable = true; 17 + package = pkgs.postgresql_14; 18 + ensureDatabases = ["plc"]; 19 + ensureUsers = [ 20 + { 21 + name = "pg"; 22 + # ensurePermissions."DATABASE plc" = "ALL PRIVILEGES"; 23 + } 24 + ]; 25 + authentication = '' 26 + local all all trust 27 + host all all 127.0.0.1/32 trust 28 + ''; 29 + }; 30 + systemd.services.did-method-plc = { 31 + description = "did-method-plc"; 32 + 33 + after = ["postgresql.service"]; 34 + wants = ["postgresql.service"]; 35 + wantedBy = ["multi-user.target"]; 36 + 37 + environment = let 38 + db_creds_json = builtins.toJSON { 39 + username = "pg"; 40 + password = ""; 41 + host = "127.0.0.1"; 42 + port = 5432; 43 + }; 44 + in { 45 + # TODO: inherit from config 46 + DEBUG_MODE = "1"; 47 + LOG_ENABLED = "true"; 48 + LOG_LEVEL = "debug"; 49 + LOG_DESTINATION = "1"; 50 + ENABLE_MIGRATIONS = "true"; 51 + DB_CREDS_JSON = db_creds_json; 52 + DB_MIGRATE_CREDS_JSON = db_creds_json; 53 + PLC_VERSION = "0.0.1"; 54 + PORT = "8080"; 55 + }; 56 + 57 + serviceConfig = { 58 + ExecStart = getExe cfg.package; 59 + User = "plc"; 60 + Group = "plc"; 61 + StateDirectory = "plc"; 62 + StateDirectoryMode = "0755"; 63 + Restart = "always"; 64 + 65 + # Hardening 66 + }; 67 + }; 68 + users = { 69 + users.plc = { 70 + group = "plc"; 71 + isSystemUser = true; 72 + }; 73 + groups.plc = {}; 74 + }; 75 + }; 76 + }
+33
nix/modules/spindle.nix
··· 17 17 type = types.package; 18 18 description = "Package to use for the spindle"; 19 19 }; 20 + tap-package = mkOption { 21 + type = types.package; 22 + description = "Package to use for the spindle tap service"; 23 + }; 24 + 25 + atpRelayUrl = mkOption { 26 + type = types.str; 27 + default = "https://relay1.us-east.bsky.network"; 28 + description = "URL of the atproto relay"; 29 + }; 20 30 21 31 server = { 22 32 listenAddr = mkOption { ··· 113 123 114 124 config = mkIf cfg.enable { 115 125 virtualisation.docker.enable = true; 126 + 127 + systemd.services.spindle-tap = { 128 + description = "spindle tap service"; 129 + after = ["network.target" "docker.service"]; 130 + wantedBy = ["multi-user.target"]; 131 + serviceConfig = { 132 + LogsDirectory = "spindle-tap"; 133 + StateDirectory = "spindle-tap"; 134 + Environment = [ 135 + "TAP_BIND=:2480" 136 + "TAP_PLC_URL=${cfg.server.plcUrl}" 137 + "TAP_RELAY_URL=${cfg.atpRelayUrl}" 138 + "TAP_DATABASE_URL=sqlite:///var/lib/spindle-tap/tap.db" 139 + "TAP_RETRY_TIMEOUT=3s" 140 + "TAP_COLLECTION_FILTERS=${concatStringsSep "," [ 141 + "sh.tangled.repo" 142 + "sh.tangled.repo.collaborator" 143 + "sh.tangled.spindle.member" 144 + ]}" 145 + ]; 146 + ExecStart = "${getExe cfg.tap-package} run"; 147 + }; 148 + }; 116 149 117 150 systemd.services.spindle = { 118 151 description = "spindle service";
+20
nix/pkgs/bluesky-jetstream.nix
··· 1 + { 2 + buildGoModule, 3 + fetchFromGitHub, 4 + }: 5 + buildGoModule { 6 + pname = "bluesky-jetstream"; 7 + version = "0.1.0"; 8 + src = fetchFromGitHub { 9 + owner = "bluesky-social"; 10 + repo = "jetstream"; 11 + rev = "7d7efa58d7f14101a80ccc4f1085953948b7d5de"; 12 + sha256 = "sha256-1e9SL/8gaDPMA4YZed51ffzgpkptbMd0VTbTTDbPTFw="; 13 + }; 14 + subPackages = ["cmd/jetstream"]; 15 + vendorHash = "sha256-/21XJQH6fo9uPzlABUAbdBwt1O90odmppH6gXu2wkiQ="; 16 + doCheck = false; 17 + meta = { 18 + mainProgram = "jetstream"; 19 + }; 20 + }
+20
nix/pkgs/bluesky-relay.nix
··· 1 + { 2 + buildGoModule, 3 + fetchFromGitHub, 4 + }: 5 + buildGoModule { 6 + pname = "bluesky-relay"; 7 + version = "0.1.0"; 8 + src = fetchFromGitHub { 9 + owner = "boltlessengineer"; 10 + repo = "indigo"; 11 + rev = "7fe70a304d795b998f354d2b7b2050b909709c99"; 12 + sha256 = "sha256-+h34x67cqH5t30+8rua53/ucvbn3BanrmH0Og3moHok="; 13 + }; 14 + subPackages = ["cmd/relay"]; 15 + vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8="; 16 + doCheck = false; 17 + meta = { 18 + mainProgram = "relay"; 19 + }; 20 + }
+65
nix/pkgs/did-method-plc.nix
··· 1 + # inspired by https://github.com/NixOS/nixpkgs/blob/333bfb7c258fab089a834555ea1c435674c459b4/pkgs/by-name/ga/gatsby-cli/package.nix 2 + { 3 + lib, 4 + stdenv, 5 + fetchFromGitHub, 6 + fetchYarnDeps, 7 + yarnConfigHook, 8 + yarnBuildHook, 9 + nodejs, 10 + makeBinaryWrapper, 11 + }: 12 + stdenv.mkDerivation (finalAttrs: { 13 + pname = "did-method-plc"; 14 + version = "0.0.1"; 15 + 16 + src = fetchFromGitHub { 17 + owner = "did-method-plc"; 18 + repo = "did-method-plc"; 19 + rev = "158ba5535ac3da4fd4309954bde41deab0b45972"; 20 + sha256 = "sha256-O5smubbrnTDMCvL6iRyMXkddr5G7YHxkQRVMRULHanQ="; 21 + }; 22 + postPatch = '' 23 + # remove dd-trace dependency 24 + sed -i '3d' packages/server/service/index.js 25 + ''; 26 + 27 + yarnOfflineCache = fetchYarnDeps { 28 + yarnLock = finalAttrs.src + "/yarn.lock"; 29 + hash = "sha256-g8GzaAbWSnWwbQjJMV2DL5/ZlWCCX0sRkjjvX3tqU4Y="; 30 + }; 31 + 32 + nativeBuildInputs = [ 33 + yarnConfigHook 34 + yarnBuildHook 35 + nodejs 36 + makeBinaryWrapper 37 + ]; 38 + yarnBuildScript = "lerna"; 39 + yarnBuildFlags = [ 40 + "run" 41 + "build" 42 + "--scope" 43 + "@did-plc/server" 44 + "--include-dependencies" 45 + ]; 46 + 47 + installPhase = '' 48 + runHook preInstall 49 + 50 + mkdir -p $out/lib/node_modules/ 51 + mv packages/ $out/lib/packages/ 52 + mv node_modules/* $out/lib/node_modules/ 53 + 54 + makeWrapper ${lib.getExe nodejs} $out/bin/plc \ 55 + --add-flags $out/lib/packages/server/service/index.js \ 56 + --add-flags --enable-source-maps \ 57 + --set NODE_PATH $out/lib/node_modules 58 + 59 + runHook postInstall 60 + ''; 61 + 62 + meta = { 63 + mainProgram = "plc"; 64 + }; 65 + })
+41
nix/pkgs/docs.nix
··· 1 + { 2 + pandoc, 3 + tailwindcss, 4 + runCommandLocal, 5 + inter-fonts-src, 6 + ibm-plex-mono-src, 7 + lucide-src, 8 + src, 9 + }: 10 + runCommandLocal "docs" {} '' 11 + mkdir -p working 12 + 13 + # copy templates, themes, styles, filters to working directory 14 + cp ${src}/docs/*.html working/ 15 + cp ${src}/docs/*.theme working/ 16 + cp ${src}/docs/*.css working/ 17 + 18 + # icons 19 + cp -rf ${lucide-src}/*.svg working/ 20 + 21 + # content 22 + ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \ 23 + -o $out/ \ 24 + -t chunkedhtml \ 25 + --variable toc \ 26 + --toc-depth=2 \ 27 + --css=stylesheet.css \ 28 + --chunk-template="%i.html" \ 29 + --highlight-style=working/highlight.theme \ 30 + --template=working/template.html 31 + 32 + # fonts 33 + mkdir -p $out/static/fonts 34 + cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/ 35 + cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/ 36 + cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/ 37 + cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/ 38 + 39 + # styles 40 + cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css 41 + ''
+20
nix/pkgs/tap.nix
··· 1 + { 2 + buildGoModule, 3 + fetchFromGitHub, 4 + }: 5 + buildGoModule { 6 + pname = "tap"; 7 + version = "0.1.0"; 8 + src = fetchFromGitHub { 9 + owner = "bluesky-social"; 10 + repo = "indigo"; 11 + rev = "498ecb9693e8ae050f73234c86f340f51ad896a9"; 12 + sha256 = "sha256-KASCdwkg/hlKBt7RTW3e3R5J3hqJkphoarFbaMgtN1k="; 13 + }; 14 + subPackages = ["cmd/tap"]; 15 + vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8="; 16 + doCheck = false; 17 + meta = { 18 + mainProgram = "tap"; 19 + }; 20 + }
+3 -1
nix/vm.nix
··· 8 8 var = builtins.getEnv name; 9 9 in 10 10 if var == "" 11 - then throw "\$${name} must be defined, see docs/hacking.md for more details" 11 + then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details" 12 12 else var; 13 13 envVarOr = name: default: let 14 14 var = builtins.getEnv name; ··· 19 19 20 20 plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory"; 21 21 jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe"; 22 + relayUrl = envVarOr "TANGLED_VM_RELAY_URL" "https://relay1.us-east.bsky.network"; 22 23 in 23 24 nixpkgs.lib.nixosSystem { 24 25 inherit system; ··· 95 96 }; 96 97 services.tangled.spindle = { 97 98 enable = true; 99 + atpRelayUrl = relayUrl; 98 100 server = { 99 101 owner = envVar "TANGLED_VM_SPINDLE_OWNER"; 100 102 hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
+3 -3
readme.md
··· 10 10 11 11 ## docs 12 12 13 - * [knot hosting guide](/docs/knot-hosting.md) 14 - * [contributing guide](/docs/contributing.md) **please read before opening a PR!** 15 - * [hacking on tangled](/docs/hacking.md) 13 + - [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide) 14 + - [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!** 15 + - [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled) 16 16 17 17 ## security 18 18
+1
spindle/db/repos.go
··· 16 16 if err != nil { 17 17 return nil, err 18 18 } 19 + defer rows.Close() 19 20 20 21 var knots []string 21 22 for rows.Next() {
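The hunk above only shows the added defer; for context, the full query idiom these changes converge on looks roughly like this (a self-contained sketch with placeholder function, table, and column names, not the actual query from spindle/db):

```go
package db

import "database/sql"

// listKnots is an illustrative sketch of the pattern: close rows even on
// early return, and surface any iteration error via rows.Err().
func listKnots(db *sql.DB) ([]string, error) {
	rows, err := db.Query("select knot from repos")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var knots []string
	for rows.Next() {
		var knot string
		if err := rows.Scan(&knot); err != nil {
			return nil, err
		}
		knots = append(knots, knot)
	}
	return knots, rows.Err()
}
```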
+5 -1
spindle/engine/engine.go
··· 70 70 } 71 71 defer eng.DestroyWorkflow(ctx, wid) 72 72 73 - wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid) 73 + secretValues := make([]string, len(allSecrets)) 74 + for i, s := range allSecrets { 75 + secretValues[i] = s.Value 76 + } 77 + wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues) 74 78 if err != nil { 75 79 l.Warn("failed to setup step logger; logs will not be persisted", "error", err) 76 80 wfLogger = nil
+6 -1
spindle/models/logger.go
··· 12 12 type WorkflowLogger struct { 13 13 file *os.File 14 14 encoder *json.Encoder 15 + mask *SecretMask 15 16 } 16 17 17 - func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) { 18 + func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) { 18 19 path := LogFilePath(baseDir, wid) 19 20 20 21 file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) ··· 25 26 return &WorkflowLogger{ 26 27 file: file, 27 28 encoder: json.NewEncoder(file), 29 + mask: NewSecretMask(secretValues), 28 30 }, nil 29 31 } 30 32 ··· 62 64 63 65 func (w *dataWriter) Write(p []byte) (int, error) { 64 66 line := strings.TrimRight(string(p), "\r\n") 67 + if w.logger.mask != nil { 68 + line = w.logger.mask.Mask(line) 69 + } 65 70 entry := NewDataLogLine(w.idx, line, w.stream) 66 71 if err := w.logger.encoder.Encode(entry); err != nil { 67 72 return 0, err
+51
spindle/models/secret_mask.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "strings" 6 + ) 7 + 8 + // SecretMask replaces secret values in strings with "***". 9 + type SecretMask struct { 10 + replacer *strings.Replacer 11 + } 12 + 13 + // NewSecretMask creates a mask for the given secret values. 14 + // Also registers base64-encoded variants of each secret. 15 + func NewSecretMask(values []string) *SecretMask { 16 + var pairs []string 17 + 18 + for _, value := range values { 19 + if value == "" { 20 + continue 21 + } 22 + 23 + pairs = append(pairs, value, "***") 24 + 25 + b64 := base64.StdEncoding.EncodeToString([]byte(value)) 26 + if b64 != value { 27 + pairs = append(pairs, b64, "***") 28 + } 29 + 30 + b64NoPad := strings.TrimRight(b64, "=") 31 + if b64NoPad != b64 && b64NoPad != value { 32 + pairs = append(pairs, b64NoPad, "***") 33 + } 34 + } 35 + 36 + if len(pairs) == 0 { 37 + return nil 38 + } 39 + 40 + return &SecretMask{ 41 + replacer: strings.NewReplacer(pairs...), 42 + } 43 + } 44 + 45 + // Mask replaces all registered secret values with "***". 46 + func (m *SecretMask) Mask(input string) string { 47 + if m == nil || m.replacer == nil { 48 + return input 49 + } 50 + return m.replacer.Replace(input) 51 + }
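As a quick illustration of the new API (an example test, not part of the change; the secret and log line are made up), both the raw value and its unpadded base64 form are rewritten to `***`:

```go
package models

import "fmt"

// ExampleSecretMask is illustrative only; "hunter2" base64-encodes to
// "aHVudGVyMg==", so the unpadded form "aHVudGVyMg" is also registered.
func ExampleSecretMask() {
	mask := NewSecretMask([]string{"hunter2"})
	fmt.Println(mask.Mask("raw=hunter2 encoded=aHVudGVyMg"))
	// Output: raw=*** encoded=***
}
```

Building a single strings.Replacer up front keeps masking to one pass per log line, and registering the base64 variants catches secrets that are encoded before being printed.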
+135
spindle/models/secret_mask_test.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "testing" 6 + ) 7 + 8 + func TestSecretMask_BasicMasking(t *testing.T) { 9 + mask := NewSecretMask([]string{"mysecret123"}) 10 + 11 + input := "The password is mysecret123 in this log" 12 + expected := "The password is *** in this log" 13 + 14 + result := mask.Mask(input) 15 + if result != expected { 16 + t.Errorf("expected %q, got %q", expected, result) 17 + } 18 + } 19 + 20 + func TestSecretMask_Base64Encoded(t *testing.T) { 21 + secret := "mysecret123" 22 + mask := NewSecretMask([]string{secret}) 23 + 24 + b64 := base64.StdEncoding.EncodeToString([]byte(secret)) 25 + input := "Encoded: " + b64 26 + expected := "Encoded: ***" 27 + 28 + result := mask.Mask(input) 29 + if result != expected { 30 + t.Errorf("expected %q, got %q", expected, result) 31 + } 32 + } 33 + 34 + func TestSecretMask_Base64NoPadding(t *testing.T) { 35 + // "test" encodes to "dGVzdA==" with padding 36 + secret := "test" 37 + mask := NewSecretMask([]string{secret}) 38 + 39 + b64NoPad := "dGVzdA" // base64 without padding 40 + input := "Token: " + b64NoPad 41 + expected := "Token: ***" 42 + 43 + result := mask.Mask(input) 44 + if result != expected { 45 + t.Errorf("expected %q, got %q", expected, result) 46 + } 47 + } 48 + 49 + func TestSecretMask_MultipleSecrets(t *testing.T) { 50 + mask := NewSecretMask([]string{"password1", "apikey123"}) 51 + 52 + input := "Using password1 and apikey123 for auth" 53 + expected := "Using *** and *** for auth" 54 + 55 + result := mask.Mask(input) 56 + if result != expected { 57 + t.Errorf("expected %q, got %q", expected, result) 58 + } 59 + } 60 + 61 + func TestSecretMask_MultipleOccurrences(t *testing.T) { 62 + mask := NewSecretMask([]string{"secret"}) 63 + 64 + input := "secret appears twice: secret" 65 + expected := "*** appears twice: ***" 66 + 67 + result := mask.Mask(input) 68 + if result != expected { 69 + t.Errorf("expected %q, got %q", expected, result) 70 + } 71 + } 72 + 73 + func TestSecretMask_ShortValues(t *testing.T) { 74 + mask := NewSecretMask([]string{"abc", "xy", ""}) 75 + 76 + if mask == nil { 77 + t.Fatal("expected non-nil mask") 78 + } 79 + 80 + input := "abc xy test" 81 + expected := "*** *** test" 82 + result := mask.Mask(input) 83 + if result != expected { 84 + t.Errorf("expected %q, got %q", expected, result) 85 + } 86 + } 87 + 88 + func TestSecretMask_NilMask(t *testing.T) { 89 + var mask *SecretMask 90 + 91 + input := "some input text" 92 + result := mask.Mask(input) 93 + if result != input { 94 + t.Errorf("expected %q, got %q", input, result) 95 + } 96 + } 97 + 98 + func TestSecretMask_EmptyInput(t *testing.T) { 99 + mask := NewSecretMask([]string{"secret"}) 100 + 101 + result := mask.Mask("") 102 + if result != "" { 103 + t.Errorf("expected empty string, got %q", result) 104 + } 105 + } 106 + 107 + func TestSecretMask_NoMatch(t *testing.T) { 108 + mask := NewSecretMask([]string{"secretvalue"}) 109 + 110 + input := "nothing to mask here" 111 + result := mask.Mask(input) 112 + if result != input { 113 + t.Errorf("expected %q, got %q", input, result) 114 + } 115 + } 116 + 117 + func TestSecretMask_EmptySecretsList(t *testing.T) { 118 + mask := NewSecretMask([]string{}) 119 + 120 + if mask != nil { 121 + t.Error("expected nil mask for empty secrets list") 122 + } 123 + } 124 + 125 + func TestSecretMask_EmptySecretsFiltered(t *testing.T) { 126 + mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"}) 127 + 128 + input := "Using validpassword here" 129 + expected := "Using *** here" 130 + 
131 + result := mask.Mask(input) 132 + if result != expected { 133 + t.Errorf("expected %q, got %q", expected, result) 134 + } 135 + }
+1 -1
spindle/motd
··· 20 20 ** 21 21 ******** 22 22 23 - This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle 23 + This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles 24 24 25 25 Most API routes are under /xrpc/
+1 -1
tailwind.config.js
··· 2 2 const colors = require("tailwindcss/colors"); 3 3 4 4 module.exports = { 5 - content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"], 5 + content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"], 6 6 darkMode: "media", 7 7 theme: { 8 8 container: {