+1
appview/db/repos.go
+13
-3
appview/pages/markup/extension/atlink.go
···
35
35
return KindAt
36
36
}
37
37
38
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
38
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
39
+
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
39
40
40
41
type atParser struct{}
41
42
···
55
56
if m == nil {
56
57
return nil
57
58
}
59
+
60
+
// Check for all links in the markdown to see if the handle found is inside one
61
+
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
62
+
for _, linkMatch := range linksIndexes {
63
+
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
64
+
return nil
65
+
}
66
+
}
67
+
58
68
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
59
69
block.Advance(m[1])
60
70
node := &AtNode{}
···
87
97
88
98
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
89
99
if entering {
90
-
w.WriteString(`<a href="/@`)
100
+
w.WriteString(`<a href="/`)
91
101
w.WriteString(n.(*AtNode).Handle)
92
-
w.WriteString(`" class="mention font-bold">`)
102
+
w.WriteString(`" class="mention">`)
93
103
} else {
94
104
w.WriteString("</a>")
95
105
}
+121
appview/pages/markup/markdown_test.go
···
1
+
package markup
2
+
3
+
import (
4
+
"bytes"
5
+
"testing"
6
+
)
7
+
8
+
func TestAtExtension_Rendering(t *testing.T) {
9
+
tests := []struct {
10
+
name string
11
+
markdown string
12
+
expected string
13
+
}{
14
+
{
15
+
name: "renders simple at mention",
16
+
markdown: "Hello @user.tngl.sh!",
17
+
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
18
+
},
19
+
{
20
+
name: "renders multiple at mentions",
21
+
markdown: "Hi @alice.tngl.sh and @bob.example.com",
22
+
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
23
+
},
24
+
{
25
+
name: "renders at mention in parentheses",
26
+
markdown: "Check this out (@user.tngl.sh)",
27
+
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
28
+
},
29
+
{
30
+
name: "does not render email",
31
+
markdown: "Contact me at test@example.com",
32
+
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
33
+
},
34
+
{
35
+
name: "renders at mention with hyphen",
36
+
markdown: "Follow @user-name.tngl.sh",
37
+
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
38
+
},
39
+
{
40
+
name: "renders at mention with numbers",
41
+
markdown: "@user123.test456.social",
42
+
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
43
+
},
44
+
{
45
+
name: "at mention at start of line",
46
+
markdown: "@user.tngl.sh is cool",
47
+
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
48
+
},
49
+
}
50
+
51
+
for _, tt := range tests {
52
+
t.Run(tt.name, func(t *testing.T) {
53
+
md := NewMarkdown()
54
+
55
+
var buf bytes.Buffer
56
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
57
+
t.Fatalf("failed to convert markdown: %v", err)
58
+
}
59
+
60
+
result := buf.String()
61
+
if result != tt.expected+"\n" {
62
+
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
63
+
}
64
+
})
65
+
}
66
+
}
67
+
68
+
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
69
+
tests := []struct {
70
+
name string
71
+
markdown string
72
+
contains string
73
+
}{
74
+
{
75
+
name: "at mention with bold",
76
+
markdown: "**Hello @user.tngl.sh**",
77
+
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
78
+
},
79
+
{
80
+
name: "at mention with italic",
81
+
markdown: "*Check @user.tngl.sh*",
82
+
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
83
+
},
84
+
{
85
+
name: "at mention in list",
86
+
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
87
+
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
88
+
},
89
+
{
90
+
name: "at mention in link",
91
+
markdown: "[@regnault.dev](https://regnault.dev)",
92
+
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
93
+
},
94
+
{
95
+
name: "at mention in link again",
96
+
markdown: "[check out @regnault.dev](https://regnault.dev)",
97
+
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
98
+
},
99
+
{
100
+
name: "at mention in link again, multiline",
101
+
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
102
+
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
103
+
},
104
+
}
105
+
106
+
for _, tt := range tests {
107
+
t.Run(tt.name, func(t *testing.T) {
108
+
md := NewMarkdown()
109
+
110
+
var buf bytes.Buffer
111
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
112
+
t.Fatalf("failed to convert markdown: %v", err)
113
+
}
114
+
115
+
result := buf.String()
116
+
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
117
+
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
118
+
}
119
+
})
120
+
}
121
+
}
+1
-1
appview/pages/templates/repo/empty.html
···
26
26
{{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }}
27
27
{{ $knot := .RepoInfo.Knot }}
28
28
{{ if eq $knot "knot1.tangled.sh" }}
29
-
{{ $knot = "tangled.sh" }}
29
+
{{ $knot = "tangled.org" }}
30
30
{{ end }}
31
31
<div class="w-full flex place-content-center">
32
32
<div class="py-6 w-fit flex flex-col gap-4">
+35
-35
appview/pages/templates/repo/fragments/splitDiff.html
···
3
3
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}}
4
4
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
5
5
{{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
6
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
6
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
7
7
{{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}}
8
8
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}}
9
9
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
10
10
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
11
11
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
12
12
<div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700">
13
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
13
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
14
14
{{- range .LeftLines -}}
15
15
{{- if .IsEmpty -}}
16
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
17
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
18
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
19
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
20
-
</div>
16
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
17
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
18
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
19
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
20
+
</span>
21
21
{{- else if eq .Op.String "-" -}}
22
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
24
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
25
-
<div class="px-2">{{ .Content }}</div>
26
-
</div>
22
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
24
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
25
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
26
+
</span>
27
27
{{- else if eq .Op.String " " -}}
28
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
30
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
31
-
<div class="px-2">{{ .Content }}</div>
32
-
</div>
28
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
30
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
31
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
32
+
</span>
33
33
{{- end -}}
34
34
{{- end -}}
35
-
{{- end -}}</div></div></pre>
35
+
{{- end -}}</div></div></div>
36
36
37
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
37
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
38
38
{{- range .RightLines -}}
39
39
{{- if .IsEmpty -}}
40
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
41
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
42
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
43
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
44
-
</div>
40
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
41
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
42
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
43
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
44
+
</span>
45
45
{{- else if eq .Op.String "+" -}}
46
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
48
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
49
-
<div class="px-2" >{{ .Content }}</div>
50
-
</div>
46
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span>
48
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
49
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
50
+
</span>
51
51
{{- else if eq .Op.String " " -}}
52
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
54
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
55
-
<div class="px-2">{{ .Content }}</div>
56
-
</div>
52
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span>
54
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
55
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
56
+
</span>
57
57
{{- end -}}
58
58
{{- end -}}
59
-
{{- end -}}</div></div></pre>
59
+
{{- end -}}</div></div></div>
60
60
</div>
61
61
{{ end }}
+21
-22
appview/pages/templates/repo/fragments/unifiedDiff.html
···
1
1
{{ define "repo/fragments/unifiedDiff" }}
2
2
{{ $name := .Id }}
3
-
<pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
3
+
<div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
4
4
{{- $oldStart := .OldPosition -}}
5
5
{{- $newStart := .NewPosition -}}
6
6
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}}
7
7
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
8
8
{{- $lineNrSepStyle1 := "" -}}
9
9
{{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
10
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
10
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
11
11
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}}
12
12
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
13
13
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
14
14
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
15
15
{{- range .Lines -}}
16
16
{{- if eq .Op.String "+" -}}
17
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div>
19
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div>
20
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
21
-
<div class="px-2">{{ .Line }}</div>
22
-
</div>
17
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span>
19
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span>
20
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
21
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
22
+
</span>
23
23
{{- $newStart = add64 $newStart 1 -}}
24
24
{{- end -}}
25
25
{{- if eq .Op.String "-" -}}
26
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div>
28
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div>
29
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
30
-
<div class="px-2">{{ .Line }}</div>
31
-
</div>
26
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span>
28
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span>
29
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
30
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
31
+
</span>
32
32
{{- $oldStart = add64 $oldStart 1 -}}
33
33
{{- end -}}
34
34
{{- if eq .Op.String " " -}}
35
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div>
37
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div>
38
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
39
-
<div class="px-2">{{ .Line }}</div>
40
-
</div>
35
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span>
37
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span>
38
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
39
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
40
+
</span>
41
41
{{- $newStart = add64 $newStart 1 -}}
42
42
{{- $oldStart = add64 $oldStart 1 -}}
43
43
{{- end -}}
44
44
{{- end -}}
45
-
{{- end -}}</div></div></pre>
45
+
{{- end -}}</div></div></div>
46
46
{{ end }}
47
-
+1530
docs/DOCS.md
···
1
+
---
2
+
title: Tangled Documentation
3
+
author: The Tangled Contributors
4
+
date: Sun, 21 Dec 2025
5
+
---
6
+
7
+
# Introduction
8
+
9
+
Tangled is a decentralized code hosting and collaboration
10
+
platform. Every component of Tangled is open-source and
11
+
self-hostable. [tangled.org](https://tangled.org) also
12
+
provides hosting and CI services that are free to use.
13
+
14
+
There are several models for decentralized code
15
+
collaboration platforms, ranging from ActivityPub’s
16
+
(Forgejo) federated model, to Radicle’s entirely P2P model.
17
+
Our approach attempts to be the best of both worlds by
18
+
adopting atproto—a protocol for building decentralized
19
+
social applications with a central identity.
20
+
21
+
Our approach to this is the idea of “knots”. Knots are
22
+
lightweight, headless servers that enable users to host Git
23
+
repositories with ease. Knots are designed for either single
24
+
or multi-tenant use, which is perfect for self-hosting on a
25
+
Raspberry Pi at home, or larger “community” servers. By
26
+
default, Tangled provides managed knots where you can host
27
+
your repositories for free.
28
+
29
+
The "appview" at tangled.org acts as a consolidated “view”
30
+
into the whole network, allowing users to access, clone and
31
+
contribute to repositories hosted across different knots
32
+
seamlessly.
33
+
34
+
# Quick Start Guide
35
+
36
+
## Login or Sign up
37
+
38
+
You can [log in](https://tangled.org) using your AT
39
+
account. If you are unclear on what that means, simply head
40
+
to the [signup](https://tangled.org/signup) page and create
41
+
an account. By doing so, you will be choosing Tangled as
42
+
your account provider (you will be granted a handle of the
43
+
form `user.tngl.sh`).
44
+
45
+
In the AT network, users are free to choose their account
46
+
provider (known as a "Personal Data Service", or PDS), and
47
+
login to applications that support AT accounts.
48
+
49
+
You can think of it as "one account for all of the
50
+
atmosphere"!
51
+
52
+
If you already have an AT account (you may have one if you
53
+
signed up to Bluesky, for example), you can login with the
54
+
same handle on Tangled (so just use `user.bsky.social` on
55
+
the login page).
56
+
57
+
## Add an SSH Key
58
+
59
+
Once you are logged in, you can start creating repositories
60
+
and pushing code. Tangled supports pushing git repositories
61
+
over SSH.
62
+
63
+
First, you'll need to generate an SSH key if you don't
64
+
already have one:
65
+
66
+
```bash
67
+
ssh-keygen -t ed25519 -C "foo@bar.com"
68
+
```
69
+
70
+
When prompted, save the key to the default location
71
+
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
72
+
73
+
Copy your public key to your clipboard:
74
+
75
+
```bash
76
+
# on X11
77
+
cat ~/.ssh/id_ed25519.pub | xclip -sel c
78
+
79
+
# on wayland
80
+
cat ~/.ssh/id_ed25519.pub | wl-copy
81
+
82
+
# on macos
83
+
cat ~/.ssh/id_ed25519.pub | pbcopy
84
+
```
85
+
86
+
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
87
+
paste your public key, give it a descriptive name, and hit
88
+
save.
89
+
90
+
## Create a Repository
91
+
92
+
Once your SSH key is added, create your first repository:
93
+
94
+
1. Hit the green `+` icon on the topbar, and select
95
+
repository
96
+
2. Enter a repository name
97
+
3. Add a description
98
+
4. Choose a knotserver to host this repository on
99
+
5. Hit create
100
+
101
+
"Knots" are selfhostable, lightweight git servers that can
102
+
host your repository. Unlike traditional code forges, your
103
+
code can live on any server. Read the [Knots](TODO) section
104
+
for more.
105
+
106
+
## Configure SSH
107
+
108
+
To ensure Git uses the correct SSH key and connects smoothly
109
+
to Tangled, add this configuration to your `~/.ssh/config`
110
+
file:
111
+
112
+
```
113
+
Host tangled.org
114
+
Hostname tangled.org
115
+
User git
116
+
IdentityFile ~/.ssh/id_ed25519
117
+
AddressFamily inet
118
+
```
119
+
120
+
This tells SSH to use your specific key when connecting to
121
+
Tangled and prevents authentication issues if you have
122
+
multiple SSH keys.
123
+
124
+
Note that this configuration only works for knotservers that
125
+
are hosted by tangled.org. If you use a custom knot, refer
126
+
to the [Knots](TODO) section.
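
If you host your own knot, the entry has the same shape. A minimal sketch, assuming a knot reachable at `knot.example.com` that serves git over SSH as the `git` user (both placeholders, adjust to your setup):

```
Host knot.example.com
    Hostname knot.example.com
    User git
    IdentityFile ~/.ssh/id_ed25519
```
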
127
+
128
+
## Push Your First Repository
129
+
130
+
Initialize a new git repository:
131
+
132
+
```bash
133
+
mkdir my-project
134
+
cd my-project
135
+
136
+
git init
137
+
echo "# My Project" > README.md
138
+
```
139
+
140
+
Add some content and push!
141
+
142
+
```bash
143
+
git add README.md
144
+
git commit -m "Initial commit"
145
+
git remote add origin git@tangled.org:user.tngl.sh/my-project
146
+
git push -u origin main
147
+
```
148
+
149
+
That's it! Your code is now hosted on Tangled.
150
+
151
+
## Migrating an existing repository
152
+
153
+
Moving your repositories from GitHub, GitLab, Bitbucket, or
154
+
any other Git forge to Tangled is straightforward. You'll
155
+
simply change your repository's remote URL. At the moment,
156
+
Tangled does not have any tooling to migrate data such as
157
+
GitHub issues or pull requests.
158
+
159
+
First, create a new repository on tangled.org as described
160
+
in the [Quick Start Guide](#create-a-repository).
161
+
162
+
Navigate to your existing local repository:
163
+
164
+
```bash
165
+
cd /path/to/your/existing/repo
166
+
```
167
+
168
+
You can inspect your existing git remote like so:
169
+
170
+
```bash
171
+
git remote -v
172
+
```
173
+
174
+
You'll see something like:
175
+
176
+
```
177
+
origin git@github.com:username/my-project (fetch)
178
+
origin git@github.com:username/my-project (push)
179
+
```
180
+
181
+
Update the remote URL to point to tangled:
182
+
183
+
```bash
184
+
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
185
+
```
186
+
187
+
Verify the change:
188
+
189
+
```bash
190
+
git remote -v
191
+
```
192
+
193
+
You should now see:
194
+
195
+
```
196
+
origin git@tangled.org:user.tngl.sh/my-project (fetch)
197
+
origin git@tangled.org:user.tngl.sh/my-project (push)
198
+
```
199
+
200
+
Push all your branches and tags to tangled:
201
+
202
+
```bash
203
+
git push -u origin --all
204
+
git push -u origin --tags
205
+
```
206
+
207
+
Your repository is now migrated to Tangled! All commit
208
+
history, branches, and tags have been preserved.
209
+
210
+
## Mirroring a repository to Tangled
211
+
212
+
If you want to maintain your repository on multiple forges
213
+
simultaneously, for example, keeping your primary repository
214
+
on GitHub while mirroring to Tangled for backup or
215
+
redundancy, you can do so by adding multiple remotes.
216
+
217
+
You can configure your local repository to push to both
218
+
Tangled and, say, GitHub. You may already have the following
219
+
setup:
220
+
221
+
```
222
+
$ git remote -v
223
+
origin git@github.com:username/my-project (fetch)
224
+
origin git@github.com:username/my-project (push)
225
+
```
226
+
227
+
Now add Tangled as an additional push URL to the same
228
+
remote:
229
+
230
+
```bash
231
+
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
232
+
```
233
+
234
+
You also need to re-add the original URL as a push
235
+
destination (git replaces the push URL when you use `--add`
236
+
the first time):
237
+
238
+
```bash
239
+
git remote set-url --add --push origin git@github.com:username/my-project
240
+
```
241
+
242
+
Verify your configuration:
243
+
244
+
```
245
+
$ git remote -v
246
+
origin  git@github.com:username/my-project (fetch)
247
+
origin  git@tangled.org:user.tngl.sh/my-project (push)
248
+
origin  git@github.com:username/my-project (push)
249
+
```
250
+
251
+
Notice that there's one fetch URL (the primary remote) and
252
+
two push URLs. Now, whenever you push, git will
253
+
automatically push to both remotes:
254
+
255
+
```bash
256
+
git push origin main
257
+
```
258
+
259
+
This single command pushes your `main` branch to both GitHub
260
+
and Tangled simultaneously.
261
+
262
+
To push all branches and tags:
263
+
264
+
```bash
265
+
git push origin --all
266
+
git push origin --tags
267
+
```
268
+
269
+
If you prefer more control over which remote you push to,
270
+
you can maintain separate remotes:
271
+
272
+
```bash
273
+
git remote add github git@github.com:username/my-project
274
+
git remote add tangled git@tangled.org:username/my-project
275
+
```
276
+
277
+
Then push to each explicitly:
278
+
279
+
```bash
280
+
git push github main
281
+
git push tangled main
282
+
```
283
+
284
+
# Knot self-hosting guide
285
+
286
+
So you want to run your own knot server? Great! Here are a few prerequisites:
287
+
288
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
289
+
2. A (sub)domain name. People generally use `knot.example.com`.
290
+
3. A valid SSL certificate for your domain.
291
+
292
+
## NixOS
293
+
294
+
Refer to the [knot
295
+
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
296
+
for a full list of options. Sample configurations:
297
+
298
+
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
299
+
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
300
+
301
+
## Docker
302
+
303
+
Refer to
304
+
[@tangled.org/knot-docker](https://tangled.sh/@tangled.sh/knot-docker).
305
+
Note that this is community maintained.
306
+
307
+
## Manual setup
308
+
309
+
First, clone this repository:
310
+
311
+
```
312
+
git clone https://tangled.org/@tangled.org/core
313
+
```
314
+
315
+
Then, build the `knot` CLI. This is the knot administration
316
+
and operation tool. For the purpose of this guide, we're
317
+
only concerned with these subcommands:
318
+
319
+
* `knot server`: the main knot server process, typically
320
+
run as a supervised service
321
+
* `knot guard`: handles role-based access control for git
322
+
over SSH (you'll never have to run this yourself)
323
+
* `knot keys`: fetches SSH keys associated with your knot;
324
+
we'll use this to generate the SSH
325
+
`AuthorizedKeysCommand`
326
+
327
+
```
328
+
cd core
329
+
export CGO_ENABLED=1
330
+
go build -o knot ./cmd/knot
331
+
```
332
+
333
+
Next, move the `knot` binary to a location owned by `root` --
334
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
335
+
336
+
```
337
+
sudo mv knot /usr/local/bin/knot
338
+
sudo chown root:root /usr/local/bin/knot
339
+
```
340
+
341
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
342
+
specific permissions](https://stackoverflow.com/a/27638306). The
343
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
344
+
retrieve a user's public SSH keys dynamically for authentication. Let's
345
+
set that up.
346
+
347
+
```
348
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
349
+
Match User git
350
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
351
+
AuthorizedKeysCommandUser nobody
352
+
EOF
353
+
```
354
+
355
+
Then, reload `sshd`:
356
+
357
+
```
358
+
sudo systemctl reload ssh
359
+
```
360
+
361
+
Next, create the `git` user. We'll use the `git` user's home directory
362
+
to store repositories:
363
+
364
+
```
365
+
sudo adduser git
366
+
```
367
+
368
+
Create `/home/git/.knot.env` with the following, updating the values as
369
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
370
+
DID; you can find your DID on the [Settings](https://tangled.sh/settings) page.
371
+
372
+
```
373
+
KNOT_REPO_SCAN_PATH=/home/git
374
+
KNOT_SERVER_HOSTNAME=knot.example.com
375
+
APPVIEW_ENDPOINT=https://tangled.sh
376
+
KNOT_SERVER_OWNER=did:plc:foobar
377
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
378
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
379
+
```
380
+
381
+
If you run a Linux distribution that uses systemd, you can use the provided
382
+
service file to run the server. Copy
383
+
[`knotserver.service`](/systemd/knotserver.service)
384
+
to `/etc/systemd/system/`. Then, run:
385
+
386
+
```
387
+
systemctl enable knotserver
388
+
systemctl start knotserver
389
+
```
390
+
391
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
392
+
knot. Here's an example configuration for Nginx:
393
+
394
+
```
395
+
server {
396
+
listen 80;
397
+
listen [::]:80;
398
+
server_name knot.example.com;
399
+
400
+
location / {
401
+
proxy_pass http://localhost:5555;
402
+
proxy_set_header Host $host;
403
+
proxy_set_header X-Real-IP $remote_addr;
404
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
405
+
proxy_set_header X-Forwarded-Proto $scheme;
406
+
}
407
+
408
+
# wss endpoint for git events
409
+
location /events {
410
+
proxy_set_header X-Forwarded-For $remote_addr;
411
+
proxy_set_header Host $http_host;
412
+
proxy_set_header Upgrade websocket;
413
+
proxy_set_header Connection Upgrade;
414
+
proxy_pass http://localhost:5555;
415
+
}
416
+
# additional config for SSL/TLS go here.
417
+
}
418
+
419
+
```
420
+
421
+
Remember to use Let's Encrypt or similar to procure a certificate for your
422
+
knot domain.
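
One common way to do that is certbot with its nginx plugin; a sketch, assuming certbot is installed on the same host as the reverse proxy:

```
sudo certbot --nginx -d knot.example.com
```
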
423
+
424
+
You should now have a running knot server! You can finalize
425
+
your registration by hitting the `verify` button on the
426
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
427
+
a record on your PDS to announce the existence of the knot.
428
+
429
+
### Custom paths
430
+
431
+
(This section applies to manual setup only. Docker users should edit the mounts
432
+
in `docker-compose.yml` instead.)
433
+
434
+
Right now, the database and repositories of your knot live in `/home/git`. You
435
+
can move these paths if you'd like to store them in another folder. Be careful
436
+
when adjusting these paths:
437
+
438
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
439
+
any possible side effects. Remember to restart it once you're done.
440
+
* Make backups before moving in case something goes wrong.
441
+
* Make sure the `git` user can read and write from the new paths.
442
+
443
+
#### Database
444
+
445
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
446
+
and we want to move it to `/home/git/database/knotserver.db`.
447
+
448
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
449
+
and `.db-wal` files if they exist.
450
+
451
+
```
452
+
mkdir /home/git/database
453
+
cp /home/git/knotserver.db* /home/git/database
454
+
```
455
+
456
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
457
+
the new file path (_not_ the directory):
458
+
459
+
```
460
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
461
+
```
462
+
463
+
#### Repositories
464
+
465
+
As an example, let's say the repositories are currently in `/home/git`, and we
466
+
want to move them into `/home/git/repositories`.
467
+
468
+
Create the new folder, then move the existing repositories (if there are any):
469
+
470
+
```
471
+
mkdir /home/git/repositories
472
+
# move all DIDs into the new folder; these will vary for you!
473
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
474
+
```
475
+
476
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
477
+
to the new directory:
478
+
479
+
```
480
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
481
+
```
482
+
483
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
484
+
repository path:
485
+
486
+
```
487
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
488
+
Match User git
489
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
490
+
AuthorizedKeysCommandUser nobody
491
+
EOF
492
+
```
493
+
494
+
Make sure to restart your SSH server!
495
+
496
+
#### MOTD (message of the day)
497
+
498
+
To configure the MOTD used ("Welcome to this knot!" by default), edit the
499
+
`/home/git/motd` file:
500
+
501
+
```
502
+
printf "Hi from this knot!\n" > /home/git/motd
503
+
```
504
+
505
+
Note that you should add a newline at the end if setting a non-empty message
506
+
since the knot won't do this for you.
507
+
508
+
# Spindles
509
+
510
+
## Pipelines
511
+
512
+
Spindle workflows allow you to write CI/CD pipelines in a
513
+
simple format. They're located in the `.tangled/workflows`
514
+
directory at the root of your repository, and are defined
515
+
using YAML.
516
+
517
+
The fields are:
518
+
519
+
- [Trigger](#trigger): A **required** field that defines
520
+
when a workflow should be triggered.
521
+
- [Engine](#engine): A **required** field that defines which
522
+
engine a workflow should run on.
523
+
- [Clone options](#clone-options): An **optional** field
524
+
that defines how the repository should be cloned.
525
+
- [Dependencies](#dependencies): An **optional** field that
526
+
allows you to list dependencies you may need.
527
+
- [Environment](#environment): An **optional** field that
528
+
allows you to define environment variables.
529
+
- [Steps](#steps): An **optional** field that allows you to
530
+
define what steps should run in the workflow.
531
+
532
+
### Trigger
533
+
534
+
The first thing to add to a workflow is the trigger, which
535
+
defines when a workflow runs. This is defined using a `when`
536
+
field, which takes in a list of conditions. Each condition
537
+
has the following fields:
538
+
539
+
- `event`: This is a **required** field that defines when
540
+
your workflow should run. It's a list that can take one or
541
+
more of the following values:
542
+
- `push`: The workflow should run every time a commit is
543
+
pushed to the repository.
544
+
- `pull_request`: The workflow should run every time a
545
+
pull request is made or updated.
546
+
- `manual`: The workflow can be triggered manually.
547
+
- `branch`: Defines which branches the workflow should run
548
+
for. If used with the `push` event, commits to the
549
+
branch(es) listed here will trigger the workflow. If used
550
+
with the `pull_request` event, updates to pull requests
551
+
targeting the branch(es) listed here will trigger the
552
+
workflow. This field has no effect with the `manual`
553
+
event. Supports glob patterns using `*` and `**` (e.g.,
554
+
`main`, `develop`, `release-*`). Either `branch` or `tag`
555
+
(or both) must be specified for `push` events.
556
+
- `tag`: Defines which tags the workflow should run for.
557
+
Only used with the `push` event - when tags matching the
558
+
pattern(s) listed here are pushed, the workflow will
559
+
trigger. This field has no effect with `pull_request` or
560
+
`manual` events. Supports glob patterns using `*` and `**`
561
+
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
562
+
`tag` (or both) must be specified for `push` events.
563
+
564
+
For example, if you'd like to define a workflow that runs
565
+
when commits are pushed to the `main` and `develop`
566
+
branches, or when pull requests that target the `main`
567
+
branch are updated, or manually, you can do so with:
568
+
569
+
```yaml
570
+
when:
571
+
- event: ["push", "manual"]
572
+
branch: ["main", "develop"]
573
+
- event: ["pull_request"]
574
+
branch: ["main"]
575
+
```
576
+
577
+
You can also trigger workflows on tag pushes. For instance,
578
+
to run a deployment workflow when tags matching `v*` are
579
+
pushed:
580
+
581
+
```yaml
582
+
when:
583
+
- event: ["push"]
584
+
tag: ["v*"]
585
+
```
586
+
587
+
You can even combine branch and tag patterns in a single
588
+
constraint (the workflow triggers if either matches):
589
+
590
+
```yaml
591
+
when:
592
+
- event: ["push"]
593
+
branch: ["main", "release-*"]
594
+
tag: ["v*", "stable"]
595
+
```
596
+
597
+
### Engine
598
+
599
+
Next is the engine on which the workflow should run, defined
600
+
using the **required** `engine` field. The currently
601
+
supported engines are:
602
+
603
+
- `nixery`: This uses an instance of
604
+
[Nixery](https://nixery.dev) to run steps, which allows
605
+
you to add [dependencies](#dependencies) from
606
+
[Nixpkgs](https://github.com/NixOS/nixpkgs). You can
607
+
search for packages on https://search.nixos.org, and
608
+
there's a pretty good chance the package(s) you're looking
609
+
for will be there.
610
+
611
+
Example:
612
+
613
+
```yaml
614
+
engine: "nixery"
615
+
```
616
+
617
+
### Clone options
618
+
619
+
When a workflow starts, the first step is to clone the
620
+
repository. You can customize this behavior using the
621
+
**optional** `clone` field. It has the following fields:
622
+
623
+
- `skip`: Setting this to `true` will skip cloning the
624
+
repository. This can be useful if your workflow is doing
625
+
something that doesn't require anything from the
626
+
repository itself. This is `false` by default.
627
+
- `depth`: This sets the number of commits, or the "clone
628
+
depth", to fetch from the repository. For example, if you
629
+
set this to 2, the last 2 commits will be fetched. By
630
+
default, the depth is set to 1, meaning only the most
631
+
recent commit will be fetched, which is the commit that
632
+
triggered the workflow.
633
+
- `submodules`: If you use [git
634
+
submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules)
635
+
in your repository, setting this field to `true` will
636
+
recursively fetch all submodules. This is `false` by
637
+
default.
638
+
639
+
The default settings are:
640
+
641
+
```yaml
642
+
clone:
643
+
skip: false
644
+
depth: 1
645
+
submodules: false
646
+
```
647
+
648
+
### Dependencies
649
+
650
+
Usually when you're running a workflow, you'll need
651
+
additional dependencies. The `dependencies` field lets you
652
+
define which dependencies to get, and from where. It's a
653
+
key-value map, with the key being the registry to fetch
654
+
dependencies from, and the value being the list of
655
+
dependencies to fetch.
656
+
657
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a
658
+
package called `my_pkg` that you've made, hosted in your own
659
+
repository at
660
+
`https://tangled.sh/@example.com/my_pkg`. You can define
661
+
those dependencies like so:
662
+
663
+
```yaml
664
+
dependencies:
665
+
# nixpkgs
666
+
nixpkgs:
667
+
- nodejs
668
+
- go
669
+
# custom registry
670
+
git+https://tangled.org/@example.com/my_pkg:
671
+
- my_pkg
672
+
```
673
+
674
+
Now these dependencies are available to use in your
675
+
workflow!
676
+
677
+
### Environment
678
+
679
+
The `environment` field allows you to define environment
680
+
variables that will be available throughout the entire
681
+
workflow. **Do not put secrets here, these environment
682
+
variables are visible to anyone viewing the repository. You
683
+
can add secrets for pipelines in your repository's
684
+
settings.**
685
+
686
+
Example:
687
+
688
+
```yaml
689
+
environment:
690
+
GOOS: "linux"
691
+
GOARCH: "arm64"
692
+
NODE_ENV: "production"
693
+
MY_ENV_VAR: "MY_ENV_VALUE"
694
+
```
695
+
696
+
### Steps
697
+
698
+
The `steps` field allows you to define what steps should run
699
+
in the workflow. It's a list of step objects, each with the
700
+
following fields:
701
+
702
+
- `name`: This field allows you to give your step a name.
703
+
This name is visible in your workflow runs, and is used to
704
+
describe what the step is doing.
705
+
- `command`: This field allows you to define a command to
706
+
run in that step. The step is run in a Bash shell, and the
707
+
logs from the command will be visible in the pipelines
708
+
page on the Tangled website. The
709
+
[dependencies](#dependencies) you added will be available
710
+
to use here.
711
+
- `environment`: Similar to the global
712
+
[environment](#environment) config, this **optional**
713
+
field is a key-value map that allows you to set
714
+
environment variables for the step. **Do not put secrets
715
+
here, these environment variables are visible to anyone
716
+
viewing the repository. You can add secrets for pipelines
717
+
in your repository's settings.**
718
+
719
+
Example:
720
+
721
+
```yaml
722
+
steps:
723
+
- name: "Build backend"
724
+
command: "go build"
725
+
environment:
726
+
GOOS: "darwin"
727
+
GOARCH: "arm64"
728
+
- name: "Build frontend"
729
+
command: "npm run build"
730
+
environment:
731
+
NODE_ENV: "production"
732
+
```
733
+
734
+
### Complete workflow
735
+
736
+
```yaml
737
+
# .tangled/workflows/build.yml
738
+
739
+
when:
740
+
- event: ["push", "manual"]
741
+
branch: ["main", "develop"]
742
+
- event: ["pull_request"]
743
+
branch: ["main"]
744
+
745
+
engine: "nixery"
746
+
747
+
# using the default values
748
+
clone:
749
+
skip: false
750
+
depth: 1
751
+
submodules: false
752
+
753
+
dependencies:
754
+
# nixpkgs
755
+
nixpkgs:
756
+
- nodejs
757
+
- go
758
+
# custom registry
759
+
git+https://tangled.org/@example.com/my_pkg:
760
+
- my_pkg
761
+
762
+
environment:
763
+
GOOS: "linux"
764
+
GOARCH: "arm64"
765
+
NODE_ENV: "production"
766
+
MY_ENV_VAR: "MY_ENV_VALUE"
767
+
768
+
steps:
769
+
- name: "Build backend"
770
+
command: "go build"
771
+
environment:
772
+
GOOS: "darwin"
773
+
GOARCH: "arm64"
774
+
- name: "Build frontend"
775
+
command: "npm run build"
776
+
environment:
777
+
NODE_ENV: "production"
778
+
```
779
+
780
+
If you want another example of a workflow, you can look at
781
+
the one [Tangled uses to build the
782
+
project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
783
+
784
+
## Self-hosting guide
785
+
786
+
### Prerequisites
787
+
788
+
* Go
789
+
* Docker (the only supported backend currently)
790
+
791
+
### Configuration
792
+
793
+
Spindle is configured using environment variables. The following environment variables are available:
794
+
795
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
796
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
797
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
798
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
799
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
800
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
801
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
802
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
803
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
804
+
805
+
### Running spindle
806
+
807
+
1. **Set the environment variables.** For example:
808
+
809
+
```shell
810
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
811
+
export SPINDLE_SERVER_OWNER="your-did"
812
+
```
813
+
814
+
2. **Build the Spindle binary.**
815
+
816
+
```shell
817
+
cd core
818
+
go mod download
819
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
820
+
```
821
+
822
+
3. **Create the log directory.**
823
+
824
+
```shell
825
+
sudo mkdir -p /var/log/spindle
826
+
sudo chown $USER:$USER -R /var/log/spindle
827
+
```
828
+
829
+
4. **Run the Spindle binary.**
830
+
831
+
```shell
832
+
./cmd/spindle/spindle
833
+
```
834
+
835
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
836
+
837
+
## Architecture
838
+
839
+
Spindle is a small CI runner service. Here's a high level overview of how it operates:
840
+
841
+
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
842
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream (see the sketch after this list).
843
+
* when a new repo record comes through (typically when you add a spindle to a
844
+
repo from the settings), spindle then resolves the underlying knot and
845
+
subscribes to repo events (see:
846
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
847
+
* the spindle engine then handles execution of the pipeline, with results and
848
+
logs beamed on the spindle event stream over wss
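
To get a feel for the events a spindle consumes, you can subscribe to the same Jetstream endpoint yourself and filter for the relevant collections. A minimal sketch, assuming `websocat` is installed and using the default public Jetstream instance:

```bash
# stream spindle member and repo records as they appear on the network
websocat "wss://jetstream1.us-west.bsky.network/subscribe?wantedCollections=sh.tangled.spindle.member&wantedCollections=sh.tangled.repo"
```
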
849
+
850
+
### The engine
851
+
852
+
At present, the only supported backend is Docker (and Podman, if Docker
853
+
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
854
+
executes each step in the pipeline in a fresh container, with state persisted
855
+
across steps within the `/tangled/workspace` directory.
856
+
857
+
The base image for the container is constructed on the fly using
858
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
859
+
used packages.
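
As a rough illustration (using the public `nixery.dev` instance rather than Tangled's own), the packages that make up an image are encoded in the image name itself, and the layers are built on demand:

```bash
# pulls an ad-hoc image containing a shell, git and go
docker run --rm -it nixery.dev/shell/git/go bash
```
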
860
+
861
+
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
862
+
863
+
## Secrets with openbao
864
+
865
+
This document covers setting up Spindle to use OpenBao for secrets
866
+
management via OpenBao Proxy instead of the default SQLite backend.
867
+
868
+
### Overview
869
+
870
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
871
+
authentication automatically using AppRole credentials, while Spindle
872
+
connects to the local proxy instead of directly to the OpenBao server.
873
+
874
+
This approach provides better security, automatic token renewal, and
875
+
simplified application code.
876
+
877
+
### Installation
878
+
879
+
Install OpenBao from nixpkgs:
880
+
881
+
```bash
882
+
nix shell nixpkgs#openbao # for a local server
883
+
```
884
+
885
+
### Setup
886
+
887
+
The setup process is documented for both local development and production.
888
+
889
+
#### Local development
890
+
891
+
Start OpenBao in dev mode:
892
+
893
+
```bash
894
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
895
+
```
896
+
897
+
This starts OpenBao on `http://localhost:8200` with a root token.
898
+
899
+
Set up environment for bao CLI:
900
+
901
+
```bash
902
+
export BAO_ADDR=http://localhost:8200
903
+
export BAO_TOKEN=root
904
+
```
905
+
906
+
#### Production
907
+
908
+
You would typically use a systemd service with a
909
+
configuration file. Refer to
910
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
911
+
for how this can be achieved using Nix.
912
+
913
+
Then, initialize the bao server:
914
+
915
+
```bash
916
+
bao operator init -key-shares=1 -key-threshold=1
917
+
```
918
+
919
+
This will print out an unseal key and a root token. Save them
920
+
somewhere (like a password manager). Then unseal the vault
921
+
to begin setting it up:
922
+
923
+
```bash
924
+
bao operator unseal <unseal_key>
925
+
```
926
+
927
+
All steps below remain the same across both dev and
928
+
production setups.
929
+
930
+
#### Configure openbao server
931
+
932
+
Create the spindle KV mount:
933
+
934
+
```bash
935
+
bao secrets enable -path=spindle -version=2 kv
936
+
```
937
+
938
+
Set up AppRole authentication and policy:
939
+
940
+
Create a policy file `spindle-policy.hcl`:
941
+
942
+
```hcl
943
+
# Full access to spindle KV v2 data
944
+
path "spindle/data/*" {
945
+
capabilities = ["create", "read", "update", "delete"]
946
+
}
947
+
948
+
# Access to metadata for listing and management
949
+
path "spindle/metadata/*" {
950
+
capabilities = ["list", "read", "delete", "update"]
951
+
}
952
+
953
+
# Allow listing at root level
954
+
path "spindle/" {
955
+
capabilities = ["list"]
956
+
}
957
+
958
+
# Required for connection testing and health checks
959
+
path "auth/token/lookup-self" {
960
+
capabilities = ["read"]
961
+
}
962
+
```
963
+
964
+
Apply the policy and create an AppRole:
965
+
966
+
```bash
967
+
bao policy write spindle-policy spindle-policy.hcl
968
+
bao auth enable approle
969
+
bao write auth/approle/role/spindle \
970
+
token_policies="spindle-policy" \
971
+
token_ttl=1h \
972
+
token_max_ttl=4h \
973
+
bind_secret_id=true \
974
+
secret_id_ttl=0 \
975
+
secret_id_num_uses=0
976
+
```
977
+
978
+
Get the credentials:
979
+
980
+
```bash
981
+
# Get role ID (static)
982
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
983
+
984
+
# Generate secret ID
985
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
986
+
987
+
echo "Role ID: $ROLE_ID"
988
+
echo "Secret ID: $SECRET_ID"
989
+
```
990
+
991
+
#### Create proxy configuration
992
+
993
+
Create the credential files:
994
+
995
+
```bash
996
+
# Create directory for OpenBao files
997
+
mkdir -p /tmp/openbao
998
+
999
+
# Save credentials
1000
+
echo "$ROLE_ID" > /tmp/openbao/role-id
1001
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
1002
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1003
+
```
1004
+
1005
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1006
+
1007
+
```hcl
1008
+
# OpenBao server connection
1009
+
vault {
1010
+
address = "http://localhost:8200"
1011
+
}
1012
+
1013
+
# Auto-Auth using AppRole
1014
+
auto_auth {
1015
+
method "approle" {
1016
+
mount_path = "auth/approle"
1017
+
config = {
1018
+
role_id_file_path = "/tmp/openbao/role-id"
1019
+
secret_id_file_path = "/tmp/openbao/secret-id"
1020
+
}
1021
+
}
1022
+
1023
+
# Optional: write token to file for debugging
1024
+
sink "file" {
1025
+
config = {
1026
+
path = "/tmp/openbao/token"
1027
+
mode = 0640
1028
+
}
1029
+
}
1030
+
}
1031
+
1032
+
# Proxy listener for Spindle
1033
+
listener "tcp" {
1034
+
address = "127.0.0.1:8201"
1035
+
tls_disable = true
1036
+
}
1037
+
1038
+
# Enable API proxy with auto-auth token
1039
+
api_proxy {
1040
+
use_auto_auth_token = true
1041
+
}
1042
+
1043
+
# Enable response caching
1044
+
cache {
1045
+
use_auto_auth_token = true
1046
+
}
1047
+
1048
+
# Logging
1049
+
log_level = "info"
1050
+
```
1051
+
1052
+
#### Start the proxy
1053
+
1054
+
Start OpenBao Proxy:
1055
+
1056
+
```bash
1057
+
bao proxy -config=/tmp/openbao/proxy.hcl
1058
+
```
1059
+
1060
+
The proxy will authenticate with OpenBao and start listening on
1061
+
`127.0.0.1:8201`.
1062
+
1063
+
#### Configure spindle
1064
+
1065
+
Set these environment variables for Spindle:
1066
+
1067
+
```bash
1068
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1069
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1070
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1071
+
```
1072
+
1073
+
On startup, Spindle will now connect to the local proxy,
1074
+
which handles all authentication automatically.
1075
+
1076
+
### Production setup for proxy
1077
+
1078
+
For production, you'll want to run the proxy as a service:
1079
+
1080
+
Place your production configuration in
1081
+
`/etc/openbao/proxy.hcl` with proper TLS settings for the
1082
+
vault connection.
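
A minimal sketch of such a unit is below; the unit name matches the `journalctl` example in the troubleshooting section, but the binary path and service user are assumptions to adapt to your system:

```
# /etc/systemd/system/openbao-proxy.service (sketch)
[Unit]
Description=OpenBao Proxy for Spindle
After=network.target

[Service]
ExecStart=/usr/local/bin/bao proxy -config=/etc/openbao/proxy.hcl
Restart=on-failure
User=openbao

[Install]
WantedBy=multi-user.target
```
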
1083
+
1084
+
### Verifying setup
1085
+
1086
+
Test the proxy directly:
1087
+
1088
+
```bash
1089
+
# Check proxy health
1090
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1091
+
1092
+
# Test token lookup through proxy
1093
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1094
+
```
1095
+
1096
+
Test OpenBao operations through the server:
1097
+
1098
+
```bash
1099
+
# List all secrets
1100
+
bao kv list spindle/
1101
+
1102
+
# Add a test secret via Spindle API, then check it exists
1103
+
bao kv list spindle/repos/
1104
+
1105
+
# Get a specific secret
1106
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
1107
+
```
1108
+
1109
+
### How it works
1110
+
1111
+
- Spindle connects to OpenBao Proxy on localhost (typically
1112
+
port 8200 or 8201)
1113
+
- The proxy authenticates with OpenBao using AppRole
1114
+
credentials
1115
+
- All Spindle requests go through the proxy, which injects
1116
+
authentication tokens
1117
+
- Secrets are stored at
1118
+
`spindle/repos/{sanitized_repo_path}/{secret_key}`
1119
+
- Repository paths like `did:plc:alice/myrepo` become
1120
+
`did_plc_alice_myrepo` (see the example below)
1121
+
- The proxy handles all token renewal automatically
1122
+
- Spindle no longer manages tokens or authentication
1123
+
directly
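
For example, a secret named `DEPLOY_TOKEN` (a hypothetical name) for the repository `did:plc:alice/myrepo` lands under the sanitized path and can be inspected with the bao CLI:

```bash
bao kv get spindle/repos/did_plc_alice_myrepo/DEPLOY_TOKEN
```
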
1124
+
1125
+
### Troubleshooting
1126
+
1127
+
**Connection refused**: Check that the OpenBao Proxy is
1128
+
running and listening on the configured address.
1129
+
1130
+
**403 errors**: Verify the AppRole credentials are correct
1131
+
and the policy has the necessary permissions.
1132
+
1133
+
**404 route errors**: The spindle KV mount probably doesn't
1134
+
exist - run the mount creation step again.
1135
+
1136
+
**Proxy authentication failures**: Check the proxy logs and
1137
+
verify the role-id and secret-id files are readable and
1138
+
contain valid credentials.
1139
+
1140
+
**Secret not found after writing**: This can indicate policy
1141
+
permission issues. Verify the policy includes both
1142
+
`spindle/data/*` and `spindle/metadata/*` paths with
1143
+
appropriate capabilities.
1144
+
1145
+
Check proxy logs:
1146
+
1147
+
```bash
1148
+
# If running as systemd service
1149
+
journalctl -u openbao-proxy -f
1150
+
1151
+
# If running directly, check the console output
1152
+
```
1153
+
1154
+
Test AppRole authentication manually:
1155
+
1156
+
```bash
1157
+
bao write auth/approle/login \
1158
+
role_id="$(cat /tmp/openbao/role-id)" \
1159
+
secret_id="$(cat /tmp/openbao/secret-id)"
1160
+
```
1161
+
1162
+
# Migrating knots & spindles
1163
+
1164
+
Sometimes, non-backwards compatible changes are made to the
1165
+
knot/spindle XRPC APIs. If you host a knot or a spindle, you
1166
+
will need to follow this guide to upgrade. Typically, this
1167
+
only requires you to deploy the newest version.
1168
+
1169
+
This document is laid out in reverse-chronological order.
1170
+
Newer migration guides are listed first, and older guides
1171
+
are further down the page.
1172
+
1173
+
## Upgrading from v1.8.x
1174
+
1175
+
After v1.8.2, the HTTP APIs for knots and spindles have been
1176
+
deprecated and replaced with XRPC. Repositories on outdated
1177
+
knots will not be viewable from the appview. Upgrading is
1178
+
straightforward, however.
1179
+
1180
+
For knots:
1181
+
1182
+
- Upgrade to latest tag (v1.9.0 or above)
1183
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1184
+
hit the "retry" button to verify your knot
1185
+
1186
+
For spindles:
1187
+
1188
+
- Upgrade to latest tag (v1.9.0 or above)
1189
+
- Head to the [spindle
1190
+
dashboard](https://tangled.org/settings/spindles) and hit the
1191
+
"retry" button to verify your spindle
1192
+
1193
+
## Upgrading from v1.7.x

After v1.7.0, knot secrets have been deprecated. You no longer need a secret from the appview to run a knot. All authorized commands to knots are managed via [Inter-Service Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). Knots will be read-only until upgraded.

Upgrading is quite easy, in essence:

- `KNOT_SERVER_SECRET` is no more; you can remove this environment variable entirely
- `KNOT_SERVER_OWNER` is now required on boot; set this to your DID (sketched below). You can find your DID in the [settings](https://tangled.org/settings) page.
- Restart your knot once you have replaced the environment variable
- Head to the [knot dashboard](https://tangled.org/settings/knots) and hit the "retry" button to verify your knot. This simply writes a `sh.tangled.knot` record to your PDS.

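For a manual setup that keeps its configuration in `/home/git/.knot.env`, the change amounts to something like the following; the DID is a placeholder and your file may live elsewhere:

```bash
# /home/git/.knot.env
# KNOT_SERVER_SECRET=...            <- remove this line
KNOT_SERVER_OWNER=did:plc:foobar    # your DID from the settings page
```

Then restart the knot, for example with `sudo systemctl restart knotserver` if you use the provided systemd unit.
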
If you use the nix module, simply bump the flake to the latest revision, and change your config block like so:

```diff
services.tangled.knot = {
  enable = true;
  server = {
-   secretFile = /path/to/secret;
+   owner = "did:plc:foo";
  };
};
```

# Hacking on Tangled

We highly recommend [installing nix](https://nixos.org/download/) (the package manager) before working on the codebase. The nix flake provides a lot of helpers to get started and most importantly, builds and dev shells are entirely deterministic.

To set up your dev environment:

```bash
nix develop
```

Non-nix users can look at the `devShell` attribute in the `flake.nix` file to determine necessary dependencies.

## Running the appview

The nix flake also exposes a few `app` attributes (run `nix flake show` to see a full list of what the flake provides); one of the apps runs the appview with the `air` live-reloader:

```bash
TANGLED_DEV=true nix run .#watch-appview

# TANGLED_DB_PATH might be of interest to point to
# different sqlite DBs

# in a separate shell, you can live-reload tailwind
nix run .#watch-tailwind
```

To authenticate with the appview, you will need Redis and OAuth JWKs to be set up:

```
# oauth jwks should already be set up by the nix devshell:
echo $TANGLED_OAUTH_CLIENT_SECRET
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc

echo $TANGLED_OAUTH_CLIENT_KID
1761667908

# if not, you can set it up yourself:
goat key generate -t P-256
Key Type: P-256 / secp256r1 / ES256 private key
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR

# the secret key from above
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."

# run redis in a new shell to store oauth sessions
redis-server
```

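The client KID shown above is just a unix timestamp. If you generate your own key instead of using the devshell defaults, any unique identifier appears to work; the following is an assumption on our part rather than a requirement of the tooling:

```bash
export TANGLED_OAUTH_CLIENT_KID=$(date +%s)
```
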
## Running knots and spindles

An end-to-end knot setup requires setting up a machine with `sshd`, `AuthorizedKeysCommand`, and a git user, which is quite cumbersome. So the nix flake provides a `nixosConfiguration` to do so.

<details>
<summary><strong>macOS users will have to set up a Nix builder first</strong></summary>

In order to build Tangled's dev VM on macOS, you will first need to set up a Linux Nix builder. The recommended way to do so is to run a [`darwin.linux-builder` VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) and to register it in `nix.conf` as a builder for Linux with the same architecture as your Mac (`linux-aarch64` if you are using Apple Silicon).

> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
> the tangled repo so that it doesn't conflict with the other VM. For example,
> you can do
>
> ```shell
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
> ```
>
> to store the builder VM in a temporary dir.
>
> You should read and follow [all the other
> instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
> to avoid subtle problems.

Alternatively, you can use any other method to set up a Linux machine with `nix` installed that you can `sudo ssh` into (in other words, the root user on your Mac has to be able to ssh into the Linux machine without entering a password) and that has the same architecture as your Mac. See the [remote builder instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements) for how to register such a builder in `nix.conf`.

> WARNING: If you'd like to use
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
> [Orbstack](https://orbstack.dev/), note that setting them up so that
> `sudo ssh` works can be tricky. It seems to be [possible with
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).

</details>

To begin, grab your DID from http://localhost:3000/settings. Then, set `TANGLED_VM_KNOT_OWNER` and `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a lightweight NixOS VM like so:

```bash
nix run --impure .#vm

# type `poweroff` at the shell to exit the VM
```

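For example, in the shell you launch the VM from (the DID below is a placeholder for your own):

```bash
export TANGLED_VM_KNOT_OWNER="did:plc:foobar"
export TANGLED_VM_SPINDLE_OWNER="did:plc:foobar"
nix run --impure .#vm
```
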
This starts a knot on port 6444 and a spindle on port 6555, with `ssh` exposed on port 2222.

Once the services are running, head to http://localhost:3000/settings/knots and hit verify. It should verify the ownership of the services instantly if everything went smoothly.

You can push repositories to this VM with this ssh config block on your main machine:

```bash
Host nixos-shell
    Hostname localhost
    Port 2222
    User git
    IdentityFile ~/.ssh/my_tangled_key
```

Set up a remote called `local-dev` on a git repo:

```bash
git remote add local-dev git@nixos-shell:user/repo
git push local-dev main
```

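To confirm the remote is reachable before pushing, a quick check (assuming the ssh config block above and that `user/repo` already exists on the knot):

```bash
# Should list the remote's refs without prompting for a password
git ls-remote local-dev
```
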
The above VM should already be running a spindle on `localhost:6555`. Head to http://localhost:3000/settings/spindles and hit verify. You can then configure each repository to use this spindle and run CI jobs.

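To actually exercise the spindle, the repository needs a workflow under `.tangled/workflows/`. A minimal sketch, with an arbitrary file name and step, is shown below; see the pipelines reference for the full schema:

```bash
mkdir -p .tangled/workflows
cat > .tangled/workflows/ci.yml <<'EOF'
when:
  - event: ["push"]
    branch: ["main"]

engine: "nixery"

dependencies:
  nixpkgs:
    - go

steps:
  - name: "Build"
    command: "go build ./..."
EOF

git add .tangled/workflows/ci.yml
git commit -s -m "ci: add a basic build workflow"
git push local-dev main
```
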
Of interest when debugging spindles:

```
# service logs from journald:
journalctl -xeu spindle

# CI job logs from disk:
ls /var/log/spindle

# debugging spindle db:
sqlite3 /var/lib/spindle/spindle.db

# litecli has a nicer REPL interface:
litecli /var/lib/spindle/spindle.db
```

If for any reason you wish to disable either one of the services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set `services.tangled.spindle.enable` (or `services.tangled.knot.enable`) to `false`.

# Contribution guide

## Commit guidelines

We follow a commit style similar to the Go project. Please keep commits:

* **atomic**: each commit should represent one logical change
* **descriptive**: the commit message should clearly describe what the change does and why it's needed

### Message format

```
<service/top-level directory>/<affected package/directory>: <short summary of change>

Optional longer description can go here, if necessary. Explain what the
change does and why, especially if not obvious. Reference relevant
issues or PRs when applicable. These can be links for now since we don't
auto-link issues/PRs yet.
```

Here are some examples:

```
appview/state: fix token expiry check in middleware

The previous check did not account for clock drift, leading to premature
token invalidation.
```

```
knotserver/git/service: improve error checking in upload-pack
```

### General notes

- PRs get merged "as-is" (fast-forward) -- like applying a patch-series using `git am`. At present, there is no squashing -- so please author your commits as they would appear on `master`, following the above guidelines.
- If there is a lot of nesting, for example "appview: pages/templates/repo/fragments: ...", these can be truncated down to just "appview: repo/fragments: ...". If the change affects a lot of subdirectories, you may abbreviate to just the top-level names, e.g. "appview: ..." or "knotserver: ...".
- Keep commits lowercased with no trailing period.
- Use the imperative mood in the summary line (e.g., "fix bug" not "fixed bug" or "fixes bug").
- Try to keep the summary line under 72 characters, but we aren't too fussed about this.
- Follow the same formatting for PR titles if filled manually.
- Don't include unrelated changes in the same commit.
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history before submitting if necessary.

## Code formatting

We use a variety of tools to format our code, and multiplex them with [`treefmt`](https://treefmt.com): all you need to do to format your changes is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).

## Proposals for bigger changes

Small fixes like typos, minor bugs, or trivial refactors can be submitted directly as PRs.

For larger changes—especially those introducing new features, significant refactoring, or altering system behavior—please open a proposal first. This helps us evaluate the scope, design, and potential impact before implementation.

Create a new issue titled:

```
proposal: <affected scope>: <summary of change>
```

In the description, explain:

- What the change is
- Why it's needed
- How you plan to implement it (roughly)
- Any open questions or tradeoffs

We'll use the issue thread to discuss and refine the idea before moving forward.

## Developer certificate of origin (DCO)

We require all contributors to certify that they have the right to submit the code they're contributing. To do this, we follow the [Developer Certificate of Origin (DCO)](https://developercertificate.org/).

By signing your commits, you're stating that the contribution is your own work, or that you have the right to submit it under the project's license. This helps us keep things clean and legally sound.

To sign your commit, just add the `-s` flag when committing:

```sh
git commit -s -m "your commit message"
```

This appends a line like:

```
Signed-off-by: Your Name <your.email@example.com>
```

We won't merge commits if they aren't signed off. If you forget, you can amend the last commit like this:

```sh
git commit --amend -s
```

If you're submitting a PR with multiple commits, make sure each one is signed.

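If you forgot to sign off a whole branch, you can add the trailer to every commit in one go by rewriting it against `master` (the branch Tangled PRs are authored against):

```bash
git rebase --signoff master
```
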
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command to make it sign off commits in the tangled repo:

```shell
# Safety check, should say "No matching config key..."
jj config list templates.commit_trailers
# The command below may need to be adjusted if the command above returned something.
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
```

Refer to the [jujutsu documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) for more information.

-136
docs/contributing.md
-136
docs/contributing.md
···
1
-
# tangled contributing guide
2
-
3
-
## commit guidelines
4
-
5
-
We follow a commit style similar to the Go project. Please keep commits:
6
-
7
-
* **atomic**: each commit should represent one logical change
8
-
* **descriptive**: the commit message should clearly describe what the
9
-
change does and why it's needed
10
-
11
-
### message format
12
-
13
-
```
14
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
15
-
16
-
17
-
Optional longer description can go here, if necessary. Explain what the
18
-
change does and why, especially if not obvious. Reference relevant
19
-
issues or PRs when applicable. These can be links for now since we don't
20
-
auto-link issues/PRs yet.
21
-
```
22
-
23
-
Here are some examples:
24
-
25
-
```
26
-
appview/state: fix token expiry check in middleware
27
-
28
-
The previous check did not account for clock drift, leading to premature
29
-
token invalidation.
30
-
```
31
-
32
-
```
33
-
knotserver/git/service: improve error checking in upload-pack
34
-
```
35
-
36
-
37
-
### general notes
38
-
39
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40
-
using `git am`. At present, there is no squashing -- so please author
41
-
your commits as they would appear on `master`, following the above
42
-
guidelines.
43
-
- If there is a lot of nesting, for example "appview:
44
-
pages/templates/repo/fragments: ...", these can be truncated down to
45
-
just "appview: repo/fragments: ...". If the change affects a lot of
46
-
subdirectories, you may abbreviate to just the top-level names, e.g.
47
-
"appview: ..." or "knotserver: ...".
48
-
- Keep commits lowercased with no trailing period.
49
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
50
-
"fixed bug" or "fixes bug").
51
-
- Try to keep the summary line under 72 characters, but we aren't too
52
-
fussed about this.
53
-
- Follow the same formatting for PR titles if filled manually.
54
-
- Don't include unrelated changes in the same commit.
55
-
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
56
-
before submitting if necessary.
57
-
58
-
## code formatting
59
-
60
-
We use a variety of tools to format our code, and multiplex them with
61
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
62
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63
-
64
-
## proposals for bigger changes
65
-
66
-
Small fixes like typos, minor bugs, or trivial refactors can be
67
-
submitted directly as PRs.
68
-
69
-
For larger changes—especially those introducing new features, significant
70
-
refactoring, or altering system behavior—please open a proposal first. This
71
-
helps us evaluate the scope, design, and potential impact before implementation.
72
-
73
-
### proposal format
74
-
75
-
Create a new issue titled:
76
-
77
-
```
78
-
proposal: <affected scope>: <summary of change>
79
-
```
80
-
81
-
In the description, explain:
82
-
83
-
- What the change is
84
-
- Why it's needed
85
-
- How you plan to implement it (roughly)
86
-
- Any open questions or tradeoffs
87
-
88
-
We'll use the issue thread to discuss and refine the idea before moving
89
-
forward.
90
-
91
-
## developer certificate of origin (DCO)
92
-
93
-
We require all contributors to certify that they have the right to
94
-
submit the code they're contributing. To do this, we follow the
95
-
[Developer Certificate of Origin
96
-
(DCO)](https://developercertificate.org/).
97
-
98
-
By signing your commits, you're stating that the contribution is your
99
-
own work, or that you have the right to submit it under the project's
100
-
license. This helps us keep things clean and legally sound.
101
-
102
-
To sign your commit, just add the `-s` flag when committing:
103
-
104
-
```sh
105
-
git commit -s -m "your commit message"
106
-
```
107
-
108
-
This appends a line like:
109
-
110
-
```
111
-
Signed-off-by: Your Name <your.email@example.com>
112
-
```
113
-
114
-
We won't merge commits if they aren't signed off. If you forget, you can
115
-
amend the last commit like this:
116
-
117
-
```sh
118
-
git commit --amend -s
119
-
```
120
-
121
-
If you're submitting a PR with multiple commits, make sure each one is
122
-
signed.
123
-
124
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125
-
to make it sign off commits in the tangled repo:
126
-
127
-
```shell
128
-
# Safety check, should say "No matching config key..."
129
-
jj config list templates.commit_trailers
130
-
# The command below may need to be adjusted if the command above returned something.
131
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132
-
```
133
-
134
-
Refer to the [jj
135
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136
-
for more information.
-172
docs/hacking.md
-172
docs/hacking.md
···
1
-
# hacking on tangled
2
-
3
-
We highly recommend [installing
4
-
nix](https://nixos.org/download/) (the package manager)
5
-
before working on the codebase. The nix flake provides a lot
6
-
of helpers to get started and most importantly, builds and
7
-
dev shells are entirely deterministic.
8
-
9
-
To set up your dev environment:
10
-
11
-
```bash
12
-
nix develop
13
-
```
14
-
15
-
Non-nix users can look at the `devShell` attribute in the
16
-
`flake.nix` file to determine necessary dependencies.
17
-
18
-
## running the appview
19
-
20
-
The nix flake also exposes a few `app` attributes (run `nix
21
-
flake show` to see a full list of what the flake provides),
22
-
one of the apps runs the appview with the `air`
23
-
live-reloader:
24
-
25
-
```bash
26
-
TANGLED_DEV=true nix run .#watch-appview
27
-
28
-
# TANGLED_DB_PATH might be of interest to point to
29
-
# different sqlite DBs
30
-
31
-
# in a separate shell, you can live-reload tailwind
32
-
nix run .#watch-tailwind
33
-
```
34
-
35
-
To authenticate with the appview, you will need redis and
36
-
OAUTH JWKs to be setup:
37
-
38
-
```
39
-
# oauth jwks should already be setup by the nix devshell:
40
-
echo $TANGLED_OAUTH_CLIENT_SECRET
41
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42
-
43
-
echo $TANGLED_OAUTH_CLIENT_KID
44
-
1761667908
45
-
46
-
# if not, you can set it up yourself:
47
-
goat key generate -t P-256
48
-
Key Type: P-256 / secp256r1 / ES256 private key
49
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53
-
54
-
# the secret key from above
55
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56
-
57
-
# run redis in at a new shell to store oauth sessions
58
-
redis-server
59
-
```
60
-
61
-
## running knots and spindles
62
-
63
-
An end-to-end knot setup requires setting up a machine with
64
-
`sshd`, `AuthorizedKeysCommand`, and git user, which is
65
-
quite cumbersome. So the nix flake provides a
66
-
`nixosConfiguration` to do so.
67
-
68
-
<details>
69
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
70
-
71
-
In order to build Tangled's dev VM on macOS, you will
72
-
first need to set up a Linux Nix builder. The recommended
73
-
way to do so is to run a [`darwin.linux-builder`
74
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75
-
and to register it in `nix.conf` as a builder for Linux
76
-
with the same architecture as your Mac (`linux-aarch64` if
77
-
you are using Apple Silicon).
78
-
79
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
81
-
> you can do
82
-
>
83
-
> ```shell
84
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85
-
> ```
86
-
>
87
-
> to store the builder VM in a temporary dir.
88
-
>
89
-
> You should read and follow [all the other intructions][darwin builder vm] to
90
-
> avoid subtle problems.
91
-
92
-
Alternatively, you can use any other method to set up a
93
-
Linux machine with `nix` installed that you can `sudo ssh`
94
-
into (in other words, root user on your Mac has to be able
95
-
to ssh into the Linux machine without entering a password)
96
-
and that has the same architecture as your Mac. See
97
-
[remote builder
98
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99
-
for how to register such a builder in `nix.conf`.
100
-
101
-
> WARNING: If you'd like to use
102
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104
-
> ssh` works can be tricky. It seems to be [possible with
105
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106
-
107
-
</details>
108
-
109
-
To begin, grab your DID from http://localhost:3000/settings.
110
-
Then, set `TANGLED_VM_KNOT_OWNER` and
111
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112
-
lightweight NixOS VM like so:
113
-
114
-
```bash
115
-
nix run --impure .#vm
116
-
117
-
# type `poweroff` at the shell to exit the VM
118
-
```
119
-
120
-
This starts a knot on port 6444, a spindle on port 6555
121
-
with `ssh` exposed on port 2222.
122
-
123
-
Once the services are running, head to
124
-
http://localhost:3000/settings/knots and hit verify. It should
125
-
verify the ownership of the services instantly if everything
126
-
went smoothly.
127
-
128
-
You can push repositories to this VM with this ssh config
129
-
block on your main machine:
130
-
131
-
```bash
132
-
Host nixos-shell
133
-
Hostname localhost
134
-
Port 2222
135
-
User git
136
-
IdentityFile ~/.ssh/my_tangled_key
137
-
```
138
-
139
-
Set up a remote called `local-dev` on a git repo:
140
-
141
-
```bash
142
-
git remote add local-dev git@nixos-shell:user/repo
143
-
git push local-dev main
144
-
```
145
-
146
-
### running a spindle
147
-
148
-
The above VM should already be running a spindle on
149
-
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150
-
hit verify. You can then configure each repository to use
151
-
this spindle and run CI jobs.
152
-
153
-
Of interest when debugging spindles:
154
-
155
-
```
156
-
# service logs from journald:
157
-
journalctl -xeu spindle
158
-
159
-
# CI job logs from disk:
160
-
ls /var/log/spindle
161
-
162
-
# debugging spindle db:
163
-
sqlite3 /var/lib/spindle/spindle.db
164
-
165
-
# litecli has a nicer REPL interface:
166
-
litecli /var/lib/spindle/spindle.db
167
-
```
168
-
169
-
If for any reason you wish to disable either one of the
170
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171
-
`services.tangled.spindle.enable` (or
172
-
`services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
+93
docs/highlight.theme
···
1
+
{
2
+
"text-color": null,
3
+
"background-color": null,
4
+
"line-number-color": null,
5
+
"line-number-background-color": null,
6
+
"text-styles": {
7
+
"Annotation": {
8
+
"text-color": null,
9
+
"background-color": null,
10
+
"bold": false,
11
+
"italic": true,
12
+
"underline": false
13
+
},
14
+
"ControlFlow": {
15
+
"text-color": null,
16
+
"background-color": null,
17
+
"bold": true,
18
+
"italic": false,
19
+
"underline": false
20
+
},
21
+
"Error": {
22
+
"text-color": null,
23
+
"background-color": null,
24
+
"bold": true,
25
+
"italic": false,
26
+
"underline": false
27
+
},
28
+
"Alert": {
29
+
"text-color": null,
30
+
"background-color": null,
31
+
"bold": true,
32
+
"italic": false,
33
+
"underline": false
34
+
},
35
+
"Preprocessor": {
36
+
"text-color": null,
37
+
"background-color": null,
38
+
"bold": true,
39
+
"italic": false,
40
+
"underline": false
41
+
},
42
+
"Information": {
43
+
"text-color": null,
44
+
"background-color": null,
45
+
"bold": false,
46
+
"italic": true,
47
+
"underline": false
48
+
},
49
+
"Warning": {
50
+
"text-color": null,
51
+
"background-color": null,
52
+
"bold": false,
53
+
"italic": true,
54
+
"underline": false
55
+
},
56
+
"Documentation": {
57
+
"text-color": null,
58
+
"background-color": null,
59
+
"bold": false,
60
+
"italic": true,
61
+
"underline": false
62
+
},
63
+
"DataType": {
64
+
"text-color": "#8f4e8b",
65
+
"background-color": null,
66
+
"bold": false,
67
+
"italic": false,
68
+
"underline": false
69
+
},
70
+
"Comment": {
71
+
"text-color": null,
72
+
"background-color": null,
73
+
"bold": false,
74
+
"italic": true,
75
+
"underline": false
76
+
},
77
+
"CommentVar": {
78
+
"text-color": null,
79
+
"background-color": null,
80
+
"bold": false,
81
+
"italic": true,
82
+
"underline": false
83
+
},
84
+
"Keyword": {
85
+
"text-color": null,
86
+
"background-color": null,
87
+
"bold": true,
88
+
"italic": false,
89
+
"underline": false
90
+
}
91
+
}
92
+
}
93
+
-214
docs/knot-hosting.md
-214
docs/knot-hosting.md
···
1
-
# knot self-hosting guide
2
-
3
-
So you want to run your own knot server? Great! Here are a few prerequisites:
4
-
5
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6
-
2. A (sub)domain name. People generally use `knot.example.com`.
7
-
3. A valid SSL certificate for your domain.
8
-
9
-
There's a couple of ways to get started:
10
-
* NixOS: refer to
11
-
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12
-
* Docker: Documented at
13
-
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14
-
(community maintained: support is not guaranteed!)
15
-
* Manual: Documented below.
16
-
17
-
## manual setup
18
-
19
-
First, clone this repository:
20
-
21
-
```
22
-
git clone https://tangled.org/@tangled.org/core
23
-
```
24
-
25
-
Then, build the `knot` CLI. This is the knot administration and operation tool.
26
-
For the purpose of this guide, we're only concerned with these subcommands:
27
-
28
-
* `knot server`: the main knot server process, typically run as a
29
-
supervised service
30
-
* `knot guard`: handles role-based access control for git over SSH
31
-
(you'll never have to run this yourself)
32
-
* `knot keys`: fetches SSH keys associated with your knot; we'll use
33
-
this to generate the SSH `AuthorizedKeysCommand`
34
-
35
-
```
36
-
cd core
37
-
export CGO_ENABLED=1
38
-
go build -o knot ./cmd/knot
39
-
```
40
-
41
-
Next, move the `knot` binary to a location owned by `root` --
42
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43
-
44
-
```
45
-
sudo mv knot /usr/local/bin/knot
46
-
sudo chown root:root /usr/local/bin/knot
47
-
```
48
-
49
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
50
-
specific permissions](https://stackoverflow.com/a/27638306). The
51
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52
-
retrieve a user's public SSH keys dynamically for authentication. Let's
53
-
set that up.
54
-
55
-
```
56
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57
-
Match User git
58
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59
-
AuthorizedKeysCommandUser nobody
60
-
EOF
61
-
```
62
-
63
-
Then, reload `sshd`:
64
-
65
-
```
66
-
sudo systemctl reload ssh
67
-
```
68
-
69
-
Next, create the `git` user. We'll use the `git` user's home directory
70
-
to store repositories:
71
-
72
-
```
73
-
sudo adduser git
74
-
```
75
-
76
-
Create `/home/git/.knot.env` with the following, updating the values as
77
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
78
-
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
79
-
80
-
```
81
-
KNOT_REPO_SCAN_PATH=/home/git
82
-
KNOT_SERVER_HOSTNAME=knot.example.com
83
-
APPVIEW_ENDPOINT=https://tangled.sh
84
-
KNOT_SERVER_OWNER=did:plc:foobar
85
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87
-
```
88
-
89
-
If you run a Linux distribution that uses systemd, you can use the provided
90
-
service file to run the server. Copy
91
-
[`knotserver.service`](/systemd/knotserver.service)
92
-
to `/etc/systemd/system/`. Then, run:
93
-
94
-
```
95
-
systemctl enable knotserver
96
-
systemctl start knotserver
97
-
```
98
-
99
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
100
-
knot. Here's an example configuration for Nginx:
101
-
102
-
```
103
-
server {
104
-
listen 80;
105
-
listen [::]:80;
106
-
server_name knot.example.com;
107
-
108
-
location / {
109
-
proxy_pass http://localhost:5555;
110
-
proxy_set_header Host $host;
111
-
proxy_set_header X-Real-IP $remote_addr;
112
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113
-
proxy_set_header X-Forwarded-Proto $scheme;
114
-
}
115
-
116
-
# wss endpoint for git events
117
-
location /events {
118
-
proxy_set_header X-Forwarded-For $remote_addr;
119
-
proxy_set_header Host $http_host;
120
-
proxy_set_header Upgrade websocket;
121
-
proxy_set_header Connection Upgrade;
122
-
proxy_pass http://localhost:5555;
123
-
}
124
-
# additional config for SSL/TLS go here.
125
-
}
126
-
127
-
```
128
-
129
-
Remember to use Let's Encrypt or similar to procure a certificate for your
130
-
knot domain.
131
-
132
-
You should now have a running knot server! You can finalize
133
-
your registration by hitting the `verify` button on the
134
-
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135
-
a record on your PDS to announce the existence of the knot.
136
-
137
-
### custom paths
138
-
139
-
(This section applies to manual setup only. Docker users should edit the mounts
140
-
in `docker-compose.yml` instead.)
141
-
142
-
Right now, the database and repositories of your knot lives in `/home/git`. You
143
-
can move these paths if you'd like to store them in another folder. Be careful
144
-
when adjusting these paths:
145
-
146
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147
-
any possible side effects. Remember to restart it once you're done.
148
-
* Make backups before moving in case something goes wrong.
149
-
* Make sure the `git` user can read and write from the new paths.
150
-
151
-
#### database
152
-
153
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
154
-
and we want to move it to `/home/git/database/knotserver.db`.
155
-
156
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
157
-
and `.db-wal` files if they exist.
158
-
159
-
```
160
-
mkdir /home/git/database
161
-
cp /home/git/knotserver.db* /home/git/database
162
-
```
163
-
164
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165
-
the new file path (_not_ the directory):
166
-
167
-
```
168
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169
-
```
170
-
171
-
#### repositories
172
-
173
-
As an example, let's say the repositories are currently in `/home/git`, and we
174
-
want to move them into `/home/git/repositories`.
175
-
176
-
Create the new folder, then move the existing repositories (if there are any):
177
-
178
-
```
179
-
mkdir /home/git/repositories
180
-
# move all DIDs into the new folder; these will vary for you!
181
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182
-
```
183
-
184
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185
-
to the new directory:
186
-
187
-
```
188
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
189
-
```
190
-
191
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192
-
repository path:
193
-
194
-
```
195
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196
-
Match User git
197
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198
-
AuthorizedKeysCommandUser nobody
199
-
EOF
200
-
```
201
-
202
-
Make sure to restart your SSH server!
203
-
204
-
#### MOTD (message of the day)
205
-
206
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
207
-
`/home/git/motd` file:
208
-
209
-
```
210
-
printf "Hi from this knot!\n" > /home/git/motd
211
-
```
212
-
213
-
Note that you should add a newline at the end if setting a non-empty message
214
-
since the knot won't do this for you.
-59
docs/migrations.md
-59
docs/migrations.md
···
1
-
# Migrations
2
-
3
-
This document is laid out in reverse-chronological order.
4
-
Newer migration guides are listed first, and older guides
5
-
are further down the page.
6
-
7
-
## Upgrading from v1.8.x
8
-
9
-
After v1.8.2, the HTTP API for knot and spindles have been
10
-
deprecated and replaced with XRPC. Repositories on outdated
11
-
knots will not be viewable from the appview. Upgrading is
12
-
straightforward however.
13
-
14
-
For knots:
15
-
16
-
- Upgrade to latest tag (v1.9.0 or above)
17
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18
-
hit the "retry" button to verify your knot
19
-
20
-
For spindles:
21
-
22
-
- Upgrade to latest tag (v1.9.0 or above)
23
-
- Head to the [spindle
24
-
dashboard](https://tangled.org/settings/spindles) and hit the
25
-
"retry" button to verify your spindle
26
-
27
-
## Upgrading from v1.7.x
28
-
29
-
After v1.7.0, knot secrets have been deprecated. You no
30
-
longer need a secret from the appview to run a knot. All
31
-
authorized commands to knots are managed via [Inter-Service
32
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33
-
Knots will be read-only until upgraded.
34
-
35
-
Upgrading is quite easy, in essence:
36
-
37
-
- `KNOT_SERVER_SECRET` is no more, you can remove this
38
-
environment variable entirely
39
-
- `KNOT_SERVER_OWNER` is now required on boot, set this to
40
-
your DID. You can find your DID in the
41
-
[settings](https://tangled.org/settings) page.
42
-
- Restart your knot once you have replaced the environment
43
-
variable
44
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45
-
hit the "retry" button to verify your knot. This simply
46
-
writes a `sh.tangled.knot` record to your PDS.
47
-
48
-
If you use the nix module, simply bump the flake to the
49
-
latest revision, and change your config block like so:
50
-
51
-
```diff
52
-
services.tangled.knot = {
53
-
enable = true;
54
-
server = {
55
-
- secretFile = /path/to/secret;
56
-
+ owner = "did:plc:foo";
57
-
};
58
-
};
59
-
```
-25
docs/spindle/architecture.md
-25
docs/spindle/architecture.md
···
1
-
# spindle architecture
2
-
3
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
4
-
5
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7
-
* when a new repo record comes through (typically when you add a spindle to a
8
-
repo from the settings), spindle then resolves the underlying knot and
9
-
subscribes to repo events (see:
10
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11
-
* the spindle engine then handles execution of the pipeline, with results and
12
-
logs beamed on the spindle event stream over wss
13
-
14
-
### the engine
15
-
16
-
At present, the only supported backend is Docker (and Podman, if Docker
17
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18
-
executes each step in the pipeline in a fresh container, with state persisted
19
-
across steps within the `/tangled/workspace` directory.
20
-
21
-
The base image for the container is constructed on the fly using
22
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23
-
used packages.
24
-
25
-
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
-52
docs/spindle/hosting.md
···
1
-
# spindle self-hosting guide
2
-
3
-
## prerequisites
4
-
5
-
* Go
6
-
* Docker (the only supported backend currently)
7
-
8
-
## configuration
9
-
10
-
Spindle is configured using environment variables. The following environment variables are available:
11
-
12
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21
-
22
-
## running spindle
23
-
24
-
1. **Set the environment variables.** For example:
25
-
26
-
```shell
27
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
28
-
export SPINDLE_SERVER_OWNER="your-did"
29
-
```
30
-
31
-
2. **Build the Spindle binary.**
32
-
33
-
```shell
34
-
cd core
35
-
go mod download
36
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
37
-
```
38
-
39
-
3. **Create the log directory.**
40
-
41
-
```shell
42
-
sudo mkdir -p /var/log/spindle
43
-
sudo chown $USER:$USER -R /var/log/spindle
44
-
```
45
-
46
-
4. **Run the Spindle binary.**
47
-
48
-
```shell
49
-
./cmd/spindle/spindle
50
-
```
51
-
52
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
-285
docs/spindle/openbao.md
···
1
-
# spindle secrets with openbao
2
-
3
-
This document covers setting up Spindle to use OpenBao for secrets
4
-
management via OpenBao Proxy instead of the default SQLite backend.
5
-
6
-
## overview
7
-
8
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9
-
authentication automatically using AppRole credentials, while Spindle
10
-
connects to the local proxy instead of directly to the OpenBao server.
11
-
12
-
This approach provides better security, automatic token renewal, and
13
-
simplified application code.
14
-
15
-
## installation
16
-
17
-
Install OpenBao from nixpkgs:
18
-
19
-
```bash
20
-
nix shell nixpkgs#openbao # for a local server
21
-
```
22
-
23
-
## setup
24
-
25
-
The setup process can is documented for both local development and production.
26
-
27
-
### local development
28
-
29
-
Start OpenBao in dev mode:
30
-
31
-
```bash
32
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33
-
```
34
-
35
-
This starts OpenBao on `http://localhost:8201` with a root token.
36
-
37
-
Set up environment for bao CLI:
38
-
39
-
```bash
40
-
export BAO_ADDR=http://localhost:8200
41
-
export BAO_TOKEN=root
42
-
```
43
-
44
-
### production
45
-
46
-
You would typically use a systemd service with a configuration file. Refer to
47
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48
-
achieved using Nix.
49
-
50
-
Then, initialize the bao server:
51
-
```bash
52
-
bao operator init -key-shares=1 -key-threshold=1
53
-
```
54
-
55
-
This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56
-
```bash
57
-
bao operator unseal <unseal_key>
58
-
```
59
-
60
-
All steps below remain the same across both dev and production setups.
61
-
62
-
### configure openbao server
63
-
64
-
Create the spindle KV mount:
65
-
66
-
```bash
67
-
bao secrets enable -path=spindle -version=2 kv
68
-
```
69
-
70
-
Set up AppRole authentication and policy:
71
-
72
-
Create a policy file `spindle-policy.hcl`:
73
-
74
-
```hcl
75
-
# Full access to spindle KV v2 data
76
-
path "spindle/data/*" {
77
-
capabilities = ["create", "read", "update", "delete"]
78
-
}
79
-
80
-
# Access to metadata for listing and management
81
-
path "spindle/metadata/*" {
82
-
capabilities = ["list", "read", "delete", "update"]
83
-
}
84
-
85
-
# Allow listing at root level
86
-
path "spindle/" {
87
-
capabilities = ["list"]
88
-
}
89
-
90
-
# Required for connection testing and health checks
91
-
path "auth/token/lookup-self" {
92
-
capabilities = ["read"]
93
-
}
94
-
```
95
-
96
-
Apply the policy and create an AppRole:
97
-
98
-
```bash
99
-
bao policy write spindle-policy spindle-policy.hcl
100
-
bao auth enable approle
101
-
bao write auth/approle/role/spindle \
102
-
token_policies="spindle-policy" \
103
-
token_ttl=1h \
104
-
token_max_ttl=4h \
105
-
bind_secret_id=true \
106
-
secret_id_ttl=0 \
107
-
secret_id_num_uses=0
108
-
```
109
-
110
-
Get the credentials:
111
-
112
-
```bash
113
-
# Get role ID (static)
114
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115
-
116
-
# Generate secret ID
117
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118
-
119
-
echo "Role ID: $ROLE_ID"
120
-
echo "Secret ID: $SECRET_ID"
121
-
```
122
-
123
-
### create proxy configuration
124
-
125
-
Create the credential files:
126
-
127
-
```bash
128
-
# Create directory for OpenBao files
129
-
mkdir -p /tmp/openbao
130
-
131
-
# Save credentials
132
-
echo "$ROLE_ID" > /tmp/openbao/role-id
133
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
134
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135
-
```
136
-
137
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138
-
139
-
```hcl
140
-
# OpenBao server connection
141
-
vault {
142
-
address = "http://localhost:8200"
143
-
}
144
-
145
-
# Auto-Auth using AppRole
146
-
auto_auth {
147
-
method "approle" {
148
-
mount_path = "auth/approle"
149
-
config = {
150
-
role_id_file_path = "/tmp/openbao/role-id"
151
-
secret_id_file_path = "/tmp/openbao/secret-id"
152
-
}
153
-
}
154
-
155
-
# Optional: write token to file for debugging
156
-
sink "file" {
157
-
config = {
158
-
path = "/tmp/openbao/token"
159
-
mode = 0640
160
-
}
161
-
}
162
-
}
163
-
164
-
# Proxy listener for Spindle
165
-
listener "tcp" {
166
-
address = "127.0.0.1:8201"
167
-
tls_disable = true
168
-
}
169
-
170
-
# Enable API proxy with auto-auth token
171
-
api_proxy {
172
-
use_auto_auth_token = true
173
-
}
174
-
175
-
# Enable response caching
176
-
cache {
177
-
use_auto_auth_token = true
178
-
}
179
-
180
-
# Logging
181
-
log_level = "info"
182
-
```
183
-
184
-
### start the proxy
185
-
186
-
Start OpenBao Proxy:
187
-
188
-
```bash
189
-
bao proxy -config=/tmp/openbao/proxy.hcl
190
-
```
191
-
192
-
The proxy will authenticate with OpenBao and start listening on
193
-
`127.0.0.1:8201`.
194
-
195
-
### configure spindle
196
-
197
-
Set these environment variables for Spindle:
198
-
199
-
```bash
200
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203
-
```
204
-
205
-
Start Spindle:
206
-
207
-
Spindle will now connect to the local proxy, which handles all
208
-
authentication automatically.
209
-
210
-
## production setup for proxy
211
-
212
-
For production, you'll want to run the proxy as a service:
213
-
214
-
Place your production configuration in `/etc/openbao/proxy.hcl` with
215
-
proper TLS settings for the vault connection.
216
-
217
-
## verifying setup
218
-
219
-
Test the proxy directly:
220
-
221
-
```bash
222
-
# Check proxy health
223
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224
-
225
-
# Test token lookup through proxy
226
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227
-
```
228
-
229
-
Test OpenBao operations through the server:
230
-
231
-
```bash
232
-
# List all secrets
233
-
bao kv list spindle/
234
-
235
-
# Add a test secret via Spindle API, then check it exists
236
-
bao kv list spindle/repos/
237
-
238
-
# Get a specific secret
239
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
240
-
```
241
-
242
-
## how it works
243
-
244
-
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245
-
- The proxy authenticates with OpenBao using AppRole credentials
246
-
- All Spindle requests go through the proxy, which injects authentication tokens
247
-
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248
-
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249
-
- The proxy handles all token renewal automatically
250
-
- Spindle no longer manages tokens or authentication directly
251
-
252
-
## troubleshooting
253
-
254
-
**Connection refused**: Check that the OpenBao Proxy is running and
255
-
listening on the configured address.
256
-
257
-
**403 errors**: Verify the AppRole credentials are correct and the policy
258
-
has the necessary permissions.
259
-
260
-
**404 route errors**: The spindle KV mount probably doesn't exist - run
261
-
the mount creation step again.
262
-
263
-
**Proxy authentication failures**: Check the proxy logs and verify the
264
-
role-id and secret-id files are readable and contain valid credentials.
265
-
266
-
**Secret not found after writing**: This can indicate policy permission
267
-
issues. Verify the policy includes both `spindle/data/*` and
268
-
`spindle/metadata/*` paths with appropriate capabilities.
269
-
270
-
Check proxy logs:
271
-
272
-
```bash
273
-
# If running as systemd service
274
-
journalctl -u openbao-proxy -f
275
-
276
-
# If running directly, check the console output
277
-
```
278
-
279
-
Test AppRole authentication manually:
280
-
281
-
```bash
282
-
bao write auth/approle/login \
283
-
role_id="$(cat /tmp/openbao/role-id)" \
284
-
secret_id="$(cat /tmp/openbao/secret-id)"
285
-
```
-183
docs/spindle/pipeline.md
-183
docs/spindle/pipeline.md
···
1
-
# spindle pipelines
2
-
3
-
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4
-
5
-
The fields are:
6
-
7
-
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8
-
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9
-
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10
-
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11
-
- [Environment](#environment): An **optional** field that allows you to define environment variables.
12
-
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13
-
14
-
## Trigger
15
-
16
-
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17
-
18
-
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19
-
- `push`: The workflow should run every time a commit is pushed to the repository.
20
-
- `pull_request`: The workflow should run every time a pull request is made or updated.
21
-
- `manual`: The workflow can be triggered manually.
22
-
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23
-
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24
-
25
-
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26
-
27
-
```yaml
28
-
when:
29
-
- event: ["push", "manual"]
30
-
branch: ["main", "develop"]
31
-
- event: ["pull_request"]
32
-
branch: ["main"]
33
-
```
34
-
35
-
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36
-
37
-
```yaml
38
-
when:
39
-
- event: ["push"]
40
-
tag: ["v*"]
41
-
```
42
-
43
-
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44
-
45
-
```yaml
46
-
when:
47
-
- event: ["push"]
48
-
branch: ["main", "release-*"]
49
-
tag: ["v*", "stable"]
50
-
```
51
-
52
-
## Engine
53
-
54
-
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55
-
56
-
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57
-
58
-
Example:
59
-
60
-
```yaml
61
-
engine: "nixery"
62
-
```
63
-
64
-
## Clone options
65
-
66
-
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67
-
68
-
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69
-
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70
-
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71
-
72
-
The default settings are:
73
-
74
-
```yaml
75
-
clone:
76
-
skip: false
77
-
depth: 1
78
-
submodules: false
79
-
```
80
-
81
-
## Dependencies
82
-
83
-
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84
-
85
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86
-
87
-
```yaml
88
-
dependencies:
89
-
# nixpkgs
90
-
nixpkgs:
91
-
- nodejs
92
-
- go
93
-
# custom registry
94
-
git+https://tangled.org/@example.com/my_pkg:
95
-
- my_pkg
96
-
```
97
-
98
-
Now these dependencies are available to use in your workflow!
99
-
100
-
## Environment
101
-
102
-
The `environment` field allows you define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103
-
104
-
Example:
105
-
106
-
```yaml
107
-
environment:
108
-
GOOS: "linux"
109
-
GOARCH: "arm64"
110
-
NODE_ENV: "production"
111
-
MY_ENV_VAR: "MY_ENV_VALUE"
112
-
```
113
-
114
-
## Steps
115
-
116
-
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117
-
118
-
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119
-
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120
-
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121
-
122
-
Example:
123
-
124
-
```yaml
125
-
steps:
126
-
- name: "Build backend"
127
-
command: "go build"
128
-
environment:
129
-
GOOS: "darwin"
130
-
GOARCH: "arm64"
131
-
- name: "Build frontend"
132
-
command: "npm run build"
133
-
environment:
134
-
NODE_ENV: "production"
135
-
```
136
-
137
-
## Complete workflow
138
-
139
-
```yaml
140
-
# .tangled/workflows/build.yml
141
-
142
-
when:
143
-
- event: ["push", "manual"]
144
-
branch: ["main", "develop"]
145
-
- event: ["pull_request"]
146
-
branch: ["main"]
147
-
148
-
engine: "nixery"
149
-
150
-
# using the default values
151
-
clone:
152
-
skip: false
153
-
depth: 1
154
-
submodules: false
155
-
156
-
dependencies:
157
-
# nixpkgs
158
-
nixpkgs:
159
-
- nodejs
160
-
- go
161
-
# custom registry
162
-
git+https://tangled.org/@example.com/my_pkg:
163
-
- my_pkg
164
-
165
-
environment:
166
-
GOOS: "linux"
167
-
GOARCH: "arm64"
168
-
NODE_ENV: "production"
169
-
MY_ENV_VAR: "MY_ENV_VALUE"
170
-
171
-
steps:
172
-
- name: "Build backend"
173
-
command: "go build"
174
-
environment:
175
-
GOOS: "darwin"
176
-
GOARCH: "arm64"
177
-
- name: "Build frontend"
178
-
command: "npm run build"
179
-
environment:
180
-
NODE_ENV: "production"
181
-
```
182
-
183
-
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101 docs/styles.css

svg {
  width: 16px;
  height: 16px;
}

:root {
  --syntax-alert: #d20f39;
  --syntax-annotation: #fe640b;
  --syntax-attribute: #df8e1d;
  --syntax-basen: #40a02b;
  --syntax-builtin: #1e66f5;
  --syntax-controlflow: #8839ef;
  --syntax-char: #04a5e5;
  --syntax-constant: #fe640b;
  --syntax-comment: #9ca0b0;
  --syntax-commentvar: #7c7f93;
  --syntax-documentation: #9ca0b0;
  --syntax-datatype: #df8e1d;
  --syntax-decval: #40a02b;
  --syntax-error: #d20f39;
  --syntax-extension: #4c4f69;
  --syntax-float: #40a02b;
  --syntax-function: #1e66f5;
  --syntax-import: #40a02b;
  --syntax-information: #04a5e5;
  --syntax-keyword: #8839ef;
  --syntax-operator: #179299;
  --syntax-other: #8839ef;
  --syntax-preprocessor: #ea76cb;
  --syntax-specialchar: #04a5e5;
  --syntax-specialstring: #ea76cb;
  --syntax-string: #40a02b;
  --syntax-variable: #8839ef;
  --syntax-verbatimstring: #40a02b;
  --syntax-warning: #df8e1d;
}

@media (prefers-color-scheme: dark) {
  :root {
    --syntax-alert: #f38ba8;
    --syntax-annotation: #fab387;
    --syntax-attribute: #f9e2af;
    --syntax-basen: #a6e3a1;
    --syntax-builtin: #89b4fa;
    --syntax-controlflow: #cba6f7;
    --syntax-char: #89dceb;
    --syntax-constant: #fab387;
    --syntax-comment: #6c7086;
    --syntax-commentvar: #585b70;
    --syntax-documentation: #6c7086;
    --syntax-datatype: #f9e2af;
    --syntax-decval: #a6e3a1;
    --syntax-error: #f38ba8;
    --syntax-extension: #cdd6f4;
    --syntax-float: #a6e3a1;
    --syntax-function: #89b4fa;
    --syntax-import: #a6e3a1;
    --syntax-information: #89dceb;
    --syntax-keyword: #cba6f7;
    --syntax-operator: #94e2d5;
    --syntax-other: #cba6f7;
    --syntax-preprocessor: #f5c2e7;
    --syntax-specialchar: #89dceb;
    --syntax-specialstring: #f5c2e7;
    --syntax-string: #a6e3a1;
    --syntax-variable: #cba6f7;
    --syntax-verbatimstring: #a6e3a1;
    --syntax-warning: #f9e2af;
  }
}

/* pandoc syntax highlighting classes */
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
code span.at { color: var(--syntax-attribute); } /* attribute */
code span.bn { color: var(--syntax-basen); } /* basen */
code span.bu { color: var(--syntax-builtin); } /* builtin */
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
code span.ch { color: var(--syntax-char); } /* char */
code span.cn { color: var(--syntax-constant); } /* constant */
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
code span.dt { color: var(--syntax-datatype); } /* datatype */
code span.dv { color: var(--syntax-decval); } /* decval */
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
code span.ex { color: var(--syntax-extension); } /* extension */
code span.fl { color: var(--syntax-float); } /* float */
code span.fu { color: var(--syntax-function); } /* function */
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
code span.op { color: var(--syntax-operator); } /* operator */
code span.ot { color: var(--syntax-other); } /* other */
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
code span.st { color: var(--syntax-string); } /* string */
code span.va { color: var(--syntax-variable); } /* variable */
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+117 docs/template.html

<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
<head>
  <meta charset="utf-8" />
  <meta name="generator" content="pandoc" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
  $for(author-meta)$
  <meta name="author" content="$author-meta$" />
  $endfor$

  $if(date-meta)$
  <meta name="dcterms.date" content="$date-meta$" />
  $endif$

  $if(keywords)$
  <meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
  $endif$

  $if(description-meta)$
  <meta name="description" content="$description-meta$" />
  $endif$

  <title>$pagetitle$ - Tangled docs</title>

  <style>
    $styles.css()$
  </style>

  $for(css)$
  <link rel="stylesheet" href="$css$" />
  $endfor$

  $for(header-includes)$
  $header-includes$
  $endfor$

  <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />

</head>
<body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen">
  $for(include-before)$
  $include-before$
  $endfor$

  $if(toc)$
  <!-- mobile topbar toc -->
  <details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4">
    <summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white">
      $if(toc-title)$$toc-title$$else$Table of Contents$endif$
      <span class="group-open:hidden inline">${ menu.svg() }</span>
      <span class="hidden group-open:inline">${ x.svg() }</span>
    </summary>
    ${ table-of-contents:toc.html() }
  </details>
  <!-- desktop sidebar toc -->
  <nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50">
    $if(toc-title)$
    <h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
    $endif$
    ${ table-of-contents:toc.html() }
  </nav>
  $endif$

  <div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
    <main class="max-w-4xl w-full mx-auto p-6 flex-1">
      $if(top)$
      $-- only print title block if this is NOT the top page
      $else$
      $if(title)$
      <header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
        <h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
        $if(subtitle)$
        <p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
        $endif$
        $for(author)$
        <p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
        $endfor$
        $if(date)$
        <p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
        $endif$
        $if(abstract)$
        <div class="mt-6 p-4 bg-gray-50 rounded-lg">
          <div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div>
          <div class="text-gray-700">$abstract$</div>
        </div>
        $endif$
        $endif$
      </header>
      $endif$
      <article class="prose dark:prose-invert max-w-none">
        $body$
      </article>
    </main>
    <nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 ">
      <div class="max-w-4xl mx-auto px-8 py-4">
        <div class="flex justify-between gap-4">
          <span class="flex-1">
            $if(previous.url)$
            <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
            <a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
            $endif$
          </span>
          <span class="flex-1 text-right">
            $if(next.url)$
            <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
            <a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
            $endif$
          </span>
        </div>
      </div>
    </nav>
  </div>
  $for(include-after)$
  $include-after$
  $endfor$
</body>
</html>
+4 docs/toc.html
+3 -3 flake.lock
···
    },
    "nixpkgs": {
      "locked": {
-       "lastModified": 1765186076,
-       "narHash": "sha256-hM20uyap1a0M9d344I692r+ik4gTMyj60cQWO+hAYP8=",
+       "lastModified": 1766070988,
+       "narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
        "owner": "nixos",
        "repo": "nixpkgs",
-       "rev": "addf7cf5f383a3101ecfba091b98d0a1263dc9b8",
+       "rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
        "type": "github"
      },
      "original": {
+5 -2 flake.nix
···
        inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
      };
      appview = self.callPackage ./nix/pkgs/appview.nix {};
+     docs = self.callPackage ./nix/pkgs/docs.nix {
+       inherit inter-fonts-src ibm-plex-mono-src lucide-src;
+     };
      spindle = self.callPackage ./nix/pkgs/spindle.nix {};
      knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
      knot = self.callPackage ./nix/pkgs/knot.nix {};
    });
  in {
    overlays.default = final: prev: {
-     inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
+     inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
    };

    packages = forAllSystems (system: let
···
      staticPackages = mkPackageSet pkgs.pkgsStatic;
      crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
    in {
-     inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
+     inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;

      pkgsStatic-appview = staticPackages.appview;
      pkgsStatic-knot = staticPackages.knot;
+1 -1 input.css
+41 nix/pkgs/docs.nix

{
  pandoc,
  tailwindcss,
  runCommandLocal,
  inter-fonts-src,
  ibm-plex-mono-src,
  lucide-src,
  src,
}:
runCommandLocal "docs" {} ''
  mkdir -p working

  # copy templates, themes, styles, filters to working directory
  cp ${src}/docs/*.html working/
  cp ${src}/docs/*.theme working/
  cp ${src}/docs/*.css working/

  # icons
  cp -rf ${lucide-src}/*.svg working/

  # content
  ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
    -o $out/ \
    -t chunkedhtml \
    --variable toc \
    --toc-depth=2 \
    --css=stylesheet.css \
    --chunk-template="%i.html" \
    --highlight-style=working/highlight.theme \
    --template=working/template.html

  # fonts
  mkdir -p $out/static/fonts
  cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
  cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
  cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
  cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/

  # styles
  cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
''
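
Since flake.nix above exposes this derivation as the `docs` package output, the generated site can be built and previewed locally. A minimal sketch, assuming a flakes-enabled Nix in a local checkout; the port and the use of Python's built-in server are purely illustrative and not part of this change:

```bash
# build the chunked HTML docs; the output lands in ./result
nix build .#docs

# serve the generated pages for a quick look at http://localhost:8000
python3 -m http.server 8000 --directory ./result
```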
+1 -1 tailwind.config.js
···
  const colors = require("tailwindcss/colors");

  module.exports = {
-   content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
+   content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
    darkMode: "media",
    theme: {
      container: {