-1
appview/db/repos.go
-1
appview/db/repos.go
+3
-13
appview/pages/markup/extension/atlink.go
+3
-13
appview/pages/markup/extension/atlink.go
···
35
35
return KindAt
36
36
}
37
37
38
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
39
-
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
38
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
40
39
41
40
type atParser struct{}
42
41
···
56
55
if m == nil {
57
56
return nil
58
57
}
59
-
60
-
// Check for all links in the markdown to see if the handle found is inside one
61
-
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
62
-
for _, linkMatch := range linksIndexes {
63
-
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
64
-
return nil
65
-
}
66
-
}
67
-
68
58
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
69
59
block.Advance(m[1])
70
60
node := &AtNode{}
···
97
87
98
88
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
99
89
if entering {
100
-
w.WriteString(`<a href="/`)
90
+
w.WriteString(`<a href="/@`)
101
91
w.WriteString(n.(*AtNode).Handle)
102
-
w.WriteString(`" class="mention">`)
92
+
w.WriteString(`" class="mention font-bold">`)
103
93
} else {
104
94
w.WriteString("</a>")
105
95
}
-121
appview/pages/markup/markdown_test.go
-121
appview/pages/markup/markdown_test.go
···
1
-
package markup
2
-
3
-
import (
4
-
"bytes"
5
-
"testing"
6
-
)
7
-
8
-
func TestAtExtension_Rendering(t *testing.T) {
9
-
tests := []struct {
10
-
name string
11
-
markdown string
12
-
expected string
13
-
}{
14
-
{
15
-
name: "renders simple at mention",
16
-
markdown: "Hello @user.tngl.sh!",
17
-
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
18
-
},
19
-
{
20
-
name: "renders multiple at mentions",
21
-
markdown: "Hi @alice.tngl.sh and @bob.example.com",
22
-
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
23
-
},
24
-
{
25
-
name: "renders at mention in parentheses",
26
-
markdown: "Check this out (@user.tngl.sh)",
27
-
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
28
-
},
29
-
{
30
-
name: "does not render email",
31
-
markdown: "Contact me at test@example.com",
32
-
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
33
-
},
34
-
{
35
-
name: "renders at mention with hyphen",
36
-
markdown: "Follow @user-name.tngl.sh",
37
-
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
38
-
},
39
-
{
40
-
name: "renders at mention with numbers",
41
-
markdown: "@user123.test456.social",
42
-
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
43
-
},
44
-
{
45
-
name: "at mention at start of line",
46
-
markdown: "@user.tngl.sh is cool",
47
-
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
48
-
},
49
-
}
50
-
51
-
for _, tt := range tests {
52
-
t.Run(tt.name, func(t *testing.T) {
53
-
md := NewMarkdown()
54
-
55
-
var buf bytes.Buffer
56
-
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
57
-
t.Fatalf("failed to convert markdown: %v", err)
58
-
}
59
-
60
-
result := buf.String()
61
-
if result != tt.expected+"\n" {
62
-
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
63
-
}
64
-
})
65
-
}
66
-
}
67
-
68
-
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
69
-
tests := []struct {
70
-
name string
71
-
markdown string
72
-
contains string
73
-
}{
74
-
{
75
-
name: "at mention with bold",
76
-
markdown: "**Hello @user.tngl.sh**",
77
-
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
78
-
},
79
-
{
80
-
name: "at mention with italic",
81
-
markdown: "*Check @user.tngl.sh*",
82
-
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
83
-
},
84
-
{
85
-
name: "at mention in list",
86
-
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
87
-
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
88
-
},
89
-
{
90
-
name: "at mention in link",
91
-
markdown: "[@regnault.dev](https://regnault.dev)",
92
-
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
93
-
},
94
-
{
95
-
name: "at mention in link again",
96
-
markdown: "[check out @regnault.dev](https://regnault.dev)",
97
-
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
98
-
},
99
-
{
100
-
name: "at mention in link again, multiline",
101
-
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
102
-
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
103
-
},
104
-
}
105
-
106
-
for _, tt := range tests {
107
-
t.Run(tt.name, func(t *testing.T) {
108
-
md := NewMarkdown()
109
-
110
-
var buf bytes.Buffer
111
-
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
112
-
t.Fatalf("failed to convert markdown: %v", err)
113
-
}
114
-
115
-
result := buf.String()
116
-
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
117
-
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
118
-
}
119
-
})
120
-
}
121
-
}
+1
-1
appview/pages/templates/knots/index.html
+1
-1
appview/pages/templates/knots/index.html
···
105
105
{{ define "docsButton" }}
106
106
<a
107
107
class="btn flex items-center gap-2"
108
-
href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide">
108
+
href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">
109
109
{{ i "book" "size-4" }}
110
110
docs
111
111
</a>
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
···
23
23
</p>
24
24
<p>
25
25
<span class="{{ $bullet }}">2</span>Configure your CI/CD
26
-
<a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
26
+
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
27
27
</p>
28
28
<p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
29
29
</div>
+1
-1
appview/pages/templates/repo/settings/pipelines.html
+1
-1
appview/pages/templates/repo/settings/pipelines.html
···
22
22
<p class="text-gray-500 dark:text-gray-400">
23
23
Choose a spindle to execute your workflows on. Only repository owners
24
24
can configure spindles. Spindles can be selfhosted,
25
-
<a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
25
+
<a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
26
26
click to learn more.
27
27
</a>
28
28
</p>
+1
-1
appview/pages/templates/spindles/index.html
+1
-1
appview/pages/templates/spindles/index.html
···
102
102
{{ define "docsButton" }}
103
103
<a
104
104
class="btn flex items-center gap-2"
105
-
href="https://docs.tangled.org/spindles.html#self-hosting-guide">
105
+
href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
106
106
{{ i "book" "size-4" }}
107
107
docs
108
108
</a>
-49
appview/repo/archive.go
-49
appview/repo/archive.go
···
1
-
package repo
2
-
3
-
import (
4
-
"fmt"
5
-
"net/http"
6
-
"net/url"
7
-
"strings"
8
-
9
-
"tangled.org/core/api/tangled"
10
-
xrpcclient "tangled.org/core/appview/xrpcclient"
11
-
12
-
indigoxrpc "github.com/bluesky-social/indigo/xrpc"
13
-
"github.com/go-chi/chi/v5"
14
-
"github.com/go-git/go-git/v5/plumbing"
15
-
)
16
-
17
-
func (rp *Repo) DownloadArchive(w http.ResponseWriter, r *http.Request) {
18
-
l := rp.logger.With("handler", "DownloadArchive")
19
-
ref := chi.URLParam(r, "ref")
20
-
ref, _ = url.PathUnescape(ref)
21
-
f, err := rp.repoResolver.Resolve(r)
22
-
if err != nil {
23
-
l.Error("failed to get repo and knot", "err", err)
24
-
return
25
-
}
26
-
scheme := "http"
27
-
if !rp.config.Core.Dev {
28
-
scheme = "https"
29
-
}
30
-
host := fmt.Sprintf("%s://%s", scheme, f.Knot)
31
-
xrpcc := &indigoxrpc.Client{
32
-
Host: host,
33
-
}
34
-
didSlashRepo := f.DidSlashRepo()
35
-
archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, didSlashRepo)
36
-
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
37
-
l.Error("failed to call XRPC repo.archive", "err", xrpcerr)
38
-
rp.pages.Error503(w)
39
-
return
40
-
}
41
-
// Set headers for file download, just pass along whatever the knot specifies
42
-
safeRefFilename := strings.ReplaceAll(plumbing.ReferenceName(ref).Short(), "/", "-")
43
-
filename := fmt.Sprintf("%s-%s.tar.gz", f.Name, safeRefFilename)
44
-
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
45
-
w.Header().Set("Content-Type", "application/gzip")
46
-
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(archiveBytes)))
47
-
// Write the archive data directly
48
-
w.Write(archiveBytes)
49
-
}
-4
appview/repo/router.go
-4
appview/repo/router.go
···
40
40
r.Get("/blob/{ref}/*", rp.Blob)
41
41
r.Get("/raw/{ref}/*", rp.RepoBlobRaw)
42
42
43
-
// intentionally doesn't use /* as this isn't
44
-
// a file path
45
-
r.Get("/archive/{ref}", rp.DownloadArchive)
46
-
47
43
r.Route("/fork", func(r chi.Router) {
48
44
r.Use(middleware.AuthMiddleware(rp.oauth))
49
45
r.Get("/", rp.ForkRepo)
-114
appview/state/git_http.go
-114
appview/state/git_http.go
···
1
-
package state
2
-
3
-
import (
4
-
"fmt"
5
-
"io"
6
-
"maps"
7
-
"net/http"
8
-
9
-
"github.com/bluesky-social/indigo/atproto/identity"
10
-
"github.com/go-chi/chi/v5"
11
-
"tangled.org/core/appview/models"
12
-
)
13
-
14
-
func (s *State) InfoRefs(w http.ResponseWriter, r *http.Request) {
15
-
user := r.Context().Value("resolvedId").(identity.Identity)
16
-
repo := r.Context().Value("repo").(*models.Repo)
17
-
18
-
scheme := "https"
19
-
if s.config.Core.Dev {
20
-
scheme = "http"
21
-
}
22
-
23
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/info/refs?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
24
-
s.proxyRequest(w, r, targetURL)
25
-
26
-
}
27
-
28
-
func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
29
-
user, ok := r.Context().Value("resolvedId").(identity.Identity)
30
-
if !ok {
31
-
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
32
-
return
33
-
}
34
-
repo := r.Context().Value("repo").(*models.Repo)
35
-
36
-
scheme := "https"
37
-
if s.config.Core.Dev {
38
-
scheme = "http"
39
-
}
40
-
41
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42
-
s.proxyRequest(w, r, targetURL)
43
-
}
44
-
45
-
func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
46
-
user, ok := r.Context().Value("resolvedId").(identity.Identity)
47
-
if !ok {
48
-
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
49
-
return
50
-
}
51
-
repo := r.Context().Value("repo").(*models.Repo)
52
-
53
-
scheme := "https"
54
-
if s.config.Core.Dev {
55
-
scheme = "http"
56
-
}
57
-
58
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
59
-
s.proxyRequest(w, r, targetURL)
60
-
}
61
-
62
-
func (s *State) ReceivePack(w http.ResponseWriter, r *http.Request) {
63
-
user, ok := r.Context().Value("resolvedId").(identity.Identity)
64
-
if !ok {
65
-
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
66
-
return
67
-
}
68
-
repo := r.Context().Value("repo").(*models.Repo)
69
-
70
-
scheme := "https"
71
-
if s.config.Core.Dev {
72
-
scheme = "http"
73
-
}
74
-
75
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-receive-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
76
-
s.proxyRequest(w, r, targetURL)
77
-
}
78
-
79
-
func (s *State) proxyRequest(w http.ResponseWriter, r *http.Request, targetURL string) {
80
-
client := &http.Client{}
81
-
82
-
// Create new request
83
-
proxyReq, err := http.NewRequest(r.Method, targetURL, r.Body)
84
-
if err != nil {
85
-
http.Error(w, err.Error(), http.StatusInternalServerError)
86
-
return
87
-
}
88
-
89
-
// Copy original headers
90
-
proxyReq.Header = r.Header
91
-
92
-
repoOwnerHandle := chi.URLParam(r, "user")
93
-
proxyReq.Header.Add("x-tangled-repo-owner-handle", repoOwnerHandle)
94
-
95
-
// Execute request
96
-
resp, err := client.Do(proxyReq)
97
-
if err != nil {
98
-
http.Error(w, err.Error(), http.StatusInternalServerError)
99
-
return
100
-
}
101
-
defer resp.Body.Close()
102
-
103
-
// Copy response headers
104
-
maps.Copy(w.Header(), resp.Header)
105
-
106
-
// Set response status code
107
-
w.WriteHeader(resp.StatusCode)
108
-
109
-
// Copy response body
110
-
if _, err := io.Copy(w, resp.Body); err != nil {
111
-
http.Error(w, err.Error(), http.StatusInternalServerError)
112
-
return
113
-
}
114
-
}
+185
appview/state/proxy_knot.go
+185
appview/state/proxy_knot.go
···
1
+
package state
2
+
3
+
import (
4
+
"fmt"
5
+
"io"
6
+
"maps"
7
+
"net/http"
8
+
"strings"
9
+
10
+
"github.com/bluesky-social/indigo/atproto/identity"
11
+
indigoxrpc "github.com/bluesky-social/indigo/xrpc"
12
+
"github.com/go-chi/chi/v5"
13
+
"github.com/go-git/go-git/v5/plumbing"
14
+
"github.com/hashicorp/go-version"
15
+
"tangled.org/core/api/tangled"
16
+
"tangled.org/core/appview/models"
17
+
xrpcclient "tangled.org/core/appview/xrpcclient"
18
+
)
19
+
20
+
func (s *State) InfoRefs(w http.ResponseWriter, r *http.Request) {
21
+
user := r.Context().Value("resolvedId").(identity.Identity)
22
+
repo := r.Context().Value("repo").(*models.Repo)
23
+
24
+
scheme := "https"
25
+
if s.config.Core.Dev {
26
+
scheme = "http"
27
+
}
28
+
29
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/info/refs?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
30
+
s.proxyRequest(w, r, targetURL)
31
+
32
+
}
33
+
34
+
func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
35
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
36
+
if !ok {
37
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
38
+
return
39
+
}
40
+
repo := r.Context().Value("repo").(*models.Repo)
41
+
42
+
scheme := "https"
43
+
if s.config.Core.Dev {
44
+
scheme = "http"
45
+
}
46
+
47
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
48
+
s.proxyRequest(w, r, targetURL)
49
+
}
50
+
51
+
func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
52
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
53
+
if !ok {
54
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
55
+
return
56
+
}
57
+
repo := r.Context().Value("repo").(*models.Repo)
58
+
59
+
scheme := "https"
60
+
if s.config.Core.Dev {
61
+
scheme = "http"
62
+
}
63
+
64
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
65
+
s.proxyRequest(w, r, targetURL)
66
+
}
67
+
68
+
func (s *State) ReceivePack(w http.ResponseWriter, r *http.Request) {
69
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
70
+
if !ok {
71
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
72
+
return
73
+
}
74
+
repo := r.Context().Value("repo").(*models.Repo)
75
+
76
+
scheme := "https"
77
+
if s.config.Core.Dev {
78
+
scheme = "http"
79
+
}
80
+
81
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-receive-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
82
+
s.proxyRequest(w, r, targetURL)
83
+
}
84
+
85
+
var knotVersionDownloadArchiveConstraint = version.MustConstraints(version.NewConstraint(">= 1.12.0-alpha"))
86
+
87
+
func (s *State) DownloadArchive(w http.ResponseWriter, r *http.Request) {
88
+
l := s.logger.With("handler", "DownloadArchive")
89
+
ref := chi.URLParam(r, "ref")
90
+
91
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
92
+
if !ok {
93
+
l.Error("failed to resolve user")
94
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
95
+
return
96
+
}
97
+
repo := r.Context().Value("repo").(*models.Repo)
98
+
99
+
scheme := "https"
100
+
if s.config.Core.Dev {
101
+
scheme = "http"
102
+
}
103
+
104
+
host := fmt.Sprintf("%s://%s", scheme, repo.Knot)
105
+
xrpcc := &indigoxrpc.Client{
106
+
Host: host,
107
+
}
108
+
l = l.With("knot", repo.Knot)
109
+
110
+
isCompatible := func() bool {
111
+
out, err := tangled.KnotVersion(r.Context(), xrpcc)
112
+
if err != nil {
113
+
l.Warn("failed to get knot version", "err", err)
114
+
return false
115
+
}
116
+
117
+
v, err := version.NewVersion(out.Version)
118
+
if err != nil {
119
+
l.Warn("failed to parse knot version", "version", out.Version, "err", err)
120
+
return false
121
+
}
122
+
123
+
if !knotVersionDownloadArchiveConstraint.Check(v) {
124
+
l.Warn("knot version incompatible.", "version", v)
125
+
return false
126
+
}
127
+
return true
128
+
}()
129
+
l.Debug("knot compatibility check", "isCompatible", isCompatible)
130
+
if isCompatible {
131
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/archive/%s", scheme, repo.Knot, user.DID, repo.Name, ref)
132
+
s.proxyRequest(w, r, targetURL)
133
+
} else {
134
+
l.Debug("requesting xrpc/sh.tangled.repo.archive")
135
+
archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, repo.DidSlashRepo())
136
+
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
137
+
l.Error("failed to call XRPC repo.archive", "err", xrpcerr)
138
+
s.pages.Error503(w)
139
+
return
140
+
}
141
+
safeRefFilename := strings.ReplaceAll(plumbing.ReferenceName(ref).Short(), "/", "-")
142
+
filename := fmt.Sprintf("%s-%s.tar.gz", repo.Name, safeRefFilename)
143
+
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
144
+
w.Header().Set("Content-Type", "application/gzip")
145
+
w.Header().Set("Content-Length", fmt.Sprintf("%d", len(archiveBytes)))
146
+
w.Write(archiveBytes)
147
+
}
148
+
}
149
+
150
+
func (s *State) proxyRequest(w http.ResponseWriter, r *http.Request, targetURL string) {
151
+
client := &http.Client{}
152
+
153
+
// Create new request
154
+
proxyReq, err := http.NewRequest(r.Method, targetURL, r.Body)
155
+
if err != nil {
156
+
http.Error(w, err.Error(), http.StatusInternalServerError)
157
+
return
158
+
}
159
+
160
+
// Copy original headers
161
+
proxyReq.Header = r.Header
162
+
163
+
repoOwnerHandle := chi.URLParam(r, "user")
164
+
proxyReq.Header.Add("x-tangled-repo-owner-handle", repoOwnerHandle)
165
+
166
+
// Execute request
167
+
resp, err := client.Do(proxyReq)
168
+
if err != nil {
169
+
http.Error(w, err.Error(), http.StatusInternalServerError)
170
+
return
171
+
}
172
+
defer resp.Body.Close()
173
+
174
+
// Copy response headers
175
+
maps.Copy(w.Header(), resp.Header)
176
+
177
+
// Set response status code
178
+
w.WriteHeader(resp.StatusCode)
179
+
180
+
// Copy response body
181
+
if _, err := io.Copy(w, resp.Body); err != nil {
182
+
http.Error(w, err.Error(), http.StatusInternalServerError)
183
+
return
184
+
}
185
+
}
+3
-1
appview/state/router.go
+3
-1
appview/state/router.go
···
104
104
r.Post("/git-upload-archive", s.UploadArchive)
105
105
r.Post("/git-upload-pack", s.UploadPack)
106
106
r.Post("/git-receive-pack", s.ReceivePack)
107
-
107
+
// intentionally doesn't use /* as this isn't
108
+
// a file path
109
+
r.Get("/archive/{ref}", s.DownloadArchive)
108
110
})
109
111
})
110
112
-1530
docs/DOCS.md
-1530
docs/DOCS.md
···
1
-
---
2
-
title: Tangled Documentation
3
-
author: The Tangled Contributors
4
-
date: Sun, 21 Dec 2025
5
-
---
6
-
7
-
# Introduction
8
-
9
-
Tangled is a decentralized code hosting and collaboration
10
-
platform. Every component of Tangled is open-source and
11
-
selfhostable. [tangled.org](https://tangled.org) also
12
-
provides hosting and CI services that are free to use.
13
-
14
-
There are several models for decentralized code
15
-
collaboration platforms, ranging from ActivityPub's
16
-
(Forgejo) federated model, to Radicle's entirely P2P model.
17
-
Our approach attempts to be the best of both worlds by
18
-
adopting atproto—a protocol for building decentralized
19
-
social applications with a central identity.
20
-
21
-
Our approach to this is the idea of "knots". Knots are
22
-
lightweight, headless servers that enable users to host Git
23
-
repositories with ease. Knots are designed for either single
24
-
or multi-tenant use which is perfect for self-hosting on a
25
-
Raspberry Pi at home, or larger "community" servers. By
26
-
default, Tangled provides managed knots where you can host
27
-
your repositories for free.
28
-
29
-
The "appview" at tangled.org acts as a consolidated "view"
30
-
into the whole network, allowing users to access, clone and
31
-
contribute to repositories hosted across different knots
32
-
seamlessly.
33
-
34
-
# Quick Start Guide
35
-
36
-
## Login or Sign up
37
-
38
-
You can [login](https://tangled.org) by using your AT
39
-
account. If you are unclear on what that means, simply head
40
-
to the [signup](https://tangled.org/signup) page and create
41
-
an account. By doing so, you will be choosing Tangled as
42
-
your account provider (you will be granted a handle of the
43
-
form `user.tngl.sh`).
44
-
45
-
In the AT network, users are free to choose their account
46
-
provider (known as a "Personal Data Service", or PDS), and
47
-
login to applications that support AT accounts.
48
-
49
-
You can think of it as "one account for all of the
50
-
atmosphere"!
51
-
52
-
If you already have an AT account (you may have one if you
53
-
signed up to Bluesky, for example), you can login with the
54
-
same handle on Tangled (so just use `user.bsky.social` on
55
-
the login page).
56
-
57
-
## Add an SSH Key
58
-
59
-
Once you are logged in, you can start creating repositories
60
-
and pushing code. Tangled supports pushing git repositories
61
-
over SSH.
62
-
63
-
First, you'll need to generate an SSH key if you don't
64
-
already have one:
65
-
66
-
```bash
67
-
ssh-keygen -t ed25519 -C "foo@bar.com"
68
-
```
69
-
70
-
When prompted, save the key to the default location
71
-
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
72
-
73
-
Copy your public key to your clipboard:
74
-
75
-
```bash
76
-
# on X11
77
-
cat ~/.ssh/id_ed25519.pub | xclip -sel c
78
-
79
-
# on wayland
80
-
cat ~/.ssh/id_ed25519.pub | wl-copy
81
-
82
-
# on macos
83
-
cat ~/.ssh/id_ed25519.pub | pbcopy
84
-
```
85
-
86
-
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
87
-
paste your public key, give it a descriptive name, and hit
88
-
save.
89
-
90
-
## Create a Repository
91
-
92
-
Once your SSH key is added, create your first repository:
93
-
94
-
1. Hit the green `+` icon on the topbar, and select
95
-
repository
96
-
2. Enter a repository name
97
-
3. Add a description
98
-
4. Choose a knotserver to host this repository on
99
-
5. Hit create
100
-
101
-
"Knots" are selfhostable, lightweight git servers that can
102
-
host your repository. Unlike traditional code forges, your
103
-
code can live on any server. Read the [Knots](TODO) section
104
-
for more.
105
-
106
-
## Configure SSH
107
-
108
-
To ensure Git uses the correct SSH key and connects smoothly
109
-
to Tangled, add this configuration to your `~/.ssh/config`
110
-
file:
111
-
112
-
```
113
-
Host tangled.org
114
-
Hostname tangled.org
115
-
User git
116
-
IdentityFile ~/.ssh/id_ed25519
117
-
AddressFamily inet
118
-
```
119
-
120
-
This tells SSH to use your specific key when connecting to
121
-
Tangled and prevents authentication issues if you have
122
-
multiple SSH keys.
123
-
124
-
Note that this configuration only works for knotservers that
125
-
are hosted by tangled.org. If you use a custom knot, refer
126
-
to the [Knots](TODO) section.
127
-
128
-
## Push Your First Repository
129
-
130
-
Initialize a new git repository:
131
-
132
-
```bash
133
-
mkdir my-project
134
-
cd my-project
135
-
136
-
git init
137
-
echo "# My Project" > README.md
138
-
```
139
-
140
-
Add some content and push!
141
-
142
-
```bash
143
-
git add README.md
144
-
git commit -m "Initial commit"
145
-
git remote add origin git@tangled.org:user.tngl.sh/my-project
146
-
git push -u origin main
147
-
```
148
-
149
-
That's it! Your code is now hosted on Tangled.
150
-
151
-
## Migrating an existing repository
152
-
153
-
Moving your repositories from GitHub, GitLab, Bitbucket, or
154
-
any other Git forge to Tangled is straightforward. You'll
155
-
simply change your repository's remote URL. At the moment,
156
-
Tangled does not have any tooling to migrate data such as
157
-
GitHub issues or pull requests.
158
-
159
-
First, create a new repository on tangled.org as described
160
-
in the [Quick Start Guide](#create-a-repository).
161
-
162
-
Navigate to your existing local repository:
163
-
164
-
```bash
165
-
cd /path/to/your/existing/repo
166
-
```
167
-
168
-
You can inspect your existing git remote like so:
169
-
170
-
```bash
171
-
git remote -v
172
-
```
173
-
174
-
You'll see something like:
175
-
176
-
```
177
-
origin git@github.com:username/my-project (fetch)
178
-
origin git@github.com:username/my-project (push)
179
-
```
180
-
181
-
Update the remote URL to point to tangled:
182
-
183
-
```bash
184
-
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
185
-
```
186
-
187
-
Verify the change:
188
-
189
-
```bash
190
-
git remote -v
191
-
```
192
-
193
-
You should now see:
194
-
195
-
```
196
-
origin git@tangled.org:user.tngl.sh/my-project (fetch)
197
-
origin git@tangled.org:user.tngl.sh/my-project (push)
198
-
```
199
-
200
-
Push all your branches and tags to tangled:
201
-
202
-
```bash
203
-
git push -u origin --all
204
-
git push -u origin --tags
205
-
```
206
-
207
-
Your repository is now migrated to Tangled! All commit
208
-
history, branches, and tags have been preserved.
209
-
210
-
## Mirroring a repository to Tangled
211
-
212
-
If you want to maintain your repository on multiple forges
213
-
simultaneously, for example, keeping your primary repository
214
-
on GitHub while mirroring to Tangled for backup or
215
-
redundancy, you can do so by adding multiple remotes.
216
-
217
-
You can configure your local repository to push to both
218
-
Tangled and, say, GitHub. You may already have the following
219
-
setup:
220
-
221
-
```
222
-
$ git remote -v
223
-
origin git@github.com:username/my-project (fetch)
224
-
origin git@github.com:username/my-project (push)
225
-
```
226
-
227
-
Now add Tangled as an additional push URL to the same
228
-
remote:
229
-
230
-
```bash
231
-
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
232
-
```
233
-
234
-
You also need to re-add the original URL as a push
235
-
destination (git replaces the push URL when you use `--add`
236
-
the first time):
237
-
238
-
```bash
239
-
git remote set-url --add --push origin git@github.com:username/my-project
240
-
```
241
-
242
-
Verify your configuration:
243
-
244
-
```
245
-
$ git remote -v
246
-
origin git@github.com:username/repo (fetch)
247
-
origin git@tangled.org:username/my-project (push)
248
-
origin git@github.com:username/repo (push)
249
-
```
250
-
251
-
Notice that there's one fetch URL (the primary remote) and
252
-
two push URLs. Now, whenever you push, git will
253
-
automatically push to both remotes:
254
-
255
-
```bash
256
-
git push origin main
257
-
```
258
-
259
-
This single command pushes your `main` branch to both GitHub
260
-
and Tangled simultaneously.
261
-
262
-
To push all branches and tags:
263
-
264
-
```bash
265
-
git push origin --all
266
-
git push origin --tags
267
-
```
268
-
269
-
If you prefer more control over which remote you push to,
270
-
you can maintain separate remotes:
271
-
272
-
```bash
273
-
git remote add github git@github.com:username/my-project
274
-
git remote add tangled git@tangled.org:username/my-project
275
-
```
276
-
277
-
Then push to each explicitly:
278
-
279
-
```bash
280
-
git push github main
281
-
git push tangled main
282
-
```
283
-
284
-
# Knot self-hosting guide
285
-
286
-
So you want to run your own knot server? Great! Here are a few prerequisites:
287
-
288
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
289
-
2. A (sub)domain name. People generally use `knot.example.com`.
290
-
3. A valid SSL certificate for your domain.
291
-
292
-
## NixOS
293
-
294
-
Refer to the [knot
295
-
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
296
-
for a full list of options. Sample configurations:
297
-
298
-
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
299
-
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
300
-
301
-
## Docker
302
-
303
-
Refer to
304
-
[@tangled.org/knot-docker](https://tangled.sh/@tangled.sh/knot-docker).
305
-
Note that this is community maintained.
306
-
307
-
## Manual setup
308
-
309
-
First, clone this repository:
310
-
311
-
```
312
-
git clone https://tangled.org/@tangled.org/core
313
-
```
314
-
315
-
Then, build the `knot` CLI. This is the knot administration
316
-
and operation tool. For the purpose of this guide, we're
317
-
only concerned with these subcommands:
318
-
319
-
* `knot server`: the main knot server process, typically
320
-
run as a supervised service
321
-
* `knot guard`: handles role-based access control for git
322
-
over SSH (you'll never have to run this yourself)
323
-
* `knot keys`: fetches SSH keys associated with your knot;
324
-
we'll use this to generate the SSH
325
-
`AuthorizedKeysCommand`
326
-
327
-
```
328
-
cd core
329
-
export CGO_ENABLED=1
330
-
go build -o knot ./cmd/knot
331
-
```
332
-
333
-
Next, move the `knot` binary to a location owned by `root` --
334
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
335
-
336
-
```
337
-
sudo mv knot /usr/local/bin/knot
338
-
sudo chown root:root /usr/local/bin/knot
339
-
```
340
-
341
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
342
-
specific permissions](https://stackoverflow.com/a/27638306). The
343
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
344
-
retrieve a user's public SSH keys dynamically for authentication. Let's
345
-
set that up.
346
-
347
-
```
348
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
349
-
Match User git
350
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
351
-
AuthorizedKeysCommandUser nobody
352
-
EOF
353
-
```
354
-
355
-
Then, reload `sshd`:
356
-
357
-
```
358
-
sudo systemctl reload ssh
359
-
```
360
-
361
-
Next, create the `git` user. We'll use the `git` user's home directory
362
-
to store repositories:
363
-
364
-
```
365
-
sudo adduser git
366
-
```
367
-
368
-
Create `/home/git/.knot.env` with the following, updating the values as
369
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
370
-
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
371
-
372
-
```
373
-
KNOT_REPO_SCAN_PATH=/home/git
374
-
KNOT_SERVER_HOSTNAME=knot.example.com
375
-
APPVIEW_ENDPOINT=https://tangled.sh
376
-
KNOT_SERVER_OWNER=did:plc:foobar
377
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
378
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
379
-
```
380
-
381
-
If you run a Linux distribution that uses systemd, you can use the provided
382
-
service file to run the server. Copy
383
-
[`knotserver.service`](/systemd/knotserver.service)
384
-
to `/etc/systemd/system/`. Then, run:
385
-
386
-
```
387
-
systemctl enable knotserver
388
-
systemctl start knotserver
389
-
```
390
-
391
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
392
-
knot. Here's an example configuration for Nginx:
393
-
394
-
```
395
-
server {
396
-
listen 80;
397
-
listen [::]:80;
398
-
server_name knot.example.com;
399
-
400
-
location / {
401
-
proxy_pass http://localhost:5555;
402
-
proxy_set_header Host $host;
403
-
proxy_set_header X-Real-IP $remote_addr;
404
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
405
-
proxy_set_header X-Forwarded-Proto $scheme;
406
-
}
407
-
408
-
# wss endpoint for git events
409
-
location /events {
410
-
proxy_set_header X-Forwarded-For $remote_addr;
411
-
proxy_set_header Host $http_host;
412
-
proxy_set_header Upgrade websocket;
413
-
proxy_set_header Connection Upgrade;
414
-
proxy_pass http://localhost:5555;
415
-
}
416
-
# additional config for SSL/TLS go here.
417
-
}
418
-
419
-
```
420
-
421
-
Remember to use Let's Encrypt or similar to procure a certificate for your
422
-
knot domain.
423
-
424
-
You should now have a running knot server! You can finalize
425
-
your registration by hitting the `verify` button on the
426
-
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
427
-
a record on your PDS to announce the existence of the knot.
428
-
429
-
### Custom paths
430
-
431
-
(This section applies to manual setup only. Docker users should edit the mounts
432
-
in `docker-compose.yml` instead.)
433
-
434
-
Right now, the database and repositories of your knot lives in `/home/git`. You
435
-
can move these paths if you'd like to store them in another folder. Be careful
436
-
when adjusting these paths:
437
-
438
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
439
-
any possible side effects. Remember to restart it once you're done.
440
-
* Make backups before moving in case something goes wrong.
441
-
* Make sure the `git` user can read and write from the new paths.
442
-
443
-
#### Database
444
-
445
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
446
-
and we want to move it to `/home/git/database/knotserver.db`.
447
-
448
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
449
-
and `.db-wal` files if they exist.
450
-
451
-
```
452
-
mkdir /home/git/database
453
-
cp /home/git/knotserver.db* /home/git/database
454
-
```
455
-
456
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
457
-
the new file path (_not_ the directory):
458
-
459
-
```
460
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
461
-
```
462
-
463
-
#### Repositories
464
-
465
-
As an example, let's say the repositories are currently in `/home/git`, and we
466
-
want to move them into `/home/git/repositories`.
467
-
468
-
Create the new folder, then move the existing repositories (if there are any):
469
-
470
-
```
471
-
mkdir /home/git/repositories
472
-
# move all DIDs into the new folder; these will vary for you!
473
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
474
-
```
475
-
476
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
477
-
to the new directory:
478
-
479
-
```
480
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
481
-
```
482
-
483
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
484
-
repository path:
485
-
486
-
```
487
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
488
-
Match User git
489
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
490
-
AuthorizedKeysCommandUser nobody
491
-
EOF
492
-
```
493
-
494
-
Make sure to restart your SSH server!
495
-
496
-
#### MOTD (message of the day)
497
-
498
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
499
-
`/home/git/motd` file:
500
-
501
-
```
502
-
printf "Hi from this knot!\n" > /home/git/motd
503
-
```
504
-
505
-
Note that you should add a newline at the end if setting a non-empty message
506
-
since the knot won't do this for you.
507
-
508
-
# Spindles
509
-
510
-
## Pipelines
511
-
512
-
Spindle workflows allow you to write CI/CD pipelines in a
513
-
simple format. They're located in the `.tangled/workflows`
514
-
directory at the root of your repository, and are defined
515
-
using YAML.
516
-
517
-
The fields are:
518
-
519
-
- [Trigger](#trigger): A **required** field that defines
520
-
when a workflow should be triggered.
521
-
- [Engine](#engine): A **required** field that defines which
522
-
engine a workflow should run on.
523
-
- [Clone options](#clone-options): An **optional** field
524
-
that defines how the repository should be cloned.
525
-
- [Dependencies](#dependencies): An **optional** field that
526
-
allows you to list dependencies you may need.
527
-
- [Environment](#environment): An **optional** field that
528
-
allows you to define environment variables.
529
-
- [Steps](#steps): An **optional** field that allows you to
530
-
define what steps should run in the workflow.
531
-
532
-
### Trigger
533
-
534
-
The first thing to add to a workflow is the trigger, which
535
-
defines when a workflow runs. This is defined using a `when`
536
-
field, which takes in a list of conditions. Each condition
537
-
has the following fields:
538
-
539
-
- `event`: This is a **required** field that defines when
540
-
your workflow should run. It's a list that can take one or
541
-
more of the following values:
542
-
- `push`: The workflow should run every time a commit is
543
-
pushed to the repository.
544
-
- `pull_request`: The workflow should run every time a
545
-
pull request is made or updated.
546
-
- `manual`: The workflow can be triggered manually.
547
-
- `branch`: Defines which branches the workflow should run
548
-
for. If used with the `push` event, commits to the
549
-
branch(es) listed here will trigger the workflow. If used
550
-
with the `pull_request` event, updates to pull requests
551
-
targeting the branch(es) listed here will trigger the
552
-
workflow. This field has no effect with the `manual`
553
-
event. Supports glob patterns using `*` and `**` (e.g.,
554
-
`main`, `develop`, `release-*`). Either `branch` or `tag`
555
-
(or both) must be specified for `push` events.
556
-
- `tag`: Defines which tags the workflow should run for.
557
-
Only used with the `push` event - when tags matching the
558
-
pattern(s) listed here are pushed, the workflow will
559
-
trigger. This field has no effect with `pull_request` or
560
-
`manual` events. Supports glob patterns using `*` and `**`
561
-
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
562
-
`tag` (or both) must be specified for `push` events.
563
-
564
-
For example, if you'd like to define a workflow that runs
565
-
when commits are pushed to the `main` and `develop`
566
-
branches, or when pull requests that target the `main`
567
-
branch are updated, or manually, you can do so with:
568
-
569
-
```yaml
570
-
when:
571
-
- event: ["push", "manual"]
572
-
branch: ["main", "develop"]
573
-
- event: ["pull_request"]
574
-
branch: ["main"]
575
-
```
576
-
577
-
You can also trigger workflows on tag pushes. For instance,
578
-
to run a deployment workflow when tags matching `v*` are
579
-
pushed:
580
-
581
-
```yaml
582
-
when:
583
-
- event: ["push"]
584
-
tag: ["v*"]
585
-
```
586
-
587
-
You can even combine branch and tag patterns in a single
588
-
constraint (the workflow triggers if either matches):
589
-
590
-
```yaml
591
-
when:
592
-
- event: ["push"]
593
-
branch: ["main", "release-*"]
594
-
tag: ["v*", "stable"]
595
-
```
596
-
597
-
### Engine
598
-
599
-
Next is the engine on which the workflow should run, defined
600
-
using the **required** `engine` field. The currently
601
-
supported engines are:
602
-
603
-
- `nixery`: This uses an instance of
604
-
[Nixery](https://nixery.dev) to run steps, which allows
605
-
you to add [dependencies](#dependencies) from
606
-
[Nixpkgs](https://github.com/NixOS/nixpkgs). You can
607
-
search for packages on https://search.nixos.org, and
608
-
there's a pretty good chance the package(s) you're looking
609
-
for will be there.
610
-
611
-
Example:
612
-
613
-
```yaml
614
-
engine: "nixery"
615
-
```
616
-
617
-
### Clone options
618
-
619
-
When a workflow starts, the first step is to clone the
620
-
repository. You can customize this behavior using the
621
-
**optional** `clone` field. It has the following fields:
622
-
623
-
- `skip`: Setting this to `true` will skip cloning the
624
-
repository. This can be useful if your workflow is doing
625
-
something that doesn't require anything from the
626
-
repository itself. This is `false` by default.
627
-
- `depth`: This sets the number of commits, or the "clone
628
-
depth", to fetch from the repository. For example, if you
629
-
set this to 2, the last 2 commits will be fetched. By
630
-
default, the depth is set to 1, meaning only the most
631
-
recent commit will be fetched, which is the commit that
632
-
triggered the workflow.
633
-
- `submodules`: If you use [git
634
-
submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules)
635
-
in your repository, setting this field to `true` will
636
-
recursively fetch all submodules. This is `false` by
637
-
default.
638
-
639
-
The default settings are:
640
-
641
-
```yaml
642
-
clone:
643
-
skip: false
644
-
depth: 1
645
-
submodules: false
646
-
```
647
-
648
-
### Dependencies
649
-
650
-
Usually when you're running a workflow, you'll need
651
-
additional dependencies. The `dependencies` field lets you
652
-
define which dependencies to get, and from where. It's a
653
-
key-value map, with the key being the registry to fetch
654
-
dependencies from, and the value being the list of
655
-
dependencies to fetch.
656
-
657
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a
658
-
package called `my_pkg` you've made from your own registry
659
-
at your repository at
660
-
`https://tangled.sh/@example.com/my_pkg`. You can define
661
-
those dependencies like so:
662
-
663
-
```yaml
664
-
dependencies:
665
-
# nixpkgs
666
-
nixpkgs:
667
-
- nodejs
668
-
- go
669
-
# custom registry
670
-
git+https://tangled.org/@example.com/my_pkg:
671
-
- my_pkg
672
-
```
673
-
674
-
Now these dependencies are available to use in your
675
-
workflow!
676
-
677
-
### Environment
678
-
679
-
The `environment` field allows you to define environment
680
-
variables that will be available throughout the entire
681
-
workflow. **Do not put secrets here, these environment
682
-
variables are visible to anyone viewing the repository. You
683
-
can add secrets for pipelines in your repository's
684
-
settings.**
685
-
686
-
Example:
687
-
688
-
```yaml
689
-
environment:
690
-
GOOS: "linux"
691
-
GOARCH: "arm64"
692
-
NODE_ENV: "production"
693
-
MY_ENV_VAR: "MY_ENV_VALUE"
694
-
```
695
-
696
-
### Steps
697
-
698
-
The `steps` field allows you to define what steps should run
699
-
in the workflow. It's a list of step objects, each with the
700
-
following fields:
701
-
702
-
- `name`: This field allows you to give your step a name.
703
-
This name is visible in your workflow runs, and is used to
704
-
describe what the step is doing.
705
-
- `command`: This field allows you to define a command to
706
-
run in that step. The step is run in a Bash shell, and the
707
-
logs from the command will be visible in the pipelines
708
-
page on the Tangled website. The
709
-
[dependencies](#dependencies) you added will be available
710
-
to use here.
711
-
- `environment`: Similar to the global
712
-
[environment](#environment) config, this **optional**
713
-
field is a key-value map that allows you to set
714
-
environment variables for the step. **Do not put secrets
715
-
here, these environment variables are visible to anyone
716
-
viewing the repository. You can add secrets for pipelines
717
-
in your repository's settings.**
718
-
719
-
Example:
720
-
721
-
```yaml
722
-
steps:
723
-
- name: "Build backend"
724
-
command: "go build"
725
-
environment:
726
-
GOOS: "darwin"
727
-
GOARCH: "arm64"
728
-
- name: "Build frontend"
729
-
command: "npm run build"
730
-
environment:
731
-
NODE_ENV: "production"
732
-
```
733
-
734
-
### Complete workflow
735
-
736
-
```yaml
737
-
# .tangled/workflows/build.yml
738
-
739
-
when:
740
-
- event: ["push", "manual"]
741
-
branch: ["main", "develop"]
742
-
- event: ["pull_request"]
743
-
branch: ["main"]
744
-
745
-
engine: "nixery"
746
-
747
-
# using the default values
748
-
clone:
749
-
skip: false
750
-
depth: 1
751
-
submodules: false
752
-
753
-
dependencies:
754
-
# nixpkgs
755
-
nixpkgs:
756
-
- nodejs
757
-
- go
758
-
# custom registry
759
-
git+https://tangled.org/@example.com/my_pkg:
760
-
- my_pkg
761
-
762
-
environment:
763
-
GOOS: "linux"
764
-
GOARCH: "arm64"
765
-
NODE_ENV: "production"
766
-
MY_ENV_VAR: "MY_ENV_VALUE"
767
-
768
-
steps:
769
-
- name: "Build backend"
770
-
command: "go build"
771
-
environment:
772
-
GOOS: "darwin"
773
-
GOARCH: "arm64"
774
-
- name: "Build frontend"
775
-
command: "npm run build"
776
-
environment:
777
-
NODE_ENV: "production"
778
-
```
779
-
780
-
If you want another example of a workflow, you can look at
781
-
the one [Tangled uses to build the
782
-
project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
783
-
784
-
## Self-hosting guide
785
-
786
-
### Prerequisites
787
-
788
-
* Go
789
-
* Docker (the only supported backend currently)
790
-
791
-
### Configuration
792
-
793
-
Spindle is configured using environment variables. The following environment variables are available:
794
-
795
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
796
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
797
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
798
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
799
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
800
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
801
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
802
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
803
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
804
-
805
-
### Running spindle
806
-
807
-
1. **Set the environment variables.** For example:
808
-
809
-
```shell
810
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
811
-
export SPINDLE_SERVER_OWNER="your-did"
812
-
```
813
-
814
-
2. **Build the Spindle binary.**
815
-
816
-
```shell
817
-
cd core
818
-
go mod download
819
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
820
-
```
821
-
822
-
3. **Create the log directory.**
823
-
824
-
```shell
825
-
sudo mkdir -p /var/log/spindle
826
-
sudo chown $USER:$USER -R /var/log/spindle
827
-
```
828
-
829
-
4. **Run the Spindle binary.**
830
-
831
-
```shell
832
-
./cmd/spindle/spindle
833
-
```
834
-
835
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
836
-
837
-
## Architecture
838
-
839
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
840
-
841
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
842
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
843
-
* when a new repo record comes through (typically when you add a spindle to a
844
-
repo from the settings), spindle then resolves the underlying knot and
845
-
subscribes to repo events (see:
846
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
847
-
* the spindle engine then handles execution of the pipeline, with results and
848
-
logs beamed on the spindle event stream over wss
849
-
850
-
### The engine
851
-
852
-
At present, the only supported backend is Docker (and Podman, if Docker
853
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
854
-
executes each step in the pipeline in a fresh container, with state persisted
855
-
across steps within the `/tangled/workspace` directory.
856
-
857
-
The base image for the container is constructed on the fly using
858
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
859
-
used packages.
860
-
861
-
The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
862
-
863
-
## Secrets with openbao
864
-
865
-
This document covers setting up Spindle to use OpenBao for secrets
866
-
management via OpenBao Proxy instead of the default SQLite backend.
867
-
868
-
### Overview
869
-
870
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
871
-
authentication automatically using AppRole credentials, while Spindle
872
-
connects to the local proxy instead of directly to the OpenBao server.
873
-
874
-
This approach provides better security, automatic token renewal, and
875
-
simplified application code.
876
-
877
-
### Installation
878
-
879
-
Install OpenBao from nixpkgs:
880
-
881
-
```bash
882
-
nix shell nixpkgs#openbao # for a local server
883
-
```
884
-
885
-
### Setup
886
-
887
-
The setup process is documented for both local development and production.
888
-
889
-
#### Local development
890
-
891
-
Start OpenBao in dev mode:
892
-
893
-
```bash
894
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
895
-
```
896
-
897
-
This starts OpenBao on `http://localhost:8200` with a root token.
898
-
899
-
Set up environment for bao CLI:
900
-
901
-
```bash
902
-
export BAO_ADDR=http://localhost:8200
903
-
export BAO_TOKEN=root
904
-
```
905
-
906
-
#### Production
907
-
908
-
You would typically use a systemd service with a
909
-
configuration file. Refer to
910
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
911
-
for how this can be achieved using Nix.
912
-
913
-
Then, initialize the bao server:
914
-
915
-
```bash
916
-
bao operator init -key-shares=1 -key-threshold=1
917
-
```
918
-
919
-
This will print out an unseal key and a root token. Save them
920
-
somewhere (like a password manager). Then unseal the vault
921
-
to begin setting it up:
922
-
923
-
```bash
924
-
bao operator unseal <unseal_key>
925
-
```
926
-
927
-
All steps below remain the same across both dev and
928
-
production setups.
929
-
930
-
#### Configure openbao server
931
-
932
-
Create the spindle KV mount:
933
-
934
-
```bash
935
-
bao secrets enable -path=spindle -version=2 kv
936
-
```
937
-
938
-
Set up AppRole authentication and policy:
939
-
940
-
Create a policy file `spindle-policy.hcl`:
941
-
942
-
```hcl
943
-
# Full access to spindle KV v2 data
944
-
path "spindle/data/*" {
945
-
capabilities = ["create", "read", "update", "delete"]
946
-
}
947
-
948
-
# Access to metadata for listing and management
949
-
path "spindle/metadata/*" {
950
-
capabilities = ["list", "read", "delete", "update"]
951
-
}
952
-
953
-
# Allow listing at root level
954
-
path "spindle/" {
955
-
capabilities = ["list"]
956
-
}
957
-
958
-
# Required for connection testing and health checks
959
-
path "auth/token/lookup-self" {
960
-
capabilities = ["read"]
961
-
}
962
-
```
963
-
964
-
Apply the policy and create an AppRole:
965
-
966
-
```bash
967
-
bao policy write spindle-policy spindle-policy.hcl
968
-
bao auth enable approle
969
-
bao write auth/approle/role/spindle \
970
-
token_policies="spindle-policy" \
971
-
token_ttl=1h \
972
-
token_max_ttl=4h \
973
-
bind_secret_id=true \
974
-
secret_id_ttl=0 \
975
-
secret_id_num_uses=0
976
-
```
977
-
978
-
Get the credentials:
979
-
980
-
```bash
981
-
# Get role ID (static)
982
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
983
-
984
-
# Generate secret ID
985
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
986
-
987
-
echo "Role ID: $ROLE_ID"
988
-
echo "Secret ID: $SECRET_ID"
989
-
```
990
-
991
-
#### Create proxy configuration
992
-
993
-
Create the credential files:
994
-
995
-
```bash
996
-
# Create directory for OpenBao files
997
-
mkdir -p /tmp/openbao
998
-
999
-
# Save credentials
1000
-
echo "$ROLE_ID" > /tmp/openbao/role-id
1001
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
1002
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1003
-
```
1004
-
1005
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1006
-
1007
-
```hcl
1008
-
# OpenBao server connection
1009
-
vault {
1010
-
address = "http://localhost:8200"
1011
-
}
1012
-
1013
-
# Auto-Auth using AppRole
1014
-
auto_auth {
1015
-
method "approle" {
1016
-
mount_path = "auth/approle"
1017
-
config = {
1018
-
role_id_file_path = "/tmp/openbao/role-id"
1019
-
secret_id_file_path = "/tmp/openbao/secret-id"
1020
-
}
1021
-
}
1022
-
1023
-
# Optional: write token to file for debugging
1024
-
sink "file" {
1025
-
config = {
1026
-
path = "/tmp/openbao/token"
1027
-
mode = 0640
1028
-
}
1029
-
}
1030
-
}
1031
-
1032
-
# Proxy listener for Spindle
1033
-
listener "tcp" {
1034
-
address = "127.0.0.1:8201"
1035
-
tls_disable = true
1036
-
}
1037
-
1038
-
# Enable API proxy with auto-auth token
1039
-
api_proxy {
1040
-
use_auto_auth_token = true
1041
-
}
1042
-
1043
-
# Enable response caching
1044
-
cache {
1045
-
use_auto_auth_token = true
1046
-
}
1047
-
1048
-
# Logging
1049
-
log_level = "info"
1050
-
```
1051
-
1052
-
#### Start the proxy
1053
-
1054
-
Start OpenBao Proxy:
1055
-
1056
-
```bash
1057
-
bao proxy -config=/tmp/openbao/proxy.hcl
1058
-
```
1059
-
1060
-
The proxy will authenticate with OpenBao and start listening on
1061
-
`127.0.0.1:8201`.
1062
-
1063
-
#### Configure spindle
1064
-
1065
-
Set these environment variables for Spindle:
1066
-
1067
-
```bash
1068
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1069
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1070
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1071
-
```
1072
-
1073
-
On startup, the spindle will now connect to the local proxy,
1074
-
which handles all authentication automatically.
1075
-
1076
-
### Production setup for proxy
1077
-
1078
-
For production, you'll want to run the proxy as a service:
1079
-
1080
-
Place your production configuration in
1081
-
`/etc/openbao/proxy.hcl` with proper TLS settings for the
1082
-
vault connection.
1083
-
1084
-
### Verifying setup
1085
-
1086
-
Test the proxy directly:
1087
-
1088
-
```bash
1089
-
# Check proxy health
1090
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1091
-
1092
-
# Test token lookup through proxy
1093
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1094
-
```
1095
-
1096
-
Test OpenBao operations through the server:
1097
-
1098
-
```bash
1099
-
# List all secrets
1100
-
bao kv list spindle/
1101
-
1102
-
# Add a test secret via Spindle API, then check it exists
1103
-
bao kv list spindle/repos/
1104
-
1105
-
# Get a specific secret
1106
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
1107
-
```
1108
-
1109
-
### How it works
1110
-
1111
-
- Spindle connects to OpenBao Proxy on localhost (typically
1112
-
port 8200 or 8201)
1113
-
- The proxy authenticates with OpenBao using AppRole
1114
-
credentials
1115
-
- All Spindle requests go through the proxy, which injects
1116
-
authentication tokens
1117
-
- Secrets are stored at
1118
-
`spindle/repos/{sanitized_repo_path}/{secret_key}`
1119
-
- Repository paths like `did:plc:alice/myrepo` become
1120
-
`did_plc_alice_myrepo`
1121
-
- The proxy handles all token renewal automatically
1122
-
- Spindle no longer manages tokens or authentication
1123
-
directly
1124
-
1125
-
### Troubleshooting
1126
-
1127
-
**Connection refused**: Check that the OpenBao Proxy is
1128
-
running and listening on the configured address.
1129
-
1130
-
**403 errors**: Verify the AppRole credentials are correct
1131
-
and the policy has the necessary permissions.
1132
-
1133
-
**404 route errors**: The spindle KV mount probably doesn't
1134
-
exist - run the mount creation step again.
1135
-
1136
-
**Proxy authentication failures**: Check the proxy logs and
1137
-
verify the role-id and secret-id files are readable and
1138
-
contain valid credentials.
1139
-
1140
-
**Secret not found after writing**: This can indicate policy
1141
-
permission issues. Verify the policy includes both
1142
-
`spindle/data/*` and `spindle/metadata/*` paths with
1143
-
appropriate capabilities.
1144
-
1145
-
Check proxy logs:
1146
-
1147
-
```bash
1148
-
# If running as systemd service
1149
-
journalctl -u openbao-proxy -f
1150
-
1151
-
# If running directly, check the console output
1152
-
```
1153
-
1154
-
Test AppRole authentication manually:
1155
-
1156
-
```bash
1157
-
bao write auth/approle/login \
1158
-
role_id="$(cat /tmp/openbao/role-id)" \
1159
-
secret_id="$(cat /tmp/openbao/secret-id)"
1160
-
```
1161
-
1162
-
# Migrating knots & spindles
1163
-
1164
-
Sometimes, non-backwards compatible changes are made to the
1165
-
knot/spindle XRPC APIs. If you host a knot or a spindle, you
1166
-
will need to follow this guide to upgrade. Typically, this
1167
-
only requires you to deploy the newest version.
1168
-
1169
-
This document is laid out in reverse-chronological order.
1170
-
Newer migration guides are listed first, and older guides
1171
-
are further down the page.
1172
-
1173
-
## Upgrading from v1.8.x
1174
-
1175
-
After v1.8.2, the HTTP API for knots and spindles has been
1176
-
deprecated and replaced with XRPC. Repositories on outdated
1177
-
knots will not be viewable from the appview. Upgrading is
1178
-
straightforward however.
1179
-
1180
-
For knots:
1181
-
1182
-
- Upgrade to latest tag (v1.9.0 or above)
1183
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1184
-
hit the "retry" button to verify your knot
1185
-
1186
-
For spindles:
1187
-
1188
-
- Upgrade to latest tag (v1.9.0 or above)
1189
-
- Head to the [spindle
1190
-
dashboard](https://tangled.org/settings/spindles) and hit the
1191
-
"retry" button to verify your spindle
1192
-
1193
-
## Upgrading from v1.7.x
1194
-
1195
-
After v1.7.0, knot secrets have been deprecated. You no
1196
-
longer need a secret from the appview to run a knot. All
1197
-
authorized commands to knots are managed via [Inter-Service
1198
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
1199
-
Knots will be read-only until upgraded.
1200
-
1201
-
Upgrading is quite easy, in essence:
1202
-
1203
-
- `KNOT_SERVER_SECRET` is no more, you can remove this
1204
-
environment variable entirely
1205
-
- `KNOT_SERVER_OWNER` is now required on boot, set this to
1206
-
your DID. You can find your DID in the
1207
-
[settings](https://tangled.org/settings) page.
1208
-
- Restart your knot once you have replaced the environment
1209
-
variable
1210
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1211
-
hit the "retry" button to verify your knot. This simply
1212
-
writes a `sh.tangled.knot` record to your PDS.
1213
-
1214
-
If you use the nix module, simply bump the flake to the
1215
-
latest revision, and change your config block like so:
1216
-
1217
-
```diff
1218
-
services.tangled.knot = {
1219
-
enable = true;
1220
-
server = {
1221
-
- secretFile = /path/to/secret;
1222
-
+ owner = "did:plc:foo";
1223
-
};
1224
-
};
1225
-
```
1226
-
1227
-
# Hacking on Tangled
1228
-
1229
-
We highly recommend [installing
1230
-
nix](https://nixos.org/download/) (the package manager)
1231
-
before working on the codebase. The nix flake provides a lot
1232
-
of helpers to get started and most importantly, builds and
1233
-
dev shells are entirely deterministic.
1234
-
1235
-
To set up your dev environment:
1236
-
1237
-
```bash
1238
-
nix develop
1239
-
```
1240
-
1241
-
Non-nix users can look at the `devShell` attribute in the
1242
-
`flake.nix` file to determine necessary dependencies.
1243
-
1244
-
## Running the appview
1245
-
1246
-
The nix flake also exposes a few `app` attributes (run `nix
1247
-
flake show` to see a full list of what the flake provides),
1248
-
one of the apps runs the appview with the `air`
1249
-
live-reloader:
1250
-
1251
-
```bash
1252
-
TANGLED_DEV=true nix run .#watch-appview
1253
-
1254
-
# TANGLED_DB_PATH might be of interest to point to
1255
-
# different sqlite DBs
1256
-
1257
-
# in a separate shell, you can live-reload tailwind
1258
-
nix run .#watch-tailwind
1259
-
```
1260
-
1261
-
To authenticate with the appview, you will need redis and
1262
-
OAUTH JWKs to be setup:
1263
-
1264
-
```
1265
-
# oauth jwks should already be setup by the nix devshell:
1266
-
echo $TANGLED_OAUTH_CLIENT_SECRET
1267
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1268
-
1269
-
echo $TANGLED_OAUTH_CLIENT_KID
1270
-
1761667908
1271
-
1272
-
# if not, you can set it up yourself:
1273
-
goat key generate -t P-256
1274
-
Key Type: P-256 / secp256r1 / ES256 private key
1275
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1276
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1277
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1278
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1279
-
1280
-
# the secret key from above
1281
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1282
-
1283
-
# run redis in a new shell to store oauth sessions
1284
-
redis-server
1285
-
```
1286
-
1287
-
## Running knots and spindles
1288
-
1289
-
An end-to-end knot setup requires setting up a machine with
1290
-
`sshd`, `AuthorizedKeysCommand`, and git user, which is
1291
-
quite cumbersome. So the nix flake provides a
1292
-
`nixosConfiguration` to do so.
1293
-
1294
-
<details>
1295
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
1296
-
1297
-
In order to build Tangled's dev VM on macOS, you will
1298
-
first need to set up a Linux Nix builder. The recommended
1299
-
way to do so is to run a [`darwin.linux-builder`
1300
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1301
-
and to register it in `nix.conf` as a builder for Linux
1302
-
with the same architecture as your Mac (`linux-aarch64` if
1303
-
you are using Apple Silicon).
1304
-
1305
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1306
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
1307
-
> you can do
1308
-
>
1309
-
> ```shell
1310
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1311
-
> ```
1312
-
>
1313
-
> to store the builder VM in a temporary dir.
1314
-
>
1315
-
> You should read and follow [all the other instructions][darwin builder vm] to
1316
-
> avoid subtle problems.
1317
-
1318
-
Alternatively, you can use any other method to set up a
1319
-
Linux machine with `nix` installed that you can `sudo ssh`
1320
-
into (in other words, root user on your Mac has to be able
1321
-
to ssh into the Linux machine without entering a password)
1322
-
and that has the same architecture as your Mac. See
1323
-
[remote builder
1324
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1325
-
for how to register such a builder in `nix.conf`.
1326
-
1327
-
> WARNING: If you'd like to use
1328
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1329
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1330
-
> ssh` works can be tricky. It seems to be [possible with
1331
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1332
-
1333
-
</details>
1334
-
1335
-
To begin, grab your DID from http://localhost:3000/settings.
1336
-
Then, set `TANGLED_VM_KNOT_OWNER` and
1337
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1338
-
lightweight NixOS VM like so:
1339
-
1340
-
```bash
1341
-
nix run --impure .#vm
1342
-
1343
-
# type `poweroff` at the shell to exit the VM
1344
-
```
1345
-
1346
-
This starts a knot on port 6444, a spindle on port 6555
1347
-
with `ssh` exposed on port 2222.
1348
-
1349
-
Once the services are running, head to
1350
-
http://localhost:3000/settings/knots and hit verify. It should
1351
-
verify the ownership of the services instantly if everything
1352
-
went smoothly.
1353
-
1354
-
You can push repositories to this VM with this ssh config
1355
-
block on your main machine:
1356
-
1357
-
```bash
1358
-
Host nixos-shell
1359
-
Hostname localhost
1360
-
Port 2222
1361
-
User git
1362
-
IdentityFile ~/.ssh/my_tangled_key
1363
-
```
1364
-
1365
-
Set up a remote called `local-dev` on a git repo:
1366
-
1367
-
```bash
1368
-
git remote add local-dev git@nixos-shell:user/repo
1369
-
git push local-dev main
1370
-
```
1371
-
1372
-
The above VM should already be running a spindle on
1373
-
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
1374
-
hit verify. You can then configure each repository to use
1375
-
this spindle and run CI jobs.
1376
-
1377
-
Of interest when debugging spindles:
1378
-
1379
-
```
1380
-
# service logs from journald:
1381
-
journalctl -xeu spindle
1382
-
1383
-
# CI job logs from disk:
1384
-
ls /var/log/spindle
1385
-
1386
-
# debugging spindle db:
1387
-
sqlite3 /var/lib/spindle/spindle.db
1388
-
1389
-
# litecli has a nicer REPL interface:
1390
-
litecli /var/lib/spindle/spindle.db
1391
-
```
1392
-
1393
-
If for any reason you wish to disable either one of the
1394
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1395
-
`services.tangled.spindle.enable` (or
1396
-
`services.tangled.knot.enable`) to `false`.
1397
-
1398
-
# Contribution guide
1399
-
1400
-
## Commit guidelines
1401
-
1402
-
We follow a commit style similar to the Go project. Please keep commits:
1403
-
1404
-
* **atomic**: each commit should represent one logical change
1405
-
* **descriptive**: the commit message should clearly describe what the
1406
-
change does and why it's needed
1407
-
1408
-
### Message format
1409
-
1410
-
```
1411
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
1412
-
1413
-
Optional longer description can go here, if necessary. Explain what the
1414
-
change does and why, especially if not obvious. Reference relevant
1415
-
issues or PRs when applicable. These can be links for now since we don't
1416
-
auto-link issues/PRs yet.
1417
-
```
1418
-
1419
-
Here are some examples:
1420
-
1421
-
```
1422
-
appview/state: fix token expiry check in middleware
1423
-
1424
-
The previous check did not account for clock drift, leading to premature
1425
-
token invalidation.
1426
-
```
1427
-
1428
-
```
1429
-
knotserver/git/service: improve error checking in upload-pack
1430
-
```
1431
-
1432
-
1433
-
### General notes
1434
-
1435
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
1436
-
using `git am`. At present, there is no squashing -- so please author
1437
-
your commits as they would appear on `master`, following the above
1438
-
guidelines.
1439
-
- If there is a lot of nesting, for example "appview:
1440
-
pages/templates/repo/fragments: ...", these can be truncated down to
1441
-
just "appview: repo/fragments: ...". If the change affects a lot of
1442
-
subdirectories, you may abbreviate to just the top-level names, e.g.
1443
-
"appview: ..." or "knotserver: ...".
1444
-
- Keep commits lowercased with no trailing period.
1445
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
1446
-
"fixed bug" or "fixes bug").
1447
-
- Try to keep the summary line under 72 characters, but we aren't too
1448
-
fussed about this.
1449
-
- Follow the same formatting for PR titles if filled manually.
1450
-
- Don't include unrelated changes in the same commit.
1451
-
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
1452
-
before submitting if necessary.
1453
-
1454
-
## Code formatting
1455
-
1456
-
We use a variety of tools to format our code, and multiplex them with
1457
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
1458
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1459
-
1460
-
## Proposals for bigger changes
1461
-
1462
-
Small fixes like typos, minor bugs, or trivial refactors can be
1463
-
submitted directly as PRs.
1464
-
1465
-
For larger changes—especially those introducing new features, significant
1466
-
refactoring, or altering system behavior—please open a proposal first. This
1467
-
helps us evaluate the scope, design, and potential impact before implementation.
1468
-
1469
-
Create a new issue titled:
1470
-
1471
-
```
1472
-
proposal: <affected scope>: <summary of change>
1473
-
```
1474
-
1475
-
In the description, explain:
1476
-
1477
-
- What the change is
1478
-
- Why it's needed
1479
-
- How you plan to implement it (roughly)
1480
-
- Any open questions or tradeoffs
1481
-
1482
-
We'll use the issue thread to discuss and refine the idea before moving
1483
-
forward.
1484
-
1485
-
## Developer certificate of origin (DCO)
1486
-
1487
-
We require all contributors to certify that they have the right to
1488
-
submit the code they're contributing. To do this, we follow the
1489
-
[Developer Certificate of Origin
1490
-
(DCO)](https://developercertificate.org/).
1491
-
1492
-
By signing your commits, you're stating that the contribution is your
1493
-
own work, or that you have the right to submit it under the project's
1494
-
license. This helps us keep things clean and legally sound.
1495
-
1496
-
To sign your commit, just add the `-s` flag when committing:
1497
-
1498
-
```sh
1499
-
git commit -s -m "your commit message"
1500
-
```
1501
-
1502
-
This appends a line like:
1503
-
1504
-
```
1505
-
Signed-off-by: Your Name <your.email@example.com>
1506
-
```
1507
-
1508
-
We won't merge commits if they aren't signed off. If you forget, you can
1509
-
amend the last commit like this:
1510
-
1511
-
```sh
1512
-
git commit --amend -s
1513
-
```
1514
-
1515
-
If you're submitting a PR with multiple commits, make sure each one is
1516
-
signed.
1517
-
1518
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
1519
-
to make it sign off commits in the tangled repo:
1520
-
1521
-
```shell
1522
-
# Safety check, should say "No matching config key..."
1523
-
jj config list templates.commit_trailers
1524
-
# The command below may need to be adjusted if the command above returned something.
1525
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
1526
-
```
1527
-
1528
-
Refer to the [jujutsu
1529
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1530
-
for more information.
+136
docs/contributing.md
+136
docs/contributing.md
···
1
+
# tangled contributing guide
2
+
3
+
## commit guidelines
4
+
5
+
We follow a commit style similar to the Go project. Please keep commits:
6
+
7
+
* **atomic**: each commit should represent one logical change
8
+
* **descriptive**: the commit message should clearly describe what the
9
+
change does and why it's needed
10
+
11
+
### message format
12
+
13
+
```
14
+
<service/top-level directory>/<affected package/directory>: <short summary of change>
15
+
16
+
17
+
Optional longer description can go here, if necessary. Explain what the
18
+
change does and why, especially if not obvious. Reference relevant
19
+
issues or PRs when applicable. These can be links for now since we don't
20
+
auto-link issues/PRs yet.
21
+
```
22
+
23
+
Here are some examples:
24
+
25
+
```
26
+
appview/state: fix token expiry check in middleware
27
+
28
+
The previous check did not account for clock drift, leading to premature
29
+
token invalidation.
30
+
```
31
+
32
+
```
33
+
knotserver/git/service: improve error checking in upload-pack
34
+
```
35
+
36
+
37
+
### general notes
38
+
39
+
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40
+
using `git am`. At present, there is no squashing -- so please author
41
+
your commits as they would appear on `master`, following the above
42
+
guidelines.
43
+
- If there is a lot of nesting, for example "appview:
44
+
pages/templates/repo/fragments: ...", these can be truncated down to
45
+
just "appview: repo/fragments: ...". If the change affects a lot of
46
+
subdirectories, you may abbreviate to just the top-level names, e.g.
47
+
"appview: ..." or "knotserver: ...".
48
+
- Keep commits lowercased with no trailing period.
49
+
- Use the imperative mood in the summary line (e.g., "fix bug" not
50
+
"fixed bug" or "fixes bug").
51
+
- Try to keep the summary line under 72 characters, but we aren't too
52
+
fussed about this.
53
+
- Follow the same formatting for PR titles if filled manually.
54
+
- Don't include unrelated changes in the same commit.
55
+
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
56
+
before submitting if necessary.
57
+
58
+
## code formatting
59
+
60
+
We use a variety of tools to format our code, and multiplex them with
61
+
[`treefmt`](https://treefmt.com): all you need to do to format your changes
62
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63
+
64
+
## proposals for bigger changes
65
+
66
+
Small fixes like typos, minor bugs, or trivial refactors can be
67
+
submitted directly as PRs.
68
+
69
+
For larger changes—especially those introducing new features, significant
70
+
refactoring, or altering system behavior—please open a proposal first. This
71
+
helps us evaluate the scope, design, and potential impact before implementation.
72
+
73
+
### proposal format
74
+
75
+
Create a new issue titled:
76
+
77
+
```
78
+
proposal: <affected scope>: <summary of change>
79
+
```
80
+
81
+
In the description, explain:
82
+
83
+
- What the change is
84
+
- Why it's needed
85
+
- How you plan to implement it (roughly)
86
+
- Any open questions or tradeoffs
87
+
88
+
We'll use the issue thread to discuss and refine the idea before moving
89
+
forward.
90
+
91
+
## developer certificate of origin (DCO)
92
+
93
+
We require all contributors to certify that they have the right to
94
+
submit the code they're contributing. To do this, we follow the
95
+
[Developer Certificate of Origin
96
+
(DCO)](https://developercertificate.org/).
97
+
98
+
By signing your commits, you're stating that the contribution is your
99
+
own work, or that you have the right to submit it under the project's
100
+
license. This helps us keep things clean and legally sound.
101
+
102
+
To sign your commit, just add the `-s` flag when committing:
103
+
104
+
```sh
105
+
git commit -s -m "your commit message"
106
+
```
107
+
108
+
This appends a line like:
109
+
110
+
```
111
+
Signed-off-by: Your Name <your.email@example.com>
112
+
```
113
+
114
+
We won't merge commits if they aren't signed off. If you forget, you can
115
+
amend the last commit like this:
116
+
117
+
```sh
118
+
git commit --amend -s
119
+
```
120
+
121
+
If you're submitting a PR with multiple commits, make sure each one is
122
+
signed.
123
+
124
+
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125
+
to make it sign off commits in the tangled repo:
126
+
127
+
```shell
128
+
# Safety check, should say "No matching config key..."
129
+
jj config list templates.commit_trailers
130
+
# The command below may need to be adjusted if the command above returned something.
131
+
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132
+
```
133
+
134
+
Refer to the [jj
135
+
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136
+
for more information.
+172
docs/hacking.md
+172
docs/hacking.md
···
1
+
# hacking on tangled
2
+
3
+
We highly recommend [installing
4
+
nix](https://nixos.org/download/) (the package manager)
5
+
before working on the codebase. The nix flake provides a lot
6
+
of helpers to get started and most importantly, builds and
7
+
dev shells are entirely deterministic.
8
+
9
+
To set up your dev environment:
10
+
11
+
```bash
12
+
nix develop
13
+
```
14
+
15
+
Non-nix users can look at the `devShell` attribute in the
16
+
`flake.nix` file to determine necessary dependencies.
17
+
18
+
## running the appview
19
+
20
+
The nix flake also exposes a few `app` attributes (run `nix
21
+
flake show` to see a full list of what the flake provides),
22
+
one of the apps runs the appview with the `air`
23
+
live-reloader:
24
+
25
+
```bash
26
+
TANGLED_DEV=true nix run .#watch-appview
27
+
28
+
# TANGLED_DB_PATH might be of interest to point to
29
+
# different sqlite DBs
30
+
31
+
# in a separate shell, you can live-reload tailwind
32
+
nix run .#watch-tailwind
33
+
```
34
+
35
+
To authenticate with the appview, you will need redis and
36
+
OAUTH JWKs to be set up:
37
+
38
+
```
39
+
# oauth jwks should already be setup by the nix devshell:
40
+
echo $TANGLED_OAUTH_CLIENT_SECRET
41
+
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42
+
43
+
echo $TANGLED_OAUTH_CLIENT_KID
44
+
1761667908
45
+
46
+
# if not, you can set it up yourself:
47
+
goat key generate -t P-256
48
+
Key Type: P-256 / secp256r1 / ES256 private key
49
+
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50
+
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51
+
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52
+
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53
+
54
+
# the secret key from above
55
+
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56
+
57
+
# run redis in a new shell to store oauth sessions
58
+
redis-server
59
+
```
60
+
61
+
## running knots and spindles
62
+
63
+
An end-to-end knot setup requires setting up a machine with
64
+
`sshd`, `AuthorizedKeysCommand`, and git user, which is
65
+
quite cumbersome. So the nix flake provides a
66
+
`nixosConfiguration` to do so.
67
+
68
+
<details>
69
+
<summary><strong>MacOS users will have to set up a Nix Builder first</strong></summary>
70
+
71
+
In order to build Tangled's dev VM on macOS, you will
72
+
first need to set up a Linux Nix builder. The recommended
73
+
way to do so is to run a [`darwin.linux-builder`
74
+
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75
+
and to register it in `nix.conf` as a builder for Linux
76
+
with the same architecture as your Mac (`linux-aarch64` if
77
+
you are using Apple Silicon).
78
+
79
+
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80
+
> the tangled repo so that it doesn't conflict with the other VM. For example,
81
+
> you can do
82
+
>
83
+
> ```shell
84
+
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85
+
> ```
86
+
>
87
+
> to store the builder VM in a temporary dir.
88
+
>
89
+
> You should read and follow [all the other instructions][darwin builder vm] to
90
+
> avoid subtle problems.
91
+
92
+
Alternatively, you can use any other method to set up a
93
+
Linux machine with `nix` installed that you can `sudo ssh`
94
+
into (in other words, root user on your Mac has to be able
95
+
to ssh into the Linux machine without entering a password)
96
+
and that has the same architecture as your Mac. See
97
+
[remote builder
98
+
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99
+
for how to register such a builder in `nix.conf`.
100
+
101
+
> WARNING: If you'd like to use
102
+
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103
+
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104
+
> ssh` works can be tricky. It seems to be [possible with
105
+
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106
+
107
+
</details>
108
+
109
+
To begin, grab your DID from http://localhost:3000/settings.
110
+
Then, set `TANGLED_VM_KNOT_OWNER` and
111
+
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112
+
lightweight NixOS VM like so:
113
+
114
+
```bash
115
+
nix run --impure .#vm
116
+
117
+
# type `poweroff` at the shell to exit the VM
118
+
```
119
+
120
+
This starts a knot on port 6444, a spindle on port 6555
121
+
with `ssh` exposed on port 2222.
122
+
123
+
Once the services are running, head to
124
+
http://localhost:3000/settings/knots and hit verify. It should
125
+
verify the ownership of the services instantly if everything
126
+
went smoothly.
127
+
128
+
You can push repositories to this VM with this ssh config
129
+
block on your main machine:
130
+
131
+
```bash
132
+
Host nixos-shell
133
+
Hostname localhost
134
+
Port 2222
135
+
User git
136
+
IdentityFile ~/.ssh/my_tangled_key
137
+
```
138
+
139
+
Set up a remote called `local-dev` on a git repo:
140
+
141
+
```bash
142
+
git remote add local-dev git@nixos-shell:user/repo
143
+
git push local-dev main
144
+
```
145
+
146
+
### running a spindle
147
+
148
+
The above VM should already be running a spindle on
149
+
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150
+
hit verify. You can then configure each repository to use
151
+
this spindle and run CI jobs.
152
+
153
+
Of interest when debugging spindles:
154
+
155
+
```
156
+
# service logs from journald:
157
+
journalctl -xeu spindle
158
+
159
+
# CI job logs from disk:
160
+
ls /var/log/spindle
161
+
162
+
# debugging spindle db:
163
+
sqlite3 /var/lib/spindle/spindle.db
164
+
165
+
# litecli has a nicer REPL interface:
166
+
litecli /var/lib/spindle/spindle.db
167
+
```
168
+
169
+
If for any reason you wish to disable either one of the
170
+
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171
+
`services.tangled.spindle.enable` (or
172
+
`services.tangled.knot.enable`) to `false`.
-93
docs/highlight.theme
-93
docs/highlight.theme
···
1
-
{
2
-
"text-color": null,
3
-
"background-color": null,
4
-
"line-number-color": null,
5
-
"line-number-background-color": null,
6
-
"text-styles": {
7
-
"Annotation": {
8
-
"text-color": null,
9
-
"background-color": null,
10
-
"bold": false,
11
-
"italic": true,
12
-
"underline": false
13
-
},
14
-
"ControlFlow": {
15
-
"text-color": null,
16
-
"background-color": null,
17
-
"bold": true,
18
-
"italic": false,
19
-
"underline": false
20
-
},
21
-
"Error": {
22
-
"text-color": null,
23
-
"background-color": null,
24
-
"bold": true,
25
-
"italic": false,
26
-
"underline": false
27
-
},
28
-
"Alert": {
29
-
"text-color": null,
30
-
"background-color": null,
31
-
"bold": true,
32
-
"italic": false,
33
-
"underline": false
34
-
},
35
-
"Preprocessor": {
36
-
"text-color": null,
37
-
"background-color": null,
38
-
"bold": true,
39
-
"italic": false,
40
-
"underline": false
41
-
},
42
-
"Information": {
43
-
"text-color": null,
44
-
"background-color": null,
45
-
"bold": false,
46
-
"italic": true,
47
-
"underline": false
48
-
},
49
-
"Warning": {
50
-
"text-color": null,
51
-
"background-color": null,
52
-
"bold": false,
53
-
"italic": true,
54
-
"underline": false
55
-
},
56
-
"Documentation": {
57
-
"text-color": null,
58
-
"background-color": null,
59
-
"bold": false,
60
-
"italic": true,
61
-
"underline": false
62
-
},
63
-
"DataType": {
64
-
"text-color": "#8f4e8b",
65
-
"background-color": null,
66
-
"bold": false,
67
-
"italic": false,
68
-
"underline": false
69
-
},
70
-
"Comment": {
71
-
"text-color": null,
72
-
"background-color": null,
73
-
"bold": false,
74
-
"italic": true,
75
-
"underline": false
76
-
},
77
-
"CommentVar": {
78
-
"text-color": null,
79
-
"background-color": null,
80
-
"bold": false,
81
-
"italic": true,
82
-
"underline": false
83
-
},
84
-
"Keyword": {
85
-
"text-color": null,
86
-
"background-color": null,
87
-
"bold": true,
88
-
"italic": false,
89
-
"underline": false
90
-
}
91
-
}
92
-
}
93
-
+214
docs/knot-hosting.md
+214
docs/knot-hosting.md
···
1
+
# knot self-hosting guide
2
+
3
+
So you want to run your own knot server? Great! Here are a few prerequisites:
4
+
5
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6
+
2. A (sub)domain name. People generally use `knot.example.com`.
7
+
3. A valid SSL certificate for your domain.
8
+
9
+
There's a couple of ways to get started:
10
+
* NixOS: refer to
11
+
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12
+
* Docker: Documented at
13
+
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14
+
(community maintained: support is not guaranteed!)
15
+
* Manual: Documented below.
16
+
17
+
## manual setup
18
+
19
+
First, clone this repository:
20
+
21
+
```
22
+
git clone https://tangled.org/@tangled.org/core
23
+
```
24
+
25
+
Then, build the `knot` CLI. This is the knot administration and operation tool.
26
+
For the purpose of this guide, we're only concerned with these subcommands:
27
+
28
+
* `knot server`: the main knot server process, typically run as a
29
+
supervised service
30
+
* `knot guard`: handles role-based access control for git over SSH
31
+
(you'll never have to run this yourself)
32
+
* `knot keys`: fetches SSH keys associated with your knot; we'll use
33
+
this to generate the SSH `AuthorizedKeysCommand`
34
+
35
+
```
36
+
cd core
37
+
export CGO_ENABLED=1
38
+
go build -o knot ./cmd/knot
39
+
```
40
+
41
+
Next, move the `knot` binary to a location owned by `root` --
42
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43
+
44
+
```
45
+
sudo mv knot /usr/local/bin/knot
46
+
sudo chown root:root /usr/local/bin/knot
47
+
```
48
+
49
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
50
+
specific permissions](https://stackoverflow.com/a/27638306). The
51
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52
+
retrieve a user's public SSH keys dynamically for authentication. Let's
53
+
set that up.
54
+
55
+
```
56
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57
+
Match User git
58
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59
+
AuthorizedKeysCommandUser nobody
60
+
EOF
61
+
```
62
+
63
+
Then, reload `sshd`:
64
+
65
+
```
66
+
sudo systemctl reload ssh
67
+
```
68
+
69
+
Next, create the `git` user. We'll use the `git` user's home directory
70
+
to store repositories:
71
+
72
+
```
73
+
sudo adduser git
74
+
```
75
+
76
+
Create `/home/git/.knot.env` with the following, updating the values as
77
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
78
+
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
79
+
80
+
```
81
+
KNOT_REPO_SCAN_PATH=/home/git
82
+
KNOT_SERVER_HOSTNAME=knot.example.com
83
+
APPVIEW_ENDPOINT=https://tangled.sh
84
+
KNOT_SERVER_OWNER=did:plc:foobar
85
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87
+
```
88
+
89
+
If you run a Linux distribution that uses systemd, you can use the provided
90
+
service file to run the server. Copy
91
+
[`knotserver.service`](/systemd/knotserver.service)
92
+
to `/etc/systemd/system/`. Then, run:
93
+
94
+
```
95
+
systemctl enable knotserver
96
+
systemctl start knotserver
97
+
```
98
+
99
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
100
+
knot. Here's an example configuration for Nginx:
101
+
102
+
```
103
+
server {
104
+
listen 80;
105
+
listen [::]:80;
106
+
server_name knot.example.com;
107
+
108
+
location / {
109
+
proxy_pass http://localhost:5555;
110
+
proxy_set_header Host $host;
111
+
proxy_set_header X-Real-IP $remote_addr;
112
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113
+
proxy_set_header X-Forwarded-Proto $scheme;
114
+
}
115
+
116
+
# wss endpoint for git events
117
+
location /events {
118
+
proxy_set_header X-Forwarded-For $remote_addr;
119
+
proxy_set_header Host $http_host;
120
+
proxy_set_header Upgrade websocket;
121
+
proxy_set_header Connection Upgrade;
122
+
proxy_pass http://localhost:5555;
123
+
}
124
+
# additional config for SSL/TLS go here.
125
+
}
126
+
127
+
```
128
+
129
+
Remember to use Let's Encrypt or similar to procure a certificate for your
130
+
knot domain.
131
+
132
+
You should now have a running knot server! You can finalize
133
+
your registration by hitting the `verify` button on the
134
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135
+
a record on your PDS to announce the existence of the knot.
136
+
137
+
### custom paths
138
+
139
+
(This section applies to manual setup only. Docker users should edit the mounts
140
+
in `docker-compose.yml` instead.)
141
+
142
+
Right now, the database and repositories of your knot lives in `/home/git`. You
143
+
can move these paths if you'd like to store them in another folder. Be careful
144
+
when adjusting these paths:
145
+
146
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147
+
any possible side effects. Remember to restart it once you're done.
148
+
* Make backups before moving in case something goes wrong.
149
+
* Make sure the `git` user can read and write from the new paths.
150
+
151
+
#### database
152
+
153
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
154
+
and we want to move it to `/home/git/database/knotserver.db`.
155
+
156
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
157
+
and `.db-wal` files if they exist.
158
+
159
+
```
160
+
mkdir /home/git/database
161
+
cp /home/git/knotserver.db* /home/git/database
162
+
```
163
+
164
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165
+
the new file path (_not_ the directory):
166
+
167
+
```
168
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169
+
```
170
+
171
+
#### repositories
172
+
173
+
As an example, let's say the repositories are currently in `/home/git`, and we
174
+
want to move them into `/home/git/repositories`.
175
+
176
+
Create the new folder, then move the existing repositories (if there are any):
177
+
178
+
```
179
+
mkdir /home/git/repositories
180
+
# move all DIDs into the new folder; these will vary for you!
181
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182
+
```
183
+
184
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185
+
to the new directory:
186
+
187
+
```
188
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
189
+
```
190
+
191
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192
+
repository path:
193
+
194
+
```
195
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196
+
Match User git
197
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198
+
AuthorizedKeysCommandUser nobody
199
+
EOF
200
+
```
201
+
202
+
Make sure to restart your SSH server!
203
+
204
+
#### MOTD (message of the day)
205
+
206
+
To configure the MOTD used ("Welcome to this knot!" by default), edit the
207
+
`/home/git/motd` file:
208
+
209
+
```
210
+
printf "Hi from this knot!\n" > /home/git/motd
211
+
```
212
+
213
+
Note that you should add a newline at the end if setting a non-empty message
214
+
since the knot won't do this for you.
+59
docs/migrations.md
+59
docs/migrations.md
···
1
+
# Migrations
2
+
3
+
This document is laid out in reverse-chronological order.
4
+
Newer migration guides are listed first, and older guides
5
+
are further down the page.
6
+
7
+
## Upgrading from v1.8.x
8
+
9
+
After v1.8.2, the HTTP API for knot and spindles have been
10
+
deprecated and replaced with XRPC. Repositories on outdated
11
+
knots will not be viewable from the appview. Upgrading is
12
+
straightforward however.
13
+
14
+
For knots:
15
+
16
+
- Upgrade to latest tag (v1.9.0 or above)
17
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18
+
hit the "retry" button to verify your knot
19
+
20
+
For spindles:
21
+
22
+
- Upgrade to latest tag (v1.9.0 or above)
23
+
- Head to the [spindle
24
+
dashboard](https://tangled.org/settings/spindles) and hit the
25
+
"retry" button to verify your spindle
26
+
27
+
## Upgrading from v1.7.x
28
+
29
+
After v1.7.0, knot secrets have been deprecated. You no
30
+
longer need a secret from the appview to run a knot. All
31
+
authorized commands to knots are managed via [Inter-Service
32
+
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33
+
Knots will be read-only until upgraded.
34
+
35
+
Upgrading is quite easy, in essence:
36
+
37
+
- `KNOT_SERVER_SECRET` is no more, you can remove this
38
+
environment variable entirely
39
+
- `KNOT_SERVER_OWNER` is now required on boot, set this to
40
+
your DID. You can find your DID in the
41
+
[settings](https://tangled.org/settings) page.
42
+
- Restart your knot once you have replaced the environment
43
+
variable
44
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45
+
hit the "retry" button to verify your knot. This simply
46
+
writes a `sh.tangled.knot` record to your PDS.
47
+
48
+
If you use the nix module, simply bump the flake to the
49
+
latest revision, and change your config block like so:
50
+
51
+
```diff
52
+
services.tangled.knot = {
53
+
enable = true;
54
+
server = {
55
+
- secretFile = /path/to/secret;
56
+
+ owner = "did:plc:foo";
57
+
};
58
+
};
59
+
```
+25
docs/spindle/architecture.md
+25
docs/spindle/architecture.md
···
1
+
# spindle architecture
2
+
3
+
Spindle is a small CI runner service. Here's a high level overview of how it operates:
4
+
5
+
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7
+
* when a new repo record comes through (typically when you add a spindle to a
8
+
repo from the settings), spindle then resolves the underlying knot and
9
+
subscribes to repo events (see:
10
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11
+
* the spindle engine then handles execution of the pipeline, with results and
12
+
logs beamed on the spindle event stream over wss
13
+
14
+
### the engine
15
+
16
+
At present, the only supported backend is Docker (and Podman, if Docker
17
+
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18
+
executes each step in the pipeline in a fresh container, with state persisted
19
+
across steps within the `/tangled/workspace` directory.
20
+
21
+
The base image for the container is constructed on the fly using
22
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23
+
used packages.
24
+
25
+
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
+52
docs/spindle/hosting.md
+52
docs/spindle/hosting.md
···
1
+
# spindle self-hosting guide
2
+
3
+
## prerequisites
4
+
5
+
* Go
6
+
* Docker (the only supported backend currently)
7
+
8
+
## configuration
9
+
10
+
Spindle is configured using environment variables. The following environment variables are available:
11
+
12
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21
+
22
+
## running spindle
23
+
24
+
1. **Set the environment variables.** For example:
25
+
26
+
```shell
27
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
28
+
export SPINDLE_SERVER_OWNER="your-did"
29
+
```
30
+
31
+
2. **Build the Spindle binary.**
32
+
33
+
```shell
34
+
cd core
35
+
go mod download
36
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
37
+
```
38
+
39
+
3. **Create the log directory.**
40
+
41
+
```shell
42
+
sudo mkdir -p /var/log/spindle
43
+
sudo chown $USER:$USER -R /var/log/spindle
44
+
```
45
+
46
+
4. **Run the Spindle binary.**
47
+
48
+
```shell
49
+
./cmd/spindle/spindle
50
+
```
51
+
52
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
+285
docs/spindle/openbao.md
+285
docs/spindle/openbao.md
···
1
+
# spindle secrets with openbao
2
+
3
+
This document covers setting up Spindle to use OpenBao for secrets
4
+
management via OpenBao Proxy instead of the default SQLite backend.
5
+
6
+
## overview
7
+
8
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9
+
authentication automatically using AppRole credentials, while Spindle
10
+
connects to the local proxy instead of directly to the OpenBao server.
11
+
12
+
This approach provides better security, automatic token renewal, and
13
+
simplified application code.
14
+
15
+
## installation
16
+
17
+
Install OpenBao from nixpkgs:
18
+
19
+
```bash
20
+
nix shell nixpkgs#openbao # for a local server
21
+
```
22
+
23
+
## setup
24
+
25
+
The setup process is documented for both local development and production.
26
+
27
+
### local development
28
+
29
+
Start OpenBao in dev mode:
30
+
31
+
```bash
32
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33
+
```
34
+
35
+
This starts OpenBao on `http://localhost:8201` with a root token.
36
+
37
+
Set up environment for bao CLI:
38
+
39
+
```bash
40
+
export BAO_ADDR=http://localhost:8201
41
+
export BAO_TOKEN=root
42
+
```
43
+
44
+
### production
45
+
46
+
You would typically use a systemd service with a configuration file. Refer to
47
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48
+
achieved using Nix.
49
+
50
+
Then, initialize the bao server:
51
+
```bash
52
+
bao operator init -key-shares=1 -key-threshold=1
53
+
```
54
+
55
+
This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56
+
```bash
57
+
bao operator unseal <unseal_key>
58
+
```
59
+
60
+
All steps below remain the same across both dev and production setups.
61
+
62
+
### configure openbao server
63
+
64
+
Create the spindle KV mount:
65
+
66
+
```bash
67
+
bao secrets enable -path=spindle -version=2 kv
68
+
```
69
+
70
+
Set up AppRole authentication and policy:
71
+
72
+
Create a policy file `spindle-policy.hcl`:
73
+
74
+
```hcl
75
+
# Full access to spindle KV v2 data
76
+
path "spindle/data/*" {
77
+
capabilities = ["create", "read", "update", "delete"]
78
+
}
79
+
80
+
# Access to metadata for listing and management
81
+
path "spindle/metadata/*" {
82
+
capabilities = ["list", "read", "delete", "update"]
83
+
}
84
+
85
+
# Allow listing at root level
86
+
path "spindle/" {
87
+
capabilities = ["list"]
88
+
}
89
+
90
+
# Required for connection testing and health checks
91
+
path "auth/token/lookup-self" {
92
+
capabilities = ["read"]
93
+
}
94
+
```
95
+
96
+
Apply the policy and create an AppRole:
97
+
98
+
```bash
99
+
bao policy write spindle-policy spindle-policy.hcl
100
+
bao auth enable approle
101
+
bao write auth/approle/role/spindle \
102
+
token_policies="spindle-policy" \
103
+
token_ttl=1h \
104
+
token_max_ttl=4h \
105
+
bind_secret_id=true \
106
+
secret_id_ttl=0 \
107
+
secret_id_num_uses=0
108
+
```
109
+
110
+
Get the credentials:
111
+
112
+
```bash
113
+
# Get role ID (static)
114
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115
+
116
+
# Generate secret ID
117
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118
+
119
+
echo "Role ID: $ROLE_ID"
120
+
echo "Secret ID: $SECRET_ID"
121
+
```
122
+
123
+
### create proxy configuration
124
+
125
+
Create the credential files:
126
+
127
+
```bash
128
+
# Create directory for OpenBao files
129
+
mkdir -p /tmp/openbao
130
+
131
+
# Save credentials
132
+
echo "$ROLE_ID" > /tmp/openbao/role-id
133
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
134
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135
+
```
136
+
137
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138
+
139
+
```hcl
140
+
# OpenBao server connection
141
+
vault {
142
+
address = "http://localhost:8200"
143
+
}
144
+
145
+
# Auto-Auth using AppRole
146
+
auto_auth {
147
+
method "approle" {
148
+
mount_path = "auth/approle"
149
+
config = {
150
+
role_id_file_path = "/tmp/openbao/role-id"
151
+
secret_id_file_path = "/tmp/openbao/secret-id"
152
+
}
153
+
}
154
+
155
+
# Optional: write token to file for debugging
156
+
sink "file" {
157
+
config = {
158
+
path = "/tmp/openbao/token"
159
+
mode = 0640
160
+
}
161
+
}
162
+
}
163
+
164
+
# Proxy listener for Spindle
165
+
listener "tcp" {
166
+
address = "127.0.0.1:8201"
167
+
tls_disable = true
168
+
}
169
+
170
+
# Enable API proxy with auto-auth token
171
+
api_proxy {
172
+
use_auto_auth_token = true
173
+
}
174
+
175
+
# Enable response caching
176
+
cache {
177
+
use_auto_auth_token = true
178
+
}
179
+
180
+
# Logging
181
+
log_level = "info"
182
+
```
183
+
184
+
### start the proxy
185
+
186
+
Start OpenBao Proxy:
187
+
188
+
```bash
189
+
bao proxy -config=/tmp/openbao/proxy.hcl
190
+
```
191
+
192
+
The proxy will authenticate with OpenBao and start listening on
193
+
`127.0.0.1:8201`.
194
+
195
+
### configure spindle
196
+
197
+
Set these environment variables for Spindle:
198
+
199
+
```bash
200
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203
+
```
204
+
205
+
Start Spindle:
206
+
207
+
Spindle will now connect to the local proxy, which handles all
208
+
authentication automatically.
209
+
210
+
## production setup for proxy
211
+
212
+
For production, you'll want to run the proxy as a service:
213
+
214
+
Place your production configuration in `/etc/openbao/proxy.hcl` with
215
+
proper TLS settings for the vault connection.
216
+
217
+
## verifying setup
218
+
219
+
Test the proxy directly:
220
+
221
+
```bash
222
+
# Check proxy health
223
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224
+
225
+
# Test token lookup through proxy
226
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227
+
```
228
+
229
+
Test OpenBao operations through the server:
230
+
231
+
```bash
232
+
# List all secrets
233
+
bao kv list spindle/
234
+
235
+
# Add a test secret via Spindle API, then check it exists
236
+
bao kv list spindle/repos/
237
+
238
+
# Get a specific secret
239
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
240
+
```
241
+
242
+
## how it works
243
+
244
+
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245
+
- The proxy authenticates with OpenBao using AppRole credentials
246
+
- All Spindle requests go through the proxy, which injects authentication tokens
247
+
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248
+
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249
+
- The proxy handles all token renewal automatically
250
+
- Spindle no longer manages tokens or authentication directly
251
+
252
+
## troubleshooting
253
+
254
+
**Connection refused**: Check that the OpenBao Proxy is running and
255
+
listening on the configured address.
256
+
257
+
**403 errors**: Verify the AppRole credentials are correct and the policy
258
+
has the necessary permissions.
259
+
260
+
**404 route errors**: The spindle KV mount probably doesn't exist - run
261
+
the mount creation step again.
262
+
263
+
**Proxy authentication failures**: Check the proxy logs and verify the
264
+
role-id and secret-id files are readable and contain valid credentials.
265
+
266
+
**Secret not found after writing**: This can indicate policy permission
267
+
issues. Verify the policy includes both `spindle/data/*` and
268
+
`spindle/metadata/*` paths with appropriate capabilities.
269
+
270
+
Check proxy logs:
271
+
272
+
```bash
273
+
# If running as systemd service
274
+
journalctl -u openbao-proxy -f
275
+
276
+
# If running directly, check the console output
277
+
```
278
+
279
+
Test AppRole authentication manually:
280
+
281
+
```bash
282
+
bao write auth/approle/login \
283
+
role_id="$(cat /tmp/openbao/role-id)" \
284
+
secret_id="$(cat /tmp/openbao/secret-id)"
285
+
```
+183
docs/spindle/pipeline.md
+183
docs/spindle/pipeline.md
···
1
+
# spindle pipelines
2
+
3
+
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4
+
5
+
The fields are:
6
+
7
+
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8
+
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9
+
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10
+
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11
+
- [Environment](#environment): An **optional** field that allows you to define environment variables.
12
+
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13
+
14
+
## Trigger
15
+
16
+
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17
+
18
+
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19
+
- `push`: The workflow should run every time a commit is pushed to the repository.
20
+
- `pull_request`: The workflow should run every time a pull request is made or updated.
21
+
- `manual`: The workflow can be triggered manually.
22
+
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23
+
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24
+
25
+
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26
+
27
+
```yaml
28
+
when:
29
+
- event: ["push", "manual"]
30
+
branch: ["main", "develop"]
31
+
- event: ["pull_request"]
32
+
branch: ["main"]
33
+
```
34
+
35
+
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36
+
37
+
```yaml
38
+
when:
39
+
- event: ["push"]
40
+
tag: ["v*"]
41
+
```
42
+
43
+
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44
+
45
+
```yaml
46
+
when:
47
+
- event: ["push"]
48
+
branch: ["main", "release-*"]
49
+
tag: ["v*", "stable"]
50
+
```
51
+
52
+
## Engine
53
+
54
+
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55
+
56
+
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57
+
58
+
Example:
59
+
60
+
```yaml
61
+
engine: "nixery"
62
+
```
63
+
64
+
## Clone options
65
+
66
+
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67
+
68
+
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69
+
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70
+
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71
+
72
+
The default settings are:
73
+
74
+
```yaml
75
+
clone:
76
+
skip: false
77
+
depth: 1
78
+
submodules: false
79
+
```
80
+
81
+
## Dependencies
82
+
83
+
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84
+
85
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made available from your own registry at `https://tangled.org/@example.com/my_pkg`. You can define those dependencies like so:
86
+
87
+
```yaml
88
+
dependencies:
89
+
# nixpkgs
90
+
nixpkgs:
91
+
- nodejs
92
+
- go
93
+
# custom registry
94
+
git+https://tangled.org/@example.com/my_pkg:
95
+
- my_pkg
96
+
```
97
+
98
+
Now these dependencies are available to use in your workflow!
99
+
100
+
## Environment
101
+
102
+
The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103
+
104
+
Example:
105
+
106
+
```yaml
107
+
environment:
108
+
GOOS: "linux"
109
+
GOARCH: "arm64"
110
+
NODE_ENV: "production"
111
+
MY_ENV_VAR: "MY_ENV_VALUE"
112
+
```
113
+
114
+
## Steps
115
+
116
+
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117
+
118
+
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119
+
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120
+
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121
+
122
+
Example:
123
+
124
+
```yaml
125
+
steps:
126
+
- name: "Build backend"
127
+
command: "go build"
128
+
environment:
129
+
GOOS: "darwin"
130
+
GOARCH: "arm64"
131
+
- name: "Build frontend"
132
+
command: "npm run build"
133
+
environment:
134
+
NODE_ENV: "production"
135
+
```
136
+
137
+
## Complete workflow
138
+
139
+
```yaml
140
+
# .tangled/workflows/build.yml
141
+
142
+
when:
143
+
- event: ["push", "manual"]
144
+
branch: ["main", "develop"]
145
+
- event: ["pull_request"]
146
+
branch: ["main"]
147
+
148
+
engine: "nixery"
149
+
150
+
# using the default values
151
+
clone:
152
+
skip: false
153
+
depth: 1
154
+
submodules: false
155
+
156
+
dependencies:
157
+
# nixpkgs
158
+
nixpkgs:
159
+
- nodejs
160
+
- go
161
+
# custom registry
162
+
git+https://tangled.org/@example.com/my_pkg:
163
+
- my_pkg
164
+
165
+
environment:
166
+
GOOS: "linux"
167
+
GOARCH: "arm64"
168
+
NODE_ENV: "production"
169
+
MY_ENV_VAR: "MY_ENV_VALUE"
170
+
171
+
steps:
172
+
- name: "Build backend"
173
+
command: "go build"
174
+
environment:
175
+
GOOS: "darwin"
176
+
GOARCH: "arm64"
177
+
- name: "Build frontend"
178
+
command: "npm run build"
179
+
environment:
180
+
NODE_ENV: "production"
181
+
```
182
+
183
+
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
-101
docs/styles.css
-101
docs/styles.css
···
1
-
svg {
2
-
width: 16px;
3
-
height: 16px;
4
-
}
5
-
6
-
:root {
7
-
--syntax-alert: #d20f39;
8
-
--syntax-annotation: #fe640b;
9
-
--syntax-attribute: #df8e1d;
10
-
--syntax-basen: #40a02b;
11
-
--syntax-builtin: #1e66f5;
12
-
--syntax-controlflow: #8839ef;
13
-
--syntax-char: #04a5e5;
14
-
--syntax-constant: #fe640b;
15
-
--syntax-comment: #9ca0b0;
16
-
--syntax-commentvar: #7c7f93;
17
-
--syntax-documentation: #9ca0b0;
18
-
--syntax-datatype: #df8e1d;
19
-
--syntax-decval: #40a02b;
20
-
--syntax-error: #d20f39;
21
-
--syntax-extension: #4c4f69;
22
-
--syntax-float: #40a02b;
23
-
--syntax-function: #1e66f5;
24
-
--syntax-import: #40a02b;
25
-
--syntax-information: #04a5e5;
26
-
--syntax-keyword: #8839ef;
27
-
--syntax-operator: #179299;
28
-
--syntax-other: #8839ef;
29
-
--syntax-preprocessor: #ea76cb;
30
-
--syntax-specialchar: #04a5e5;
31
-
--syntax-specialstring: #ea76cb;
32
-
--syntax-string: #40a02b;
33
-
--syntax-variable: #8839ef;
34
-
--syntax-verbatimstring: #40a02b;
35
-
--syntax-warning: #df8e1d;
36
-
}
37
-
38
-
@media (prefers-color-scheme: dark) {
39
-
:root {
40
-
--syntax-alert: #f38ba8;
41
-
--syntax-annotation: #fab387;
42
-
--syntax-attribute: #f9e2af;
43
-
--syntax-basen: #a6e3a1;
44
-
--syntax-builtin: #89b4fa;
45
-
--syntax-controlflow: #cba6f7;
46
-
--syntax-char: #89dceb;
47
-
--syntax-constant: #fab387;
48
-
--syntax-comment: #6c7086;
49
-
--syntax-commentvar: #585b70;
50
-
--syntax-documentation: #6c7086;
51
-
--syntax-datatype: #f9e2af;
52
-
--syntax-decval: #a6e3a1;
53
-
--syntax-error: #f38ba8;
54
-
--syntax-extension: #cdd6f4;
55
-
--syntax-float: #a6e3a1;
56
-
--syntax-function: #89b4fa;
57
-
--syntax-import: #a6e3a1;
58
-
--syntax-information: #89dceb;
59
-
--syntax-keyword: #cba6f7;
60
-
--syntax-operator: #94e2d5;
61
-
--syntax-other: #cba6f7;
62
-
--syntax-preprocessor: #f5c2e7;
63
-
--syntax-specialchar: #89dceb;
64
-
--syntax-specialstring: #f5c2e7;
65
-
--syntax-string: #a6e3a1;
66
-
--syntax-variable: #cba6f7;
67
-
--syntax-verbatimstring: #a6e3a1;
68
-
--syntax-warning: #f9e2af;
69
-
}
70
-
}
71
-
72
-
/* pandoc syntax highlighting classes */
73
-
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
74
-
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
75
-
code span.at { color: var(--syntax-attribute); } /* attribute */
76
-
code span.bn { color: var(--syntax-basen); } /* basen */
77
-
code span.bu { color: var(--syntax-builtin); } /* builtin */
78
-
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
79
-
code span.ch { color: var(--syntax-char); } /* char */
80
-
code span.cn { color: var(--syntax-constant); } /* constant */
81
-
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
82
-
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
83
-
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
84
-
code span.dt { color: var(--syntax-datatype); } /* datatype */
85
-
code span.dv { color: var(--syntax-decval); } /* decval */
86
-
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
87
-
code span.ex { color: var(--syntax-extension); } /* extension */
88
-
code span.fl { color: var(--syntax-float); } /* float */
89
-
code span.fu { color: var(--syntax-function); } /* function */
90
-
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
91
-
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
92
-
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
93
-
code span.op { color: var(--syntax-operator); } /* operator */
94
-
code span.ot { color: var(--syntax-other); } /* other */
95
-
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
96
-
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
97
-
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
98
-
code span.st { color: var(--syntax-string); } /* string */
99
-
code span.va { color: var(--syntax-variable); } /* variable */
100
-
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
101
-
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
-117
docs/template.html
-117
docs/template.html
···
1
-
<!DOCTYPE html>
2
-
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
3
-
<head>
4
-
<meta charset="utf-8" />
5
-
<meta name="generator" content="pandoc" />
6
-
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
7
-
$for(author-meta)$
8
-
<meta name="author" content="$author-meta$" />
9
-
$endfor$
10
-
11
-
$if(date-meta)$
12
-
<meta name="dcterms.date" content="$date-meta$" />
13
-
$endif$
14
-
15
-
$if(keywords)$
16
-
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
17
-
$endif$
18
-
19
-
$if(description-meta)$
20
-
<meta name="description" content="$description-meta$" />
21
-
$endif$
22
-
23
-
<title>$pagetitle$ - Tangled docs</title>
24
-
25
-
<style>
26
-
$styles.css()$
27
-
</style>
28
-
29
-
$for(css)$
30
-
<link rel="stylesheet" href="$css$" />
31
-
$endfor$
32
-
33
-
$for(header-includes)$
34
-
$header-includes$
35
-
$endfor$
36
-
37
-
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
38
-
39
-
</head>
40
-
<body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen">
41
-
$for(include-before)$
42
-
$include-before$
43
-
$endfor$
44
-
45
-
$if(toc)$
46
-
<!-- mobile topbar toc -->
47
-
<details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4">
48
-
<summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white">
49
-
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
50
-
<span class="group-open:hidden inline">${ menu.svg() }</span>
51
-
<span class="hidden group-open:inline">${ x.svg() }</span>
52
-
</summary>
53
-
${ table-of-contents:toc.html() }
54
-
</details>
55
-
<!-- desktop sidebar toc -->
56
-
<nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50">
57
-
$if(toc-title)$
58
-
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
59
-
$endif$
60
-
${ table-of-contents:toc.html() }
61
-
</nav>
62
-
$endif$
63
-
64
-
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
65
-
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
66
-
$if(top)$
67
-
$-- only print title block if this is NOT the top page
68
-
$else$
69
-
$if(title)$
70
-
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
71
-
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
72
-
$if(subtitle)$
73
-
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
74
-
$endif$
75
-
$for(author)$
76
-
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
77
-
$endfor$
78
-
$if(date)$
79
-
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
80
-
$endif$
81
-
$if(abstract)$
82
-
<div class="mt-6 p-4 bg-gray-50 rounded-lg">
83
-
<div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div>
84
-
<div class="text-gray-700">$abstract$</div>
85
-
</div>
86
-
$endif$
87
-
$endif$
88
-
</header>
89
-
$endif$
90
-
<article class="prose dark:prose-invert max-w-none">
91
-
$body$
92
-
</article>
93
-
</main>
94
-
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 ">
95
-
<div class="max-w-4xl mx-auto px-8 py-4">
96
-
<div class="flex justify-between gap-4">
97
-
<span class="flex-1">
98
-
$if(previous.url)$
99
-
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
100
-
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
101
-
$endif$
102
-
</span>
103
-
<span class="flex-1 text-right">
104
-
$if(next.url)$
105
-
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
106
-
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
107
-
$endif$
108
-
</span>
109
-
</div>
110
-
</div>
111
-
</nav>
112
-
</div>
113
-
$for(include-after)$
114
-
$include-after$
115
-
$endfor$
116
-
</body>
117
-
</html>
-4
docs/toc.html
-4
docs/toc.html
+2
-5
flake.nix
+2
-5
flake.nix
···
88
88
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
89
89
};
90
90
appview = self.callPackage ./nix/pkgs/appview.nix {};
91
-
docs = self.callPackage ./nix/pkgs/docs.nix {
92
-
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
93
-
};
94
91
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
95
92
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
96
93
knot = self.callPackage ./nix/pkgs/knot.nix {};
97
94
});
98
95
in {
99
96
overlays.default = final: prev: {
100
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
97
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
101
98
};
102
99
103
100
packages = forAllSystems (system: let
···
106
103
staticPackages = mkPackageSet pkgs.pkgsStatic;
107
104
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
108
105
in {
109
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;
106
+
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
110
107
111
108
pkgsStatic-appview = staticPackages.appview;
112
109
pkgsStatic-knot = staticPackages.knot;
+1
go.mod
+1
go.mod
···
131
131
github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect
132
132
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
133
133
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
134
+
github.com/hashicorp/go-version v1.8.0 // indirect
134
135
github.com/hashicorp/golang-lru v1.0.2 // indirect
135
136
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
136
137
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
+2
go.sum
+2
go.sum
···
264
264
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
265
265
github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
266
266
github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
267
+
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
268
+
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
267
269
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
268
270
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
269
271
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+1
-1
input.css
+1
-1
input.css
+69
knotserver/archive.go
+69
knotserver/archive.go
···
1
+
package knotserver
2
+
3
+
import (
4
+
"compress/gzip"
5
+
"fmt"
6
+
"net/http"
7
+
"strings"
8
+
9
+
securejoin "github.com/cyphar/filepath-securejoin"
10
+
"github.com/go-chi/chi/v5"
11
+
"github.com/go-git/go-git/v5/plumbing"
12
+
"tangled.org/core/knotserver/git"
13
+
)
14
+
15
+
func (h *Knot) Archive(w http.ResponseWriter, r *http.Request) {
16
+
var (
17
+
did = chi.URLParam(r, "did")
18
+
name = chi.URLParam(r, "name")
19
+
ref = chi.URLParam(r, "ref")
20
+
)
21
+
repo, err := securejoin.SecureJoin(did, name)
22
+
if err != nil {
23
+
gitError(w, "repository not found", http.StatusNotFound)
24
+
h.l.Error("git: failed to secure join repo path", "handler", "InfoRefs", "error", err)
25
+
return
26
+
}
27
+
28
+
repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, repo)
29
+
if err != nil {
30
+
gitError(w, "repository not found", http.StatusNotFound)
31
+
h.l.Error("git: failed to secure join repo path", "handler", "InfoRefs", "error", err)
32
+
return
33
+
}
34
+
35
+
gr, err := git.Open(repoPath, ref)
36
+
37
+
immutableLink := fmt.Sprintf(
38
+
"https://%s/%s/%s/archive/%s",
39
+
h.c.Server.Hostname,
40
+
did,
41
+
name,
42
+
gr.Hash(),
43
+
)
44
+
45
+
safeRefFilename := strings.ReplaceAll(plumbing.ReferenceName(ref).Short(), "/", "-")
46
+
filename := fmt.Sprintf("%s-%s.tar.gz", name, safeRefFilename)
47
+
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
48
+
w.Header().Set("Content-Type", "application/gzip")
49
+
w.Header().Set("Link", fmt.Sprintf("<%s>; rel=\"immutable\"", immutableLink))
50
+
51
+
gw := gzip.NewWriter(w)
52
+
defer gw.Close()
53
+
54
+
err = gr.WriteTar(gw, "")
55
+
if err != nil {
56
+
// once we start writing to the body we can't report error anymore
57
+
// so we are only left with logging the error
58
+
h.l.Error("writing tar file", "error", err)
59
+
return
60
+
}
61
+
62
+
err = gw.Flush()
63
+
if err != nil {
64
+
// once we start writing to the body we can't report error anymore
65
+
// so we are only left with logging the error
66
+
h.l.Error("flushing", "error", err.Error())
67
+
return
68
+
}
69
+
}
+4
knotserver/git/git.go
+4
knotserver/git/git.go
+2
knotserver/router.go
+2
knotserver/router.go
-81
knotserver/xrpc/repo_archive.go
-81
knotserver/xrpc/repo_archive.go
···
1
-
package xrpc
2
-
3
-
import (
4
-
"compress/gzip"
5
-
"fmt"
6
-
"net/http"
7
-
"strings"
8
-
9
-
"github.com/go-git/go-git/v5/plumbing"
10
-
11
-
"tangled.org/core/knotserver/git"
12
-
xrpcerr "tangled.org/core/xrpc/errors"
13
-
)
14
-
15
-
func (x *Xrpc) RepoArchive(w http.ResponseWriter, r *http.Request) {
16
-
repo := r.URL.Query().Get("repo")
17
-
repoPath, err := x.parseRepoParam(repo)
18
-
if err != nil {
19
-
writeError(w, err.(xrpcerr.XrpcError), http.StatusBadRequest)
20
-
return
21
-
}
22
-
23
-
ref := r.URL.Query().Get("ref")
24
-
// ref can be empty (git.Open handles this)
25
-
26
-
format := r.URL.Query().Get("format")
27
-
if format == "" {
28
-
format = "tar.gz" // default
29
-
}
30
-
31
-
prefix := r.URL.Query().Get("prefix")
32
-
33
-
if format != "tar.gz" {
34
-
writeError(w, xrpcerr.NewXrpcError(
35
-
xrpcerr.WithTag("InvalidRequest"),
36
-
xrpcerr.WithMessage("only tar.gz format is supported"),
37
-
), http.StatusBadRequest)
38
-
return
39
-
}
40
-
41
-
gr, err := git.Open(repoPath, ref)
42
-
if err != nil {
43
-
writeError(w, xrpcerr.RefNotFoundError, http.StatusNotFound)
44
-
return
45
-
}
46
-
47
-
repoParts := strings.Split(repo, "/")
48
-
repoName := repoParts[len(repoParts)-1]
49
-
50
-
safeRefFilename := strings.ReplaceAll(plumbing.ReferenceName(ref).Short(), "/", "-")
51
-
52
-
var archivePrefix string
53
-
if prefix != "" {
54
-
archivePrefix = prefix
55
-
} else {
56
-
archivePrefix = fmt.Sprintf("%s-%s", repoName, safeRefFilename)
57
-
}
58
-
59
-
filename := fmt.Sprintf("%s-%s.tar.gz", repoName, safeRefFilename)
60
-
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
61
-
w.Header().Set("Content-Type", "application/gzip")
62
-
63
-
gw := gzip.NewWriter(w)
64
-
defer gw.Close()
65
-
66
-
err = gr.WriteTar(gw, archivePrefix)
67
-
if err != nil {
68
-
// once we start writing to the body we can't report error anymore
69
-
// so we are only left with logging the error
70
-
x.Logger.Error("writing tar file", "error", err.Error())
71
-
return
72
-
}
73
-
74
-
err = gw.Flush()
75
-
if err != nil {
76
-
// once we start writing to the body we can't report error anymore
77
-
// so we are only left with logging the error
78
-
x.Logger.Error("flushing", "error", err.Error())
79
-
return
80
-
}
81
-
}
-1
knotserver/xrpc/xrpc.go
-1
knotserver/xrpc/xrpc.go
···
64
64
r.Get("/"+tangled.RepoCompareNSID, x.RepoCompare)
65
65
r.Get("/"+tangled.RepoGetDefaultBranchNSID, x.RepoGetDefaultBranch)
66
66
r.Get("/"+tangled.RepoBranchNSID, x.RepoBranch)
67
-
r.Get("/"+tangled.RepoArchiveNSID, x.RepoArchive)
68
67
r.Get("/"+tangled.RepoLanguagesNSID, x.RepoLanguages)
69
68
70
69
// knot query endpoints (no auth required)
+1
lexicons/repo/archive.json
+1
lexicons/repo/archive.json
+3
nix/gomod2nix.toml
+3
nix/gomod2nix.toml
···
304
304
[mod."github.com/hashicorp/go-sockaddr"]
305
305
version = "v1.0.7"
306
306
hash = "sha256-p6eDOrGzN1jMmT/F/f/VJMq0cKNFhUcEuVVwTE6vSrs="
307
+
[mod."github.com/hashicorp/go-version"]
308
+
version = "v1.8.0"
309
+
hash = "sha256-KXtqERmYrWdpqPCViWcHbe6jnuH7k16bvBIcuJuevj8="
307
310
[mod."github.com/hashicorp/golang-lru"]
308
311
version = "v1.0.2"
309
312
hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
-41
nix/pkgs/docs.nix
-41
nix/pkgs/docs.nix
···
1
-
{
2
-
pandoc,
3
-
tailwindcss,
4
-
runCommandLocal,
5
-
inter-fonts-src,
6
-
ibm-plex-mono-src,
7
-
lucide-src,
8
-
src,
9
-
}:
10
-
runCommandLocal "docs" {} ''
11
-
mkdir -p working
12
-
13
-
# copy templates, themes, styles, filters to working directory
14
-
cp ${src}/docs/*.html working/
15
-
cp ${src}/docs/*.theme working/
16
-
cp ${src}/docs/*.css working/
17
-
18
-
# icons
19
-
cp -rf ${lucide-src}/*.svg working/
20
-
21
-
# content
22
-
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
23
-
-o $out/ \
24
-
-t chunkedhtml \
25
-
--variable toc \
26
-
--toc-depth=2 \
27
-
--css=stylesheet.css \
28
-
--chunk-template="%i.html" \
29
-
--highlight-style=working/highlight.theme \
30
-
--template=working/template.html
31
-
32
-
# fonts
33
-
mkdir -p $out/static/fonts
34
-
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
35
-
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
36
-
cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
37
-
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/
38
-
39
-
# styles
40
-
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
41
-
''
+1
-1
nix/vm.nix
+1
-1
nix/vm.nix
···
8
8
var = builtins.getEnv name;
9
9
in
10
10
if var == ""
11
-
then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
11
+
then throw "\$${name} must be defined, see docs/hacking.md for more details"
12
12
else var;
13
13
envVarOr = name: default: let
14
14
var = builtins.getEnv name;
+3
-3
readme.md
+3
-3
readme.md
···
10
10
11
11
## docs
12
12
13
-
- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
14
-
- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
15
-
- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)
13
+
* [knot hosting guide](/docs/knot-hosting.md)
14
+
* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
15
+
* [hacking on tangled](/docs/hacking.md)
16
16
17
17
## security
18
18
+1
-1
spindle/motd
+1
-1
spindle/motd
+1
-1
tailwind.config.js
+1
-1
tailwind.config.js
···
2
2
const colors = require("tailwindcss/colors");
3
3
4
4
module.exports = {
5
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
5
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
6
6
darkMode: "media",
7
7
theme: {
8
8
container: {