+34
api/tangled/pipelinecancelPipeline.go
+34
api/tangled/pipelinecancelPipeline.go
···
1
+
// Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.

package tangled

// schema: sh.tangled.pipeline.cancelPipeline

import (
	"context"

	"github.com/bluesky-social/indigo/lex/util"
)

const (
	// PipelineCancelPipelineNSID is the NSID of the cancelPipeline XRPC procedure.
	PipelineCancelPipelineNSID = "sh.tangled.pipeline.cancelPipeline"
)

// PipelineCancelPipeline_Input is the input argument to a sh.tangled.pipeline.cancelPipeline call.
type PipelineCancelPipeline_Input struct {
	// pipeline: pipeline at-uri
	Pipeline string `json:"pipeline" cborgen:"pipeline"`
	// repo: repo at-uri, spindle can't resolve repo from pipeline at-uri yet
	Repo string `json:"repo" cborgen:"repo"`
	// workflow: workflow name
	Workflow string `json:"workflow" cborgen:"workflow"`
}

// PipelineCancelPipeline calls the XRPC method "sh.tangled.pipeline.cancelPipeline".
// The procedure has no output; any transport or server error from LexDo is
// returned unwrapped to the caller.
func PipelineCancelPipeline(ctx context.Context, c util.LexClient, input *PipelineCancelPipeline_Input) error {
	if err := c.LexDo(ctx, util.Procedure, "application/json", "sh.tangled.pipeline.cancelPipeline", nil, input, nil); err != nil {
		return err
	}

	return nil
}
+6
-6
appview/db/pipeline.go
+6
-6
appview/db/pipeline.go
···
6
6
"strings"
7
7
"time"
8
8
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
10
"tangled.org/core/appview/models"
10
11
"tangled.org/core/orm"
11
12
)
···
216
217
}
217
218
defer rows.Close()
218
219
219
-
pipelines := make(map[string]models.Pipeline)
220
+
pipelines := make(map[syntax.ATURI]models.Pipeline)
220
221
for rows.Next() {
221
222
var p models.Pipeline
222
223
var t models.Trigger
···
253
254
p.Trigger = &t
254
255
p.Statuses = make(map[string]models.WorkflowStatus)
255
256
256
-
k := fmt.Sprintf("%s/%s", p.Knot, p.Rkey)
257
-
pipelines[k] = p
257
+
pipelines[p.AtUri()] = p
258
258
}
259
259
260
260
// get all statuses
···
314
314
return nil, fmt.Errorf("invalid status created timestamp %q: %w", created, err)
315
315
}
316
316
317
-
key := fmt.Sprintf("%s/%s", ps.PipelineKnot, ps.PipelineRkey)
317
+
pipelineAt := ps.PipelineAt()
318
318
319
319
// extract
320
-
pipeline, ok := pipelines[key]
320
+
pipeline, ok := pipelines[pipelineAt]
321
321
if !ok {
322
322
continue
323
323
}
···
331
331
332
332
// reassign
333
333
pipeline.Statuses[ps.Workflow] = statuses
334
-
pipelines[key] = pipeline
334
+
pipelines[pipelineAt] = pipeline
335
335
}
336
336
337
337
var all []models.Pipeline
-1
appview/db/repos.go
-1
appview/db/repos.go
+10
appview/models/pipeline.go
+10
appview/models/pipeline.go
···
1
1
package models
2
2
3
3
import (
4
+
"fmt"
4
5
"slices"
5
6
"time"
6
7
7
8
"github.com/bluesky-social/indigo/atproto/syntax"
8
9
"github.com/go-git/go-git/v5/plumbing"
10
+
"tangled.org/core/api/tangled"
9
11
spindle "tangled.org/core/spindle/models"
10
12
"tangled.org/core/workflow"
11
13
)
···
23
25
// populate when querying for reverse mappings
24
26
Trigger *Trigger
25
27
Statuses map[string]WorkflowStatus
28
+
}
29
+
30
+
// AtUri returns the at-uri identifying this pipeline record:
// at://did:web:<knot>/<sh.tangled.pipeline NSID>/<rkey>.
// NOTE(review): this assumes every knot's identity is a did:web derived
// directly from its hostname — confirm against how knot DIDs are issued.
func (p *Pipeline) AtUri() syntax.ATURI {
	return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", p.Knot, tangled.PipelineNSID, p.Rkey))
}
27
33
28
34
type WorkflowStatus struct {
···
128
134
Error *string
129
135
ExitCode int
130
136
}
137
+
138
+
// PipelineAt returns the at-uri of the pipeline this status belongs to,
// built the same way as Pipeline.AtUri:
// at://did:web:<knot>/<sh.tangled.pipeline NSID>/<rkey>.
// NOTE(review): duplicates the did:web-from-hostname assumption — if knot
// identities change shape, both constructors must change together.
func (ps *PipelineStatus) PipelineAt() syntax.ATURI {
	return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", ps.PipelineKnot, tangled.PipelineNSID, ps.PipelineRkey))
}
+3
-13
appview/pages/markup/extension/atlink.go
+3
-13
appview/pages/markup/extension/atlink.go
···
35
35
return KindAt
36
36
}
37
37
38
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
39
-
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
38
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
40
39
41
40
type atParser struct{}
42
41
···
56
55
if m == nil {
57
56
return nil
58
57
}
59
-
60
-
// Check for all links in the markdown to see if the handle found is inside one
61
-
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
62
-
for _, linkMatch := range linksIndexes {
63
-
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
64
-
return nil
65
-
}
66
-
}
67
-
68
58
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
69
59
block.Advance(m[1])
70
60
node := &AtNode{}
···
97
87
98
88
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
99
89
if entering {
100
-
w.WriteString(`<a href="/`)
90
+
w.WriteString(`<a href="/@`)
101
91
w.WriteString(n.(*AtNode).Handle)
102
-
w.WriteString(`" class="mention">`)
92
+
w.WriteString(`" class="mention font-bold">`)
103
93
} else {
104
94
w.WriteString("</a>")
105
95
}
-121
appview/pages/markup/markdown_test.go
-121
appview/pages/markup/markdown_test.go
···
1
-
package markup
2
-
3
-
import (
4
-
"bytes"
5
-
"testing"
6
-
)
7
-
8
-
func TestAtExtension_Rendering(t *testing.T) {
9
-
tests := []struct {
10
-
name string
11
-
markdown string
12
-
expected string
13
-
}{
14
-
{
15
-
name: "renders simple at mention",
16
-
markdown: "Hello @user.tngl.sh!",
17
-
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
18
-
},
19
-
{
20
-
name: "renders multiple at mentions",
21
-
markdown: "Hi @alice.tngl.sh and @bob.example.com",
22
-
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
23
-
},
24
-
{
25
-
name: "renders at mention in parentheses",
26
-
markdown: "Check this out (@user.tngl.sh)",
27
-
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
28
-
},
29
-
{
30
-
name: "does not render email",
31
-
markdown: "Contact me at test@example.com",
32
-
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
33
-
},
34
-
{
35
-
name: "renders at mention with hyphen",
36
-
markdown: "Follow @user-name.tngl.sh",
37
-
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
38
-
},
39
-
{
40
-
name: "renders at mention with numbers",
41
-
markdown: "@user123.test456.social",
42
-
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
43
-
},
44
-
{
45
-
name: "at mention at start of line",
46
-
markdown: "@user.tngl.sh is cool",
47
-
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
48
-
},
49
-
}
50
-
51
-
for _, tt := range tests {
52
-
t.Run(tt.name, func(t *testing.T) {
53
-
md := NewMarkdown()
54
-
55
-
var buf bytes.Buffer
56
-
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
57
-
t.Fatalf("failed to convert markdown: %v", err)
58
-
}
59
-
60
-
result := buf.String()
61
-
if result != tt.expected+"\n" {
62
-
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
63
-
}
64
-
})
65
-
}
66
-
}
67
-
68
-
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
69
-
tests := []struct {
70
-
name string
71
-
markdown string
72
-
contains string
73
-
}{
74
-
{
75
-
name: "at mention with bold",
76
-
markdown: "**Hello @user.tngl.sh**",
77
-
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
78
-
},
79
-
{
80
-
name: "at mention with italic",
81
-
markdown: "*Check @user.tngl.sh*",
82
-
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
83
-
},
84
-
{
85
-
name: "at mention in list",
86
-
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
87
-
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
88
-
},
89
-
{
90
-
name: "at mention in link",
91
-
markdown: "[@regnault.dev](https://regnault.dev)",
92
-
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
93
-
},
94
-
{
95
-
name: "at mention in link again",
96
-
markdown: "[check out @regnault.dev](https://regnault.dev)",
97
-
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
98
-
},
99
-
{
100
-
name: "at mention in link again, multiline",
101
-
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
102
-
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
103
-
},
104
-
}
105
-
106
-
for _, tt := range tests {
107
-
t.Run(tt.name, func(t *testing.T) {
108
-
md := NewMarkdown()
109
-
110
-
var buf bytes.Buffer
111
-
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
112
-
t.Fatalf("failed to convert markdown: %v", err)
113
-
}
114
-
115
-
result := buf.String()
116
-
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
117
-
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
118
-
}
119
-
})
120
-
}
121
-
}
+1
-1
appview/pages/templates/knots/index.html
+1
-1
appview/pages/templates/knots/index.html
···
105
105
{{ define "docsButton" }}
106
106
<a
107
107
class="btn flex items-center gap-2"
108
-
href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide">
108
+
href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">
109
109
{{ i "book" "size-4" }}
110
110
docs
111
111
</a>
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
···
23
23
</p>
24
24
<p>
25
25
<span class="{{ $bullet }}">2</span>Configure your CI/CD
26
-
<a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
26
+
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
27
27
</p>
28
28
<p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
29
29
</div>
+10
appview/pages/templates/repo/pipelines/workflow.html
+10
appview/pages/templates/repo/pipelines/workflow.html
···
12
12
{{ block "sidebar" . }} {{ end }}
13
13
</div>
14
14
<div class="col-span-1 md:col-span-3">
15
+
<div class="flex justify-end mb-2">
16
+
<button
17
+
class="btn"
18
+
hx-post="/{{ $.RepoInfo.FullName }}/pipelines/{{ .Pipeline.Id }}/workflow/{{ .Workflow }}/cancel"
19
+
hx-swap="none"
20
+
{{ if (index .Pipeline.Statuses .Workflow).Latest.Status.IsFinish -}}
21
+
disabled
22
+
{{- end }}
23
+
>Cancel</button>
24
+
</div>
15
25
{{ block "logs" . }} {{ end }}
16
26
</div>
17
27
</section>
+1
-1
appview/pages/templates/repo/settings/pipelines.html
+1
-1
appview/pages/templates/repo/settings/pipelines.html
···
22
22
<p class="text-gray-500 dark:text-gray-400">
23
23
Choose a spindle to execute your workflows on. Only repository owners
24
24
can configure spindles. Spindles can be selfhosted,
25
-
<a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
25
+
<a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
26
26
click to learn more.
27
27
</a>
28
28
</p>
+1
-1
appview/pages/templates/spindles/index.html
+1
-1
appview/pages/templates/spindles/index.html
···
102
102
{{ define "docsButton" }}
103
103
<a
104
104
class="btn flex items-center gap-2"
105
-
href="https://docs.tangled.org/spindles.html#self-hosting-guide">
105
+
href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
106
106
{{ i "book" "size-4" }}
107
107
docs
108
108
</a>
+82
appview/pipelines/pipelines.go
+82
appview/pipelines/pipelines.go
···
4
4
"bytes"
5
5
"context"
6
6
"encoding/json"
7
+
"fmt"
7
8
"log/slog"
8
9
"net/http"
9
10
"strings"
10
11
"time"
11
12
13
+
"tangled.org/core/api/tangled"
12
14
"tangled.org/core/appview/config"
13
15
"tangled.org/core/appview/db"
16
+
"tangled.org/core/appview/models"
14
17
"tangled.org/core/appview/oauth"
15
18
"tangled.org/core/appview/pages"
16
19
"tangled.org/core/appview/reporesolver"
···
41
44
r.Get("/", p.Index)
42
45
r.Get("/{pipeline}/workflow/{workflow}", p.Workflow)
43
46
r.Get("/{pipeline}/workflow/{workflow}/logs", p.Logs)
47
+
r.Post("/{pipeline}/workflow/{workflow}/cancel", p.Cancel)
44
48
45
49
return r
46
50
}
···
314
318
}
315
319
}
316
320
}
321
+
}
322
+
323
+
func (p *Pipelines) Cancel(w http.ResponseWriter, r *http.Request) {
324
+
l := p.logger.With("handler", "Cancel")
325
+
326
+
var (
327
+
pipelineId = chi.URLParam(r, "pipeline")
328
+
workflow = chi.URLParam(r, "workflow")
329
+
)
330
+
if pipelineId == "" || workflow == "" {
331
+
http.Error(w, "missing pipeline ID or workflow", http.StatusBadRequest)
332
+
return
333
+
}
334
+
335
+
f, err := p.repoResolver.Resolve(r)
336
+
if err != nil {
337
+
l.Error("failed to get repo and knot", "err", err)
338
+
http.Error(w, "bad repo/knot", http.StatusBadRequest)
339
+
return
340
+
}
341
+
342
+
pipeline, err := func() (models.Pipeline, error) {
343
+
ps, err := db.GetPipelineStatuses(
344
+
p.db,
345
+
1,
346
+
orm.FilterEq("repo_owner", f.Did),
347
+
orm.FilterEq("repo_name", f.Name),
348
+
orm.FilterEq("knot", f.Knot),
349
+
orm.FilterEq("id", pipelineId),
350
+
)
351
+
if err != nil {
352
+
return models.Pipeline{}, err
353
+
}
354
+
if len(ps) != 1 {
355
+
return models.Pipeline{}, fmt.Errorf("wrong pipeline count %d", len(ps))
356
+
}
357
+
return ps[0], nil
358
+
}()
359
+
if err != nil {
360
+
l.Error("pipeline query failed", "err", err)
361
+
http.Error(w, "pipeline not found", http.StatusNotFound)
362
+
}
363
+
var (
364
+
spindle = f.Spindle
365
+
knot = f.Knot
366
+
rkey = pipeline.Rkey
367
+
)
368
+
369
+
if spindle == "" || knot == "" || rkey == "" {
370
+
http.Error(w, "invalid repo info", http.StatusBadRequest)
371
+
return
372
+
}
373
+
374
+
spindleClient, err := p.oauth.ServiceClient(
375
+
r,
376
+
oauth.WithService(f.Spindle),
377
+
oauth.WithLxm(tangled.PipelineCancelPipelineNSID),
378
+
oauth.WithExp(60),
379
+
oauth.WithDev(p.config.Core.Dev),
380
+
oauth.WithTimeout(time.Second*30), // workflow cleanup usually takes time
381
+
)
382
+
383
+
err = tangled.PipelineCancelPipeline(
384
+
r.Context(),
385
+
spindleClient,
386
+
&tangled.PipelineCancelPipeline_Input{
387
+
Repo: string(f.RepoAt()),
388
+
Pipeline: pipeline.AtUri().String(),
389
+
Workflow: workflow,
390
+
},
391
+
)
392
+
errorId := "pipeline-action"
393
+
if err != nil {
394
+
l.Error("failed to cancel pipeline", "err", err)
395
+
p.pages.Notice(w, errorId, "Failed to add secret.")
396
+
return
397
+
}
398
+
l.Debug("canceled pipeline", "uri", pipeline.AtUri())
317
399
}
318
400
319
401
// either a message or an error
-1530
docs/DOCS.md
-1530
docs/DOCS.md
···
1
-
---
2
-
title: Tangled Documentation
3
-
author: The Tangled Contributors
4
-
date: 21 Sun, Dec 2025
5
-
---
6
-
7
-
# Introduction
8
-
9
-
Tangled is a decentralized code hosting and collaboration
10
-
platform. Every component of Tangled is open-source and
11
-
selfhostable. [tangled.org](https://tangled.org) also
12
-
provides hosting and CI services that are free to use.
13
-
14
-
There are several models for decentralized code
15
-
collaboration platforms, ranging from ActivityPubโs
16
-
(Forgejo) federated model, to Radicleโs entirely P2P model.
17
-
Our approach attempts to be the best of both worlds by
18
-
adopting atprotoโa protocol for building decentralized
19
-
social applications with a central identity
20
-
21
-
Our approach to this is the idea of โknotsโ. Knots are
22
-
lightweight, headless servers that enable users to host Git
23
-
repositories with ease. Knots are designed for either single
24
-
or multi-tenant use which is perfect for self-hosting on a
25
-
Raspberry Pi at home, or larger โcommunityโ servers. By
26
-
default, Tangled provides managed knots where you can host
27
-
your repositories for free.
28
-
29
-
The "appview" at tangled.org acts as a consolidated โviewโ
30
-
into the whole network, allowing users to access, clone and
31
-
contribute to repositories hosted across different knots
32
-
seamlessly.
33
-
34
-
# Quick Start Guide
35
-
36
-
## Login or Sign up
37
-
38
-
You can [login](https://tangled.org) by using your AT
39
-
account. If you are unclear on what that means, simply head
40
-
to the [signup](https://tangled.org/signup) page and create
41
-
an account. By doing so, you will be choosing Tangled as
42
-
your account provider (you will be granted a handle of the
43
-
form `user.tngl.sh`).
44
-
45
-
In the AT network, users are free to choose their account
46
-
provider (known as a "Personal Data Service", or PDS), and
47
-
login to applications that support AT accounts.
48
-
49
-
You can think of it as "one account for all of the
50
-
atmosphere"!
51
-
52
-
If you already have an AT account (you may have one if you
53
-
signed up to Bluesky, for example), you can login with the
54
-
same handle on Tangled (so just use `user.bsky.social` on
55
-
the login page).
56
-
57
-
## Add an SSH Key
58
-
59
-
Once you are logged in, you can start creating repositories
60
-
and pushing code. Tangled supports pushing git repositories
61
-
over SSH.
62
-
63
-
First, you'll need to generate an SSH key if you don't
64
-
already have one:
65
-
66
-
```bash
67
-
ssh-keygen -t ed25519 -C "foo@bar.com"
68
-
```
69
-
70
-
When prompted, save the key to the default location
71
-
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
72
-
73
-
Copy your public key to your clipboard:
74
-
75
-
```bash
76
-
# on X11
77
-
cat ~/.ssh/id_ed25519.pub | xclip -sel c
78
-
79
-
# on wayland
80
-
cat ~/.ssh/id_ed25519.pub | wl-copy
81
-
82
-
# on macos
83
-
cat ~/.ssh/id_ed25519.pub | pbcopy
84
-
```
85
-
86
-
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
87
-
paste your public key, give it a descriptive name, and hit
88
-
save.
89
-
90
-
## Create a Repository
91
-
92
-
Once your SSH key is added, create your first repository:
93
-
94
-
1. Hit the green `+` icon on the topbar, and select
95
-
repository
96
-
2. Enter a repository name
97
-
3. Add a description
98
-
4. Choose a knotserver to host this repository on
99
-
5. Hit create
100
-
101
-
"Knots" are selfhostable, lightweight git servers that can
102
-
host your repository. Unlike traditional code forges, your
103
-
code can live on any server. Read the [Knots](TODO) section
104
-
for more.
105
-
106
-
## Configure SSH
107
-
108
-
To ensure Git uses the correct SSH key and connects smoothly
109
-
to Tangled, add this configuration to your `~/.ssh/config`
110
-
file:
111
-
112
-
```
113
-
Host tangled.org
114
-
Hostname tangled.org
115
-
User git
116
-
IdentityFile ~/.ssh/id_ed25519
117
-
AddressFamily inet
118
-
```
119
-
120
-
This tells SSH to use your specific key when connecting to
121
-
Tangled and prevents authentication issues if you have
122
-
multiple SSH keys.
123
-
124
-
Note that this configuration only works for knotservers that
125
-
are hosted by tangled.org. If you use a custom knot, refer
126
-
to the [Knots](TODO) section.
127
-
128
-
## Push Your First Repository
129
-
130
-
Initialize a new git repository:
131
-
132
-
```bash
133
-
mkdir my-project
134
-
cd my-project
135
-
136
-
git init
137
-
echo "# My Project" > README.md
138
-
```
139
-
140
-
Add some content and push!
141
-
142
-
```bash
143
-
git add README.md
144
-
git commit -m "Initial commit"
145
-
git remote add origin git@tangled.org:user.tngl.sh/my-project
146
-
git push -u origin main
147
-
```
148
-
149
-
That's it! Your code is now hosted on Tangled.
150
-
151
-
## Migrating an existing repository
152
-
153
-
Moving your repositories from GitHub, GitLab, Bitbucket, or
154
-
any other Git forge to Tangled is straightforward. You'll
155
-
simply change your repository's remote URL. At the moment,
156
-
Tangled does not have any tooling to migrate data such as
157
-
GitHub issues or pull requests.
158
-
159
-
First, create a new repository on tangled.org as described
160
-
in the [Quick Start Guide](#create-a-repository).
161
-
162
-
Navigate to your existing local repository:
163
-
164
-
```bash
165
-
cd /path/to/your/existing/repo
166
-
```
167
-
168
-
You can inspect your existing git remote like so:
169
-
170
-
```bash
171
-
git remote -v
172
-
```
173
-
174
-
You'll see something like:
175
-
176
-
```
177
-
origin git@github.com:username/my-project (fetch)
178
-
origin git@github.com:username/my-project (push)
179
-
```
180
-
181
-
Update the remote URL to point to tangled:
182
-
183
-
```bash
184
-
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
185
-
```
186
-
187
-
Verify the change:
188
-
189
-
```bash
190
-
git remote -v
191
-
```
192
-
193
-
You should now see:
194
-
195
-
```
196
-
origin git@tangled.org:user.tngl.sh/my-project (fetch)
197
-
origin git@tangled.org:user.tngl.sh/my-project (push)
198
-
```
199
-
200
-
Push all your branches and tags to tangled:
201
-
202
-
```bash
203
-
git push -u origin --all
204
-
git push -u origin --tags
205
-
```
206
-
207
-
Your repository is now migrated to Tangled! All commit
208
-
history, branches, and tags have been preserved.
209
-
210
-
## Mirroring a repository to Tangled
211
-
212
-
If you want to maintain your repository on multiple forges
213
-
simultaneously, for example, keeping your primary repository
214
-
on GitHub while mirroring to Tangled for backup or
215
-
redundancy, you can do so by adding multiple remotes.
216
-
217
-
You can configure your local repository to push to both
218
-
Tangled and, say, GitHub. You may already have the following
219
-
setup:
220
-
221
-
```
222
-
$ git remote -v
223
-
origin git@github.com:username/my-project (fetch)
224
-
origin git@github.com:username/my-project (push)
225
-
```
226
-
227
-
Now add Tangled as an additional push URL to the same
228
-
remote:
229
-
230
-
```bash
231
-
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
232
-
```
233
-
234
-
You also need to re-add the original URL as a push
235
-
destination (git replaces the push URL when you use `--add`
236
-
the first time):
237
-
238
-
```bash
239
-
git remote set-url --add --push origin git@github.com:username/my-project
240
-
```
241
-
242
-
Verify your configuration:
243
-
244
-
```
245
-
$ git remote -v
246
-
origin git@github.com:username/repo (fetch)
247
-
origin git@tangled.org:username/my-project (push)
248
-
origin git@github.com:username/repo (push)
249
-
```
250
-
251
-
Notice that there's one fetch URL (the primary remote) and
252
-
two push URLs. Now, whenever you push, git will
253
-
automatically push to both remotes:
254
-
255
-
```bash
256
-
git push origin main
257
-
```
258
-
259
-
This single command pushes your `main` branch to both GitHub
260
-
and Tangled simultaneously.
261
-
262
-
To push all branches and tags:
263
-
264
-
```bash
265
-
git push origin --all
266
-
git push origin --tags
267
-
```
268
-
269
-
If you prefer more control over which remote you push to,
270
-
you can maintain separate remotes:
271
-
272
-
```bash
273
-
git remote add github git@github.com:username/my-project
274
-
git remote add tangled git@tangled.org:username/my-project
275
-
```
276
-
277
-
Then push to each explicitly:
278
-
279
-
```bash
280
-
git push github main
281
-
git push tangled main
282
-
```
283
-
284
-
# Knot self-hosting guide
285
-
286
-
So you want to run your own knot server? Great! Here are a few prerequisites:
287
-
288
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
289
-
2. A (sub)domain name. People generally use `knot.example.com`.
290
-
3. A valid SSL certificate for your domain.
291
-
292
-
## NixOS
293
-
294
-
Refer to the [knot
295
-
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
296
-
for a full list of options. Sample configurations:
297
-
298
-
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
299
-
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
300
-
301
-
## Docker
302
-
303
-
Refer to
304
-
[@tangled.org/knot-docker](https://tangled.sh/@tangled.sh/knot-docker).
305
-
Note that this is community maintained.
306
-
307
-
## Manual setup
308
-
309
-
First, clone this repository:
310
-
311
-
```
312
-
git clone https://tangled.org/@tangled.org/core
313
-
```
314
-
315
-
Then, build the `knot` CLI. This is the knot administration
316
-
and operation tool. For the purpose of this guide, we're
317
-
only concerned with these subcommands:
318
-
319
-
* `knot server`: the main knot server process, typically
320
-
run as a supervised service
321
-
* `knot guard`: handles role-based access control for git
322
-
over SSH (you'll never have to run this yourself)
323
-
* `knot keys`: fetches SSH keys associated with your knot;
324
-
we'll use this to generate the SSH
325
-
`AuthorizedKeysCommand`
326
-
327
-
```
328
-
cd core
329
-
export CGO_ENABLED=1
330
-
go build -o knot ./cmd/knot
331
-
```
332
-
333
-
Next, move the `knot` binary to a location owned by `root` --
334
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
335
-
336
-
```
337
-
sudo mv knot /usr/local/bin/knot
338
-
sudo chown root:root /usr/local/bin/knot
339
-
```
340
-
341
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
342
-
specific permissions](https://stackoverflow.com/a/27638306). The
343
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
344
-
retrieve a user's public SSH keys dynamically for authentication. Let's
345
-
set that up.
346
-
347
-
```
348
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
349
-
Match User git
350
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
351
-
AuthorizedKeysCommandUser nobody
352
-
EOF
353
-
```
354
-
355
-
Then, reload `sshd`:
356
-
357
-
```
358
-
sudo systemctl reload ssh
359
-
```
360
-
361
-
Next, create the `git` user. We'll use the `git` user's home directory
362
-
to store repositories:
363
-
364
-
```
365
-
sudo adduser git
366
-
```
367
-
368
-
Create `/home/git/.knot.env` with the following, updating the values as
369
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
370
-
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
371
-
372
-
```
373
-
KNOT_REPO_SCAN_PATH=/home/git
374
-
KNOT_SERVER_HOSTNAME=knot.example.com
375
-
APPVIEW_ENDPOINT=https://tangled.sh
376
-
KNOT_SERVER_OWNER=did:plc:foobar
377
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
378
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
379
-
```
380
-
381
-
If you run a Linux distribution that uses systemd, you can use the provided
382
-
service file to run the server. Copy
383
-
[`knotserver.service`](/systemd/knotserver.service)
384
-
to `/etc/systemd/system/`. Then, run:
385
-
386
-
```
387
-
systemctl enable knotserver
388
-
systemctl start knotserver
389
-
```
390
-
391
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
392
-
knot. Here's an example configuration for Nginx:
393
-
394
-
```
395
-
server {
396
-
listen 80;
397
-
listen [::]:80;
398
-
server_name knot.example.com;
399
-
400
-
location / {
401
-
proxy_pass http://localhost:5555;
402
-
proxy_set_header Host $host;
403
-
proxy_set_header X-Real-IP $remote_addr;
404
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
405
-
proxy_set_header X-Forwarded-Proto $scheme;
406
-
}
407
-
408
-
# wss endpoint for git events
409
-
location /events {
410
-
proxy_set_header X-Forwarded-For $remote_addr;
411
-
proxy_set_header Host $http_host;
412
-
proxy_set_header Upgrade websocket;
413
-
proxy_set_header Connection Upgrade;
414
-
proxy_pass http://localhost:5555;
415
-
}
416
-
# additional config for SSL/TLS go here.
417
-
}
418
-
419
-
```
420
-
421
-
Remember to use Let's Encrypt or similar to procure a certificate for your
422
-
knot domain.
423
-
424
-
You should now have a running knot server! You can finalize
425
-
your registration by hitting the `verify` button on the
426
-
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
427
-
a record on your PDS to announce the existence of the knot.
428
-
429
-
### Custom paths
430
-
431
-
(This section applies to manual setup only. Docker users should edit the mounts
432
-
in `docker-compose.yml` instead.)
433
-
434
-
Right now, the database and repositories of your knot lives in `/home/git`. You
435
-
can move these paths if you'd like to store them in another folder. Be careful
436
-
when adjusting these paths:
437
-
438
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
439
-
any possible side effects. Remember to restart it once you're done.
440
-
* Make backups before moving in case something goes wrong.
441
-
* Make sure the `git` user can read and write from the new paths.
442
-
443
-
#### Database
444
-
445
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
446
-
and we want to move it to `/home/git/database/knotserver.db`.
447
-
448
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
449
-
and `.db-wal` files if they exist.
450
-
451
-
```
452
-
mkdir /home/git/database
453
-
cp /home/git/knotserver.db* /home/git/database
454
-
```
455
-
456
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
457
-
the new file path (_not_ the directory):
458
-
459
-
```
460
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
461
-
```
462
-
463
-
#### Repositories
464
-
465
-
As an example, let's say the repositories are currently in `/home/git`, and we
466
-
want to move them into `/home/git/repositories`.
467
-
468
-
Create the new folder, then move the existing repositories (if there are any):
469
-
470
-
```
471
-
mkdir /home/git/repositories
472
-
# move all DIDs into the new folder; these will vary for you!
473
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
474
-
```
475
-
476
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
477
-
to the new directory:
478
-
479
-
```
480
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
481
-
```
482
-
483
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
484
-
repository path:
485
-
486
-
```
487
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
488
-
Match User git
489
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
490
-
AuthorizedKeysCommandUser nobody
491
-
EOF
492
-
```
493
-
494
-
Make sure to restart your SSH server!
495
-
496
-
#### MOTD (message of the day)
497
-
498
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
499
-
`/home/git/motd` file:
500
-
501
-
```
502
-
printf "Hi from this knot!\n" > /home/git/motd
503
-
```
504
-
505
-
Note that you should add a newline at the end if setting a non-empty message
506
-
since the knot won't do this for you.
507
-
508
-
# Spindles
509
-
510
-
## Pipelines
511
-
512
-
Spindle workflows allow you to write CI/CD pipelines in a
513
-
simple format. They're located in the `.tangled/workflows`
514
-
directory at the root of your repository, and are defined
515
-
using YAML.
516
-
517
-
The fields are:
518
-
519
-
- [Trigger](#trigger): A **required** field that defines
520
-
when a workflow should be triggered.
521
-
- [Engine](#engine): A **required** field that defines which
522
-
engine a workflow should run on.
523
-
- [Clone options](#clone-options): An **optional** field
524
-
that defines how the repository should be cloned.
525
-
- [Dependencies](#dependencies): An **optional** field that
526
-
allows you to list dependencies you may need.
527
-
- [Environment](#environment): An **optional** field that
528
-
allows you to define environment variables.
529
-
- [Steps](#steps): An **optional** field that allows you to
530
-
define what steps should run in the workflow.
531
-
532
-
### Trigger
533
-
534
-
The first thing to add to a workflow is the trigger, which
535
-
defines when a workflow runs. This is defined using a `when`
536
-
field, which takes in a list of conditions. Each condition
537
-
has the following fields:
538
-
539
-
- `event`: This is a **required** field that defines when
540
-
your workflow should run. It's a list that can take one or
541
-
more of the following values:
542
-
- `push`: The workflow should run every time a commit is
543
-
pushed to the repository.
544
-
- `pull_request`: The workflow should run every time a
545
-
pull request is made or updated.
546
-
- `manual`: The workflow can be triggered manually.
547
-
- `branch`: Defines which branches the workflow should run
548
-
for. If used with the `push` event, commits to the
549
-
branch(es) listed here will trigger the workflow. If used
550
-
with the `pull_request` event, updates to pull requests
551
-
targeting the branch(es) listed here will trigger the
552
-
workflow. This field has no effect with the `manual`
553
-
event. Supports glob patterns using `*` and `**` (e.g.,
554
-
`main`, `develop`, `release-*`). Either `branch` or `tag`
555
-
(or both) must be specified for `push` events.
556
-
- `tag`: Defines which tags the workflow should run for.
557
-
Only used with the `push` event - when tags matching the
558
-
pattern(s) listed here are pushed, the workflow will
559
-
trigger. This field has no effect with `pull_request` or
560
-
`manual` events. Supports glob patterns using `*` and `**`
561
-
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
562
-
`tag` (or both) must be specified for `push` events.
563
-
564
-
For example, if you'd like to define a workflow that runs
565
-
when commits are pushed to the `main` and `develop`
566
-
branches, or when pull requests that target the `main`
567
-
branch are updated, or manually, you can do so with:
568
-
569
-
```yaml
570
-
when:
571
-
- event: ["push", "manual"]
572
-
branch: ["main", "develop"]
573
-
- event: ["pull_request"]
574
-
branch: ["main"]
575
-
```
576
-
577
-
You can also trigger workflows on tag pushes. For instance,
578
-
to run a deployment workflow when tags matching `v*` are
579
-
pushed:
580
-
581
-
```yaml
582
-
when:
583
-
- event: ["push"]
584
-
tag: ["v*"]
585
-
```
586
-
587
-
You can even combine branch and tag patterns in a single
588
-
constraint (the workflow triggers if either matches):
589
-
590
-
```yaml
591
-
when:
592
-
- event: ["push"]
593
-
branch: ["main", "release-*"]
594
-
tag: ["v*", "stable"]
595
-
```
596
-
597
-
### Engine
598
-
599
-
Next is the engine on which the workflow should run, defined
600
-
using the **required** `engine` field. The currently
601
-
supported engines are:
602
-
603
-
- `nixery`: This uses an instance of
604
-
[Nixery](https://nixery.dev) to run steps, which allows
605
-
you to add [dependencies](#dependencies) from
606
-
[Nixpkgs](https://github.com/NixOS/nixpkgs). You can
607
-
search for packages on https://search.nixos.org, and
608
-
there's a pretty good chance the package(s) you're looking
609
-
for will be there.
610
-
611
-
Example:
612
-
613
-
```yaml
614
-
engine: "nixery"
615
-
```
616
-
617
-
### Clone options
618
-
619
-
When a workflow starts, the first step is to clone the
620
-
repository. You can customize this behavior using the
621
-
**optional** `clone` field. It has the following fields:
622
-
623
-
- `skip`: Setting this to `true` will skip cloning the
624
-
repository. This can be useful if your workflow is doing
625
-
something that doesn't require anything from the
626
-
repository itself. This is `false` by default.
627
-
- `depth`: This sets the number of commits, or the "clone
628
-
depth", to fetch from the repository. For example, if you
629
-
set this to 2, the last 2 commits will be fetched. By
630
-
default, the depth is set to 1, meaning only the most
631
-
recent commit will be fetched, which is the commit that
632
-
triggered the workflow.
633
-
- `submodules`: If you use [git
634
-
submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules)
635
-
in your repository, setting this field to `true` will
636
-
recursively fetch all submodules. This is `false` by
637
-
default.
638
-
639
-
The default settings are:
640
-
641
-
```yaml
642
-
clone:
643
-
skip: false
644
-
depth: 1
645
-
submodules: false
646
-
```
647
-
648
-
### Dependencies
649
-
650
-
Usually when you're running a workflow, you'll need
651
-
additional dependencies. The `dependencies` field lets you
652
-
define which dependencies to get, and from where. It's a
653
-
key-value map, with the key being the registry to fetch
654
-
dependencies from, and the value being the list of
655
-
dependencies to fetch.
656
-
657
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a
658
-
package called `my_pkg` you've made from your own registry
659
-
at your repository at
660
-
`https://tangled.sh/@example.com/my_pkg`. You can define
661
-
those dependencies like so:
662
-
663
-
```yaml
664
-
dependencies:
665
-
# nixpkgs
666
-
nixpkgs:
667
-
- nodejs
668
-
- go
669
-
# custom registry
670
-
git+https://tangled.org/@example.com/my_pkg:
671
-
- my_pkg
672
-
```
673
-
674
-
Now these dependencies are available to use in your
675
-
workflow!
676
-
677
-
### Environment
678
-
679
-
The `environment` field allows you to define environment
680
-
variables that will be available throughout the entire
681
-
workflow. **Do not put secrets here, these environment
682
-
variables are visible to anyone viewing the repository. You
683
-
can add secrets for pipelines in your repository's
684
-
settings.**
685
-
686
-
Example:
687
-
688
-
```yaml
689
-
environment:
690
-
GOOS: "linux"
691
-
GOARCH: "arm64"
692
-
NODE_ENV: "production"
693
-
MY_ENV_VAR: "MY_ENV_VALUE"
694
-
```
695
-
696
-
### Steps
697
-
698
-
The `steps` field allows you to define what steps should run
699
-
in the workflow. It's a list of step objects, each with the
700
-
following fields:
701
-
702
-
- `name`: This field allows you to give your step a name.
703
-
This name is visible in your workflow runs, and is used to
704
-
describe what the step is doing.
705
-
- `command`: This field allows you to define a command to
706
-
run in that step. The step is run in a Bash shell, and the
707
-
logs from the command will be visible in the pipelines
708
-
page on the Tangled website. The
709
-
[dependencies](#dependencies) you added will be available
710
-
to use here.
711
-
- `environment`: Similar to the global
712
-
[environment](#environment) config, this **optional**
713
-
field is a key-value map that allows you to set
714
-
environment variables for the step. **Do not put secrets
715
-
here, these environment variables are visible to anyone
716
-
viewing the repository. You can add secrets for pipelines
717
-
in your repository's settings.**
718
-
719
-
Example:
720
-
721
-
```yaml
722
-
steps:
723
-
- name: "Build backend"
724
-
command: "go build"
725
-
environment:
726
-
GOOS: "darwin"
727
-
GOARCH: "arm64"
728
-
- name: "Build frontend"
729
-
command: "npm run build"
730
-
environment:
731
-
NODE_ENV: "production"
732
-
```
733
-
734
-
### Complete workflow
735
-
736
-
```yaml
737
-
# .tangled/workflows/build.yml
738
-
739
-
when:
740
-
- event: ["push", "manual"]
741
-
branch: ["main", "develop"]
742
-
- event: ["pull_request"]
743
-
branch: ["main"]
744
-
745
-
engine: "nixery"
746
-
747
-
# using the default values
748
-
clone:
749
-
skip: false
750
-
depth: 1
751
-
submodules: false
752
-
753
-
dependencies:
754
-
# nixpkgs
755
-
nixpkgs:
756
-
- nodejs
757
-
- go
758
-
# custom registry
759
-
git+https://tangled.org/@example.com/my_pkg:
760
-
- my_pkg
761
-
762
-
environment:
763
-
GOOS: "linux"
764
-
GOARCH: "arm64"
765
-
NODE_ENV: "production"
766
-
MY_ENV_VAR: "MY_ENV_VALUE"
767
-
768
-
steps:
769
-
- name: "Build backend"
770
-
command: "go build"
771
-
environment:
772
-
GOOS: "darwin"
773
-
GOARCH: "arm64"
774
-
- name: "Build frontend"
775
-
command: "npm run build"
776
-
environment:
777
-
NODE_ENV: "production"
778
-
```
779
-
780
-
If you want another example of a workflow, you can look at
781
-
the one [Tangled uses to build the
782
-
project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
783
-
784
-
## Self-hosting guide
785
-
786
-
### Prerequisites
787
-
788
-
* Go
789
-
* Docker (the only supported backend currently)
790
-
791
-
### Configuration
792
-
793
-
Spindle is configured using environment variables. The following environment variables are available:
794
-
795
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
796
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
797
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
798
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
799
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
800
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
801
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
802
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
803
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
804
-
805
-
### Running spindle
806
-
807
-
1. **Set the environment variables.** For example:
808
-
809
-
```shell
810
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
811
-
export SPINDLE_SERVER_OWNER="your-did"
812
-
```
813
-
814
-
2. **Build the Spindle binary.**
815
-
816
-
```shell
817
-
cd core
818
-
go mod download
819
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
820
-
```
821
-
822
-
3. **Create the log directory.**
823
-
824
-
```shell
825
-
sudo mkdir -p /var/log/spindle
826
-
sudo chown $USER:$USER -R /var/log/spindle
827
-
```
828
-
829
-
4. **Run the Spindle binary.**
830
-
831
-
```shell
832
-
./cmd/spindle/spindle
833
-
```
834
-
835
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
836
-
837
-
## Architecture
838
-
839
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
840
-
841
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
842
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
843
-
* when a new repo record comes through (typically when you add a spindle to a
844
-
repo from the settings), spindle then resolves the underlying knot and
845
-
subscribes to repo events (see:
846
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
847
-
* the spindle engine then handles execution of the pipeline, with results and
848
-
logs beamed on the spindle event stream over wss
849
-
850
-
### The engine
851
-
852
-
At present, the only supported backend is Docker (and Podman, if Docker
853
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
854
-
executes each step in the pipeline in a fresh container, with state persisted
855
-
across steps within the `/tangled/workspace` directory.
856
-
857
-
The base image for the container is constructed on the fly using
858
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
859
-
used packages.
860
-
861
-
The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
862
-
863
-
## Secrets with openbao
864
-
865
-
This document covers setting up Spindle to use OpenBao for secrets
866
-
management via OpenBao Proxy instead of the default SQLite backend.
867
-
868
-
### Overview
869
-
870
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
871
-
authentication automatically using AppRole credentials, while Spindle
872
-
connects to the local proxy instead of directly to the OpenBao server.
873
-
874
-
This approach provides better security, automatic token renewal, and
875
-
simplified application code.
876
-
877
-
### Installation
878
-
879
-
Install OpenBao from nixpkgs:
880
-
881
-
```bash
882
-
nix shell nixpkgs#openbao # for a local server
883
-
```
884
-
885
-
### Setup
886
-
887
-
The setup process is documented below for both local development and production.
888
-
889
-
#### Local development
890
-
891
-
Start OpenBao in dev mode:
892
-
893
-
```bash
894
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
895
-
```
896
-
897
-
This starts OpenBao on `http://localhost:8201` with a root token.
898
-
899
-
Set up environment for bao CLI:
900
-
901
-
```bash
902
-
export BAO_ADDR=http://localhost:8201
903
-
export BAO_TOKEN=root
904
-
```
905
-
906
-
#### Production
907
-
908
-
You would typically use a systemd service with a
909
-
configuration file. Refer to
910
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
911
-
for how this can be achieved using Nix.
912
-
913
-
Then, initialize the bao server:
914
-
915
-
```bash
916
-
bao operator init -key-shares=1 -key-threshold=1
917
-
```
918
-
919
-
This will print out an unseal key and a root key. Save them
920
-
somewhere (like a password manager). Then unseal the vault
921
-
to begin setting it up:
922
-
923
-
```bash
924
-
bao operator unseal <unseal_key>
925
-
```
926
-
927
-
All steps below remain the same across both dev and
928
-
production setups.
929
-
930
-
#### Configure openbao server
931
-
932
-
Create the spindle KV mount:
933
-
934
-
```bash
935
-
bao secrets enable -path=spindle -version=2 kv
936
-
```
937
-
938
-
Set up AppRole authentication and policy:
939
-
940
-
Create a policy file `spindle-policy.hcl`:
941
-
942
-
```hcl
943
-
# Full access to spindle KV v2 data
944
-
path "spindle/data/*" {
945
-
capabilities = ["create", "read", "update", "delete"]
946
-
}
947
-
948
-
# Access to metadata for listing and management
949
-
path "spindle/metadata/*" {
950
-
capabilities = ["list", "read", "delete", "update"]
951
-
}
952
-
953
-
# Allow listing at root level
954
-
path "spindle/" {
955
-
capabilities = ["list"]
956
-
}
957
-
958
-
# Required for connection testing and health checks
959
-
path "auth/token/lookup-self" {
960
-
capabilities = ["read"]
961
-
}
962
-
```
963
-
964
-
Apply the policy and create an AppRole:
965
-
966
-
```bash
967
-
bao policy write spindle-policy spindle-policy.hcl
968
-
bao auth enable approle
969
-
bao write auth/approle/role/spindle \
970
-
token_policies="spindle-policy" \
971
-
token_ttl=1h \
972
-
token_max_ttl=4h \
973
-
bind_secret_id=true \
974
-
secret_id_ttl=0 \
975
-
secret_id_num_uses=0
976
-
```
977
-
978
-
Get the credentials:
979
-
980
-
```bash
981
-
# Get role ID (static)
982
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
983
-
984
-
# Generate secret ID
985
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
986
-
987
-
echo "Role ID: $ROLE_ID"
988
-
echo "Secret ID: $SECRET_ID"
989
-
```
990
-
991
-
#### Create proxy configuration
992
-
993
-
Create the credential files:
994
-
995
-
```bash
996
-
# Create directory for OpenBao files
997
-
mkdir -p /tmp/openbao
998
-
999
-
# Save credentials
1000
-
echo "$ROLE_ID" > /tmp/openbao/role-id
1001
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
1002
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1003
-
```
1004
-
1005
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1006
-
1007
-
```hcl
1008
-
# OpenBao server connection
1009
-
vault {
1010
-
address = "http://localhost:8200"
1011
-
}
1012
-
1013
-
# Auto-Auth using AppRole
1014
-
auto_auth {
1015
-
method "approle" {
1016
-
mount_path = "auth/approle"
1017
-
config = {
1018
-
role_id_file_path = "/tmp/openbao/role-id"
1019
-
secret_id_file_path = "/tmp/openbao/secret-id"
1020
-
}
1021
-
}
1022
-
1023
-
# Optional: write token to file for debugging
1024
-
sink "file" {
1025
-
config = {
1026
-
path = "/tmp/openbao/token"
1027
-
mode = 0640
1028
-
}
1029
-
}
1030
-
}
1031
-
1032
-
# Proxy listener for Spindle
1033
-
listener "tcp" {
1034
-
address = "127.0.0.1:8201"
1035
-
tls_disable = true
1036
-
}
1037
-
1038
-
# Enable API proxy with auto-auth token
1039
-
api_proxy {
1040
-
use_auto_auth_token = true
1041
-
}
1042
-
1043
-
# Enable response caching
1044
-
cache {
1045
-
use_auto_auth_token = true
1046
-
}
1047
-
1048
-
# Logging
1049
-
log_level = "info"
1050
-
```
1051
-
1052
-
#### Start the proxy
1053
-
1054
-
Start OpenBao Proxy:
1055
-
1056
-
```bash
1057
-
bao proxy -config=/tmp/openbao/proxy.hcl
1058
-
```
1059
-
1060
-
The proxy will authenticate with OpenBao and start listening on
1061
-
`127.0.0.1:8201`.
1062
-
1063
-
#### Configure spindle
1064
-
1065
-
Set these environment variables for Spindle:
1066
-
1067
-
```bash
1068
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1069
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1070
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1071
-
```
1072
-
1073
-
On startup, the spindle will now connect to the local proxy,
1074
-
which handles all authentication automatically.
1075
-
1076
-
### Production setup for proxy
1077
-
1078
-
For production, you'll want to run the proxy as a service:
1079
-
1080
-
Place your production configuration in
1081
-
`/etc/openbao/proxy.hcl` with proper TLS settings for the
1082
-
vault connection.
1083
-
1084
-
### Verifying setup
1085
-
1086
-
Test the proxy directly:
1087
-
1088
-
```bash
1089
-
# Check proxy health
1090
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1091
-
1092
-
# Test token lookup through proxy
1093
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1094
-
```
1095
-
1096
-
Test OpenBao operations through the server:
1097
-
1098
-
```bash
1099
-
# List all secrets
1100
-
bao kv list spindle/
1101
-
1102
-
# Add a test secret via Spindle API, then check it exists
1103
-
bao kv list spindle/repos/
1104
-
1105
-
# Get a specific secret
1106
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
1107
-
```
1108
-
1109
-
### How it works
1110
-
1111
-
- Spindle connects to OpenBao Proxy on localhost (typically
1112
-
port 8200 or 8201)
1113
-
- The proxy authenticates with OpenBao using AppRole
1114
-
credentials
1115
-
- All Spindle requests go through the proxy, which injects
1116
-
authentication tokens
1117
-
- Secrets are stored at
1118
-
`spindle/repos/{sanitized_repo_path}/{secret_key}`
1119
-
- Repository paths like `did:plc:alice/myrepo` become
1120
-
`did_plc_alice_myrepo`
1121
-
- The proxy handles all token renewal automatically
1122
-
- Spindle no longer manages tokens or authentication
1123
-
directly
1124
-
1125
-
### Troubleshooting
1126
-
1127
-
**Connection refused**: Check that the OpenBao Proxy is
1128
-
running and listening on the configured address.
1129
-
1130
-
**403 errors**: Verify the AppRole credentials are correct
1131
-
and the policy has the necessary permissions.
1132
-
1133
-
**404 route errors**: The spindle KV mount probably doesn't
1134
-
exist - run the mount creation step again.
1135
-
1136
-
**Proxy authentication failures**: Check the proxy logs and
1137
-
verify the role-id and secret-id files are readable and
1138
-
contain valid credentials.
1139
-
1140
-
**Secret not found after writing**: This can indicate policy
1141
-
permission issues. Verify the policy includes both
1142
-
`spindle/data/*` and `spindle/metadata/*` paths with
1143
-
appropriate capabilities.
1144
-
1145
-
Check proxy logs:
1146
-
1147
-
```bash
1148
-
# If running as systemd service
1149
-
journalctl -u openbao-proxy -f
1150
-
1151
-
# If running directly, check the console output
1152
-
```
1153
-
1154
-
Test AppRole authentication manually:
1155
-
1156
-
```bash
1157
-
bao write auth/approle/login \
1158
-
role_id="$(cat /tmp/openbao/role-id)" \
1159
-
secret_id="$(cat /tmp/openbao/secret-id)"
1160
-
```
1161
-
1162
-
# Migrating knots & spindles
1163
-
1164
-
Sometimes, non-backwards compatible changes are made to the
1165
-
knot/spindle XRPC APIs. If you host a knot or a spindle, you
1166
-
will need to follow this guide to upgrade. Typically, this
1167
-
only requires you to deploy the newest version.
1168
-
1169
-
This document is laid out in reverse-chronological order.
1170
-
Newer migration guides are listed first, and older guides
1171
-
are further down the page.
1172
-
1173
-
## Upgrading from v1.8.x
1174
-
1175
-
After v1.8.2, the HTTP API for knot and spindles have been
1176
-
deprecated and replaced with XRPC. Repositories on outdated
1177
-
knots will not be viewable from the appview. Upgrading is
1178
-
straightforward however.
1179
-
1180
-
For knots:
1181
-
1182
-
- Upgrade to latest tag (v1.9.0 or above)
1183
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1184
-
hit the "retry" button to verify your knot
1185
-
1186
-
For spindles:
1187
-
1188
-
- Upgrade to latest tag (v1.9.0 or above)
1189
-
- Head to the [spindle
1190
-
dashboard](https://tangled.org/settings/spindles) and hit the
1191
-
"retry" button to verify your spindle
1192
-
1193
-
## Upgrading from v1.7.x
1194
-
1195
-
After v1.7.0, knot secrets have been deprecated. You no
1196
-
longer need a secret from the appview to run a knot. All
1197
-
authorized commands to knots are managed via [Inter-Service
1198
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
1199
-
Knots will be read-only until upgraded.
1200
-
1201
-
Upgrading is quite easy, in essence:
1202
-
1203
-
- `KNOT_SERVER_SECRET` is no more, you can remove this
1204
-
environment variable entirely
1205
-
- `KNOT_SERVER_OWNER` is now required on boot, set this to
1206
-
your DID. You can find your DID in the
1207
-
[settings](https://tangled.org/settings) page.
1208
-
- Restart your knot once you have replaced the environment
1209
-
variable
1210
-
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1211
-
hit the "retry" button to verify your knot. This simply
1212
-
writes a `sh.tangled.knot` record to your PDS.
1213
-
1214
-
If you use the nix module, simply bump the flake to the
1215
-
latest revision, and change your config block like so:
1216
-
1217
-
```diff
1218
-
services.tangled.knot = {
1219
-
enable = true;
1220
-
server = {
1221
-
- secretFile = /path/to/secret;
1222
-
+ owner = "did:plc:foo";
1223
-
};
1224
-
};
1225
-
```
1226
-
1227
-
# Hacking on Tangled
1228
-
1229
-
We highly recommend [installing
1230
-
nix](https://nixos.org/download/) (the package manager)
1231
-
before working on the codebase. The nix flake provides a lot
1232
-
of helpers to get started and most importantly, builds and
1233
-
dev shells are entirely deterministic.
1234
-
1235
-
To set up your dev environment:
1236
-
1237
-
```bash
1238
-
nix develop
1239
-
```
1240
-
1241
-
Non-nix users can look at the `devShell` attribute in the
1242
-
`flake.nix` file to determine necessary dependencies.
1243
-
1244
-
## Running the appview
1245
-
1246
-
The nix flake also exposes a few `app` attributes (run `nix
1247
-
flake show` to see a full list of what the flake provides),
1248
-
one of the apps runs the appview with the `air`
1249
-
live-reloader:
1250
-
1251
-
```bash
1252
-
TANGLED_DEV=true nix run .#watch-appview
1253
-
1254
-
# TANGLED_DB_PATH might be of interest to point to
1255
-
# different sqlite DBs
1256
-
1257
-
# in a separate shell, you can live-reload tailwind
1258
-
nix run .#watch-tailwind
1259
-
```
1260
-
1261
-
To authenticate with the appview, you will need redis and
1262
-
OAUTH JWKs to be setup:
1263
-
1264
-
```
1265
-
# oauth jwks should already be setup by the nix devshell:
1266
-
echo $TANGLED_OAUTH_CLIENT_SECRET
1267
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1268
-
1269
-
echo $TANGLED_OAUTH_CLIENT_KID
1270
-
1761667908
1271
-
1272
-
# if not, you can set it up yourself:
1273
-
goat key generate -t P-256
1274
-
Key Type: P-256 / secp256r1 / ES256 private key
1275
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1276
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1277
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1278
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1279
-
1280
-
# the secret key from above
1281
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1282
-
1283
-
# run redis in a new shell to store oauth sessions
1284
-
redis-server
1285
-
```
1286
-
1287
-
## Running knots and spindles
1288
-
1289
-
An end-to-end knot setup requires setting up a machine with
1290
-
`sshd`, `AuthorizedKeysCommand`, and a git user, which is
1291
-
quite cumbersome. So the nix flake provides a
1292
-
`nixosConfiguration` to do so.
1293
-
1294
-
<details>
1295
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
1296
-
1297
-
In order to build Tangled's dev VM on macOS, you will
1298
-
first need to set up a Linux Nix builder. The recommended
1299
-
way to do so is to run a [`darwin.linux-builder`
1300
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1301
-
and to register it in `nix.conf` as a builder for Linux
1302
-
with the same architecture as your Mac (`linux-aarch64` if
1303
-
you are using Apple Silicon).
1304
-
1305
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1306
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
1307
-
> you can do
1308
-
>
1309
-
> ```shell
1310
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1311
-
> ```
1312
-
>
1313
-
> to store the builder VM in a temporary dir.
1314
-
>
1315
-
> You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
1316
-
> avoid subtle problems.
1317
-
1318
-
Alternatively, you can use any other method to set up a
1319
-
Linux machine with `nix` installed that you can `sudo ssh`
1320
-
into (in other words, root user on your Mac has to be able
1321
-
to ssh into the Linux machine without entering a password)
1322
-
and that has the same architecture as your Mac. See
1323
-
[remote builder
1324
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1325
-
for how to register such a builder in `nix.conf`.
1326
-
1327
-
> WARNING: If you'd like to use
1328
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1329
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1330
-
> ssh` works can be tricky. It seems to be [possible with
1331
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1332
-
1333
-
</details>
1334
-
1335
-
To begin, grab your DID from http://localhost:3000/settings.
1336
-
Then, set `TANGLED_VM_KNOT_OWNER` and
1337
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1338
-
lightweight NixOS VM like so:
1339
-
1340
-
```bash
1341
-
nix run --impure .#vm
1342
-
1343
-
# type `poweroff` at the shell to exit the VM
1344
-
```
1345
-
1346
-
This starts a knot on port 6444, a spindle on port 6555
1347
-
with `ssh` exposed on port 2222.
1348
-
1349
-
Once the services are running, head to
1350
-
http://localhost:3000/settings/knots and hit verify. It should
1351
-
verify the ownership of the services instantly if everything
1352
-
went smoothly.
1353
-
1354
-
You can push repositories to this VM with this ssh config
1355
-
block on your main machine:
1356
-
1357
-
```bash
1358
-
Host nixos-shell
1359
-
Hostname localhost
1360
-
Port 2222
1361
-
User git
1362
-
IdentityFile ~/.ssh/my_tangled_key
1363
-
```
1364
-
1365
-
Set up a remote called `local-dev` on a git repo:
1366
-
1367
-
```bash
1368
-
git remote add local-dev git@nixos-shell:user/repo
1369
-
git push local-dev main
1370
-
```
1371
-
1372
-
The above VM should already be running a spindle on
1373
-
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
1374
-
hit verify. You can then configure each repository to use
1375
-
this spindle and run CI jobs.
1376
-
1377
-
Of interest when debugging spindles:
1378
-
1379
-
```
1380
-
# service logs from journald:
1381
-
journalctl -xeu spindle
1382
-
1383
-
# CI job logs from disk:
1384
-
ls /var/log/spindle
1385
-
1386
-
# debugging spindle db:
1387
-
sqlite3 /var/lib/spindle/spindle.db
1388
-
1389
-
# litecli has a nicer REPL interface:
1390
-
litecli /var/lib/spindle/spindle.db
1391
-
```
1392
-
1393
-
If for any reason you wish to disable either one of the
1394
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1395
-
`services.tangled.spindle.enable` (or
1396
-
`services.tangled.knot.enable`) to `false`.
1397
-
1398
-
# Contribution guide
1399
-
1400
-
## Commit guidelines
1401
-
1402
-
We follow a commit style similar to the Go project. Please keep commits:
1403
-
1404
-
* **atomic**: each commit should represent one logical change
1405
-
* **descriptive**: the commit message should clearly describe what the
1406
-
change does and why it's needed
1407
-
1408
-
### Message format
1409
-
1410
-
```
1411
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
1412
-
1413
-
Optional longer description can go here, if necessary. Explain what the
1414
-
change does and why, especially if not obvious. Reference relevant
1415
-
issues or PRs when applicable. These can be links for now since we don't
1416
-
auto-link issues/PRs yet.
1417
-
```
1418
-
1419
-
Here are some examples:
1420
-
1421
-
```
1422
-
appview/state: fix token expiry check in middleware
1423
-
1424
-
The previous check did not account for clock drift, leading to premature
1425
-
token invalidation.
1426
-
```
1427
-
1428
-
```
1429
-
knotserver/git/service: improve error checking in upload-pack
1430
-
```
1431
-
1432
-
1433
-
### General notes
1434
-
1435
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
1436
-
using `git am`. At present, there is no squashing -- so please author
1437
-
your commits as they would appear on `master`, following the above
1438
-
guidelines.
1439
-
- If there is a lot of nesting, for example "appview:
1440
-
pages/templates/repo/fragments: ...", these can be truncated down to
1441
-
just "appview: repo/fragments: ...". If the change affects a lot of
1442
-
subdirectories, you may abbreviate to just the top-level names, e.g.
1443
-
"appview: ..." or "knotserver: ...".
1444
-
- Keep commits lowercased with no trailing period.
1445
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
1446
-
"fixed bug" or "fixes bug").
1447
-
- Try to keep the summary line under 72 characters, but we aren't too
1448
-
fussed about this.
1449
-
- Follow the same formatting for PR titles if filled manually.
1450
-
- Don't include unrelated changes in the same commit.
1451
-
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
1452
-
before submitting if necessary.
1453
-
1454
-
## Code formatting
1455
-
1456
-
We use a variety of tools to format our code, and multiplex them with
1457
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
1458
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1459
-
1460
-
## Proposals for bigger changes
1461
-
1462
-
Small fixes like typos, minor bugs, or trivial refactors can be
1463
-
submitted directly as PRs.
1464
-
1465
-
For larger changes—especially those introducing new features, significant
1466
-
refactoring, or altering system behavior—please open a proposal first. This
1467
-
helps us evaluate the scope, design, and potential impact before implementation.
1468
-
1469
-
Create a new issue titled:
1470
-
1471
-
```
1472
-
proposal: <affected scope>: <summary of change>
1473
-
```
1474
-
1475
-
In the description, explain:
1476
-
1477
-
- What the change is
1478
-
- Why it's needed
1479
-
- How you plan to implement it (roughly)
1480
-
- Any open questions or tradeoffs
1481
-
1482
-
We'll use the issue thread to discuss and refine the idea before moving
1483
-
forward.
1484
-
1485
-
## Developer certificate of origin (DCO)
1486
-
1487
-
We require all contributors to certify that they have the right to
1488
-
submit the code they're contributing. To do this, we follow the
1489
-
[Developer Certificate of Origin
1490
-
(DCO)](https://developercertificate.org/).
1491
-
1492
-
By signing your commits, you're stating that the contribution is your
1493
-
own work, or that you have the right to submit it under the project's
1494
-
license. This helps us keep things clean and legally sound.
1495
-
1496
-
To sign your commit, just add the `-s` flag when committing:
1497
-
1498
-
```sh
1499
-
git commit -s -m "your commit message"
1500
-
```
1501
-
1502
-
This appends a line like:
1503
-
1504
-
```
1505
-
Signed-off-by: Your Name <your.email@example.com>
1506
-
```
1507
-
1508
-
We won't merge commits if they aren't signed off. If you forget, you can
1509
-
amend the last commit like this:
1510
-
1511
-
```sh
1512
-
git commit --amend -s
1513
-
```
1514
-
1515
-
If you're submitting a PR with multiple commits, make sure each one is
1516
-
signed.
1517
-
1518
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
1519
-
to make it sign off commits in the tangled repo:
1520
-
1521
-
```shell
1522
-
# Safety check, should say "No matching config key..."
1523
-
jj config list templates.commit_trailers
1524
-
# The command below may need to be adjusted if the command above returned something.
1525
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
1526
-
```
1527
-
1528
-
Refer to the [jujutsu
1529
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1530
-
for more information.
+136
docs/contributing.md
+136
docs/contributing.md
···
1
+
# tangled contributing guide
2
+
3
+
## commit guidelines
4
+
5
+
We follow a commit style similar to the Go project. Please keep commits:
6
+
7
+
* **atomic**: each commit should represent one logical change
8
+
* **descriptive**: the commit message should clearly describe what the
9
+
change does and why it's needed
10
+
11
+
### message format
12
+
13
+
```
14
+
<service/top-level directory>/<affected package/directory>: <short summary of change>
15
+
16
+
17
+
Optional longer description can go here, if necessary. Explain what the
18
+
change does and why, especially if not obvious. Reference relevant
19
+
issues or PRs when applicable. These can be links for now since we don't
20
+
auto-link issues/PRs yet.
21
+
```
22
+
23
+
Here are some examples:
24
+
25
+
```
26
+
appview/state: fix token expiry check in middleware
27
+
28
+
The previous check did not account for clock drift, leading to premature
29
+
token invalidation.
30
+
```
31
+
32
+
```
33
+
knotserver/git/service: improve error checking in upload-pack
34
+
```
35
+
36
+
37
+
### general notes
38
+
39
+
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40
+
using `git am`. At present, there is no squashing -- so please author
41
+
your commits as they would appear on `master`, following the above
42
+
guidelines.
43
+
- If there is a lot of nesting, for example "appview:
44
+
pages/templates/repo/fragments: ...", these can be truncated down to
45
+
just "appview: repo/fragments: ...". If the change affects a lot of
46
+
subdirectories, you may abbreviate to just the top-level names, e.g.
47
+
"appview: ..." or "knotserver: ...".
48
+
- Keep commits lowercased with no trailing period.
49
+
- Use the imperative mood in the summary line (e.g., "fix bug" not
50
+
"fixed bug" or "fixes bug").
51
+
- Try to keep the summary line under 72 characters, but we aren't too
52
+
fussed about this.
53
+
- Follow the same formatting for PR titles if filled manually.
54
+
- Don't include unrelated changes in the same commit.
55
+
- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
56
+
before submitting if necessary.
57
+
58
+
## code formatting
59
+
60
+
We use a variety of tools to format our code, and multiplex them with
61
+
[`treefmt`](https://treefmt.com): all you need to do to format your changes
62
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63
+
64
+
## proposals for bigger changes
65
+
66
+
Small fixes like typos, minor bugs, or trivial refactors can be
67
+
submitted directly as PRs.
68
+
69
+
For larger changes—especially those introducing new features, significant
70
+
refactoring, or altering system behavior—please open a proposal first. This
71
+
helps us evaluate the scope, design, and potential impact before implementation.
72
+
73
+
### proposal format
74
+
75
+
Create a new issue titled:
76
+
77
+
```
78
+
proposal: <affected scope>: <summary of change>
79
+
```
80
+
81
+
In the description, explain:
82
+
83
+
- What the change is
84
+
- Why it's needed
85
+
- How you plan to implement it (roughly)
86
+
- Any open questions or tradeoffs
87
+
88
+
We'll use the issue thread to discuss and refine the idea before moving
89
+
forward.
90
+
91
+
## developer certificate of origin (DCO)
92
+
93
+
We require all contributors to certify that they have the right to
94
+
submit the code they're contributing. To do this, we follow the
95
+
[Developer Certificate of Origin
96
+
(DCO)](https://developercertificate.org/).
97
+
98
+
By signing your commits, you're stating that the contribution is your
99
+
own work, or that you have the right to submit it under the project's
100
+
license. This helps us keep things clean and legally sound.
101
+
102
+
To sign your commit, just add the `-s` flag when committing:
103
+
104
+
```sh
105
+
git commit -s -m "your commit message"
106
+
```
107
+
108
+
This appends a line like:
109
+
110
+
```
111
+
Signed-off-by: Your Name <your.email@example.com>
112
+
```
113
+
114
+
We won't merge commits if they aren't signed off. If you forget, you can
115
+
amend the last commit like this:
116
+
117
+
```sh
118
+
git commit --amend -s
119
+
```
120
+
121
+
If you're submitting a PR with multiple commits, make sure each one is
122
+
signed.
123
+
124
+
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125
+
to make it sign off commits in the tangled repo:
126
+
127
+
```shell
128
+
# Safety check, should say "No matching config key..."
129
+
jj config list templates.commit_trailers
130
+
# The command below may need to be adjusted if the command above returned something.
131
+
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132
+
```
133
+
134
+
Refer to the [jj
135
+
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136
+
for more information.
+172
docs/hacking.md
+172
docs/hacking.md
···
1
+
# hacking on tangled
2
+
3
+
We highly recommend [installing
4
+
nix](https://nixos.org/download/) (the package manager)
5
+
before working on the codebase. The nix flake provides a lot
6
+
of helpers to get started and most importantly, builds and
7
+
dev shells are entirely deterministic.
8
+
9
+
To set up your dev environment:
10
+
11
+
```bash
12
+
nix develop
13
+
```
14
+
15
+
Non-nix users can look at the `devShell` attribute in the
16
+
`flake.nix` file to determine necessary dependencies.
17
+
18
+
## running the appview
19
+
20
+
The nix flake also exposes a few `app` attributes (run `nix
21
+
flake show` to see a full list of what the flake provides),
22
+
one of the apps runs the appview with the `air`
23
+
live-reloader:
24
+
25
+
```bash
26
+
TANGLED_DEV=true nix run .#watch-appview
27
+
28
+
# TANGLED_DB_PATH might be of interest to point to
29
+
# different sqlite DBs
30
+
31
+
# in a separate shell, you can live-reload tailwind
32
+
nix run .#watch-tailwind
33
+
```
34
+
35
+
To authenticate with the appview, you will need redis and
36
+
OAUTH JWKs to be setup:
37
+
38
+
```
39
+
# oauth jwks should already be setup by the nix devshell:
40
+
echo $TANGLED_OAUTH_CLIENT_SECRET
41
+
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42
+
43
+
echo $TANGLED_OAUTH_CLIENT_KID
44
+
1761667908
45
+
46
+
# if not, you can set it up yourself:
47
+
goat key generate -t P-256
48
+
Key Type: P-256 / secp256r1 / ES256 private key
49
+
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50
+
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51
+
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52
+
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53
+
54
+
# the secret key from above
55
+
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56
+
57
+
# run redis in a new shell to store oauth sessions
58
+
redis-server
59
+
```
60
+
61
+
## running knots and spindles
62
+
63
+
An end-to-end knot setup requires setting up a machine with
64
+
`sshd`, `AuthorizedKeysCommand`, and git user, which is
65
+
quite cumbersome. So the nix flake provides a
66
+
`nixosConfiguration` to do so.
67
+
68
+
<details>
69
+
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
70
+
71
+
In order to build Tangled's dev VM on macOS, you will
72
+
first need to set up a Linux Nix builder. The recommended
73
+
way to do so is to run a [`darwin.linux-builder`
74
+
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75
+
and to register it in `nix.conf` as a builder for Linux
76
+
with the same architecture as your Mac (`linux-aarch64` if
77
+
you are using Apple Silicon).
78
+
79
+
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80
+
> the tangled repo so that it doesn't conflict with the other VM. For example,
81
+
> you can do
82
+
>
83
+
> ```shell
84
+
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85
+
> ```
86
+
>
87
+
> to store the builder VM in a temporary dir.
88
+
>
89
+
> You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
90
+
> avoid subtle problems.
91
+
92
+
Alternatively, you can use any other method to set up a
93
+
Linux machine with `nix` installed that you can `sudo ssh`
94
+
into (in other words, root user on your Mac has to be able
95
+
to ssh into the Linux machine without entering a password)
96
+
and that has the same architecture as your Mac. See
97
+
[remote builder
98
+
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99
+
for how to register such a builder in `nix.conf`.
100
+
101
+
> WARNING: If you'd like to use
102
+
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103
+
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104
+
> ssh` works can be tricky. It seems to be [possible with
105
+
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106
+
107
+
</details>
108
+
109
+
To begin, grab your DID from http://localhost:3000/settings.
110
+
Then, set `TANGLED_VM_KNOT_OWNER` and
111
+
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112
+
lightweight NixOS VM like so:
113
+
114
+
```bash
115
+
nix run --impure .#vm
116
+
117
+
# type `poweroff` at the shell to exit the VM
118
+
```
119
+
120
+
This starts a knot on port 6444, a spindle on port 6555
121
+
with `ssh` exposed on port 2222.
122
+
123
+
Once the services are running, head to
124
+
http://localhost:3000/settings/knots and hit verify. It should
125
+
verify the ownership of the services instantly if everything
126
+
went smoothly.
127
+
128
+
You can push repositories to this VM with this ssh config
129
+
block on your main machine:
130
+
131
+
```bash
132
+
Host nixos-shell
133
+
Hostname localhost
134
+
Port 2222
135
+
User git
136
+
IdentityFile ~/.ssh/my_tangled_key
137
+
```
138
+
139
+
Set up a remote called `local-dev` on a git repo:
140
+
141
+
```bash
142
+
git remote add local-dev git@nixos-shell:user/repo
143
+
git push local-dev main
144
+
```
145
+
146
+
### running a spindle
147
+
148
+
The above VM should already be running a spindle on
149
+
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150
+
hit verify. You can then configure each repository to use
151
+
this spindle and run CI jobs.
152
+
153
+
Of interest when debugging spindles:
154
+
155
+
```
156
+
# service logs from journald:
157
+
journalctl -xeu spindle
158
+
159
+
# CI job logs from disk:
160
+
ls /var/log/spindle
161
+
162
+
# debugging spindle db:
163
+
sqlite3 /var/lib/spindle/spindle.db
164
+
165
+
# litecli has a nicer REPL interface:
166
+
litecli /var/lib/spindle/spindle.db
167
+
```
168
+
169
+
If for any reason you wish to disable either one of the
170
+
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171
+
`services.tangled.spindle.enable` (or
172
+
`services.tangled.knot.enable`) to `false`.
-93
docs/highlight.theme
-93
docs/highlight.theme
···
1
-
{
2
-
"text-color": null,
3
-
"background-color": null,
4
-
"line-number-color": null,
5
-
"line-number-background-color": null,
6
-
"text-styles": {
7
-
"Annotation": {
8
-
"text-color": null,
9
-
"background-color": null,
10
-
"bold": false,
11
-
"italic": true,
12
-
"underline": false
13
-
},
14
-
"ControlFlow": {
15
-
"text-color": null,
16
-
"background-color": null,
17
-
"bold": true,
18
-
"italic": false,
19
-
"underline": false
20
-
},
21
-
"Error": {
22
-
"text-color": null,
23
-
"background-color": null,
24
-
"bold": true,
25
-
"italic": false,
26
-
"underline": false
27
-
},
28
-
"Alert": {
29
-
"text-color": null,
30
-
"background-color": null,
31
-
"bold": true,
32
-
"italic": false,
33
-
"underline": false
34
-
},
35
-
"Preprocessor": {
36
-
"text-color": null,
37
-
"background-color": null,
38
-
"bold": true,
39
-
"italic": false,
40
-
"underline": false
41
-
},
42
-
"Information": {
43
-
"text-color": null,
44
-
"background-color": null,
45
-
"bold": false,
46
-
"italic": true,
47
-
"underline": false
48
-
},
49
-
"Warning": {
50
-
"text-color": null,
51
-
"background-color": null,
52
-
"bold": false,
53
-
"italic": true,
54
-
"underline": false
55
-
},
56
-
"Documentation": {
57
-
"text-color": null,
58
-
"background-color": null,
59
-
"bold": false,
60
-
"italic": true,
61
-
"underline": false
62
-
},
63
-
"DataType": {
64
-
"text-color": "#8f4e8b",
65
-
"background-color": null,
66
-
"bold": false,
67
-
"italic": false,
68
-
"underline": false
69
-
},
70
-
"Comment": {
71
-
"text-color": null,
72
-
"background-color": null,
73
-
"bold": false,
74
-
"italic": true,
75
-
"underline": false
76
-
},
77
-
"CommentVar": {
78
-
"text-color": null,
79
-
"background-color": null,
80
-
"bold": false,
81
-
"italic": true,
82
-
"underline": false
83
-
},
84
-
"Keyword": {
85
-
"text-color": null,
86
-
"background-color": null,
87
-
"bold": true,
88
-
"italic": false,
89
-
"underline": false
90
-
}
91
-
}
92
-
}
93
-
+214
docs/knot-hosting.md
+214
docs/knot-hosting.md
···
1
+
# knot self-hosting guide
2
+
3
+
So you want to run your own knot server? Great! Here are a few prerequisites:
4
+
5
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6
+
2. A (sub)domain name. People generally use `knot.example.com`.
7
+
3. A valid SSL certificate for your domain.
8
+
9
+
There's a couple of ways to get started:
10
+
* NixOS: refer to
11
+
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12
+
* Docker: Documented at
13
+
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14
+
(community maintained: support is not guaranteed!)
15
+
* Manual: Documented below.
16
+
17
+
## manual setup
18
+
19
+
First, clone this repository:
20
+
21
+
```
22
+
git clone https://tangled.org/@tangled.org/core
23
+
```
24
+
25
+
Then, build the `knot` CLI. This is the knot administration and operation tool.
26
+
For the purpose of this guide, we're only concerned with these subcommands:
27
+
28
+
* `knot server`: the main knot server process, typically run as a
29
+
supervised service
30
+
* `knot guard`: handles role-based access control for git over SSH
31
+
(you'll never have to run this yourself)
32
+
* `knot keys`: fetches SSH keys associated with your knot; we'll use
33
+
this to generate the SSH `AuthorizedKeysCommand`
34
+
35
+
```
36
+
cd core
37
+
export CGO_ENABLED=1
38
+
go build -o knot ./cmd/knot
39
+
```
40
+
41
+
Next, move the `knot` binary to a location owned by `root` --
42
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43
+
44
+
```
45
+
sudo mv knot /usr/local/bin/knot
46
+
sudo chown root:root /usr/local/bin/knot
47
+
```
48
+
49
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
50
+
specific permissions](https://stackoverflow.com/a/27638306). The
51
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52
+
retrieve a user's public SSH keys dynamically for authentication. Let's
53
+
set that up.
54
+
55
+
```
56
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57
+
Match User git
58
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59
+
AuthorizedKeysCommandUser nobody
60
+
EOF
61
+
```
62
+
63
+
Then, reload `sshd`:
64
+
65
+
```
66
+
sudo systemctl reload ssh
67
+
```
68
+
69
+
Next, create the `git` user. We'll use the `git` user's home directory
70
+
to store repositories:
71
+
72
+
```
73
+
sudo adduser git
74
+
```
75
+
76
+
Create `/home/git/.knot.env` with the following, updating the values as
77
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
78
+
DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
79
+
80
+
```
81
+
KNOT_REPO_SCAN_PATH=/home/git
82
+
KNOT_SERVER_HOSTNAME=knot.example.com
83
+
APPVIEW_ENDPOINT=https://tangled.sh
84
+
KNOT_SERVER_OWNER=did:plc:foobar
85
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87
+
```
88
+
89
+
If you run a Linux distribution that uses systemd, you can use the provided
90
+
service file to run the server. Copy
91
+
[`knotserver.service`](/systemd/knotserver.service)
92
+
to `/etc/systemd/system/`. Then, run:
93
+
94
+
```
95
+
systemctl enable knotserver
96
+
systemctl start knotserver
97
+
```
98
+
99
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
100
+
knot. Here's an example configuration for Nginx:
101
+
102
+
```
103
+
server {
104
+
listen 80;
105
+
listen [::]:80;
106
+
server_name knot.example.com;
107
+
108
+
location / {
109
+
proxy_pass http://localhost:5555;
110
+
proxy_set_header Host $host;
111
+
proxy_set_header X-Real-IP $remote_addr;
112
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113
+
proxy_set_header X-Forwarded-Proto $scheme;
114
+
}
115
+
116
+
# wss endpoint for git events
117
+
location /events {
118
+
proxy_set_header X-Forwarded-For $remote_addr;
119
+
proxy_set_header Host $http_host;
120
+
proxy_set_header Upgrade websocket;
121
+
proxy_set_header Connection Upgrade;
122
+
proxy_pass http://localhost:5555;
123
+
}
124
+
# additional config for SSL/TLS go here.
125
+
}
126
+
127
+
```
128
+
129
+
Remember to use Let's Encrypt or similar to procure a certificate for your
130
+
knot domain.
131
+
132
+
You should now have a running knot server! You can finalize
133
+
your registration by hitting the `verify` button on the
134
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135
+
a record on your PDS to announce the existence of the knot.
136
+
137
+
### custom paths
138
+
139
+
(This section applies to manual setup only. Docker users should edit the mounts
140
+
in `docker-compose.yml` instead.)
141
+
142
+
Right now, the database and repositories of your knot lives in `/home/git`. You
143
+
can move these paths if you'd like to store them in another folder. Be careful
144
+
when adjusting these paths:
145
+
146
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147
+
any possible side effects. Remember to restart it once you're done.
148
+
* Make backups before moving in case something goes wrong.
149
+
* Make sure the `git` user can read and write from the new paths.
150
+
151
+
#### database
152
+
153
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
154
+
and we want to move it to `/home/git/database/knotserver.db`.
155
+
156
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
157
+
and `.db-wal` files if they exist.
158
+
159
+
```
160
+
mkdir /home/git/database
161
+
cp /home/git/knotserver.db* /home/git/database
162
+
```
163
+
164
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165
+
the new file path (_not_ the directory):
166
+
167
+
```
168
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169
+
```
170
+
171
+
#### repositories
172
+
173
+
As an example, let's say the repositories are currently in `/home/git`, and we
174
+
want to move them into `/home/git/repositories`.
175
+
176
+
Create the new folder, then move the existing repositories (if there are any):
177
+
178
+
```
179
+
mkdir /home/git/repositories
180
+
# move all DIDs into the new folder; these will vary for you!
181
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182
+
```
183
+
184
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185
+
to the new directory:
186
+
187
+
```
188
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
189
+
```
190
+
191
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192
+
repository path:
193
+
194
+
```
195
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196
+
Match User git
197
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198
+
AuthorizedKeysCommandUser nobody
199
+
EOF
200
+
```
201
+
202
+
Make sure to restart your SSH server!
203
+
204
+
#### MOTD (message of the day)
205
+
206
+
To configure the MOTD used ("Welcome to this knot!" by default), edit the
207
+
`/home/git/motd` file:
208
+
209
+
```
210
+
printf "Hi from this knot!\n" > /home/git/motd
211
+
```
212
+
213
+
Note that you should add a newline at the end if setting a non-empty message
214
+
since the knot won't do this for you.
+59
docs/migrations.md
+59
docs/migrations.md
···
1
+
# Migrations
2
+
3
+
This document is laid out in reverse-chronological order.
4
+
Newer migration guides are listed first, and older guides
5
+
are further down the page.
6
+
7
+
## Upgrading from v1.8.x
8
+
9
+
After v1.8.2, the HTTP API for knot and spindles have been
10
+
deprecated and replaced with XRPC. Repositories on outdated
11
+
knots will not be viewable from the appview. Upgrading is
12
+
straightforward however.
13
+
14
+
For knots:
15
+
16
+
- Upgrade to latest tag (v1.9.0 or above)
17
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18
+
hit the "retry" button to verify your knot
19
+
20
+
For spindles:
21
+
22
+
- Upgrade to latest tag (v1.9.0 or above)
23
+
- Head to the [spindle
24
+
dashboard](https://tangled.org/settings/spindles) and hit the
25
+
"retry" button to verify your spindle
26
+
27
+
## Upgrading from v1.7.x
28
+
29
+
After v1.7.0, knot secrets have been deprecated. You no
30
+
longer need a secret from the appview to run a knot. All
31
+
authorized commands to knots are managed via [Inter-Service
32
+
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33
+
Knots will be read-only until upgraded.
34
+
35
+
Upgrading is quite easy, in essence:
36
+
37
+
- `KNOT_SERVER_SECRET` is no more, you can remove this
38
+
environment variable entirely
39
+
- `KNOT_SERVER_OWNER` is now required on boot, set this to
40
+
your DID. You can find your DID in the
41
+
[settings](https://tangled.org/settings) page.
42
+
- Restart your knot once you have replaced the environment
43
+
variable
44
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45
+
hit the "retry" button to verify your knot. This simply
46
+
writes a `sh.tangled.knot` record to your PDS.
47
+
48
+
If you use the nix module, simply bump the flake to the
49
+
latest revision, and change your config block like so:
50
+
51
+
```diff
52
+
services.tangled.knot = {
53
+
enable = true;
54
+
server = {
55
+
- secretFile = /path/to/secret;
56
+
+ owner = "did:plc:foo";
57
+
};
58
+
};
59
+
```
+25
docs/spindle/architecture.md
+25
docs/spindle/architecture.md
···
1
+
# spindle architecture
2
+
3
+
Spindle is a small CI runner service. Here's a high level overview of how it operates:
4
+
5
+
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7
+
* when a new repo record comes through (typically when you add a spindle to a
8
+
repo from the settings), spindle then resolves the underlying knot and
9
+
subscribes to repo events (see:
10
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11
+
* the spindle engine then handles execution of the pipeline, with results and
12
+
logs beamed on the spindle event stream over wss
13
+
14
+
### the engine
15
+
16
+
At present, the only supported backend is Docker (and Podman, if Docker
17
+
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18
+
executes each step in the pipeline in a fresh container, with state persisted
19
+
across steps within the `/tangled/workspace` directory.
20
+
21
+
The base image for the container is constructed on the fly using
22
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23
+
used packages.
24
+
25
+
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
+52
docs/spindle/hosting.md
+52
docs/spindle/hosting.md
···
1
+
# spindle self-hosting guide
2
+
3
+
## prerequisites
4
+
5
+
* Go
6
+
* Docker (the only supported backend currently)
7
+
8
+
## configuration
9
+
10
+
Spindle is configured using environment variables. The following environment variables are available:
11
+
12
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21
+
22
+
## running spindle
23
+
24
+
1. **Set the environment variables.** For example:
25
+
26
+
```shell
27
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
28
+
export SPINDLE_SERVER_OWNER="your-did"
29
+
```
30
+
31
+
2. **Build the Spindle binary.**
32
+
33
+
```shell
34
+
cd core
35
+
go mod download
36
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
37
+
```
38
+
39
+
3. **Create the log directory.**
40
+
41
+
```shell
42
+
sudo mkdir -p /var/log/spindle
43
+
sudo chown $USER:$USER -R /var/log/spindle
44
+
```
45
+
46
+
4. **Run the Spindle binary.**
47
+
48
+
```shell
49
+
./cmd/spindle/spindle
50
+
```
51
+
52
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
+285
docs/spindle/openbao.md
+285
docs/spindle/openbao.md
···
1
+
# spindle secrets with openbao
2
+
3
+
This document covers setting up Spindle to use OpenBao for secrets
4
+
management via OpenBao Proxy instead of the default SQLite backend.
5
+
6
+
## overview
7
+
8
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9
+
authentication automatically using AppRole credentials, while Spindle
10
+
connects to the local proxy instead of directly to the OpenBao server.
11
+
12
+
This approach provides better security, automatic token renewal, and
13
+
simplified application code.
14
+
15
+
## installation
16
+
17
+
Install OpenBao from nixpkgs:
18
+
19
+
```bash
20
+
nix shell nixpkgs#openbao # for a local server
21
+
```
22
+
23
+
## setup
24
+
25
+
The setup process is documented for both local development and production.
26
+
27
+
### local development
28
+
29
+
Start OpenBao in dev mode:
30
+
31
+
```bash
32
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33
+
```
34
+
35
+
This starts OpenBao on `http://localhost:8201` with a root token.
36
+
37
+
Set up environment for bao CLI:
38
+
39
+
```bash
40
+
export BAO_ADDR=http://localhost:8201
41
+
export BAO_TOKEN=root
42
+
```
43
+
44
+
### production
45
+
46
+
You would typically use a systemd service with a configuration file. Refer to
47
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48
+
achieved using Nix.
49
+
50
+
Then, initialize the bao server:
51
+
```bash
52
+
bao operator init -key-shares=1 -key-threshold=1
53
+
```
54
+
55
+
This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56
+
```bash
57
+
bao operator unseal <unseal_key>
58
+
```
59
+
60
+
All steps below remain the same across both dev and production setups.
61
+
62
+
### configure openbao server
63
+
64
+
Create the spindle KV mount:
65
+
66
+
```bash
67
+
bao secrets enable -path=spindle -version=2 kv
68
+
```
69
+
70
+
Set up AppRole authentication and policy:
71
+
72
+
Create a policy file `spindle-policy.hcl`:
73
+
74
+
```hcl
75
+
# Full access to spindle KV v2 data
76
+
path "spindle/data/*" {
77
+
capabilities = ["create", "read", "update", "delete"]
78
+
}
79
+
80
+
# Access to metadata for listing and management
81
+
path "spindle/metadata/*" {
82
+
capabilities = ["list", "read", "delete", "update"]
83
+
}
84
+
85
+
# Allow listing at root level
86
+
path "spindle/" {
87
+
capabilities = ["list"]
88
+
}
89
+
90
+
# Required for connection testing and health checks
91
+
path "auth/token/lookup-self" {
92
+
capabilities = ["read"]
93
+
}
94
+
```
95
+
96
+
Apply the policy and create an AppRole:
97
+
98
+
```bash
99
+
bao policy write spindle-policy spindle-policy.hcl
100
+
bao auth enable approle
101
+
bao write auth/approle/role/spindle \
102
+
token_policies="spindle-policy" \
103
+
token_ttl=1h \
104
+
token_max_ttl=4h \
105
+
bind_secret_id=true \
106
+
secret_id_ttl=0 \
107
+
secret_id_num_uses=0
108
+
```
109
+
110
+
Get the credentials:
111
+
112
+
```bash
113
+
# Get role ID (static)
114
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115
+
116
+
# Generate secret ID
117
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118
+
119
+
echo "Role ID: $ROLE_ID"
120
+
echo "Secret ID: $SECRET_ID"
121
+
```
122
+
123
+
### create proxy configuration
124
+
125
+
Create the credential files:
126
+
127
+
```bash
128
+
# Create directory for OpenBao files
129
+
mkdir -p /tmp/openbao
130
+
131
+
# Save credentials
132
+
echo "$ROLE_ID" > /tmp/openbao/role-id
133
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
134
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135
+
```
136
+
137
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138
+
139
+
```hcl
140
+
# OpenBao server connection
141
+
vault {
142
+
address = "http://localhost:8200"
143
+
}
144
+
145
+
# Auto-Auth using AppRole
146
+
auto_auth {
147
+
method "approle" {
148
+
mount_path = "auth/approle"
149
+
config = {
150
+
role_id_file_path = "/tmp/openbao/role-id"
151
+
secret_id_file_path = "/tmp/openbao/secret-id"
152
+
}
153
+
}
154
+
155
+
# Optional: write token to file for debugging
156
+
sink "file" {
157
+
config = {
158
+
path = "/tmp/openbao/token"
159
+
mode = 0640
160
+
}
161
+
}
162
+
}
163
+
164
+
# Proxy listener for Spindle
165
+
listener "tcp" {
166
+
address = "127.0.0.1:8201"
167
+
tls_disable = true
168
+
}
169
+
170
+
# Enable API proxy with auto-auth token
171
+
api_proxy {
172
+
use_auto_auth_token = true
173
+
}
174
+
175
+
# Enable response caching
176
+
cache {
177
+
use_auto_auth_token = true
178
+
}
179
+
180
+
# Logging
181
+
log_level = "info"
182
+
```
183
+
184
+
### start the proxy
185
+
186
+
Start OpenBao Proxy:
187
+
188
+
```bash
189
+
bao proxy -config=/tmp/openbao/proxy.hcl
190
+
```
191
+
192
+
The proxy will authenticate with OpenBao and start listening on
193
+
`127.0.0.1:8201`.
194
+
195
+
### configure spindle
196
+
197
+
Set these environment variables for Spindle:
198
+
199
+
```bash
200
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203
+
```
204
+
205
+
Start Spindle:
206
+
207
+
Spindle will now connect to the local proxy, which handles all
208
+
authentication automatically.
209
+
210
+
## production setup for proxy
211
+
212
+
For production, you'll want to run the proxy as a service:
213
+
214
+
Place your production configuration in `/etc/openbao/proxy.hcl` with
215
+
proper TLS settings for the vault connection.
216
+
217
+
## verifying setup
218
+
219
+
Test the proxy directly:
220
+
221
+
```bash
222
+
# Check proxy health
223
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224
+
225
+
# Test token lookup through proxy
226
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227
+
```
228
+
229
+
Test OpenBao operations through the server:
230
+
231
+
```bash
232
+
# List all secrets
233
+
bao kv list spindle/
234
+
235
+
# Add a test secret via Spindle API, then check it exists
236
+
bao kv list spindle/repos/
237
+
238
+
# Get a specific secret
239
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
240
+
```
241
+
242
+
## how it works
243
+
244
+
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245
+
- The proxy authenticates with OpenBao using AppRole credentials
246
+
- All Spindle requests go through the proxy, which injects authentication tokens
247
+
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248
+
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249
+
- The proxy handles all token renewal automatically
250
+
- Spindle no longer manages tokens or authentication directly
251
+
252
+
## troubleshooting
253
+
254
+
**Connection refused**: Check that the OpenBao Proxy is running and
255
+
listening on the configured address.
256
+
257
+
**403 errors**: Verify the AppRole credentials are correct and the policy
258
+
has the necessary permissions.
259
+
260
+
**404 route errors**: The spindle KV mount probably doesn't exist - run
261
+
the mount creation step again.
262
+
263
+
**Proxy authentication failures**: Check the proxy logs and verify the
264
+
role-id and secret-id files are readable and contain valid credentials.
265
+
266
+
**Secret not found after writing**: This can indicate policy permission
267
+
issues. Verify the policy includes both `spindle/data/*` and
268
+
`spindle/metadata/*` paths with appropriate capabilities.
269
+
270
+
Check proxy logs:
271
+
272
+
```bash
273
+
# If running as systemd service
274
+
journalctl -u openbao-proxy -f
275
+
276
+
# If running directly, check the console output
277
+
```
278
+
279
+
Test AppRole authentication manually:
280
+
281
+
```bash
282
+
bao write auth/approle/login \
283
+
role_id="$(cat /tmp/openbao/role-id)" \
284
+
secret_id="$(cat /tmp/openbao/secret-id)"
285
+
```
+183
docs/spindle/pipeline.md
+183
docs/spindle/pipeline.md
···
1
+
# spindle pipelines
2
+
3
+
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4
+
5
+
The fields are:
6
+
7
+
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8
+
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9
+
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10
+
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11
+
- [Environment](#environment): An **optional** field that allows you to define environment variables.
12
+
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13
+
14
+
## Trigger
15
+
16
+
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17
+
18
+
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19
+
- `push`: The workflow should run every time a commit is pushed to the repository.
20
+
- `pull_request`: The workflow should run every time a pull request is made or updated.
21
+
- `manual`: The workflow can be triggered manually.
22
+
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23
+
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24
+
25
+
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26
+
27
+
```yaml
28
+
when:
29
+
- event: ["push", "manual"]
30
+
branch: ["main", "develop"]
31
+
- event: ["pull_request"]
32
+
branch: ["main"]
33
+
```
34
+
35
+
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36
+
37
+
```yaml
38
+
when:
39
+
- event: ["push"]
40
+
tag: ["v*"]
41
+
```
42
+
43
+
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44
+
45
+
```yaml
46
+
when:
47
+
- event: ["push"]
48
+
branch: ["main", "release-*"]
49
+
tag: ["v*", "stable"]
50
+
```
51
+
52
+
## Engine
53
+
54
+
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55
+
56
+
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57
+
58
+
Example:
59
+
60
+
```yaml
61
+
engine: "nixery"
62
+
```
63
+
64
+
## Clone options
65
+
66
+
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67
+
68
+
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69
+
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70
+
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71
+
72
+
The default settings are:
73
+
74
+
```yaml
75
+
clone:
76
+
skip: false
77
+
depth: 1
78
+
submodules: false
79
+
```
80
+
81
+
## Dependencies
82
+
83
+
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84
+
85
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made, from your own registry at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86
+
87
+
```yaml
88
+
dependencies:
89
+
# nixpkgs
90
+
nixpkgs:
91
+
- nodejs
92
+
- go
93
+
# custom registry
94
+
git+https://tangled.org/@example.com/my_pkg:
95
+
- my_pkg
96
+
```
97
+
98
+
Now these dependencies are available to use in your workflow!
99
+
100
+
## Environment
101
+
102
+
The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103
+
104
+
Example:
105
+
106
+
```yaml
107
+
environment:
108
+
GOOS: "linux"
109
+
GOARCH: "arm64"
110
+
NODE_ENV: "production"
111
+
MY_ENV_VAR: "MY_ENV_VALUE"
112
+
```
113
+
114
+
## Steps
115
+
116
+
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117
+
118
+
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119
+
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120
+
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121
+
122
+
Example:
123
+
124
+
```yaml
125
+
steps:
126
+
- name: "Build backend"
127
+
command: "go build"
128
+
environment:
129
+
GOOS: "darwin"
130
+
GOARCH: "arm64"
131
+
- name: "Build frontend"
132
+
command: "npm run build"
133
+
environment:
134
+
NODE_ENV: "production"
135
+
```
136
+
137
+
## Complete workflow
138
+
139
+
```yaml
140
+
# .tangled/workflows/build.yml
141
+
142
+
when:
143
+
- event: ["push", "manual"]
144
+
branch: ["main", "develop"]
145
+
- event: ["pull_request"]
146
+
branch: ["main"]
147
+
148
+
engine: "nixery"
149
+
150
+
# using the default values
151
+
clone:
152
+
skip: false
153
+
depth: 1
154
+
submodules: false
155
+
156
+
dependencies:
157
+
# nixpkgs
158
+
nixpkgs:
159
+
- nodejs
160
+
- go
161
+
# custom registry
162
+
git+https://tangled.org/@example.com/my_pkg:
163
+
- my_pkg
164
+
165
+
environment:
166
+
GOOS: "linux"
167
+
GOARCH: "arm64"
168
+
NODE_ENV: "production"
169
+
MY_ENV_VAR: "MY_ENV_VALUE"
170
+
171
+
steps:
172
+
- name: "Build backend"
173
+
command: "go build"
174
+
environment:
175
+
GOOS: "darwin"
176
+
GOARCH: "arm64"
177
+
- name: "Build frontend"
178
+
command: "npm run build"
179
+
environment:
180
+
NODE_ENV: "production"
181
+
```
182
+
183
+
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
-101
docs/styles.css
-101
docs/styles.css
···
1
-
svg {
2
-
width: 16px;
3
-
height: 16px;
4
-
}
5
-
6
-
:root {
7
-
--syntax-alert: #d20f39;
8
-
--syntax-annotation: #fe640b;
9
-
--syntax-attribute: #df8e1d;
10
-
--syntax-basen: #40a02b;
11
-
--syntax-builtin: #1e66f5;
12
-
--syntax-controlflow: #8839ef;
13
-
--syntax-char: #04a5e5;
14
-
--syntax-constant: #fe640b;
15
-
--syntax-comment: #9ca0b0;
16
-
--syntax-commentvar: #7c7f93;
17
-
--syntax-documentation: #9ca0b0;
18
-
--syntax-datatype: #df8e1d;
19
-
--syntax-decval: #40a02b;
20
-
--syntax-error: #d20f39;
21
-
--syntax-extension: #4c4f69;
22
-
--syntax-float: #40a02b;
23
-
--syntax-function: #1e66f5;
24
-
--syntax-import: #40a02b;
25
-
--syntax-information: #04a5e5;
26
-
--syntax-keyword: #8839ef;
27
-
--syntax-operator: #179299;
28
-
--syntax-other: #8839ef;
29
-
--syntax-preprocessor: #ea76cb;
30
-
--syntax-specialchar: #04a5e5;
31
-
--syntax-specialstring: #ea76cb;
32
-
--syntax-string: #40a02b;
33
-
--syntax-variable: #8839ef;
34
-
--syntax-verbatimstring: #40a02b;
35
-
--syntax-warning: #df8e1d;
36
-
}
37
-
38
-
@media (prefers-color-scheme: dark) {
39
-
:root {
40
-
--syntax-alert: #f38ba8;
41
-
--syntax-annotation: #fab387;
42
-
--syntax-attribute: #f9e2af;
43
-
--syntax-basen: #a6e3a1;
44
-
--syntax-builtin: #89b4fa;
45
-
--syntax-controlflow: #cba6f7;
46
-
--syntax-char: #89dceb;
47
-
--syntax-constant: #fab387;
48
-
--syntax-comment: #6c7086;
49
-
--syntax-commentvar: #585b70;
50
-
--syntax-documentation: #6c7086;
51
-
--syntax-datatype: #f9e2af;
52
-
--syntax-decval: #a6e3a1;
53
-
--syntax-error: #f38ba8;
54
-
--syntax-extension: #cdd6f4;
55
-
--syntax-float: #a6e3a1;
56
-
--syntax-function: #89b4fa;
57
-
--syntax-import: #a6e3a1;
58
-
--syntax-information: #89dceb;
59
-
--syntax-keyword: #cba6f7;
60
-
--syntax-operator: #94e2d5;
61
-
--syntax-other: #cba6f7;
62
-
--syntax-preprocessor: #f5c2e7;
63
-
--syntax-specialchar: #89dceb;
64
-
--syntax-specialstring: #f5c2e7;
65
-
--syntax-string: #a6e3a1;
66
-
--syntax-variable: #cba6f7;
67
-
--syntax-verbatimstring: #a6e3a1;
68
-
--syntax-warning: #f9e2af;
69
-
}
70
-
}
71
-
72
-
/* pandoc syntax highlighting classes */
73
-
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
74
-
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
75
-
code span.at { color: var(--syntax-attribute); } /* attribute */
76
-
code span.bn { color: var(--syntax-basen); } /* basen */
77
-
code span.bu { color: var(--syntax-builtin); } /* builtin */
78
-
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
79
-
code span.ch { color: var(--syntax-char); } /* char */
80
-
code span.cn { color: var(--syntax-constant); } /* constant */
81
-
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
82
-
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
83
-
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
84
-
code span.dt { color: var(--syntax-datatype); } /* datatype */
85
-
code span.dv { color: var(--syntax-decval); } /* decval */
86
-
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
87
-
code span.ex { color: var(--syntax-extension); } /* extension */
88
-
code span.fl { color: var(--syntax-float); } /* float */
89
-
code span.fu { color: var(--syntax-function); } /* function */
90
-
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
91
-
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
92
-
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
93
-
code span.op { color: var(--syntax-operator); } /* operator */
94
-
code span.ot { color: var(--syntax-other); } /* other */
95
-
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
96
-
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
97
-
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
98
-
code span.st { color: var(--syntax-string); } /* string */
99
-
code span.va { color: var(--syntax-variable); } /* variable */
100
-
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
101
-
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
-117
docs/template.html
-117
docs/template.html
···
1
-
<!DOCTYPE html>
2
-
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
3
-
<head>
4
-
<meta charset="utf-8" />
5
-
<meta name="generator" content="pandoc" />
6
-
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
7
-
$for(author-meta)$
8
-
<meta name="author" content="$author-meta$" />
9
-
$endfor$
10
-
11
-
$if(date-meta)$
12
-
<meta name="dcterms.date" content="$date-meta$" />
13
-
$endif$
14
-
15
-
$if(keywords)$
16
-
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
17
-
$endif$
18
-
19
-
$if(description-meta)$
20
-
<meta name="description" content="$description-meta$" />
21
-
$endif$
22
-
23
-
<title>$pagetitle$ - Tangled docs</title>
24
-
25
-
<style>
26
-
$styles.css()$
27
-
</style>
28
-
29
-
$for(css)$
30
-
<link rel="stylesheet" href="$css$" />
31
-
$endfor$
32
-
33
-
$for(header-includes)$
34
-
$header-includes$
35
-
$endfor$
36
-
37
-
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
38
-
39
-
</head>
40
-
<body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen">
41
-
$for(include-before)$
42
-
$include-before$
43
-
$endfor$
44
-
45
-
$if(toc)$
46
-
<!-- mobile topbar toc -->
47
-
<details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4">
48
-
<summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white">
49
-
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
50
-
<span class="group-open:hidden inline">${ menu.svg() }</span>
51
-
<span class="hidden group-open:inline">${ x.svg() }</span>
52
-
</summary>
53
-
${ table-of-contents:toc.html() }
54
-
</details>
55
-
<!-- desktop sidebar toc -->
56
-
<nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50">
57
-
$if(toc-title)$
58
-
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
59
-
$endif$
60
-
${ table-of-contents:toc.html() }
61
-
</nav>
62
-
$endif$
63
-
64
-
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
65
-
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
66
-
$if(top)$
67
-
$-- only print title block if this is NOT the top page
68
-
$else$
69
-
$if(title)$
70
-
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
71
-
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
72
-
$if(subtitle)$
73
-
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
74
-
$endif$
75
-
$for(author)$
76
-
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
77
-
$endfor$
78
-
$if(date)$
79
-
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
80
-
$endif$
81
-
$if(abstract)$
82
-
<div class="mt-6 p-4 bg-gray-50 rounded-lg">
83
-
<div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div>
84
-
<div class="text-gray-700">$abstract$</div>
85
-
</div>
86
-
$endif$
87
-
$endif$
88
-
</header>
89
-
$endif$
90
-
<article class="prose dark:prose-invert max-w-none">
91
-
$body$
92
-
</article>
93
-
</main>
94
-
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 ">
95
-
<div class="max-w-4xl mx-auto px-8 py-4">
96
-
<div class="flex justify-between gap-4">
97
-
<span class="flex-1">
98
-
$if(previous.url)$
99
-
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
100
-
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
101
-
$endif$
102
-
</span>
103
-
<span class="flex-1 text-right">
104
-
$if(next.url)$
105
-
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
106
-
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
107
-
$endif$
108
-
</span>
109
-
</div>
110
-
</div>
111
-
</nav>
112
-
</div>
113
-
$for(include-after)$
114
-
$include-after$
115
-
$endfor$
116
-
</body>
117
-
</html>
-4
docs/toc.html
-4
docs/toc.html
+31
-5
flake.nix
+31
-5
flake.nix
···
88
88
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
89
89
};
90
90
appview = self.callPackage ./nix/pkgs/appview.nix {};
91
-
docs = self.callPackage ./nix/pkgs/docs.nix {
92
-
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
93
-
};
94
91
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
95
92
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
96
93
knot = self.callPackage ./nix/pkgs/knot.nix {};
94
+
did-method-plc = self.callPackage ./nix/pkgs/did-method-plc.nix {};
95
+
bluesky-jetstream = self.callPackage ./nix/pkgs/bluesky-jetstream.nix {};
96
+
bluesky-relay = self.callPackage ./nix/pkgs/bluesky-relay.nix {};
97
+
tap = self.callPackage ./nix/pkgs/tap.nix {};
97
98
});
98
99
in {
99
100
overlays.default = final: prev: {
100
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
101
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview did-method-plc bluesky-jetstream bluesky-relay tap;
101
102
};
102
103
103
104
packages = forAllSystems (system: let
···
106
107
staticPackages = mkPackageSet pkgs.pkgsStatic;
107
108
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
108
109
in {
109
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;
110
+
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib did-method-plc bluesky-jetstream bluesky-relay tap;
110
111
111
112
pkgsStatic-appview = staticPackages.appview;
112
113
pkgsStatic-knot = staticPackages.knot;
···
305
306
imports = [./nix/modules/spindle.nix];
306
307
307
308
services.tangled.spindle.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.spindle;
309
+
services.tangled.spindle.tap-package = lib.mkDefault self.packages.${pkgs.system}.tap;
310
+
};
311
+
nixosModules.did-method-plc = {
312
+
lib,
313
+
pkgs,
314
+
...
315
+
}: {
316
+
imports = [./nix/modules/did-method-plc.nix];
317
+
services.did-method-plc.package = lib.mkDefault self.packages.${pkgs.system}.did-method-plc;
318
+
};
319
+
nixosModules.bluesky-relay = {
320
+
lib,
321
+
pkgs,
322
+
...
323
+
}: {
324
+
imports = [./nix/modules/bluesky-relay.nix];
325
+
services.bluesky-relay.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-relay;
326
+
};
327
+
nixosModules.bluesky-jetstream = {
328
+
lib,
329
+
pkgs,
330
+
...
331
+
}: {
332
+
imports = [./nix/modules/bluesky-jetstream.nix];
333
+
services.bluesky-jetstream.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-jetstream;
308
334
};
309
335
};
310
336
}
+1
go.mod
+1
go.mod
···
131
131
github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect
132
132
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
133
133
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
134
+
github.com/hashicorp/go-version v1.8.0 // indirect
134
135
github.com/hashicorp/golang-lru v1.0.2 // indirect
135
136
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
136
137
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
+2
go.sum
+2
go.sum
···
264
264
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
265
265
github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
266
266
github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
267
+
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
268
+
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
267
269
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
268
270
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
269
271
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+1
-1
input.css
+1
-1
input.css
+33
lexicons/pipeline/cancelPipeline.json
+33
lexicons/pipeline/cancelPipeline.json
···
1
+
{
2
+
"lexicon": 1,
3
+
"id": "sh.tangled.pipeline.cancelPipeline",
4
+
"defs": {
5
+
"main": {
6
+
"type": "procedure",
7
+
"description": "Cancel a running pipeline",
8
+
"input": {
9
+
"encoding": "application/json",
10
+
"schema": {
11
+
"type": "object",
12
+
"required": ["repo", "pipeline", "workflow"],
13
+
"properties": {
14
+
"repo": {
15
+
"type": "string",
16
+
"format": "at-uri",
17
+
"description": "repo at-uri, spindle can't resolve repo from pipeline at-uri yet"
18
+
},
19
+
"pipeline": {
20
+
"type": "string",
21
+
"format": "at-uri",
22
+
"description": "pipeline at-uri"
23
+
},
24
+
"workflow": {
25
+
"type": "string",
26
+
"description": "workflow name"
27
+
}
28
+
}
29
+
}
30
+
}
31
+
}
32
+
}
33
+
}
+3
nix/gomod2nix.toml
+3
nix/gomod2nix.toml
···
304
304
[mod."github.com/hashicorp/go-sockaddr"]
305
305
version = "v1.0.7"
306
306
hash = "sha256-p6eDOrGzN1jMmT/F/f/VJMq0cKNFhUcEuVVwTE6vSrs="
307
+
[mod."github.com/hashicorp/go-version"]
308
+
version = "v1.8.0"
309
+
hash = "sha256-KXtqERmYrWdpqPCViWcHbe6jnuH7k16bvBIcuJuevj8="
307
310
[mod."github.com/hashicorp/golang-lru"]
308
311
version = "v1.0.2"
309
312
hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
+64
nix/modules/bluesky-jetstream.nix
+64
nix/modules/bluesky-jetstream.nix
···
1
+
{
2
+
config,
3
+
pkgs,
4
+
lib,
5
+
...
6
+
}: let
7
+
cfg = config.services.bluesky-jetstream;
8
+
in
9
+
with lib; {
10
+
options.services.bluesky-jetstream = {
11
+
enable = mkEnableOption "jetstream server";
12
+
package = mkPackageOption pkgs "bluesky-jetstream" {};
13
+
14
+
# dataDir = mkOption {
15
+
# type = types.str;
16
+
# default = "/var/lib/jetstream";
17
+
# description = "directory to store data (pebbleDB)";
18
+
# };
19
+
livenessTtl = mkOption {
20
+
type = types.int;
21
+
default = 15;
22
+
description = "time to restart when no event detected (seconds)";
23
+
};
24
+
websocketUrl = mkOption {
25
+
type = types.str;
26
+
default = "wss://bsky.network/xrpc/com.atproto.sync.subscribeRepos";
27
+
description = "full websocket path to the ATProto SubscribeRepos XRPC endpoint";
28
+
};
29
+
};
30
+
config = mkIf cfg.enable {
31
+
systemd.services.bluesky-jetstream = {
32
+
description = "bluesky jetstream";
33
+
after = ["network.target" "pds.service"];
34
+
wantedBy = ["multi-user.target"];
35
+
36
+
serviceConfig = {
37
+
User = "jetstream";
38
+
Group = "jetstream";
39
+
StateDirectory = "jetstream";
40
+
StateDirectoryMode = "0755";
41
+
# preStart = ''
42
+
# mkdir -p "${cfg.dataDir}"
43
+
# chown -R jetstream:jetstream "${cfg.dataDir}"
44
+
# '';
45
+
# WorkingDirectory = cfg.dataDir;
46
+
Environment = [
47
+
"JETSTREAM_DATA_DIR=/var/lib/jetstream/data"
48
+
"JETSTREAM_LIVENESS_TTL=${toString cfg.livenessTtl}s"
49
+
"JETSTREAM_WS_URL=${cfg.websocketUrl}"
50
+
];
51
+
ExecStart = getExe cfg.package;
52
+
Restart = "always";
53
+
RestartSec = 5;
54
+
};
55
+
};
56
+
users = {
57
+
users.jetstream = {
58
+
group = "jetstream";
59
+
isSystemUser = true;
60
+
};
61
+
groups.jetstream = {};
62
+
};
63
+
};
64
+
}
+48
nix/modules/bluesky-relay.nix
+48
nix/modules/bluesky-relay.nix
···
1
+
{
2
+
config,
3
+
pkgs,
4
+
lib,
5
+
...
6
+
}: let
7
+
cfg = config.services.bluesky-relay;
8
+
in
9
+
with lib; {
10
+
options.services.bluesky-relay = {
11
+
enable = mkEnableOption "relay server";
12
+
package = mkPackageOption pkgs "bluesky-relay" {};
13
+
};
14
+
config = mkIf cfg.enable {
15
+
systemd.services.bluesky-relay = {
16
+
description = "bluesky relay";
17
+
after = ["network.target" "pds.service"];
18
+
wantedBy = ["multi-user.target"];
19
+
20
+
serviceConfig = {
21
+
User = "relay";
22
+
Group = "relay";
23
+
StateDirectory = "relay";
24
+
StateDirectoryMode = "0755";
25
+
Environment = [
26
+
"RELAY_ADMIN_PASSWORD=password"
27
+
"RELAY_PLC_HOST=https://plc.tngl.boltless.dev"
28
+
"DATABASE_URL=sqlite:///var/lib/relay/relay.sqlite"
29
+
"RELAY_IP_BIND=:2470"
30
+
"RELAY_PERSIST_DIR=/var/lib/relay"
31
+
"RELAY_DISABLE_REQUEST_CRAWL=0"
32
+
"RELAY_INITIAL_SEQ_NUMBER=1"
33
+
"RELAY_ALLOW_INSECURE_HOSTS=1"
34
+
];
35
+
ExecStart = "${getExe cfg.package} serve";
36
+
Restart = "always";
37
+
RestartSec = 5;
38
+
};
39
+
};
40
+
users = {
41
+
users.relay = {
42
+
group = "relay";
43
+
isSystemUser = true;
44
+
};
45
+
groups.relay = {};
46
+
};
47
+
};
48
+
}
+76
nix/modules/did-method-plc.nix
+76
nix/modules/did-method-plc.nix
···
1
+
{
2
+
config,
3
+
pkgs,
4
+
lib,
5
+
...
6
+
}: let
7
+
cfg = config.services.did-method-plc;
8
+
in
9
+
with lib; {
10
+
options.services.did-method-plc = {
11
+
enable = mkEnableOption "did-method-plc server";
12
+
package = mkPackageOption pkgs "did-method-plc" {};
13
+
};
14
+
config = mkIf cfg.enable {
15
+
services.postgresql = {
16
+
enable = true;
17
+
package = pkgs.postgresql_14;
18
+
ensureDatabases = ["plc"];
19
+
ensureUsers = [
20
+
{
21
+
name = "pg";
22
+
# ensurePermissions."DATABASE plc" = "ALL PRIVILEGES";
23
+
}
24
+
];
25
+
authentication = ''
26
+
local all all trust
27
+
host all all 127.0.0.1/32 trust
28
+
'';
29
+
};
30
+
systemd.services.did-method-plc = {
31
+
description = "did-method-plc";
32
+
33
+
after = ["postgresql.service"];
34
+
wants = ["postgresql.service"];
35
+
wantedBy = ["multi-user.target"];
36
+
37
+
environment = let
38
+
db_creds_json = builtins.toJSON {
39
+
username = "pg";
40
+
password = "";
41
+
host = "127.0.0.1";
42
+
port = 5432;
43
+
};
44
+
in {
45
+
# TODO: inherit from config
46
+
DEBUG_MODE = "1";
47
+
LOG_ENABLED = "true";
48
+
LOG_LEVEL = "debug";
49
+
LOG_DESTINATION = "1";
50
+
ENABLE_MIGRATIONS = "true";
51
+
DB_CREDS_JSON = db_creds_json;
52
+
DB_MIGRATE_CREDS_JSON = db_creds_json;
53
+
PLC_VERSION = "0.0.1";
54
+
PORT = "8080";
55
+
};
56
+
57
+
serviceConfig = {
58
+
ExecStart = getExe cfg.package;
59
+
User = "plc";
60
+
Group = "plc";
61
+
StateDirectory = "plc";
62
+
StateDirectoryMode = "0755";
63
+
Restart = "always";
64
+
65
+
# Hardening
66
+
};
67
+
};
68
+
users = {
69
+
users.plc = {
70
+
group = "plc";
71
+
isSystemUser = true;
72
+
};
73
+
groups.plc = {};
74
+
};
75
+
};
76
+
}
+35
nix/modules/spindle.nix
+35
nix/modules/spindle.nix
···
1
1
{
2
2
config,
3
+
pkgs,
3
4
lib,
4
5
...
5
6
}: let
···
16
17
package = mkOption {
17
18
type = types.package;
18
19
description = "Package to use for the spindle";
20
+
};
21
+
tap-package = mkOption {
22
+
type = types.package;
23
+
description = "Package to use for the spindle tap service";
24
+
};
25
+
26
+
atpRelayUrl = mkOption {
27
+
type = types.str;
28
+
default = "https://relay1.us-east.bsky.network";
29
+
description = "atproto relay";
19
30
};
20
31
21
32
server = {
···
114
125
config = mkIf cfg.enable {
115
126
virtualisation.docker.enable = true;
116
127
128
+
systemd.services.spindle-tap = {
129
+
description = "spindle tap service";
130
+
after = ["network.target" "docker.service"];
131
+
wantedBy = ["multi-user.target"];
132
+
serviceConfig = {
133
+
LogsDirectory = "spindle-tap";
134
+
StateDirectory = "spindle-tap";
135
+
Environment = [
136
+
"TAP_BIND=:2480"
137
+
"TAP_PLC_URL=${cfg.server.plcUrl}"
138
+
"TAP_RELAY_URL=${cfg.atpRelayUrl}"
139
+
"TAP_COLLECTION_FILTERS=${concatStringsSep "," [
140
+
"sh.tangled.repo"
141
+
"sh.tangled.repo.collaborator"
142
+
"sh.tangled.spindle.member"
143
+
]}"
144
+
];
145
+
ExecStart = "${getExe cfg.tap-package} run";
146
+
};
147
+
};
148
+
117
149
systemd.services.spindle = {
118
150
description = "spindle service";
119
151
after = ["network.target" "docker.service"];
120
152
wantedBy = ["multi-user.target"];
153
+
path = [
154
+
pkgs.git
155
+
];
121
156
serviceConfig = {
122
157
LogsDirectory = "spindle";
123
158
StateDirectory = "spindle";
+20
nix/pkgs/bluesky-jetstream.nix
+20
nix/pkgs/bluesky-jetstream.nix
···
1
+
{
2
+
buildGoModule,
3
+
fetchFromGitHub,
4
+
}:
5
+
buildGoModule {
6
+
pname = "bluesky-jetstream";
7
+
version = "0.1.0";
8
+
src = fetchFromGitHub {
9
+
owner = "bluesky-social";
10
+
repo = "jetstream";
11
+
rev = "7d7efa58d7f14101a80ccc4f1085953948b7d5de";
12
+
sha256 = "sha256-1e9SL/8gaDPMA4YZed51ffzgpkptbMd0VTbTTDbPTFw=";
13
+
};
14
+
subPackages = ["cmd/jetstream"];
15
+
vendorHash = "sha256-/21XJQH6fo9uPzlABUAbdBwt1O90odmppH6gXu2wkiQ=";
16
+
doCheck = false;
17
+
meta = {
18
+
mainProgram = "jetstream";
19
+
};
20
+
}
+20
nix/pkgs/bluesky-relay.nix
+20
nix/pkgs/bluesky-relay.nix
···
1
+
{
2
+
buildGoModule,
3
+
fetchFromGitHub,
4
+
}:
5
+
buildGoModule {
6
+
pname = "bluesky-relay";
7
+
version = "0.1.0";
8
+
src = fetchFromGitHub {
9
+
owner = "boltlessengineer";
10
+
repo = "indigo";
11
+
rev = "b769ea60b7dde5e2bd0b8ee3ce8462a0c0e596fe";
12
+
sha256 = "sha256-jHRY825TBYaH1WkKFUoNbo4UlMSyuHvCGjYPiBnKo44=";
13
+
};
14
+
subPackages = ["cmd/relay"];
15
+
vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8=";
16
+
doCheck = false;
17
+
meta = {
18
+
mainProgram = "relay";
19
+
};
20
+
}
+65
nix/pkgs/did-method-plc.nix
+65
nix/pkgs/did-method-plc.nix
···
1
+
# inspired by https://github.com/NixOS/nixpkgs/blob/333bfb7c258fab089a834555ea1c435674c459b4/pkgs/by-name/ga/gatsby-cli/package.nix
2
+
{
3
+
lib,
4
+
stdenv,
5
+
fetchFromGitHub,
6
+
fetchYarnDeps,
7
+
yarnConfigHook,
8
+
yarnBuildHook,
9
+
nodejs,
10
+
makeBinaryWrapper,
11
+
}:
12
+
stdenv.mkDerivation (finalAttrs: {
13
+
pname = "did-method-plc";
14
+
version = "0.0.1";
15
+
16
+
src = fetchFromGitHub {
17
+
owner = "did-method-plc";
18
+
repo = "did-method-plc";
19
+
rev = "158ba5535ac3da4fd4309954bde41deab0b45972";
20
+
sha256 = "sha256-O5smubbrnTDMCvL6iRyMXkddr5G7YHxkQRVMRULHanQ=";
21
+
};
22
+
postPatch = ''
23
+
# remove dd-trace dependency
24
+
sed -i '3d' packages/server/service/index.js
25
+
'';
26
+
27
+
yarnOfflineCache = fetchYarnDeps {
28
+
yarnLock = finalAttrs.src + "/yarn.lock";
29
+
hash = "sha256-g8GzaAbWSnWwbQjJMV2DL5/ZlWCCX0sRkjjvX3tqU4Y=";
30
+
};
31
+
32
+
nativeBuildInputs = [
33
+
yarnConfigHook
34
+
yarnBuildHook
35
+
nodejs
36
+
makeBinaryWrapper
37
+
];
38
+
yarnBuildScript = "lerna";
39
+
yarnBuildFlags = [
40
+
"run"
41
+
"build"
42
+
"--scope"
43
+
"@did-plc/server"
44
+
"--include-dependencies"
45
+
];
46
+
47
+
installPhase = ''
48
+
runHook preInstall
49
+
50
+
mkdir -p $out/lib/node_modules/
51
+
mv packages/ $out/lib/packages/
52
+
mv node_modules/* $out/lib/node_modules/
53
+
54
+
makeWrapper ${lib.getExe nodejs} $out/bin/plc \
55
+
--add-flags $out/lib/packages/server/service/index.js \
56
+
--add-flags --enable-source-maps \
57
+
--set NODE_PATH $out/lib/node_modules
58
+
59
+
runHook postInstall
60
+
'';
61
+
62
+
meta = {
63
+
mainProgram = "plc";
64
+
};
65
+
})
-41
nix/pkgs/docs.nix
-41
nix/pkgs/docs.nix
···
1
-
{
2
-
pandoc,
3
-
tailwindcss,
4
-
runCommandLocal,
5
-
inter-fonts-src,
6
-
ibm-plex-mono-src,
7
-
lucide-src,
8
-
src,
9
-
}:
10
-
runCommandLocal "docs" {} ''
11
-
mkdir -p working
12
-
13
-
# copy templates, themes, styles, filters to working directory
14
-
cp ${src}/docs/*.html working/
15
-
cp ${src}/docs/*.theme working/
16
-
cp ${src}/docs/*.css working/
17
-
18
-
# icons
19
-
cp -rf ${lucide-src}/*.svg working/
20
-
21
-
# content
22
-
${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
23
-
-o $out/ \
24
-
-t chunkedhtml \
25
-
--variable toc \
26
-
--toc-depth=2 \
27
-
--css=stylesheet.css \
28
-
--chunk-template="%i.html" \
29
-
--highlight-style=working/highlight.theme \
30
-
--template=working/template.html
31
-
32
-
# fonts
33
-
mkdir -p $out/static/fonts
34
-
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
35
-
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
36
-
cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
37
-
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/
38
-
39
-
# styles
40
-
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
41
-
''
+20
nix/pkgs/tap.nix
+20
nix/pkgs/tap.nix
···
1
+
{
2
+
buildGoModule,
3
+
fetchFromGitHub,
4
+
}:
5
+
buildGoModule {
6
+
pname = "tap";
7
+
version = "0.1.0";
8
+
src = fetchFromGitHub {
9
+
owner = "bluesky-social";
10
+
repo = "indigo";
11
+
rev = "f92cb29224fcc60f666b20ee3514e431a58ff811";
12
+
sha256 = "sha256-35ltXnq0SJeo3j33D7Nndbcnw5XWBJLRrmZ+nCmZVQw=";
13
+
};
14
+
subPackages = ["cmd/tap"];
15
+
vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8=";
16
+
doCheck = false;
17
+
meta = {
18
+
mainProgram = "tap";
19
+
};
20
+
}
+3
-1
nix/vm.nix
+3
-1
nix/vm.nix
···
8
8
var = builtins.getEnv name;
9
9
in
10
10
if var == ""
11
-
then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
11
+
then throw "\$${name} must be defined, see docs/hacking.md for more details"
12
12
else var;
13
13
envVarOr = name: default: let
14
14
var = builtins.getEnv name;
···
19
19
20
20
plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
21
21
jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
22
+
relayUrl = envVarOr "TANGLED_VM_RELAY_URL" "https://relay1.us-east.bsky.network";
22
23
in
23
24
nixpkgs.lib.nixosSystem {
24
25
inherit system;
···
95
96
};
96
97
services.tangled.spindle = {
97
98
enable = true;
99
+
atpRelayUrl = relayUrl;
98
100
server = {
99
101
owner = envVar "TANGLED_VM_SPINDLE_OWNER";
100
102
hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
+10
orm/orm.go
+10
orm/orm.go
···
20
20
}
21
21
defer tx.Rollback()
22
22
23
+
_, err = tx.Exec(`
24
+
create table if not exists migrations (
25
+
id integer primary key autoincrement,
26
+
name text unique
27
+
);
28
+
`)
29
+
if err != nil {
30
+
return fmt.Errorf("creating migrations table: %w", err)
31
+
}
32
+
23
33
var exists bool
24
34
err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
25
35
if err != nil {
+144
rbac2/rbac2.go
+144
rbac2/rbac2.go
···
1
+
package rbac2
2
+
3
+
import (
4
+
"database/sql"
5
+
"fmt"
6
+
7
+
adapter "github.com/Blank-Xu/sql-adapter"
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"github.com/casbin/casbin/v2"
10
+
"github.com/casbin/casbin/v2/model"
11
+
"github.com/casbin/casbin/v2/util"
12
+
"tangled.org/core/api/tangled"
13
+
)
14
+
15
+
const (
16
+
Model = `
17
+
[request_definition]
18
+
r = sub, dom, obj, act
19
+
20
+
[policy_definition]
21
+
p = sub, dom, obj, act
22
+
23
+
[role_definition]
24
+
g = _, _, _
25
+
26
+
[policy_effect]
27
+
e = some(where (p.eft == allow))
28
+
29
+
[matchers]
30
+
m = g(r.sub, p.sub, r.dom) && keyMatch4(r.dom, p.dom) && r.obj == p.obj && r.act == p.act
31
+
`
32
+
)
33
+
34
+
type Enforcer struct {
35
+
e *casbin.Enforcer
36
+
}
37
+
38
+
func NewEnforcer(path string) (*Enforcer, error) {
39
+
m, err := model.NewModelFromString(Model)
40
+
if err != nil {
41
+
return nil, err
42
+
}
43
+
44
+
db, err := sql.Open("sqlite3", path+"?_foreign_keys=1")
45
+
if err != nil {
46
+
return nil, err
47
+
}
48
+
49
+
a, err := adapter.NewAdapter(db, "sqlite3", "acl")
50
+
if err != nil {
51
+
return nil, err
52
+
}
53
+
54
+
e, err := casbin.NewEnforcer(m, a)
55
+
if err != nil {
56
+
return nil, err
57
+
}
58
+
59
+
if err := seedTangledPolicies(e); err != nil {
60
+
return nil, err
61
+
}
62
+
63
+
return &Enforcer{e}, nil
64
+
}
65
+
66
+
func seedTangledPolicies(e *casbin.Enforcer) error {
67
+
// policies
68
+
aturi := func(nsid string) string {
69
+
return fmt.Sprintf("at://{did}/%s/{rkey}", nsid)
70
+
}
71
+
72
+
_, err := e.AddPoliciesEx([][]string{
73
+
// sub | dom | obj | act
74
+
{"repo:owner", aturi(tangled.RepoNSID), "#/", "write"},
75
+
{"repo:owner", aturi(tangled.RepoNSID), "#/collaborator", "write"}, // invite
76
+
{"repo:collaborator", aturi(tangled.RepoNSID), "#/settings", "write"},
77
+
{"repo:collaborator", aturi(tangled.RepoNSID), "#/git", "write"}, // git push
78
+
79
+
{"server:owner", "/knot/{did}", "#/member", "write"}, // invite
80
+
{"server:member", "/knot/{did}", "#/git", "write"},
81
+
82
+
{"server:owner", "/spindle/{did}", "#/member", "write"}, // invite
83
+
})
84
+
if err != nil {
85
+
return err
86
+
}
87
+
88
+
// grouping policies
89
+
// TODO(boltless): define our own matcher to replace keyMatch4
90
+
e.AddNamedDomainMatchingFunc("g", "keyMatch4", util.KeyMatch4)
91
+
_, err = e.AddGroupingPoliciesEx([][]string{
92
+
// sub | role | dom
93
+
{"repo:owner", "repo:collaborator", aturi(tangled.RepoNSID)},
94
+
95
+
// using '/knot/' prefix here because knot/spindle identifiers don't
96
+
// include the collection type
97
+
{"server:owner", "server:member", "/knot/{did}"},
98
+
{"server:owner", "server:member", "/spindle/{did}"},
99
+
})
100
+
return err
101
+
}
102
+
103
+
func (e *Enforcer) hasImplicitRoleForUser(name string, role string, domain ...string) (bool, error) {
104
+
roles, err := e.e.GetImplicitRolesForUser(name, domain...)
105
+
if err != nil {
106
+
return false, err
107
+
}
108
+
for _, r := range roles {
109
+
if r == role {
110
+
return true, nil
111
+
}
112
+
}
113
+
return false, nil
114
+
}
115
+
116
+
// setRoleForUser sets single user role for specified domain.
117
+
// All existing users with that role will be removed.
118
+
func (e *Enforcer) setRoleForUser(name string, role string, domain ...string) error {
119
+
currentUsers, err := e.e.GetUsersForRole(role, domain...)
120
+
if err != nil {
121
+
return err
122
+
}
123
+
124
+
for _, oldUser := range currentUsers {
125
+
_, err = e.e.DeleteRoleForUser(oldUser, role, domain...)
126
+
if err != nil {
127
+
return err
128
+
}
129
+
}
130
+
131
+
_, err = e.e.AddRoleForUser(name, role, domain...)
132
+
return err
133
+
}
134
+
135
+
// validateAtUri enforces that the AT-URI has a valid DID as its authority and matches the expected collection NSID.
136
+
func validateAtUri(uri syntax.ATURI, expected string) error {
137
+
if !uri.Authority().IsDID() {
138
+
return fmt.Errorf("expected at-uri with did")
139
+
}
140
+
if expected != "" && uri.Collection().String() != expected {
141
+
return fmt.Errorf("incorrect repo at-uri collection nsid '%s' (expected '%s')", uri.Collection(), expected)
142
+
}
143
+
return nil
144
+
}
+115
rbac2/rbac2_test.go
+115
rbac2/rbac2_test.go
···
1
+
package rbac2_test
2
+
3
+
import (
4
+
"testing"
5
+
6
+
"github.com/bluesky-social/indigo/atproto/syntax"
7
+
_ "github.com/mattn/go-sqlite3"
8
+
"github.com/stretchr/testify/assert"
9
+
"tangled.org/core/rbac2"
10
+
)
11
+
12
+
func setup(t *testing.T) *rbac2.Enforcer {
13
+
enforcer, err := rbac2.NewEnforcer(":memory:")
14
+
assert.NoError(t, err)
15
+
16
+
return enforcer
17
+
}
18
+
19
+
func TestRepoOwnerPermissions(t *testing.T) {
20
+
var (
21
+
e = setup(t)
22
+
ok bool
23
+
err error
24
+
fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
25
+
fooUser = syntax.DID("did:plc:foo")
26
+
)
27
+
28
+
assert.NoError(t, e.AddRepo(fooRepo))
29
+
30
+
ok, err = e.IsRepoOwner(fooUser, fooRepo)
31
+
assert.NoError(t, err)
32
+
assert.True(t, ok, "repo author should be repo owner")
33
+
34
+
ok, err = e.IsRepoWriteAllowed(fooUser, fooRepo)
35
+
assert.NoError(t, err)
36
+
assert.True(t, ok, "repo owner should be able to modify the repo itself")
37
+
38
+
ok, err = e.IsRepoCollaborator(fooUser, fooRepo)
39
+
assert.NoError(t, err)
40
+
assert.True(t, ok, "repo owner should inherit role repo:collaborator")
41
+
42
+
ok, err = e.IsRepoSettingsWriteAllowed(fooUser, fooRepo)
43
+
assert.NoError(t, err)
44
+
assert.True(t, ok, "repo owner should inherit collaborator permissions")
45
+
}
46
+
47
+
func TestRepoCollaboratorPermissions(t *testing.T) {
48
+
var (
49
+
e = setup(t)
50
+
ok bool
51
+
err error
52
+
fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
53
+
barUser = syntax.DID("did:plc:bar")
54
+
)
55
+
56
+
assert.NoError(t, e.AddRepo(fooRepo))
57
+
assert.NoError(t, e.AddRepoCollaborator(barUser, fooRepo))
58
+
59
+
ok, err = e.IsRepoCollaborator(barUser, fooRepo)
60
+
assert.NoError(t, err)
61
+
assert.True(t, ok, "should set repo collaborator")
62
+
63
+
ok, err = e.IsRepoSettingsWriteAllowed(barUser, fooRepo)
64
+
assert.NoError(t, err)
65
+
assert.True(t, ok, "repo collaborator should be able to edit repo settings")
66
+
67
+
ok, err = e.IsRepoWriteAllowed(barUser, fooRepo)
68
+
assert.NoError(t, err)
69
+
assert.False(t, ok, "repo collaborator shouldn't be able to modify the repo itself")
70
+
}
71
+
72
+
func TestGetByRole(t *testing.T) {
73
+
var (
74
+
e = setup(t)
75
+
err error
76
+
fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
77
+
owner = syntax.DID("did:plc:foo")
78
+
collaborator1 = syntax.DID("did:plc:bar")
79
+
collaborator2 = syntax.DID("did:plc:baz")
80
+
)
81
+
82
+
assert.NoError(t, e.AddRepo(fooRepo))
83
+
assert.NoError(t, e.AddRepoCollaborator(collaborator1, fooRepo))
84
+
assert.NoError(t, e.AddRepoCollaborator(collaborator2, fooRepo))
85
+
86
+
collaborators, err := e.GetRepoCollaborators(fooRepo)
87
+
assert.NoError(t, err)
88
+
assert.ElementsMatch(t, []syntax.DID{
89
+
owner,
90
+
collaborator1,
91
+
collaborator2,
92
+
}, collaborators)
93
+
}
94
+
95
+
func TestSpindleOwnerPermissions(t *testing.T) {
96
+
var (
97
+
e = setup(t)
98
+
ok bool
99
+
err error
100
+
spindle = syntax.DID("did:web:spindle.example.com")
101
+
owner = syntax.DID("did:plc:foo")
102
+
member = syntax.DID("did:plc:bar")
103
+
)
104
+
105
+
assert.NoError(t, e.SetSpindleOwner(owner, spindle))
106
+
assert.NoError(t, e.AddSpindleMember(member, spindle))
107
+
108
+
ok, err = e.IsSpindleMemberInviteAllowed(owner, spindle)
109
+
assert.NoError(t, err)
110
+
assert.True(t, ok, "spindle owner can invite members")
111
+
112
+
ok, err = e.IsSpindleMemberInviteAllowed(member, spindle)
113
+
assert.NoError(t, err)
114
+
assert.False(t, ok, "spindle member cannot invite members")
115
+
}
+91
rbac2/repo.go
+91
rbac2/repo.go
···
1
+
package rbac2
2
+
3
+
import (
4
+
"slices"
5
+
"strings"
6
+
7
+
"github.com/bluesky-social/indigo/atproto/syntax"
8
+
"tangled.org/core/api/tangled"
9
+
)
10
+
11
+
// AddRepo adds new repo with its owner to rbac enforcer
12
+
func (e *Enforcer) AddRepo(repo syntax.ATURI) error {
13
+
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
14
+
return err
15
+
}
16
+
user := repo.Authority()
17
+
18
+
return e.setRoleForUser(user.String(), "repo:owner", repo.String())
19
+
}
20
+
21
+
// DeleteRepo deletes all policies related to the repo
22
+
func (e *Enforcer) DeleteRepo(repo syntax.ATURI) error {
23
+
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
24
+
return err
25
+
}
26
+
27
+
_, err := e.e.DeleteDomains(repo.String())
28
+
return err
29
+
}
30
+
31
+
// AddRepoCollaborator adds new collaborator to the repo
32
+
func (e *Enforcer) AddRepoCollaborator(user syntax.DID, repo syntax.ATURI) error {
33
+
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
34
+
return err
35
+
}
36
+
37
+
_, err := e.e.AddRoleForUser(user.String(), "repo:collaborator", repo.String())
38
+
return err
39
+
}
40
+
41
+
// RemoveRepoCollaborator removes the collaborator from the repo.
42
+
// This won't remove inherited roles like repository owner.
43
+
func (e *Enforcer) RemoveRepoCollaborator(user syntax.DID, repo syntax.ATURI) error {
44
+
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
45
+
return err
46
+
}
47
+
48
+
_, err := e.e.DeleteRoleForUser(user.String(), "repo:collaborator", repo.String())
49
+
return err
50
+
}
51
+
52
+
func (e *Enforcer) GetRepoCollaborators(repo syntax.ATURI) ([]syntax.DID, error) {
53
+
var collaborators []syntax.DID
54
+
members, err := e.e.GetImplicitUsersForRole("repo:collaborator", repo.String())
55
+
if err != nil {
56
+
return nil, err
57
+
}
58
+
for _, m := range members {
59
+
if !strings.HasPrefix(m, "did:") { // skip non-user subjects like 'repo:owner'
60
+
continue
61
+
}
62
+
collaborators = append(collaborators, syntax.DID(m))
63
+
}
64
+
65
+
slices.Sort(collaborators)
66
+
return slices.Compact(collaborators), nil
67
+
}
68
+
69
+
func (e *Enforcer) IsRepoOwner(user syntax.DID, repo syntax.ATURI) (bool, error) {
70
+
return e.e.HasRoleForUser(user.String(), "repo:owner", repo.String())
71
+
}
72
+
73
+
func (e *Enforcer) IsRepoCollaborator(user syntax.DID, repo syntax.ATURI) (bool, error) {
74
+
return e.hasImplicitRoleForUser(user.String(), "repo:collaborator", repo.String())
75
+
}
76
+
77
+
func (e *Enforcer) IsRepoWriteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
78
+
return e.e.Enforce(user.String(), repo.String(), "#/", "write")
79
+
}
80
+
81
+
func (e *Enforcer) IsRepoSettingsWriteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
82
+
return e.e.Enforce(user.String(), repo.String(), "#/settings", "write")
83
+
}
84
+
85
+
func (e *Enforcer) IsRepoCollaboratorInviteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
86
+
return e.e.Enforce(user.String(), repo.String(), "#/collaborator", "write")
87
+
}
88
+
89
+
func (e *Enforcer) IsRepoGitPushAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
90
+
return e.e.Enforce(user.String(), repo.String(), "#/git", "write")
91
+
}
+29
rbac2/spindle.go
+29
rbac2/spindle.go
···
1
+
package rbac2
2
+
3
+
import "github.com/bluesky-social/indigo/atproto/syntax"
4
+
5
+
func (e *Enforcer) SetSpindleOwner(user syntax.DID, spindle syntax.DID) error {
6
+
return e.setRoleForUser(user.String(), "server:owner", intoSpindle(spindle))
7
+
}
8
+
9
+
func (e *Enforcer) IsSpindleMember(user syntax.DID, spindle syntax.DID) (bool, error) {
10
+
return e.e.HasRoleForUser(user.String(), "server:member", intoSpindle(spindle))
11
+
}
12
+
13
+
func (e *Enforcer) AddSpindleMember(user syntax.DID, spindle syntax.DID) error {
14
+
_, err := e.e.AddRoleForUser(user.String(), "server:member", intoSpindle(spindle))
15
+
return err
16
+
}
17
+
18
+
func (e *Enforcer) RemoveSpindleMember(user syntax.DID, spindle syntax.DID) error {
19
+
_, err := e.e.DeleteRoleForUser(user.String(), "server:member", intoSpindle(spindle))
20
+
return err
21
+
}
22
+
23
+
func (e *Enforcer) IsSpindleMemberInviteAllowed(user syntax.DID, spindle syntax.DID) (bool, error) {
24
+
return e.e.Enforce(user.String(), intoSpindle(spindle), "#/member", "write")
25
+
}
26
+
27
+
func intoSpindle(did syntax.DID) string {
28
+
return "/spindle/" + did.String()
29
+
}
+3
-3
readme.md
+3
-3
readme.md
···
10
10
11
11
## docs
12
12
13
-
- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
14
-
- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
15
-
- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)
13
+
* [knot hosting guide](/docs/knot-hosting.md)
14
+
* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
15
+
* [hacking on tangled](/docs/hacking.md)
16
16
17
17
## security
18
18
+18
-11
spindle/config/config.go
+18
-11
spindle/config/config.go
···
3
3
import (
4
4
"context"
5
5
"fmt"
6
+
"path"
6
7
7
8
"github.com/bluesky-social/indigo/atproto/syntax"
8
9
"github.com/sethvargo/go-envconfig"
9
10
)
10
11
11
12
type Server struct {
12
-
ListenAddr string `env:"LISTEN_ADDR, default=0.0.0.0:6555"`
13
-
DBPath string `env:"DB_PATH, default=spindle.db"`
14
-
Hostname string `env:"HOSTNAME, required"`
15
-
JetstreamEndpoint string `env:"JETSTREAM_ENDPOINT, default=wss://jetstream1.us-west.bsky.network/subscribe"`
16
-
PlcUrl string `env:"PLC_URL, default=https://plc.directory"`
17
-
Dev bool `env:"DEV, default=false"`
18
-
Owner string `env:"OWNER, required"`
19
-
Secrets Secrets `env:",prefix=SECRETS_"`
20
-
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
21
-
QueueSize int `env:"QUEUE_SIZE, default=100"`
22
-
MaxJobCount int `env:"MAX_JOB_COUNT, default=2"` // max number of jobs that run at a time
13
+
ListenAddr string `env:"LISTEN_ADDR, default=0.0.0.0:6555"`
14
+
DBPath string `env:"DB_PATH, default=spindle.db"`
15
+
Hostname string `env:"HOSTNAME, required"`
16
+
JetstreamEndpoint string `env:"JETSTREAM_ENDPOINT, default=wss://jetstream1.us-west.bsky.network/subscribe"`
17
+
TapUrl string `env:"TAP_URL, required"`
18
+
PlcUrl string `env:"PLC_URL, default=https://plc.directory"`
19
+
Dev bool `env:"DEV, default=false"`
20
+
Owner syntax.DID `env:"OWNER, required"`
21
+
Secrets Secrets `env:",prefix=SECRETS_"`
22
+
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
23
+
DataDir string `env:"DATA_DIR, default=/var/lib/spindle"`
24
+
QueueSize int `env:"QUEUE_SIZE, default=100"`
25
+
MaxJobCount int `env:"MAX_JOB_COUNT, default=2"` // max number of jobs that run at a time
23
26
}
24
27
25
28
func (s Server) Did() syntax.DID {
26
29
return syntax.DID(fmt.Sprintf("did:web:%s", s.Hostname))
30
+
}
31
+
32
+
func (s Server) RepoDir() string {
33
+
return path.Join(s.DataDir, "repos")
27
34
}
28
35
29
36
type Secrets struct {
+59
-18
spindle/db/db.go
+59
-18
spindle/db/db.go
···
1
1
package db
2
2
3
3
import (
4
+
"context"
4
5
"database/sql"
5
6
"strings"
6
7
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
7
9
_ "github.com/mattn/go-sqlite3"
10
+
"tangled.org/core/log"
11
+
"tangled.org/core/orm"
8
12
)
9
13
10
14
type DB struct {
11
15
*sql.DB
12
16
}
13
17
14
-
func Make(dbPath string) (*DB, error) {
18
+
func Make(ctx context.Context, dbPath string) (*DB, error) {
15
19
// https://github.com/mattn/go-sqlite3#connection-string
16
20
opts := []string{
17
21
"_foreign_keys=1",
···
19
23
"_synchronous=NORMAL",
20
24
"_auto_vacuum=incremental",
21
25
}
26
+
27
+
logger := log.FromContext(ctx)
28
+
logger = log.SubLogger(logger, "db")
22
29
23
30
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
24
31
if err != nil {
25
32
return nil, err
26
33
}
27
34
28
-
// NOTE: If any other migration is added here, you MUST
29
-
// copy the pattern in appview: use a single sql.Conn
30
-
// for every migration.
35
+
conn, err := db.Conn(ctx)
36
+
if err != nil {
37
+
return nil, err
38
+
}
39
+
defer conn.Close()
31
40
32
41
_, err = db.Exec(`
33
42
create table if not exists _jetstream (
···
76
85
return nil, err
77
86
}
78
87
79
-
return &DB{db}, nil
80
-
}
88
+
// run migrations
89
+
90
+
// NOTE: this won't migrate existing records
91
+
// they will be fetched again with tap instead
92
+
orm.RunMigration(conn, logger, "add-rkey-to-repos", func(tx *sql.Tx) error {
93
+
// archive legacy repos (just in case)
94
+
_, err = tx.Exec(`alter table repos rename to repos_old`)
95
+
if err != nil {
96
+
return err
97
+
}
98
+
99
+
_, err := tx.Exec(`
100
+
create table repos (
101
+
-- identifiers
102
+
id integer primary key autoincrement,
103
+
did text not null,
104
+
rkey text not null,
105
+
at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo' || '/' || rkey) stored,
106
+
107
+
name text not null,
108
+
knot text not null,
109
+
110
+
addedAt text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
111
+
unique(did, rkey)
112
+
);
113
+
`)
114
+
if err != nil {
115
+
return err
116
+
}
117
+
118
+
return nil
119
+
})
81
120
82
-
func (d *DB) SaveLastTimeUs(lastTimeUs int64) error {
83
-
_, err := d.Exec(`
84
-
insert into _jetstream (id, last_time_us)
85
-
values (1, ?)
86
-
on conflict(id) do update set last_time_us = excluded.last_time_us
87
-
`, lastTimeUs)
88
-
return err
121
+
return &DB{db}, nil
89
122
}
90
123
91
-
func (d *DB) GetLastTimeUs() (int64, error) {
92
-
var lastTimeUs int64
93
-
row := d.QueryRow(`select last_time_us from _jetstream where id = 1;`)
94
-
err := row.Scan(&lastTimeUs)
95
-
return lastTimeUs, err
124
+
func (d *DB) IsKnownDid(did syntax.DID) (bool, error) {
125
+
// is spindle member / repo collaborator
126
+
var exists bool
127
+
err := d.QueryRow(
128
+
`select exists (
129
+
select 1 from repo_collaborators where did = ?
130
+
union all
131
+
select 1 from spindle_members where did = ?
132
+
)`,
133
+
did,
134
+
did,
135
+
).Scan(&exists)
136
+
return exists, err
96
137
}
+6
-18
spindle/db/events.go
+6
-18
spindle/db/events.go
···
18
18
EventJson string `json:"event"`
19
19
}
20
20
21
-
func (d *DB) InsertEvent(event Event, notifier *notifier.Notifier) error {
21
+
func (d *DB) insertEvent(event Event, notifier *notifier.Notifier) error {
22
22
_, err := d.Exec(
23
23
`insert into events (rkey, nsid, event, created) values (?, ?, ?, ?)`,
24
24
event.Rkey,
···
70
70
return evts, nil
71
71
}
72
72
73
-
func (d *DB) CreateStatusEvent(rkey string, s tangled.PipelineStatus, n *notifier.Notifier) error {
74
-
eventJson, err := json.Marshal(s)
75
-
if err != nil {
76
-
return err
77
-
}
78
-
79
-
event := Event{
80
-
Rkey: rkey,
81
-
Nsid: tangled.PipelineStatusNSID,
82
-
Created: time.Now().UnixNano(),
83
-
EventJson: string(eventJson),
84
-
}
85
-
86
-
return d.InsertEvent(event, n)
87
-
}
88
-
89
73
func (d *DB) createStatusEvent(
90
74
workflowId models.WorkflowId,
91
75
statusKind models.StatusKind,
···
116
100
EventJson: string(eventJson),
117
101
}
118
102
119
-
return d.InsertEvent(event, n)
103
+
return d.insertEvent(event, n)
120
104
121
105
}
122
106
···
164
148
165
149
func (d *DB) StatusFailed(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
166
150
return d.createStatusEvent(workflowId, models.StatusKindFailed, &workflowError, &exitCode, n)
151
+
}
152
+
153
+
func (d *DB) StatusCancelled(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
154
+
return d.createStatusEvent(workflowId, models.StatusKindCancelled, &workflowError, &exitCode, n)
167
155
}
168
156
169
157
func (d *DB) StatusSuccess(workflowId models.WorkflowId, n *notifier.Notifier) error {
-44
spindle/db/known_dids.go
-44
spindle/db/known_dids.go
···
1
-
package db
2
-
3
-
func (d *DB) AddDid(did string) error {
4
-
_, err := d.Exec(`insert or ignore into known_dids (did) values (?)`, did)
5
-
return err
6
-
}
7
-
8
-
func (d *DB) RemoveDid(did string) error {
9
-
_, err := d.Exec(`delete from known_dids where did = ?`, did)
10
-
return err
11
-
}
12
-
13
-
func (d *DB) GetAllDids() ([]string, error) {
14
-
var dids []string
15
-
16
-
rows, err := d.Query(`select did from known_dids`)
17
-
if err != nil {
18
-
return nil, err
19
-
}
20
-
defer rows.Close()
21
-
22
-
for rows.Next() {
23
-
var did string
24
-
if err := rows.Scan(&did); err != nil {
25
-
return nil, err
26
-
}
27
-
dids = append(dids, did)
28
-
}
29
-
30
-
if err := rows.Err(); err != nil {
31
-
return nil, err
32
-
}
33
-
34
-
return dids, nil
35
-
}
36
-
37
-
func (d *DB) HasKnownDids() bool {
38
-
var count int
39
-
err := d.QueryRow(`select count(*) from known_dids`).Scan(&count)
40
-
if err != nil {
41
-
return false
42
-
}
43
-
return count > 0
44
-
}
+120
-11
spindle/db/repos.go
+120
-11
spindle/db/repos.go
···
1
1
package db
2
2
3
+
import "github.com/bluesky-social/indigo/atproto/syntax"
4
+
3
5
type Repo struct {
4
-
Knot string
5
-
Owner string
6
-
Name string
6
+
Did syntax.DID
7
+
Rkey syntax.RecordKey
8
+
Name string
9
+
Knot string
10
+
}
11
+
12
+
type RepoCollaborator struct {
13
+
Did syntax.DID
14
+
Rkey syntax.RecordKey
15
+
Repo syntax.ATURI
16
+
Subject syntax.DID
7
17
}
8
18
9
-
func (d *DB) AddRepo(knot, owner, name string) error {
10
-
_, err := d.Exec(`insert or ignore into repos (knot, owner, name) values (?, ?, ?)`, knot, owner, name)
19
+
func (d *DB) PutRepo(repo *Repo) error {
20
+
_, err := d.Exec(
21
+
`insert or ignore into repos (did, rkey, name, knot)
22
+
values (?, ?, ?, ?)
23
+
on conflict(did, rkey) do update set
24
+
name = excluded.name
25
+
knot = excluded.knot`,
26
+
repo.Did,
27
+
repo.Rkey,
28
+
repo.Name,
29
+
repo.Knot,
30
+
)
31
+
return err
32
+
}
33
+
34
+
func (d *DB) DeleteRepo(did syntax.DID, rkey syntax.RecordKey) error {
35
+
_, err := d.Exec(
36
+
`delete from repos where did = ? and rkey = ?`,
37
+
did,
38
+
rkey,
39
+
)
11
40
return err
12
41
}
13
42
···
34
63
return knots, nil
35
64
}
36
65
37
-
func (d *DB) GetRepo(knot, owner, name string) (*Repo, error) {
66
+
func (d *DB) GetRepo(did syntax.DID, rkey syntax.RecordKey) (*Repo, error) {
38
67
var repo Repo
68
+
err := d.DB.QueryRow(
69
+
`select
70
+
did,
71
+
rkey,
72
+
name,
73
+
knot
74
+
from repos where did = ? and rkey = ?`,
75
+
did,
76
+
rkey,
77
+
).Scan(
78
+
&repo.Did,
79
+
&repo.Rkey,
80
+
&repo.Name,
81
+
&repo.Knot,
82
+
)
83
+
if err != nil {
84
+
return nil, err
85
+
}
86
+
return &repo, nil
87
+
}
39
88
40
-
query := "select knot, owner, name from repos where knot = ? and owner = ? and name = ?"
41
-
err := d.DB.QueryRow(query, knot, owner, name).
42
-
Scan(&repo.Knot, &repo.Owner, &repo.Name)
89
+
func (d *DB) GetRepoWithName(did syntax.DID, name string) (*Repo, error) {
90
+
var repo Repo
91
+
err := d.DB.QueryRow(
92
+
`select
93
+
did,
94
+
rkey,
95
+
name,
96
+
knot
97
+
from repos where did = ? and name = ?`,
98
+
did,
99
+
name,
100
+
).Scan(
101
+
&repo.Did,
102
+
&repo.Rkey,
103
+
&repo.Name,
104
+
&repo.Knot,
105
+
)
106
+
if err != nil {
107
+
return nil, err
108
+
}
109
+
return &repo, nil
110
+
}
111
+
112
+
func (d *DB) PutRepoCollaborator(collaborator *RepoCollaborator) error {
113
+
_, err := d.Exec(
114
+
`insert into repo_collaborators (did, rkey, repo, subject)
115
+
values (?, ?, ?, ?)
116
+
on conflict(did, rkey) do update set
117
+
repo = excluded.repo
118
+
subject = excluded.subject`,
119
+
collaborator.Did,
120
+
collaborator.Rkey,
121
+
collaborator.Repo,
122
+
collaborator.Subject,
123
+
)
124
+
return err
125
+
}
126
+
127
+
func (d *DB) RemoveRepoCollaborator(did syntax.DID, rkey syntax.RecordKey) error {
128
+
_, err := d.Exec(
129
+
`delete from repo_collaborators where did = ? and rkey = ?`,
130
+
did,
131
+
rkey,
132
+
)
133
+
return err
134
+
}
43
135
136
+
func (d *DB) GetRepoCollaborator(did syntax.DID, rkey syntax.RecordKey) (*RepoCollaborator, error) {
137
+
var collaborator RepoCollaborator
138
+
err := d.DB.QueryRow(
139
+
`select
140
+
did,
141
+
rkey,
142
+
repo,
143
+
subject
144
+
from repo_collaborators
145
+
where did = ? and rkey = ?`,
146
+
did,
147
+
rkey,
148
+
).Scan(
149
+
&collaborator.Did,
150
+
&collaborator.Rkey,
151
+
&collaborator.Repo,
152
+
&collaborator.Subject,
153
+
)
44
154
if err != nil {
45
155
return nil, err
46
156
}
47
-
48
-
return &repo, nil
157
+
return &collaborator, nil
49
158
}
+24
-10
spindle/engines/nixery/engine.go
+24
-10
spindle/engines/nixery/engine.go
···
179
179
return err
180
180
}
181
181
e.registerCleanup(wid, func(ctx context.Context) error {
182
-
return e.docker.NetworkRemove(ctx, networkName(wid))
182
+
err := e.docker.NetworkRemove(ctx, networkName(wid))
183
+
if err != nil {
184
+
return fmt.Errorf("removing network: %w", err)
185
+
}
186
+
return nil
183
187
})
184
188
185
189
addl := wf.Data.(addlFields)
···
229
233
return fmt.Errorf("creating container: %w", err)
230
234
}
231
235
e.registerCleanup(wid, func(ctx context.Context) error {
232
-
err = e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
236
+
err := e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
233
237
if err != nil {
234
-
return err
238
+
return fmt.Errorf("stopping container: %w", err)
235
239
}
236
240
237
-
return e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
241
+
err = e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
238
242
RemoveVolumes: true,
239
243
RemoveLinks: false,
240
244
Force: false,
241
245
})
246
+
if err != nil {
247
+
return fmt.Errorf("removing container: %w", err)
248
+
}
249
+
return nil
242
250
})
243
251
244
252
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
···
394
402
}
395
403
396
404
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
397
-
e.cleanupMu.Lock()
398
-
key := wid.String()
399
-
400
-
fns := e.cleanup[key]
401
-
delete(e.cleanup, key)
402
-
e.cleanupMu.Unlock()
405
+
fns := e.drainCleanups(wid)
403
406
404
407
for _, fn := range fns {
405
408
if err := fn(ctx); err != nil {
···
415
418
416
419
key := wid.String()
417
420
e.cleanup[key] = append(e.cleanup[key], fn)
421
+
}
422
+
423
+
func (e *Engine) drainCleanups(wid models.WorkflowId) []cleanupFunc {
424
+
e.cleanupMu.Lock()
425
+
key := wid.String()
426
+
427
+
fns := e.cleanup[key]
428
+
delete(e.cleanup, key)
429
+
e.cleanupMu.Unlock()
430
+
431
+
return fns
418
432
}
419
433
420
434
func networkName(wid models.WorkflowId) string {
-300
spindle/ingester.go
-300
spindle/ingester.go
···
1
-
package spindle
2
-
3
-
import (
4
-
"context"
5
-
"encoding/json"
6
-
"errors"
7
-
"fmt"
8
-
"time"
9
-
10
-
"tangled.org/core/api/tangled"
11
-
"tangled.org/core/eventconsumer"
12
-
"tangled.org/core/rbac"
13
-
"tangled.org/core/spindle/db"
14
-
15
-
comatproto "github.com/bluesky-social/indigo/api/atproto"
16
-
"github.com/bluesky-social/indigo/atproto/identity"
17
-
"github.com/bluesky-social/indigo/atproto/syntax"
18
-
"github.com/bluesky-social/indigo/xrpc"
19
-
"github.com/bluesky-social/jetstream/pkg/models"
20
-
securejoin "github.com/cyphar/filepath-securejoin"
21
-
)
22
-
23
-
type Ingester func(ctx context.Context, e *models.Event) error
24
-
25
-
func (s *Spindle) ingest() Ingester {
26
-
return func(ctx context.Context, e *models.Event) error {
27
-
var err error
28
-
defer func() {
29
-
eventTime := e.TimeUS
30
-
lastTimeUs := eventTime + 1
31
-
if err := s.db.SaveLastTimeUs(lastTimeUs); err != nil {
32
-
err = fmt.Errorf("(deferred) failed to save last time us: %w", err)
33
-
}
34
-
}()
35
-
36
-
if e.Kind != models.EventKindCommit {
37
-
return nil
38
-
}
39
-
40
-
switch e.Commit.Collection {
41
-
case tangled.SpindleMemberNSID:
42
-
err = s.ingestMember(ctx, e)
43
-
case tangled.RepoNSID:
44
-
err = s.ingestRepo(ctx, e)
45
-
case tangled.RepoCollaboratorNSID:
46
-
err = s.ingestCollaborator(ctx, e)
47
-
}
48
-
49
-
if err != nil {
50
-
s.l.Debug("failed to process message", "nsid", e.Commit.Collection, "err", err)
51
-
}
52
-
53
-
return nil
54
-
}
55
-
}
56
-
57
-
func (s *Spindle) ingestMember(_ context.Context, e *models.Event) error {
58
-
var err error
59
-
did := e.Did
60
-
rkey := e.Commit.RKey
61
-
62
-
l := s.l.With("component", "ingester", "record", tangled.SpindleMemberNSID)
63
-
64
-
switch e.Commit.Operation {
65
-
case models.CommitOperationCreate, models.CommitOperationUpdate:
66
-
raw := e.Commit.Record
67
-
record := tangled.SpindleMember{}
68
-
err = json.Unmarshal(raw, &record)
69
-
if err != nil {
70
-
l.Error("invalid record", "error", err)
71
-
return err
72
-
}
73
-
74
-
domain := s.cfg.Server.Hostname
75
-
recordInstance := record.Instance
76
-
77
-
if recordInstance != domain {
78
-
l.Error("domain mismatch", "domain", recordInstance, "expected", domain)
79
-
return fmt.Errorf("domain mismatch: %s != %s", record.Instance, domain)
80
-
}
81
-
82
-
ok, err := s.e.IsSpindleInviteAllowed(did, rbacDomain)
83
-
if err != nil || !ok {
84
-
l.Error("failed to add member", "did", did, "error", err)
85
-
return fmt.Errorf("failed to enforce permissions: %w", err)
86
-
}
87
-
88
-
if err := db.AddSpindleMember(s.db, db.SpindleMember{
89
-
Did: syntax.DID(did),
90
-
Rkey: rkey,
91
-
Instance: recordInstance,
92
-
Subject: syntax.DID(record.Subject),
93
-
Created: time.Now(),
94
-
}); err != nil {
95
-
l.Error("failed to add member", "error", err)
96
-
return fmt.Errorf("failed to add member: %w", err)
97
-
}
98
-
99
-
if err := s.e.AddSpindleMember(rbacDomain, record.Subject); err != nil {
100
-
l.Error("failed to add member", "error", err)
101
-
return fmt.Errorf("failed to add member: %w", err)
102
-
}
103
-
l.Info("added member from firehose", "member", record.Subject)
104
-
105
-
if err := s.db.AddDid(record.Subject); err != nil {
106
-
l.Error("failed to add did", "error", err)
107
-
return fmt.Errorf("failed to add did: %w", err)
108
-
}
109
-
s.jc.AddDid(record.Subject)
110
-
111
-
return nil
112
-
113
-
case models.CommitOperationDelete:
114
-
record, err := db.GetSpindleMember(s.db, did, rkey)
115
-
if err != nil {
116
-
l.Error("failed to find member", "error", err)
117
-
return fmt.Errorf("failed to find member: %w", err)
118
-
}
119
-
120
-
if err := db.RemoveSpindleMember(s.db, did, rkey); err != nil {
121
-
l.Error("failed to remove member", "error", err)
122
-
return fmt.Errorf("failed to remove member: %w", err)
123
-
}
124
-
125
-
if err := s.e.RemoveSpindleMember(rbacDomain, record.Subject.String()); err != nil {
126
-
l.Error("failed to add member", "error", err)
127
-
return fmt.Errorf("failed to add member: %w", err)
128
-
}
129
-
l.Info("added member from firehose", "member", record.Subject)
130
-
131
-
if err := s.db.RemoveDid(record.Subject.String()); err != nil {
132
-
l.Error("failed to add did", "error", err)
133
-
return fmt.Errorf("failed to add did: %w", err)
134
-
}
135
-
s.jc.RemoveDid(record.Subject.String())
136
-
137
-
}
138
-
return nil
139
-
}
140
-
141
-
func (s *Spindle) ingestRepo(ctx context.Context, e *models.Event) error {
142
-
var err error
143
-
did := e.Did
144
-
145
-
l := s.l.With("component", "ingester", "record", tangled.RepoNSID)
146
-
147
-
l.Info("ingesting repo record", "did", did)
148
-
149
-
switch e.Commit.Operation {
150
-
case models.CommitOperationCreate, models.CommitOperationUpdate:
151
-
raw := e.Commit.Record
152
-
record := tangled.Repo{}
153
-
err = json.Unmarshal(raw, &record)
154
-
if err != nil {
155
-
l.Error("invalid record", "error", err)
156
-
return err
157
-
}
158
-
159
-
domain := s.cfg.Server.Hostname
160
-
161
-
// no spindle configured for this repo
162
-
if record.Spindle == nil {
163
-
l.Info("no spindle configured", "name", record.Name)
164
-
return nil
165
-
}
166
-
167
-
// this repo did not want this spindle
168
-
if *record.Spindle != domain {
169
-
l.Info("different spindle configured", "name", record.Name, "spindle", *record.Spindle, "domain", domain)
170
-
return nil
171
-
}
172
-
173
-
// add this repo to the watch list
174
-
if err := s.db.AddRepo(record.Knot, did, record.Name); err != nil {
175
-
l.Error("failed to add repo", "error", err)
176
-
return fmt.Errorf("failed to add repo: %w", err)
177
-
}
178
-
179
-
didSlashRepo, err := securejoin.SecureJoin(did, record.Name)
180
-
if err != nil {
181
-
return err
182
-
}
183
-
184
-
// add repo to rbac
185
-
if err := s.e.AddRepo(did, rbac.ThisServer, didSlashRepo); err != nil {
186
-
l.Error("failed to add repo to enforcer", "error", err)
187
-
return fmt.Errorf("failed to add repo: %w", err)
188
-
}
189
-
190
-
// add collaborators to rbac
191
-
owner, err := s.res.ResolveIdent(ctx, did)
192
-
if err != nil || owner.Handle.IsInvalidHandle() {
193
-
return err
194
-
}
195
-
if err := s.fetchAndAddCollaborators(ctx, owner, didSlashRepo); err != nil {
196
-
return err
197
-
}
198
-
199
-
// add this knot to the event consumer
200
-
src := eventconsumer.NewKnotSource(record.Knot)
201
-
s.ks.AddSource(context.Background(), src)
202
-
203
-
return nil
204
-
205
-
}
206
-
return nil
207
-
}
208
-
209
-
func (s *Spindle) ingestCollaborator(ctx context.Context, e *models.Event) error {
210
-
var err error
211
-
212
-
l := s.l.With("component", "ingester", "record", tangled.RepoCollaboratorNSID, "did", e.Did)
213
-
214
-
l.Info("ingesting collaborator record")
215
-
216
-
switch e.Commit.Operation {
217
-
case models.CommitOperationCreate, models.CommitOperationUpdate:
218
-
raw := e.Commit.Record
219
-
record := tangled.RepoCollaborator{}
220
-
err = json.Unmarshal(raw, &record)
221
-
if err != nil {
222
-
l.Error("invalid record", "error", err)
223
-
return err
224
-
}
225
-
226
-
subjectId, err := s.res.ResolveIdent(ctx, record.Subject)
227
-
if err != nil || subjectId.Handle.IsInvalidHandle() {
228
-
return err
229
-
}
230
-
231
-
repoAt, err := syntax.ParseATURI(record.Repo)
232
-
if err != nil {
233
-
l.Info("rejecting record, invalid repoAt", "repoAt", record.Repo)
234
-
return nil
235
-
}
236
-
237
-
// TODO: get rid of this entirely
238
-
// resolve this aturi to extract the repo record
239
-
owner, err := s.res.ResolveIdent(ctx, repoAt.Authority().String())
240
-
if err != nil || owner.Handle.IsInvalidHandle() {
241
-
return fmt.Errorf("failed to resolve handle: %w", err)
242
-
}
243
-
244
-
xrpcc := xrpc.Client{
245
-
Host: owner.PDSEndpoint(),
246
-
}
247
-
248
-
resp, err := comatproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String())
249
-
if err != nil {
250
-
return err
251
-
}
252
-
253
-
repo := resp.Value.Val.(*tangled.Repo)
254
-
didSlashRepo, _ := securejoin.SecureJoin(owner.DID.String(), repo.Name)
255
-
256
-
// check perms for this user
257
-
if ok, err := s.e.IsCollaboratorInviteAllowed(owner.DID.String(), rbac.ThisServer, didSlashRepo); !ok || err != nil {
258
-
return fmt.Errorf("insufficient permissions: %w", err)
259
-
}
260
-
261
-
// add collaborator to rbac
262
-
if err := s.e.AddCollaborator(record.Subject, rbac.ThisServer, didSlashRepo); err != nil {
263
-
l.Error("failed to add repo to enforcer", "error", err)
264
-
return fmt.Errorf("failed to add repo: %w", err)
265
-
}
266
-
267
-
return nil
268
-
}
269
-
return nil
270
-
}
271
-
272
-
func (s *Spindle) fetchAndAddCollaborators(ctx context.Context, owner *identity.Identity, didSlashRepo string) error {
273
-
l := s.l.With("component", "ingester", "handler", "fetchAndAddCollaborators")
274
-
275
-
l.Info("fetching and adding existing collaborators")
276
-
277
-
xrpcc := xrpc.Client{
278
-
Host: owner.PDSEndpoint(),
279
-
}
280
-
281
-
resp, err := comatproto.RepoListRecords(ctx, &xrpcc, tangled.RepoCollaboratorNSID, "", 50, owner.DID.String(), false)
282
-
if err != nil {
283
-
return err
284
-
}
285
-
286
-
var errs error
287
-
for _, r := range resp.Records {
288
-
if r == nil {
289
-
continue
290
-
}
291
-
record := r.Value.Val.(*tangled.RepoCollaborator)
292
-
293
-
if err := s.e.AddCollaborator(record.Subject, rbac.ThisServer, didSlashRepo); err != nil {
294
-
l.Error("failed to add repo to enforcer", "error", err)
295
-
errors.Join(errs, fmt.Errorf("failed to add repo: %w", err))
296
-
}
297
-
}
298
-
299
-
return errs
300
-
}
+1
-1
spindle/models/pipeline_env.go
+1
-1
spindle/models/pipeline_env.go
+1
-1
spindle/motd
+1
-1
spindle/motd
+133
-70
spindle/server.go
+133
-70
spindle/server.go
···
1
1
package spindle
2
2
3
3
import (
4
+
"bytes"
4
5
"context"
5
6
_ "embed"
6
7
"encoding/json"
···
8
9
"log/slog"
9
10
"maps"
10
11
"net/http"
12
+
"os"
13
+
"os/exec"
14
+
"path"
15
+
"strings"
11
16
17
+
"github.com/bluesky-social/indigo/atproto/syntax"
12
18
"github.com/go-chi/chi/v5"
19
+
"github.com/hashicorp/go-version"
13
20
"tangled.org/core/api/tangled"
14
21
"tangled.org/core/eventconsumer"
15
22
"tangled.org/core/eventconsumer/cursor"
16
23
"tangled.org/core/idresolver"
17
-
"tangled.org/core/jetstream"
18
24
"tangled.org/core/log"
19
25
"tangled.org/core/notifier"
20
-
"tangled.org/core/rbac"
26
+
"tangled.org/core/rbac2"
21
27
"tangled.org/core/spindle/config"
22
28
"tangled.org/core/spindle/db"
23
29
"tangled.org/core/spindle/engine"
···
26
32
"tangled.org/core/spindle/queue"
27
33
"tangled.org/core/spindle/secrets"
28
34
"tangled.org/core/spindle/xrpc"
35
+
"tangled.org/core/tap"
29
36
"tangled.org/core/xrpc/serviceauth"
30
37
)
31
38
32
39
//go:embed motd
33
40
var motd []byte
34
41
35
-
const (
36
-
rbacDomain = "thisserver"
37
-
)
38
-
39
42
type Spindle struct {
40
-
jc *jetstream.JetstreamClient
43
+
tap *tap.Client
41
44
db *db.DB
42
-
e *rbac.Enforcer
45
+
e *rbac2.Enforcer
43
46
l *slog.Logger
44
47
n *notifier.Notifier
45
48
engs map[string]models.Engine
···
54
57
func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
55
58
logger := log.FromContext(ctx)
56
59
57
-
d, err := db.Make(cfg.Server.DBPath)
60
+
if err := ensureGitVersion(); err != nil {
61
+
return nil, fmt.Errorf("ensuring git version: %w", err)
62
+
}
63
+
64
+
d, err := db.Make(ctx, cfg.Server.DBPath)
58
65
if err != nil {
59
66
return nil, fmt.Errorf("failed to setup db: %w", err)
60
67
}
61
68
62
-
e, err := rbac.NewEnforcer(cfg.Server.DBPath)
69
+
e, err := rbac2.NewEnforcer(cfg.Server.DBPath)
63
70
if err != nil {
64
71
return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
65
72
}
66
-
e.E.EnableAutoSave(true)
67
73
68
74
n := notifier.New()
69
75
···
95
101
jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
96
102
logger.Info("initialized queue", "queueSize", cfg.Server.QueueSize, "numWorkers", cfg.Server.MaxJobCount)
97
103
98
-
collections := []string{
99
-
tangled.SpindleMemberNSID,
100
-
tangled.RepoNSID,
101
-
tangled.RepoCollaboratorNSID,
102
-
}
103
-
jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
104
-
if err != nil {
105
-
return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
106
-
}
107
-
jc.AddDid(cfg.Server.Owner)
108
-
109
-
// Check if the spindle knows about any Dids;
110
-
dids, err := d.GetAllDids()
111
-
if err != nil {
112
-
return nil, fmt.Errorf("failed to get all dids: %w", err)
113
-
}
114
-
for _, d := range dids {
115
-
jc.AddDid(d)
116
-
}
104
+
tap := tap.NewClient(cfg.Server.TapUrl, "")
117
105
118
106
resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
119
107
120
108
spindle := &Spindle{
121
-
jc: jc,
109
+
tap: &tap,
122
110
e: e,
123
111
db: d,
124
112
l: logger,
···
130
118
vault: vault,
131
119
}
132
120
133
-
err = e.AddSpindle(rbacDomain)
134
-
if err != nil {
135
-
return nil, fmt.Errorf("failed to set rbac domain: %w", err)
136
-
}
137
-
err = spindle.configureOwner()
121
+
err = e.SetSpindleOwner(spindle.cfg.Server.Owner, spindle.cfg.Server.Did())
138
122
if err != nil {
139
123
return nil, err
140
124
}
···
143
127
cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
144
128
if err != nil {
145
129
return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
146
-
}
147
-
148
-
err = jc.StartJetstream(ctx, spindle.ingest())
149
-
if err != nil {
150
-
return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
151
130
}
152
131
153
132
// for each incoming sh.tangled.pipeline, we execute
···
197
176
}
198
177
199
178
// Enforcer returns the RBAC enforcer instance.
200
-
func (s *Spindle) Enforcer() *rbac.Enforcer {
179
+
func (s *Spindle) Enforcer() *rbac2.Enforcer {
201
180
return s.e
202
181
}
203
182
···
217
196
s.ks.Start(ctx)
218
197
}()
219
198
199
+
go func() {
200
+
s.l.Info("starting tap stream consumer")
201
+
s.tap.Connect(ctx, &tap.SimpleIndexer{
202
+
EventHandler: s.processEvent,
203
+
})
204
+
}()
205
+
220
206
s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
221
207
return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
222
208
}
···
268
254
Config: s.cfg,
269
255
Resolver: s.res,
270
256
Vault: s.vault,
257
+
Notifier: s.Notifier(),
271
258
ServiceAuth: serviceAuth,
272
259
}
273
260
···
275
262
}
276
263
277
264
func (s *Spindle) processPipeline(ctx context.Context, src eventconsumer.Source, msg eventconsumer.Message) error {
265
+
l := log.FromContext(ctx).With("handler", "processKnotStream")
266
+
l = l.With("src", src.Key(), "msg.Nsid", msg.Nsid, "msg.Rkey", msg.Rkey)
278
267
if msg.Nsid == tangled.PipelineNSID {
268
+
return nil
279
269
tpl := tangled.Pipeline{}
280
270
err := json.Unmarshal(msg.EventJson, &tpl)
281
271
if err != nil {
···
296
286
}
297
287
298
288
// filter by repos
299
-
_, err = s.db.GetRepo(
300
-
tpl.TriggerMetadata.Repo.Knot,
301
-
tpl.TriggerMetadata.Repo.Did,
289
+
_, err = s.db.GetRepoWithName(
290
+
syntax.DID(tpl.TriggerMetadata.Repo.Did),
302
291
tpl.TriggerMetadata.Repo.Repo,
303
292
)
304
293
if err != nil {
305
-
return err
294
+
return fmt.Errorf("failed to get repo: %w", err)
306
295
}
307
296
308
297
pipelineId := models.PipelineId{
···
323
312
Name: w.Name,
324
313
}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
325
314
if err != nil {
326
-
return err
315
+
return fmt.Errorf("db.StatusFailed: %w", err)
327
316
}
328
317
329
318
continue
···
337
326
338
327
ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
339
328
if err != nil {
340
-
return err
329
+
return fmt.Errorf("init workflow: %w", err)
341
330
}
342
331
343
332
// inject TANGLED_* env vars after InitWorkflow
···
354
343
Name: w.Name,
355
344
}, s.n)
356
345
if err != nil {
357
-
return err
346
+
return fmt.Errorf("db.StatusPending: %w", err)
358
347
}
359
348
}
360
349
}
···
377
366
} else {
378
367
s.l.Error("failed to enqueue pipeline: queue is full")
379
368
}
369
+
} else if msg.Nsid == tangled.GitRefUpdateNSID {
370
+
event := tangled.GitRefUpdate{}
371
+
if err := json.Unmarshal(msg.EventJson, &event); err != nil {
372
+
l.Error("error unmarshalling", "err", err)
373
+
return err
374
+
}
375
+
l = l.With("repoDid", event.RepoDid, "repoName", event.RepoName)
376
+
377
+
// use event.RepoAt
378
+
// sync git repos in {data}/repos/{did}/sh.tangled.repo/{rkey}
379
+
// if it's nil, don't run pipeline. knot needs upgrade
380
+
// we will leave sh.tangled.pipeline.trigger for backward compatibility
381
+
382
+
// NOTE: we are blindly trusting the knot that it will return only repos it own
383
+
repoCloneUri := s.newRepoCloneUrl(src.Key(), event.RepoDid, event.RepoName)
384
+
repoPath := s.newRepoPath(event.RepoDid, event.RepoName)
385
+
err := sparseSyncGitRepo(ctx, repoCloneUri, repoPath, event.NewSha)
386
+
if err != nil {
387
+
l.Error("failed to sync git repo", "err", err)
388
+
return fmt.Errorf("sync git repo: %w", err)
389
+
}
390
+
l.Info("synced git repo")
391
+
392
+
// TODO: plan the pipeline
380
393
}
381
394
382
395
return nil
383
396
}
384
397
385
-
func (s *Spindle) configureOwner() error {
386
-
cfgOwner := s.cfg.Server.Owner
398
+
func (s *Spindle) newRepoPath(did, name string) string {
399
+
return path.Join(s.cfg.Server.RepoDir(), did, name)
400
+
}
401
+
402
+
func (s *Spindle) newRepoCloneUrl(knot, did, name string) string {
403
+
scheme := "https://"
404
+
if s.cfg.Server.Dev {
405
+
scheme = "http://"
406
+
}
407
+
return fmt.Sprintf("%s%s/%s/%s", scheme, knot, did, name)
408
+
}
409
+
410
+
const RequiredVersion = "2.49.0"
411
+
412
+
func ensureGitVersion() error {
413
+
v, err := gitVersion()
414
+
if err != nil {
415
+
return fmt.Errorf("fetching git version: %w", err)
416
+
}
417
+
if v.LessThan(version.Must(version.NewVersion(RequiredVersion))) {
418
+
return fmt.Errorf("installed git version %q is not supported, Spindle requires git version >= %q", v, RequiredVersion)
419
+
}
420
+
return nil
421
+
}
387
422
388
-
existing, err := s.e.GetSpindleUsersByRole("server:owner", rbacDomain)
423
+
// TODO: move to "git" module shared between knot, appview & spindle
424
+
func gitVersion() (*version.Version, error) {
425
+
var buf bytes.Buffer
426
+
cmd := exec.Command("git", "version")
427
+
cmd.Stdout = &buf
428
+
cmd.Stderr = os.Stderr
429
+
err := cmd.Run()
389
430
if err != nil {
390
-
return err
431
+
return nil, err
432
+
}
433
+
fields := strings.Fields(buf.String())
434
+
if len(fields) < 3 {
435
+
return nil, fmt.Errorf("invalid git version: %s", buf)
391
436
}
392
437
393
-
switch len(existing) {
394
-
case 0:
395
-
// no owner configured, continue
396
-
case 1:
397
-
// find existing owner
398
-
existingOwner := existing[0]
438
+
// version string is like: "git version 2.29.3" or "git version 2.29.3.windows.1"
439
+
versionString := fields[2]
440
+
if pos := strings.Index(versionString, "windows"); pos >= 1 {
441
+
versionString = versionString[:pos-1]
442
+
}
443
+
return version.NewVersion(versionString)
444
+
}
399
445
400
-
// no ownership change, this is okay
401
-
if existingOwner == s.cfg.Server.Owner {
402
-
break
446
+
func sparseSyncGitRepo(ctx context.Context, cloneUri, path, rev string) error {
447
+
exist, err := isDir(path)
448
+
if err != nil {
449
+
return err
450
+
}
451
+
if !exist {
452
+
if err := exec.Command("git", "clone", "--no-checkout", "--depth=1", "--filter=tree:0", "--revision="+rev, cloneUri, path).Run(); err != nil {
453
+
return fmt.Errorf("git clone: %w", err)
403
454
}
404
-
405
-
// remove existing owner
406
-
err = s.e.RemoveSpindleOwner(rbacDomain, existingOwner)
407
-
if err != nil {
408
-
return nil
455
+
if err := exec.Command("git", "-C", path, "sparse-checkout", "set", "--no-cone", `'/.tangled/workflows'`).Run(); err != nil {
456
+
return fmt.Errorf("git sparse-checkout set: %w", err)
457
+
}
458
+
if err := exec.Command("git", "-C", path, "checkout", rev).Run(); err != nil {
459
+
return fmt.Errorf("git checkout: %w", err)
460
+
}
461
+
} else {
462
+
if err := exec.Command("git", "-C", path, "pull", "origin", rev).Run(); err != nil {
463
+
return fmt.Errorf("git pull: %w", err)
409
464
}
410
-
default:
411
-
return fmt.Errorf("more than one owner in DB, try deleting %q and starting over", s.cfg.Server.DBPath)
412
465
}
466
+
return nil
467
+
}
413
468
414
-
return s.e.AddSpindleOwner(rbacDomain, cfgOwner)
469
+
func isDir(path string) (bool, error) {
470
+
info, err := os.Stat(path)
471
+
if err == nil && info.IsDir() {
472
+
return true, nil
473
+
}
474
+
if os.IsNotExist(err) {
475
+
return false, nil
476
+
}
477
+
return false, err
415
478
}
+281
spindle/tap.go
+281
spindle/tap.go
···
1
+
package spindle
2
+
3
+
import (
4
+
"context"
5
+
"encoding/json"
6
+
"fmt"
7
+
"time"
8
+
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
10
+
"tangled.org/core/api/tangled"
11
+
"tangled.org/core/eventconsumer"
12
+
"tangled.org/core/spindle/db"
13
+
"tangled.org/core/tap"
14
+
)
15
+
16
+
func (s *Spindle) processEvent(ctx context.Context, evt tap.Event) error {
17
+
l := s.l.With("component", "tapIndexer")
18
+
19
+
var err error
20
+
switch evt.Type {
21
+
case tap.EvtRecord:
22
+
switch evt.Record.Collection.String() {
23
+
case tangled.SpindleMemberNSID:
24
+
err = s.processMember(ctx, evt)
25
+
case tangled.RepoNSID:
26
+
err = s.processRepo(ctx, evt)
27
+
case tangled.RepoCollaboratorNSID:
28
+
err = s.processCollaborator(ctx, evt)
29
+
case tangled.RepoPullNSID:
30
+
err = s.processPull(ctx, evt)
31
+
}
32
+
case tap.EvtIdentity:
33
+
// no-op
34
+
}
35
+
36
+
if err != nil {
37
+
l.Error("failed to process message. will retry later", "event.ID", evt.ID, "err", err)
38
+
return err
39
+
}
40
+
return nil
41
+
}
42
+
43
+
// NOTE: make sure to return nil if we don't need to retry (e.g. forbidden, unrelated)
44
+
45
+
func (s *Spindle) processMember(ctx context.Context, evt tap.Event) error {
46
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
47
+
48
+
l.Info("processing spindle.member record")
49
+
50
+
// check perms for this user
51
+
if ok, err := s.e.IsSpindleMemberInviteAllowed(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
52
+
l.Warn("forbidden request", "did", evt.Record.Did, "error", err)
53
+
return nil
54
+
}
55
+
56
+
switch evt.Record.Action {
57
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
58
+
record := tangled.SpindleMember{}
59
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
60
+
return fmt.Errorf("parsing record: %w", err)
61
+
}
62
+
63
+
domain := s.cfg.Server.Hostname
64
+
if record.Instance != domain {
65
+
l.Info("domain mismatch", "domain", record.Instance, "expected", domain)
66
+
return nil
67
+
}
68
+
69
+
created, err := time.Parse(record.CreatedAt, time.RFC3339)
70
+
if err != nil {
71
+
created = time.Now()
72
+
}
73
+
if err := db.AddSpindleMember(s.db, db.SpindleMember{
74
+
Did: evt.Record.Did,
75
+
Rkey: evt.Record.Rkey.String(),
76
+
Instance: record.Instance,
77
+
Subject: syntax.DID(record.Subject),
78
+
Created: created,
79
+
}); err != nil {
80
+
l.Error("failed to add member", "error", err)
81
+
return fmt.Errorf("adding member to db: %w", err)
82
+
}
83
+
if err := s.e.AddSpindleMember(syntax.DID(record.Subject), s.cfg.Server.Did()); err != nil {
84
+
return fmt.Errorf("adding member to rbac: %w", err)
85
+
}
86
+
if err := s.tap.AddRepos(ctx, []syntax.DID{syntax.DID(record.Subject)}); err != nil {
87
+
return fmt.Errorf("adding did to tap", err)
88
+
}
89
+
90
+
l.Info("added member", "member", record.Subject)
91
+
return nil
92
+
93
+
case tap.RecordDeleteAction:
94
+
var (
95
+
did = evt.Record.Did.String()
96
+
rkey = evt.Record.Rkey.String()
97
+
)
98
+
member, err := db.GetSpindleMember(s.db, did, rkey)
99
+
if err != nil {
100
+
return fmt.Errorf("finding member: %w", err)
101
+
}
102
+
103
+
if err := db.RemoveSpindleMember(s.db, did, rkey); err != nil {
104
+
return fmt.Errorf("removing member from db: %w", err)
105
+
}
106
+
if err := s.e.RemoveSpindleMember(member.Subject, s.cfg.Server.Did()); err != nil {
107
+
return fmt.Errorf("removing member from rbac: %w", err)
108
+
}
109
+
if err := s.tapSafeRemoveDid(ctx, member.Subject); err != nil {
110
+
return fmt.Errorf("removing did from tap: %w", err)
111
+
}
112
+
113
+
l.Info("removed member", "member", member.Subject)
114
+
return nil
115
+
}
116
+
return nil
117
+
}
118
+
119
+
func (s *Spindle) processCollaborator(ctx context.Context, evt tap.Event) error {
120
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
121
+
122
+
l.Info("processing collaborator record")
123
+
switch evt.Record.Action {
124
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
125
+
record := tangled.RepoCollaborator{}
126
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
127
+
l.Error("invalid record", "err", err)
128
+
return fmt.Errorf("parsing record: %w", err)
129
+
}
130
+
131
+
// check perms for this user
132
+
if ok, err := s.e.IsRepoCollaboratorInviteAllowed(evt.Record.Did, syntax.ATURI(record.Repo)); !ok || err != nil {
133
+
l.Warn("forbidden request", "did", evt.Record.Did, "err", err)
134
+
return nil
135
+
}
136
+
137
+
if err := s.db.PutRepoCollaborator(&db.RepoCollaborator{
138
+
Did: evt.Record.Did,
139
+
Rkey: evt.Record.Rkey,
140
+
Repo: syntax.ATURI(record.Repo),
141
+
Subject: syntax.DID(record.Subject),
142
+
}); err != nil {
143
+
return fmt.Errorf("adding collaborator to db: %w", err)
144
+
}
145
+
if err := s.e.AddRepoCollaborator(syntax.DID(record.Subject), syntax.ATURI(record.Repo)); err != nil {
146
+
return fmt.Errorf("adding collaborator to rbac: %w", err)
147
+
}
148
+
if err := s.tap.AddRepos(ctx, []syntax.DID{syntax.DID(record.Subject)}); err != nil {
149
+
return fmt.Errorf("adding did to tap: %w", err)
150
+
}
151
+
152
+
l.Info("add repo collaborator", "subejct", record.Subject, "repo", record.Repo)
153
+
return nil
154
+
155
+
case tap.RecordDeleteAction:
156
+
// get existing collaborator
157
+
collaborator, err := s.db.GetRepoCollaborator(evt.Record.Did, evt.Record.Rkey)
158
+
if err != nil {
159
+
return fmt.Errorf("failed to get existing collaborator info: %w", err)
160
+
}
161
+
162
+
// check perms for this user
163
+
if ok, err := s.e.IsRepoCollaboratorInviteAllowed(evt.Record.Did, collaborator.Repo); !ok || err != nil {
164
+
l.Warn("forbidden request", "did", evt.Record.Did, "err", err)
165
+
return nil
166
+
}
167
+
168
+
if err := s.db.RemoveRepoCollaborator(collaborator.Subject, collaborator.Rkey); err != nil {
169
+
return fmt.Errorf("removing collaborator from db: %w", err)
170
+
}
171
+
if err := s.e.RemoveRepoCollaborator(collaborator.Subject, collaborator.Repo); err != nil {
172
+
return fmt.Errorf("removing collaborator from rbac: %w", err)
173
+
}
174
+
if err := s.tapSafeRemoveDid(ctx, collaborator.Subject); err != nil {
175
+
return fmt.Errorf("removing did from tap: %w", err)
176
+
}
177
+
178
+
l.Info("removed repo collaborator", "subejct", collaborator.Subject, "repo", collaborator.Repo)
179
+
return nil
180
+
}
181
+
return nil
182
+
}
183
+
184
+
func (s *Spindle) processRepo(ctx context.Context, evt tap.Event) error {
185
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
186
+
187
+
l.Info("processing repo record")
188
+
189
+
// check perms for this user
190
+
if ok, err := s.e.IsSpindleMember(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
191
+
l.Warn("forbidden request", "did", evt.Record.Did, "err", err)
192
+
return nil
193
+
}
194
+
195
+
switch evt.Record.Action {
196
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
197
+
record := tangled.Repo{}
198
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
199
+
return fmt.Errorf("parsing record: %w", err)
200
+
}
201
+
202
+
domain := s.cfg.Server.Hostname
203
+
if record.Spindle == nil || *record.Spindle != domain {
204
+
if record.Spindle == nil {
205
+
l.Info("spindle isn't configured", "name", record.Name)
206
+
} else {
207
+
l.Info("different spindle configured", "name", record.Name, "spindle", *record.Spindle, "domain", domain)
208
+
}
209
+
if err := s.db.DeleteRepo(evt.Record.Did, evt.Record.Rkey); err != nil {
210
+
return fmt.Errorf("deleting repo from db: %w", err)
211
+
}
212
+
return nil
213
+
}
214
+
215
+
if err := s.db.PutRepo(&db.Repo{
216
+
Did: evt.Record.Did,
217
+
Rkey: evt.Record.Rkey,
218
+
Name: record.Name,
219
+
Knot: record.Knot,
220
+
}); err != nil {
221
+
return fmt.Errorf("adding repo to db: %w", err)
222
+
}
223
+
224
+
if err := s.e.AddRepo(evt.Record.AtUri()); err != nil {
225
+
return fmt.Errorf("adding repo to rbac")
226
+
}
227
+
228
+
// add this knot to the event consumer
229
+
src := eventconsumer.NewKnotSource(record.Knot)
230
+
s.ks.AddSource(context.Background(), src)
231
+
232
+
l.Info("added repo", "repo", evt.Record.AtUri())
233
+
return nil
234
+
235
+
case tap.RecordDeleteAction:
236
+
// check perms for this user
237
+
if ok, err := s.e.IsRepoOwner(evt.Record.Did, evt.Record.AtUri()); !ok || err != nil {
238
+
l.Warn("forbidden request", "did", evt.Record.Did, "err", err)
239
+
return nil
240
+
}
241
+
242
+
if err := s.db.DeleteRepo(evt.Record.Did, evt.Record.Rkey); err != nil {
243
+
return fmt.Errorf("deleting repo from db: %w", err)
244
+
}
245
+
246
+
if err := s.e.DeleteRepo(evt.Record.AtUri()); err != nil {
247
+
return fmt.Errorf("deleting repo from rbac: %w", err)
248
+
}
249
+
250
+
l.Info("deleted repo", "repo", evt.Record.AtUri())
251
+
return nil
252
+
}
253
+
return nil
254
+
}
255
+
256
+
// processPull indexes a sh.tangled.repo.pull record from a tap event.
// Currently a stub: pull events are logged and acknowledged but not acted
// upon; the switch is kept in place to mark where create/update and delete
// handling will go.
func (s *Spindle) processPull(ctx context.Context, evt tap.Event) error {
	l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())

	l.Info("processing pull record")

	switch evt.Record.Action {
	case tap.RecordCreateAction, tap.RecordUpdateAction:
		// TODO
	case tap.RecordDeleteAction:
		// TODO
	}
	return nil
}
269
+
270
+
func (s *Spindle) tapSafeRemoveDid(ctx context.Context, did syntax.DID) error {
271
+
known, err := s.db.IsKnownDid(syntax.DID(did))
272
+
if err != nil {
273
+
return fmt.Errorf("ensuring did known state: %w", err)
274
+
}
275
+
if !known {
276
+
if err := s.tap.RemoveRepos(ctx, []syntax.DID{did}); err != nil {
277
+
return fmt.Errorf("removing did from tap: %w", err)
278
+
}
279
+
}
280
+
return nil
281
+
}
+1
-2
spindle/xrpc/add_secret.go
+1
-2
spindle/xrpc/add_secret.go
···
11
11
"github.com/bluesky-social/indigo/xrpc"
12
12
securejoin "github.com/cyphar/filepath-securejoin"
13
13
"tangled.org/core/api/tangled"
14
-
"tangled.org/core/rbac"
15
14
"tangled.org/core/spindle/secrets"
16
15
xrpcerr "tangled.org/core/xrpc/errors"
17
16
)
···
68
67
return
69
68
}
70
69
71
-
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
70
+
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
72
71
l.Error("insufficent permissions", "did", actorDid.String())
73
72
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
74
73
return
+1
-2
spindle/xrpc/list_secrets.go
+1
-2
spindle/xrpc/list_secrets.go
···
11
11
"github.com/bluesky-social/indigo/xrpc"
12
12
securejoin "github.com/cyphar/filepath-securejoin"
13
13
"tangled.org/core/api/tangled"
14
-
"tangled.org/core/rbac"
15
14
"tangled.org/core/spindle/secrets"
16
15
xrpcerr "tangled.org/core/xrpc/errors"
17
16
)
···
63
62
return
64
63
}
65
64
66
-
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
65
+
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
67
66
l.Error("insufficent permissions", "did", actorDid.String())
68
67
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
69
68
return
+1
-1
spindle/xrpc/owner.go
+1
-1
spindle/xrpc/owner.go
+72
spindle/xrpc/pipeline_cancelPipeline.go
+72
spindle/xrpc/pipeline_cancelPipeline.go
···
1
+
package xrpc
2
+
3
+
import (
4
+
"encoding/json"
5
+
"fmt"
6
+
"net/http"
7
+
"strings"
8
+
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
10
+
"tangled.org/core/api/tangled"
11
+
"tangled.org/core/spindle/models"
12
+
xrpcerr "tangled.org/core/xrpc/errors"
13
+
)
14
+
15
+
func (x *Xrpc) CancelPipeline(w http.ResponseWriter, r *http.Request) {
16
+
l := x.Logger
17
+
fail := func(e xrpcerr.XrpcError) {
18
+
l.Error("failed", "kind", e.Tag, "error", e.Message)
19
+
writeError(w, e, http.StatusBadRequest)
20
+
}
21
+
l.Debug("cancel pipeline")
22
+
23
+
actorDid, ok := r.Context().Value(ActorDid).(syntax.DID)
24
+
if !ok {
25
+
fail(xrpcerr.MissingActorDidError)
26
+
return
27
+
}
28
+
29
+
var input tangled.PipelineCancelPipeline_Input
30
+
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
31
+
fail(xrpcerr.GenericError(err))
32
+
return
33
+
}
34
+
35
+
aturi := syntax.ATURI(input.Pipeline)
36
+
wid := models.WorkflowId{
37
+
PipelineId: models.PipelineId{
38
+
Knot: strings.TrimPrefix(aturi.Authority().String(), "did:web:"),
39
+
Rkey: aturi.RecordKey().String(),
40
+
},
41
+
Name: input.Workflow,
42
+
}
43
+
l.Debug("cancel pipeline", "wid", wid)
44
+
45
+
// unfortunately we have to resolve repo-at here
46
+
repoAt, err := syntax.ParseATURI(input.Repo)
47
+
if err != nil {
48
+
fail(xrpcerr.InvalidRepoError(input.Repo))
49
+
return
50
+
}
51
+
52
+
isRepoOwner, err := x.Enforcer.IsRepoOwner(actorDid, repoAt)
53
+
if err != nil || !isRepoOwner {
54
+
fail(xrpcerr.AccessControlError(actorDid.String()))
55
+
return
56
+
}
57
+
for _, engine := range x.Engines {
58
+
l.Debug("destorying workflow", "wid", wid)
59
+
err = engine.DestroyWorkflow(r.Context(), wid)
60
+
if err != nil {
61
+
fail(xrpcerr.GenericError(fmt.Errorf("dailed to destroy workflow: %w", err)))
62
+
return
63
+
}
64
+
err = x.Db.StatusCancelled(wid, "User canceled the workflow", -1, x.Notifier)
65
+
if err != nil {
66
+
fail(xrpcerr.GenericError(fmt.Errorf("dailed to emit status failed: %w", err)))
67
+
return
68
+
}
69
+
}
70
+
71
+
w.WriteHeader(http.StatusOK)
72
+
}
+1
-2
spindle/xrpc/remove_secret.go
+1
-2
spindle/xrpc/remove_secret.go
···
10
10
"github.com/bluesky-social/indigo/xrpc"
11
11
securejoin "github.com/cyphar/filepath-securejoin"
12
12
"tangled.org/core/api/tangled"
13
-
"tangled.org/core/rbac"
14
13
"tangled.org/core/spindle/secrets"
15
14
xrpcerr "tangled.org/core/xrpc/errors"
16
15
)
···
62
61
return
63
62
}
64
63
65
-
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
64
+
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
66
65
l.Error("insufficent permissions", "did", actorDid.String())
67
66
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
68
67
return
+5
-2
spindle/xrpc/xrpc.go
+5
-2
spindle/xrpc/xrpc.go
···
10
10
11
11
"tangled.org/core/api/tangled"
12
12
"tangled.org/core/idresolver"
13
-
"tangled.org/core/rbac"
13
+
"tangled.org/core/notifier"
14
+
"tangled.org/core/rbac2"
14
15
"tangled.org/core/spindle/config"
15
16
"tangled.org/core/spindle/db"
16
17
"tangled.org/core/spindle/models"
···
24
25
type Xrpc struct {
25
26
Logger *slog.Logger
26
27
Db *db.DB
27
-
Enforcer *rbac.Enforcer
28
+
Enforcer *rbac2.Enforcer
28
29
Engines map[string]models.Engine
29
30
Config *config.Config
30
31
Resolver *idresolver.Resolver
31
32
Vault secrets.Manager
33
+
Notifier *notifier.Notifier
32
34
ServiceAuth *serviceauth.ServiceAuth
33
35
}
34
36
···
41
43
r.Post("/"+tangled.RepoAddSecretNSID, x.AddSecret)
42
44
r.Post("/"+tangled.RepoRemoveSecretNSID, x.RemoveSecret)
43
45
r.Get("/"+tangled.RepoListSecretsNSID, x.ListSecrets)
46
+
r.Post("/"+tangled.PipelineCancelPipelineNSID, x.CancelPipeline)
44
47
})
45
48
46
49
// service query endpoints (no auth required)
+1
-1
tailwind.config.js
+1
-1
tailwind.config.js
···
2
2
const colors = require("tailwindcss/colors");
3
3
4
4
module.exports = {
5
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
5
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
6
6
darkMode: "media",
7
7
theme: {
8
8
container: {
+18
tap/simpleIndexer.go
+18
tap/simpleIndexer.go
···
1
+
package tap
2
+
3
+
import "context"
4
+
5
+
// SimpleIndexer is a Handler built from plain function fields, useful for
// wiring up a tap consumer without declaring a dedicated type.
type SimpleIndexer struct {
	// EventHandler processes each incoming event. It must be set:
	// OnEvent calls it unconditionally, so a nil value panics.
	EventHandler func(ctx context.Context, evt Event) error
	// ErrorHandler is invoked when event handling fails. It must be set:
	// OnError calls it unconditionally, so a nil value panics.
	ErrorHandler func(ctx context.Context, err error)
}
9
+
10
+
var _ Handler = (*SimpleIndexer)(nil)
11
+
12
+
func (i *SimpleIndexer) OnEvent(ctx context.Context, evt Event) error {
13
+
return i.EventHandler(ctx, evt)
14
+
}
15
+
16
+
func (i *SimpleIndexer) OnError(ctx context.Context, err error) {
17
+
i.ErrorHandler(ctx, err)
18
+
}
+169
tap/tap.go
+169
tap/tap.go
···
1
+
/// heavily inspired by <https://github.com/bluesky-social/atproto/blob/c7f5a868837d3e9b3289f988fee2267789327b06/packages/tap/README.md>
2
+
3
+
package tap
4
+
5
+
import (
6
+
"bytes"
7
+
"context"
8
+
"encoding/json"
9
+
"fmt"
10
+
"net/http"
11
+
"net/url"
12
+
13
+
"github.com/bluesky-social/indigo/atproto/syntax"
14
+
"github.com/gorilla/websocket"
15
+
"tangled.org/core/log"
16
+
)
17
+
18
+
// type WebsocketOptions struct {
19
+
// maxReconnectSeconds int
20
+
// heartbeatIntervalMs int
21
+
// // onReconnectError
22
+
// }
23
+
24
+
type Handler interface {
25
+
OnEvent(ctx context.Context, evt Event) error
26
+
OnError(ctx context.Context, err error)
27
+
}
28
+
29
+
// Client talks to a tap instance: REST admin endpoints for managing the set
// of tracked repos, and a websocket channel for consuming events.
type Client struct {
	// Url is the base URL of the tap service (http/https scheme).
	Url string
	// AdminPassword authenticates the admin endpoints via basic auth
	// (username "admin").
	AdminPassword string
	// HTTPClient is used for the admin REST calls.
	HTTPClient *http.Client
}
34
+
35
+
func NewClient(url, adminPassword string) Client {
36
+
return Client{
37
+
Url: url,
38
+
AdminPassword: adminPassword,
39
+
HTTPClient: &http.Client{},
40
+
}
41
+
}
42
+
43
+
func (c *Client) AddRepos(ctx context.Context, dids []syntax.DID) error {
44
+
body, err := json.Marshal(map[string][]syntax.DID{"dids": dids})
45
+
if err != nil {
46
+
return err
47
+
}
48
+
req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/add", bytes.NewReader(body))
49
+
if err != nil {
50
+
return err
51
+
}
52
+
req.SetBasicAuth("admin", c.AdminPassword)
53
+
req.Header.Set("Content-Type", "application/json")
54
+
55
+
resp, err := c.HTTPClient.Do(req)
56
+
if err != nil {
57
+
return err
58
+
}
59
+
defer resp.Body.Close()
60
+
if resp.StatusCode != http.StatusOK {
61
+
return fmt.Errorf("tap: /repos/add failed with status %d", resp.StatusCode)
62
+
}
63
+
return nil
64
+
}
65
+
66
+
func (c *Client) RemoveRepos(ctx context.Context, dids []syntax.DID) error {
67
+
body, err := json.Marshal(map[string][]syntax.DID{"dids": dids})
68
+
if err != nil {
69
+
return err
70
+
}
71
+
req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/remove", bytes.NewReader(body))
72
+
if err != nil {
73
+
return err
74
+
}
75
+
req.SetBasicAuth("admin", c.AdminPassword)
76
+
req.Header.Set("Content-Type", "application/json")
77
+
78
+
resp, err := c.HTTPClient.Do(req)
79
+
if err != nil {
80
+
return err
81
+
}
82
+
defer resp.Body.Close()
83
+
if resp.StatusCode != http.StatusOK {
84
+
return fmt.Errorf("tap: /repos/remove failed with status %d", resp.StatusCode)
85
+
}
86
+
return nil
87
+
}
88
+
89
+
func (c *Client) Connect(ctx context.Context, handler Handler) error {
90
+
l := log.FromContext(ctx)
91
+
92
+
u, err := url.Parse(c.Url)
93
+
if err != nil {
94
+
return err
95
+
}
96
+
if u.Scheme == "https" {
97
+
u.Scheme = "wss"
98
+
} else {
99
+
u.Scheme = "ws"
100
+
}
101
+
u.Path = "/channel"
102
+
103
+
// TODO: set auth on dial
104
+
105
+
url := u.String()
106
+
107
+
// var backoff int
108
+
// for {
109
+
// select {
110
+
// case <-ctx.Done():
111
+
// return ctx.Err()
112
+
// default:
113
+
// }
114
+
//
115
+
// header := http.Header{
116
+
// "Authorization": []string{""},
117
+
// }
118
+
// conn, res, err := websocket.DefaultDialer.DialContext(ctx, url, header)
119
+
// if err != nil {
120
+
// l.Warn("dialing failed", "url", url, "err", err, "backoff", backoff)
121
+
// time.Sleep(time.Duration(5+backoff) * time.Second)
122
+
// backoff++
123
+
//
124
+
// continue
125
+
// } else {
126
+
// backoff = 0
127
+
// }
128
+
//
129
+
// l.Info("event subscription response", "code", res.StatusCode)
130
+
// }
131
+
132
+
// TODO: keep websocket connection alive
133
+
conn, _, err := websocket.DefaultDialer.DialContext(ctx, url, nil)
134
+
if err != nil {
135
+
return err
136
+
}
137
+
defer conn.Close()
138
+
139
+
for {
140
+
select {
141
+
case <-ctx.Done():
142
+
return ctx.Err()
143
+
default:
144
+
}
145
+
_, message, err := conn.ReadMessage()
146
+
if err != nil {
147
+
return err
148
+
}
149
+
150
+
var ev Event
151
+
if err := json.Unmarshal(message, &ev); err != nil {
152
+
handler.OnError(ctx, fmt.Errorf("failed to parse message: %w", err))
153
+
continue
154
+
}
155
+
if err := handler.OnEvent(ctx, ev); err != nil {
156
+
handler.OnError(ctx, fmt.Errorf("failed to process event %d: %w", ev.ID, err))
157
+
continue
158
+
}
159
+
160
+
ack := map[string]any{
161
+
"type": "ack",
162
+
"id": ev.ID,
163
+
}
164
+
if err := conn.WriteJSON(ack); err != nil {
165
+
l.Warn("failed to send ack", "err", err)
166
+
continue
167
+
}
168
+
}
169
+
}
+62
tap/types.go
+62
tap/types.go
···
1
+
package tap
2
+
3
+
import (
4
+
"encoding/json"
5
+
"fmt"
6
+
7
+
"github.com/bluesky-social/indigo/atproto/syntax"
8
+
)
9
+
10
+
// EventType discriminates which payload an Event carries.
type EventType string

const (
	// EvtRecord marks events whose Record field is populated.
	EvtRecord EventType = "record"
	// EvtIdentity marks events whose Identity field is populated.
	EvtIdentity EventType = "identity"
)
16
+
17
+
// Event is a single message received from the tap /channel stream. Type
// indicates which of the payload pointers is expected to be set.
type Event struct {
	// ID is the sequence number; it is echoed back in the ack message.
	ID int64 `json:"id"`
	// Type selects which payload field below is populated.
	Type EventType `json:"type"`
	// Record carries the payload for EvtRecord events.
	Record *RecordEventData `json:"record,omitempty"`
	// Identity carries the payload for EvtIdentity events.
	Identity *IdentityEventData `json:"identity,omitempty"`
}
23
+
24
+
// RecordEventData describes a single record operation observed by tap.
type RecordEventData struct {
	// Live — presumably distinguishes live-tail events from backfill;
	// NOTE(review): confirm against the tap protocol docs.
	Live bool `json:"live"`
	// Did is the account/repo the record belongs to.
	Did syntax.DID `json:"did"`
	// Rev is the repo revision associated with this change.
	Rev string `json:"rev"`
	// Collection is the NSID of the record's collection.
	Collection syntax.NSID `json:"collection"`
	// Rkey is the record key within the collection.
	Rkey syntax.RecordKey `json:"rkey"`
	// Action says whether the record was created, updated, or deleted.
	Action RecordAction `json:"action"`
	// Record is the raw record body; may be omitted (e.g. for deletes —
	// NOTE(review): confirm).
	Record json.RawMessage `json:"record,omitempty"`
	// CID is the record's content hash, when present.
	CID *syntax.CID `json:"cid,omitempty"`
}
34
+
35
+
func (r *RecordEventData) AtUri() syntax.ATURI {
36
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, r.Collection, r.Rkey))
37
+
}
38
+
39
+
// RecordAction is the kind of operation performed on a record.
type RecordAction string

const (
	RecordCreateAction RecordAction = "create"
	RecordUpdateAction RecordAction = "update"
	RecordDeleteAction RecordAction = "delete"
)
46
+
47
+
// IdentityEventData describes an identity/account change observed by tap.
type IdentityEventData struct {
	// DID of the account the event concerns.
	DID syntax.DID `json:"did"`
	// Handle is the account's current handle.
	Handle string `json:"handle"`
	// IsActive reports whether the account is currently active.
	IsActive bool `json:"is_active"`
	// Status is the fine-grained account state; see the RepoStatus values.
	Status RepoStatus `json:"status"`
}
53
+
54
+
// RepoStatus enumerates the account/repo states reported in identity events.
type RepoStatus string

const (
	RepoStatusActive      RepoStatus = "active"
	RepoStatusTakendown   RepoStatus = "takendown"
	RepoStatusSuspended   RepoStatus = "suspended"
	RepoStatusDeactivated RepoStatus = "deactivated"
	RepoStatusDeleted     RepoStatus = "deleted"
)