···
 )
 
 type Star struct {
-	StarredByDid string
-	RepoAt       syntax.ATURI
-	Created      time.Time
-	Rkey         string
+	Did     string
+	RepoAt  syntax.ATURI
+	Created time.Time
+	Rkey    string
+}
 
-	// optionally, populate this when querying for reverse mappings
+// RepoStar is used for reverse mapping to repos
+type RepoStar struct {
+	Star
 	Repo *Repo
 }
+
+// StringStar is used for reverse mapping to strings
+type StringStar struct {
+	Star
+	String *String
+}
···
 <div class="mx-6">
   These services may not be fully accessible until upgraded.
   <a class="underline text-red-800 dark:text-red-200"
-     href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md">
+     href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles">
     Click to read the upgrade guide</a>.
 </div>
 </details>
···
 <p class="text-gray-500 dark:text-gray-400">
   Choose a spindle to execute your workflows on. Only repository owners
   can configure spindles. Spindles can be selfhosted,
-  <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
+  <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
     click to learn more.
   </a>
 </p>
···
 	"tangled.org/core/appview/db"
 	"tangled.org/core/appview/models"
+	"tangled.org/core/orm"
 )
 
 func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
 	// if comments have parents, only ingest ones that are 1 level deep
 	if comment.ReplyTo != nil {
-		parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo))
+		parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo))
 		if err != nil {
 			return fmt.Errorf("failed to fetch parent comment: %w", err)
 		}
+1-34
crypto/verify.go
···
 	"crypto/sha256"
 	"encoding/base64"
 	"fmt"
-	"strings"
 
 	"github.com/hiddeco/sshsig"
 	"golang.org/x/crypto/ssh"
-	"tangled.org/core/types"
 )
 
 func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···
 	// multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
 	// to sha-512 for all key types anyway.
 	err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
-	return err, err == nil
-}
 
-// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
-// essentially the git cat-file output but without the gpgsig header.
-//
-// Caveats: signature verification will fail on commits with more than one parent,
-// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
-// and we are unable to reconstruct the payload correctly.
-//
-// Ideally this should directly operate on an *object.Commit.
-func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
-	signature := commit.Commit.PGPSignature
-
-	author := bytes.NewBuffer([]byte{})
-	committer := bytes.NewBuffer([]byte{})
-	commit.Commit.Author.Encode(author)
-	commit.Commit.Committer.Encode(committer)
-
-	payload := strings.Builder{}
-
-	fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
-	if commit.Commit.Parent != "" {
-		fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
-	}
-	fmt.Fprintf(&payload, "author %s\n", author.String())
-	fmt.Fprintf(&payload, "committer %s\n", committer.String())
-	if commit.Commit.ChangedId != "" {
-		fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
-	}
-	fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
-
-	return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
+	return err, err == nil
 }
 
 // SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+1529
docs/DOCS.md
···11+---
22+title: Tangled docs
33+author: The Tangled Contributors
date: Sun, 21 Dec 2025
55+---
66+77+# Introduction
88+99+Tangled is a decentralized code hosting and collaboration
1010+platform. Every component of Tangled is open-source and
1111+self-hostable. [tangled.org](https://tangled.org) also
1212+provides hosting and CI services that are free to use.
1313+1414+There are several models for decentralized code
1515+collaboration platforms, ranging from ActivityPub’s
1616+(Forgejo) federated model, to Radicle’s entirely P2P model.
1717+Our approach attempts to be the best of both worlds by
1818+adopting the AT Protocol—a protocol for building decentralized
social applications with a central identity.

Central to this approach is the idea of “knots”. Knots are
2222+lightweight, headless servers that enable users to host Git
repositories with ease. Knots are designed for either single-
or multi-tenant use, which makes them perfect for self-hosting on a
Raspberry Pi at home or for larger “community” servers. By
2626+default, Tangled provides managed knots where you can host
2727+your repositories for free.
2828+2929+The appview at tangled.org acts as a consolidated "view"
3030+into the whole network, allowing users to access, clone and
3131+contribute to repositories hosted across different knots
3232+seamlessly.
3333+3434+# Quick start guide
3535+3636+## Login or sign up
You can [log in](https://tangled.org) using your AT Protocol
3939+account. If you are unclear on what that means, simply head
4040+to the [signup](https://tangled.org/signup) page and create
4141+an account. By doing so, you will be choosing Tangled as
4242+your account provider (you will be granted a handle of the
4343+form `user.tngl.sh`).
4444+4545+In the AT Protocol network, users are free to choose their account
4646+provider (known as a "Personal Data Service", or PDS), and
log in to applications that support AT accounts.
4848+4949+You can think of it as "one account for all of the atmosphere"!
5050+5151+If you already have an AT account (you may have one if you
signed up to Bluesky, for example), you can log in with the
5353+same handle on Tangled (so just use `user.bsky.social` on
5454+the login page).
5555+5656+## Add an SSH key
5757+5858+Once you are logged in, you can start creating repositories
5959+and pushing code. Tangled supports pushing git repositories
6060+over SSH.
6161+6262+First, you'll need to generate an SSH key if you don't
6363+already have one:
6464+6565+```bash
6666+ssh-keygen -t ed25519 -C "foo@bar.com"
6767+```
6868+6969+When prompted, save the key to the default location
7070+(`~/.ssh/id_ed25519`) and optionally set a passphrase.
7171+7272+Copy your public key to your clipboard:
7373+7474+```bash
7575+# on X11
7676+cat ~/.ssh/id_ed25519.pub | xclip -sel c
7777+7878+# on wayland
7979+cat ~/.ssh/id_ed25519.pub | wl-copy
8080+8181+# on macos
8282+cat ~/.ssh/id_ed25519.pub | pbcopy
8383+```
8484+8585+Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
8686+paste your public key, give it a descriptive name, and hit
8787+save.
8888+8989+## Create a repository
9090+9191+Once your SSH key is added, create your first repository:
9292+9393+1. Hit the green `+` icon on the topbar, and select
9494+ repository
9595+2. Enter a repository name
9696+3. Add a description
9797+4. Choose a knotserver to host this repository on
9898+5. Hit create
9999+100100+Knots are self-hostable, lightweight Git servers that can
101101+host your repository. Unlike traditional code forges, your
102102+code can live on any server. Read the [Knots](TODO) section
103103+for more.
104104+105105+## Configure SSH
106106+107107+To ensure Git uses the correct SSH key and connects smoothly
108108+to Tangled, add this configuration to your `~/.ssh/config`
109109+file:
110110+111111+```
112112+Host tangled.org
113113+ Hostname tangled.org
114114+ User git
115115+ IdentityFile ~/.ssh/id_ed25519
116116+ AddressFamily inet
117117+```
118118+119119+This tells SSH to use your specific key when connecting to
120120+Tangled and prevents authentication issues if you have
121121+multiple SSH keys.
122122+123123+Note that this configuration only works for knotservers that
124124+are hosted by tangled.org. If you use a custom knot, refer
125125+to the [Knots](TODO) section.
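
If you already know the hostname of the knot you push to, the same
configuration pattern applies. A minimal sketch, assuming a
self-hosted knot at the hypothetical `knot.example.com`:

```
Host knot.example.com
    Hostname knot.example.com
    User git
    IdentityFile ~/.ssh/id_ed25519
```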
126126+127127+## Push your first repository
128128+129129+Initialize a new Git repository:
130130+131131+```bash
132132+mkdir my-project
133133+cd my-project
134134+135135+git init
136136+echo "# My Project" > README.md
137137+```
138138+139139+Add some content and push!
140140+141141+```bash
142142+git add README.md
143143+git commit -m "Initial commit"
144144+git remote add origin git@tangled.org:user.tngl.sh/my-project
145145+git push -u origin main
146146+```
147147+148148+That's it! Your code is now hosted on Tangled.
149149+150150+## Migrating an existing repository
151151+152152+Moving your repositories from GitHub, GitLab, Bitbucket, or
153153+any other Git forge to Tangled is straightforward. You'll
154154+simply change your repository's remote URL. At the moment,
155155+Tangled does not have any tooling to migrate data such as
156156+GitHub issues or pull requests.
157157+158158+First, create a new repository on tangled.org as described
159159+in the [Quick Start Guide](#create-a-repository).
160160+161161+Navigate to your existing local repository:
162162+163163+```bash
164164+cd /path/to/your/existing/repo
165165+```
166166+167167+You can inspect your existing Git remote like so:
168168+169169+```bash
170170+git remote -v
171171+```
172172+173173+You'll see something like:
174174+175175+```
176176+origin git@github.com:username/my-project (fetch)
177177+origin git@github.com:username/my-project (push)
178178+```
179179+180180+Update the remote URL to point to tangled:
181181+182182+```bash
183183+git remote set-url origin git@tangled.org:user.tngl.sh/my-project
184184+```
185185+186186+Verify the change:
187187+188188+```bash
189189+git remote -v
190190+```
191191+192192+You should now see:
193193+194194+```
195195+origin git@tangled.org:user.tngl.sh/my-project (fetch)
196196+origin git@tangled.org:user.tngl.sh/my-project (push)
197197+```
198198+199199+Push all your branches and tags to Tangled:
200200+201201+```bash
202202+git push -u origin --all
203203+git push -u origin --tags
204204+```
205205+206206+Your repository is now migrated to Tangled! All commit
207207+history, branches, and tags have been preserved.
208208+209209+## Mirroring a repository to Tangled
210210+211211+If you want to maintain your repository on multiple forges
212212+simultaneously, for example, keeping your primary repository
213213+on GitHub while mirroring to Tangled for backup or
214214+redundancy, you can do so by adding multiple remotes.
215215+216216+You can configure your local repository to push to both
217217+Tangled and, say, GitHub. You may already have the following
218218+setup:
219219+220220+```
221221+$ git remote -v
222222+origin git@github.com:username/my-project (fetch)
223223+origin git@github.com:username/my-project (push)
224224+```
225225+226226+Now add Tangled as an additional push URL to the same
227227+remote:
228228+229229+```bash
230230+git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
231231+```
232232+233233+You also need to re-add the original URL as a push
234234+destination (Git replaces the push URL when you use `--add`
235235+the first time):
236236+237237+```bash
238238+git remote set-url --add --push origin git@github.com:username/my-project
239239+```
240240+241241+Verify your configuration:
242242+243243+```
244244+$ git remote -v
origin git@github.com:username/my-project (fetch)
origin git@tangled.org:user.tngl.sh/my-project (push)
origin git@github.com:username/my-project (push)
248248+```
249249+250250+Notice that there's one fetch URL (the primary remote) and
251251+two push URLs. Now, whenever you push, Git will
252252+automatically push to both remotes:
253253+254254+```bash
255255+git push origin main
256256+```
257257+258258+This single command pushes your `main` branch to both GitHub
259259+and Tangled simultaneously.
260260+261261+To push all branches and tags:
262262+263263+```bash
264264+git push origin --all
265265+git push origin --tags
266266+```
267267+268268+If you prefer more control over which remote you push to,
269269+you can maintain separate remotes:
270270+271271+```bash
272272+git remote add github git@github.com:username/my-project
273273+git remote add tangled git@tangled.org:username/my-project
274274+```
275275+276276+Then push to each explicitly:
277277+278278+```bash
279279+git push github main
280280+git push tangled main
281281+```
282282+283283+# Knot self-hosting guide
284284+285285+So you want to run your own knot server? Great! Here are a few prerequisites:
1. A server (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
288288+2. A (sub)domain name. People generally use `knot.example.com`.
289289+3. A valid SSL certificate for your domain.
290290+291291+## NixOS
292292+293293+Refer to the [knot
294294+module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
295295+for a full list of options. Sample configurations:
296296+297297+- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
298298+- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
299299+300300+## Docker
301301+302302+Refer to
303303+[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
304304+Note that this is community maintained.
305305+306306+## Manual setup
307307+308308+First, clone this repository:
309309+310310+```
311311+git clone https://tangled.org/@tangled.org/core
312312+```
313313+314314+Then, build the `knot` CLI. This is the knot administration
315315+and operation tool. For the purpose of this guide, we're
316316+only concerned with these subcommands:
317317+318318+ * `knot server`: the main knot server process, typically
319319+ run as a supervised service
320320+ * `knot guard`: handles role-based access control for git
321321+ over SSH (you'll never have to run this yourself)
322322+ * `knot keys`: fetches SSH keys associated with your knot;
323323+ we'll use this to generate the SSH
324324+ `AuthorizedKeysCommand`
325325+326326+```
327327+cd core
328328+export CGO_ENABLED=1
329329+go build -o knot ./cmd/knot
330330+```
331331+332332+Next, move the `knot` binary to a location owned by `root` --
333333+`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
334334+335335+```
336336+sudo mv knot /usr/local/bin/knot
337337+sudo chown root:root /usr/local/bin/knot
338338+```
339339+340340+This is necessary because SSH `AuthorizedKeysCommand` requires [really
341341+specific permissions](https://stackoverflow.com/a/27638306). The
342342+`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
343343+retrieve a user's public SSH keys dynamically for authentication. Let's
344344+set that up.
345345+346346+```
347347+sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
348348+Match User git
349349+ AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
350350+ AuthorizedKeysCommandUser nobody
351351+EOF
352352+```
353353+354354+Then, reload `sshd`:
355355+356356+```
357357+sudo systemctl reload ssh
358358+```
359359+360360+Next, create the `git` user. We'll use the `git` user's home directory
361361+to store repositories:
362362+363363+```
364364+sudo adduser git
365365+```
366366+367367+Create `/home/git/.knot.env` with the following, updating the values as
necessary. The `KNOT_SERVER_OWNER` should be set to your
DID; you can find your DID on the [Settings](https://tangled.org/settings) page.
370370+371371+```
372372+KNOT_REPO_SCAN_PATH=/home/git
373373+KNOT_SERVER_HOSTNAME=knot.example.com
374374+APPVIEW_ENDPOINT=https://tangled.org
375375+KNOT_SERVER_OWNER=did:plc:foobar
376376+KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
377377+KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
378378+```
379379+380380+If you run a Linux distribution that uses systemd, you can use the provided
381381+service file to run the server. Copy
382382+[`knotserver.service`](/systemd/knotserver.service)
383383+to `/etc/systemd/system/`. Then, run:
384384+385385+```
386386+systemctl enable knotserver
387387+systemctl start knotserver
388388+```
389389+390390+The last step is to configure a reverse proxy like Nginx or Caddy to front your
391391+knot. Here's an example configuration for Nginx:
392392+393393+```
394394+server {
395395+ listen 80;
396396+ listen [::]:80;
397397+ server_name knot.example.com;
398398+399399+ location / {
400400+ proxy_pass http://localhost:5555;
401401+ proxy_set_header Host $host;
402402+ proxy_set_header X-Real-IP $remote_addr;
403403+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
404404+ proxy_set_header X-Forwarded-Proto $scheme;
405405+ }
406406+407407+ # wss endpoint for git events
408408+ location /events {
409409+ proxy_set_header X-Forwarded-For $remote_addr;
410410+ proxy_set_header Host $http_host;
411411+ proxy_set_header Upgrade websocket;
412412+ proxy_set_header Connection Upgrade;
413413+ proxy_pass http://localhost:5555;
414414+ }
  # additional config for SSL/TLS goes here.
416416+}
417417+418418+```
419419+420420+Remember to use Let's Encrypt or similar to procure a certificate for your
421421+knot domain.
422422+423423+You should now have a running knot server! You can finalize
424424+your registration by hitting the `verify` button on the
425425+[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
426426+a record on your PDS to announce the existence of the knot.
427427+428428+### Custom paths
429429+430430+(This section applies to manual setup only. Docker users should edit the mounts
431431+in `docker-compose.yml` instead.)

Right now, the database and repositories of your knot live in `/home/git`. You
434434+can move these paths if you'd like to store them in another folder. Be careful
435435+when adjusting these paths:
436436+437437+* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
438438+any possible side effects. Remember to restart it once you're done.
439439+* Make backups before moving in case something goes wrong.
440440+* Make sure the `git` user can read and write from the new paths.
441441+442442+#### Database
443443+444444+As an example, let's say the current database is at `/home/git/knotserver.db`,
445445+and we want to move it to `/home/git/database/knotserver.db`.
446446+447447+Copy the current database to the new location. Make sure to copy the `.db-shm`
448448+and `.db-wal` files if they exist.
449449+450450+```
451451+mkdir /home/git/database
452452+cp /home/git/knotserver.db* /home/git/database
453453+```
454454+455455+In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
456456+the new file path (_not_ the directory):
457457+458458+```
459459+KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
460460+```
461461+462462+#### Repositories
463463+464464+As an example, let's say the repositories are currently in `/home/git`, and we
465465+want to move them into `/home/git/repositories`.
466466+467467+Create the new folder, then move the existing repositories (if there are any):
468468+469469+```
470470+mkdir /home/git/repositories
471471+# move all DIDs into the new folder; these will vary for you!
472472+mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
473473+```
474474+475475+In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
476476+to the new directory:
477477+478478+```
479479+KNOT_REPO_SCAN_PATH=/home/git/repositories
480480+```
481481+482482+Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
483483+repository path:
484484+485485+```
486486+sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
487487+Match User git
488488+ AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
489489+ AuthorizedKeysCommandUser nobody
490490+EOF
491491+```
492492+493493+Make sure to restart your SSH server!
494494+495495+#### MOTD (message of the day)
496496+497497+To configure the MOTD used ("Welcome to this knot!" by default), edit the
498498+`/home/git/motd` file:
499499+500500+```
501501+printf "Hi from this knot!\n" > /home/git/motd
502502+```
503503+504504+Note that you should add a newline at the end if setting a non-empty message
505505+since the knot won't do this for you.
506506+507507+# Spindles
508508+509509+## Pipelines
510510+511511+Spindle workflows allow you to write CI/CD pipelines in a
512512+simple format. They're located in the `.tangled/workflows`
513513+directory at the root of your repository, and are defined
514514+using YAML.
515515+516516+The fields are:
517517+518518+- [Trigger](#trigger): A **required** field that defines
519519+ when a workflow should be triggered.
520520+- [Engine](#engine): A **required** field that defines which
521521+ engine a workflow should run on.
522522+- [Clone options](#clone-options): An **optional** field
523523+ that defines how the repository should be cloned.
524524+- [Dependencies](#dependencies): An **optional** field that
525525+ allows you to list dependencies you may need.
526526+- [Environment](#environment): An **optional** field that
527527+ allows you to define environment variables.
528528+- [Steps](#steps): An **optional** field that allows you to
529529+ define what steps should run in the workflow.
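
Taken together, a minimal workflow might look like the sketch below
(the filename and commands are illustrative; every field is explained
in the sections that follow):

```yaml
# .tangled/workflows/test.yml (illustrative name)
when:
  - event: ["push"]
    branch: ["main"]

engine: "nixery"

dependencies:
  nixpkgs:
    - go

steps:
  - name: "Test"
    command: "go test ./..."
```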
530530+531531+### Trigger
532532+533533+The first thing to add to a workflow is the trigger, which
534534+defines when a workflow runs. This is defined using a `when`
535535+field, which takes in a list of conditions. Each condition
536536+has the following fields:
537537+538538+- `event`: This is a **required** field that defines when
539539+ your workflow should run. It's a list that can take one or
540540+ more of the following values:
541541+ - `push`: The workflow should run every time a commit is
542542+ pushed to the repository.
543543+ - `pull_request`: The workflow should run every time a
544544+ pull request is made or updated.
545545+ - `manual`: The workflow can be triggered manually.
546546+- `branch`: Defines which branches the workflow should run
547547+ for. If used with the `push` event, commits to the
548548+ branch(es) listed here will trigger the workflow. If used
549549+ with the `pull_request` event, updates to pull requests
550550+ targeting the branch(es) listed here will trigger the
551551+ workflow. This field has no effect with the `manual`
552552+ event. Supports glob patterns using `*` and `**` (e.g.,
553553+ `main`, `develop`, `release-*`). Either `branch` or `tag`
554554+ (or both) must be specified for `push` events.
555555+- `tag`: Defines which tags the workflow should run for.
556556+ Only used with the `push` event - when tags matching the
557557+ pattern(s) listed here are pushed, the workflow will
558558+ trigger. This field has no effect with `pull_request` or
559559+ `manual` events. Supports glob patterns using `*` and `**`
560560+ (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
561561+ `tag` (or both) must be specified for `push` events.
562562+563563+For example, if you'd like to define a workflow that runs
564564+when commits are pushed to the `main` and `develop`
565565+branches, or when pull requests that target the `main`
566566+branch are updated, or manually, you can do so with:
567567+568568+```yaml
569569+when:
570570+ - event: ["push", "manual"]
571571+ branch: ["main", "develop"]
572572+ - event: ["pull_request"]
573573+ branch: ["main"]
574574+```
575575+576576+You can also trigger workflows on tag pushes. For instance,
577577+to run a deployment workflow when tags matching `v*` are
578578+pushed:
579579+580580+```yaml
581581+when:
582582+ - event: ["push"]
583583+ tag: ["v*"]
584584+```
585585+586586+You can even combine branch and tag patterns in a single
587587+constraint (the workflow triggers if either matches):
588588+589589+```yaml
590590+when:
591591+ - event: ["push"]
592592+ branch: ["main", "release-*"]
593593+ tag: ["v*", "stable"]
594594+```
595595+596596+### Engine
597597+598598+Next is the engine on which the workflow should run, defined
599599+using the **required** `engine` field. The currently
600600+supported engines are:
601601+602602+- `nixery`: This uses an instance of
603603+ [Nixery](https://nixery.dev) to run steps, which allows
604604+ you to add [dependencies](#dependencies) from
605605+ Nixpkgs (https://github.com/NixOS/nixpkgs). You can
606606+ search for packages on https://search.nixos.org, and
607607+ there's a pretty good chance the package(s) you're looking
608608+ for will be there.
609609+610610+Example:
611611+612612+```yaml
613613+engine: "nixery"
614614+```
615615+616616+### Clone options
617617+618618+When a workflow starts, the first step is to clone the
619619+repository. You can customize this behavior using the
620620+**optional** `clone` field. It has the following fields:
621621+622622+- `skip`: Setting this to `true` will skip cloning the
623623+ repository. This can be useful if your workflow is doing
624624+ something that doesn't require anything from the
625625+ repository itself. This is `false` by default.
626626+- `depth`: This sets the number of commits, or the "clone
627627+ depth", to fetch from the repository. For example, if you
628628+ set this to 2, the last 2 commits will be fetched. By
629629+ default, the depth is set to 1, meaning only the most
630630+ recent commit will be fetched, which is the commit that
631631+ triggered the workflow.
632632+- `submodules`: If you use Git submodules
633633+ (https://git-scm.com/book/en/v2/Git-Tools-Submodules)
634634+ in your repository, setting this field to `true` will
635635+ recursively fetch all submodules. This is `false` by
636636+ default.
637637+638638+The default settings are:
639639+640640+```yaml
641641+clone:
642642+ skip: false
643643+ depth: 1
644644+ submodules: false
645645+```
646646+647647+### Dependencies
648648+649649+Usually when you're running a workflow, you'll need
650650+additional dependencies. The `dependencies` field lets you
651651+define which dependencies to get, and from where. It's a
652652+key-value map, with the key being the registry to fetch
653653+dependencies from, and the value being the list of
654654+dependencies to fetch.
655655+656656+Say you want to fetch Node.js and Go from `nixpkgs`, and a
package called `my_pkg` from your own registry hosted at
`https://tangled.org/@example.com/my_pkg`. You can define
660660+those dependencies like so:
661661+662662+```yaml
663663+dependencies:
664664+ # nixpkgs
665665+ nixpkgs:
666666+ - nodejs
667667+ - go
668668+ # custom registry
669669+ git+https://tangled.org/@example.com/my_pkg:
670670+ - my_pkg
671671+```
672672+673673+Now these dependencies are available to use in your
674674+workflow!
675675+676676+### Environment

The `environment` field allows you to define environment
679679+variables that will be available throughout the entire
workflow. **Do not put secrets here; these environment
681681+variables are visible to anyone viewing the repository. You
682682+can add secrets for pipelines in your repository's
683683+settings.**
684684+685685+Example:
686686+687687+```yaml
688688+environment:
689689+ GOOS: "linux"
690690+ GOARCH: "arm64"
691691+ NODE_ENV: "production"
692692+ MY_ENV_VAR: "MY_ENV_VALUE"
693693+```
694694+695695+### Steps
696696+697697+The `steps` field allows you to define what steps should run
698698+in the workflow. It's a list of step objects, each with the
699699+following fields:
700700+701701+- `name`: This field allows you to give your step a name.
702702+ This name is visible in your workflow runs, and is used to
703703+ describe what the step is doing.
704704+- `command`: This field allows you to define a command to
705705+ run in that step. The step is run in a Bash shell, and the
706706+ logs from the command will be visible in the pipelines
707707+ page on the Tangled website. The
708708+ [dependencies](#dependencies) you added will be available
709709+ to use here.
710710+- `environment`: Similar to the global
711711+ [environment](#environment) config, this **optional**
712712+ field is a key-value map that allows you to set
713713+ environment variables for the step. **Do not put secrets
  here; these environment variables are visible to anyone
715715+ viewing the repository. You can add secrets for pipelines
716716+ in your repository's settings.**
717717+718718+Example:
719719+720720+```yaml
721721+steps:
722722+ - name: "Build backend"
723723+ command: "go build"
724724+ environment:
725725+ GOOS: "darwin"
726726+ GOARCH: "arm64"
727727+ - name: "Build frontend"
728728+ command: "npm run build"
729729+ environment:
730730+ NODE_ENV: "production"
731731+```
732732+733733+### Complete workflow
734734+735735+```yaml
736736+# .tangled/workflows/build.yml
737737+738738+when:
739739+ - event: ["push", "manual"]
740740+ branch: ["main", "develop"]
741741+ - event: ["pull_request"]
742742+ branch: ["main"]
743743+744744+engine: "nixery"
745745+746746+# using the default values
747747+clone:
748748+ skip: false
749749+ depth: 1
750750+ submodules: false
751751+752752+dependencies:
753753+ # nixpkgs
754754+ nixpkgs:
755755+ - nodejs
756756+ - go
757757+ # custom registry
758758+ git+https://tangled.org/@example.com/my_pkg:
759759+ - my_pkg
760760+761761+environment:
762762+ GOOS: "linux"
763763+ GOARCH: "arm64"
764764+ NODE_ENV: "production"
765765+ MY_ENV_VAR: "MY_ENV_VALUE"
766766+767767+steps:
768768+ - name: "Build backend"
769769+ command: "go build"
770770+ environment:
771771+ GOOS: "darwin"
772772+ GOARCH: "arm64"
773773+ - name: "Build frontend"
774774+ command: "npm run build"
775775+ environment:
776776+ NODE_ENV: "production"
777777+```
778778+779779+If you want another example of a workflow, you can look at
780780+the one [Tangled uses to build the
781781+project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
782782+783783+## Self-hosting guide
784784+785785+### Prerequisites
786786+787787+* Go
788788+* Docker (the only supported backend currently)
789789+790790+### Configuration
791791+792792+Spindle is configured using environment variables. The following environment variables are available:
793793+794794+* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
795795+* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
796796+* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
797797+* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
798798+* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
799799+* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
800800+* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
801801+* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
802802+* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
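
As a reference point, a typical environment might look like the
sketch below (hostname, DID, and paths are placeholders; anything left
unset falls back to the defaults listed above):

```shell
export SPINDLE_SERVER_HOSTNAME="spindle.example.com"
export SPINDLE_SERVER_OWNER="did:plc:foobar"
export SPINDLE_SERVER_LISTEN_ADDR="0.0.0.0:6555"
export SPINDLE_SERVER_DB_PATH="/var/lib/spindle/spindle.db"
export SPINDLE_PIPELINES_LOG_DIR="/var/log/spindle"
```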
803803+804804+### Running spindle
805805+806806+1. **Set the environment variables.** For example:
807807+808808+ ```shell
809809+ export SPINDLE_SERVER_HOSTNAME="your-hostname"
810810+ export SPINDLE_SERVER_OWNER="your-did"
811811+ ```
812812+813813+2. **Build the Spindle binary.**
814814+815815+ ```shell
816816+ cd core
817817+ go mod download
818818+ go build -o cmd/spindle/spindle cmd/spindle/main.go
819819+ ```
820820+821821+3. **Create the log directory.**
822822+823823+ ```shell
824824+ sudo mkdir -p /var/log/spindle
825825+ sudo chown $USER:$USER -R /var/log/spindle
826826+ ```
827827+828828+4. **Run the Spindle binary.**
829829+830830+ ```shell
831831+ ./cmd/spindle/spindle
832832+ ```
833833+834834+Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
835835+836836+## Architecture
837837+838838+Spindle is a small CI runner service. Here's a high-level overview of how it operates:
839839+840840+* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
841841+[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
842842+* When a new repo record comes through (typically when you add a spindle to a
843843+repo from the settings), spindle then resolves the underlying knot and
844844+subscribes to repo events (see:
845845+[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
846846+* The spindle engine then handles execution of the pipeline, with results and
logs beamed on the spindle event stream over WebSocket.
848848+849849+### The engine
850850+851851+At present, the only supported backend is Docker (and Podman, if Docker
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
853853+executes each step in the pipeline in a fresh container, with state persisted
854854+across steps within the `/tangled/workspace` directory.
855855+856856+The base image for the container is constructed on the fly using
857857+[Nixery](https://nixery.dev), which is handy for caching layers for frequently
858858+used packages.
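
As a rough illustration of how Nixery composes these images (not
spindle's exact invocation), each path segment of the image name is a
package, so a workflow that depends on `go` and `nodejs` maps to an
image along the lines of:

```bash
# illustrative only: an image containing a shell, go, and nodejs
docker pull nixery.tangled.sh/shell/go/nodejs
```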
859859+860860+The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
861861+862862+## Secrets with openbao
863863+864864+This document covers setting up spindle to use OpenBao for secrets
865865+management via OpenBao Proxy instead of the default SQLite backend.
866866+867867+### Overview
868868+869869+Spindle now uses OpenBao Proxy for secrets management. The proxy handles
870870+authentication automatically using AppRole credentials, while spindle
871871+connects to the local proxy instead of directly to the OpenBao server.
872872+873873+This approach provides better security, automatic token renewal, and
874874+simplified application code.
875875+876876+### Installation
877877+878878+Install OpenBao from Nixpkgs:
879879+880880+```bash
881881+nix shell nixpkgs#openbao # for a local server
882882+```
883883+884884+### Setup

The setup process is documented for both local development and production.
887887+888888+#### Local development
889889+890890+Start OpenBao in dev mode:
891891+892892+```bash
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
```

This starts OpenBao on `http://localhost:8200` with a root token.
897897+898898+Set up environment for bao CLI:
899899+900900+```bash
901901+export BAO_ADDR=http://localhost:8200
902902+export BAO_TOKEN=root
903903+```
904904+905905+#### Production
906906+907907+You would typically use a systemd service with a
908908+configuration file. Refer to
909909+[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
910910+for how this can be achieved using Nix.
911911+912912+Then, initialize the bao server:
913913+914914+```bash
915915+bao operator init -key-shares=1 -key-threshold=1
916916+```

This will print out an unseal key and a root token. Save them
919919+somewhere (like a password manager). Then unseal the vault
920920+to begin setting it up:
921921+922922+```bash
923923+bao operator unseal <unseal_key>
924924+```
925925+926926+All steps below remain the same across both dev and
927927+production setups.
928928+929929+#### Configure openbao server
930930+931931+Create the spindle KV mount:
932932+933933+```bash
934934+bao secrets enable -path=spindle -version=2 kv
935935+```

Set up AppRole authentication and a policy. First, create a policy file `spindle-policy.hcl`:
940940+941941+```hcl
942942+# Full access to spindle KV v2 data
943943+path "spindle/data/*" {
944944+ capabilities = ["create", "read", "update", "delete"]
945945+}
946946+947947+# Access to metadata for listing and management
948948+path "spindle/metadata/*" {
949949+ capabilities = ["list", "read", "delete", "update"]
950950+}
951951+952952+# Allow listing at root level
953953+path "spindle/" {
954954+ capabilities = ["list"]
955955+}
956956+957957+# Required for connection testing and health checks
958958+path "auth/token/lookup-self" {
959959+ capabilities = ["read"]
960960+}
961961+```
962962+963963+Apply the policy and create an AppRole:
964964+965965+```bash
966966+bao policy write spindle-policy spindle-policy.hcl
967967+bao auth enable approle
968968+bao write auth/approle/role/spindle \
969969+ token_policies="spindle-policy" \
970970+ token_ttl=1h \
971971+ token_max_ttl=4h \
972972+ bind_secret_id=true \
973973+ secret_id_ttl=0 \
974974+ secret_id_num_uses=0
975975+```
976976+977977+Get the credentials:
978978+979979+```bash
980980+# Get role ID (static)
981981+ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
982982+983983+# Generate secret ID
984984+SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
985985+986986+echo "Role ID: $ROLE_ID"
987987+echo "Secret ID: $SECRET_ID"
988988+```
989989+990990+#### Create proxy configuration
991991+992992+Create the credential files:
993993+994994+```bash
995995+# Create directory for OpenBao files
996996+mkdir -p /tmp/openbao
997997+998998+# Save credentials
999999+echo "$ROLE_ID" > /tmp/openbao/role-id
10001000+echo "$SECRET_ID" > /tmp/openbao/secret-id
10011001+chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
10021002+```
10031003+10041004+Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
10051005+10061006+```hcl
10071007+# OpenBao server connection
10081008+vault {
10091009+ address = "http://localhost:8200"
10101010+}
10111011+10121012+# Auto-Auth using AppRole
10131013+auto_auth {
10141014+ method "approle" {
10151015+ mount_path = "auth/approle"
10161016+ config = {
10171017+ role_id_file_path = "/tmp/openbao/role-id"
10181018+ secret_id_file_path = "/tmp/openbao/secret-id"
10191019+ }
10201020+ }
10211021+10221022+ # Optional: write token to file for debugging
10231023+ sink "file" {
10241024+ config = {
10251025+ path = "/tmp/openbao/token"
10261026+ mode = 0640
10271027+ }
10281028+ }
10291029+}
10301030+10311031+# Proxy listener for spindle
10321032+listener "tcp" {
10331033+ address = "127.0.0.1:8201"
10341034+ tls_disable = true
10351035+}
10361036+10371037+# Enable API proxy with auto-auth token
10381038+api_proxy {
10391039+ use_auto_auth_token = true
10401040+}
10411041+10421042+# Enable response caching
10431043+cache {
10441044+ use_auto_auth_token = true
10451045+}
10461046+10471047+# Logging
10481048+log_level = "info"
10491049+```
10501050+10511051+#### Start the proxy
10521052+10531053+Start OpenBao Proxy:
10541054+10551055+```bash
10561056+bao proxy -config=/tmp/openbao/proxy.hcl
10571057+```
10581058+10591059+The proxy will authenticate with OpenBao and start listening on
10601060+`127.0.0.1:8201`.
10611061+10621062+#### Configure spindle
10631063+10641064+Set these environment variables for spindle:
10651065+10661066+```bash
10671067+export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
10681068+export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
10691069+export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
10701070+```
10711071+10721072+On startup, spindle will now connect to the local proxy,
10731073+which handles all authentication automatically.
10741074+10751075+### Production setup for proxy
10761076+10771077+For production, you'll want to run the proxy as a service:
10781078+10791079+Place your production configuration in
10801080+`/etc/openbao/proxy.hcl` with proper TLS settings for the
10811081+vault connection.
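
The shape of the file is the same as the development configuration
above; typically only the `vault` stanza changes. A hedged sketch, with
placeholder addresses and paths:

```hcl
# /etc/openbao/proxy.hcl (sketch)
vault {
  address = "https://openbao.example.com:8200"
  ca_cert = "/etc/openbao/ca.pem"
}
```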
10821082+10831083+### Verifying setup
10841084+10851085+Test the proxy directly:
10861086+10871087+```bash
10881088+# Check proxy health
10891089+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
10901090+10911091+# Test token lookup through proxy
10921092+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
10931093+```
10941094+10951095+Test OpenBao operations through the server:
10961096+10971097+```bash
10981098+# List all secrets
10991099+bao kv list spindle/
11001100+11011101+# Add a test secret via the spindle API, then check it exists
11021102+bao kv list spindle/repos/
11031103+11041104+# Get a specific secret
11051105+bao kv get spindle/repos/your_repo_path/SECRET_NAME
11061106+```
11071107+11081108+### How it works
11091109+11101110+- Spindle connects to OpenBao Proxy on localhost (typically
11111111+ port 8200 or 8201)
11121112+- The proxy authenticates with OpenBao using AppRole
11131113+ credentials
11141114+- All spindle requests go through the proxy, which injects
11151115+ authentication tokens
11161116+- Secrets are stored at
11171117+ `spindle/repos/{sanitized_repo_path}/{secret_key}`
11181118+- Repository paths like `did:plc:alice/myrepo` become
11191119+ `did_plc_alice_myrepo`
11201120+- The proxy handles all token renewal automatically
11211121+- Spindle no longer manages tokens or authentication
11221122+ directly
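
A minimal sketch of the path sanitization described above
(illustrative only, not spindle's actual implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeRepoPath turns a repo path like "did:plc:alice/myrepo"
// into a key-safe segment like "did_plc_alice_myrepo".
func sanitizeRepoPath(repo string) string {
	return strings.NewReplacer(":", "_", "/", "_").Replace(repo)
}

func main() {
	// prints: spindle/repos/did_plc_alice_myrepo/SECRET_NAME
	fmt.Printf("spindle/repos/%s/%s\n", sanitizeRepoPath("did:plc:alice/myrepo"), "SECRET_NAME")
}
```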
11231123+11241124+### Troubleshooting
11251125+11261126+**Connection refused**: Check that the OpenBao Proxy is
11271127+running and listening on the configured address.
11281128+11291129+**403 errors**: Verify the AppRole credentials are correct
11301130+and the policy has the necessary permissions.
11311131+11321132+**404 route errors**: The spindle KV mount probably doesn't
11331133+exist—run the mount creation step again.
11341134+11351135+**Proxy authentication failures**: Check the proxy logs and
11361136+verify the role-id and secret-id files are readable and
11371137+contain valid credentials.
11381138+11391139+**Secret not found after writing**: This can indicate policy
11401140+permission issues. Verify the policy includes both
11411141+`spindle/data/*` and `spindle/metadata/*` paths with
11421142+appropriate capabilities.
11431143+11441144+Check proxy logs:
11451145+11461146+```bash
11471147+# If running as systemd service
11481148+journalctl -u openbao-proxy -f
11491149+11501150+# If running directly, check the console output
11511151+```
11521152+11531153+Test AppRole authentication manually:
11541154+11551155+```bash
11561156+bao write auth/approle/login \
11571157+ role_id="$(cat /tmp/openbao/role-id)" \
11581158+ secret_id="$(cat /tmp/openbao/secret-id)"
11591159+```
11601160+11611161+# Migrating knots and spindles

Sometimes, backwards-incompatible changes are made to the
11641164+knot/spindle XRPC APIs. If you host a knot or a spindle, you
11651165+will need to follow this guide to upgrade. Typically, this
11661166+only requires you to deploy the newest version.
11671167+11681168+This document is laid out in reverse-chronological order.
11691169+Newer migration guides are listed first, and older guides
11701170+are further down the page.
11711171+11721172+## Upgrading from v1.8.x
11731173+11741174+After v1.8.2, the HTTP API for knots and spindles has been
11751175+deprecated and replaced with XRPC. Repositories on outdated
11761176+knots will not be viewable from the appview. Upgrading is
straightforward, however.
11781178+11791179+For knots:
11801180+11811181+- Upgrade to the latest tag (v1.9.0 or above)
11821182+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
11831183+ hit the "retry" button to verify your knot
11841184+11851185+For spindles:
11861186+11871187+- Upgrade to the latest tag (v1.9.0 or above)
11881188+- Head to the [spindle
11891189+ dashboard](https://tangled.org/settings/spindles) and hit the
11901190+ "retry" button to verify your spindle
11911191+11921192+## Upgrading from v1.7.x
11931193+11941194+After v1.7.0, knot secrets have been deprecated. You no
11951195+longer need a secret from the appview to run a knot. All
11961196+authorized commands to knots are managed via [Inter-Service
11971197+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
11981198+Knots will be read-only until upgraded.
11991199+12001200+Upgrading is quite easy, in essence:

- `KNOT_SERVER_SECRET` is no more; you can remove this
12031203+ environment variable entirely
- `KNOT_SERVER_OWNER` is now required on boot; set this to
12051205+ your DID. You can find your DID in the
12061206+ [settings](https://tangled.org/settings) page.
12071207+- Restart your knot once you have replaced the environment
12081208+ variable
12091209+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
12101210+ hit the "retry" button to verify your knot. This simply
12111211+ writes a `sh.tangled.knot` record to your PDS.
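
For a manual (non-Nix) setup, the change to `/home/git/.knot.env`
amounts to something like this (the secret value and DID are
placeholders):

```diff
-KNOT_SERVER_SECRET=your-old-appview-secret
+KNOT_SERVER_OWNER=did:plc:foobar
```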
12121212+12131213+If you use the nix module, simply bump the flake to the
12141214+latest revision, and change your config block like so:
12151215+12161216+```diff
12171217+ services.tangled.knot = {
12181218+ enable = true;
12191219+ server = {
12201220+- secretFile = /path/to/secret;
12211221++ owner = "did:plc:foo";
12221222+ };
12231223+ };
12241224+```
12251225+12261226+# Hacking on Tangled
12271227+12281228+We highly recommend [installing
12291229+Nix](https://nixos.org/download/) (the package manager)
12301230+before working on the codebase. The Nix flake provides a lot
of helpers to get started and, most importantly, builds and
12321232+dev shells are entirely deterministic.
12331233+12341234+To set up your dev environment:
12351235+12361236+```bash
12371237+nix develop
12381238+```
12391239+12401240+Non-Nix users can look at the `devShell` attribute in the
12411241+`flake.nix` file to determine necessary dependencies.
12421242+12431243+## Running the appview
12441244+12451245+The Nix flake also exposes a few `app` attributes (run `nix
flake show` to see a full list of what the flake provides);
one of the apps runs the appview with the `air`
12481248+live-reloader:
12491249+12501250+```bash
12511251+TANGLED_DEV=true nix run .#watch-appview
12521252+12531253+# TANGLED_DB_PATH might be of interest to point to
12541254+# different sqlite DBs
12551255+12561256+# in a separate shell, you can live-reload tailwind
12571257+nix run .#watch-tailwind
12581258+```
12591259+12601260+To authenticate with the appview, you will need Redis and
12611261+OAuth JWKs to be set up:
12621262+12631263+```
12641264+# OAuth JWKs should already be set up by the Nix devshell:
12651265+echo $TANGLED_OAUTH_CLIENT_SECRET
12661266+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
12671267+12681268+echo $TANGLED_OAUTH_CLIENT_KID
12691269+1761667908
12701270+12711271+# if not, you can set it up yourself:
12721272+goat key generate -t P-256
12731273+Key Type: P-256 / secp256r1 / ES256 private key
12741274+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
12751275+ z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
12761276+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
12771277+ did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
12781278+12791279+# the secret key from above
12801280+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
12811281+12821282+# Run Redis in a new shell to store OAuth sessions
12831283+redis-server
12841284+```
12851285+12861286+## Running knots and spindles
12871287+12881288+An end-to-end knot setup requires setting up a machine with
12891289+`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
quite cumbersome, so the Nix flake provides a
`nixosConfiguration` that does this for you.
12921292+12931293+<details>
12941294+ <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
12951295+12961296+ In order to build Tangled's dev VM on macOS, you will
12971297+ first need to set up a Linux Nix builder. The recommended
12981298+ way to do so is to run a [`darwin.linux-builder`
12991299+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
13001300+ and to register it in `nix.conf` as a builder for Linux
  with the same architecture as your Mac (`aarch64-linux` if
13021302+ you are using Apple Silicon).
13031303+13041304+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
13051305+ > the Tangled repo so that it doesn't conflict with the other VM. For example,
13061306+ > you can do
13071307+ >
13081308+ > ```shell
13091309+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
13101310+ > ```
13111311+ >
13121312+ > to store the builder VM in a temporary dir.
13131313+ >
  > You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
13151315+ > avoid subtle problems.
13161316+13171317+ Alternatively, you can use any other method to set up a
13181318+ Linux machine with Nix installed that you can `sudo ssh`
13191319+ into (in other words, root user on your Mac has to be able
13201320+ to ssh into the Linux machine without entering a password)
13211321+ and that has the same architecture as your Mac. See
13221322+ [remote builder
13231323+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
13241324+ for how to register such a builder in `nix.conf`.
13251325+13261326+ > WARNING: If you'd like to use
13271327+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
13281328+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
13291329+ > ssh` works can be tricky. It seems to be [possible with
13301330+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
13311331+13321332+</details>
13331333+13341334+To begin, grab your DID from http://localhost:3000/settings.
13351335+Then, set `TANGLED_VM_KNOT_OWNER` and
13361336+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
13371337+lightweight NixOS VM like so:
13381338+13391339+```bash
13401340+nix run --impure .#vm
13411341+13421342+# type `poweroff` at the shell to exit the VM
13431343+```

This starts a knot on port 6444 and a spindle on port 6555,
with `ssh` exposed on port 2222.
13471347+13481348+Once the services are running, head to
13491349+http://localhost:3000/settings/knots and hit "Verify". It should
13501350+verify the ownership of the services instantly if everything
13511351+went smoothly.
13521352+13531353+You can push repositories to this VM with this ssh config
13541354+block on your main machine:

```
13571357+Host nixos-shell
13581358+ Hostname localhost
13591359+ Port 2222
13601360+ User git
13611361+ IdentityFile ~/.ssh/my_tangled_key
13621362+```
13631363+13641364+Set up a remote called `local-dev` on a git repo:
13651365+13661366+```bash
13671367+git remote add local-dev git@nixos-shell:user/repo
13681368+git push local-dev main
13691369+```
13701370+13711371+The above VM should already be running a spindle on
13721372+`localhost:6555`. Head to http://localhost:3000/settings/spindles and
13731373+hit "Verify". You can then configure each repository to use
13741374+this spindle and run CI jobs.
13751375+13761376+Of interest when debugging spindles:
13771377+13781378+```
13791379+# Service logs from journald:
13801380+journalctl -xeu spindle
13811381+13821382+# CI job logs from disk:
13831383+ls /var/log/spindle
13841384+13851385+# Debugging spindle database:
13861386+sqlite3 /var/lib/spindle/spindle.db
13871387+13881388+# litecli has a nicer REPL interface:
13891389+litecli /var/lib/spindle/spindle.db
13901390+```
13911391+13921392+If for any reason you wish to disable either one of the
13931393+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
13941394+`services.tangled.spindle.enable` (or
13951395+`services.tangled.knot.enable`) to `false`.
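
For example, to run only the knot, the relevant line in `nix/vm.nix`
would be:

```nix
services.tangled.spindle.enable = false;
```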
13961396+13971397+# Contribution guide
13981398+13991399+## Commit guidelines
14001400+14011401+We follow a commit style similar to the Go project. Please keep commits:
14021402+14031403+* **atomic**: each commit should represent one logical change
14041404+* **descriptive**: the commit message should clearly describe what the
14051405+change does and why it's needed
14061406+14071407+### Message format
14081408+14091409+```
14101410+<service/top-level directory>/<affected package/directory>: <short summary of change>
14111411+14121412+Optional longer description can go here, if necessary. Explain what the
14131413+change does and why, especially if not obvious. Reference relevant
14141414+issues or PRs when applicable. These can be links for now since we don't
14151415+auto-link issues/PRs yet.
14161416+```
14171417+14181418+Here are some examples:
14191419+14201420+```
14211421+appview/state: fix token expiry check in middleware
14221422+14231423+The previous check did not account for clock drift, leading to premature
14241424+token invalidation.
14251425+```
14261426+14271427+```
14281428+knotserver/git/service: improve error checking in upload-pack
14291429+```
14301430+14311431+14321432+### General notes
14331433+14341434+- PRs get merged "as-is" (fast-forward)—like applying a patch-series
14351435+using `git am`. At present, there is no squashing—so please author
14361436+your commits as they would appear on `master`, following the above
14371437+guidelines.
14381438+- If there is a lot of nesting, for example "appview:
14391439+pages/templates/repo/fragments: ...", these can be truncated down to
14401440+just "appview: repo/fragments: ...". If the change affects a lot of
14411441+subdirectories, you may abbreviate to just the top-level names, e.g.
14421442+"appview: ..." or "knotserver: ...".
14431443+- Keep commits lowercased with no trailing period.
14441444+- Use the imperative mood in the summary line (e.g., "fix bug" not
14451445+"fixed bug" or "fixes bug").
14461446+- Try to keep the summary line under 72 characters, but we aren't too
14471447+fussed about this.
14481448+- Follow the same formatting for PR titles if filled manually.
14491449+- Don't include unrelated changes in the same commit.
14501450+- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
14511451+before submitting if necessary.
14521452+14531453+## Code formatting
14541454+14551455+We use a variety of tools to format our code, and multiplex them with
14561456+[`treefmt`](https://treefmt.com). All you need to do to format your changes
14571457+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
14581458+14591459+## Proposals for bigger changes
14601460+14611461+Small fixes like typos, minor bugs, or trivial refactors can be
14621462+submitted directly as PRs.
14631463+14641464+For larger changes—especially those introducing new features, significant
14651465+refactoring, or altering system behavior—please open a proposal first. This
14661466+helps us evaluate the scope, design, and potential impact before implementation.
14671467+14681468+Create a new issue titled:
14691469+14701470+```
14711471+proposal: <affected scope>: <summary of change>
14721472+```
14731473+14741474+In the description, explain:
14751475+14761476+- What the change is
14771477+- Why it's needed
14781478+- How you plan to implement it (roughly)
14791479+- Any open questions or tradeoffs
14801480+14811481+We'll use the issue thread to discuss and refine the idea before moving
14821482+forward.
14831483+14841484+## Developer Certificate of Origin (DCO)
14851485+14861486+We require all contributors to certify that they have the right to
14871487+submit the code they're contributing. To do this, we follow the
14881488+[Developer Certificate of Origin
14891489+(DCO)](https://developercertificate.org/).
14901490+14911491+By signing your commits, you're stating that the contribution is your
14921492+own work, or that you have the right to submit it under the project's
14931493+license. This helps us keep things clean and legally sound.
14941494+14951495+To sign your commit, just add the `-s` flag when committing:
14961496+14971497+```sh
14981498+git commit -s -m "your commit message"
14991499+```
15001500+15011501+This appends a line like:
15021502+15031503+```
15041504+Signed-off-by: Your Name <your.email@example.com>
15051505+```
15061506+15071507+We won't merge commits if they aren't signed off. If you forget, you can
15081508+amend the last commit like this:
15091509+15101510+```sh
15111511+git commit --amend -s
15121512+```
15131513+15141514+If you're submitting a PR with multiple commits, make sure each one is
15151515+signed.
15161516+15171517+For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
15181518+to make it sign off commits in the tangled repo:
15191519+15201520+```shell
15211521+# Safety check, should say "No matching config key..."
15221522+jj config list templates.commit_trailers
15231523+# The command below may need to be adjusted if the command above returned something.
15241524+jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
15251525+```
15261526+15271527+Refer to the [jujutsu
15281528+documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
15291529+for more information.
-136
docs/contributing.md
···11-# tangled contributing guide
22-33-## commit guidelines
44-55-We follow a commit style similar to the Go project. Please keep commits:
66-77-* **atomic**: each commit should represent one logical change
88-* **descriptive**: the commit message should clearly describe what the
99-change does and why it's needed
1010-1111-### message format
1212-1313-```
1414-<service/top-level directory>/<affected package/directory>: <short summary of change>
1515-1616-1717-Optional longer description can go here, if necessary. Explain what the
1818-change does and why, especially if not obvious. Reference relevant
1919-issues or PRs when applicable. These can be links for now since we don't
2020-auto-link issues/PRs yet.
2121-```
2222-2323-Here are some examples:
2424-2525-```
2626-appview/state: fix token expiry check in middleware
2727-2828-The previous check did not account for clock drift, leading to premature
2929-token invalidation.
3030-```
3131-3232-```
3333-knotserver/git/service: improve error checking in upload-pack
3434-```
3535-3636-3737-### general notes
3838-3939-- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
4040-using `git am`. At present, there is no squashing -- so please author
4141-your commits as they would appear on `master`, following the above
4242-guidelines.
4343-- If there is a lot of nesting, for example "appview:
4444-pages/templates/repo/fragments: ...", these can be truncated down to
4545-just "appview: repo/fragments: ...". If the change affects a lot of
4646-subdirectories, you may abbreviate to just the top-level names, e.g.
4747-"appview: ..." or "knotserver: ...".
4848-- Keep commits lowercased with no trailing period.
4949-- Use the imperative mood in the summary line (e.g., "fix bug" not
5050-"fixed bug" or "fixes bug").
5151-- Try to keep the summary line under 72 characters, but we aren't too
5252-fussed about this.
5353-- Follow the same formatting for PR titles if filled manually.
5454-- Don't include unrelated changes in the same commit.
5555-- Avoid noisy commit messages like "wip" or "final fix"—rewrite history
5656-before submitting if necessary.
5757-5858-## code formatting
5959-6060-We use a variety of tools to format our code, and multiplex them with
6161-[`treefmt`](https://treefmt.com): all you need to do to format your changes
6262-is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
6363-6464-## proposals for bigger changes
6565-6666-Small fixes like typos, minor bugs, or trivial refactors can be
6767-submitted directly as PRs.
6868-6969-For larger changes—especially those introducing new features, significant
7070-refactoring, or altering system behavior—please open a proposal first. This
7171-helps us evaluate the scope, design, and potential impact before implementation.
7272-7373-### proposal format
7474-7575-Create a new issue titled:
7676-7777-```
7878-proposal: <affected scope>: <summary of change>
7979-```
8080-8181-In the description, explain:
8282-8383-- What the change is
8484-- Why it's needed
8585-- How you plan to implement it (roughly)
8686-- Any open questions or tradeoffs
8787-8888-We'll use the issue thread to discuss and refine the idea before moving
8989-forward.
9090-9191-## developer certificate of origin (DCO)
9292-9393-We require all contributors to certify that they have the right to
9494-submit the code they're contributing. To do this, we follow the
9595-[Developer Certificate of Origin
9696-(DCO)](https://developercertificate.org/).
9797-9898-By signing your commits, you're stating that the contribution is your
9999-own work, or that you have the right to submit it under the project's
100100-license. This helps us keep things clean and legally sound.
101101-102102-To sign your commit, just add the `-s` flag when committing:
103103-104104-```sh
105105-git commit -s -m "your commit message"
106106-```
107107-108108-This appends a line like:
109109-110110-```
111111-Signed-off-by: Your Name <your.email@example.com>
112112-```
113113-114114-We won't merge commits if they aren't signed off. If you forget, you can
115115-amend the last commit like this:
116116-117117-```sh
118118-git commit --amend -s
119119-```
120120-121121-If you're submitting a PR with multiple commits, make sure each one is
122122-signed.
123123-124124-For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125125-to make it sign off commits in the tangled repo:
126126-127127-```shell
128128-# Safety check, should say "No matching config key..."
129129-jj config list templates.commit_trailers
130130-# The command below may need to be adjusted if the command above returned something.
131131-jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132132-```
133133-134134-Refer to the [jj
135135-documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136136-for more information.
-172
docs/hacking.md
···11-# hacking on tangled
22-33-We highly recommend [installing
44-nix](https://nixos.org/download/) (the package manager)
55-before working on the codebase. The nix flake provides a lot
66-of helpers to get started and most importantly, builds and
77-dev shells are entirely deterministic.
88-99-To set up your dev environment:
1010-1111-```bash
1212-nix develop
1313-```
1414-1515-Non-nix users can look at the `devShell` attribute in the
1616-`flake.nix` file to determine necessary dependencies.
1717-1818-## running the appview
1919-2020-The nix flake also exposes a few `app` attributes (run `nix
2121-flake show` to see a full list of what the flake provides),
2222-one of the apps runs the appview with the `air`
2323-live-reloader:
2424-2525-```bash
2626-TANGLED_DEV=true nix run .#watch-appview
2727-2828-# TANGLED_DB_PATH might be of interest to point to
2929-# different sqlite DBs
3030-3131-# in a separate shell, you can live-reload tailwind
3232-nix run .#watch-tailwind
3333-```
3434-3535-To authenticate with the appview, you will need redis and
3636-OAUTH JWKs to be setup:
3737-3838-```
3939-# oauth jwks should already be setup by the nix devshell:
4040-echo $TANGLED_OAUTH_CLIENT_SECRET
4141-z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
4242-4343-echo $TANGLED_OAUTH_CLIENT_KID
4444-1761667908
4545-4646-# if not, you can set it up yourself:
4747-goat key generate -t P-256
4848-Key Type: P-256 / secp256r1 / ES256 private key
4949-Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
5050- z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
5151-Public Key (DID Key Syntax): share or publish this (eg, in DID document)
5252- did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
5353-5454-# the secret key from above
5555-export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
5656-5757-# run redis in a new shell to store oauth sessions
5858-redis-server
5959-```
6060-6161-## running knots and spindles
6262-6363-An end-to-end knot setup requires setting up a machine with
6464-`sshd`, `AuthorizedKeysCommand`, and git user, which is
6565-quite cumbersome. So the nix flake provides a
6666-`nixosConfiguration` to do so.
6767-6868-<details>
6969- <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
7070-7171- In order to build Tangled's dev VM on macOS, you will
7272- first need to set up a Linux Nix builder. The recommended
7373- way to do so is to run a [`darwin.linux-builder`
7474- VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
7575- and to register it in `nix.conf` as a builder for Linux
7676- with the same architecture as your Mac (`linux-aarch64` if
7777- you are using Apple Silicon).
7878-7979- > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
8080- > the tangled repo so that it doesn't conflict with the other VM. For example,
8181- > you can do
8282- >
8383- > ```shell
8484- > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
8585- > ```
8686- >
8787- > to store the builder VM in a temporary dir.
8888- >
8989- > You should read and follow [all the other instructions][darwin builder vm] to
9090- > avoid subtle problems.
9191-9292- Alternatively, you can use any other method to set up a
9393- Linux machine with `nix` installed that you can `sudo ssh`
9494- into (in other words, root user on your Mac has to be able
9595- to ssh into the Linux machine without entering a password)
9696- and that has the same architecture as your Mac. See
9797- [remote builder
9898- instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
9999- for how to register such a builder in `nix.conf`.
100100-101101- > WARNING: If you'd like to use
102102- > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103103- > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104104- > ssh` works can be tricky. It seems to be [possible with
105105- > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106106-107107-</details>
108108-109109-To begin, grab your DID from http://localhost:3000/settings.
110110-Then, set `TANGLED_VM_KNOT_OWNER` and
111111-`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112112-lightweight NixOS VM like so:
113113-114114-```bash
115115-nix run --impure .#vm
116116-117117-# type `poweroff` at the shell to exit the VM
118118-```
119119-120120-This starts a knot on port 6000, a spindle on port 6555
121121-with `ssh` exposed on port 2222.
122122-123123-Once the services are running, head to
124124-http://localhost:3000/knots and hit verify. It should
125125-verify the ownership of the services instantly if everything
126126-went smoothly.
127127-128128-You can push repositories to this VM with this ssh config
129129-block on your main machine:
130130-131131-```bash
132132-Host nixos-shell
133133- Hostname localhost
134134- Port 2222
135135- User git
136136- IdentityFile ~/.ssh/my_tangled_key
137137-```
138138-139139-Set up a remote called `local-dev` on a git repo:
140140-141141-```bash
142142-git remote add local-dev git@nixos-shell:user/repo
143143-git push local-dev main
144144-```
145145-146146-### running a spindle
147147-148148-The above VM should already be running a spindle on
149149-`localhost:6555`. Head to http://localhost:3000/spindles and
150150-hit verify. You can then configure each repository to use
151151-this spindle and run CI jobs.
152152-153153-Of interest when debugging spindles:
154154-155155-```
156156-# service logs from journald:
157157-journalctl -xeu spindle
158158-159159-# CI job logs from disk:
160160-ls /var/log/spindle
161161-162162-# debugging spindle db:
163163-sqlite3 /var/lib/spindle/spindle.db
164164-165165-# litecli has a nicer REPL interface:
166166-litecli /var/lib/spindle/spindle.db
167167-```
168168-169169-If for any reason you wish to disable either one of the
170170-services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171171-`services.tangled.spindle.enable` (or
172172-`services.tangled.knot.enable`) to `false`.
···11-# knot self-hosting guide
22-33-So you want to run your own knot server? Great! Here are a few prerequisites:
44-55-1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
66-2. A (sub)domain name. People generally use `knot.example.com`.
77-3. A valid SSL certificate for your domain.
88-99-There's a couple of ways to get started:
1010-* NixOS: refer to
1111-[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
1212-* Docker: Documented at
1313-[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
1414-(community maintained: support is not guaranteed!)
1515-* Manual: Documented below.
1616-1717-## manual setup
1818-1919-First, clone this repository:
2020-2121-```
2222-git clone https://tangled.org/@tangled.org/core
2323-```
2424-2525-Then, build the `knot` CLI. This is the knot administration and operation tool.
2626-For the purpose of this guide, we're only concerned with these subcommands:
2727-2828-* `knot server`: the main knot server process, typically run as a
2929-supervised service
3030-* `knot guard`: handles role-based access control for git over SSH
3131-(you'll never have to run this yourself)
3232-* `knot keys`: fetches SSH keys associated with your knot; we'll use
3333-this to generate the SSH `AuthorizedKeysCommand`
3434-3535-```
3636-cd core
3737-export CGO_ENABLED=1
3838-go build -o knot ./cmd/knot
3939-```
4040-4141-Next, move the `knot` binary to a location owned by `root` --
4242-`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
4343-4444-```
4545-sudo mv knot /usr/local/bin/knot
4646-sudo chown root:root /usr/local/bin/knot
4747-```
4848-4949-This is necessary because SSH `AuthorizedKeysCommand` requires [really
5050-specific permissions](https://stackoverflow.com/a/27638306). The
5151-`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
5252-retrieve a user's public SSH keys dynamically for authentication. Let's
5353-set that up.
5454-5555-```
5656-sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
5757-Match User git
5858- AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
5959- AuthorizedKeysCommandUser nobody
6060-EOF
6161-```
6262-6363-Then, reload `sshd`:
6464-6565-```
6666-sudo systemctl reload ssh
6767-```
6868-6969-Next, create the `git` user. We'll use the `git` user's home directory
7070-to store repositories:
7171-7272-```
7373-sudo adduser git
7474-```
7575-7676-Create `/home/git/.knot.env` with the following, updating the values as
7777-necessary. The `KNOT_SERVER_OWNER` should be set to your
7878-DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
7979-8080-```
8181-KNOT_REPO_SCAN_PATH=/home/git
8282-KNOT_SERVER_HOSTNAME=knot.example.com
8383-APPVIEW_ENDPOINT=https://tangled.sh
8484-KNOT_SERVER_OWNER=did:plc:foobar
8585-KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
8686-KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
8787-```
8888-8989-If you run a Linux distribution that uses systemd, you can use the provided
9090-service file to run the server. Copy
9191-[`knotserver.service`](/systemd/knotserver.service)
9292-to `/etc/systemd/system/`. Then, run:
9393-9494-```
9595-systemctl enable knotserver
9696-systemctl start knotserver
9797-```
9898-9999-The last step is to configure a reverse proxy like Nginx or Caddy to front your
100100-knot. Here's an example configuration for Nginx:
101101-102102-```
103103-server {
104104- listen 80;
105105- listen [::]:80;
106106- server_name knot.example.com;
107107-108108- location / {
109109- proxy_pass http://localhost:5555;
110110- proxy_set_header Host $host;
111111- proxy_set_header X-Real-IP $remote_addr;
112112- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113113- proxy_set_header X-Forwarded-Proto $scheme;
114114- }
115115-116116- # wss endpoint for git events
117117- location /events {
118118- proxy_set_header X-Forwarded-For $remote_addr;
119119- proxy_set_header Host $http_host;
120120- proxy_set_header Upgrade websocket;
121121- proxy_set_header Connection Upgrade;
122122- proxy_pass http://localhost:5555;
123123- }
124124- # additional config for SSL/TLS go here.
125125-}
126126-127127-```
128128-129129-Remember to use Let's Encrypt or similar to procure a certificate for your
130130-knot domain.
131131-132132-You should now have a running knot server! You can finalize
133133-your registration by hitting the `verify` button on the
134134-[/knots](https://tangled.org/knots) page. This simply creates
135135-a record on your PDS to announce the existence of the knot.
136136-137137-### custom paths
138138-139139-(This section applies to manual setup only. Docker users should edit the mounts
140140-in `docker-compose.yml` instead.)
141141-142142-Right now, the database and repositories of your knot lives in `/home/git`. You
143143-can move these paths if you'd like to store them in another folder. Be careful
144144-when adjusting these paths:
145145-146146-* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147147-any possible side effects. Remember to restart it once you're done.
148148-* Make backups before moving in case something goes wrong.
149149-* Make sure the `git` user can read and write from the new paths.
150150-151151-#### database
152152-153153-As an example, let's say the current database is at `/home/git/knotserver.db`,
154154-and we want to move it to `/home/git/database/knotserver.db`.
155155-156156-Copy the current database to the new location. Make sure to copy the `.db-shm`
157157-and `.db-wal` files if they exist.
158158-159159-```
160160-mkdir /home/git/database
161161-cp /home/git/knotserver.db* /home/git/database
162162-```
163163-164164-In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165165-the new file path (_not_ the directory):
166166-167167-```
168168-KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169169-```
170170-171171-#### repositories
172172-173173-As an example, let's say the repositories are currently in `/home/git`, and we
174174-want to move them into `/home/git/repositories`.
175175-176176-Create the new folder, then move the existing repositories (if there are any):
177177-178178-```
179179-mkdir /home/git/repositories
180180-# move all DIDs into the new folder; these will vary for you!
181181-mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182182-```
183183-184184-In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185185-to the new directory:
186186-187187-```
188188-KNOT_REPO_SCAN_PATH=/home/git/repositories
189189-```
190190-191191-Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192192-repository path:
193193-194194-```
195195-sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196196-Match User git
197197- AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198198- AuthorizedKeysCommandUser nobody
199199-EOF
200200-```
201201-202202-Make sure to restart your SSH server!
203203-204204-#### MOTD (message of the day)
205205-206206-To configure the MOTD used ("Welcome to this knot!" by default), edit the
207207-`/home/git/motd` file:
208208-209209-```
210210-printf "Hi from this knot!\n" > /home/git/motd
211211-```
212212-213213-Note that you should add a newline at the end if setting a non-empty message
214214-since the knot won't do this for you.
-59
docs/migrations.md
···11-# Migrations
22-33-This document is laid out in reverse-chronological order.
44-Newer migration guides are listed first, and older guides
55-are further down the page.
66-77-## Upgrading from v1.8.x
88-99-After v1.8.2, the HTTP API for knots and spindles has been
1010-deprecated and replaced with XRPC. Repositories on outdated
1111-knots will not be viewable from the appview. Upgrading is
1212-straightforward however.
1313-1414-For knots:
1515-1616-- Upgrade to latest tag (v1.9.0 or above)
1717-- Head to the [knot dashboard](https://tangled.org/knots) and
1818- hit the "retry" button to verify your knot
1919-2020-For spindles:
2121-2222-- Upgrade to latest tag (v1.9.0 or above)
2323-- Head to the [spindle
2424- dashboard](https://tangled.org/spindles) and hit the
2525- "retry" button to verify your spindle
2626-2727-## Upgrading from v1.7.x
2828-2929-After v1.7.0, knot secrets have been deprecated. You no
3030-longer need a secret from the appview to run a knot. All
3131-authorized commands to knots are managed via [Inter-Service
3232-Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
3333-Knots will be read-only until upgraded.
3434-3535-Upgrading is quite easy, in essence:
3636-3737-- `KNOT_SERVER_SECRET` is no more, you can remove this
3838- environment variable entirely
3939-- `KNOT_SERVER_OWNER` is now required on boot, set this to
4040- your DID. You can find your DID in the
4141- [settings](https://tangled.org/settings) page.
4242-- Restart your knot once you have replaced the environment
4343- variable
4444-- Head to the [knot dashboard](https://tangled.org/knots) and
4545- hit the "retry" button to verify your knot. This simply
4646- writes a `sh.tangled.knot` record to your PDS.
4747-4848-If you use the nix module, simply bump the flake to the
4949-latest revision, and change your config block like so:
5050-5151-```diff
5252- services.tangled.knot = {
5353- enable = true;
5454- server = {
5555-- secretFile = /path/to/secret;
5656-+ owner = "did:plc:foo";
5757- };
5858- };
5959-```
-25
docs/spindle/architecture.md
···11-# spindle architecture
22-33-Spindle is a small CI runner service. Here's a high level overview of how it operates:
44-55-* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
66-[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
77-* when a new repo record comes through (typically when you add a spindle to a
88-repo from the settings), spindle then resolves the underlying knot and
99-subscribes to repo events (see:
1010-[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
1111-* the spindle engine then handles execution of the pipeline, with results and
1212-logs beamed on the spindle event stream over wss
1313-1414-### the engine
1515-1616-At present, the only supported backend is Docker (and Podman, if Docker
1717-compatibility is enabled, so that `/run/docker.sock` is created). Spindle
1818-executes each step in the pipeline in a fresh container, with state persisted
1919-across steps within the `/tangled/workspace` directory.
2020-2121-The base image for the container is constructed on the fly using
2222-[Nixery](https://nixery.dev), which is handy for caching layers for frequently
2323-used packages.
2424-2525-The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
···11-# spindle self-hosting guide
22-33-## prerequisites
44-55-* Go
66-* Docker (the only supported backend currently)
77-88-## configuration
99-1010-Spindle is configured using environment variables. The following environment variables are available:
1111-1212-* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
1313-* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
1414-* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
1515-* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
1616-* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
1717-* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
1818-* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
1919-* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
2020-* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
2121-2222-## running spindle
2323-2424-1. **Set the environment variables.** For example:
2525-2626- ```shell
2727- export SPINDLE_SERVER_HOSTNAME="your-hostname"
2828- export SPINDLE_SERVER_OWNER="your-did"
2929- ```
3030-3131-2. **Build the Spindle binary.**
3232-3333- ```shell
3434- cd core
3535- go mod download
3636- go build -o cmd/spindle/spindle cmd/spindle/main.go
3737- ```
3838-3939-3. **Create the log directory.**
4040-4141- ```shell
4242- sudo mkdir -p /var/log/spindle
4343- sudo chown $USER:$USER -R /var/log/spindle
4444- ```
4545-4646-4. **Run the Spindle binary.**
4747-4848- ```shell
4949- ./cmd/spindle/spindle
5050- ```
5151-5252-Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
···11-# spindle secrets with openbao
22-33-This document covers setting up Spindle to use OpenBao for secrets
44-management via OpenBao Proxy instead of the default SQLite backend.
55-66-## overview
77-88-Spindle now uses OpenBao Proxy for secrets management. The proxy handles
99-authentication automatically using AppRole credentials, while Spindle
1010-connects to the local proxy instead of directly to the OpenBao server.
1111-1212-This approach provides better security, automatic token renewal, and
1313-simplified application code.
1414-1515-## installation
1616-1717-Install OpenBao from nixpkgs:
1818-1919-```bash
2020-nix shell nixpkgs#openbao # for a local server
2121-```
2222-2323-## setup
2424-2525-The setup process is documented for both local development and production.
2626-2727-### local development
2828-2929-Start OpenBao in dev mode:
3030-3131-```bash
3232-bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
3333-```
3434-3535-This starts OpenBao on `http://localhost:8201` with a root token.
3636-3737-Set up environment for bao CLI:
3838-3939-```bash
4040-export BAO_ADDR=http://localhost:8200
4141-export BAO_TOKEN=root
4242-```
4343-4444-### production
4545-4646-You would typically use a systemd service with a configuration file. Refer to
4747-[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
4848-achieved using Nix.
4949-5050-Then, initialize the bao server:
5151-```bash
5252-bao operator init -key-shares=1 -key-threshold=1
5353-```
5454-5555-This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
5656-```bash
5757-bao operator unseal <unseal_key>
5858-```
5959-6060-All steps below remain the same across both dev and production setups.
6161-6262-### configure openbao server
6363-6464-Create the spindle KV mount:
6565-6666-```bash
6767-bao secrets enable -path=spindle -version=2 kv
6868-```
6969-7070-Set up AppRole authentication and policy:
7171-7272-Create a policy file `spindle-policy.hcl`:
7373-7474-```hcl
7575-# Full access to spindle KV v2 data
7676-path "spindle/data/*" {
7777- capabilities = ["create", "read", "update", "delete"]
7878-}
7979-8080-# Access to metadata for listing and management
8181-path "spindle/metadata/*" {
8282- capabilities = ["list", "read", "delete", "update"]
8383-}
8484-8585-# Allow listing at root level
8686-path "spindle/" {
8787- capabilities = ["list"]
8888-}
8989-9090-# Required for connection testing and health checks
9191-path "auth/token/lookup-self" {
9292- capabilities = ["read"]
9393-}
9494-```
9595-9696-Apply the policy and create an AppRole:
9797-9898-```bash
9999-bao policy write spindle-policy spindle-policy.hcl
100100-bao auth enable approle
101101-bao write auth/approle/role/spindle \
102102- token_policies="spindle-policy" \
103103- token_ttl=1h \
104104- token_max_ttl=4h \
105105- bind_secret_id=true \
106106- secret_id_ttl=0 \
107107- secret_id_num_uses=0
108108-```
109109-110110-Get the credentials:
111111-112112-```bash
113113-# Get role ID (static)
114114-ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115115-116116-# Generate secret ID
117117-SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118118-119119-echo "Role ID: $ROLE_ID"
120120-echo "Secret ID: $SECRET_ID"
121121-```
122122-123123-### create proxy configuration
124124-125125-Create the credential files:
126126-127127-```bash
128128-# Create directory for OpenBao files
129129-mkdir -p /tmp/openbao
130130-131131-# Save credentials
132132-echo "$ROLE_ID" > /tmp/openbao/role-id
133133-echo "$SECRET_ID" > /tmp/openbao/secret-id
134134-chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135135-```
136136-137137-Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138138-139139-```hcl
140140-# OpenBao server connection
141141-vault {
142142- address = "http://localhost:8200"
143143-}
144144-145145-# Auto-Auth using AppRole
146146-auto_auth {
147147- method "approle" {
148148- mount_path = "auth/approle"
149149- config = {
150150- role_id_file_path = "/tmp/openbao/role-id"
151151- secret_id_file_path = "/tmp/openbao/secret-id"
152152- }
153153- }
154154-155155- # Optional: write token to file for debugging
156156- sink "file" {
157157- config = {
158158- path = "/tmp/openbao/token"
159159- mode = 0640
160160- }
161161- }
162162-}
163163-164164-# Proxy listener for Spindle
165165-listener "tcp" {
166166- address = "127.0.0.1:8201"
167167- tls_disable = true
168168-}
169169-170170-# Enable API proxy with auto-auth token
171171-api_proxy {
172172- use_auto_auth_token = true
173173-}
174174-175175-# Enable response caching
176176-cache {
177177- use_auto_auth_token = true
178178-}
179179-180180-# Logging
181181-log_level = "info"
182182-```
183183-184184-### start the proxy
185185-186186-Start OpenBao Proxy:
187187-188188-```bash
189189-bao proxy -config=/tmp/openbao/proxy.hcl
190190-```
191191-192192-The proxy will authenticate with OpenBao and start listening on
193193-`127.0.0.1:8201`.
194194-195195-### configure spindle
196196-197197-Set these environment variables for Spindle:
198198-199199-```bash
200200-export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201201-export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202202-export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203203-```
204204-205205-Start Spindle:
206206-207207-Spindle will now connect to the local proxy, which handles all
208208-authentication automatically.
209209-210210-## production setup for proxy
211211-212212-For production, you'll want to run the proxy as a service:
213213-214214-Place your production configuration in `/etc/openbao/proxy.hcl` with
215215-proper TLS settings for the vault connection.
216216-217217-## verifying setup
218218-219219-Test the proxy directly:
220220-221221-```bash
222222-# Check proxy health
223223-curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224224-225225-# Test token lookup through proxy
226226-curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227227-```
228228-229229-Test OpenBao operations through the server:
230230-231231-```bash
232232-# List all secrets
233233-bao kv list spindle/
234234-235235-# Add a test secret via Spindle API, then check it exists
236236-bao kv list spindle/repos/
237237-238238-# Get a specific secret
239239-bao kv get spindle/repos/your_repo_path/SECRET_NAME
240240-```
241241-242242-## how it works
243243-244244-- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245245-- The proxy authenticates with OpenBao using AppRole credentials
246246-- All Spindle requests go through the proxy, which injects authentication tokens
247247-- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248248-- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249249-- The proxy handles all token renewal automatically
250250-- Spindle no longer manages tokens or authentication directly
251251-252252-## troubleshooting
253253-254254-**Connection refused**: Check that the OpenBao Proxy is running and
255255-listening on the configured address.
256256-257257-**403 errors**: Verify the AppRole credentials are correct and the policy
258258-has the necessary permissions.
259259-260260-**404 route errors**: The spindle KV mount probably doesn't exist - run
261261-the mount creation step again.
262262-263263-**Proxy authentication failures**: Check the proxy logs and verify the
264264-role-id and secret-id files are readable and contain valid credentials.
265265-266266-**Secret not found after writing**: This can indicate policy permission
267267-issues. Verify the policy includes both `spindle/data/*` and
268268-`spindle/metadata/*` paths with appropriate capabilities.
269269-270270-Check proxy logs:
271271-272272-```bash
273273-# If running as systemd service
274274-journalctl -u openbao-proxy -f
275275-276276-# If running directly, check the console output
277277-```
278278-279279-Test AppRole authentication manually:
280280-281281-```bash
282282-bao write auth/approle/login \
283283- role_id="$(cat /tmp/openbao/role-id)" \
284284- secret_id="$(cat /tmp/openbao/secret-id)"
285285-```
-183
docs/spindle/pipeline.md
···11-# spindle pipelines
22-33-Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
44-55-The fields are:
66-77-- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
88-- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
99-- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
1010-- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
1111-- [Environment](#environment): An **optional** field that allows you to define environment variables.
1212-- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
1313-1414-## Trigger
1515-1616-The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
1717-1818-- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
1919- - `push`: The workflow should run every time a commit is pushed to the repository.
2020- - `pull_request`: The workflow should run every time a pull request is made or updated.
2121- - `manual`: The workflow can be triggered manually.
2222-- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
2323-- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
2424-2525-For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
2626-2727-```yaml
2828-when:
2929- - event: ["push", "manual"]
3030- branch: ["main", "develop"]
3131- - event: ["pull_request"]
3232- branch: ["main"]
3333-```
3434-3535-You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
3636-3737-```yaml
3838-when:
3939- - event: ["push"]
4040- tag: ["v*"]
4141-```
4242-4343-You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
4444-4545-```yaml
4646-when:
4747- - event: ["push"]
4848- branch: ["main", "release-*"]
4949- tag: ["v*", "stable"]
5050-```
5151-5252-## Engine
5353-5454-Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
5555-5656-- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
5757-5858-Example:
5959-6060-```yaml
6161-engine: "nixery"
6262-```
6363-6464-## Clone options
6565-6666-When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
6767-6868-- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
6969-- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
7070-- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
7171-7272-The default settings are:
7373-7474-```yaml
7575-clone:
7676- skip: false
7777- depth: 1
7878- submodules: false
7979-```
8080-8181-## Dependencies
8282-8383-Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
8484-8585-Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
8686-8787-```yaml
8888-dependencies:
8989- # nixpkgs
9090- nixpkgs:
9191- - nodejs
9292- - go
9393- # custom registry
9494- git+https://tangled.org/@example.com/my_pkg:
9595- - my_pkg
9696-```
9797-9898-Now these dependencies are available to use in your workflow!
9999-100100-## Environment
101101-102102-The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103103-104104-Example:
105105-106106-```yaml
107107-environment:
108108- GOOS: "linux"
109109- GOARCH: "arm64"
110110- NODE_ENV: "production"
111111- MY_ENV_VAR: "MY_ENV_VALUE"
112112-```
113113-114114-## Steps
115115-116116-The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117117-118118-- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119119-- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120120-- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121121-122122-Example:
123123-124124-```yaml
125125-steps:
126126- - name: "Build backend"
127127- command: "go build"
128128- environment:
129129- GOOS: "darwin"
130130- GOARCH: "arm64"
131131- - name: "Build frontend"
132132- command: "npm run build"
133133- environment:
134134- NODE_ENV: "production"
135135-```
136136-137137-## Complete workflow
138138-139139-```yaml
140140-# .tangled/workflows/build.yml
141141-142142-when:
143143- - event: ["push", "manual"]
144144- branch: ["main", "develop"]
145145- - event: ["pull_request"]
146146- branch: ["main"]
147147-148148-engine: "nixery"
149149-150150-# using the default values
151151-clone:
152152- skip: false
153153- depth: 1
154154- submodules: false
155155-156156-dependencies:
157157- # nixpkgs
158158- nixpkgs:
159159- - nodejs
160160- - go
161161- # custom registry
162162- git+https://tangled.org/@example.com/my_pkg:
163163- - my_pkg
164164-165165-environment:
166166- GOOS: "linux"
167167- GOARCH: "arm64"
168168- NODE_ENV: "production"
169169- MY_ENV_VAR: "MY_ENV_VALUE"
170170-171171-steps:
172172- - name: "Build backend"
173173- command: "go build"
174174- environment:
175175- GOOS: "darwin"
176176- GOARCH: "arm64"
177177- - name: "Build frontend"
178178- command: "npm run build"
179179- environment:
180180- NODE_ENV: "production"
181181-```
182182-183183-If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
···7272 // existing instances of the closure when j.WantedDids is mutated
7373 return func(ctx context.Context, evt *models.Event) error {
74747575+ j.mu.RLock()
7576 // empty filter => all dids allowed
7676- if len(j.wantedDids) == 0 {
7777- return processFunc(ctx, evt)
7777+ matches := len(j.wantedDids) == 0
7878+ if !matches {
7979+ if _, ok := j.wantedDids[evt.Did]; ok {
8080+ matches = true
8181+ }
7882 }
8383+ j.mu.RUnlock()
79848080- if _, ok := j.wantedDids[evt.Did]; ok {
8585+ if matches {
8186 return processFunc(ctx, evt)
8287 } else {
8388 return nil
···122127123128 go func() {
124129 if j.waitForDid {
125125- for len(j.wantedDids) == 0 {
130130+ for {
131131+ j.mu.RLock()
132132+ hasDid := len(j.wantedDids) != 0
133133+ j.mu.RUnlock()
134134+ if hasDid {
135135+ break
136136+ }
126137 time.Sleep(time.Second)
127138 }
128139 }
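
A minimal sketch of the locking pattern this hunk introduces, using a simplified stand-in for the consumer: the read lock guards only the lookup in the DID filter and is released before the (potentially slow) process function runs. The `filter` type and the DIDs below are illustrative, not the actual consumer code.

```go
package main

import (
	"fmt"
	"sync"
)

// filter mirrors the guarded DID set above: consult it under a read lock,
// release the lock, then do the slow work outside the critical section.
type filter struct {
	mu   sync.RWMutex
	dids map[string]struct{}
}

func (f *filter) allows(did string) bool {
	f.mu.RLock()
	defer f.mu.RUnlock()
	if len(f.dids) == 0 {
		// empty filter => all dids allowed, matching the hunk above
		return true
	}
	_, ok := f.dids[did]
	return ok
}

func main() {
	f := &filter{dids: map[string]struct{}{"did:plc:alice": {}}}
	fmt.Println(f.allows("did:plc:alice")) // true
	fmt.Println(f.allows("did:plc:bob"))   // false
}
```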
+81
knotserver/db/db.go
···11+package db
22+33+import (
44+ "context"
55+ "database/sql"
66+ "log/slog"
77+ "strings"
88+99+ _ "github.com/mattn/go-sqlite3"
1010+ "tangled.org/core/log"
1111+)
1212+1313+type DB struct {
1414+ db *sql.DB
1515+ logger *slog.Logger
1616+}
1717+1818+func Setup(ctx context.Context, dbPath string) (*DB, error) {
1919+ // https://github.com/mattn/go-sqlite3#connection-string
2020+ opts := []string{
2121+ "_foreign_keys=1",
2222+ "_journal_mode=WAL",
2323+ "_synchronous=NORMAL",
2424+ "_auto_vacuum=incremental",
2525+ }
2626+2727+ logger := log.FromContext(ctx)
2828+ logger = log.SubLogger(logger, "db")
2929+3030+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
3131+ if err != nil {
3232+ return nil, err
3333+ }
3434+3535+ conn, err := db.Conn(ctx)
3636+ if err != nil {
3737+ return nil, err
3838+ }
3939+ defer conn.Close()
4040+4141+ _, err = conn.ExecContext(ctx, `
4242+ create table if not exists known_dids (
4343+ did text primary key
4444+ );
4545+4646+ create table if not exists public_keys (
4747+ id integer primary key autoincrement,
4848+ did text not null,
4949+ key text not null,
5050+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
5151+ unique(did, key),
5252+ foreign key (did) references known_dids(did) on delete cascade
5353+ );
5454+5555+ create table if not exists _jetstream (
5656+ id integer primary key autoincrement,
5757+ last_time_us integer not null
5858+ );
5959+6060+ create table if not exists events (
6161+ rkey text not null,
6262+ nsid text not null,
6363+ event text not null, -- json
6464+ created integer not null default (strftime('%s', 'now')),
6565+ primary key (rkey, nsid)
6666+ );
6767+6868+ create table if not exists migrations (
6969+ id integer primary key autoincrement,
7070+ name text unique
7171+ );
7272+ `)
7373+ if err != nil {
7474+ return nil, err
7575+ }
7676+7777+ return &DB{
7878+ db: db,
7979+ logger: logger,
8080+ }, nil
8181+}
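
A short usage sketch of the new `Setup` signature, which now takes a `context.Context` so the package can derive its scoped "db" sub-logger from it. The import path is assumed from the `tangled.org/core` module layout, and the database path is only an example.

```go
package main

import (
	"context"
	"log"

	"tangled.org/core/knotserver/db"
)

func main() {
	// Setup now takes a context, from which the package derives its logger;
	// the path below is illustrative.
	ctx := context.Background()
	d, err := db.Setup(ctx, "/home/git/knotserver.db")
	if err != nil {
		log.Fatalf("opening knot database: %v", err)
	}
	_ = d // hand off to the knot server as before
}
```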
-64
knotserver/db/init.go
···11-package db
22-33-import (
44- "database/sql"
55- "strings"
66-77- _ "github.com/mattn/go-sqlite3"
88-)
99-1010-type DB struct {
1111- db *sql.DB
1212-}
1313-1414-func Setup(dbPath string) (*DB, error) {
1515- // https://github.com/mattn/go-sqlite3#connection-string
1616- opts := []string{
1717- "_foreign_keys=1",
1818- "_journal_mode=WAL",
1919- "_synchronous=NORMAL",
2020- "_auto_vacuum=incremental",
2121- }
2222-2323- db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
2424- if err != nil {
2525- return nil, err
2626- }
2727-2828- // NOTE: If any other migration is added here, you MUST
2929- // copy the pattern in appview: use a single sql.Conn
3030- // for every migration.
3131-3232- _, err = db.Exec(`
3333- create table if not exists known_dids (
3434- did text primary key
3535- );
3636-3737- create table if not exists public_keys (
3838- id integer primary key autoincrement,
3939- did text not null,
4040- key text not null,
4141- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
4242- unique(did, key),
4343- foreign key (did) references known_dids(did) on delete cascade
4444- );
4545-4646- create table if not exists _jetstream (
4747- id integer primary key autoincrement,
4848- last_time_us integer not null
4949- );
5050-5151- create table if not exists events (
5252- rkey text not null,
5353- nsid text not null,
5454- event text not null, -- json
5555- created integer not null default (strftime('%s', 'now')),
5656- primary key (rkey, nsid)
5757- );
5858- `)
5959- if err != nil {
6060- return nil, err
6161- }
6262-6363- return &DB{db: db}, nil
6464-}
···2222)
23232424type Workflow struct {
2525- Steps []Step
2626- Name string
2727- Data any
2525+ Steps []Step
2626+ Name string
2727+ Data any
2828+ Environment map[string]string
2829}
+77
spindle/models/pipeline_env.go
···11+package models
22+33+import (
44+ "strings"
55+66+ "github.com/go-git/go-git/v5/plumbing"
77+ "tangled.org/core/api/tangled"
88+ "tangled.org/core/workflow"
99+)
1010+1111+// PipelineEnvVars extracts environment variables from pipeline trigger metadata.
1212+// These are framework-provided variables that are injected into workflow steps.
1313+func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string {
1414+ if tr == nil {
1515+ return nil
1616+ }
1717+1818+ env := make(map[string]string)
1919+2020+ // Standard CI environment variable
2121+ env["CI"] = "true"
2222+2323+ env["TANGLED_PIPELINE_ID"] = pipelineId.Rkey
2424+2525+ // Repo info
2626+ if tr.Repo != nil {
2727+ env["TANGLED_REPO_KNOT"] = tr.Repo.Knot
2828+ env["TANGLED_REPO_DID"] = tr.Repo.Did
2929+ env["TANGLED_REPO_NAME"] = tr.Repo.Repo
3030+ env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch
3131+ env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode)
3232+ }
3333+3434+ switch workflow.TriggerKind(tr.Kind) {
3535+ case workflow.TriggerKindPush:
3636+ if tr.Push != nil {
3737+ refName := plumbing.ReferenceName(tr.Push.Ref)
3838+ refType := "branch"
3939+ if refName.IsTag() {
4040+ refType = "tag"
4141+ }
4242+4343+ env["TANGLED_REF"] = tr.Push.Ref
4444+ env["TANGLED_REF_NAME"] = refName.Short()
4545+ env["TANGLED_REF_TYPE"] = refType
4646+ env["TANGLED_SHA"] = tr.Push.NewSha
4747+ env["TANGLED_COMMIT_SHA"] = tr.Push.NewSha
4848+ }
4949+5050+ case workflow.TriggerKindPullRequest:
5151+ if tr.PullRequest != nil {
5252+ // For PRs, the "ref" is the source branch
5353+ env["TANGLED_REF"] = "refs/heads/" + tr.PullRequest.SourceBranch
5454+ env["TANGLED_REF_NAME"] = tr.PullRequest.SourceBranch
5555+ env["TANGLED_REF_TYPE"] = "branch"
5656+ env["TANGLED_SHA"] = tr.PullRequest.SourceSha
5757+ env["TANGLED_COMMIT_SHA"] = tr.PullRequest.SourceSha
5858+5959+ // PR-specific variables
6060+ env["TANGLED_PR_SOURCE_BRANCH"] = tr.PullRequest.SourceBranch
6161+ env["TANGLED_PR_TARGET_BRANCH"] = tr.PullRequest.TargetBranch
6262+ env["TANGLED_PR_SOURCE_SHA"] = tr.PullRequest.SourceSha
6363+ env["TANGLED_PR_ACTION"] = tr.PullRequest.Action
6464+ }
6565+6666+ case workflow.TriggerKindManual:
6767+ // Manual triggers may not have ref/sha info
6868+ // Include any manual inputs if present
6969+ if tr.Manual != nil {
7070+ for _, pair := range tr.Manual.Inputs {
7171+ env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value
7272+ }
7373+ }
7474+ }
7575+7676+ return env
7777+}
···11+package models
22+33+import (
44+ "encoding/base64"
55+ "strings"
66+)
77+88+// SecretMask replaces secret values in strings with "***".
99+type SecretMask struct {
1010+ replacer *strings.Replacer
1111+}
1212+1313+// NewSecretMask creates a mask for the given secret values.
1414+// Also registers base64-encoded variants of each secret.
1515+func NewSecretMask(values []string) *SecretMask {
1616+ var pairs []string
1717+1818+ for _, value := range values {
1919+ if value == "" {
2020+ continue
2121+ }
2222+2323+ pairs = append(pairs, value, "***")
2424+2525+ b64 := base64.StdEncoding.EncodeToString([]byte(value))
2626+ if b64 != value {
2727+ pairs = append(pairs, b64, "***")
2828+ }
2929+3030+ b64NoPad := strings.TrimRight(b64, "=")
3131+ if b64NoPad != b64 && b64NoPad != value {
3232+ pairs = append(pairs, b64NoPad, "***")
3333+ }
3434+ }
3535+3636+ if len(pairs) == 0 {
3737+ return nil
3838+ }
3939+4040+ return &SecretMask{
4141+ replacer: strings.NewReplacer(pairs...),
4242+ }
4343+}
4444+4545+// Mask replaces all registered secret values with "***".
4646+func (m *SecretMask) Mask(input string) string {
4747+ if m == nil || m.replacer == nil {
4848+ return input
4949+ }
5050+ return m.replacer.Replace(input)
5151+}
+135
spindle/models/secret_mask_test.go
···11+package models
22+33+import (
44+ "encoding/base64"
55+ "testing"
66+)
77+88+func TestSecretMask_BasicMasking(t *testing.T) {
99+ mask := NewSecretMask([]string{"mysecret123"})
1010+1111+ input := "The password is mysecret123 in this log"
1212+ expected := "The password is *** in this log"
1313+1414+ result := mask.Mask(input)
1515+ if result != expected {
1616+ t.Errorf("expected %q, got %q", expected, result)
1717+ }
1818+}
1919+2020+func TestSecretMask_Base64Encoded(t *testing.T) {
2121+ secret := "mysecret123"
2222+ mask := NewSecretMask([]string{secret})
2323+2424+ b64 := base64.StdEncoding.EncodeToString([]byte(secret))
2525+ input := "Encoded: " + b64
2626+ expected := "Encoded: ***"
2727+2828+ result := mask.Mask(input)
2929+ if result != expected {
3030+ t.Errorf("expected %q, got %q", expected, result)
3131+ }
3232+}
3333+3434+func TestSecretMask_Base64NoPadding(t *testing.T) {
3535+ // "test" encodes to "dGVzdA==" with padding
3636+ secret := "test"
3737+ mask := NewSecretMask([]string{secret})
3838+3939+ b64NoPad := "dGVzdA" // base64 without padding
4040+ input := "Token: " + b64NoPad
4141+ expected := "Token: ***"
4242+4343+ result := mask.Mask(input)
4444+ if result != expected {
4545+ t.Errorf("expected %q, got %q", expected, result)
4646+ }
4747+}
4848+4949+func TestSecretMask_MultipleSecrets(t *testing.T) {
5050+ mask := NewSecretMask([]string{"password1", "apikey123"})
5151+5252+ input := "Using password1 and apikey123 for auth"
5353+ expected := "Using *** and *** for auth"
5454+5555+ result := mask.Mask(input)
5656+ if result != expected {
5757+ t.Errorf("expected %q, got %q", expected, result)
5858+ }
5959+}
6060+6161+func TestSecretMask_MultipleOccurrences(t *testing.T) {
6262+ mask := NewSecretMask([]string{"secret"})
6363+6464+ input := "secret appears twice: secret"
6565+ expected := "*** appears twice: ***"
6666+6767+ result := mask.Mask(input)
6868+ if result != expected {
6969+ t.Errorf("expected %q, got %q", expected, result)
7070+ }
7171+}
7272+7373+func TestSecretMask_ShortValues(t *testing.T) {
7474+ mask := NewSecretMask([]string{"abc", "xy", ""})
7575+7676+ if mask == nil {
7777+ t.Fatal("expected non-nil mask")
7878+ }
7979+8080+ input := "abc xy test"
8181+ expected := "*** *** test"
8282+ result := mask.Mask(input)
8383+ if result != expected {
8484+ t.Errorf("expected %q, got %q", expected, result)
8585+ }
8686+}
8787+8888+func TestSecretMask_NilMask(t *testing.T) {
8989+ var mask *SecretMask
9090+9191+ input := "some input text"
9292+ result := mask.Mask(input)
9393+ if result != input {
9494+ t.Errorf("expected %q, got %q", input, result)
9595+ }
9696+}
9797+9898+func TestSecretMask_EmptyInput(t *testing.T) {
9999+ mask := NewSecretMask([]string{"secret"})
100100+101101+ result := mask.Mask("")
102102+ if result != "" {
103103+ t.Errorf("expected empty string, got %q", result)
104104+ }
105105+}
106106+107107+func TestSecretMask_NoMatch(t *testing.T) {
108108+ mask := NewSecretMask([]string{"secretvalue"})
109109+110110+ input := "nothing to mask here"
111111+ result := mask.Mask(input)
112112+ if result != input {
113113+ t.Errorf("expected %q, got %q", input, result)
114114+ }
115115+}
116116+117117+func TestSecretMask_EmptySecretsList(t *testing.T) {
118118+ mask := NewSecretMask([]string{})
119119+120120+ if mask != nil {
121121+ t.Error("expected nil mask for empty secrets list")
122122+ }
123123+}
124124+125125+func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
126126+ mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
127127+128128+ input := "Using validpassword here"
129129+ expected := "Using *** here"
130130+131131+ result := mask.Mask(input)
132132+ if result != expected {
133133+ t.Errorf("expected %q, got %q", expected, result)
134134+ }
135135+}
+1-1
spindle/motd
···2020 **
2121 ********
22222323-This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle
2323+This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles
24242525Most API routes are under /xrpc/