···83 Repo *Repo
84}
8586+// NOTE: This method does not include the patch blob in the returned atproto record
87func (p Pull) AsRecord() tangled.RepoPull {
88 var source *tangled.RepoPull_Source
89 if p.PullSource != nil {
···114 Repo: p.RepoAt.String(),
115 Branch: p.TargetBranch,
116 },
117 Source: source,
118 }
119 return record
+9-9
appview/ogcard/card.go
···334 return nil
335}
336337-func (c *Card) DrawDollySilhouette(x, y, size int, iconColor color.Color) error {
338 tpl, err := template.New("dolly").
339- ParseFS(pages.Files, "templates/fragments/dolly/silhouette.html")
340 if err != nil {
341- return fmt.Errorf("failed to read dolly silhouette template: %w", err)
342 }
343344 var svgData bytes.Buffer
345- if err = tpl.ExecuteTemplate(&svgData, "fragments/dolly/silhouette", nil); err != nil {
346- return fmt.Errorf("failed to execute dolly silhouette template: %w", err)
347 }
348349 icon, err := BuildSVGIconFromData(svgData.Bytes(), iconColor)
···453454 // Handle SVG separately
455 if contentType == "image/svg+xml" || strings.HasSuffix(url, ".svg") {
456- return c.convertSVGToPNG(bodyBytes)
457 }
458459 // Supported content types are in sync with the allowed custom avatar file types
···493}
494495// convertSVGToPNG converts SVG data to a PNG image
496-func (c *Card) convertSVGToPNG(svgData []byte) (image.Image, bool) {
497 // Parse the SVG
498 icon, err := oksvg.ReadIconStream(bytes.NewReader(svgData))
499 if err != nil {
···547 draw.CatmullRom.Scale(scaledImg, scaledImg.Bounds(), img, srcBounds, draw.Src, nil)
548549 // Draw the image with circular clipping
550- for cy := 0; cy < size; cy++ {
551- for cx := 0; cx < size; cx++ {
552 // Calculate distance from center
553 dx := float64(cx - center)
554 dy := float64(cy - center)
···334 return nil
335}
336337+func (c *Card) DrawDolly(x, y, size int, iconColor color.Color) error {
338 tpl, err := template.New("dolly").
339+ ParseFS(pages.Files, "templates/fragments/dolly/logo.html")
340 if err != nil {
341+ return fmt.Errorf("failed to read dolly template: %w", err)
342 }
343344 var svgData bytes.Buffer
345+ if err = tpl.ExecuteTemplate(&svgData, "fragments/dolly/logo", nil); err != nil {
346+ return fmt.Errorf("failed to execute dolly template: %w", err)
347 }
348349 icon, err := BuildSVGIconFromData(svgData.Bytes(), iconColor)
···453454 // Handle SVG separately
455 if contentType == "image/svg+xml" || strings.HasSuffix(url, ".svg") {
456+ return convertSVGToPNG(bodyBytes)
457 }
458459 // Supported content types are in sync with the allowed custom avatar file types
···493}
494495// convertSVGToPNG converts SVG data to a PNG image
496+func convertSVGToPNG(svgData []byte) (image.Image, bool) {
497 // Parse the SVG
498 icon, err := oksvg.ReadIconStream(bytes.NewReader(svgData))
499 if err != nil {
···547 draw.CatmullRom.Scale(scaledImg, scaledImg.Bounds(), img, srcBounds, draw.Src, nil)
548549 // Draw the image with circular clipping
550+ for cy := range size {
551+ for cx := range size {
552 // Calculate distance from center
553 dx := float64(cx - center)
554 dy := float64(cy - center)
···30 <div class="mx-6">
31 These services may not be fully accessible until upgraded.
32 <a class="underline text-red-800 dark:text-red-200"
33- href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md">
34 Click to read the upgrade guide</a>.
35 </div>
36 </details>
···30 <div class="mx-6">
31 These services may not be fully accessible until upgraded.
32 <a class="underline text-red-800 dark:text-red-200"
33+ href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles">
34 Click to read the upgrade guide</a>.
35 </div>
36 </details>
+9-29
appview/pages/templates/brand/brand.html
···4<div class="grid grid-cols-10">
5 <header class="col-span-full md:col-span-10 px-6 py-2 mb-4">
6 <h1 class="text-2xl font-bold dark:text-white mb-1">Brand</h1>
7- <p class="text-gray-600 dark:text-gray-400 mb-1">
8 Assets and guidelines for using Tangled's logo and brand elements.
9 </p>
10 </header>
···1415 <!-- Introduction Section -->
16 <section>
17- <p class="text-gray-600 dark:text-gray-400 mb-2">
18 Tangled's logo and mascot is <strong>Dolly</strong>, the first ever <em>cloned</em> mammal. Please
19 follow the below guidelines when using Dolly and the logotype.
20 </p>
21- <p class="text-gray-600 dark:text-gray-400 mb-2">
22 All assets are served as SVGs, and can be downloaded by right-clicking and clicking "Save image as".
23 </p>
24 </section>
···34 </div>
35 <div class="order-1 lg:order-2">
36 <h2 class="text-xl font-semibold dark:text-white mb-3">Black logotype</h2>
37- <p class="text-gray-600 dark:text-gray-400 mb-4">For use on light-colored backgrounds.</p>
38 <p class="text-gray-700 dark:text-gray-300">
39 This is the preferred version of the logotype, featuring dark text and elements, ideal for light
40 backgrounds and designs.
···53 </div>
54 <div class="order-1 lg:order-2">
55 <h2 class="text-xl font-semibold dark:text-white mb-3">White logotype</h2>
56- <p class="text-gray-600 dark:text-gray-400 mb-4">For use on dark-colored backgrounds.</p>
57 <p class="text-gray-700 dark:text-gray-300">
58 This version features white text and elements, ideal for dark backgrounds
59 and inverted designs.
···81 </div>
82 <div class="order-1 lg:order-2">
83 <h2 class="text-xl font-semibold dark:text-white mb-3">Mark only</h2>
84- <p class="text-gray-600 dark:text-gray-400 mb-4">
85 When a smaller 1:1 logo or icon is needed, Dolly's face may be used on its own.
86 </p>
87 <p class="text-gray-700 dark:text-gray-300 mb-4">
···123 </div>
124 <div class="order-1 lg:order-2">
125 <h2 class="text-xl font-semibold dark:text-white mb-3">Colored backgrounds</h2>
126- <p class="text-gray-600 dark:text-gray-400 mb-4">
127 White logo mark on colored backgrounds.
128 </p>
129 <p class="text-gray-700 dark:text-gray-300 mb-4">
···165 </div>
166 <div class="order-1 lg:order-2">
167 <h2 class="text-xl font-semibold dark:text-white mb-3">Lighter backgrounds</h2>
168- <p class="text-gray-600 dark:text-gray-400 mb-4">
169 Dark logo mark on lighter, pastel backgrounds.
170 </p>
171 <p class="text-gray-700 dark:text-gray-300 mb-4">
···186 </div>
187 <div class="order-1 lg:order-2">
188 <h2 class="text-xl font-semibold dark:text-white mb-3">Recoloring</h2>
189- <p class="text-gray-600 dark:text-gray-400 mb-4">
190 Custom coloring of the logotype is permitted.
191 </p>
192 <p class="text-gray-700 dark:text-gray-300 mb-4">
···194 </p>
195 <p class="text-gray-700 dark:text-gray-300 text-sm">
196 <strong>Example:</strong> Gray/sand colored logotype on a light yellow/tan background.
197- </p>
198- </div>
199- </section>
200-201- <!-- Silhouette Section -->
202- <section class="grid grid-cols-1 lg:grid-cols-2 gap-8 items-center">
203- <div class="order-2 lg:order-1">
204- <div class="border border-gray-200 dark:border-gray-700 p-8 sm:p-16 bg-gray-50 dark:bg-gray-100 rounded">
205- <img src="https://assets.tangled.network/tangled_dolly_silhouette.svg"
206- alt="Dolly silhouette"
207- class="w-full max-w-32 mx-auto" />
208- </div>
209- </div>
210- <div class="order-1 lg:order-2">
211- <h2 class="text-xl font-semibold dark:text-white mb-3">Dolly silhouette</h2>
212- <p class="text-gray-600 dark:text-gray-400 mb-4">A minimalist version of Dolly.</p>
213- <p class="text-gray-700 dark:text-gray-300">
214- The silhouette can be used where a subtle brand presence is needed,
215- or as a background element. Works on any background color with proper contrast.
216- For example, we use this as the site's favicon.
217 </p>
218 </div>
219 </section>
···4<div class="grid grid-cols-10">
5 <header class="col-span-full md:col-span-10 px-6 py-2 mb-4">
6 <h1 class="text-2xl font-bold dark:text-white mb-1">Brand</h1>
7+ <p class="text-gray-500 dark:text-gray-300 mb-1">
8 Assets and guidelines for using Tangled's logo and brand elements.
9 </p>
10 </header>
···1415 <!-- Introduction Section -->
16 <section>
17+ <p class="text-gray-500 dark:text-gray-300 mb-2">
18 Tangled's logo and mascot is <strong>Dolly</strong>, the first ever <em>cloned</em> mammal. Please
19 follow the below guidelines when using Dolly and the logotype.
20 </p>
21+ <p class="text-gray-500 dark:text-gray-300 mb-2">
22 All assets are served as SVGs, and can be downloaded by right-clicking and clicking "Save image as".
23 </p>
24 </section>
···34 </div>
35 <div class="order-1 lg:order-2">
36 <h2 class="text-xl font-semibold dark:text-white mb-3">Black logotype</h2>
37+ <p class="text-gray-500 dark:text-gray-300 mb-4">For use on light-colored backgrounds.</p>
38 <p class="text-gray-700 dark:text-gray-300">
39 This is the preferred version of the logotype, featuring dark text and elements, ideal for light
40 backgrounds and designs.
···53 </div>
54 <div class="order-1 lg:order-2">
55 <h2 class="text-xl font-semibold dark:text-white mb-3">White logotype</h2>
56+ <p class="text-gray-500 dark:text-gray-300 mb-4">For use on dark-colored backgrounds.</p>
57 <p class="text-gray-700 dark:text-gray-300">
58 This version features white text and elements, ideal for dark backgrounds
59 and inverted designs.
···81 </div>
82 <div class="order-1 lg:order-2">
83 <h2 class="text-xl font-semibold dark:text-white mb-3">Mark only</h2>
84+ <p class="text-gray-500 dark:text-gray-300 mb-4">
85 When a smaller 1:1 logo or icon is needed, Dolly's face may be used on its own.
86 </p>
87 <p class="text-gray-700 dark:text-gray-300 mb-4">
···123 </div>
124 <div class="order-1 lg:order-2">
125 <h2 class="text-xl font-semibold dark:text-white mb-3">Colored backgrounds</h2>
126+ <p class="text-gray-500 dark:text-gray-300 mb-4">
127 White logo mark on colored backgrounds.
128 </p>
129 <p class="text-gray-700 dark:text-gray-300 mb-4">
···165 </div>
166 <div class="order-1 lg:order-2">
167 <h2 class="text-xl font-semibold dark:text-white mb-3">Lighter backgrounds</h2>
168+ <p class="text-gray-500 dark:text-gray-300 mb-4">
169 Dark logo mark on lighter, pastel backgrounds.
170 </p>
171 <p class="text-gray-700 dark:text-gray-300 mb-4">
···186 </div>
187 <div class="order-1 lg:order-2">
188 <h2 class="text-xl font-semibold dark:text-white mb-3">Recoloring</h2>
189+ <p class="text-gray-500 dark:text-gray-300 mb-4">
190 Custom coloring of the logotype is permitted.
191 </p>
192 <p class="text-gray-700 dark:text-gray-300 mb-4">
···194 </p>
195 <p class="text-gray-700 dark:text-gray-300 text-sm">
196 <strong>Example:</strong> Gray/sand colored logotype on a light yellow/tan background.
197 </p>
198 </div>
199 </section>
···22 <p class="text-gray-500 dark:text-gray-400">
23 Choose a spindle to execute your workflows on. Only repository owners
24 can configure spindles. Spindles can be self-hosted,
25- <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
26 click to learn more.
27 </a>
28 </p>
···22 <p class="text-gray-500 dark:text-gray-400">
23 Choose a spindle to execute your workflows on. Only repository owners
24 can configure spindles. Spindles can be self-hosted,
25+ <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
26 click to learn more.
27 </a>
28 </p>
···1+---
2+title: Tangled docs
3+author: The Tangled Contributors
4+date: Sun, 21 Dec 2025
5+abstract: |
6+ Tangled is a decentralized code hosting and collaboration
7+ platform. Every component of Tangled is open-source and
8+ self-hostable. [tangled.org](https://tangled.org) also
9+ provides hosting and CI services that are free to use.
10+11+ There are several models for decentralized code
12+ collaboration platforms, ranging from ActivityPub's
13+ (Forgejo) federated model, to Radicle's entirely P2P model.
14+ Our approach attempts to be the best of both worlds by
15+ adopting the AT Protocol, a protocol for building decentralized
16+ social applications with a central identity.
17+18+ Central to this approach is the idea of "knots". Knots are
19+ lightweight, headless servers that enable users to host Git
20+ repositories with ease. Knots are designed for either single
21+ or multi-tenant use, which is perfect for self-hosting on a
22+ Raspberry Pi at home, or for larger "community" servers. By
23+ default, Tangled provides managed knots where you can host
24+ your repositories for free.
25+26+ The appview at tangled.org acts as a consolidated "view"
27+ into the whole network, allowing users to access, clone and
28+ contribute to repositories hosted across different knots
29+ seamlessly.
30+---
31+32+# Quick start guide
33+34+## Login or sign up
35+36+You can [log in](https://tangled.org) using your AT Protocol
37+account. If you are unclear on what that means, simply head
38+to the [signup](https://tangled.org/signup) page and create
39+an account. By doing so, you will be choosing Tangled as
40+your account provider (you will be granted a handle of the
41+form `user.tngl.sh`).
42+43+In the AT Protocol network, users are free to choose their account
44+provider (known as a "Personal Data Service", or PDS), and
45+log in to applications that support AT accounts.
46+47+You can think of it as "one account for all of the atmosphere"!
48+49+If you already have an AT account (you may have one if you
50+signed up to Bluesky, for example), you can log in with the
51+same handle on Tangled (so just use `user.bsky.social` on
52+the login page).
53+54+## Add an SSH key
55+56+Once you are logged in, you can start creating repositories
57+and pushing code. Tangled supports pushing git repositories
58+over SSH.
59+60+First, you'll need to generate an SSH key if you don't
61+already have one:
62+63+```bash
64+ssh-keygen -t ed25519 -C "foo@bar.com"
65+```
66+67+When prompted, save the key to the default location
68+(`~/.ssh/id_ed25519`) and optionally set a passphrase.
69+70+Copy your public key to your clipboard:
71+72+```bash
73+# on X11
74+cat ~/.ssh/id_ed25519.pub | xclip -sel c
75+76+# on wayland
77+cat ~/.ssh/id_ed25519.pub | wl-copy
78+79+# on macos
80+cat ~/.ssh/id_ed25519.pub | pbcopy
81+```
82+83+Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
84+paste your public key, give it a descriptive name, and hit
85+save.
86+87+## Create a repository
88+89+Once your SSH key is added, create your first repository:
90+91+1. Hit the green `+` icon on the topbar, and select
92+ repository
93+2. Enter a repository name
94+3. Add a description
95+4. Choose a knotserver to host this repository on
96+5. Hit create
97+98+Knots are self-hostable, lightweight Git servers that can
99+host your repository. Unlike traditional code forges, your
100+code can live on any server. Read the [Knots](TODO) section
101+for more.
102+103+## Configure SSH
104+105+To ensure Git uses the correct SSH key and connects smoothly
106+to Tangled, add this configuration to your `~/.ssh/config`
107+file:
108+109+```
110+Host tangled.org
111+ Hostname tangled.org
112+ User git
113+ IdentityFile ~/.ssh/id_ed25519
114+ AddressFamily inet
115+```
116+117+This tells SSH to use your specific key when connecting to
118+Tangled and prevents authentication issues if you have
119+multiple SSH keys.
120+121+Note that this configuration only works for knotservers that
122+are hosted by tangled.org. If you use a custom knot, refer
123+to the [Knots](TODO) section.
124+125+## Push your first repository
126+127+Initialize a new Git repository:
128+129+```bash
130+mkdir my-project
131+cd my-project
132+133+git init
134+echo "# My Project" > README.md
135+```
136+137+Add some content and push!
138+139+```bash
140+git add README.md
141+git commit -m "Initial commit"
142+git remote add origin git@tangled.org:user.tngl.sh/my-project
143+git push -u origin main
144+```
145+146+That's it! Your code is now hosted on Tangled.
147+148+## Migrating an existing repository
149+150+Moving your repositories from GitHub, GitLab, Bitbucket, or
151+any other Git forge to Tangled is straightforward. You'll
152+simply change your repository's remote URL. At the moment,
153+Tangled does not have any tooling to migrate data such as
154+GitHub issues or pull requests.
155+156+First, create a new repository on tangled.org as described
157+in the [Quick Start Guide](#create-a-repository).
158+159+Navigate to your existing local repository:
160+161+```bash
162+cd /path/to/your/existing/repo
163+```
164+165+You can inspect your existing Git remote like so:
166+167+```bash
168+git remote -v
169+```
170+171+You'll see something like:
172+173+```
174+origin git@github.com:username/my-project (fetch)
175+origin git@github.com:username/my-project (push)
176+```
177+178+Update the remote URL to point to Tangled:
179+180+```bash
181+git remote set-url origin git@tangled.org:user.tngl.sh/my-project
182+```
183+184+Verify the change:
185+186+```bash
187+git remote -v
188+```
189+190+You should now see:
191+192+```
193+origin git@tangled.org:user.tngl.sh/my-project (fetch)
194+origin git@tangled.org:user.tngl.sh/my-project (push)
195+```
196+197+Push all your branches and tags to Tangled:
198+199+```bash
200+git push -u origin --all
201+git push -u origin --tags
202+```
203+204+Your repository is now migrated to Tangled! All commit
205+history, branches, and tags have been preserved.
206+207+## Mirroring a repository to Tangled
208+209+If you want to maintain your repository on multiple forges
210+simultaneously, for example, keeping your primary repository
211+on GitHub while mirroring to Tangled for backup or
212+redundancy, you can do so by adding multiple remotes.
213+214+You can configure your local repository to push to both
215+Tangled and, say, GitHub. You may already have the following
216+setup:
217+218+```
219+$ git remote -v
220+origin git@github.com:username/my-project (fetch)
221+origin git@github.com:username/my-project (push)
222+```
223+224+Now add Tangled as an additional push URL to the same
225+remote:
226+227+```bash
228+git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
229+```
230+231+You also need to re-add the original URL as a push
232+destination (Git replaces the push URL when you use `--add`
233+the first time):
234+235+```bash
236+git remote set-url --add --push origin git@github.com:username/my-project
237+```
238+239+Verify your configuration:
240+241+```
242+$ git remote -v
243+origin git@github.com:username/my-project (fetch)
244+origin git@tangled.org:user.tngl.sh/my-project (push)
245+origin git@github.com:username/my-project (push)
246+```
247+248+Notice that there's one fetch URL (the primary remote) and
249+two push URLs. Now, whenever you push, Git will
250+automatically push to both remotes:
251+252+```bash
253+git push origin main
254+```
255+256+This single command pushes your `main` branch to both GitHub
257+and Tangled simultaneously.
258+259+To push all branches and tags:
260+261+```bash
262+git push origin --all
263+git push origin --tags
264+```
265+266+If you prefer more control over which remote you push to,
267+you can maintain separate remotes:
268+269+```bash
270+git remote add github git@github.com:username/my-project
271+git remote add tangled git@tangled.org:user.tngl.sh/my-project
272+```
273+274+Then push to each explicitly:
275+276+```bash
277+git push github main
278+git push tangled main
279+```
280+281+# Knot self-hosting guide
282+283+So you want to run your own knot server? Great! Here are a few prerequisites:
284+285+1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
286+2. A (sub)domain name. People generally use `knot.example.com`.
287+3. A valid SSL certificate for your domain.
288+289+## NixOS
290+291+Refer to the [knot
292+module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
293+for a full list of options. Sample configurations:
294+295+- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
296+- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
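For orientation, here is a minimal sketch of such a configuration. It only uses the `enable` and `server.owner` options that also appear in the migration notes later in this document; consult the module linked above for everything else (hostname, ports, storage paths).

```nix
# minimal sketch; see the knot module for the full option set
services.tangled.knot = {
  enable = true;
  server = {
    owner = "did:plc:foobar"; # your DID
  };
};
```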
297+298+## Docker
299+300+Refer to
301+[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
302+Note that this is community maintained.
303+304+## Manual setup
305+306+First, clone this repository:
307+308+```
309+git clone https://tangled.org/@tangled.org/core
310+```
311+312+Then, build the `knot` CLI. This is the knot administration
313+and operation tool. For the purpose of this guide, we're
314+only concerned with these subcommands:
315+316+ * `knot server`: the main knot server process, typically
317+ run as a supervised service
318+ * `knot guard`: handles role-based access control for git
319+ over SSH (you'll never have to run this yourself)
320+ * `knot keys`: fetches SSH keys associated with your knot;
321+ we'll use this to generate the SSH
322+ `AuthorizedKeysCommand`
323+324+```
325+cd core
326+export CGO_ENABLED=1
327+go build -o knot ./cmd/knot
328+```
329+330+Next, move the `knot` binary to a location owned by `root` --
331+`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
332+333+```
334+sudo mv knot /usr/local/bin/knot
335+sudo chown root:root /usr/local/bin/knot
336+```
337+338+This is necessary because SSH `AuthorizedKeysCommand` requires [really
339+specific permissions](https://stackoverflow.com/a/27638306). The
340+`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
341+retrieve a user's public SSH keys dynamically for authentication. Let's
342+set that up.
343+344+```
345+sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
346+Match User git
347+ AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
348+ AuthorizedKeysCommandUser nobody
349+EOF
350+```
351+352+Then, reload `sshd`:
353+354+```
355+sudo systemctl reload ssh
356+```
357+358+Next, create the `git` user. We'll use the `git` user's home directory
359+to store repositories:
360+361+```
362+sudo adduser git
363+```
364+365+Create `/home/git/.knot.env` with the following, updating the values as
366+necessary. The `KNOT_SERVER_OWNER` should be set to your
367+DID; you can find your DID on the [Settings](https://tangled.sh/settings) page.
368+369+```
370+KNOT_REPO_SCAN_PATH=/home/git
371+KNOT_SERVER_HOSTNAME=knot.example.com
372+APPVIEW_ENDPOINT=https://tangled.org
373+KNOT_SERVER_OWNER=did:plc:foobar
374+KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
375+KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
376+```
377+378+If you run a Linux distribution that uses systemd, you can use the provided
379+service file to run the server. Copy
380+[`knotserver.service`](/systemd/knotserver.service)
381+to `/etc/systemd/system/`. Then, run:
382+383+```
384+systemctl enable knotserver
385+systemctl start knotserver
386+```
387+388+The last step is to configure a reverse proxy like Nginx or Caddy to front your
389+knot. Here's an example configuration for Nginx:
390+391+```
392+server {
393+ listen 80;
394+ listen [::]:80;
395+ server_name knot.example.com;
396+397+ location / {
398+ proxy_pass http://localhost:5555;
399+ proxy_set_header Host $host;
400+ proxy_set_header X-Real-IP $remote_addr;
401+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
402+ proxy_set_header X-Forwarded-Proto $scheme;
403+ }
404+405+ # wss endpoint for git events
406+ location /events {
407+ proxy_set_header X-Forwarded-For $remote_addr;
408+ proxy_set_header Host $http_host;
409+ proxy_set_header Upgrade websocket;
410+ proxy_set_header Connection Upgrade;
411+ proxy_pass http://localhost:5555;
412+ }
413+ # additional config for SSL/TLS goes here.
414+}
415+416+```
417+418+Remember to use Let's Encrypt or similar to procure a certificate for your
419+knot domain.
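For example, one common option is certbot with the Nginx plugin (adjust to whatever ACME tooling you already use):

```
sudo certbot --nginx -d knot.example.com
```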
420+421+You should now have a running knot server! You can finalize
422+your registration by hitting the `verify` button on the
423+[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
424+a record on your PDS to announce the existence of the knot.
425+426+### Custom paths
427+428+(This section applies to manual setup only. Docker users should edit the mounts
429+in `docker-compose.yml` instead.)
430+431+Right now, the database and repositories of your knot live in `/home/git`. You
432+can move these paths if you'd like to store them in another folder. Be careful
433+when adjusting these paths:
434+435+* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
436+any possible side effects. Remember to restart it once you're done.
437+* Make backups before moving in case something goes wrong.
438+* Make sure the `git` user can read and write from the new paths.
439+440+#### Database
441+442+As an example, let's say the current database is at `/home/git/knotserver.db`,
443+and we want to move it to `/home/git/database/knotserver.db`.
444+445+Copy the current database to the new location. Make sure to copy the `.db-shm`
446+and `.db-wal` files if they exist.
447+448+```
449+mkdir /home/git/database
450+cp /home/git/knotserver.db* /home/git/database
451+```
452+453+In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
454+the new file path (_not_ the directory):
455+456+```
457+KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
458+```
459+460+#### Repositories
461+462+As an example, let's say the repositories are currently in `/home/git`, and we
463+want to move them into `/home/git/repositories`.
464+465+Create the new folder, then move the existing repositories (if there are any):
466+467+```
468+mkdir /home/git/repositories
469+# move all DIDs into the new folder; these will vary for you!
470+mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
471+```
472+473+In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
474+to the new directory:
475+476+```
477+KNOT_REPO_SCAN_PATH=/home/git/repositories
478+```
479+480+Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
481+repository path:
482+483+```
484+sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
485+Match User git
486+ AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
487+ AuthorizedKeysCommandUser nobody
488+EOF
489+```
490+491+Make sure to restart your SSH server!
492+493+#### MOTD (message of the day)
494+495+To change the MOTD ("Welcome to this knot!" by default), edit the
496+`/home/git/motd` file:
497+498+```
499+printf "Hi from this knot!\n" > /home/git/motd
500+```
501+502+Note that you should add a newline at the end if setting a non-empty message
503+since the knot won't do this for you.
504+505+# Spindles
506+507+## Pipelines
508+509+Spindle workflows allow you to write CI/CD pipelines in a
510+simple format. They're located in the `.tangled/workflows`
511+directory at the root of your repository, and are defined
512+using YAML.
513+514+The fields are:
515+516+- [Trigger](#trigger): A **required** field that defines
517+ when a workflow should be triggered.
518+- [Engine](#engine): A **required** field that defines which
519+ engine a workflow should run on.
520+- [Clone options](#clone-options): An **optional** field
521+ that defines how the repository should be cloned.
522+- [Dependencies](#dependencies): An **optional** field that
523+ allows you to list dependencies you may need.
524+- [Environment](#environment): An **optional** field that
525+ allows you to define environment variables.
526+- [Steps](#steps): An **optional** field that allows you to
527+ define what steps should run in the workflow.
528+529+### Trigger
530+531+The first thing to add to a workflow is the trigger, which
532+defines when a workflow runs. This is defined using a `when`
533+field, which takes in a list of conditions. Each condition
534+has the following fields:
535+536+- `event`: This is a **required** field that defines when
537+ your workflow should run. It's a list that can take one or
538+ more of the following values:
539+ - `push`: The workflow should run every time a commit is
540+ pushed to the repository.
541+ - `pull_request`: The workflow should run every time a
542+ pull request is made or updated.
543+ - `manual`: The workflow can be triggered manually.
544+- `branch`: Defines which branches the workflow should run
545+ for. If used with the `push` event, commits to the
546+ branch(es) listed here will trigger the workflow. If used
547+ with the `pull_request` event, updates to pull requests
548+ targeting the branch(es) listed here will trigger the
549+ workflow. This field has no effect with the `manual`
550+ event. Supports glob patterns using `*` and `**` (e.g.,
551+ `main`, `develop`, `release-*`). Either `branch` or `tag`
552+ (or both) must be specified for `push` events.
553+- `tag`: Defines which tags the workflow should run for.
554+ Only used with the `push` event - when tags matching the
555+ pattern(s) listed here are pushed, the workflow will
556+ trigger. This field has no effect with `pull_request` or
557+ `manual` events. Supports glob patterns using `*` and `**`
558+ (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
559+ `tag` (or both) must be specified for `push` events.
560+561+For example, if you'd like to define a workflow that runs
562+when commits are pushed to the `main` and `develop`
563+branches, or when pull requests that target the `main`
564+branch are updated, or manually, you can do so with:
565+566+```yaml
567+when:
568+ - event: ["push", "manual"]
569+ branch: ["main", "develop"]
570+ - event: ["pull_request"]
571+ branch: ["main"]
572+```
573+574+You can also trigger workflows on tag pushes. For instance,
575+to run a deployment workflow when tags matching `v*` are
576+pushed:
577+578+```yaml
579+when:
580+ - event: ["push"]
581+ tag: ["v*"]
582+```
583+584+You can even combine branch and tag patterns in a single
585+constraint (the workflow triggers if either matches):
586+587+```yaml
588+when:
589+ - event: ["push"]
590+ branch: ["main", "release-*"]
591+ tag: ["v*", "stable"]
592+```
593+594+### Engine
595+596+Next is the engine on which the workflow should run, defined
597+using the **required** `engine` field. The currently
598+supported engines are:
599+600+- `nixery`: This uses an instance of
601+ [Nixery](https://nixery.dev) to run steps, which allows
602+ you to add [dependencies](#dependencies) from
603+ Nixpkgs (https://github.com/NixOS/nixpkgs). You can
604+ search for packages on https://search.nixos.org, and
605+ there's a pretty good chance the package(s) you're looking
606+ for will be there.
607+608+Example:
609+610+```yaml
611+engine: "nixery"
612+```
613+614+### Clone options
615+616+When a workflow starts, the first step is to clone the
617+repository. You can customize this behavior using the
618+**optional** `clone` field. It has the following fields:
619+620+- `skip`: Setting this to `true` will skip cloning the
621+ repository. This can be useful if your workflow is doing
622+ something that doesn't require anything from the
623+ repository itself. This is `false` by default.
624+- `depth`: This sets the number of commits, or the "clone
625+ depth", to fetch from the repository. For example, if you
626+ set this to 2, the last 2 commits will be fetched. By
627+ default, the depth is set to 1, meaning only the most
628+ recent commit will be fetched, which is the commit that
629+ triggered the workflow.
630+- `submodules`: If you use Git submodules
631+ (https://git-scm.com/book/en/v2/Git-Tools-Submodules)
632+ in your repository, setting this field to `true` will
633+ recursively fetch all submodules. This is `false` by
634+ default.
635+636+The default settings are:
637+638+```yaml
639+clone:
640+ skip: false
641+ depth: 1
642+ submodules: false
643+```
644+645+### Dependencies
646+647+Usually when you're running a workflow, you'll need
648+additional dependencies. The `dependencies` field lets you
649+define which dependencies to get, and from where. It's a
650+key-value map, with the key being the registry to fetch
651+dependencies from, and the value being the list of
652+dependencies to fetch.
653+654+Say you want to fetch Node.js and Go from `nixpkgs`, and a
655+package called `my_pkg` from your own registry hosted in
656+your repository at
657+`https://tangled.org/@example.com/my_pkg`. You can define
658+those dependencies like so:
659+660+```yaml
661+dependencies:
662+ # nixpkgs
663+ nixpkgs:
664+ - nodejs
665+ - go
666+ # custom registry
667+ git+https://tangled.org/@example.com/my_pkg:
668+ - my_pkg
669+```
670+671+Now these dependencies are available to use in your
672+workflow!
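For example, a later step (see the [Steps](#steps) section) could invoke the tools fetched above directly:

```yaml
steps:
  - name: "Check toolchain"
    command: "go version && node --version"
```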
673+674+### Environment
675+676+The `environment` field allows you to define environment
677+variables that will be available throughout the entire
678+workflow. **Do not put secrets here; these environment
679+variables are visible to anyone viewing the repository. You
680+can add secrets for pipelines in your repository's
681+settings.**
682+683+Example:
684+685+```yaml
686+environment:
687+ GOOS: "linux"
688+ GOARCH: "arm64"
689+ NODE_ENV: "production"
690+ MY_ENV_VAR: "MY_ENV_VALUE"
691+```
692+693+### Steps
694+695+The `steps` field allows you to define what steps should run
696+in the workflow. It's a list of step objects, each with the
697+following fields:
698+699+- `name`: This field allows you to give your step a name.
700+ This name is visible in your workflow runs, and is used to
701+ describe what the step is doing.
702+- `command`: This field allows you to define a command to
703+ run in that step. The step is run in a Bash shell, and the
704+ logs from the command will be visible in the pipelines
705+ page on the Tangled website. The
706+ [dependencies](#dependencies) you added will be available
707+ to use here.
708+- `environment`: Similar to the global
709+ [environment](#environment) config, this **optional**
710+ field is a key-value map that allows you to set
711+ environment variables for the step. **Do not put secrets
712+ here; these environment variables are visible to anyone
713+ viewing the repository. You can add secrets for pipelines
714+ in your repository's settings.**
715+716+Example:
717+718+```yaml
719+steps:
720+ - name: "Build backend"
721+ command: "go build"
722+ environment:
723+ GOOS: "darwin"
724+ GOARCH: "arm64"
725+ - name: "Build frontend"
726+ command: "npm run build"
727+ environment:
728+ NODE_ENV: "production"
729+```
730+731+### Complete workflow
732+733+```yaml
734+# .tangled/workflows/build.yml
735+736+when:
737+ - event: ["push", "manual"]
738+ branch: ["main", "develop"]
739+ - event: ["pull_request"]
740+ branch: ["main"]
741+742+engine: "nixery"
743+744+# using the default values
745+clone:
746+ skip: false
747+ depth: 1
748+ submodules: false
749+750+dependencies:
751+ # nixpkgs
752+ nixpkgs:
753+ - nodejs
754+ - go
755+ # custom registry
756+ git+https://tangled.org/@example.com/my_pkg:
757+ - my_pkg
758+759+environment:
760+ GOOS: "linux"
761+ GOARCH: "arm64"
762+ NODE_ENV: "production"
763+ MY_ENV_VAR: "MY_ENV_VALUE"
764+765+steps:
766+ - name: "Build backend"
767+ command: "go build"
768+ environment:
769+ GOOS: "darwin"
770+ GOARCH: "arm64"
771+ - name: "Build frontend"
772+ command: "npm run build"
773+ environment:
774+ NODE_ENV: "production"
775+```
776+777+If you want another example of a workflow, you can look at
778+the one [Tangled uses to build the
779+project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
780+781+## Self-hosting guide
782+783+### Prerequisites
784+785+* Go
786+* Docker (the only supported backend currently)
787+788+### Configuration
789+790+Spindle is configured using environment variables. The following environment variables are available:
791+792+* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
793+* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
794+* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
795+* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
796+* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
797+* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
798+* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
799+* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
800+* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
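Putting a few of these together, a minimal environment might look like this sketch (the hostname and DID are placeholders):

```shell
export SPINDLE_SERVER_HOSTNAME="spindle.example.com"
export SPINDLE_SERVER_OWNER="did:plc:foobar"

# optional overrides; omit them to keep the defaults listed above
export SPINDLE_SERVER_DB_PATH="/var/lib/spindle/spindle.db"
export SPINDLE_PIPELINES_LOG_DIR="/var/log/spindle"
```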
801+802+### Running spindle
803+804+1. **Set the environment variables.** For example:
805+806+ ```shell
807+ export SPINDLE_SERVER_HOSTNAME="your-hostname"
808+ export SPINDLE_SERVER_OWNER="your-did"
809+ ```
810+811+2. **Build the Spindle binary.**
812+813+ ```shell
814+ cd core
815+ go mod download
816+ go build -o cmd/spindle/spindle cmd/spindle/main.go
817+ ```
818+819+3. **Create the log directory.**
820+821+ ```shell
822+ sudo mkdir -p /var/log/spindle
823+ sudo chown $USER:$USER -R /var/log/spindle
824+ ```
825+826+4. **Run the Spindle binary.**
827+828+ ```shell
829+ ./cmd/spindle/spindle
830+ ```
831+832+Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
833+834+## Architecture
835+836+Spindle is a small CI runner service. Here's a high-level overview of how it operates:
837+838+* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
839+[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
840+* When a new repo record comes through (typically when you add a spindle to a
841+repo from the settings), spindle then resolves the underlying knot and
842+subscribes to repo events (see:
843+[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
844+* The spindle engine then handles execution of the pipeline, with results and
845+logs beamed on the spindle event stream over WebSocket.
846+847+### The engine
848+849+At present, the only supported backend is Docker (and Podman, if Docker
850+compatibility is enabled, so that `/run/docker.sock` is created). Spindle
851+executes each step in the pipeline in a fresh container, with state persisted
852+across steps within the `/tangled/workspace` directory.
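As an illustration of that persistence, two steps written in the manifest format described in the Pipelines section above can hand files to each other through the workspace:

```yaml
steps:
  - name: "Write state"
    command: "echo hello > /tangled/workspace/state.txt"
  - name: "Read state in a fresh container"
    command: "cat /tangled/workspace/state.txt"
```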
853+854+The base image for the container is constructed on the fly using
855+[Nixery](https://nixery.dev), which is handy for caching layers for frequently
856+used packages.
857+858+The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
859+860+## Secrets with OpenBao
861+862+This section covers setting up spindle to use OpenBao for secrets
863+management via OpenBao Proxy instead of the default SQLite backend.
864+865+### Overview
866+867+Spindle now uses OpenBao Proxy for secrets management. The proxy handles
868+authentication automatically using AppRole credentials, while spindle
869+connects to the local proxy instead of directly to the OpenBao server.
870+871+This approach provides better security, automatic token renewal, and
872+simplified application code.
873+874+### Installation
875+876+Install OpenBao from Nixpkgs:
877+878+```bash
879+nix shell nixpkgs#openbao # for a local server
880+```
881+882+### Setup
883+884+The setup process is documented for both local development and production.
885+886+#### Local development
887+888+Start OpenBao in dev mode:
889+890+```bash
891+bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
892+```
893+894+This starts OpenBao on `http://localhost:8201` with a root token.
895+896+Set up environment for bao CLI:
897+898+```bash
899+export BAO_ADDR=http://localhost:8200
900+export BAO_TOKEN=root
901+```
902+903+#### Production
904+905+You would typically use a systemd service with a
906+configuration file. Refer to
907+[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
908+for how this can be achieved using Nix.
909+910+Then, initialize the bao server:
911+912+```bash
913+bao operator init -key-shares=1 -key-threshold=1
914+```
915+916+This will print out an unseal key and a root key. Save them
917+somewhere (like a password manager). Then unseal the vault
918+to begin setting it up:
919+920+```bash
921+bao operator unseal <unseal_key>
922+```
923+924+All steps below remain the same across both dev and
925+production setups.
926+927+#### Configure openbao server
928+929+Create the spindle KV mount:
930+931+```bash
932+bao secrets enable -path=spindle -version=2 kv
933+```
934+935+Set up AppRole authentication and policy:
936+937+Create a policy file `spindle-policy.hcl`:
938+939+```hcl
940+# Full access to spindle KV v2 data
941+path "spindle/data/*" {
942+ capabilities = ["create", "read", "update", "delete"]
943+}
944+945+# Access to metadata for listing and management
946+path "spindle/metadata/*" {
947+ capabilities = ["list", "read", "delete", "update"]
948+}
949+950+# Allow listing at root level
951+path "spindle/" {
952+ capabilities = ["list"]
953+}
954+955+# Required for connection testing and health checks
956+path "auth/token/lookup-self" {
957+ capabilities = ["read"]
958+}
959+```
960+961+Apply the policy and create an AppRole:
962+963+```bash
964+bao policy write spindle-policy spindle-policy.hcl
965+bao auth enable approle
966+bao write auth/approle/role/spindle \
967+ token_policies="spindle-policy" \
968+ token_ttl=1h \
969+ token_max_ttl=4h \
970+ bind_secret_id=true \
971+ secret_id_ttl=0 \
972+ secret_id_num_uses=0
973+```
974+975+Get the credentials:
976+977+```bash
978+# Get role ID (static)
979+ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
980+981+# Generate secret ID
982+SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
983+984+echo "Role ID: $ROLE_ID"
985+echo "Secret ID: $SECRET_ID"
986+```
987+988+#### Create proxy configuration
989+990+Create the credential files:
991+992+```bash
993+# Create directory for OpenBao files
994+mkdir -p /tmp/openbao
995+996+# Save credentials
997+echo "$ROLE_ID" > /tmp/openbao/role-id
998+echo "$SECRET_ID" > /tmp/openbao/secret-id
999+chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1000+```
1001+1002+Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1003+1004+```hcl
1005+# OpenBao server connection
1006+vault {
1007+ address = "http://localhost:8200"
1008+}
1009+1010+# Auto-Auth using AppRole
1011+auto_auth {
1012+ method "approle" {
1013+ mount_path = "auth/approle"
1014+ config = {
1015+ role_id_file_path = "/tmp/openbao/role-id"
1016+ secret_id_file_path = "/tmp/openbao/secret-id"
1017+ }
1018+ }
1019+1020+ # Optional: write token to file for debugging
1021+ sink "file" {
1022+ config = {
1023+ path = "/tmp/openbao/token"
1024+ mode = 0640
1025+ }
1026+ }
1027+}
1028+1029+# Proxy listener for spindle
1030+listener "tcp" {
1031+ address = "127.0.0.1:8201"
1032+ tls_disable = true
1033+}
1034+1035+# Enable API proxy with auto-auth token
1036+api_proxy {
1037+ use_auto_auth_token = true
1038+}
1039+1040+# Enable response caching
1041+cache {
1042+ use_auto_auth_token = true
1043+}
1044+1045+# Logging
1046+log_level = "info"
1047+```
1048+1049+#### Start the proxy
1050+1051+Start OpenBao Proxy:
1052+1053+```bash
1054+bao proxy -config=/tmp/openbao/proxy.hcl
1055+```
1056+1057+The proxy will authenticate with OpenBao and start listening on
1058+`127.0.0.1:8201`.
1059+1060+#### Configure spindle
1061+1062+Set these environment variables for spindle:
1063+1064+```bash
1065+export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1066+export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1067+export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1068+```
1069+1070+On startup, spindle will now connect to the local proxy,
1071+which handles all authentication automatically.
1072+1073+### Production setup for proxy
1074+1075+For production, you'll want to run the proxy as a service.
1076+1077+Place your production configuration in
1078+`/etc/openbao/proxy.hcl` with proper TLS settings for the
1079+vault connection.
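A minimal unit for that might look like the sketch below; the `bao` binary path is an assumption, so point `ExecStart` at wherever it is installed on your system:

```
# /etc/systemd/system/openbao-proxy.service (sketch)
[Unit]
Description=OpenBao Proxy for spindle
After=network.target

[Service]
ExecStart=/usr/local/bin/bao proxy -config=/etc/openbao/proxy.hcl
Restart=on-failure

[Install]
WantedBy=multi-user.target
```

The troubleshooting section below assumes this unit name (`openbao-proxy`) when reading logs with `journalctl`.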
1080+1081+### Verifying setup
1082+1083+Test the proxy directly:
1084+1085+```bash
1086+# Check proxy health
1087+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1088+1089+# Test token lookup through proxy
1090+curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1091+```
1092+1093+Test OpenBao operations through the server:
1094+1095+```bash
1096+# List all secrets
1097+bao kv list spindle/
1098+1099+# Add a test secret via the spindle API, then check it exists
1100+bao kv list spindle/repos/
1101+1102+# Get a specific secret
1103+bao kv get spindle/repos/your_repo_path/SECRET_NAME
1104+```
1105+1106+### How it works
1107+1108+- Spindle connects to OpenBao Proxy on localhost (typically
1109+ port 8200 or 8201)
1110+- The proxy authenticates with OpenBao using AppRole
1111+ credentials
1112+- All spindle requests go through the proxy, which injects
1113+ authentication tokens
1114+- Secrets are stored at
1115+ `spindle/repos/{sanitized_repo_path}/{secret_key}`
1116+- Repository paths like `did:plc:alice/myrepo` become
1117+ `did_plc_alice_myrepo` (see the example after this list)
1118+- The proxy handles all token renewal automatically
1119+- Spindle no longer manages tokens or authentication
1120+ directly
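For example, applying the sanitization rule above, a secret named `MY_SECRET` for the repository `did:plc:alice/myrepo` can be inspected at:

```bash
bao kv get spindle/repos/did_plc_alice_myrepo/MY_SECRET
```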
1121+1122+### Troubleshooting
1123+1124+**Connection refused**: Check that the OpenBao Proxy is
1125+running and listening on the configured address.
1126+1127+**403 errors**: Verify the AppRole credentials are correct
1128+and the policy has the necessary permissions.
1129+1130+**404 route errors**: The spindle KV mount probably doesn't
1131+exist; run the mount creation step again.
1132+1133+**Proxy authentication failures**: Check the proxy logs and
1134+verify the role-id and secret-id files are readable and
1135+contain valid credentials.
1136+1137+**Secret not found after writing**: This can indicate policy
1138+permission issues. Verify the policy includes both
1139+`spindle/data/*` and `spindle/metadata/*` paths with
1140+appropriate capabilities.
1141+1142+Check proxy logs:
1143+1144+```bash
1145+# If running as systemd service
1146+journalctl -u openbao-proxy -f
1147+1148+# If running directly, check the console output
1149+```
1150+1151+Test AppRole authentication manually:
1152+1153+```bash
1154+bao write auth/approle/login \
1155+ role_id="$(cat /tmp/openbao/role-id)" \
1156+ secret_id="$(cat /tmp/openbao/secret-id)"
1157+```
1158+1159+# Migrating knots and spindles
1160+1161+Sometimes, non-backwards compatible changes are made to the
1162+knot/spindle XRPC APIs. If you host a knot or a spindle, you
1163+will need to follow this guide to upgrade. Typically, this
1164+only requires you to deploy the newest version.
1165+1166+This section is laid out in reverse-chronological order.
1167+Newer migration guides are listed first, and older guides
1168+are further down the page.
1169+1170+## Upgrading from v1.8.x
1171+1172+After v1.8.2, the HTTP API for knots and spindles has been
1173+deprecated and replaced with XRPC. Repositories on outdated
1174+knots will not be viewable from the appview. Upgrading is
1175+straightforward however.
1176+1177+For knots:
1178+1179+- Upgrade to the latest tag (v1.9.0 or above)
1180+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1181+ hit the "retry" button to verify your knot
1182+1183+For spindles:
1184+1185+- Upgrade to the latest tag (v1.9.0 or above)
1186+- Head to the [spindle
1187+ dashboard](https://tangled.org/settings/spindles) and hit the
1188+ "retry" button to verify your spindle
1189+1190+## Upgrading from v1.7.x
1191+1192+After v1.7.0, knot secrets have been deprecated. You no
1193+longer need a secret from the appview to run a knot. All
1194+authorized commands to knots are managed via [Inter-Service
1195+Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
1196+Knots will be read-only until upgraded.
1197+1198+Upgrading is quite easy, in essence:
1199+1200+- `KNOT_SERVER_SECRET` is no more; you can remove this
1201+ environment variable entirely (see the sketch after this list)
1202+- `KNOT_SERVER_OWNER` is now required on boot; set this to
1203+ your DID. You can find your DID in the
1204+ [settings](https://tangled.org/settings) page.
1205+- Restart your knot once you have replaced the environment
1206+ variable
1207+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1208+ hit the "retry" button to verify your knot. This simply
1209+ writes a `sh.tangled.knot` record to your PDS.
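For a manual (non-Nix) setup, the corresponding change to `/home/git/.knot.env` is roughly this sketch (the secret value and DID are placeholders):

```diff
-KNOT_SERVER_SECRET=some-old-secret
+KNOT_SERVER_OWNER=did:plc:foobar
```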
1210+1211+If you use the nix module, simply bump the flake to the
1212+latest revision, and change your config block like so:
1213+1214+```diff
1215+ services.tangled.knot = {
1216+ enable = true;
1217+ server = {
1218+- secretFile = /path/to/secret;
1219++ owner = "did:plc:foo";
1220+ };
1221+ };
1222+```
1223+1224+# Hacking on Tangled
1225+1226+We highly recommend [installing
1227+Nix](https://nixos.org/download/) (the package manager)
1228+before working on the codebase. The Nix flake provides a lot
1229+of helpers to get started and, most importantly, builds and
1230+dev shells are entirely deterministic.
1231+1232+To set up your dev environment:
1233+1234+```bash
1235+nix develop
1236+```
1237+1238+Non-Nix users can look at the `devShell` attribute in the
1239+`flake.nix` file to determine necessary dependencies.
1240+1241+## Running the appview
1242+1243+The Nix flake also exposes a few `app` attributes (run `nix
1244+flake show` to see a full list of what the flake provides);
1245+one of the apps runs the appview with the `air`
1246+live-reloader:
1247+1248+```bash
1249+TANGLED_DEV=true nix run .#watch-appview
1250+1251+# TANGLED_DB_PATH might be of interest to point to
1252+# different sqlite DBs
1253+1254+# in a separate shell, you can live-reload tailwind
1255+nix run .#watch-tailwind
1256+```
1257+1258+To authenticate with the appview, you will need Redis and
1259+OAuth JWKs to be set up:
1260+1261+```
1262+# OAuth JWKs should already be set up by the Nix devshell:
1263+echo $TANGLED_OAUTH_CLIENT_SECRET
1264+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1265+1266+echo $TANGLED_OAUTH_CLIENT_KID
1267+1761667908
1268+1269+# if not, you can set it up yourself:
1270+goat key generate -t P-256
1271+Key Type: P-256 / secp256r1 / ES256 private key
1272+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1273+ z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1274+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1275+ did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1276+1277+# the secret key from above
1278+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1279+1280+# Run Redis in a new shell to store OAuth sessions
1281+redis-server
1282+```
1283+1284+## Running knots and spindles
1285+1286+An end-to-end knot setup requires setting up a machine with
1287+`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1288+quite cumbersome. So the Nix flake provides a
1289+`nixosConfiguration` that sets all of this up for you.
1290+1291+<details>
1292+ <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1293+1294+ In order to build Tangled's dev VM on macOS, you will
1295+ first need to set up a Linux Nix builder. The recommended
1296+ way to do so is to run a [`darwin.linux-builder`
1297+ VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1298+ and to register it in `nix.conf` as a builder for Linux
1299+ with the same architecture as your Mac (`aarch64-linux` if
1300+ you are using Apple Silicon).
1301+1302+ > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1303+ > the Tangled repo so that it doesn't conflict with the other VM. For example,
1304+ > you can do
1305+ >
1306+ > ```shell
1307+ > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1308+ > ```
1309+ >
1310+ > to store the builder VM in a temporary dir.
1311+ >
1312+ > You should read and follow [all the other instructions][darwin builder vm] to
1313+ > avoid subtle problems.
1314+1315+ Alternatively, you can use any other method to set up a
1316+ Linux machine with Nix installed that you can `sudo ssh`
1317+ into (in other words, root user on your Mac has to be able
1318+ to ssh into the Linux machine without entering a password)
1319+ and that has the same architecture as your Mac. See
1320+ [remote builder
1321+ instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1322+ for how to register such a builder in `nix.conf`.
1323+1324+ > WARNING: If you'd like to use
1325+ > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1326+ > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1327+ > ssh` works can be tricky. It seems to be [possible with
1328+ > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1329+1330+</details>
1331+1332+To begin, grab your DID from http://localhost:3000/settings.
1333+Then, set `TANGLED_VM_KNOT_OWNER` and
1334+`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1335+lightweight NixOS VM like so:
1336+1337+```bash
1338+nix run --impure .#vm
1339+1340+# type `poweroff` at the shell to exit the VM
1341+```
1342+1343+This starts a knot on port 6444 and a spindle on port 6555,
1344+with `ssh` exposed on port 2222.
1345+1346+Once the services are running, head to
1347+http://localhost:3000/settings/knots and hit "Verify". It should
1348+verify the ownership of the services instantly if everything
1349+went smoothly.
1350+1351+You can push repositories to this VM with this ssh config
1352+block on your main machine:
1353+1354+```bash
1355+Host nixos-shell
1356+ Hostname localhost
1357+ Port 2222
1358+ User git
1359+ IdentityFile ~/.ssh/my_tangled_key
1360+```
1361+1362+Set up a remote called `local-dev` on a git repo:
1363+1364+```bash
1365+git remote add local-dev git@nixos-shell:user/repo
1366+git push local-dev main
1367+```
1368+1369+The above VM should already be running a spindle on
1370+`localhost:6555`. Head to http://localhost:3000/settings/spindles and
1371+hit "Verify". You can then configure each repository to use
1372+this spindle and run CI jobs.
1373+1374+Of interest when debugging spindles:
1375+1376+```
1377+# Service logs from journald:
1378+journalctl -xeu spindle
1379+1380+# CI job logs from disk:
1381+ls /var/log/spindle
1382+1383+# Debugging spindle database:
1384+sqlite3 /var/lib/spindle/spindle.db
1385+1386+# litecli has a nicer REPL interface:
1387+litecli /var/lib/spindle/spindle.db
1388+```
1389+1390+If for any reason you wish to disable either one of the
1391+services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1392+`services.tangled.spindle.enable` (or
1393+`services.tangled.knot.enable`) to `false`.
1394+1395+# Contribution guide
1396+1397+## Commit guidelines
1398+1399+We follow a commit style similar to the Go project. Please keep commits:
1400+1401+* **atomic**: each commit should represent one logical change
1402+* **descriptive**: the commit message should clearly describe what the
1403+change does and why it's needed
1404+1405+### Message format
1406+1407+```
1408+<service/top-level directory>/<affected package/directory>: <short summary of change>
1409+1410+Optional longer description can go here, if necessary. Explain what the
1411+change does and why, especially if not obvious. Reference relevant
1412+issues or PRs when applicable. These can be links for now since we don't
1413+auto-link issues/PRs yet.
1414+```
1415+1416+Here are some examples:
1417+1418+```
1419+appview/state: fix token expiry check in middleware
1420+1421+The previous check did not account for clock drift, leading to premature
1422+token invalidation.
1423+```
1424+1425+```
1426+knotserver/git/service: improve error checking in upload-pack
1427+```
1428+1429+1430+### General notes
1431+1432+- PRs get merged "as-is" (fast-forward), like applying a patch series
1433+using `git am`. At present, there is no squashing, so please author
1434+your commits as they would appear on `master`, following the above
1435+guidelines.
1436+- If there is a lot of nesting, for example "appview:
1437+pages/templates/repo/fragments: ...", these can be truncated down to
1438+just "appview: repo/fragments: ...". If the change affects a lot of
1439+subdirectories, you may abbreviate to just the top-level names, e.g.
1440+"appview: ..." or "knotserver: ...".
1441+- Keep commits lowercased with no trailing period.
1442+- Use the imperative mood in the summary line (e.g., "fix bug" not
1443+"fixed bug" or "fixes bug").
1444+- Try to keep the summary line under 72 characters, but we aren't too
1445+fussed about this.
1446+- Follow the same formatting for PR titles if filled manually.
1447+- Don't include unrelated changes in the same commit.
1448+- Avoid noisy commit messages like "wip" or "final fix"; rewrite history
1449+before submitting if necessary.
1450+1451+## Code formatting
1452+1453+We use a variety of tools to format our code, and multiplex them with
1454+[`treefmt`](https://treefmt.com). All you need to do to format your changes
1455+is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1456+1457+## Proposals for bigger changes
1458+1459+Small fixes like typos, minor bugs, or trivial refactors can be
1460+submitted directly as PRs.
1461+1462+For larger changes -- especially those introducing new features, significant
1463+refactoring, or altering system behavior -- please open a proposal first. This
1464+helps us evaluate the scope, design, and potential impact before implementation.
1465+1466+Create a new issue titled:
1467+1468+```
1469+proposal: <affected scope>: <summary of change>
1470+```
1471+1472+In the description, explain:
1473+1474+- What the change is
1475+- Why it's needed
1476+- How you plan to implement it (roughly)
1477+- Any open questions or tradeoffs
1478+1479+We'll use the issue thread to discuss and refine the idea before moving
1480+forward.
1481+1482+## Developer Certificate of Origin (DCO)
1483+1484+We require all contributors to certify that they have the right to
1485+submit the code they're contributing. To do this, we follow the
1486+[Developer Certificate of Origin
1487+(DCO)](https://developercertificate.org/).
1488+1489+By signing your commits, you're stating that the contribution is your
1490+own work, or that you have the right to submit it under the project's
1491+license. This helps us keep things clean and legally sound.
1492+1493+To sign your commit, just add the `-s` flag when committing:
1494+1495+```sh
1496+git commit -s -m "your commit message"
1497+```
1498+1499+This appends a line like:
1500+1501+```
1502+Signed-off-by: Your Name <your.email@example.com>
1503+```
1504+1505+We won't merge commits if they aren't signed off. If you forget, you can
1506+amend the last commit like this:
1507+1508+```sh
1509+git commit --amend -s
1510+```
1511+1512+If you're submitting a PR with multiple commits, make sure each one is
1513+signed.
1514+1515+For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
1516+to make it sign off commits in the tangled repo:
1517+1518+```shell
1519+# Safety check, should say "No matching config key..."
1520+jj config list templates.commit_trailers
1521+# The command below may need to be adjusted if the command above returned something.
1522+jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
1523+```
1524+1525+Refer to the [jujutsu
1526+documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1527+for more information.
-136
docs/contributing.md
···1-# tangled contributing guide
2-3-## commit guidelines
4-5-We follow a commit style similar to the Go project. Please keep commits:
6-7-* **atomic**: each commit should represent one logical change
8-* **descriptive**: the commit message should clearly describe what the
9-change does and why it's needed
10-11-### message format
12-13-```
14-<service/top-level directory>/<affected package/directory>: <short summary of change>
15-16-17-Optional longer description can go here, if necessary. Explain what the
18-change does and why, especially if not obvious. Reference relevant
19-issues or PRs when applicable. These can be links for now since we don't
20-auto-link issues/PRs yet.
21-```
22-23-Here are some examples:
24-25-```
26-appview/state: fix token expiry check in middleware
27-28-The previous check did not account for clock drift, leading to premature
29-token invalidation.
30-```
31-32-```
33-knotserver/git/service: improve error checking in upload-pack
34-```
35-36-37-### general notes
38-39-- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40-using `git am`. At present, there is no squashing -- so please author
41-your commits as they would appear on `master`, following the above
42-guidelines.
43-- If there is a lot of nesting, for example "appview:
44-pages/templates/repo/fragments: ...", these can be truncated down to
45-just "appview: repo/fragments: ...". If the change affects a lot of
46-subdirectories, you may abbreviate to just the top-level names, e.g.
47-"appview: ..." or "knotserver: ...".
48-- Keep commits lowercased with no trailing period.
49-- Use the imperative mood in the summary line (e.g., "fix bug" not
50-"fixed bug" or "fixes bug").
51-- Try to keep the summary line under 72 characters, but we aren't too
52-fussed about this.
53-- Follow the same formatting for PR titles if filled manually.
54-- Don't include unrelated changes in the same commit.
55-Avoid noisy commit messages like "wip" or "final fix" -- rewrite history
56-before submitting if necessary.
57-58-## code formatting
59-60-We use a variety of tools to format our code, and multiplex them with
61-[`treefmt`](https://treefmt.com): all you need to do to format your changes
62-is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63-64-## proposals for bigger changes
65-66-Small fixes like typos, minor bugs, or trivial refactors can be
67-submitted directly as PRs.
68-69-For larger changes -- especially those introducing new features, significant
70-refactoring, or altering system behavior -- please open a proposal first. This
71-helps us evaluate the scope, design, and potential impact before implementation.
72-73-### proposal format
74-75-Create a new issue titled:
76-77-```
78-proposal: <affected scope>: <summary of change>
79-```
80-81-In the description, explain:
82-83-- What the change is
84-- Why it's needed
85-- How you plan to implement it (roughly)
86-- Any open questions or tradeoffs
87-88-We'll use the issue thread to discuss and refine the idea before moving
89-forward.
90-91-## developer certificate of origin (DCO)
92-93-We require all contributors to certify that they have the right to
94-submit the code they're contributing. To do this, we follow the
95-[Developer Certificate of Origin
96-(DCO)](https://developercertificate.org/).
97-98-By signing your commits, you're stating that the contribution is your
99-own work, or that you have the right to submit it under the project's
100-license. This helps us keep things clean and legally sound.
101-102-To sign your commit, just add the `-s` flag when committing:
103-104-```sh
105-git commit -s -m "your commit message"
106-```
107-108-This appends a line like:
109-110-```
111-Signed-off-by: Your Name <your.email@example.com>
112-```
113-114-We won't merge commits if they aren't signed off. If you forget, you can
115-amend the last commit like this:
116-117-```sh
118-git commit --amend -s
119-```
120-121-If you're submitting a PR with multiple commits, make sure each one is
122-signed.
123-124-For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125-to make it sign off commits in the tangled repo:
126-127-```shell
128-# Safety check, should say "No matching config key..."
129-jj config list templates.commit_trailers
130-# The command below may need to be adjusted if the command above returned something.
131-jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132-```
133-134-Refer to the [jj
135-documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136-for more information.
···1-# hacking on tangled
2-3-We highly recommend [installing
4-nix](https://nixos.org/download/) (the package manager)
5-before working on the codebase. The nix flake provides a lot
6-of helpers to get started and most importantly, builds and
7-dev shells are entirely deterministic.
8-9-To set up your dev environment:
10-11-```bash
12-nix develop
13-```
14-15-Non-nix users can look at the `devShell` attribute in the
16-`flake.nix` file to determine necessary dependencies.
17-18-## running the appview
19-20-The nix flake also exposes a few `app` attributes (run `nix
21-flake show` to see a full list of what the flake provides),
22-one of the apps runs the appview with the `air`
23-live-reloader:
24-25-```bash
26-TANGLED_DEV=true nix run .#watch-appview
27-28-# TANGLED_DB_PATH might be of interest to point to
29-# different sqlite DBs
30-31-# in a separate shell, you can live-reload tailwind
32-nix run .#watch-tailwind
33-```
34-35-To authenticate with the appview, you will need redis and
36-OAUTH JWKs to be setup:
37-38-```
39-# oauth jwks should already be setup by the nix devshell:
40-echo $TANGLED_OAUTH_CLIENT_SECRET
41-z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42-43-echo $TANGLED_OAUTH_CLIENT_KID
44-1761667908
45-46-# if not, you can set it up yourself:
47-goat key generate -t P-256
48-Key Type: P-256 / secp256r1 / ES256 private key
49-Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50- z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51-Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52- did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53-54-# the secret key from above
55-export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
57-# run redis in a new shell to store oauth sessions
58-redis-server
59-```
60-61-## running knots and spindles
62-63-An end-to-end knot setup requires setting up a machine with
64-`sshd`, `AuthorizedKeysCommand`, and git user, which is
65-quite cumbersome. So the nix flake provides a
66-`nixosConfiguration` to do so.
67-68-<details>
69- <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
70-71- In order to build Tangled's dev VM on macOS, you will
72- first need to set up a Linux Nix builder. The recommended
73- way to do so is to run a [`darwin.linux-builder`
74- VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75- and to register it in `nix.conf` as a builder for Linux
76- with the same architecture as your Mac (`linux-aarch64` if
77- you are using Apple Silicon).
78-79- > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80- > the tangled repo so that it doesn't conflict with the other VM. For example,
81- > you can do
82- >
83- > ```shell
84- > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85- > ```
86- >
87- > to store the builder VM in a temporary dir.
88- >
89- > You should read and follow [all the other instructions][darwin builder vm] to
90- > avoid subtle problems.
91-92- Alternatively, you can use any other method to set up a
93- Linux machine with `nix` installed that you can `sudo ssh`
94- into (in other words, root user on your Mac has to be able
95- to ssh into the Linux machine without entering a password)
96- and that has the same architecture as your Mac. See
97- [remote builder
98- instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99- for how to register such a builder in `nix.conf`.
100-101- > WARNING: If you'd like to use
102- > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103- > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104- > ssh` works can be tricky. It seems to be [possible with
105- > Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106-107-</details>
108-109-To begin, grab your DID from http://localhost:3000/settings.
110-Then, set `TANGLED_VM_KNOT_OWNER` and
111-`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112-lightweight NixOS VM like so:
113-114-```bash
115-nix run --impure .#vm
116-117-# type `poweroff` at the shell to exit the VM
118-```
119-120-This starts a knot on port 6444, a spindle on port 6555
121-with `ssh` exposed on port 2222.
122-123-Once the services are running, head to
124-http://localhost:3000/settings/knots and hit verify. It should
125-verify the ownership of the services instantly if everything
126-went smoothly.
127-128-You can push repositories to this VM with this ssh config
129-block on your main machine:
130-131-```bash
132-Host nixos-shell
133- Hostname localhost
134- Port 2222
135- User git
136- IdentityFile ~/.ssh/my_tangled_key
137-```
138-139-Set up a remote called `local-dev` on a git repo:
140-141-```bash
142-git remote add local-dev git@nixos-shell:user/repo
143-git push local-dev main
144-```
145-146-### running a spindle
147-148-The above VM should already be running a spindle on
149-`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150-hit verify. You can then configure each repository to use
151-this spindle and run CI jobs.
152-153-Of interest when debugging spindles:
154-155-```
156-# service logs from journald:
157-journalctl -xeu spindle
158-159-# CI job logs from disk:
160-ls /var/log/spindle
161-162-# debugging spindle db:
163-sqlite3 /var/lib/spindle/spindle.db
164-165-# litecli has a nicer REPL interface:
166-litecli /var/lib/spindle/spindle.db
167-```
168-169-If for any reason you wish to disable either one of the
170-services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171-`services.tangled.spindle.enable` (or
172-`services.tangled.knot.enable`) to `false`.
···1-# knot self-hosting guide
2-3-So you want to run your own knot server? Great! Here are a few prerequisites:
4-5-1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6-2. A (sub)domain name. People generally use `knot.example.com`.
7-3. A valid SSL certificate for your domain.
8-9-There's a couple of ways to get started:
10-* NixOS: refer to
11-[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12-* Docker: Documented at
13-[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14-(community maintained: support is not guaranteed!)
15-* Manual: Documented below.
16-17-## manual setup
18-19-First, clone this repository:
20-21-```
22-git clone https://tangled.org/@tangled.org/core
23-```
24-25-Then, build the `knot` CLI. This is the knot administration and operation tool.
26-For the purpose of this guide, we're only concerned with these subcommands:
27-28-* `knot server`: the main knot server process, typically run as a
29-supervised service
30-* `knot guard`: handles role-based access control for git over SSH
31-(you'll never have to run this yourself)
32-* `knot keys`: fetches SSH keys associated with your knot; we'll use
33-this to generate the SSH `AuthorizedKeysCommand`
34-35-```
36-cd core
37-export CGO_ENABLED=1
38-go build -o knot ./cmd/knot
39-```
40-41-Next, move the `knot` binary to a location owned by `root` --
42-`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43-44-```
45-sudo mv knot /usr/local/bin/knot
46-sudo chown root:root /usr/local/bin/knot
47-```
48-49-This is necessary because SSH `AuthorizedKeysCommand` requires [really
50-specific permissions](https://stackoverflow.com/a/27638306). The
51-`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52-retrieve a user's public SSH keys dynamically for authentication. Let's
53-set that up.
54-55-```
56-sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57-Match User git
58- AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59- AuthorizedKeysCommandUser nobody
60-EOF
61-```
62-63-Then, reload `sshd`:
64-65-```
66-sudo systemctl reload ssh
67-```
68-69-Next, create the `git` user. We'll use the `git` user's home directory
70-to store repositories:
71-72-```
73-sudo adduser git
74-```
75-76-Create `/home/git/.knot.env` with the following, updating the values as
77-necessary. The `KNOT_SERVER_OWNER` should be set to your
78-DID, you can find your DID in the [Settings](https://tangled.sh/settings) page.
79-80-```
81-KNOT_REPO_SCAN_PATH=/home/git
82-KNOT_SERVER_HOSTNAME=knot.example.com
83-APPVIEW_ENDPOINT=https://tangled.sh
84-KNOT_SERVER_OWNER=did:plc:foobar
85-KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86-KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87-```
88-89-If you run a Linux distribution that uses systemd, you can use the provided
90-service file to run the server. Copy
91-[`knotserver.service`](/systemd/knotserver.service)
92-to `/etc/systemd/system/`. Then, run:
93-94-```
95-systemctl enable knotserver
96-systemctl start knotserver
97-```
98-99-The last step is to configure a reverse proxy like Nginx or Caddy to front your
100-knot. Here's an example configuration for Nginx:
101-102-```
103-server {
104- listen 80;
105- listen [::]:80;
106- server_name knot.example.com;
107-108- location / {
109- proxy_pass http://localhost:5555;
110- proxy_set_header Host $host;
111- proxy_set_header X-Real-IP $remote_addr;
112- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113- proxy_set_header X-Forwarded-Proto $scheme;
114- }
115-116- # wss endpoint for git events
117- location /events {
118- proxy_set_header X-Forwarded-For $remote_addr;
119- proxy_set_header Host $http_host;
120- proxy_set_header Upgrade websocket;
121- proxy_set_header Connection Upgrade;
122- proxy_pass http://localhost:5555;
123- }
124- # additional config for SSL/TLS go here.
125-}
126-127-```
128-129-Remember to use Let's Encrypt or similar to procure a certificate for your
130-knot domain.
131-132-You should now have a running knot server! You can finalize
133-your registration by hitting the `verify` button on the
134-[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135-a record on your PDS to announce the existence of the knot.
136-137-### custom paths
138-139-(This section applies to manual setup only. Docker users should edit the mounts
140-in `docker-compose.yml` instead.)
141-142-Right now, the database and repositories of your knot live in `/home/git`. You
143-can move these paths if you'd like to store them in another folder. Be careful
144-when adjusting these paths:
145-146-* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147-any possible side effects. Remember to restart it once you're done.
148-* Make backups before moving in case something goes wrong.
149-* Make sure the `git` user can read and write from the new paths.
150-151-#### database
152-153-As an example, let's say the current database is at `/home/git/knotserver.db`,
154-and we want to move it to `/home/git/database/knotserver.db`.
155-156-Copy the current database to the new location. Make sure to copy the `.db-shm`
157-and `.db-wal` files if they exist.
158-159-```
160-mkdir /home/git/database
161-cp /home/git/knotserver.db* /home/git/database
162-```
163-164-In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165-the new file path (_not_ the directory):
166-167-```
168-KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169-```
170-171-#### repositories
172-173-As an example, let's say the repositories are currently in `/home/git`, and we
174-want to move them into `/home/git/repositories`.
175-176-Create the new folder, then move the existing repositories (if there are any):
177-178-```
179-mkdir /home/git/repositories
180-# move all DIDs into the new folder; these will vary for you!
181-mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182-```
183-184-In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185-to the new directory:
186-187-```
188-KNOT_REPO_SCAN_PATH=/home/git/repositories
189-```
190-191-Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192-repository path:
193-194-```
195-sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196-Match User git
197- AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198- AuthorizedKeysCommandUser nobody
199-EOF
200-```
201-202-Make sure to restart your SSH server!
203-204-#### MOTD (message of the day)
205-206-To configure the MOTD used ("Welcome to this knot!" by default), edit the
207-`/home/git/motd` file:
208-209-```
210-printf "Hi from this knot!\n" > /home/git/motd
211-```
212-213-Note that you should add a newline at the end if setting a non-empty message
214-since the knot won't do this for you.
···1-# Migrations
2-3-This document is laid out in reverse-chronological order.
4-Newer migration guides are listed first, and older guides
5-are further down the page.
6-7-## Upgrading from v1.8.x
8-9-After v1.8.2, the HTTP API for knots and spindles has been
10-deprecated and replaced with XRPC. Repositories on outdated
11-knots will not be viewable from the appview. Upgrading is
12-straightforward, however.
13-14-For knots:
15-16-- Upgrade to latest tag (v1.9.0 or above)
17-- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18- hit the "retry" button to verify your knot
19-20-For spindles:
21-22-- Upgrade to latest tag (v1.9.0 or above)
23-- Head to the [spindle
24- dashboard](https://tangled.org/settings/spindles) and hit the
25- "retry" button to verify your spindle
26-27-## Upgrading from v1.7.x
28-29-After v1.7.0, knot secrets have been deprecated. You no
30-longer need a secret from the appview to run a knot. All
31-authorized commands to knots are managed via [Inter-Service
32-Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33-Knots will be read-only until upgraded.
34-35-Upgrading is quite easy, in essence:
36-37-- `KNOT_SERVER_SECRET` is no more, you can remove this
38- environment variable entirely
39-- `KNOT_SERVER_OWNER` is now required on boot, set this to
40- your DID. You can find your DID in the
41- [settings](https://tangled.org/settings) page.
42-- Restart your knot once you have replaced the environment
43- variable
44-- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45- hit the "retry" button to verify your knot. This simply
46- writes a `sh.tangled.knot` record to your PDS.
47-48-If you use the nix module, simply bump the flake to the
49-latest revision, and change your config block like so:
50-51-```diff
52- services.tangled.knot = {
53- enable = true;
54- server = {
55-- secretFile = /path/to/secret;
56-+ owner = "did:plc:foo";
57- };
58- };
59-```
···1-# spindle architecture
2-3-Spindle is a small CI runner service. Here's a high level overview of how it operates:
4-5-* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6-[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7-* when a new repo record comes through (typically when you add a spindle to a
8-repo from the settings), spindle then resolves the underlying knot and
9-subscribes to repo events (see:
10-[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11-* the spindle engine then handles execution of the pipeline, with results and
12-logs beamed on the spindle event stream over wss
13-14-### the engine
15-16-At present, the only supported backend is Docker (and Podman, if Docker
17-compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18-executes each step in the pipeline in a fresh container, with state persisted
19-across steps within the `/tangled/workspace` directory.
20-21-The base image for the container is constructed on the fly using
22-[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23-used packages.
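As a rough illustration of that idea (not the actual spindle code; the helper
below is hypothetical), a Nixery image reference can be built by joining the
requested packages into the image path, with Nixery's `shell` meta-package
providing a usable base environment:

```go
package main

import (
	"fmt"
	"strings"
)

// nixeryImage assembles an image reference such as
// "nixery.tangled.sh/shell/go/nodejs" from a list of nixpkgs packages.
// Hypothetical helper, shown only to illustrate the idea.
func nixeryImage(host string, pkgs []string) string {
	parts := append([]string{"shell"}, pkgs...)
	return host + "/" + strings.Join(parts, "/")
}

func main() {
	fmt.Println(nixeryImage("nixery.tangled.sh", []string{"go", "nodejs"}))
}
```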
24-25-The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
···1-# spindle self-hosting guide
2-3-## prerequisites
4-5-* Go
6-* Docker (the only supported backend currently)
7-8-## configuration
9-10-Spindle is configured using environment variables. The following environment variables are available:
11-12-* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13-* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14-* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15-* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16-* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17-* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18-* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19-* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20-* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21-22-## running spindle
23-24-1. **Set the environment variables.** For example:
25-26- ```shell
27- export SPINDLE_SERVER_HOSTNAME="your-hostname"
28- export SPINDLE_SERVER_OWNER="your-did"
29- ```
30-31-2. **Build the Spindle binary.**
32-33- ```shell
34- cd core
35- go mod download
36- go build -o cmd/spindle/spindle cmd/spindle/main.go
37- ```
38-39-3. **Create the log directory.**
40-41- ```shell
42- sudo mkdir -p /var/log/spindle
43- sudo chown $USER:$USER -R /var/log/spindle
44- ```
45-46-4. **Run the Spindle binary.**
47-48- ```shell
49- ./cmd/spindle/spindle
50- ```
51-52-Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
···1-# spindle secrets with openbao
2-3-This document covers setting up Spindle to use OpenBao for secrets
4-management via OpenBao Proxy instead of the default SQLite backend.
5-6-## overview
7-8-Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9-authentication automatically using AppRole credentials, while Spindle
10-connects to the local proxy instead of directly to the OpenBao server.
11-12-This approach provides better security, automatic token renewal, and
13-simplified application code.
14-15-## installation
16-17-Install OpenBao from nixpkgs:
18-19-```bash
20-nix shell nixpkgs#openbao # for a local server
21-```
22-23-## setup
24-25-The setup process is documented for both local development and production.
26-27-### local development
28-29-Start OpenBao in dev mode:
30-31-```bash
32-bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33-```
34-35-This starts OpenBao on `http://localhost:8201` with a root token.
36-37-Set up environment for bao CLI:
38-39-```bash
40-export BAO_ADDR=http://localhost:8200
41-export BAO_TOKEN=root
42-```
43-44-### production
45-46-You would typically use a systemd service with a configuration file. Refer to
47-[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48-achieved using Nix.
49-50-Then, initialize the bao server:
51-```bash
52-bao operator init -key-shares=1 -key-threshold=1
53-```
54-55-This will print out an unseal key and a root key. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56-```bash
57-bao operator unseal <unseal_key>
58-```
59-60-All steps below remain the same across both dev and production setups.
61-62-### configure openbao server
63-64-Create the spindle KV mount:
65-66-```bash
67-bao secrets enable -path=spindle -version=2 kv
68-```
69-70-Set up AppRole authentication and policy:
71-72-Create a policy file `spindle-policy.hcl`:
73-74-```hcl
75-# Full access to spindle KV v2 data
76-path "spindle/data/*" {
77- capabilities = ["create", "read", "update", "delete"]
78-}
79-80-# Access to metadata for listing and management
81-path "spindle/metadata/*" {
82- capabilities = ["list", "read", "delete", "update"]
83-}
84-85-# Allow listing at root level
86-path "spindle/" {
87- capabilities = ["list"]
88-}
89-90-# Required for connection testing and health checks
91-path "auth/token/lookup-self" {
92- capabilities = ["read"]
93-}
94-```
95-96-Apply the policy and create an AppRole:
97-98-```bash
99-bao policy write spindle-policy spindle-policy.hcl
100-bao auth enable approle
101-bao write auth/approle/role/spindle \
102- token_policies="spindle-policy" \
103- token_ttl=1h \
104- token_max_ttl=4h \
105- bind_secret_id=true \
106- secret_id_ttl=0 \
107- secret_id_num_uses=0
108-```
109-110-Get the credentials:
111-112-```bash
113-# Get role ID (static)
114-ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115-116-# Generate secret ID
117-SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118-119-echo "Role ID: $ROLE_ID"
120-echo "Secret ID: $SECRET_ID"
121-```
122-123-### create proxy configuration
124-125-Create the credential files:
126-127-```bash
128-# Create directory for OpenBao files
129-mkdir -p /tmp/openbao
130-131-# Save credentials
132-echo "$ROLE_ID" > /tmp/openbao/role-id
133-echo "$SECRET_ID" > /tmp/openbao/secret-id
134-chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135-```
136-137-Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138-139-```hcl
140-# OpenBao server connection
141-vault {
142- address = "http://localhost:8200"
143-}
144-145-# Auto-Auth using AppRole
146-auto_auth {
147- method "approle" {
148- mount_path = "auth/approle"
149- config = {
150- role_id_file_path = "/tmp/openbao/role-id"
151- secret_id_file_path = "/tmp/openbao/secret-id"
152- }
153- }
154-155- # Optional: write token to file for debugging
156- sink "file" {
157- config = {
158- path = "/tmp/openbao/token"
159- mode = 0640
160- }
161- }
162-}
163-164-# Proxy listener for Spindle
165-listener "tcp" {
166- address = "127.0.0.1:8201"
167- tls_disable = true
168-}
169-170-# Enable API proxy with auto-auth token
171-api_proxy {
172- use_auto_auth_token = true
173-}
174-175-# Enable response caching
176-cache {
177- use_auto_auth_token = true
178-}
179-180-# Logging
181-log_level = "info"
182-```
183-184-### start the proxy
185-186-Start OpenBao Proxy:
187-188-```bash
189-bao proxy -config=/tmp/openbao/proxy.hcl
190-```
191-192-The proxy will authenticate with OpenBao and start listening on
193-`127.0.0.1:8201`.
194-195-### configure spindle
196-197-Set these environment variables for Spindle:
198-199-```bash
200-export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201-export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202-export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203-```
204-205-Start Spindle:
206-207-Spindle will now connect to the local proxy, which handles all
208-authentication automatically.
209-210-## production setup for proxy
211-212-For production, you'll want to run the proxy as a service.
213-214-Place your production configuration in `/etc/openbao/proxy.hcl` with
215-proper TLS settings for the vault connection.
216-217-## verifying setup
218-219-Test the proxy directly:
220-221-```bash
222-# Check proxy health
223-curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224-225-# Test token lookup through proxy
226-curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227-```
228-229-Test OpenBao operations through the server:
230-231-```bash
232-# List all secrets
233-bao kv list spindle/
234-235-# Add a test secret via Spindle API, then check it exists
236-bao kv list spindle/repos/
237-238-# Get a specific secret
239-bao kv get spindle/repos/your_repo_path/SECRET_NAME
240-```
241-242-## how it works
243-244-- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245-- The proxy authenticates with OpenBao using AppRole credentials
246-- All Spindle requests go through the proxy, which injects authentication tokens
247-- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248-- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo` (see the sketch after this list)
249-- The proxy handles all token renewal automatically
250-- Spindle no longer manages tokens or authentication directly
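For illustration, the repository-path sanitization mentioned above can be done
with a simple character replacement. This is a hypothetical sketch, not the
actual Spindle code:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeRepoPath flattens a repo path into a single KV key segment,
// e.g. "did:plc:alice/myrepo" -> "did_plc_alice_myrepo".
// Hypothetical helper, shown only to illustrate the mapping above.
func sanitizeRepoPath(repo string) string {
	return strings.NewReplacer(":", "_", "/", "_").Replace(repo)
}

func main() {
	fmt.Println(sanitizeRepoPath("did:plc:alice/myrepo"))
}
```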
251-252-## troubleshooting
253-254-**Connection refused**: Check that the OpenBao Proxy is running and
255-listening on the configured address.
256-257-**403 errors**: Verify the AppRole credentials are correct and the policy
258-has the necessary permissions.
259-260-**404 route errors**: The spindle KV mount probably doesn't exist - run
261-the mount creation step again.
262-263-**Proxy authentication failures**: Check the proxy logs and verify the
264-role-id and secret-id files are readable and contain valid credentials.
265-266-**Secret not found after writing**: This can indicate policy permission
267-issues. Verify the policy includes both `spindle/data/*` and
268-`spindle/metadata/*` paths with appropriate capabilities.
269-270-Check proxy logs:
271-272-```bash
273-# If running as systemd service
274-journalctl -u openbao-proxy -f
275-276-# If running directly, check the console output
277-```
278-279-Test AppRole authentication manually:
280-281-```bash
282-bao write auth/approle/login \
283- role_id="$(cat /tmp/openbao/role-id)" \
284- secret_id="$(cat /tmp/openbao/secret-id)"
285-```
···1-# spindle pipelines
2-3-Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4-5-The fields are:
6-7-- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8-- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9-- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10-- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11-- [Environment](#environment): An **optional** field that allows you to define environment variables.
12-- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13-14-## Trigger
15-16-The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17-18-- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19- - `push`: The workflow should run every time a commit is pushed to the repository.
20- - `pull_request`: The workflow should run every time a pull request is made or updated.
21- - `manual`: The workflow can be triggered manually.
22-- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23-- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24-25-For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26-27-```yaml
28-when:
29- - event: ["push", "manual"]
30- branch: ["main", "develop"]
31- - event: ["pull_request"]
32- branch: ["main"]
33-```
34-35-You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36-37-```yaml
38-when:
39- - event: ["push"]
40- tag: ["v*"]
41-```
42-43-You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44-45-```yaml
46-when:
47- - event: ["push"]
48- branch: ["main", "release-*"]
49- tag: ["v*", "stable"]
50-```
51-52-## Engine
53-54-Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55-56-- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57-58-Example:
59-60-```yaml
61-engine: "nixery"
62-```
63-64-## Clone options
65-66-When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67-68-- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69-- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70-- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71-72-The default settings are:
73-74-```yaml
75-clone:
76- skip: false
77- depth: 1
78- submodules: false
79-```
80-81-## Dependencies
82-83-Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84-85-Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86-87-```yaml
88-dependencies:
89- # nixpkgs
90- nixpkgs:
91- - nodejs
92- - go
93- # custom registry
94- git+https://tangled.org/@example.com/my_pkg:
95- - my_pkg
96-```
97-98-Now these dependencies are available to use in your workflow!
99-100-## Environment
101-102-The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103-104-Example:
105-106-```yaml
107-environment:
108- GOOS: "linux"
109- GOARCH: "arm64"
110- NODE_ENV: "production"
111- MY_ENV_VAR: "MY_ENV_VALUE"
112-```
113-114-## Steps
115-116-The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117-118-- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119-- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120-- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121-122-Example:
123-124-```yaml
125-steps:
126- - name: "Build backend"
127- command: "go build"
128- environment:
129- GOOS: "darwin"
130- GOARCH: "arm64"
131- - name: "Build frontend"
132- command: "npm run build"
133- environment:
134- NODE_ENV: "production"
135-```
136-137-## Complete workflow
138-139-```yaml
140-# .tangled/workflows/build.yml
141-142-when:
143- - event: ["push", "manual"]
144- branch: ["main", "develop"]
145- - event: ["pull_request"]
146- branch: ["main"]
147-148-engine: "nixery"
149-150-# using the default values
151-clone:
152- skip: false
153- depth: 1
154- submodules: false
155-156-dependencies:
157- # nixpkgs
158- nixpkgs:
159- - nodejs
160- - go
161- # custom registry
162- git+https://tangled.org/@example.com/my_pkg:
163- - my_pkg
164-165-environment:
166- GOOS: "linux"
167- GOARCH: "arm64"
168- NODE_ENV: "production"
169- MY_ENV_VAR: "MY_ENV_VALUE"
170-171-steps:
172- - name: "Build backend"
173- command: "go build"
174- environment:
175- GOOS: "darwin"
176- GOARCH: "arm64"
177- - name: "Build frontend"
178- command: "npm run build"
179- environment:
180- NODE_ENV: "production"
181-```
182-183-If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
···8 var = builtins.getEnv name;
9 in
10 if var == ""
11- then throw "\$${name} must be defined, see docs/hacking.md for more details"
12 else var;
13 envVarOr = name: default: let
14 var = builtins.getEnv name;
···8 var = builtins.getEnv name;
9 in
10 if var == ""
11+ then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
12 else var;
13 envVarOr = name: default: let
14 var = builtins.getEnv name;
+3-3
readme.md
···1011## docs
1213-* [knot hosting guide](/docs/knot-hosting.md)
14-* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
15-* [hacking on tangled](/docs/hacking.md)
1617## security
18
···1011## docs
1213+- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
14+- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
15+- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)
1617## security
18
+5-1
spindle/engine/engine.go
···70 }
71 defer eng.DestroyWorkflow(ctx, wid)
7273- wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
000074 if err != nil {
75 l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
76 wfLogger = nil
···70 }
71 defer eng.DestroyWorkflow(ctx, wid)
7273+ secretValues := make([]string, len(allSecrets))
74+ for i, s := range allSecrets {
75+ secretValues[i] = s.Value
76+ }
77+ wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues)
78 if err != nil {
79 l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
80 wfLogger = nil
···1+package models
2+3+import (
4+ "encoding/base64"
5+ "testing"
6+)
7+8+func TestSecretMask_BasicMasking(t *testing.T) {
9+ mask := NewSecretMask([]string{"mysecret123"})
10+11+ input := "The password is mysecret123 in this log"
12+ expected := "The password is *** in this log"
13+14+ result := mask.Mask(input)
15+ if result != expected {
16+ t.Errorf("expected %q, got %q", expected, result)
17+ }
18+}
19+20+func TestSecretMask_Base64Encoded(t *testing.T) {
21+ secret := "mysecret123"
22+ mask := NewSecretMask([]string{secret})
23+24+ b64 := base64.StdEncoding.EncodeToString([]byte(secret))
25+ input := "Encoded: " + b64
26+ expected := "Encoded: ***"
27+28+ result := mask.Mask(input)
29+ if result != expected {
30+ t.Errorf("expected %q, got %q", expected, result)
31+ }
32+}
33+34+func TestSecretMask_Base64NoPadding(t *testing.T) {
35+ // "test" encodes to "dGVzdA==" with padding
36+ secret := "test"
37+ mask := NewSecretMask([]string{secret})
38+39+ b64NoPad := "dGVzdA" // base64 without padding
40+ input := "Token: " + b64NoPad
41+ expected := "Token: ***"
42+43+ result := mask.Mask(input)
44+ if result != expected {
45+ t.Errorf("expected %q, got %q", expected, result)
46+ }
47+}
48+49+func TestSecretMask_MultipleSecrets(t *testing.T) {
50+ mask := NewSecretMask([]string{"password1", "apikey123"})
51+52+ input := "Using password1 and apikey123 for auth"
53+ expected := "Using *** and *** for auth"
54+55+ result := mask.Mask(input)
56+ if result != expected {
57+ t.Errorf("expected %q, got %q", expected, result)
58+ }
59+}
60+61+func TestSecretMask_MultipleOccurrences(t *testing.T) {
62+ mask := NewSecretMask([]string{"secret"})
63+64+ input := "secret appears twice: secret"
65+ expected := "*** appears twice: ***"
66+67+ result := mask.Mask(input)
68+ if result != expected {
69+ t.Errorf("expected %q, got %q", expected, result)
70+ }
71+}
72+73+func TestSecretMask_ShortValues(t *testing.T) {
74+ mask := NewSecretMask([]string{"abc", "xy", ""})
75+76+ if mask == nil {
77+ t.Fatal("expected non-nil mask")
78+ }
79+80+ input := "abc xy test"
81+ expected := "*** *** test"
82+ result := mask.Mask(input)
83+ if result != expected {
84+ t.Errorf("expected %q, got %q", expected, result)
85+ }
86+}
87+88+func TestSecretMask_NilMask(t *testing.T) {
89+ var mask *SecretMask
90+91+ input := "some input text"
92+ result := mask.Mask(input)
93+ if result != input {
94+ t.Errorf("expected %q, got %q", input, result)
95+ }
96+}
97+98+func TestSecretMask_EmptyInput(t *testing.T) {
99+ mask := NewSecretMask([]string{"secret"})
100+101+ result := mask.Mask("")
102+ if result != "" {
103+ t.Errorf("expected empty string, got %q", result)
104+ }
105+}
106+107+func TestSecretMask_NoMatch(t *testing.T) {
108+ mask := NewSecretMask([]string{"secretvalue"})
109+110+ input := "nothing to mask here"
111+ result := mask.Mask(input)
112+ if result != input {
113+ t.Errorf("expected %q, got %q", input, result)
114+ }
115+}
116+117+func TestSecretMask_EmptySecretsList(t *testing.T) {
118+ mask := NewSecretMask([]string{})
119+120+ if mask != nil {
121+ t.Error("expected nil mask for empty secrets list")
122+ }
123+}
124+125+func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
126+ mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
127+128+ input := "Using validpassword here"
129+ expected := "Using *** here"
130+131+ result := mask.Mask(input)
132+ if result != expected {
133+ t.Errorf("expected %q, got %q", expected, result)
134+ }
135+}
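The masker itself is not part of this hunk. Purely as a reading aid, here is a
minimal sketch that would satisfy the behaviour exercised by the tests above
(masking raw values plus their padded and unpadded base64 encodings, returning
a nil mask for an empty list, and treating a nil mask as a no-op); the real
implementation in spindle/models may differ:

```go
package models

import (
	"encoding/base64"
	"strings"
)

// SecretMask redacts known secret values (and their base64 encodings)
// from log output, replacing them with "***".
type SecretMask struct {
	replacer *strings.Replacer
}

// NewSecretMask builds a mask for the given secret values. Empty values are
// ignored; if nothing remains, nil is returned and Mask becomes a no-op.
func NewSecretMask(secrets []string) *SecretMask {
	var pairs []string
	for _, s := range secrets {
		if s == "" {
			continue
		}
		b64 := base64.StdEncoding.EncodeToString([]byte(s))
		// Mask the raw value, its padded base64 form, and the
		// padding-stripped form (longer patterns listed first).
		pairs = append(pairs,
			s, "***",
			b64, "***",
			strings.TrimRight(b64, "="), "***",
		)
	}
	if len(pairs) == 0 {
		return nil
	}
	return &SecretMask{replacer: strings.NewReplacer(pairs...)}
}

// Mask returns input with all known secret values redacted.
func (m *SecretMask) Mask(input string) string {
	if m == nil {
		return input
	}
	return m.replacer.Replace(input)
}
```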
+1-1
spindle/motd
···20 **
21 ********
2223-This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle
2425Most API routes are under /xrpc/
···20 **
21 ********
2223+This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles
2425Most API routes are under /xrpc/
+31-13
spindle/server.go
···8 "log/slog"
9 "maps"
10 "net/http"
01112 "github.com/go-chi/chi/v5"
13 "tangled.org/core/api/tangled"
···30)
3132//go:embed motd
33-var motd []byte
3435const (
36 rbacDomain = "thisserver"
37)
3839type Spindle struct {
40- jc *jetstream.JetstreamClient
41- db *db.DB
42- e *rbac.Enforcer
43- l *slog.Logger
44- n *notifier.Notifier
45- engs map[string]models.Engine
46- jq *queue.Queue
47- cfg *config.Config
48- ks *eventconsumer.Consumer
49- res *idresolver.Resolver
50- vault secrets.Manager
0051}
5253// New creates a new Spindle server with the provided configuration and engines.
···128 cfg: cfg,
129 res: resolver,
130 vault: vault,
0131 }
132133 err = e.AddSpindle(rbacDomain)
···201 return s.e
202}
20300000000000000204// Start starts the Spindle server (blocking).
205func (s *Spindle) Start(ctx context.Context) error {
206 // starts a job queue runner in the background
···246 mux := chi.NewRouter()
247248 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
249- w.Write(motd)
250 })
251 mux.HandleFunc("/events", s.Events)
252 mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
···8 "log/slog"
9 "maps"
10 "net/http"
11+ "sync"
1213 "github.com/go-chi/chi/v5"
14 "tangled.org/core/api/tangled"
···31)
3233//go:embed motd
34+var defaultMotd []byte
3536const (
37 rbacDomain = "thisserver"
38)
3940type Spindle struct {
41+ jc *jetstream.JetstreamClient
42+ db *db.DB
43+ e *rbac.Enforcer
44+ l *slog.Logger
45+ n *notifier.Notifier
46+ engs map[string]models.Engine
47+ jq *queue.Queue
48+ cfg *config.Config
49+ ks *eventconsumer.Consumer
50+ res *idresolver.Resolver
51+ vault secrets.Manager
52+ motd []byte
53+ motdMu sync.RWMutex
54}
5556// New creates a new Spindle server with the provided configuration and engines.
···131 cfg: cfg,
132 res: resolver,
133 vault: vault,
134+ motd: defaultMotd,
135 }
136137 err = e.AddSpindle(rbacDomain)
···205 return s.e
206}
207208+// SetMotdContent sets custom MOTD content, replacing the embedded default.
209+func (s *Spindle) SetMotdContent(content []byte) {
210+ s.motdMu.Lock()
211+ defer s.motdMu.Unlock()
212+ s.motd = content
213+}
214+215+// GetMotdContent returns the current MOTD content.
216+func (s *Spindle) GetMotdContent() []byte {
217+ s.motdMu.RLock()
218+ defer s.motdMu.RUnlock()
219+ return s.motd
220+}
221+222// Start starts the Spindle server (blocking).
223func (s *Spindle) Start(ctx context.Context) error {
224 // starts a job queue runner in the background
···264 mux := chi.NewRouter()
265266 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
267+ w.Write(s.GetMotdContent())
268 })
269 mux.HandleFunc("/events", s.Events)
270 mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
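As a usage note, the new setter lets a caller replace the embedded MOTD at
startup. A hypothetical helper (the package, path, and wiring here are
illustrative and not part of this change; only SetMotdContent is) might look
like:

```go
package motdutil

import (
	"log"
	"os"
)

// motdSetter is the narrow slice of *spindle.Spindle this helper needs:
// just the SetMotdContent method added above.
type motdSetter interface {
	SetMotdContent([]byte)
}

// LoadCustomMotd installs an operator-provided MOTD file if it exists,
// leaving the embedded default in place otherwise.
func LoadCustomMotd(s motdSetter, path string) {
	data, err := os.ReadFile(path)
	if err != nil {
		log.Printf("using default motd: %v", err)
		return
	}
	s.SetMotdContent(data)
}
```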
···7475// used by html elements as a unique ID for hrefs
76func (d *Diff) Id() string {
00077 return d.Name.New
78}
79
···7475// used by html elements as a unique ID for hrefs
76func (d *Diff) Id() string {
77+ if d.IsDelete {
78+ return d.Name.Old
79+ }
80 return d.Name.New
81}
82