···47474848// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
4949type RepoTree_TreeEntry struct {
5050- // is_file: Whether this entry is a file
5151- Is_file bool `json:"is_file" cborgen:"is_file"`
5252- // is_subtree: Whether this entry is a directory/subtree
5353- Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
5450 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
5551 // mode: File mode
5652 Mode string `json:"mode" cborgen:"mode"`
+4
api/tangled/tangledrepo.go
···3030 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
3131 // spindle: CI runner to send jobs to and receive results from
3232 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
3333+ // topics: Topics related to the repo
3434+ Topics []string `json:"topics,omitempty" cborgen:"topics,omitempty"`
3535+ // website: Any URI related to the repo
3636+ Website *string `json:"website,omitempty" cborgen:"website,omitempty"`
3337}
+6-45
appview/commitverify/verify.go
···33import (
44 "log"
5566- "github.com/go-git/go-git/v5/plumbing/object"
76 "tangled.org/core/appview/db"
87 "tangled.org/core/appview/models"
98 "tangled.org/core/crypto"
···3534 return ""
3635}
37363838-func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
3939- ndCommits := []types.NiceDiff{}
4040- for _, commit := range commits {
4141- ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
4242- }
4343- return GetVerifiedCommits(e, emailToDid, ndCommits)
4444-}
4545-4646-func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
3737+func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
4738 vcs := VerifiedCommits{}
48394940 didPubkeyCache := make(map[string][]models.PublicKey)
50415142 for _, commit := range ndCommits {
5252- c := commit.Commit
5353-5454- committerEmail := c.Committer.Email
4343+ committerEmail := commit.Committer.Email
5544 if did, exists := emailToDid[committerEmail]; exists {
5645 // check if we've already fetched public keys for this did
5746 pubKeys, ok := didPubkeyCache[did]
···6756 }
68576958 // try to verify with any associated pubkeys
5959+ payload := commit.Payload()
6060+ signature := commit.PGPSignature
7061 for _, pk := range pubKeys {
7171- if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
6262+ if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
72637364 fp, err := crypto.SSHFingerprint(pk.Key)
7465 if err != nil {
7566 log.Println("error computing ssh fingerprint:", err)
7667 }
77687878- vc := verifiedCommit{fingerprint: fp, hash: c.This}
6969+ vc := verifiedCommit{fingerprint: fp, hash: commit.This}
7970 vcs[vc] = struct{}{}
8071 break
8172 }
···86778778 return vcs, nil
8879}
8989-9090-// ObjectCommitToNiceDiff is a compatibility function to convert a
9191-// commit object into a NiceDiff structure.
9292-func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
9393- var niceDiff types.NiceDiff
9494-9595- // set commit information
9696- niceDiff.Commit.Message = c.Message
9797- niceDiff.Commit.Author = c.Author
9898- niceDiff.Commit.This = c.Hash.String()
9999- niceDiff.Commit.Committer = c.Committer
100100- niceDiff.Commit.Tree = c.TreeHash.String()
101101- niceDiff.Commit.PGPSignature = c.PGPSignature
102102-103103- changeId, ok := c.ExtraHeaders["change-id"]
104104- if ok {
105105- niceDiff.Commit.ChangedId = string(changeId)
106106- }
107107-108108- // set parent hash if available
109109- if len(c.ParentHashes) > 0 {
110110- niceDiff.Commit.Parent = c.ParentHashes[0].String()
111111- }
112112-113113- // XXX: Stats and Diff fields are typically populated
114114- // after fetching the actual diff information, which isn't
115115- // directly available in the commit object itself.
116116-117117- return niceDiff
118118-}
···11+// Copyright 2021 The Gitea Authors. All rights reserved.
22+// SPDX-License-Identifier: MIT
33+44+package bleveutil
55+66+import (
77+ "github.com/blevesearch/bleve/v2"
88+)
99+1010+// FlushingBatch is a batch of operations that automatically flushes to the
1111+// underlying index once it reaches a certain size.
1212+type FlushingBatch struct {
1313+ maxBatchSize int
1414+ batch *bleve.Batch
1515+ index bleve.Index
1616+}
1717+1818+// NewFlushingBatch creates a new flushing batch for the specified index. Once
1919+// the number of operations in the batch reaches the specified limit, the batch
2020+// automatically flushes its operations to the index.
2121+func NewFlushingBatch(index bleve.Index, maxBatchSize int) *FlushingBatch {
2222+ return &FlushingBatch{
2323+ maxBatchSize: maxBatchSize,
2424+ batch: index.NewBatch(),
2525+ index: index,
2626+ }
2727+}
2828+2929+// Index add a new index to batch
3030+func (b *FlushingBatch) Index(id string, data any) error {
3131+ if err := b.batch.Index(id, data); err != nil {
3232+ return err
3333+ }
3434+ return b.flushIfFull()
3535+}
3636+3737+// Delete add a delete index to batch
3838+func (b *FlushingBatch) Delete(id string) error {
3939+ b.batch.Delete(id)
4040+ return b.flushIfFull()
4141+}
4242+4343+func (b *FlushingBatch) flushIfFull() error {
4444+ if b.batch.Size() < b.maxBatchSize {
4545+ return nil
4646+ }
4747+ return b.Flush()
4848+}
4949+5050+// Flush submit the batch and create a new one
5151+func (b *FlushingBatch) Flush() error {
5252+ err := b.index.Batch(b.batch)
5353+ if err != nil {
5454+ return err
5555+ }
5656+ b.batch = b.index.NewBatch()
5757+ return nil
5858+}
···77)
// Star records a single star: who starred what, and when.
type Star struct {
	// Did is the DID of the account that created the star.
	Did     string
	// RepoAt is the AT-URI of the starred subject.
	RepoAt  syntax.ATURI
	// Created is when the star record was created.
	Created time.Time
	// Rkey is the record key of the star record.
	Rkey    string
}

// RepoStar is used for reverse mapping to repos
type RepoStar struct {
	Star
	// Repo is the resolved repository the star points at.
	Repo *Repo
}

// StringStar is used for reverse mapping to strings
type StringStar struct {
	Star
	// String is the resolved string the star points at.
	String *String
}
···44 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
5566 <p class="text-lg">
77- tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
77+ Tangled is a decentralized Git hosting and collaboration platform.
88 </p>
99 <p class="text-lg">
1010- we envision a place where developers have complete ownership of their
1010+ We envision a place where developers have complete ownership of their
1111 code, open source communities can freely self-govern and most
1212 importantly, coding can be social and fun again.
1313 </p>
···55 "crypto/sha256"
66 "encoding/base64"
77 "fmt"
88- "strings"
98109 "github.com/hiddeco/sshsig"
1110 "golang.org/x/crypto/ssh"
1212- "tangled.org/core/types"
1311)
14121513func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···2826 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
2927 // to sha-512 for all key types anyway.
3028 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
3131- return err, err == nil
3232-}
33293434-// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
3535-// essentially the git cat-file output but without the gpgsig header.
3636-//
3737-// Caveats: signature verification will fail on commits with more than one parent,
3838-// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
3939-// and we are unable to reconstruct the payload correctly.
4040-//
4141-// Ideally this should directly operate on an *object.Commit.
4242-func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
4343- signature := commit.Commit.PGPSignature
4444-4545- author := bytes.NewBuffer([]byte{})
4646- committer := bytes.NewBuffer([]byte{})
4747- commit.Commit.Author.Encode(author)
4848- commit.Commit.Committer.Encode(committer)
4949-5050- payload := strings.Builder{}
5151-5252- fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
5353- if commit.Commit.Parent != "" {
5454- fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
5555- }
5656- fmt.Fprintf(&payload, "author %s\n", author.String())
5757- fmt.Fprintf(&payload, "committer %s\n", committer.String())
5858- if commit.Commit.ChangedId != "" {
5959- fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
6060- }
6161- fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
6262-6363- return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
3030+ return err, err == nil
6431}
65326633// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+19-9
docs/hacking.md
···37373838```
3939# oauth jwks should already be setup by the nix devshell:
4040-echo $TANGLED_OAUTH_JWKS
4141-{"crv":"P-256","d":"tELKHYH-Dko6qo4ozYcVPE1ah6LvXHFV2wpcWpi8ab4","kid":"1753352226","kty":"EC","x":"mRzYpLzAGq74kJez9UbgGfV040DxgsXpMbaVsdy8RZs","y":"azqqXzUYywMlLb2Uc5AVG18nuLXyPnXr4kI4T39eeIc"}
4040+echo $TANGLED_OAUTH_CLIENT_SECRET
4141+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
4242+4343+echo $TANGLED_OAUTH_CLIENT_KID
4444+1761667908
42454346# if not, you can set it up yourself:
4444-go build -o genjwks.out ./cmd/genjwks
4545-export TANGLED_OAUTH_JWKS="$(./genjwks.out)"
4747+goat key generate -t P-256
4848+Key Type: P-256 / secp256r1 / ES256 private key
4949+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
5050+ z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
5151+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
5252+ did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
5353+5454+# the secret key from above
5555+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
46564757# run redis in at a new shell to store oauth sessions
4858redis-server
···107117# type `poweroff` at the shell to exit the VM
108118```
109119110110-This starts a knot on port 6000, a spindle on port 6555
120120+This starts a knot on port 6444, a spindle on port 6555
111121with `ssh` exposed on port 2222.
112122113123Once the services are running, head to
114114-http://localhost:3000/knots and hit verify. It should
124124+http://localhost:3000/settings/knots and hit verify. It should
115125verify the ownership of the services instantly if everything
116126went smoothly.
117127···136146### running a spindle
137147138148The above VM should already be running a spindle on
139139-`localhost:6555`. Head to http://localhost:3000/spindles and
149149+`localhost:6555`. Head to http://localhost:3000/settings/spindles and
140150hit verify. You can then configure each repository to use
141151this spindle and run CI jobs.
142152···158168159169If for any reason you wish to disable either one of the
160170services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161161-`services.tangled-spindle.enable` (or
162162-`services.tangled-knot.enable`) to `false`.
171171+`services.tangled.spindle.enable` (or
172172+`services.tangled.knot.enable`) to `false`.
+3-2
docs/knot-hosting.md
···3939```
40404141Next, move the `knot` binary to a location owned by `root` --
4242-`/usr/local/bin/knot` is a good choice:
4242+`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43434444```
4545sudo mv knot /usr/local/bin/knot
4646+sudo chown root:root /usr/local/bin/knot
4647```
47484849This is necessary because SSH `AuthorizedKeysCommand` requires [really
···130131131132You should now have a running knot server! You can finalize
132133your registration by hitting the `verify` button on the
133133-[/knots](https://tangled.org/knots) page. This simply creates
134134+[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
134135a record on your PDS to announce the existence of the knot.
135136136137### custom paths
+4-4
docs/migrations.md
···1414For knots:
15151616- Upgrade to latest tag (v1.9.0 or above)
1717-- Head to the [knot dashboard](https://tangled.org/knots) and
1717+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1818 hit the "retry" button to verify your knot
19192020For spindles:
21212222- Upgrade to latest tag (v1.9.0 or above)
2323- Head to the [spindle
2424- dashboard](https://tangled.org/spindles) and hit the
2424+ dashboard](https://tangled.org/settings/spindles) and hit the
2525 "retry" button to verify your spindle
26262727## Upgrading from v1.7.x
···4141 [settings](https://tangled.org/settings) page.
4242- Restart your knot once you have replaced the environment
4343 variable
4444-- Head to the [knot dashboard](https://tangled.org/knots) and
4444+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
4545 hit the "retry" button to verify your knot. This simply
4646 writes a `sh.tangled.knot` record to your PDS.
4747···4949latest revision, and change your config block like so:
50505151```diff
5252- services.tangled-knot = {
5252+ services.tangled.knot = {
5353 enable = true;
5454 server = {
5555- secretFile = /path/to/secret;
+19-1
docs/spindle/pipeline.md
···1919 - `push`: The workflow should run every time a commit is pushed to the repository.
2020 - `pull_request`: The workflow should run every time a pull request is made or updated.
2121 - `manual`: The workflow can be triggered manually.
2222-- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
2222+- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
2323+- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
23242425For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
2526···2930 branch: ["main", "develop"]
3031 - event: ["pull_request"]
3132 branch: ["main"]
3333+```
3434+3535+You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
3636+3737+```yaml
3838+when:
3939+ - event: ["push"]
4040+ tag: ["v*"]
4141+```
4242+4343+You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
4444+4545+```yaml
4646+when:
4747+ - event: ["push"]
4848+ branch: ["main", "release-*"]
4949+ tag: ["v*", "stable"]
3250```
33513452## Engine
···11+package db
22+33+import (
44+ "context"
55+ "database/sql"
66+ "log/slog"
77+ "strings"
88+99+ _ "github.com/mattn/go-sqlite3"
1010+ "tangled.org/core/log"
1111+)
1212+1313+type DB struct {
1414+ db *sql.DB
1515+ logger *slog.Logger
1616+}
1717+1818+func Setup(ctx context.Context, dbPath string) (*DB, error) {
1919+ // https://github.com/mattn/go-sqlite3#connection-string
2020+ opts := []string{
2121+ "_foreign_keys=1",
2222+ "_journal_mode=WAL",
2323+ "_synchronous=NORMAL",
2424+ "_auto_vacuum=incremental",
2525+ }
2626+2727+ logger := log.FromContext(ctx)
2828+ logger = log.SubLogger(logger, "db")
2929+3030+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
3131+ if err != nil {
3232+ return nil, err
3333+ }
3434+3535+ conn, err := db.Conn(ctx)
3636+ if err != nil {
3737+ return nil, err
3838+ }
3939+ defer conn.Close()
4040+4141+ _, err = conn.ExecContext(ctx, `
4242+ create table if not exists known_dids (
4343+ did text primary key
4444+ );
4545+4646+ create table if not exists public_keys (
4747+ id integer primary key autoincrement,
4848+ did text not null,
4949+ key text not null,
5050+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
5151+ unique(did, key),
5252+ foreign key (did) references known_dids(did) on delete cascade
5353+ );
5454+5555+ create table if not exists _jetstream (
5656+ id integer primary key autoincrement,
5757+ last_time_us integer not null
5858+ );
5959+6060+ create table if not exists events (
6161+ rkey text not null,
6262+ nsid text not null,
6363+ event text not null, -- json
6464+ created integer not null default (strftime('%s', 'now')),
6565+ primary key (rkey, nsid)
6666+ );
6767+6868+ create table if not exists migrations (
6969+ id integer primary key autoincrement,
7070+ name text unique
7171+ );
7272+ `)
7373+ if err != nil {
7474+ return nil, err
7575+ }
7676+7777+ return &DB{
7878+ db: db,
7979+ logger: logger,
8080+ }, nil
8181+}
-64
knotserver/db/init.go
···11-package db
22-33-import (
44- "database/sql"
55- "strings"
66-77- _ "github.com/mattn/go-sqlite3"
88-)
99-1010-type DB struct {
1111- db *sql.DB
1212-}
1313-1414-func Setup(dbPath string) (*DB, error) {
1515- // https://github.com/mattn/go-sqlite3#connection-string
1616- opts := []string{
1717- "_foreign_keys=1",
1818- "_journal_mode=WAL",
1919- "_synchronous=NORMAL",
2020- "_auto_vacuum=incremental",
2121- }
2222-2323- db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
2424- if err != nil {
2525- return nil, err
2626- }
2727-2828- // NOTE: If any other migration is added here, you MUST
2929- // copy the pattern in appview: use a single sql.Conn
3030- // for every migration.
3131-3232- _, err = db.Exec(`
3333- create table if not exists known_dids (
3434- did text primary key
3535- );
3636-3737- create table if not exists public_keys (
3838- id integer primary key autoincrement,
3939- did text not null,
4040- key text not null,
4141- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
4242- unique(did, key),
4343- foreign key (did) references known_dids(did) on delete cascade
4444- );
4545-4646- create table if not exists _jetstream (
4747- id integer primary key autoincrement,
4848- last_time_us integer not null
4949- );
5050-5151- create table if not exists events (
5252- rkey text not null,
5353- nsid text not null,
5454- event text not null, -- json
5555- created integer not null default (strftime('%s', 'now')),
5656- primary key (rkey, nsid)
5757- );
5858- `)
5959- if err != nil {
6060- return nil, err
6161- }
6262-6363- return &DB{db: db}, nil
6464-}
+2-3
knotserver/events.go
···88 "time"
991010 "github.com/gorilla/websocket"
1111+ "tangled.org/core/log"
1112)
12131314var upgrader = websocket.Upgrader{
···1617}
17181819func (h *Knot) Events(w http.ResponseWriter, r *http.Request) {
1919- l := h.l.With("handler", "OpLog")
2020+ l := log.SubLogger(h.l, "eventstream")
2021 l.Debug("received new connection")
21222223 conn, err := upgrader.Upgrade(w, r, nil)
···7576 }
7677 case <-time.After(30 * time.Second):
7778 // send a keep-alive
7878- l.Debug("sent keepalive")
7979 if err = conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
8080 l.Error("failed to write control", "err", err)
8181 }
···8989 h.l.Error("failed to fetch events from db", "err", err, "cursor", cursor)
9090 return err
9191 }
9292- h.l.Debug("ops", "ops", events)
93929493 for _, event := range events {
9594 // first extract the inner json into a map
···11-#!/bin/bash
22-33-# Variables
44-BINARY_NAME="appview"
55-BINARY_PATH=".bin/app"
66-SERVER="95.111.206.63"
77-USER="appview"
88-99-# SCP the binary to root's home directory
1010-scp "$BINARY_PATH" root@$SERVER:/root/"$BINARY_NAME"
1111-1212-# SSH into the server and perform the necessary operations
1313-ssh root@$SERVER <<EOF
1414- set -e # Exit on error
1515-1616- # Move binary to /usr/local/bin and set executable permissions
1717- mv /root/$BINARY_NAME /usr/local/bin/$BINARY_NAME
1818- chmod +x /usr/local/bin/$BINARY_NAME
1919-2020- su appview
2121- cd ~
2222- ./reset.sh
2323-EOF
2424-2525-echo "Deployment complete."
2626-
-5
scripts/generate-jwks.sh
···11-#! /usr/bin/env bash
22-33-set -e
44-55-go run ./cmd/genjwks/
+31
sets/gen.go
···11+package sets
22+33+import (
44+ "math/rand"
55+ "reflect"
66+ "testing/quick"
77+)
88+99+func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value {
1010+ s := New[T]()
1111+1212+ var zero T
1313+ itemType := reflect.TypeOf(zero)
1414+1515+ for {
1616+ if s.Len() >= size {
1717+ break
1818+ }
1919+2020+ item, ok := quick.Value(itemType, rand)
2121+ if !ok {
2222+ continue
2323+ }
2424+2525+ if val, ok := item.Interface().(T); ok {
2626+ s.Insert(val)
2727+ }
2828+ }
2929+3030+ return reflect.ValueOf(s)
3131+}
+35
sets/readme.txt
···11+sets
22+----
set data structure for go with generics and iterators. the
api is supposed to mimic rust's std::collections::HashSet api.
55+66+ s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4}))
77+ s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6}))
88+99+ union := sets.Collect(s1.Union(s2))
1010+ intersect := sets.Collect(s1.Intersection(s2))
1111+ diff := sets.Collect(s1.Difference(s2))
1212+ symdiff := sets.Collect(s1.SymmetricDifference(s2))
1313+1414+ s1.Len() // 4
1515+ s1.Contains(1) // true
1616+ s1.IsEmpty() // false
1717+ s1.IsSubset(s2) // true
1818+ s1.IsSuperset(s2) // false
1919+ s1.IsDisjoint(s2) // false
2020+2121+ if exists := s1.Insert(1); exists {
2222+ // already existed in set
2323+ }
2424+2525+ if existed := s1.Remove(1); existed {
2626+ // existed in set, now removed
2727+ }
2828+2929+3030+testing
3131+-------
includes property-based tests using the wonderful
testing/quick package!
3434+3535+ go test -v
+174
sets/set.go
···11+package sets
22+33+import (
44+ "iter"
55+ "maps"
66+)
77+88+type Set[T comparable] struct {
99+ data map[T]struct{}
1010+}
1111+1212+func New[T comparable]() Set[T] {
1313+ return Set[T]{
1414+ data: make(map[T]struct{}),
1515+ }
1616+}
1717+1818+func (s *Set[T]) Insert(item T) bool {
1919+ _, exists := s.data[item]
2020+ s.data[item] = struct{}{}
2121+ return !exists
2222+}
2323+2424+func Singleton[T comparable](item T) Set[T] {
2525+ n := New[T]()
2626+ _ = n.Insert(item)
2727+ return n
2828+}
2929+3030+func (s *Set[T]) Remove(item T) bool {
3131+ _, exists := s.data[item]
3232+ if exists {
3333+ delete(s.data, item)
3434+ }
3535+ return exists
3636+}
3737+3838+func (s Set[T]) Contains(item T) bool {
3939+ _, exists := s.data[item]
4040+ return exists
4141+}
4242+4343+func (s Set[T]) Len() int {
4444+ return len(s.data)
4545+}
4646+4747+func (s Set[T]) IsEmpty() bool {
4848+ return len(s.data) == 0
4949+}
5050+5151+func (s *Set[T]) Clear() {
5252+ s.data = make(map[T]struct{})
5353+}
5454+5555+func (s Set[T]) All() iter.Seq[T] {
5656+ return func(yield func(T) bool) {
5757+ for item := range s.data {
5858+ if !yield(item) {
5959+ return
6060+ }
6161+ }
6262+ }
6363+}
6464+6565+func (s Set[T]) Clone() Set[T] {
6666+ return Set[T]{
6767+ data: maps.Clone(s.data),
6868+ }
6969+}
7070+7171+func (s Set[T]) Union(other Set[T]) iter.Seq[T] {
7272+ if s.Len() >= other.Len() {
7373+ return chain(s.All(), other.Difference(s))
7474+ } else {
7575+ return chain(other.All(), s.Difference(other))
7676+ }
7777+}
7878+7979+func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] {
8080+ return func(yield func(T) bool) {
8181+ for _, seq := range seqs {
8282+ for item := range seq {
8383+ if !yield(item) {
8484+ return
8585+ }
8686+ }
8787+ }
8888+ }
8989+}
9090+9191+func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] {
9292+ return func(yield func(T) bool) {
9393+ for item := range s.data {
9494+ if other.Contains(item) {
9595+ if !yield(item) {
9696+ return
9797+ }
9898+ }
9999+ }
100100+ }
101101+}
102102+103103+func (s Set[T]) Difference(other Set[T]) iter.Seq[T] {
104104+ return func(yield func(T) bool) {
105105+ for item := range s.data {
106106+ if !other.Contains(item) {
107107+ if !yield(item) {
108108+ return
109109+ }
110110+ }
111111+ }
112112+ }
113113+}
114114+115115+func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] {
116116+ return func(yield func(T) bool) {
117117+ for item := range s.data {
118118+ if !other.Contains(item) {
119119+ if !yield(item) {
120120+ return
121121+ }
122122+ }
123123+ }
124124+ for item := range other.data {
125125+ if !s.Contains(item) {
126126+ if !yield(item) {
127127+ return
128128+ }
129129+ }
130130+ }
131131+ }
132132+}
133133+134134+func (s Set[T]) IsSubset(other Set[T]) bool {
135135+ for item := range s.data {
136136+ if !other.Contains(item) {
137137+ return false
138138+ }
139139+ }
140140+ return true
141141+}
142142+143143+func (s Set[T]) IsSuperset(other Set[T]) bool {
144144+ return other.IsSubset(s)
145145+}
146146+147147+func (s Set[T]) IsDisjoint(other Set[T]) bool {
148148+ for item := range s.data {
149149+ if other.Contains(item) {
150150+ return false
151151+ }
152152+ }
153153+ return true
154154+}
155155+156156+func (s Set[T]) Equal(other Set[T]) bool {
157157+ if s.Len() != other.Len() {
158158+ return false
159159+ }
160160+ for item := range s.data {
161161+ if !other.Contains(item) {
162162+ return false
163163+ }
164164+ }
165165+ return true
166166+}
167167+168168+func Collect[T comparable](seq iter.Seq[T]) Set[T] {
169169+ result := New[T]()
170170+ for item := range seq {
171171+ result.Insert(item)
172172+ }
173173+ return result
174174+}
+411
sets/set_test.go
···11+package sets
22+33+import (
44+ "slices"
55+ "testing"
66+ "testing/quick"
77+)
88+99+func TestNew(t *testing.T) {
1010+ s := New[int]()
1111+ if s.Len() != 0 {
1212+ t.Errorf("New set should be empty, got length %d", s.Len())
1313+ }
1414+ if !s.IsEmpty() {
1515+ t.Error("New set should be empty")
1616+ }
1717+}
1818+1919+func TestFromSlice(t *testing.T) {
2020+ s := Collect(slices.Values([]int{1, 2, 3, 2, 1}))
2121+ if s.Len() != 3 {
2222+ t.Errorf("Expected length 3, got %d", s.Len())
2323+ }
2424+ if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) {
2525+ t.Error("Set should contain all unique elements from slice")
2626+ }
2727+}
2828+2929+func TestInsert(t *testing.T) {
3030+ s := New[string]()
3131+3232+ if !s.Insert("hello") {
3333+ t.Error("First insert should return true")
3434+ }
3535+ if s.Insert("hello") {
3636+ t.Error("Duplicate insert should return false")
3737+ }
3838+ if s.Len() != 1 {
3939+ t.Errorf("Expected length 1, got %d", s.Len())
4040+ }
4141+}
4242+4343+func TestRemove(t *testing.T) {
4444+ s := Collect(slices.Values([]int{1, 2, 3}))
4545+4646+ if !s.Remove(2) {
4747+ t.Error("Remove existing element should return true")
4848+ }
4949+ if s.Remove(2) {
5050+ t.Error("Remove non-existing element should return false")
5151+ }
5252+ if s.Contains(2) {
5353+ t.Error("Element should be removed")
5454+ }
5555+ if s.Len() != 2 {
5656+ t.Errorf("Expected length 2, got %d", s.Len())
5757+ }
5858+}
5959+6060+func TestContains(t *testing.T) {
6161+ s := Collect(slices.Values([]int{1, 2, 3}))
6262+6363+ if !s.Contains(1) {
6464+ t.Error("Should contain 1")
6565+ }
6666+ if s.Contains(4) {
6767+ t.Error("Should not contain 4")
6868+ }
6969+}
7070+7171+func TestClear(t *testing.T) {
7272+ s := Collect(slices.Values([]int{1, 2, 3}))
7373+ s.Clear()
7474+7575+ if !s.IsEmpty() {
7676+ t.Error("Set should be empty after clear")
7777+ }
7878+ if s.Len() != 0 {
7979+ t.Errorf("Expected length 0, got %d", s.Len())
8080+ }
8181+}
8282+8383+func TestIterator(t *testing.T) {
8484+ s := Collect(slices.Values([]int{1, 2, 3}))
8585+ var items []int
8686+8787+ for item := range s.All() {
8888+ items = append(items, item)
8989+ }
9090+9191+ slices.Sort(items)
9292+ expected := []int{1, 2, 3}
9393+ if !slices.Equal(items, expected) {
9494+ t.Errorf("Expected %v, got %v", expected, items)
9595+ }
9696+}
9797+9898+func TestClone(t *testing.T) {
9999+ s1 := Collect(slices.Values([]int{1, 2, 3}))
100100+ s2 := s1.Clone()
101101+102102+ if !s1.Equal(s2) {
103103+ t.Error("Cloned set should be equal to original")
104104+ }
105105+106106+ s2.Insert(4)
107107+ if s1.Contains(4) {
108108+ t.Error("Modifying clone should not affect original")
109109+ }
110110+}
111111+112112+func TestUnion(t *testing.T) {
113113+ s1 := Collect(slices.Values([]int{1, 2}))
114114+ s2 := Collect(slices.Values([]int{2, 3}))
115115+116116+ result := Collect(s1.Union(s2))
117117+ expected := Collect(slices.Values([]int{1, 2, 3}))
118118+119119+ if !result.Equal(expected) {
120120+ t.Errorf("Expected %v, got %v", expected, result)
121121+ }
122122+}
123123+124124+func TestIntersection(t *testing.T) {
125125+ s1 := Collect(slices.Values([]int{1, 2, 3}))
126126+ s2 := Collect(slices.Values([]int{2, 3, 4}))
127127+128128+ expected := Collect(slices.Values([]int{2, 3}))
129129+ result := Collect(s1.Intersection(s2))
130130+131131+ if !result.Equal(expected) {
132132+ t.Errorf("Expected %v, got %v", expected, result)
133133+ }
134134+}
135135+136136+func TestDifference(t *testing.T) {
137137+ s1 := Collect(slices.Values([]int{1, 2, 3}))
138138+ s2 := Collect(slices.Values([]int{2, 3, 4}))
139139+140140+ expected := Collect(slices.Values([]int{1}))
141141+ result := Collect(s1.Difference(s2))
142142+143143+ if !result.Equal(expected) {
144144+ t.Errorf("Expected %v, got %v", expected, result)
145145+ }
146146+}
147147+148148+func TestSymmetricDifference(t *testing.T) {
149149+ s1 := Collect(slices.Values([]int{1, 2, 3}))
150150+ s2 := Collect(slices.Values([]int{2, 3, 4}))
151151+152152+ expected := Collect(slices.Values([]int{1, 4}))
153153+ result := Collect(s1.SymmetricDifference(s2))
154154+155155+ if !result.Equal(expected) {
156156+ t.Errorf("Expected %v, got %v", expected, result)
157157+ }
158158+}
159159+160160+func TestSymmetricDifferenceCommutativeProperty(t *testing.T) {
161161+ s1 := Collect(slices.Values([]int{1, 2, 3}))
162162+ s2 := Collect(slices.Values([]int{2, 3, 4}))
163163+164164+ result1 := Collect(s1.SymmetricDifference(s2))
165165+ result2 := Collect(s2.SymmetricDifference(s1))
166166+167167+ if !result1.Equal(result2) {
168168+ t.Errorf("Expected %v, got %v", result1, result2)
169169+ }
170170+}
171171+172172+func TestIsSubset(t *testing.T) {
173173+ s1 := Collect(slices.Values([]int{1, 2}))
174174+ s2 := Collect(slices.Values([]int{1, 2, 3}))
175175+176176+ if !s1.IsSubset(s2) {
177177+ t.Error("s1 should be subset of s2")
178178+ }
179179+ if s2.IsSubset(s1) {
180180+ t.Error("s2 should not be subset of s1")
181181+ }
182182+}
183183+184184+func TestIsSuperset(t *testing.T) {
185185+ s1 := Collect(slices.Values([]int{1, 2, 3}))
186186+ s2 := Collect(slices.Values([]int{1, 2}))
187187+188188+ if !s1.IsSuperset(s2) {
189189+ t.Error("s1 should be superset of s2")
190190+ }
191191+ if s2.IsSuperset(s1) {
192192+ t.Error("s2 should not be superset of s1")
193193+ }
194194+}
195195+196196+func TestIsDisjoint(t *testing.T) {
197197+ s1 := Collect(slices.Values([]int{1, 2}))
198198+ s2 := Collect(slices.Values([]int{3, 4}))
199199+ s3 := Collect(slices.Values([]int{2, 3}))
200200+201201+ if !s1.IsDisjoint(s2) {
202202+ t.Error("s1 and s2 should be disjoint")
203203+ }
204204+ if s1.IsDisjoint(s3) {
205205+ t.Error("s1 and s3 should not be disjoint")
206206+ }
207207+}
208208+209209+func TestEqual(t *testing.T) {
210210+ s1 := Collect(slices.Values([]int{1, 2, 3}))
211211+ s2 := Collect(slices.Values([]int{3, 2, 1}))
212212+ s3 := Collect(slices.Values([]int{1, 2}))
213213+214214+ if !s1.Equal(s2) {
215215+ t.Error("s1 and s2 should be equal")
216216+ }
217217+ if s1.Equal(s3) {
218218+ t.Error("s1 and s3 should not be equal")
219219+ }
220220+}
221221+222222+func TestCollect(t *testing.T) {
223223+ s1 := Collect(slices.Values([]int{1, 2}))
224224+ s2 := Collect(slices.Values([]int{2, 3}))
225225+226226+ unionSet := Collect(s1.Union(s2))
227227+ if unionSet.Len() != 3 {
228228+ t.Errorf("Expected union set length 3, got %d", unionSet.Len())
229229+ }
230230+ if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) {
231231+ t.Error("Union set should contain 1, 2, and 3")
232232+ }
233233+234234+ diffSet := Collect(s1.Difference(s2))
235235+ if diffSet.Len() != 1 {
236236+ t.Errorf("Expected difference set length 1, got %d", diffSet.Len())
237237+ }
238238+ if !diffSet.Contains(1) {
239239+ t.Error("Difference set should contain 1")
240240+ }
241241+}
242242+243243+func TestPropertySingleonLen(t *testing.T) {
244244+ f := func(item int) bool {
245245+ single := Singleton(item)
246246+ return single.Len() == 1
247247+ }
248248+249249+ if err := quick.Check(f, nil); err != nil {
250250+ t.Error(err)
251251+ }
252252+}
253253+254254+func TestPropertyInsertIdempotent(t *testing.T) {
255255+ f := func(s Set[int], item int) bool {
256256+ clone := s.Clone()
257257+258258+ clone.Insert(item)
259259+ firstLen := clone.Len()
260260+261261+ clone.Insert(item)
262262+ secondLen := clone.Len()
263263+264264+ return firstLen == secondLen
265265+ }
266266+267267+ if err := quick.Check(f, nil); err != nil {
268268+ t.Error(err)
269269+ }
270270+}
271271+272272+func TestPropertyUnionCommutative(t *testing.T) {
273273+ f := func(s1 Set[int], s2 Set[int]) bool {
274274+ union1 := Collect(s1.Union(s2))
275275+ union2 := Collect(s2.Union(s1))
276276+ return union1.Equal(union2)
277277+ }
278278+279279+ if err := quick.Check(f, nil); err != nil {
280280+ t.Error(err)
281281+ }
282282+}
283283+284284+func TestPropertyIntersectionCommutative(t *testing.T) {
285285+ f := func(s1 Set[int], s2 Set[int]) bool {
286286+ inter1 := Collect(s1.Intersection(s2))
287287+ inter2 := Collect(s2.Intersection(s1))
288288+ return inter1.Equal(inter2)
289289+ }
290290+291291+ if err := quick.Check(f, nil); err != nil {
292292+ t.Error(err)
293293+ }
294294+}
295295+296296+func TestPropertyCloneEquals(t *testing.T) {
297297+ f := func(s Set[int]) bool {
298298+ clone := s.Clone()
299299+ return s.Equal(clone)
300300+ }
301301+302302+ if err := quick.Check(f, nil); err != nil {
303303+ t.Error(err)
304304+ }
305305+}
306306+307307+func TestPropertyIntersectionIsSubset(t *testing.T) {
308308+ f := func(s1 Set[int], s2 Set[int]) bool {
309309+ inter := Collect(s1.Intersection(s2))
310310+ return inter.IsSubset(s1) && inter.IsSubset(s2)
311311+ }
312312+313313+ if err := quick.Check(f, nil); err != nil {
314314+ t.Error(err)
315315+ }
316316+}
317317+318318+func TestPropertyUnionIsSuperset(t *testing.T) {
319319+ f := func(s1 Set[int], s2 Set[int]) bool {
320320+ union := Collect(s1.Union(s2))
321321+ return union.IsSuperset(s1) && union.IsSuperset(s2)
322322+ }
323323+324324+ if err := quick.Check(f, nil); err != nil {
325325+ t.Error(err)
326326+ }
327327+}
328328+329329+func TestPropertyDifferenceDisjoint(t *testing.T) {
330330+ f := func(s1 Set[int], s2 Set[int]) bool {
331331+ diff := Collect(s1.Difference(s2))
332332+ return diff.IsDisjoint(s2)
333333+ }
334334+335335+ if err := quick.Check(f, nil); err != nil {
336336+ t.Error(err)
337337+ }
338338+}
339339+340340+func TestPropertySymmetricDifferenceCommutative(t *testing.T) {
341341+ f := func(s1 Set[int], s2 Set[int]) bool {
342342+ symDiff1 := Collect(s1.SymmetricDifference(s2))
343343+ symDiff2 := Collect(s2.SymmetricDifference(s1))
344344+ return symDiff1.Equal(symDiff2)
345345+ }
346346+347347+ if err := quick.Check(f, nil); err != nil {
348348+ t.Error(err)
349349+ }
350350+}
351351+352352+func TestPropertyRemoveWorks(t *testing.T) {
353353+ f := func(s Set[int], item int) bool {
354354+ clone := s.Clone()
355355+ clone.Insert(item)
356356+ clone.Remove(item)
357357+ return !clone.Contains(item)
358358+ }
359359+360360+ if err := quick.Check(f, nil); err != nil {
361361+ t.Error(err)
362362+ }
363363+}
364364+365365+func TestPropertyClearEmpty(t *testing.T) {
366366+ f := func(s Set[int]) bool {
367367+ s.Clear()
368368+ return s.IsEmpty() && s.Len() == 0
369369+ }
370370+371371+ if err := quick.Check(f, nil); err != nil {
372372+ t.Error(err)
373373+ }
374374+}
375375+376376+func TestPropertyIsSubsetReflexive(t *testing.T) {
377377+ f := func(s Set[int]) bool {
378378+ return s.IsSubset(s)
379379+ }
380380+381381+ if err := quick.Check(f, nil); err != nil {
382382+ t.Error(err)
383383+ }
384384+}
385385+386386+func TestPropertyDeMorganUnion(t *testing.T) {
387387+ f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool {
388388+ // create a universe that contains both sets
389389+ u := universe.Clone()
390390+ for item := range s1.All() {
391391+ u.Insert(item)
392392+ }
393393+ for item := range s2.All() {
394394+ u.Insert(item)
395395+ }
396396+397397+ // (A u B)' = A' n B'
398398+ union := Collect(s1.Union(s2))
399399+ complementUnion := Collect(u.Difference(union))
400400+401401+ complementS1 := Collect(u.Difference(s1))
402402+ complementS2 := Collect(u.Difference(s2))
403403+ intersectionComplements := Collect(complementS1.Intersection(complementS2))
404404+405405+ return complementUnion.Equal(intersectionComplements)
406406+ }
407407+408408+ if err := quick.Check(f, nil); err != nil {
409409+ t.Error(err)
410410+ }
411411+}
···2222)
type Workflow struct {
	Steps []Step
	Name  string
	Data  any
	// Environment holds extra env vars injected into this workflow's steps
	// (e.g. the framework-provided TANGLED_* variables copied in after
	// InitWorkflow so user-defined values cannot override them).
	Environment map[string]string
}
+77
spindle/models/pipeline_env.go
···11+package models
22+33+import (
44+ "strings"
55+66+ "github.com/go-git/go-git/v5/plumbing"
77+ "tangled.org/core/api/tangled"
88+ "tangled.org/core/workflow"
99+)
// PipelineEnvVars extracts environment variables from pipeline trigger metadata.
// These are framework-provided variables that are injected into workflow steps.
// Returns nil when tr is nil; otherwise the map always carries CI=true and the
// pipeline rkey, plus repo- and trigger-specific TANGLED_* entries.
func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string {
	if tr == nil {
		return nil
	}

	env := make(map[string]string)

	// Standard CI environment variable
	env["CI"] = "true"

	env["TANGLED_PIPELINE_ID"] = pipelineId.Rkey

	// Repo info
	if tr.Repo != nil {
		env["TANGLED_REPO_KNOT"] = tr.Repo.Knot
		env["TANGLED_REPO_DID"] = tr.Repo.Did
		env["TANGLED_REPO_NAME"] = tr.Repo.Repo
		env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch
		// devMode presumably switches the URL scheme/host — see BuildRepoURL
		env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode)
	}

	switch workflow.TriggerKind(tr.Kind) {
	case workflow.TriggerKindPush:
		if tr.Push != nil {
			// classify the pushed ref as branch or tag via go-git
			refName := plumbing.ReferenceName(tr.Push.Ref)
			refType := "branch"
			if refName.IsTag() {
				refType = "tag"
			}

			env["TANGLED_REF"] = tr.Push.Ref
			env["TANGLED_REF_NAME"] = refName.Short()
			env["TANGLED_REF_TYPE"] = refType
			env["TANGLED_SHA"] = tr.Push.NewSha
			env["TANGLED_COMMIT_SHA"] = tr.Push.NewSha
		}

	case workflow.TriggerKindPullRequest:
		if tr.PullRequest != nil {
			// For PRs, the "ref" is the source branch
			env["TANGLED_REF"] = "refs/heads/" + tr.PullRequest.SourceBranch
			env["TANGLED_REF_NAME"] = tr.PullRequest.SourceBranch
			env["TANGLED_REF_TYPE"] = "branch"
			env["TANGLED_SHA"] = tr.PullRequest.SourceSha
			env["TANGLED_COMMIT_SHA"] = tr.PullRequest.SourceSha

			// PR-specific variables
			env["TANGLED_PR_SOURCE_BRANCH"] = tr.PullRequest.SourceBranch
			env["TANGLED_PR_TARGET_BRANCH"] = tr.PullRequest.TargetBranch
			env["TANGLED_PR_SOURCE_SHA"] = tr.PullRequest.SourceSha
			env["TANGLED_PR_ACTION"] = tr.PullRequest.Action
		}

	case workflow.TriggerKindManual:
		// Manual triggers may not have ref/sha info
		// Include any manual inputs if present
		if tr.Manual != nil {
			for _, pair := range tr.Manual.Inputs {
				env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value
			}
		}
	}

	return env
}
···1313)
type OpenBaoManager struct {
	client    *vault.Client
	mountPath string
	logger    *slog.Logger
	// connectionTimeout bounds the proxy health check performed by
	// testConnection (defaults to 10s; override with WithConnectionTimeout).
	connectionTimeout time.Duration
}
20212122type OpenBaoManagerOpt func(*OpenBaoManager)
···2627 }
2728}
28293030+func WithConnectionTimeout(timeout time.Duration) OpenBaoManagerOpt {
3131+ return func(v *OpenBaoManager) {
3232+ v.connectionTimeout = timeout
3333+ }
3434+}
3535+2936// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
3037// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
3138// The proxy handles all authentication automatically via Auto-Auth
···4350 }
44514552 manager := &OpenBaoManager{
4646- client: client,
4747- mountPath: "spindle", // default KV v2 mount path
4848- logger: logger,
5353+ client: client,
5454+ mountPath: "spindle", // default KV v2 mount path
5555+ logger: logger,
5656+ connectionTimeout: 10 * time.Second, // default connection timeout
4957 }
50585159 for _, opt := range opts {
···62706371// testConnection verifies that we can connect to the proxy
6472func (v *OpenBaoManager) testConnection() error {
6565- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7373+ ctx, cancel := context.WithTimeout(context.Background(), v.connectionTimeout)
6674 defer cancel()
67756876 // try token self-lookup as a quick way to verify proxy works
+5-2
spindle/secrets/openbao_test.go
···152152 for _, tt := range tests {
153153 t.Run(tt.name, func(t *testing.T) {
154154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155155- manager, err := NewOpenBaoManager(tt.proxyAddr, logger, tt.opts...)
155155+ // Use shorter timeout for tests to avoid long waits
156156+ opts := append(tt.opts, WithConnectionTimeout(1*time.Second))
157157+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, opts...)
156158157159 if tt.expectError {
158160 assert.Error(t, err)
···596598597599 // All these will fail because no real proxy is running
598600 // but we can test that the configuration is properly accepted
599599- manager, err := NewOpenBaoManager(tt.proxyAddr, logger)
601601+ // Use shorter timeout for tests to avoid long waits
602602+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, WithConnectionTimeout(1*time.Second))
600603 assert.Error(t, err) // Expected because no real proxy
601604 assert.Nil(t, manager)
602605 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
+103-47
spindle/server.go
···66 "encoding/json"
77 "fmt"
88 "log/slog"
99+ "maps"
910 "net/http"
10111112 "github.com/go-chi/chi/v5"
···4950 vault secrets.Manager
5051}
51525252-func Run(ctx context.Context) error {
5353+// New creates a new Spindle server with the provided configuration and engines.
5454+func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
5355 logger := log.FromContext(ctx)
54565555- cfg, err := config.Load(ctx)
5656- if err != nil {
5757- return fmt.Errorf("failed to load config: %w", err)
5858- }
5959-6057 d, err := db.Make(cfg.Server.DBPath)
6158 if err != nil {
6262- return fmt.Errorf("failed to setup db: %w", err)
5959+ return nil, fmt.Errorf("failed to setup db: %w", err)
6360 }
64616562 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
6663 if err != nil {
6767- return fmt.Errorf("failed to setup rbac enforcer: %w", err)
6464+ return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
6865 }
6966 e.E.EnableAutoSave(true)
7067···7471 switch cfg.Server.Secrets.Provider {
7572 case "openbao":
7673 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
7777- return fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
7474+ return nil, fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
7875 }
7976 vault, err = secrets.NewOpenBaoManager(
8077 cfg.Server.Secrets.OpenBao.ProxyAddr,
···8279 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
8380 )
8481 if err != nil {
8585- return fmt.Errorf("failed to setup openbao secrets provider: %w", err)
8282+ return nil, fmt.Errorf("failed to setup openbao secrets provider: %w", err)
8683 }
8784 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
8885 case "sqlite", "":
8986 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
9087 if err != nil {
9191- return fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
8888+ return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
9289 }
9390 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
9491 default:
9595- return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
9696- }
9797-9898- nixeryEng, err := nixery.New(ctx, cfg)
9999- if err != nil {
100100- return err
9292+ return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
10193 }
1029410395 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···108100 tangled.RepoNSID,
109101 tangled.RepoCollaboratorNSID,
110102 }
111111- jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, logger, d, true, true)
103103+ jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
112104 if err != nil {
113113- return fmt.Errorf("failed to setup jetstream client: %w", err)
105105+ return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
114106 }
115107 jc.AddDid(cfg.Server.Owner)
116108117109 // Check if the spindle knows about any Dids;
118110 dids, err := d.GetAllDids()
119111 if err != nil {
120120- return fmt.Errorf("failed to get all dids: %w", err)
112112+ return nil, fmt.Errorf("failed to get all dids: %w", err)
121113 }
122114 for _, d := range dids {
123115 jc.AddDid(d)
124116 }
125117126126- resolver := idresolver.DefaultResolver()
118118+ resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
127119128128- spindle := Spindle{
120120+ spindle := &Spindle{
129121 jc: jc,
130122 e: e,
131123 db: d,
132124 l: logger,
133125 n: &n,
134134- engs: map[string]models.Engine{"nixery": nixeryEng},
126126+ engs: engines,
135127 jq: jq,
136128 cfg: cfg,
137129 res: resolver,
···140132141133 err = e.AddSpindle(rbacDomain)
142134 if err != nil {
143143- return fmt.Errorf("failed to set rbac domain: %w", err)
135135+ return nil, fmt.Errorf("failed to set rbac domain: %w", err)
144136 }
145137 err = spindle.configureOwner()
146138 if err != nil {
147147- return err
139139+ return nil, err
148140 }
149141 logger.Info("owner set", "did", cfg.Server.Owner)
150142151151- // starts a job queue runner in the background
152152- jq.Start()
153153- defer jq.Stop()
154154-155155- // Stop vault token renewal if it implements Stopper
156156- if stopper, ok := vault.(secrets.Stopper); ok {
157157- defer stopper.Stop()
158158- }
159159-160143 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
161144 if err != nil {
162162- return fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
145145+ return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
163146 }
164147165148 err = jc.StartJetstream(ctx, spindle.ingest())
166149 if err != nil {
167167- return fmt.Errorf("failed to start jetstream consumer: %w", err)
150150+ return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
168151 }
169152170153 // for each incoming sh.tangled.pipeline, we execute
171154 // spindle.processPipeline, which in turn enqueues the pipeline
172155 // job in the above registered queue.
173156 ccfg := eventconsumer.NewConsumerConfig()
174174- ccfg.Logger = logger
157157+ ccfg.Logger = log.SubLogger(logger, "eventconsumer")
175158 ccfg.Dev = cfg.Server.Dev
176159 ccfg.ProcessFunc = spindle.processPipeline
177160 ccfg.CursorStore = cursorStore
178161 knownKnots, err := d.Knots()
179162 if err != nil {
180180- return err
163163+ return nil, err
181164 }
182165 for _, knot := range knownKnots {
183166 logger.Info("adding source start", "knot", knot)
···185168 }
186169 spindle.ks = eventconsumer.NewConsumer(*ccfg)
187170171171+ return spindle, nil
172172+}
// DB returns the database instance used by this Spindle.
func (s *Spindle) DB() *db.DB {
	return s.db
}
// Queue returns the job queue instance that runs pipeline jobs.
func (s *Spindle) Queue() *queue.Queue {
	return s.jq
}
// Engines returns the map of available engines, keyed by engine name.
func (s *Spindle) Engines() map[string]models.Engine {
	return s.engs
}
// Vault returns the secrets manager instance (openbao- or sqlite-backed).
func (s *Spindle) Vault() secrets.Manager {
	return s.vault
}
// Notifier returns the notifier instance.
func (s *Spindle) Notifier() *notifier.Notifier {
	return s.n
}
// Enforcer returns the RBAC enforcer instance.
func (s *Spindle) Enforcer() *rbac.Enforcer {
	return s.e
}
// Start starts the Spindle server (blocking). It runs the job queue and the
// knot event consumer in the background, then serves HTTP on the configured
// listen address until ListenAndServe returns.
func (s *Spindle) Start(ctx context.Context) error {
	// starts a job queue runner in the background
	s.jq.Start()
	defer s.jq.Stop()

	// Stop vault token renewal if it implements Stopper
	if stopper, ok := s.vault.(secrets.Stopper); ok {
		defer stopper.Stop()
	}

	// NOTE(review): this goroutine is not waited on; it stops only via ctx
	// inside ks.Start — confirm shutdown ordering is acceptable.
	go func() {
		s.l.Info("starting knot event consumer")
		s.ks.Start(ctx)
	}()

	s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
	return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
}
// Run loads configuration, constructs the default engine set ("nixery"),
// builds a Spindle via New, and runs it until the server exits.
func Run(ctx context.Context) error {
	cfg, err := config.Load(ctx)
	if err != nil {
		return fmt.Errorf("failed to load config: %w", err)
	}

	nixeryEng, err := nixery.New(ctx, cfg)
	if err != nil {
		return err
	}

	s, err := New(ctx, cfg, map[string]models.Engine{
		"nixery": nixeryEng,
	})
	if err != nil {
		return err
	}

	// blocks until the HTTP server stops
	return s.Start(ctx)
}
198244199245func (s *Spindle) Router() http.Handler {
···210256}
211257212258func (s *Spindle) XrpcRouter() http.Handler {
213213- logger := s.l.With("route", "xrpc")
214214-215259 serviceAuth := serviceauth.NewServiceAuth(s.l, s.res, s.cfg.Server.Did().String())
216260261261+ l := log.SubLogger(s.l, "xrpc")
262262+217263 x := xrpc.Xrpc{
218218- Logger: logger,
264264+ Logger: l,
219265 Db: s.db,
220266 Enforcer: s.e,
221267 Engines: s.engs,
···265311 }
266312267313 workflows := make(map[models.Engine][]models.Workflow)
314314+315315+ // Build pipeline environment variables once for all workflows
316316+ pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev)
268317269318 for _, w := range tpl.Workflows {
270319 if w != nil {
···291340 return err
292341 }
293342343343+ // inject TANGLED_* env vars after InitWorkflow
344344+ // This prevents user-defined env vars from overriding them
345345+ if ewf.Environment == nil {
346346+ ewf.Environment = make(map[string]string)
347347+ }
348348+ maps.Copy(ewf.Environment, pipelineEnv)
349349+294350 workflows[eng] = append(workflows[eng], *ewf)
295351296352 err = s.db.StatusPending(models.WorkflowId{
···305361306362 ok := s.jq.Enqueue(queue.Job{
307363 Run: func() error {
308308- engine.StartWorkflows(s.l, s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
364364+ engine.StartWorkflows(log.SubLogger(s.l, "engine"), s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
309365 RepoOwner: tpl.TriggerMetadata.Repo.Did,
310366 RepoName: tpl.TriggerMetadata.Repo.Repo,
311367 Workflows: workflows,
+8-3
spindle/stream.go
···1010 "strconv"
1111 "time"
12121313+ "tangled.org/core/log"
1314 "tangled.org/core/spindle/models"
14151516 "github.com/go-chi/chi/v5"
···2324}
24252526func (s *Spindle) Events(w http.ResponseWriter, r *http.Request) {
2626- l := s.l.With("handler", "Events")
2727+ l := log.SubLogger(s.l, "eventstream")
2828+2729 l.Debug("received new connection")
28302931 conn, err := upgrader.Upgrade(w, r, nil)
···8284 }
8385 case <-time.After(30 * time.Second):
8486 // send a keep-alive
8585- l.Debug("sent keepalive")
8687 if err = conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
8788 l.Error("failed to write control", "err", err)
8889 }
···212213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
213214 return fmt.Errorf("failed to write to websocket: %w", err)
214215 }
216216+ case <-time.After(30 * time.Second):
217217+ // send a keep-alive
218218+ if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
219219+ return fmt.Errorf("failed to write control: %w", err)
220220+ }
215221 }
216222 }
217223}
···222228 s.l.Debug("err", "err", err)
223229 return err
224230 }
225225- s.l.Debug("ops", "ops", events)
226231227232 for _, event := range events {
228233 // first extract the inner json into a map
+199
types/commit.go
···11+package types
22+33+import (
44+ "bytes"
55+ "encoding/json"
66+ "fmt"
77+ "maps"
88+ "regexp"
99+ "strings"
1010+1111+ "github.com/go-git/go-git/v5/plumbing"
1212+ "github.com/go-git/go-git/v5/plumbing/object"
1313+)
// Commit is a unified commit representation covering both the go-git
// object.Commit shape and the legacy NiceDiff JSON shape (see the
// deprecated This/Parent fields).
type Commit struct {
	// Hash of the commit object.
	Hash plumbing.Hash `json:"hash,omitempty"`

	// Author is the original author of the commit.
	Author object.Signature `json:"author"`

	// Committer is the one performing the commit, might be different from Author.
	Committer object.Signature `json:"committer"`

	// Message is the commit message, contains arbitrary text.
	Message string `json:"message"`

	// Tree is the hash of the root tree of the commit.
	Tree string `json:"tree"`

	// ParentHashes are the hashes of the parent commits of the commit.
	ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"`

	// PGPSignature is the PGP signature of the commit.
	PGPSignature string `json:"pgp_signature,omitempty"`

	// MergeTag is the embedded tag object when a merge commit is created by
	// merging a signed tag.
	MergeTag string `json:"merge_tag,omitempty"`

	// ChangeId is a unique identifier for the change (e.g., gerrit change-id).
	ChangeId string `json:"change_id,omitempty"`

	// ExtraHeaders contains additional headers not captured by other fields.
	ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"`

	// Deprecated: kept for backwards compatibility with old JSON format
	// (commit hash as a string).
	This string `json:"this,omitempty"`

	// Deprecated: kept for backwards compatibility with old JSON format
	// (first parent hash as a string).
	Parent string `json:"parent,omitempty"`
}
// UnmarshalJSON unifies two commit representations:
//   - object.Commit from go-git
//   - types.NiceDiff's commit (the legacy JSON shape)
//
// To stay backwards compatible, the base struct mirrors NiceDiff's commit
// fields, and this custom unmarshal additionally decodes the go-git field
// names, merging both layouts so either input form yields maximal data.
//
// NOTE(review): the two embedded structs expose overlapping field names
// ("hash", "author", ...); encoding/json gives the tagged Alias fields
// precedence at equal depth — confirm the go-git side still receives the
// fields FromGoGitCommit depends on.
func (c *Commit) UnmarshalJSON(data []byte) error {
	// Alias strips Commit's methods so json.Unmarshal does not recurse back
	// into this UnmarshalJSON.
	type Alias Commit

	aux := &struct {
		*object.Commit
		*Alias
	}{
		Alias: (*Alias)(c),
	}

	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}

	// backfill any go-git-shaped fields the legacy shape didn't populate
	c.FromGoGitCommit(aux.Commit)

	return nil
}
8181+8282+// fill in as much of Commit as possible from the given go-git commit
8383+func (c *Commit) FromGoGitCommit(gc *object.Commit) {
8484+ if gc == nil {
8585+ return
8686+ }
8787+8888+ if c.Hash.IsZero() {
8989+ c.Hash = gc.Hash
9090+ }
9191+ if c.This == "" {
9292+ c.This = gc.Hash.String()
9393+ }
9494+ if isEmptySignature(c.Author) {
9595+ c.Author = gc.Author
9696+ }
9797+ if isEmptySignature(c.Committer) {
9898+ c.Committer = gc.Committer
9999+ }
100100+ if c.Message == "" {
101101+ c.Message = gc.Message
102102+ }
103103+ if c.Tree == "" {
104104+ c.Tree = gc.TreeHash.String()
105105+ }
106106+ if c.PGPSignature == "" {
107107+ c.PGPSignature = gc.PGPSignature
108108+ }
109109+ if c.MergeTag == "" {
110110+ c.MergeTag = gc.MergeTag
111111+ }
112112+113113+ if len(c.ParentHashes) == 0 {
114114+ c.ParentHashes = gc.ParentHashes
115115+ }
116116+ if c.Parent == "" && len(gc.ParentHashes) > 0 {
117117+ c.Parent = gc.ParentHashes[0].String()
118118+ }
119119+120120+ if len(c.ExtraHeaders) == 0 {
121121+ c.ExtraHeaders = make(map[string][]byte)
122122+ maps.Copy(c.ExtraHeaders, gc.ExtraHeaders)
123123+ }
124124+125125+ if c.ChangeId == "" {
126126+ if v, ok := gc.ExtraHeaders["change-id"]; ok {
127127+ c.ChangeId = string(v)
128128+ }
129129+ }
130130+}
131131+132132+func isEmptySignature(s object.Signature) bool {
133133+ return s.Email == "" && s.Name == "" && s.When.IsZero()
134134+}
135135+136136+// produce a verifiable payload from this commit's metadata
137137+func (c *Commit) Payload() string {
138138+ author := bytes.NewBuffer([]byte{})
139139+ c.Author.Encode(author)
140140+141141+ committer := bytes.NewBuffer([]byte{})
142142+ c.Committer.Encode(committer)
143143+144144+ payload := strings.Builder{}
145145+146146+ fmt.Fprintf(&payload, "tree %s\n", c.Tree)
147147+148148+ if len(c.ParentHashes) > 0 {
149149+ for _, p := range c.ParentHashes {
150150+ fmt.Fprintf(&payload, "parent %s\n", p.String())
151151+ }
152152+ } else {
153153+ // present for backwards compatibility
154154+ fmt.Fprintf(&payload, "parent %s\n", c.Parent)
155155+ }
156156+157157+ fmt.Fprintf(&payload, "author %s\n", author.String())
158158+ fmt.Fprintf(&payload, "committer %s\n", committer.String())
159159+160160+ if c.ChangeId != "" {
161161+ fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId)
162162+ } else if v, ok := c.ExtraHeaders["change-id"]; ok {
163163+ fmt.Fprintf(&payload, "change-id %s\n", string(v))
164164+ }
165165+166166+ fmt.Fprintf(&payload, "\n%s", c.Message)
167167+168168+ return payload.String()
169169+}
var (
	// coAuthorRegex matches "Co-authored-by: Name <email>" trailer lines,
	// case-insensitively, on any line of a commit message; group 1 is the
	// name, group 2 the email.
	coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`)
)
174174+175175+func (commit Commit) CoAuthors() []object.Signature {
176176+ var coAuthors []object.Signature
177177+ seen := make(map[string]bool)
178178+ matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
179179+180180+ for _, match := range matches {
181181+ if len(match) >= 3 {
182182+ name := strings.TrimSpace(match[1])
183183+ email := strings.TrimSpace(match[2])
184184+185185+ if seen[email] {
186186+ continue
187187+ }
188188+ seen[email] = true
189189+190190+ coAuthors = append(coAuthors, object.Signature{
191191+ Name: name,
192192+ Email: email,
193193+ When: commit.Committer.When,
194194+ })
195195+ }
196196+ }
197197+198198+ return coAuthors
199199+}