···47474848// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
4949type RepoTree_TreeEntry struct {
5050- // is_file: Whether this entry is a file
5151- Is_file bool `json:"is_file" cborgen:"is_file"`
5252- // is_subtree: Whether this entry is a directory/subtree
5353- Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
5450 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
5551 // mode: File mode
5652 Mode string `json:"mode" cborgen:"mode"`
+4
api/tangled/tangledrepo.go
···3030 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
3131 // spindle: CI runner to send jobs to and receive results from
3232 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
3333+ // topics: Topics related to the repo
3434+ Topics []string `json:"topics,omitempty" cborgen:"topics,omitempty"`
3535+ // website: Any URI related to the repo
3636+ Website *string `json:"website,omitempty" cborgen:"website,omitempty"`
3337}
+6-45
appview/commitverify/verify.go
···33import (
44 "log"
5566- "github.com/go-git/go-git/v5/plumbing/object"
76 "tangled.org/core/appview/db"
87 "tangled.org/core/appview/models"
98 "tangled.org/core/crypto"
···3534 return ""
3635}
37363838-func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
3939- ndCommits := []types.NiceDiff{}
4040- for _, commit := range commits {
4141- ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
4242- }
4343- return GetVerifiedCommits(e, emailToDid, ndCommits)
4444-}
4545-4646-func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
3737+func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
4738 vcs := VerifiedCommits{}
48394940 didPubkeyCache := make(map[string][]models.PublicKey)
50415142 for _, commit := range ndCommits {
5252- c := commit.Commit
5353-5454- committerEmail := c.Committer.Email
4343+ committerEmail := commit.Committer.Email
5544 if did, exists := emailToDid[committerEmail]; exists {
5645 // check if we've already fetched public keys for this did
5746 pubKeys, ok := didPubkeyCache[did]
···6756 }
68576958 // try to verify with any associated pubkeys
5959+ payload := commit.Payload()
6060+ signature := commit.PGPSignature
7061 for _, pk := range pubKeys {
7171- if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
6262+ if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
72637364 fp, err := crypto.SSHFingerprint(pk.Key)
7465 if err != nil {
7566 log.Println("error computing ssh fingerprint:", err)
7667 }
77687878- vc := verifiedCommit{fingerprint: fp, hash: c.This}
6969+ vc := verifiedCommit{fingerprint: fp, hash: commit.This}
7970 vcs[vc] = struct{}{}
8071 break
8172 }
···86778778 return vcs, nil
8879}
8989-9090-// ObjectCommitToNiceDiff is a compatibility function to convert a
9191-// commit object into a NiceDiff structure.
9292-func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
9393- var niceDiff types.NiceDiff
9494-9595- // set commit information
9696- niceDiff.Commit.Message = c.Message
9797- niceDiff.Commit.Author = c.Author
9898- niceDiff.Commit.This = c.Hash.String()
9999- niceDiff.Commit.Committer = c.Committer
100100- niceDiff.Commit.Tree = c.TreeHash.String()
101101- niceDiff.Commit.PGPSignature = c.PGPSignature
102102-103103- changeId, ok := c.ExtraHeaders["change-id"]
104104- if ok {
105105- niceDiff.Commit.ChangedId = string(changeId)
106106- }
107107-108108- // set parent hash if available
109109- if len(c.ParentHashes) > 0 {
110110- niceDiff.Commit.Parent = c.ParentHashes[0].String()
111111- }
112112-113113- // XXX: Stats and Diff fields are typically populated
114114- // after fetching the actual diff information, which isn't
115115- // directly available in the commit object itself.
116116-117117- return niceDiff
118118-}
···2233import (
44 "fmt"
55+ "strings"
56 "time"
6778 "github.com/bluesky-social/indigo/atproto/syntax"
···1718 Rkey string
1819 Created time.Time
1920 Description string
2121+ Website string
2222+ Topics []string
2023 Spindle string
2124 Labels []string
2225···2831}
29323033func (r *Repo) AsRecord() tangled.Repo {
3131- var source, spindle, description *string
3434+ var source, spindle, description, website *string
32353336 if r.Source != "" {
3437 source = &r.Source
···4245 description = &r.Description
4346 }
44474848+ if r.Website != "" {
4949+ website = &r.Website
5050+ }
5151+4552 return tangled.Repo{
4653 Knot: r.Knot,
4754 Name: r.Name,
4855 Description: description,
5656+ Website: website,
5757+ Topics: r.Topics,
4958 CreatedAt: r.Created.Format(time.RFC3339),
5059 Source: source,
5160 Spindle: spindle,
···6069func (r Repo) DidSlashRepo() string {
6170 p, _ := securejoin.SecureJoin(r.Did, r.Name)
6271 return p
7272+}
7373+7474+func (r Repo) TopicStr() string {
7575+ return strings.Join(r.Topics, " ")
6376}
64776578type RepoStats struct {
···91104 Repo *Repo
92105 Issues []Issue
93106}
107107+108108+type BlobContentType int
109109+110110+const (
111111+ BlobContentTypeCode BlobContentType = iota
112112+ BlobContentTypeMarkup
113113+ BlobContentTypeImage
114114+ BlobContentTypeSvg
115115+ BlobContentTypeVideo
116116+ BlobContentTypeSubmodule
117117+)
118118+119119+func (ty BlobContentType) IsCode() bool { return ty == BlobContentTypeCode }
120120+func (ty BlobContentType) IsMarkup() bool { return ty == BlobContentTypeMarkup }
121121+func (ty BlobContentType) IsImage() bool { return ty == BlobContentTypeImage }
122122+func (ty BlobContentType) IsSvg() bool { return ty == BlobContentTypeSvg }
123123+func (ty BlobContentType) IsVideo() bool { return ty == BlobContentTypeVideo }
124124+func (ty BlobContentType) IsSubmodule() bool { return ty == BlobContentTypeSubmodule }
125125+126126+type BlobView struct {
127127+ HasTextView bool // can show as code/text
128128+ HasRenderedView bool // can show rendered (markup/image/video/submodule)
129129+ HasRawView bool // can download raw (everything except submodule)
130130+131131+ // current display mode
132132+ ShowingRendered bool // currently in rendered mode
133133+ ShowingText bool // currently in text/code mode
134134+135135+ // content type flags
136136+ ContentType BlobContentType
137137+138138+ // Content data
139139+ Contents string
140140+ ContentSrc string // URL for media files
141141+ Lines int
142142+ SizeHint uint64
143143+}
144144+145145+// if both views are available, then show a toggle between them
146146+func (b BlobView) ShowToggle() bool {
147147+ return b.HasTextView && b.HasRenderedView
148148+}
149149+150150+func (b BlobView) IsUnsupported() bool {
151151+ // no view available, only raw
152152+ return !(b.HasRenderedView || b.HasTextView)
153153+}
+14-5
appview/models/star.go
···77)
8899type Star struct {
1010- StarredByDid string
1111- RepoAt syntax.ATURI
1212- Created time.Time
1313- Rkey string
1010+ Did string
1111+ RepoAt syntax.ATURI
1212+ Created time.Time
1313+ Rkey string
1414+}
14151515- // optionally, populate this when querying for reverse mappings
1616+// RepoStar is used for reverse mapping to repos
1717+type RepoStar struct {
1818+ Star
1619 Repo *Repo
1720}
2121+2222+// StringStar is used for reverse mapping to strings
2323+type StringStar struct {
2424+ Star
2525+ String *String
2626+}
···5454 reopened a pull request
5555 {{ else if eq .Type "followed" }}
5656 followed you
5757+ {{ else if eq .Type "user_mentioned" }}
5858+ mentioned you
5759 {{ else }}
5860 {{ end }}
5961{{ end }}
···44 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
5566 <p class="text-lg">
77- tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
77+ Tangled is a decentralized Git hosting and collaboration platform.
88 </p>
99 <p class="text-lg">
1010- we envision a place where developers have complete ownership of their
1010+ We envision a place where developers have complete ownership of their
1111 code, open source communities can freely self-govern and most
1212 importantly, coding can be social and fun again.
1313 </p>
···55 "crypto/sha256"
66 "encoding/base64"
77 "fmt"
88- "strings"
98109 "github.com/hiddeco/sshsig"
1110 "golang.org/x/crypto/ssh"
1212- "tangled.org/core/types"
1311)
14121513func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···2826 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
2927 // to sha-512 for all key types anyway.
3028 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
3131- return err, err == nil
3232-}
33293434-// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
3535-// essentially the git cat-file output but without the gpgsig header.
3636-//
3737-// Caveats: signature verification will fail on commits with more than one parent,
3838-// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
3939-// and we are unable to reconstruct the payload correctly.
4040-//
4141-// Ideally this should directly operate on an *object.Commit.
4242-func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
4343- signature := commit.Commit.PGPSignature
4444-4545- author := bytes.NewBuffer([]byte{})
4646- committer := bytes.NewBuffer([]byte{})
4747- commit.Commit.Author.Encode(author)
4848- commit.Commit.Committer.Encode(committer)
4949-5050- payload := strings.Builder{}
5151-5252- fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
5353- if commit.Commit.Parent != "" {
5454- fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
5555- }
5656- fmt.Fprintf(&payload, "author %s\n", author.String())
5757- fmt.Fprintf(&payload, "committer %s\n", committer.String())
5858- if commit.Commit.ChangedId != "" {
5959- fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
6060- }
6161- fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
6262-6363- return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
3030+ return err, err == nil
6431}
65326633// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+6-6
docs/hacking.md
···5252 did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53535454# the secret key from above
5555-export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
5555+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56565757# run redis in at a new shell to store oauth sessions
5858redis-server
···117117# type `poweroff` at the shell to exit the VM
118118```
119119120120-This starts a knot on port 6000, a spindle on port 6555
120120+This starts a knot on port 6444, a spindle on port 6555
121121with `ssh` exposed on port 2222.
122122123123Once the services are running, head to
124124-http://localhost:3000/knots and hit verify. It should
124124+http://localhost:3000/settings/knots and hit verify. It should
125125verify the ownership of the services instantly if everything
126126went smoothly.
127127···146146### running a spindle
147147148148The above VM should already be running a spindle on
149149-`localhost:6555`. Head to http://localhost:3000/spindles and
149149+`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150150hit verify. You can then configure each repository to use
151151this spindle and run CI jobs.
152152···168168169169If for any reason you wish to disable either one of the
170170services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171171-`services.tangled-spindle.enable` (or
172172-`services.tangled-knot.enable`) to `false`.
171171+`services.tangled.spindle.enable` (or
172172+`services.tangled.knot.enable`) to `false`.
+1-1
docs/knot-hosting.md
···131131132132You should now have a running knot server! You can finalize
133133your registration by hitting the `verify` button on the
134134-[/knots](https://tangled.org/knots) page. This simply creates
134134+[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135135a record on your PDS to announce the existence of the knot.
136136137137### custom paths
+4-4
docs/migrations.md
···1414For knots:
15151616- Upgrade to latest tag (v1.9.0 or above)
1717-- Head to the [knot dashboard](https://tangled.org/knots) and
1717+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1818 hit the "retry" button to verify your knot
19192020For spindles:
21212222- Upgrade to latest tag (v1.9.0 or above)
2323- Head to the [spindle
2424- dashboard](https://tangled.org/spindles) and hit the
2424+ dashboard](https://tangled.org/settings/spindles) and hit the
2525 "retry" button to verify your spindle
26262727## Upgrading from v1.7.x
···4141 [settings](https://tangled.org/settings) page.
4242- Restart your knot once you have replaced the environment
4343 variable
4444-- Head to the [knot dashboard](https://tangled.org/knots) and
4444+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
4545 hit the "retry" button to verify your knot. This simply
4646 writes a `sh.tangled.knot` record to your PDS.
4747···4949latest revision, and change your config block like so:
50505151```diff
5252- services.tangled-knot = {
5252+ services.tangled.knot = {
5353 enable = true;
5454 server = {
5555- secretFile = /path/to/secret;
+19-1
docs/spindle/pipeline.md
···1919 - `push`: The workflow should run every time a commit is pushed to the repository.
2020 - `pull_request`: The workflow should run every time a pull request is made or updated.
2121 - `manual`: The workflow can be triggered manually.
2222-- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
2222+- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
2323+- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
23242425For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
2526···2930 branch: ["main", "develop"]
3031 - event: ["pull_request"]
3132 branch: ["main"]
3333+```
3434+3535+You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
3636+3737+```yaml
3838+when:
3939+ - event: ["push"]
4040+ tag: ["v*"]
4141+```
4242+4343+You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
4444+4545+```yaml
4646+when:
4747+ - event: ["push"]
4848+ branch: ["main", "release-*"]
4949+ tag: ["v*", "stable"]
3250```
33513452## Engine
···11+package db
22+33+import (
44+ "context"
55+ "database/sql"
66+ "log/slog"
77+ "strings"
88+99+ _ "github.com/mattn/go-sqlite3"
1010+ "tangled.org/core/log"
1111+)
1212+1313+type DB struct {
1414+ db *sql.DB
1515+ logger *slog.Logger
1616+}
1717+1818+func Setup(ctx context.Context, dbPath string) (*DB, error) {
1919+ // https://github.com/mattn/go-sqlite3#connection-string
2020+ opts := []string{
2121+ "_foreign_keys=1",
2222+ "_journal_mode=WAL",
2323+ "_synchronous=NORMAL",
2424+ "_auto_vacuum=incremental",
2525+ }
2626+2727+ logger := log.FromContext(ctx)
2828+ logger = log.SubLogger(logger, "db")
2929+3030+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
3131+ if err != nil {
3232+ return nil, err
3333+ }
3434+3535+ conn, err := db.Conn(ctx)
3636+ if err != nil {
3737+ return nil, err
3838+ }
3939+ defer conn.Close()
4040+4141+ _, err = conn.ExecContext(ctx, `
4242+ create table if not exists known_dids (
4343+ did text primary key
4444+ );
4545+4646+ create table if not exists public_keys (
4747+ id integer primary key autoincrement,
4848+ did text not null,
4949+ key text not null,
5050+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
5151+ unique(did, key),
5252+ foreign key (did) references known_dids(did) on delete cascade
5353+ );
5454+5555+ create table if not exists _jetstream (
5656+ id integer primary key autoincrement,
5757+ last_time_us integer not null
5858+ );
5959+6060+ create table if not exists events (
6161+ rkey text not null,
6262+ nsid text not null,
6363+ event text not null, -- json
6464+ created integer not null default (strftime('%s', 'now')),
6565+ primary key (rkey, nsid)
6666+ );
6767+6868+ create table if not exists migrations (
6969+ id integer primary key autoincrement,
7070+ name text unique
7171+ );
7272+ `)
7373+ if err != nil {
7474+ return nil, err
7575+ }
7676+7777+ return &DB{
7878+ db: db,
7979+ logger: logger,
8080+ }, nil
8181+}
-64
knotserver/db/init.go
···11-package db
22-33-import (
44- "database/sql"
55- "strings"
66-77- _ "github.com/mattn/go-sqlite3"
88-)
99-1010-type DB struct {
1111- db *sql.DB
1212-}
1313-1414-func Setup(dbPath string) (*DB, error) {
1515- // https://github.com/mattn/go-sqlite3#connection-string
1616- opts := []string{
1717- "_foreign_keys=1",
1818- "_journal_mode=WAL",
1919- "_synchronous=NORMAL",
2020- "_auto_vacuum=incremental",
2121- }
2222-2323- db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
2424- if err != nil {
2525- return nil, err
2626- }
2727-2828- // NOTE: If any other migration is added here, you MUST
2929- // copy the pattern in appview: use a single sql.Conn
3030- // for every migration.
3131-3232- _, err = db.Exec(`
3333- create table if not exists known_dids (
3434- did text primary key
3535- );
3636-3737- create table if not exists public_keys (
3838- id integer primary key autoincrement,
3939- did text not null,
4040- key text not null,
4141- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
4242- unique(did, key),
4343- foreign key (did) references known_dids(did) on delete cascade
4444- );
4545-4646- create table if not exists _jetstream (
4747- id integer primary key autoincrement,
4848- last_time_us integer not null
4949- );
5050-5151- create table if not exists events (
5252- rkey text not null,
5353- nsid text not null,
5454- event text not null, -- json
5555- created integer not null default (strftime('%s', 'now')),
5656- primary key (rkey, nsid)
5757- );
5858- `)
5959- if err != nil {
6060- return nil, err
6161- }
6262-6363- return &DB{db: db}, nil
6464-}
···2222)
23232424type Workflow struct {
2525- Steps []Step
2626- Name string
2727- Data any
2525+ Steps []Step
2626+ Name string
2727+ Data any
2828+ Environment map[string]string
2829}
+77
spindle/models/pipeline_env.go
···11+package models
22+33+import (
44+ "strings"
55+66+ "github.com/go-git/go-git/v5/plumbing"
77+ "tangled.org/core/api/tangled"
88+ "tangled.org/core/workflow"
99+)
1010+1111+// PipelineEnvVars extracts environment variables from pipeline trigger metadata.
1212+// These are framework-provided variables that are injected into workflow steps.
1313+func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string {
1414+ if tr == nil {
1515+ return nil
1616+ }
1717+1818+ env := make(map[string]string)
1919+2020+ // Standard CI environment variable
2121+ env["CI"] = "true"
2222+2323+ env["TANGLED_PIPELINE_ID"] = pipelineId.Rkey
2424+2525+ // Repo info
2626+ if tr.Repo != nil {
2727+ env["TANGLED_REPO_KNOT"] = tr.Repo.Knot
2828+ env["TANGLED_REPO_DID"] = tr.Repo.Did
2929+ env["TANGLED_REPO_NAME"] = tr.Repo.Repo
3030+ env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch
3131+ env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode)
3232+ }
3333+3434+ switch workflow.TriggerKind(tr.Kind) {
3535+ case workflow.TriggerKindPush:
3636+ if tr.Push != nil {
3737+ refName := plumbing.ReferenceName(tr.Push.Ref)
3838+ refType := "branch"
3939+ if refName.IsTag() {
4040+ refType = "tag"
4141+ }
4242+4343+ env["TANGLED_REF"] = tr.Push.Ref
4444+ env["TANGLED_REF_NAME"] = refName.Short()
4545+ env["TANGLED_REF_TYPE"] = refType
4646+ env["TANGLED_SHA"] = tr.Push.NewSha
4747+ env["TANGLED_COMMIT_SHA"] = tr.Push.NewSha
4848+ }
4949+5050+ case workflow.TriggerKindPullRequest:
5151+ if tr.PullRequest != nil {
5252+ // For PRs, the "ref" is the source branch
5353+ env["TANGLED_REF"] = "refs/heads/" + tr.PullRequest.SourceBranch
5454+ env["TANGLED_REF_NAME"] = tr.PullRequest.SourceBranch
5555+ env["TANGLED_REF_TYPE"] = "branch"
5656+ env["TANGLED_SHA"] = tr.PullRequest.SourceSha
5757+ env["TANGLED_COMMIT_SHA"] = tr.PullRequest.SourceSha
5858+5959+ // PR-specific variables
6060+ env["TANGLED_PR_SOURCE_BRANCH"] = tr.PullRequest.SourceBranch
6161+ env["TANGLED_PR_TARGET_BRANCH"] = tr.PullRequest.TargetBranch
6262+ env["TANGLED_PR_SOURCE_SHA"] = tr.PullRequest.SourceSha
6363+ env["TANGLED_PR_ACTION"] = tr.PullRequest.Action
6464+ }
6565+6666+ case workflow.TriggerKindManual:
6767+ // Manual triggers may not have ref/sha info
6868+ // Include any manual inputs if present
6969+ if tr.Manual != nil {
7070+ for _, pair := range tr.Manual.Inputs {
7171+ env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value
7272+ }
7373+ }
7474+ }
7575+7676+ return env
7777+}
···11+package models
22+33+import (
44+ "encoding/base64"
55+ "strings"
66+)
77+88+// SecretMask replaces secret values in strings with "***".
99+type SecretMask struct {
1010+ replacer *strings.Replacer
1111+}
1212+1313+// NewSecretMask creates a mask for the given secret values.
1414+// Also registers base64-encoded variants of each secret.
1515+func NewSecretMask(values []string) *SecretMask {
1616+ var pairs []string
1717+1818+ for _, value := range values {
1919+ if value == "" {
2020+ continue
2121+ }
2222+2323+ pairs = append(pairs, value, "***")
2424+2525+ b64 := base64.StdEncoding.EncodeToString([]byte(value))
2626+ if b64 != value {
2727+ pairs = append(pairs, b64, "***")
2828+ }
2929+3030+ b64NoPad := strings.TrimRight(b64, "=")
3131+ if b64NoPad != b64 && b64NoPad != value {
3232+ pairs = append(pairs, b64NoPad, "***")
3333+ }
3434+ }
3535+3636+ if len(pairs) == 0 {
3737+ return nil
3838+ }
3939+4040+ return &SecretMask{
4141+ replacer: strings.NewReplacer(pairs...),
4242+ }
4343+}
4444+4545+// Mask replaces all registered secret values with "***".
4646+func (m *SecretMask) Mask(input string) string {
4747+ if m == nil || m.replacer == nil {
4848+ return input
4949+ }
5050+ return m.replacer.Replace(input)
5151+}
+135
spindle/models/secret_mask_test.go
···11+package models
22+33+import (
44+ "encoding/base64"
55+ "testing"
66+)
77+88+func TestSecretMask_BasicMasking(t *testing.T) {
99+ mask := NewSecretMask([]string{"mysecret123"})
1010+1111+ input := "The password is mysecret123 in this log"
1212+ expected := "The password is *** in this log"
1313+1414+ result := mask.Mask(input)
1515+ if result != expected {
1616+ t.Errorf("expected %q, got %q", expected, result)
1717+ }
1818+}
1919+2020+func TestSecretMask_Base64Encoded(t *testing.T) {
2121+ secret := "mysecret123"
2222+ mask := NewSecretMask([]string{secret})
2323+2424+ b64 := base64.StdEncoding.EncodeToString([]byte(secret))
2525+ input := "Encoded: " + b64
2626+ expected := "Encoded: ***"
2727+2828+ result := mask.Mask(input)
2929+ if result != expected {
3030+ t.Errorf("expected %q, got %q", expected, result)
3131+ }
3232+}
3333+3434+func TestSecretMask_Base64NoPadding(t *testing.T) {
3535+ // "test" encodes to "dGVzdA==" with padding
3636+ secret := "test"
3737+ mask := NewSecretMask([]string{secret})
3838+3939+ b64NoPad := "dGVzdA" // base64 without padding
4040+ input := "Token: " + b64NoPad
4141+ expected := "Token: ***"
4242+4343+ result := mask.Mask(input)
4444+ if result != expected {
4545+ t.Errorf("expected %q, got %q", expected, result)
4646+ }
4747+}
4848+4949+func TestSecretMask_MultipleSecrets(t *testing.T) {
5050+ mask := NewSecretMask([]string{"password1", "apikey123"})
5151+5252+ input := "Using password1 and apikey123 for auth"
5353+ expected := "Using *** and *** for auth"
5454+5555+ result := mask.Mask(input)
5656+ if result != expected {
5757+ t.Errorf("expected %q, got %q", expected, result)
5858+ }
5959+}
6060+6161+func TestSecretMask_MultipleOccurrences(t *testing.T) {
6262+ mask := NewSecretMask([]string{"secret"})
6363+6464+ input := "secret appears twice: secret"
6565+ expected := "*** appears twice: ***"
6666+6767+ result := mask.Mask(input)
6868+ if result != expected {
6969+ t.Errorf("expected %q, got %q", expected, result)
7070+ }
7171+}
7272+7373+func TestSecretMask_ShortValues(t *testing.T) {
7474+ mask := NewSecretMask([]string{"abc", "xy", ""})
7575+7676+ if mask == nil {
7777+ t.Fatal("expected non-nil mask")
7878+ }
7979+8080+ input := "abc xy test"
8181+ expected := "*** *** test"
8282+ result := mask.Mask(input)
8383+ if result != expected {
8484+ t.Errorf("expected %q, got %q", expected, result)
8585+ }
8686+}
8787+8888+func TestSecretMask_NilMask(t *testing.T) {
8989+ var mask *SecretMask
9090+9191+ input := "some input text"
9292+ result := mask.Mask(input)
9393+ if result != input {
9494+ t.Errorf("expected %q, got %q", input, result)
9595+ }
9696+}
9797+9898+func TestSecretMask_EmptyInput(t *testing.T) {
9999+ mask := NewSecretMask([]string{"secret"})
100100+101101+ result := mask.Mask("")
102102+ if result != "" {
103103+ t.Errorf("expected empty string, got %q", result)
104104+ }
105105+}
106106+107107+func TestSecretMask_NoMatch(t *testing.T) {
108108+ mask := NewSecretMask([]string{"secretvalue"})
109109+110110+ input := "nothing to mask here"
111111+ result := mask.Mask(input)
112112+ if result != input {
113113+ t.Errorf("expected %q, got %q", input, result)
114114+ }
115115+}
116116+117117+func TestSecretMask_EmptySecretsList(t *testing.T) {
118118+ mask := NewSecretMask([]string{})
119119+120120+ if mask != nil {
121121+ t.Error("expected nil mask for empty secrets list")
122122+ }
123123+}
124124+125125+func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
126126+ mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
127127+128128+ input := "Using validpassword here"
129129+ expected := "Using *** here"
130130+131131+ result := mask.Mask(input)
132132+ if result != expected {
133133+ t.Errorf("expected %q, got %q", expected, result)
134134+ }
135135+}
+15-7
spindle/secrets/openbao.go
···1313)
14141515type OpenBaoManager struct {
1616- client *vault.Client
1717- mountPath string
1818- logger *slog.Logger
1616+ client *vault.Client
1717+ mountPath string
1818+ logger *slog.Logger
1919+ connectionTimeout time.Duration
1920}
20212122type OpenBaoManagerOpt func(*OpenBaoManager)
···2627 }
2728}
28293030+func WithConnectionTimeout(timeout time.Duration) OpenBaoManagerOpt {
3131+ return func(v *OpenBaoManager) {
3232+ v.connectionTimeout = timeout
3333+ }
3434+}
3535+2936// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
3037// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
3138// The proxy handles all authentication automatically via Auto-Auth
···4350 }
44514552 manager := &OpenBaoManager{
4646- client: client,
4747- mountPath: "spindle", // default KV v2 mount path
4848- logger: logger,
5353+ client: client,
5454+ mountPath: "spindle", // default KV v2 mount path
5555+ logger: logger,
5656+ connectionTimeout: 10 * time.Second, // default connection timeout
4957 }
50585159 for _, opt := range opts {
···62706371// testConnection verifies that we can connect to the proxy
6472func (v *OpenBaoManager) testConnection() error {
6565- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7373+ ctx, cancel := context.WithTimeout(context.Background(), v.connectionTimeout)
6674 defer cancel()
67756876 // try token self-lookup as a quick way to verify proxy works
+5-2
spindle/secrets/openbao_test.go
···152152 for _, tt := range tests {
153153 t.Run(tt.name, func(t *testing.T) {
154154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155155- manager, err := NewOpenBaoManager(tt.proxyAddr, logger, tt.opts...)
155155+ // Use shorter timeout for tests to avoid long waits
156156+ opts := append(tt.opts, WithConnectionTimeout(1*time.Second))
157157+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, opts...)
156158157159 if tt.expectError {
158160 assert.Error(t, err)
···596598597599 // All these will fail because no real proxy is running
598600 // but we can test that the configuration is properly accepted
599599- manager, err := NewOpenBaoManager(tt.proxyAddr, logger)
601601+ // Use shorter timeout for tests to avoid long waits
602602+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, WithConnectionTimeout(1*time.Second))
600603 assert.Error(t, err) // Expected because no real proxy
601604 assert.Nil(t, manager)
602605 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
+97-41
spindle/server.go
···66 "encoding/json"
77 "fmt"
88 "log/slog"
99+ "maps"
910 "net/http"
10111112 "github.com/go-chi/chi/v5"
···4950 vault secrets.Manager
5051}
51525252-func Run(ctx context.Context) error {
5353+// New creates a new Spindle server with the provided configuration and engines.
5454+func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
5355 logger := log.FromContext(ctx)
54565555- cfg, err := config.Load(ctx)
5656- if err != nil {
5757- return fmt.Errorf("failed to load config: %w", err)
5858- }
5959-6057 d, err := db.Make(cfg.Server.DBPath)
6158 if err != nil {
6262- return fmt.Errorf("failed to setup db: %w", err)
5959+ return nil, fmt.Errorf("failed to setup db: %w", err)
6360 }
64616562 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
6663 if err != nil {
6767- return fmt.Errorf("failed to setup rbac enforcer: %w", err)
6464+ return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
6865 }
6966 e.E.EnableAutoSave(true)
7067···7471 switch cfg.Server.Secrets.Provider {
7572 case "openbao":
7673 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
7777- return fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
7474+ return nil, fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
7875 }
7976 vault, err = secrets.NewOpenBaoManager(
8077 cfg.Server.Secrets.OpenBao.ProxyAddr,
···8279 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
8380 )
8481 if err != nil {
8585- return fmt.Errorf("failed to setup openbao secrets provider: %w", err)
8282+ return nil, fmt.Errorf("failed to setup openbao secrets provider: %w", err)
8683 }
8784 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
8885 case "sqlite", "":
8986 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
9087 if err != nil {
9191- return fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
8888+ return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
9289 }
9390 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
9491 default:
9595- return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
9696- }
9797-9898- nixeryEng, err := nixery.New(ctx, cfg)
9999- if err != nil {
100100- return err
9292+ return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
10193 }
1029410395 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···110102 }
111103 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
112104 if err != nil {
113113- return fmt.Errorf("failed to setup jetstream client: %w", err)
105105+ return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
114106 }
115107 jc.AddDid(cfg.Server.Owner)
116108117109 // Check if the spindle knows about any Dids;
118110 dids, err := d.GetAllDids()
119111 if err != nil {
120120- return fmt.Errorf("failed to get all dids: %w", err)
112112+ return nil, fmt.Errorf("failed to get all dids: %w", err)
121113 }
122114 for _, d := range dids {
123115 jc.AddDid(d)
124116 }
125117126126- resolver := idresolver.DefaultResolver()
118118+ resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
127119128128- spindle := Spindle{
120120+ spindle := &Spindle{
129121 jc: jc,
130122 e: e,
131123 db: d,
132124 l: logger,
133125 n: &n,
134134- engs: map[string]models.Engine{"nixery": nixeryEng},
126126+ engs: engines,
135127 jq: jq,
136128 cfg: cfg,
137129 res: resolver,
···140132141133 err = e.AddSpindle(rbacDomain)
142134 if err != nil {
143143- return fmt.Errorf("failed to set rbac domain: %w", err)
135135+ return nil, fmt.Errorf("failed to set rbac domain: %w", err)
144136 }
145137 err = spindle.configureOwner()
146138 if err != nil {
147147- return err
139139+ return nil, err
148140 }
149141 logger.Info("owner set", "did", cfg.Server.Owner)
150142151151- // starts a job queue runner in the background
152152- jq.Start()
153153- defer jq.Stop()
154154-155155- // Stop vault token renewal if it implements Stopper
156156- if stopper, ok := vault.(secrets.Stopper); ok {
157157- defer stopper.Stop()
158158- }
159159-160143 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
161144 if err != nil {
162162- return fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
145145+ return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
163146 }
164147165148 err = jc.StartJetstream(ctx, spindle.ingest())
166149 if err != nil {
167167- return fmt.Errorf("failed to start jetstream consumer: %w", err)
150150+ return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
168151 }
169152170153 // for each incoming sh.tangled.pipeline, we execute
···177160 ccfg.CursorStore = cursorStore
178161 knownKnots, err := d.Knots()
179162 if err != nil {
180180- return err
163163+ return nil, err
181164 }
182165 for _, knot := range knownKnots {
183166 logger.Info("adding source start", "knot", knot)
···185168 }
186169 spindle.ks = eventconsumer.NewConsumer(*ccfg)
187170171171+ return spindle, nil
172172+}
// DB returns the spindle's database instance.
func (s *Spindle) DB() *db.DB {
	return s.db
}
// Queue returns the spindle's job queue instance.
func (s *Spindle) Queue() *queue.Queue {
	return s.jq
}
// Engines returns the map of available engines, keyed by engine name.
//
// NOTE(review): this returns the live internal map, not a copy — callers
// must treat it as read-only.
func (s *Spindle) Engines() map[string]models.Engine {
	return s.engs
}
// Vault returns the secrets manager instance.
func (s *Spindle) Vault() secrets.Manager {
	return s.vault
}
// Notifier returns the notifier instance.
func (s *Spindle) Notifier() *notifier.Notifier {
	return s.n
}
// Enforcer returns the RBAC enforcer instance.
func (s *Spindle) Enforcer() *rbac.Enforcer {
	return s.e
}
// Start starts the Spindle HTTP server and blocks until the listener
// returns. It owns the runtime lifecycle of background components: the
// job queue runner and (when supported) the vault token renewal are
// stopped via defer when Start returns.
func (s *Spindle) Start(ctx context.Context) error {
	// starts a job queue runner in the background
	s.jq.Start()
	defer s.jq.Stop()

	// Stop vault token renewal if it implements Stopper
	if stopper, ok := s.vault.(secrets.Stopper); ok {
		defer stopper.Stop()
	}

	// consume knot events in the background
	// NOTE(review): presumably ks.Start returns when ctx is cancelled;
	// confirm against the eventconsumer.Consumer implementation, otherwise
	// this goroutine outlives Start.
	go func() {
		s.l.Info("starting knot event consumer")
		s.ks.Start(ctx)
	}()

	s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
	// blocks for the lifetime of the server; always returns a non-nil error
	return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
}
195223196196- return nil
// Run loads the configuration, constructs the default engine set
// (currently only nixery) and runs the Spindle server. It blocks until
// the server exits and returns the server's error.
func Run(ctx context.Context) error {
	cfg, err := config.Load(ctx)
	if err != nil {
		return fmt.Errorf("failed to load config: %w", err)
	}

	nixeryEng, err := nixery.New(ctx, cfg)
	if err != nil {
		return err
	}

	s, err := New(ctx, cfg, map[string]models.Engine{
		"nixery": nixeryEng,
	})
	if err != nil {
		return err
	}

	return s.Start(ctx)
}
198244199245func (s *Spindle) Router() http.Handler {
···266312267313 workflows := make(map[models.Engine][]models.Workflow)
268314315315+ // Build pipeline environment variables once for all workflows
316316+ pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev)
317317+269318 for _, w := range tpl.Workflows {
270319 if w != nil {
271320 if _, ok := s.engs[w.Engine]; !ok {
···290339 if err != nil {
291340 return err
292341 }
342342+343343+ // inject TANGLED_* env vars after InitWorkflow
344344+ // This prevents user-defined env vars from overriding them
345345+ if ewf.Environment == nil {
346346+ ewf.Environment = make(map[string]string)
347347+ }
348348+ maps.Copy(ewf.Environment, pipelineEnv)
293349294350 workflows[eng] = append(workflows[eng], *ewf)
295351
+5
spindle/stream.go
···213213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
214214 return fmt.Errorf("failed to write to websocket: %w", err)
215215 }
216216+ case <-time.After(30 * time.Second):
217217+ // send a keep-alive
218218+ if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
219219+ return fmt.Errorf("failed to write control: %w", err)
220220+ }
216221 }
217222 }
218223}
+199
types/commit.go
···11+package types
22+33+import (
44+ "bytes"
55+ "encoding/json"
66+ "fmt"
77+ "maps"
88+ "regexp"
99+ "strings"
1010+1111+ "github.com/go-git/go-git/v5/plumbing"
1212+ "github.com/go-git/go-git/v5/plumbing/object"
1313+)
// Commit is a unified representation of a git commit. It carries the
// union of the fields found in a go-git object.Commit and in the legacy
// types.NiceDiff commit JSON shape (see UnmarshalJSON / FromGoGitCommit).
type Commit struct {
	// Hash is the hash of the commit object.
	Hash plumbing.Hash `json:"hash,omitempty"`

	// Author is the original author of the commit.
	Author object.Signature `json:"author"`

	// Committer is the one performing the commit; may differ from Author.
	Committer object.Signature `json:"committer"`

	// Message is the commit message; contains arbitrary text.
	Message string `json:"message"`

	// Tree is the hash of the root tree of the commit.
	Tree string `json:"tree"`

	// ParentHashes are the hashes of the parent commits of this commit.
	ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"`

	// PGPSignature is the PGP signature of the commit.
	PGPSignature string `json:"pgp_signature,omitempty"`

	// MergeTag is the embedded tag object when a merge commit is created
	// by merging a signed tag.
	MergeTag string `json:"merge_tag,omitempty"`

	// ChangeId is a unique identifier for the change (e.g. a gerrit
	// change-id).
	ChangeId string `json:"change_id,omitempty"`

	// ExtraHeaders contains additional commit headers not captured by the
	// other fields.
	ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"`

	// Deprecated: This is kept for backwards compatibility with the old
	// JSON format; FromGoGitCommit mirrors Hash here as a hex string.
	This string `json:"this,omitempty"`

	// Deprecated: Parent is kept for backwards compatibility with the old
	// JSON format; FromGoGitCommit mirrors the first parent hash here.
	Parent string `json:"parent,omitempty"`
}
// UnmarshalJSON unifies two commit JSON shapes into a single Commit:
//   - a serialized go-git object.Commit
//   - the commit embedded in the legacy types.NiceDiff format
//
// To stay backwards compatible, the base struct uses the same fields as
// the NiceDiff commit; this custom unmarshal additionally decodes the
// go-git representation and merges it in, so either input form yields a
// maximally populated Commit.
func (c *Commit) UnmarshalJSON(data []byte) error {
	// Alias strips Commit's methods so the json.Unmarshal below does not
	// recurse back into this UnmarshalJSON.
	type Alias Commit

	// Decode into both representations at once: the embedded
	// *object.Commit picks up go-git-shaped fields, while Alias (backed
	// by c itself) picks up the legacy fields. aux.Commit may remain nil
	// when no go-git fields are present; FromGoGitCommit tolerates nil.
	aux := &struct {
		*object.Commit
		*Alias
	}{
		Alias: (*Alias)(c),
	}

	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}

	// backfill any fields the legacy shape did not provide
	c.FromGoGitCommit(aux.Commit)

	return nil
}
// FromGoGitCommit fills in as much of c as possible from the given go-git
// commit. Only fields still at their zero value are written, so data
// already present on c is never overwritten. A nil gc is a no-op.
func (c *Commit) FromGoGitCommit(gc *object.Commit) {
	if gc == nil {
		return
	}

	if c.Hash.IsZero() {
		c.Hash = gc.Hash
	}
	// keep the deprecated hex-string mirror in sync for old consumers
	if c.This == "" {
		c.This = gc.Hash.String()
	}
	if isEmptySignature(c.Author) {
		c.Author = gc.Author
	}
	if isEmptySignature(c.Committer) {
		c.Committer = gc.Committer
	}
	if c.Message == "" {
		c.Message = gc.Message
	}
	if c.Tree == "" {
		c.Tree = gc.TreeHash.String()
	}
	if c.PGPSignature == "" {
		c.PGPSignature = gc.PGPSignature
	}
	if c.MergeTag == "" {
		c.MergeTag = gc.MergeTag
	}

	if len(c.ParentHashes) == 0 {
		c.ParentHashes = gc.ParentHashes
	}
	// deprecated single-parent mirror: first parent only
	if c.Parent == "" && len(gc.ParentHashes) > 0 {
		c.Parent = gc.ParentHashes[0].String()
	}

	// copy rather than alias the go-git header map
	if len(c.ExtraHeaders) == 0 {
		c.ExtraHeaders = make(map[string][]byte)
		maps.Copy(c.ExtraHeaders, gc.ExtraHeaders)
	}

	// promote a change-id header to the dedicated field if unset
	if c.ChangeId == "" {
		if v, ok := gc.ExtraHeaders["change-id"]; ok {
			c.ChangeId = string(v)
		}
	}
}
131131+132132+func isEmptySignature(s object.Signature) bool {
133133+ return s.Email == "" && s.Name == "" && s.When.IsZero()
134134+}
135135+136136+// produce a verifiable payload from this commit's metadata
137137+func (c *Commit) Payload() string {
138138+ author := bytes.NewBuffer([]byte{})
139139+ c.Author.Encode(author)
140140+141141+ committer := bytes.NewBuffer([]byte{})
142142+ c.Committer.Encode(committer)
143143+144144+ payload := strings.Builder{}
145145+146146+ fmt.Fprintf(&payload, "tree %s\n", c.Tree)
147147+148148+ if len(c.ParentHashes) > 0 {
149149+ for _, p := range c.ParentHashes {
150150+ fmt.Fprintf(&payload, "parent %s\n", p.String())
151151+ }
152152+ } else {
153153+ // present for backwards compatibility
154154+ fmt.Fprintf(&payload, "parent %s\n", c.Parent)
155155+ }
156156+157157+ fmt.Fprintf(&payload, "author %s\n", author.String())
158158+ fmt.Fprintf(&payload, "committer %s\n", committer.String())
159159+160160+ if c.ChangeId != "" {
161161+ fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId)
162162+ } else if v, ok := c.ExtraHeaders["change-id"]; ok {
163163+ fmt.Fprintf(&payload, "change-id %s\n", string(v))
164164+ }
165165+166166+ fmt.Fprintf(&payload, "\n%s", c.Message)
167167+168168+ return payload.String()
169169+}
170170+171171+var (
172172+ coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`)
173173+)
174174+175175+func (commit Commit) CoAuthors() []object.Signature {
176176+ var coAuthors []object.Signature
177177+ seen := make(map[string]bool)
178178+ matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
179179+180180+ for _, match := range matches {
181181+ if len(match) >= 3 {
182182+ name := strings.TrimSpace(match[1])
183183+ email := strings.TrimSpace(match[2])
184184+185185+ if seen[email] {
186186+ continue
187187+ }
188188+ seen[email] = true
189189+190190+ coAuthors = append(coAuthors, object.Signature{
191191+ Name: name,
192192+ Email: email,
193193+ When: commit.Committer.When,
194194+ })
195195+ }
196196+ }
197197+198198+ return coAuthors
199199+}