···1515.env
1616*.rdb
1717.envrc
1818+**/*.bleve
1819# Created if following hacking.md
1920genjwks.out
2021/nix/vm-data
+3-1
api/tangled/actorprofile.go
···2727 Location *string `json:"location,omitempty" cborgen:"location,omitempty"`
2828 // pinnedRepositories: Any ATURI, it is up to appviews to validate these fields.
2929 PinnedRepositories []string `json:"pinnedRepositories,omitempty" cborgen:"pinnedRepositories,omitempty"`
3030- Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
3030+ // pronouns: Preferred gender pronouns.
3131+ Pronouns *string `json:"pronouns,omitempty" cborgen:"pronouns,omitempty"`
3232+ Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
3133}
+196-2
api/tangled/cbor_gen.go
···2626 }
27272828 cw := cbg.NewCborWriter(w)
2929- fieldCount := 7
2929+ fieldCount := 8
30303131 if t.Description == nil {
3232 fieldCount--
···4141 }
42424343 if t.PinnedRepositories == nil {
4444+ fieldCount--
4545+ }
4646+4747+ if t.Pronouns == nil {
4448 fieldCount--
4549 }
4650···186190 return err
187191 }
188192 if _, err := cw.WriteString(string(*t.Location)); err != nil {
193193+ return err
194194+ }
195195+ }
196196+ }
197197+198198+ // t.Pronouns (string) (string)
199199+ if t.Pronouns != nil {
200200+201201+ if len("pronouns") > 1000000 {
202202+ return xerrors.Errorf("Value in field \"pronouns\" was too long")
203203+ }
204204+205205+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pronouns"))); err != nil {
206206+ return err
207207+ }
208208+ if _, err := cw.WriteString(string("pronouns")); err != nil {
209209+ return err
210210+ }
211211+212212+ if t.Pronouns == nil {
213213+ if _, err := cw.Write(cbg.CborNull); err != nil {
214214+ return err
215215+ }
216216+ } else {
217217+ if len(*t.Pronouns) > 1000000 {
218218+ return xerrors.Errorf("Value in field t.Pronouns was too long")
219219+ }
220220+221221+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Pronouns))); err != nil {
222222+ return err
223223+ }
224224+ if _, err := cw.WriteString(string(*t.Pronouns)); err != nil {
189225 return err
190226 }
191227 }
···430466 }
431467432468 t.Location = (*string)(&sval)
469469+ }
470470+ }
471471+ // t.Pronouns (string) (string)
472472+ case "pronouns":
473473+474474+ {
475475+ b, err := cr.ReadByte()
476476+ if err != nil {
477477+ return err
478478+ }
479479+ if b != cbg.CborNull[0] {
480480+ if err := cr.UnreadByte(); err != nil {
481481+ return err
482482+ }
483483+484484+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
485485+ if err != nil {
486486+ return err
487487+ }
488488+489489+ t.Pronouns = (*string)(&sval)
433490 }
434491 }
435492 // t.Description (string) (string)
···58065863 }
5807586458085865 cw := cbg.NewCborWriter(w)
58095809- fieldCount := 8
58665866+ fieldCount := 10
5810586758115868 if t.Description == nil {
58125869 fieldCount--
···58215878 }
5822587958235880 if t.Spindle == nil {
58815881+ fieldCount--
58825882+ }
58835883+58845884+ if t.Topics == nil {
58855885+ fieldCount--
58865886+ }
58875887+58885888+ if t.Website == nil {
58245889 fieldCount--
58255890 }
58265891···59616026 }
59626027 }
5963602860296029+ // t.Topics ([]string) (slice)
60306030+ if t.Topics != nil {
60316031+60326032+ if len("topics") > 1000000 {
60336033+ return xerrors.Errorf("Value in field \"topics\" was too long")
60346034+ }
60356035+60366036+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("topics"))); err != nil {
60376037+ return err
60386038+ }
60396039+ if _, err := cw.WriteString(string("topics")); err != nil {
60406040+ return err
60416041+ }
60426042+60436043+ if len(t.Topics) > 8192 {
60446044+ return xerrors.Errorf("Slice value in field t.Topics was too long")
60456045+ }
60466046+60476047+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Topics))); err != nil {
60486048+ return err
60496049+ }
60506050+ for _, v := range t.Topics {
60516051+ if len(v) > 1000000 {
60526052+ return xerrors.Errorf("Value in field v was too long")
60536053+ }
60546054+60556055+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
60566056+ return err
60576057+ }
60586058+ if _, err := cw.WriteString(string(v)); err != nil {
60596059+ return err
60606060+ }
60616061+60626062+ }
60636063+ }
60646064+59646065 // t.Spindle (string) (string)
59656066 if t.Spindle != nil {
59666067···59936094 }
59946095 }
5995609660976097+ // t.Website (string) (string)
60986098+ if t.Website != nil {
60996099+61006100+ if len("website") > 1000000 {
61016101+ return xerrors.Errorf("Value in field \"website\" was too long")
61026102+ }
61036103+61046104+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("website"))); err != nil {
61056105+ return err
61066106+ }
61076107+ if _, err := cw.WriteString(string("website")); err != nil {
61086108+ return err
61096109+ }
61106110+61116111+ if t.Website == nil {
61126112+ if _, err := cw.Write(cbg.CborNull); err != nil {
61136113+ return err
61146114+ }
61156115+ } else {
61166116+ if len(*t.Website) > 1000000 {
61176117+ return xerrors.Errorf("Value in field t.Website was too long")
61186118+ }
61196119+61206120+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Website))); err != nil {
61216121+ return err
61226122+ }
61236123+ if _, err := cw.WriteString(string(*t.Website)); err != nil {
61246124+ return err
61256125+ }
61266126+ }
61276127+ }
61286128+59966129 // t.CreatedAt (string) (string)
59976130 if len("createdAt") > 1000000 {
59986131 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···61856318 t.Source = (*string)(&sval)
61866319 }
61876320 }
63216321+ // t.Topics ([]string) (slice)
63226322+ case "topics":
63236323+63246324+ maj, extra, err = cr.ReadHeader()
63256325+ if err != nil {
63266326+ return err
63276327+ }
63286328+63296329+ if extra > 8192 {
63306330+ return fmt.Errorf("t.Topics: array too large (%d)", extra)
63316331+ }
63326332+63336333+ if maj != cbg.MajArray {
63346334+ return fmt.Errorf("expected cbor array")
63356335+ }
63366336+63376337+ if extra > 0 {
63386338+ t.Topics = make([]string, extra)
63396339+ }
63406340+63416341+ for i := 0; i < int(extra); i++ {
63426342+ {
63436343+ var maj byte
63446344+ var extra uint64
63456345+ var err error
63466346+ _ = maj
63476347+ _ = extra
63486348+ _ = err
63496349+63506350+ {
63516351+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
63526352+ if err != nil {
63536353+ return err
63546354+ }
63556355+63566356+ t.Topics[i] = string(sval)
63576357+ }
63586358+63596359+ }
63606360+ }
61886361 // t.Spindle (string) (string)
61896362 case "spindle":
61906363···62046377 }
6205637862066379 t.Spindle = (*string)(&sval)
63806380+ }
63816381+ }
63826382+ // t.Website (string) (string)
63836383+ case "website":
63846384+63856385+ {
63866386+ b, err := cr.ReadByte()
63876387+ if err != nil {
63886388+ return err
63896389+ }
63906390+ if b != cbg.CborNull[0] {
63916391+ if err := cr.UnreadByte(); err != nil {
63926392+ return err
63936393+ }
63946394+63956395+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
63966396+ if err != nil {
63976397+ return err
63986398+ }
63996399+64006400+ t.Website = (*string)(&sval)
62076401 }
62086402 }
62096403 // t.CreatedAt (string) (string)
+13-1
api/tangled/repoblob.go
···3030// RepoBlob_Output is the output of a sh.tangled.repo.blob call.
3131type RepoBlob_Output struct {
3232 // content: File content (base64 encoded for binary files)
3333- Content string `json:"content" cborgen:"content"`
3333+ Content *string `json:"content,omitempty" cborgen:"content,omitempty"`
3434 // encoding: Content encoding
3535 Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
3636 // isBinary: Whether the file is binary
···4444 Ref string `json:"ref" cborgen:"ref"`
4545 // size: File size in bytes
4646 Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
4747+ // submodule: Submodule information if path is a submodule
4848+ Submodule *RepoBlob_Submodule `json:"submodule,omitempty" cborgen:"submodule,omitempty"`
4749}
48504951// RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···5456 Name string `json:"name" cborgen:"name"`
5557 // when: Author timestamp
5658 When string `json:"when" cborgen:"when"`
5959+}
6060+6161+// RepoBlob_Submodule is a "submodule" in the sh.tangled.repo.blob schema.
6262+type RepoBlob_Submodule struct {
6363+ // branch: Branch to track in the submodule
6464+ Branch *string `json:"branch,omitempty" cborgen:"branch,omitempty"`
6565+ // name: Submodule name
6666+ Name string `json:"name" cborgen:"name"`
6767+ // url: Submodule repository URL
6868+ Url string `json:"url" cborgen:"url"`
5769}
58705971// RepoBlob calls the XRPC method "sh.tangled.repo.blob".
-4
api/tangled/repotree.go
···47474848// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
4949type RepoTree_TreeEntry struct {
5050- // is_file: Whether this entry is a file
5151- Is_file bool `json:"is_file" cborgen:"is_file"`
5252- // is_subtree: Whether this entry is a directory/subtree
5353- Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
5450 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
5551 // mode: File mode
5652 Mode string `json:"mode" cborgen:"mode"`
+4
api/tangled/tangledrepo.go
···3030 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
3131 // spindle: CI runner to send jobs to and receive results from
3232 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
3333+ // topics: Topics related to the repo
3434+ Topics []string `json:"topics,omitempty" cborgen:"topics,omitempty"`
3535+ // website: Any URI related to the repo
3636+ Website *string `json:"website,omitempty" cborgen:"website,omitempty"`
3337}
···6060 whereClause += " AND " + condition
6161 }
6262 }
6363+ pageClause := ""
6464+ if page.Limit > 0 {
6565+ pageClause = " limit ? offset ? "
6666+ args = append(args, page.Limit, page.Offset)
6767+ }
63686469 query := fmt.Sprintf(`
6570 select id, recipient_did, actor_did, type, entity_type, entity_id, read, created, repo_id, issue_id, pull_id
6671 from notifications
6772 %s
6873 order by created desc
6969- limit ? offset ?
7070- `, whereClause)
7171-7272- args = append(args, page.Limit, page.Offset)
7474+ %s
7575+ `, whereClause, pageClause)
73767477 rows, err := e.QueryContext(context.Background(), query, args...)
7578 if err != nil {
···131134 select
132135 n.id, n.recipient_did, n.actor_did, n.type, n.entity_type, n.entity_id,
133136 n.read, n.created, n.repo_id, n.issue_id, n.pull_id,
134134- r.id as r_id, r.did as r_did, r.name as r_name, r.description as r_description,
137137+ r.id as r_id, r.did as r_did, r.name as r_name, r.description as r_description, r.website as r_website, r.topics as r_topics,
135138 i.id as i_id, i.did as i_did, i.issue_id as i_issue_id, i.title as i_title, i.open as i_open,
136139 p.id as p_id, p.owner_did as p_owner_did, p.pull_id as p_pull_id, p.title as p_title, p.state as p_state
137140 from notifications n
···160163 var issue models.Issue
161164 var pull models.Pull
162165 var rId, iId, pId sql.NullInt64
163163- var rDid, rName, rDescription sql.NullString
166166+ var rDid, rName, rDescription, rWebsite, rTopicStr sql.NullString
164167 var iDid sql.NullString
165168 var iIssueId sql.NullInt64
166169 var iTitle sql.NullString
···173176 err := rows.Scan(
174177 &n.ID, &n.RecipientDid, &n.ActorDid, &typeStr, &n.EntityType, &n.EntityId,
175178 &n.Read, &createdStr, &n.RepoId, &n.IssueId, &n.PullId,
176176- &rId, &rDid, &rName, &rDescription,
179179+ &rId, &rDid, &rName, &rDescription, &rWebsite, &rTopicStr,
177180 &iId, &iDid, &iIssueId, &iTitle, &iOpen,
178181 &pId, &pOwnerDid, &pPullId, &pTitle, &pState,
179182 )
···200203 }
201204 if rDescription.Valid {
202205 repo.Description = rDescription.String
206206+ }
207207+ if rWebsite.Valid {
208208+ repo.Website = rWebsite.String
209209+ }
210210+ if rTopicStr.Valid {
211211+ repo.Topics = strings.Fields(rTopicStr.String)
203212 }
204213 nwe.Repo = &repo
205214 }
···391400 pull_created,
392401 pull_commented,
393402 followed,
403403+ user_mentioned,
394404 pull_merged,
395405 issue_closed,
396406 email_notifications
···416426 &prefs.PullCreated,
417427 &prefs.PullCommented,
418428 &prefs.Followed,
429429+ &prefs.UserMentioned,
419430 &prefs.PullMerged,
420431 &prefs.IssueClosed,
421432 &prefs.EmailNotifications,
···437448 query := `
438449 INSERT OR REPLACE INTO notification_preferences
439450 (user_did, repo_starred, issue_created, issue_commented, pull_created,
440440- pull_commented, followed, pull_merged, issue_closed, email_notifications)
441441- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
451451+ pull_commented, followed, user_mentioned, pull_merged, issue_closed,
452452+ email_notifications)
453453+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
442454 `
443455444456 result, err := d.DB.ExecContext(ctx, query,
···449461 prefs.PullCreated,
450462 prefs.PullCommented,
451463 prefs.Followed,
464464+ prefs.UserMentioned,
452465 prefs.PullMerged,
453466 prefs.IssueClosed,
454467 prefs.EmailNotifications,
+4-2
appview/db/pipeline.go
···168168169169// this is a mega query, but the most useful one:
170170// get N pipelines, for each one get the latest status of its N workflows
171171-func GetPipelineStatuses(e Execer, filters ...filter) ([]models.Pipeline, error) {
171171+func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
172172 var conditions []string
173173 var args []any
174174 for _, filter := range filters {
···205205 join
206206 triggers t ON p.trigger_id = t.id
207207 %s
208208- `, whereClause)
208208+ order by p.created desc
209209+ limit %d
210210+ `, whereClause, limit)
209211210212 rows, err := e.Query(query, args...)
211213 if err != nil {
+26-6
appview/db/profile.go
···129129 did,
130130 description,
131131 include_bluesky,
132132- location
132132+ location,
133133+ pronouns
133134 )
134134- values (?, ?, ?, ?)`,
135135+ values (?, ?, ?, ?, ?)`,
135136 profile.Did,
136137 profile.Description,
137138 includeBskyValue,
138139 profile.Location,
140140+ profile.Pronouns,
139141 )
140142141143 if err != nil {
···216218 did,
217219 description,
218220 include_bluesky,
219219- location
221221+ location,
222222+ pronouns
220223 from
221224 profile
222225 %s`,
···231234 for rows.Next() {
232235 var profile models.Profile
233236 var includeBluesky int
237237+ var pronouns sql.Null[string]
234238235235- err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location)
239239+ err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location, &pronouns)
236240 if err != nil {
237241 return nil, err
238242 }
239243240244 if includeBluesky != 0 {
241245 profile.IncludeBluesky = true
246246+ }
247247+248248+ if pronouns.Valid {
249249+ profile.Pronouns = pronouns.V
242250 }
243251244252 profileMap[profile.Did] = &profile
···302310303311func GetProfile(e Execer, did string) (*models.Profile, error) {
304312 var profile models.Profile
313313+ var pronouns sql.Null[string]
314314+305315 profile.Did = did
306316307317 includeBluesky := 0
318318+308319 err := e.QueryRow(
309309- `select description, include_bluesky, location from profile where did = ?`,
320320+ `select description, include_bluesky, location, pronouns from profile where did = ?`,
310321 did,
311311- ).Scan(&profile.Description, &includeBluesky, &profile.Location)
322322+ ).Scan(&profile.Description, &includeBluesky, &profile.Location, &pronouns)
312323 if err == sql.ErrNoRows {
313324 profile := models.Profile{}
314325 profile.Did = did
···321332322333 if includeBluesky != 0 {
323334 profile.IncludeBluesky = true
335335+ }
336336+337337+ if pronouns.Valid {
338338+ profile.Pronouns = pronouns.V
324339 }
325340326341 rows, err := e.Query(`select link from profile_links where did = ?`, did)
···412427 // ensure description is not too long
413428 if len(profile.Location) > 40 {
414429 return fmt.Errorf("Entered location is too long.")
430430+ }
431431+432432+ // ensure pronouns are not too long
433433+ if len(profile.Pronouns) > 40 {
434434+ return fmt.Errorf("Entered pronouns are too long.")
415435 }
416436417437 // ensure links are in order
···11+// Copyright 2021 The Gitea Authors. All rights reserved.
22+// SPDX-License-Identifier: MIT
33+44+package bleveutil
55+66+import (
77+ "github.com/blevesearch/bleve/v2"
88+)
99+1010+// FlushingBatch is a batch of operations that automatically flushes to the
1111+// underlying index once it reaches a certain size.
1212+type FlushingBatch struct {
1313+ maxBatchSize int
1414+ batch *bleve.Batch
1515+ index bleve.Index
1616+}
1717+1818+// NewFlushingBatch creates a new flushing batch for the specified index. Once
1919+// the number of operations in the batch reaches the specified limit, the batch
2020+// automatically flushes its operations to the index.
2121+func NewFlushingBatch(index bleve.Index, maxBatchSize int) *FlushingBatch {
2222+ return &FlushingBatch{
2323+ maxBatchSize: maxBatchSize,
2424+ batch: index.NewBatch(),
2525+ index: index,
2626+ }
2727+}
2828+2929+// Index add a new index to batch
3030+func (b *FlushingBatch) Index(id string, data any) error {
3131+ if err := b.batch.Index(id, data); err != nil {
3232+ return err
3333+ }
3434+ return b.flushIfFull()
3535+}
3636+3737+// Delete add a delete index to batch
3838+func (b *FlushingBatch) Delete(id string) error {
3939+ b.batch.Delete(id)
4040+ return b.flushIfFull()
4141+}
4242+4343+func (b *FlushingBatch) flushIfFull() error {
4444+ if b.batch.Size() < b.maxBatchSize {
4545+ return nil
4646+ }
4747+ return b.Flush()
4848+}
4949+5050+// Flush submit the batch and create a new one
5151+func (b *FlushingBatch) Flush() error {
5252+ err := b.index.Batch(b.batch)
5353+ if err != nil {
5454+ return err
5555+ }
5656+ b.batch = b.index.NewBatch()
5757+ return nil
5858+}
···77)
8899type Star struct {
1010- StarredByDid string
1111- RepoAt syntax.ATURI
1212- Created time.Time
1313- Rkey string
1010+ Did string
1111+ RepoAt syntax.ATURI
1212+ Created time.Time
1313+ Rkey string
1414+}
14151515- // optionally, populate this when querying for reverse mappings
1616+// RepoStar is used for reverse mapping to repos
1717+type RepoStar struct {
1818+ Star
1619 Repo *Repo
1720}
2121+2222+// StringStar is used for reverse mapping to strings
2323+type StringStar struct {
2424+ Star
2525+ String *String
2626+}
···44 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
5566 <p class="text-lg">
77- tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
77+ Tangled is a decentralized Git hosting and collaboration platform.
88 </p>
99 <p class="text-lg">
1010- we envision a place where developers have complete ownership of their
1010+ We envision a place where developers have complete ownership of their
1111 code, open source communities can freely self-govern and most
1212 importantly, coding can be social and fun again.
1313 </p>
···37373838```
3939# oauth jwks should already be setup by the nix devshell:
4040-echo $TANGLED_OAUTH_JWKS
4141-{"crv":"P-256","d":"tELKHYH-Dko6qo4ozYcVPE1ah6LvXHFV2wpcWpi8ab4","kid":"1753352226","kty":"EC","x":"mRzYpLzAGq74kJez9UbgGfV040DxgsXpMbaVsdy8RZs","y":"azqqXzUYywMlLb2Uc5AVG18nuLXyPnXr4kI4T39eeIc"}
4040+echo $TANGLED_OAUTH_CLIENT_SECRET
4141+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
4242+4343+echo $TANGLED_OAUTH_CLIENT_KID
4444+1761667908
42454346# if not, you can set it up yourself:
4444-go build -o genjwks.out ./cmd/genjwks
4545-export TANGLED_OAUTH_JWKS="$(./genjwks.out)"
4747+goat key generate -t P-256
4848+Key Type: P-256 / secp256r1 / ES256 private key
4949+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
5050+ z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
5151+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
5252+ did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
5353+5454+# the secret key from above
5555+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
46564757# run redis in at a new shell to store oauth sessions
4858redis-server
···158168159169If for any reason you wish to disable either one of the
160170services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161161-`services.tangled-spindle.enable` (or
162162-`services.tangled-knot.enable`) to `false`.
171171+`services.tangled.spindle.enable` (or
172172+`services.tangled.knot.enable`) to `false`.
+1-1
docs/migrations.md
···4949latest revision, and change your config block like so:
50505151```diff
5252- services.tangled-knot = {
5252+ services.tangled.knot = {
5353 enable = true;
5454 server = {
5555- secretFile = /path/to/secret;
+19-1
docs/spindle/pipeline.md
···1919 - `push`: The workflow should run every time a commit is pushed to the repository.
2020 - `pull_request`: The workflow should run every time a pull request is made or updated.
2121 - `manual`: The workflow can be triggered manually.
2222-- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
2222+- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
2323+- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
23242425For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
2526···2930 branch: ["main", "develop"]
3031 - event: ["pull_request"]
3132 branch: ["main"]
3333+```
3434+3535+You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
3636+3737+```yaml
3838+when:
3939+ - event: ["push"]
4040+ tag: ["v*"]
4141+```
4242+4343+You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
4444+4545+```yaml
4646+when:
4747+ - event: ["push"]
4848+ branch: ["main", "release-*"]
4949+ tag: ["v*", "stable"]
3250```
33513452## Engine
···44 sqlite-lib,
55 src,
66}: let
77- version = "1.9.1-alpha";
77+ version = "1.11.0-alpha";
88in
99 buildGoApplication {
1010 pname = "knot";
+21-8
nix/vm.nix
···1010 if var == ""
1111 then throw "\$${name} must be defined, see docs/hacking.md for more details"
1212 else var;
1313+ envVarOr = name: default: let
1414+ var = builtins.getEnv name;
1515+ in
1616+ if var != ""
1717+ then var
1818+ else default;
1919+2020+ plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
2121+ jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
1322in
1423 nixpkgs.lib.nixosSystem {
1524 inherit system;
···7382 time.timeZone = "Europe/London";
7483 services.getty.autologinUser = "root";
7584 environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
7676- services.tangled-knot = {
8585+ services.tangled.knot = {
7786 enable = true;
7887 motd = "Welcome to the development knot!\n";
7988 server = {
8089 owner = envVar "TANGLED_VM_KNOT_OWNER";
8181- hostname = "localhost:6000";
9090+ hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6000";
9191+ plcUrl = plcUrl;
9292+ jetstreamEndpoint = jetstream;
8293 listenAddr = "0.0.0.0:6000";
8394 };
8495 };
8585- services.tangled-spindle = {
9696+ services.tangled.spindle = {
8697 enable = true;
8798 server = {
8899 owner = envVar "TANGLED_VM_SPINDLE_OWNER";
8989- hostname = "localhost:6555";
100100+ hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
101101+ plcUrl = plcUrl;
102102+ jetstreamEndpoint = jetstream;
90103 listenAddr = "0.0.0.0:6555";
91104 dev = true;
92105 queueSize = 100;
···99112 users = {
100113 # So we don't have to deal with permission clashing between
101114 # blank disk VMs and existing state
102102- users.${config.services.tangled-knot.gitUser}.uid = 666;
103103- groups.${config.services.tangled-knot.gitUser}.gid = 666;
115115+ users.${config.services.tangled.knot.gitUser}.uid = 666;
116116+ groups.${config.services.tangled.knot.gitUser}.gid = 666;
104117105118 # TODO: separate spindle user
106119 };
···120133 serviceConfig.PermissionsStartOnly = true;
121134 };
122135 in {
123123- knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
124124- spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
136136+ knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
137137+ spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
125138 };
126139 })
127140 ];
-26
scripts/appview.sh
···11-#!/bin/bash
22-33-# Variables
44-BINARY_NAME="appview"
55-BINARY_PATH=".bin/app"
66-SERVER="95.111.206.63"
77-USER="appview"
88-99-# SCP the binary to root's home directory
1010-scp "$BINARY_PATH" root@$SERVER:/root/"$BINARY_NAME"
1111-1212-# SSH into the server and perform the necessary operations
1313-ssh root@$SERVER <<EOF
1414- set -e # Exit on error
1515-1616- # Move binary to /usr/local/bin and set executable permissions
1717- mv /root/$BINARY_NAME /usr/local/bin/$BINARY_NAME
1818- chmod +x /usr/local/bin/$BINARY_NAME
1919-2020- su appview
2121- cd ~
2222- ./reset.sh
2323-EOF
2424-2525-echo "Deployment complete."
2626-
-5
scripts/generate-jwks.sh
···11-#! /usr/bin/env bash
22-33-set -e
44-55-go run ./cmd/genjwks/
···1313)
14141515type OpenBaoManager struct {
1616- client *vault.Client
1717- mountPath string
1818- logger *slog.Logger
1616+ client *vault.Client
1717+ mountPath string
1818+ logger *slog.Logger
1919+ connectionTimeout time.Duration
1920}
20212122type OpenBaoManagerOpt func(*OpenBaoManager)
···2627 }
2728}
28293030+func WithConnectionTimeout(timeout time.Duration) OpenBaoManagerOpt {
3131+ return func(v *OpenBaoManager) {
3232+ v.connectionTimeout = timeout
3333+ }
3434+}
3535+2936// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
3037// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
3138// The proxy handles all authentication automatically via Auto-Auth
···4350 }
44514552 manager := &OpenBaoManager{
4646- client: client,
4747- mountPath: "spindle", // default KV v2 mount path
4848- logger: logger,
5353+ client: client,
5454+ mountPath: "spindle", // default KV v2 mount path
5555+ logger: logger,
5656+ connectionTimeout: 10 * time.Second, // default connection timeout
4957 }
50585159 for _, opt := range opts {
···62706371// testConnection verifies that we can connect to the proxy
6472func (v *OpenBaoManager) testConnection() error {
6565- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
7373+ ctx, cancel := context.WithTimeout(context.Background(), v.connectionTimeout)
6674 defer cancel()
67756876 // try token self-lookup as a quick way to verify proxy works
+5-2
spindle/secrets/openbao_test.go
···152152 for _, tt := range tests {
153153 t.Run(tt.name, func(t *testing.T) {
154154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155155- manager, err := NewOpenBaoManager(tt.proxyAddr, logger, tt.opts...)
155155+ // Use shorter timeout for tests to avoid long waits
156156+ opts := append(tt.opts, WithConnectionTimeout(1*time.Second))
157157+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, opts...)
156158157159 if tt.expectError {
158160 assert.Error(t, err)
···596598597599 // All these will fail because no real proxy is running
598600 // but we can test that the configuration is properly accepted
599599- manager, err := NewOpenBaoManager(tt.proxyAddr, logger)
601601+ // Use shorter timeout for tests to avoid long waits
602602+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, WithConnectionTimeout(1*time.Second))
600603 assert.Error(t, err) // Expected because no real proxy
601604 assert.Nil(t, manager)
602605 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
+86-41
spindle/server.go
···4949 vault secrets.Manager
5050}
51515252-func Run(ctx context.Context) error {
5252+// New creates a new Spindle server with the provided configuration and engines.
5353+func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
5354 logger := log.FromContext(ctx)
5454-5555- cfg, err := config.Load(ctx)
5656- if err != nil {
5757- return fmt.Errorf("failed to load config: %w", err)
5858- }
59556056 d, err := db.Make(cfg.Server.DBPath)
6157 if err != nil {
6262- return fmt.Errorf("failed to setup db: %w", err)
5858+ return nil, fmt.Errorf("failed to setup db: %w", err)
6359 }
64606561 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
6662 if err != nil {
6767- return fmt.Errorf("failed to setup rbac enforcer: %w", err)
6363+ return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
6864 }
6965 e.E.EnableAutoSave(true)
7066···7470 switch cfg.Server.Secrets.Provider {
7571 case "openbao":
7672 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
7777- return fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
7373+ return nil, fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
7874 }
7975 vault, err = secrets.NewOpenBaoManager(
8076 cfg.Server.Secrets.OpenBao.ProxyAddr,
···8278 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
8379 )
8480 if err != nil {
8585- return fmt.Errorf("failed to setup openbao secrets provider: %w", err)
8181+ return nil, fmt.Errorf("failed to setup openbao secrets provider: %w", err)
8682 }
8783 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
8884 case "sqlite", "":
8985 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
9086 if err != nil {
9191- return fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
8787+ return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
9288 }
9389 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
9490 default:
9595- return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
9696- }
9797-9898- nixeryEng, err := nixery.New(ctx, cfg)
9999- if err != nil {
100100- return err
9191+ return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
10192 }
1029310394 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···110101 }
111102 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
112103 if err != nil {
113113- return fmt.Errorf("failed to setup jetstream client: %w", err)
104104+ return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
114105 }
115106 jc.AddDid(cfg.Server.Owner)
116107117108 // Check if the spindle knows about any Dids;
118109 dids, err := d.GetAllDids()
119110 if err != nil {
120120- return fmt.Errorf("failed to get all dids: %w", err)
111111+ return nil, fmt.Errorf("failed to get all dids: %w", err)
121112 }
122113 for _, d := range dids {
123114 jc.AddDid(d)
124115 }
125116126126- resolver := idresolver.DefaultResolver()
117117+ resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
127118128128- spindle := Spindle{
119119+ spindle := &Spindle{
129120 jc: jc,
130121 e: e,
131122 db: d,
132123 l: logger,
133124 n: &n,
134134- engs: map[string]models.Engine{"nixery": nixeryEng},
125125+ engs: engines,
135126 jq: jq,
136127 cfg: cfg,
137128 res: resolver,
···140131141132 err = e.AddSpindle(rbacDomain)
142133 if err != nil {
143143- return fmt.Errorf("failed to set rbac domain: %w", err)
134134+ return nil, fmt.Errorf("failed to set rbac domain: %w", err)
144135 }
145136 err = spindle.configureOwner()
146137 if err != nil {
147147- return err
138138+ return nil, err
148139 }
149140 logger.Info("owner set", "did", cfg.Server.Owner)
150150-151151- // starts a job queue runner in the background
152152- jq.Start()
153153- defer jq.Stop()
154154-155155- // Stop vault token renewal if it implements Stopper
156156- if stopper, ok := vault.(secrets.Stopper); ok {
157157- defer stopper.Stop()
158158- }
159141160142 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
161143 if err != nil {
162162- return fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
144144+ return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
163145 }
164146165147 err = jc.StartJetstream(ctx, spindle.ingest())
166148 if err != nil {
167167- return fmt.Errorf("failed to start jetstream consumer: %w", err)
149149+ return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
168150 }
169151170152 // for each incoming sh.tangled.pipeline, we execute
···177159 ccfg.CursorStore = cursorStore
178160 knownKnots, err := d.Knots()
179161 if err != nil {
180180- return err
162162+ return nil, err
181163 }
182164 for _, knot := range knownKnots {
183165 logger.Info("adding source start", "knot", knot)
···185167 }
186168 spindle.ks = eventconsumer.NewConsumer(*ccfg)
187169170170+ return spindle, nil
171171+}

// DB returns the database instance backing this Spindle.
func (s *Spindle) DB() *db.DB {
	return s.db
}

// Queue returns the background job queue instance.
func (s *Spindle) Queue() *queue.Queue {
	return s.jq
}

// Engines returns the map of available engines, keyed by engine name
// (e.g. "nixery").
func (s *Spindle) Engines() map[string]models.Engine {
	return s.engs
}

// Vault returns the secrets manager instance.
func (s *Spindle) Vault() secrets.Manager {
	return s.vault
}

// Notifier returns the notifier instance.
func (s *Spindle) Notifier() *notifier.Notifier {
	return s.n
}

// Enforcer returns the RBAC enforcer instance.
func (s *Spindle) Enforcer() *rbac.Enforcer {
	return s.e
}
202202+203203+// Start starts the Spindle server (blocking).
204204+func (s *Spindle) Start(ctx context.Context) error {
205205+ // starts a job queue runner in the background
206206+ s.jq.Start()
207207+ defer s.jq.Stop()
208208+209209+ // Stop vault token renewal if it implements Stopper
210210+ if stopper, ok := s.vault.(secrets.Stopper); ok {
211211+ defer stopper.Stop()
212212+ }
213213+188214 go func() {
189189- logger.Info("starting knot event consumer")
190190- spindle.ks.Start(ctx)
215215+ s.l.Info("starting knot event consumer")
216216+ s.ks.Start(ctx)
191217 }()
192218193193- logger.Info("starting spindle server", "address", cfg.Server.ListenAddr)
194194- logger.Error("server error", "error", http.ListenAndServe(cfg.Server.ListenAddr, spindle.Router()))
219219+ s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
220220+ return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
221221+}
222222+223223+func Run(ctx context.Context) error {
224224+ cfg, err := config.Load(ctx)
225225+ if err != nil {
226226+ return fmt.Errorf("failed to load config: %w", err)
227227+ }
228228+229229+ nixeryEng, err := nixery.New(ctx, cfg)
230230+ if err != nil {
231231+ return err
232232+ }
233233+234234+ s, err := New(ctx, cfg, map[string]models.Engine{
235235+ "nixery": nixeryEng,
236236+ })
237237+ if err != nil {
238238+ return err
239239+ }
195240196196- return nil
241241+ return s.Start(ctx)
197242}
198243199244func (s *Spindle) Router() http.Handler {
+5
spindle/stream.go
···213213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
214214 return fmt.Errorf("failed to write to websocket: %w", err)
215215 }
216216+ case <-time.After(30 * time.Second):
217217+ // send a keep-alive
218218+ if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
219219+ return fmt.Errorf("failed to write control: %w", err)
220220+ }
216221 }
217222 }
218223}