···15.env
16*.rdb
17.envrc
018# Created if following hacking.md
19genjwks.out
20/nix/vm-data
···15.env
16*.rdb
17.envrc
18+**/*.bleve
19# Created if following hacking.md
20genjwks.out
21/nix/vm-data
+3-1
api/tangled/actorprofile.go
···27 Location *string `json:"location,omitempty" cborgen:"location,omitempty"`
28 // pinnedRepositories: Any ATURI, it is up to appviews to validate these fields.
29 PinnedRepositories []string `json:"pinnedRepositories,omitempty" cborgen:"pinnedRepositories,omitempty"`
30- Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
0031}
···27 Location *string `json:"location,omitempty" cborgen:"location,omitempty"`
28 // pinnedRepositories: Any ATURI, it is up to appviews to validate these fields.
29 PinnedRepositories []string `json:"pinnedRepositories,omitempty" cborgen:"pinnedRepositories,omitempty"`
30+ // pronouns: Preferred gender pronouns.
31+ Pronouns *string `json:"pronouns,omitempty" cborgen:"pronouns,omitempty"`
32+ Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
33}
···26 }
2728 cw := cbg.NewCborWriter(w)
29+ fieldCount := 8
3031 if t.Description == nil {
32 fieldCount--
···41 }
4243 if t.PinnedRepositories == nil {
44+ fieldCount--
45+ }
46+47+ if t.Pronouns == nil {
48 fieldCount--
49 }
50···190 return err
191 }
192 if _, err := cw.WriteString(string(*t.Location)); err != nil {
193+ return err
194+ }
195+ }
196+ }
197+198+ // t.Pronouns (string) (string)
199+ if t.Pronouns != nil {
200+201+ if len("pronouns") > 1000000 {
202+ return xerrors.Errorf("Value in field \"pronouns\" was too long")
203+ }
204+205+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pronouns"))); err != nil {
206+ return err
207+ }
208+ if _, err := cw.WriteString(string("pronouns")); err != nil {
209+ return err
210+ }
211+212+ if t.Pronouns == nil {
213+ if _, err := cw.Write(cbg.CborNull); err != nil {
214+ return err
215+ }
216+ } else {
217+ if len(*t.Pronouns) > 1000000 {
218+ return xerrors.Errorf("Value in field t.Pronouns was too long")
219+ }
220+221+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Pronouns))); err != nil {
222+ return err
223+ }
224+ if _, err := cw.WriteString(string(*t.Pronouns)); err != nil {
225 return err
226 }
227 }
···466 }
467468 t.Location = (*string)(&sval)
469+ }
470+ }
471+ // t.Pronouns (string) (string)
472+ case "pronouns":
473+474+ {
475+ b, err := cr.ReadByte()
476+ if err != nil {
477+ return err
478+ }
479+ if b != cbg.CborNull[0] {
480+ if err := cr.UnreadByte(); err != nil {
481+ return err
482+ }
483+484+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
485+ if err != nil {
486+ return err
487+ }
488+489+ t.Pronouns = (*string)(&sval)
490 }
491 }
492 // t.Description (string) (string)
···5863 }
58645865 cw := cbg.NewCborWriter(w)
5866+ fieldCount := 10
58675868 if t.Description == nil {
5869 fieldCount--
···5878 }
58795880 if t.Spindle == nil {
5881+ fieldCount--
5882+ }
5883+5884+ if t.Topics == nil {
5885+ fieldCount--
5886+ }
5887+5888+ if t.Website == nil {
5889 fieldCount--
5890 }
5891···6026 }
6027 }
60286029+ // t.Topics ([]string) (slice)
6030+ if t.Topics != nil {
6031+6032+ if len("topics") > 1000000 {
6033+ return xerrors.Errorf("Value in field \"topics\" was too long")
6034+ }
6035+6036+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("topics"))); err != nil {
6037+ return err
6038+ }
6039+ if _, err := cw.WriteString(string("topics")); err != nil {
6040+ return err
6041+ }
6042+6043+ if len(t.Topics) > 8192 {
6044+ return xerrors.Errorf("Slice value in field t.Topics was too long")
6045+ }
6046+6047+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Topics))); err != nil {
6048+ return err
6049+ }
6050+ for _, v := range t.Topics {
6051+ if len(v) > 1000000 {
6052+ return xerrors.Errorf("Value in field v was too long")
6053+ }
6054+6055+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
6056+ return err
6057+ }
6058+ if _, err := cw.WriteString(string(v)); err != nil {
6059+ return err
6060+ }
6061+6062+ }
6063+ }
6064+6065 // t.Spindle (string) (string)
6066 if t.Spindle != nil {
6067···6094 }
6095 }
60966097+ // t.Website (string) (string)
6098+ if t.Website != nil {
6099+6100+ if len("website") > 1000000 {
6101+ return xerrors.Errorf("Value in field \"website\" was too long")
6102+ }
6103+6104+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("website"))); err != nil {
6105+ return err
6106+ }
6107+ if _, err := cw.WriteString(string("website")); err != nil {
6108+ return err
6109+ }
6110+6111+ if t.Website == nil {
6112+ if _, err := cw.Write(cbg.CborNull); err != nil {
6113+ return err
6114+ }
6115+ } else {
6116+ if len(*t.Website) > 1000000 {
6117+ return xerrors.Errorf("Value in field t.Website was too long")
6118+ }
6119+6120+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Website))); err != nil {
6121+ return err
6122+ }
6123+ if _, err := cw.WriteString(string(*t.Website)); err != nil {
6124+ return err
6125+ }
6126+ }
6127+ }
6128+6129 // t.CreatedAt (string) (string)
6130 if len("createdAt") > 1000000 {
6131 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···6318 t.Source = (*string)(&sval)
6319 }
6320 }
6321+ // t.Topics ([]string) (slice)
6322+ case "topics":
6323+6324+ maj, extra, err = cr.ReadHeader()
6325+ if err != nil {
6326+ return err
6327+ }
6328+6329+ if extra > 8192 {
6330+ return fmt.Errorf("t.Topics: array too large (%d)", extra)
6331+ }
6332+6333+ if maj != cbg.MajArray {
6334+ return fmt.Errorf("expected cbor array")
6335+ }
6336+6337+ if extra > 0 {
6338+ t.Topics = make([]string, extra)
6339+ }
6340+6341+ for i := 0; i < int(extra); i++ {
6342+ {
6343+ var maj byte
6344+ var extra uint64
6345+ var err error
6346+ _ = maj
6347+ _ = extra
6348+ _ = err
6349+6350+ {
6351+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
6352+ if err != nil {
6353+ return err
6354+ }
6355+6356+ t.Topics[i] = string(sval)
6357+ }
6358+6359+ }
6360+ }
6361 // t.Spindle (string) (string)
6362 case "spindle":
6363···6377 }
63786379 t.Spindle = (*string)(&sval)
6380+ }
6381+ }
6382+ // t.Website (string) (string)
6383+ case "website":
6384+6385+ {
6386+ b, err := cr.ReadByte()
6387+ if err != nil {
6388+ return err
6389+ }
6390+ if b != cbg.CborNull[0] {
6391+ if err := cr.UnreadByte(); err != nil {
6392+ return err
6393+ }
6394+6395+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
6396+ if err != nil {
6397+ return err
6398+ }
6399+6400+ t.Website = (*string)(&sval)
6401 }
6402 }
6403 // t.CreatedAt (string) (string)
+13-1
api/tangled/repoblob.go
···30// RepoBlob_Output is the output of a sh.tangled.repo.blob call.
31type RepoBlob_Output struct {
32 // content: File content (base64 encoded for binary files)
33- Content string `json:"content" cborgen:"content"`
34 // encoding: Content encoding
35 Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
36 // isBinary: Whether the file is binary
···44 Ref string `json:"ref" cborgen:"ref"`
45 // size: File size in bytes
46 Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
0047}
4849// RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···54 Name string `json:"name" cborgen:"name"`
55 // when: Author timestamp
56 When string `json:"when" cborgen:"when"`
000000000057}
5859// RepoBlob calls the XRPC method "sh.tangled.repo.blob".
···30// RepoBlob_Output is the output of a sh.tangled.repo.blob call.
31type RepoBlob_Output struct {
32 // content: File content (base64 encoded for binary files)
33+ Content *string `json:"content,omitempty" cborgen:"content,omitempty"`
34 // encoding: Content encoding
35 Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
36 // isBinary: Whether the file is binary
···44 Ref string `json:"ref" cborgen:"ref"`
45 // size: File size in bytes
46 Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
47+ // submodule: Submodule information if path is a submodule
48+ Submodule *RepoBlob_Submodule `json:"submodule,omitempty" cborgen:"submodule,omitempty"`
49}
5051// RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···56 Name string `json:"name" cborgen:"name"`
57 // when: Author timestamp
58 When string `json:"when" cborgen:"when"`
59+}
60+61+// RepoBlob_Submodule is a "submodule" in the sh.tangled.repo.blob schema.
62+type RepoBlob_Submodule struct {
63+ // branch: Branch to track in the submodule
64+ Branch *string `json:"branch,omitempty" cborgen:"branch,omitempty"`
65+ // name: Submodule name
66+ Name string `json:"name" cborgen:"name"`
67+ // url: Submodule repository URL
68+ Url string `json:"url" cborgen:"url"`
69}
7071// RepoBlob calls the XRPC method "sh.tangled.repo.blob".
-4
api/tangled/repotree.go
···4748// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
49type RepoTree_TreeEntry struct {
50- // is_file: Whether this entry is a file
51- Is_file bool `json:"is_file" cborgen:"is_file"`
52- // is_subtree: Whether this entry is a directory/subtree
53- Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
54 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
55 // mode: File mode
56 Mode string `json:"mode" cborgen:"mode"`
···4748// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
49type RepoTree_TreeEntry struct {
000050 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
51 // mode: File mode
52 Mode string `json:"mode" cborgen:"mode"`
+4
api/tangled/tangledrepo.go
···30 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
31 // spindle: CI runner to send jobs to and receive results from
32 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
000033}
···30 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
31 // spindle: CI runner to send jobs to and receive results from
32 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
33+ // topics: Topics related to the repo
34+ Topics []string `json:"topics,omitempty" cborgen:"topics,omitempty"`
35+ // website: Any URI related to the repo
36+ Website *string `json:"website,omitempty" cborgen:"website,omitempty"`
37}
···60 whereClause += " AND " + condition
61 }
62 }
000006364 query := fmt.Sprintf(`
65 select id, recipient_did, actor_did, type, entity_type, entity_id, read, created, repo_id, issue_id, pull_id
66 from notifications
67 %s
68 order by created desc
69- limit ? offset ?
70- `, whereClause)
71-72- args = append(args, page.Limit, page.Offset)
7374 rows, err := e.QueryContext(context.Background(), query, args...)
75 if err != nil {
···131 select
132 n.id, n.recipient_did, n.actor_did, n.type, n.entity_type, n.entity_id,
133 n.read, n.created, n.repo_id, n.issue_id, n.pull_id,
134- r.id as r_id, r.did as r_did, r.name as r_name, r.description as r_description,
135 i.id as i_id, i.did as i_did, i.issue_id as i_issue_id, i.title as i_title, i.open as i_open,
136 p.id as p_id, p.owner_did as p_owner_did, p.pull_id as p_pull_id, p.title as p_title, p.state as p_state
137 from notifications n
···160 var issue models.Issue
161 var pull models.Pull
162 var rId, iId, pId sql.NullInt64
163- var rDid, rName, rDescription sql.NullString
164 var iDid sql.NullString
165 var iIssueId sql.NullInt64
166 var iTitle sql.NullString
···173 err := rows.Scan(
174 &n.ID, &n.RecipientDid, &n.ActorDid, &typeStr, &n.EntityType, &n.EntityId,
175 &n.Read, &createdStr, &n.RepoId, &n.IssueId, &n.PullId,
176- &rId, &rDid, &rName, &rDescription,
177 &iId, &iDid, &iIssueId, &iTitle, &iOpen,
178 &pId, &pOwnerDid, &pPullId, &pTitle, &pState,
179 )
···200 }
201 if rDescription.Valid {
202 repo.Description = rDescription.String
000000203 }
204 nwe.Repo = &repo
205 }
···391 pull_created,
392 pull_commented,
393 followed,
0394 pull_merged,
395 issue_closed,
396 email_notifications
···416 &prefs.PullCreated,
417 &prefs.PullCommented,
418 &prefs.Followed,
0419 &prefs.PullMerged,
420 &prefs.IssueClosed,
421 &prefs.EmailNotifications,
···437 query := `
438 INSERT OR REPLACE INTO notification_preferences
439 (user_did, repo_starred, issue_created, issue_commented, pull_created,
440- pull_commented, followed, pull_merged, issue_closed, email_notifications)
441- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
0442 `
443444 result, err := d.DB.ExecContext(ctx, query,
···449 prefs.PullCreated,
450 prefs.PullCommented,
451 prefs.Followed,
0452 prefs.PullMerged,
453 prefs.IssueClosed,
454 prefs.EmailNotifications,
···60 whereClause += " AND " + condition
61 }
62 }
63+ pageClause := ""
64+ if page.Limit > 0 {
65+ pageClause = " limit ? offset ? "
66+ args = append(args, page.Limit, page.Offset)
67+ }
6869 query := fmt.Sprintf(`
70 select id, recipient_did, actor_did, type, entity_type, entity_id, read, created, repo_id, issue_id, pull_id
71 from notifications
72 %s
73 order by created desc
74+ %s
75+ `, whereClause, pageClause)
007677 rows, err := e.QueryContext(context.Background(), query, args...)
78 if err != nil {
···134 select
135 n.id, n.recipient_did, n.actor_did, n.type, n.entity_type, n.entity_id,
136 n.read, n.created, n.repo_id, n.issue_id, n.pull_id,
137+ r.id as r_id, r.did as r_did, r.name as r_name, r.description as r_description, r.website as r_website, r.topics as r_topics,
138 i.id as i_id, i.did as i_did, i.issue_id as i_issue_id, i.title as i_title, i.open as i_open,
139 p.id as p_id, p.owner_did as p_owner_did, p.pull_id as p_pull_id, p.title as p_title, p.state as p_state
140 from notifications n
···163 var issue models.Issue
164 var pull models.Pull
165 var rId, iId, pId sql.NullInt64
166+ var rDid, rName, rDescription, rWebsite, rTopicStr sql.NullString
167 var iDid sql.NullString
168 var iIssueId sql.NullInt64
169 var iTitle sql.NullString
···176 err := rows.Scan(
177 &n.ID, &n.RecipientDid, &n.ActorDid, &typeStr, &n.EntityType, &n.EntityId,
178 &n.Read, &createdStr, &n.RepoId, &n.IssueId, &n.PullId,
179+ &rId, &rDid, &rName, &rDescription, &rWebsite, &rTopicStr,
180 &iId, &iDid, &iIssueId, &iTitle, &iOpen,
181 &pId, &pOwnerDid, &pPullId, &pTitle, &pState,
182 )
···203 }
204 if rDescription.Valid {
205 repo.Description = rDescription.String
206+ }
207+ if rWebsite.Valid {
208+ repo.Website = rWebsite.String
209+ }
210+ if rTopicStr.Valid {
211+ repo.Topics = strings.Fields(rTopicStr.String)
212 }
213 nwe.Repo = &repo
214 }
···400 pull_created,
401 pull_commented,
402 followed,
403+ user_mentioned,
404 pull_merged,
405 issue_closed,
406 email_notifications
···426 &prefs.PullCreated,
427 &prefs.PullCommented,
428 &prefs.Followed,
429+ &prefs.UserMentioned,
430 &prefs.PullMerged,
431 &prefs.IssueClosed,
432 &prefs.EmailNotifications,
···448 query := `
449 INSERT OR REPLACE INTO notification_preferences
450 (user_did, repo_starred, issue_created, issue_commented, pull_created,
451+ pull_commented, followed, user_mentioned, pull_merged, issue_closed,
452+ email_notifications)
453+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
454 `
455456 result, err := d.DB.ExecContext(ctx, query,
···461 prefs.PullCreated,
462 prefs.PullCommented,
463 prefs.Followed,
464+ prefs.UserMentioned,
465 prefs.PullMerged,
466 prefs.IssueClosed,
467 prefs.EmailNotifications,
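The conditional `pageClause` makes pagination opt-in: a zero `page.Limit` skips the `limit ? offset ?` clause and returns every matching row. A hedged call-site sketch; `GetNotifications`, `Execer`, and `models.Page` are names inferred from the query above, not confirmed by this diff.

```go
// Hypothetical call sites illustrating the opt-in pagination.
func listNotifications(e Execer) error {
	// Third page of 25: pageClause becomes " limit ? offset ? ".
	paged, err := GetNotifications(e, models.Page{Limit: 25, Offset: 50})
	if err != nil {
		return err
	}
	_ = paged

	// Zero-valued page: pageClause stays empty, all matching rows return.
	all, err := GetNotifications(e, models.Page{})
	if err != nil {
		return err
	}
	_ = all
	return nil
}
```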
+4-2
appview/db/pipeline.go
···168169// this is a mega query, but the most useful one:
170// get N pipelines, for each one get the latest status of its N workflows
171-func GetPipelineStatuses(e Execer, filters ...filter) ([]models.Pipeline, error) {
172 var conditions []string
173 var args []any
174 for _, filter := range filters {
···205 join
206 triggers t ON p.trigger_id = t.id
207 %s
208- `, whereClause)
00209210 rows, err := e.Query(query, args...)
211 if err != nil {
···168169// this is a mega query, but the most useful one:
170// get N pipelines, for each one get the latest status of its N workflows
171+func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
172 var conditions []string
173 var args []any
174 for _, filter := range filters {
···205 join
206 triggers t ON p.trigger_id = t.id
207 %s
208+ order by p.created desc
209+ limit %d
210+ `, whereClause, limit)
211212 rows, err := e.Query(query, args...)
213 if err != nil {
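Call sites of `GetPipelineStatuses` now pass the row cap explicitly and get results newest-first. A hedged sketch of the updated call; `FilterEq` is an assumed filter constructor, not necessarily what the `db` package exposes.

```go
// Hypothetical call site for the new signature.
func recentPipelines(e Execer, did string) ([]models.Pipeline, error) {
	// At most 20 pipelines, ordered by p.created desc.
	return GetPipelineStatuses(e, 20, FilterEq("repo_owner", did))
}
```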
+26-6
appview/db/profile.go
···129 did,
130 description,
131 include_bluesky,
132- location
0133 )
134- values (?, ?, ?, ?)`,
135 profile.Did,
136 profile.Description,
137 includeBskyValue,
138 profile.Location,
0139 )
140141 if err != nil {
···216 did,
217 description,
218 include_bluesky,
219- location
0220 from
221 profile
222 %s`,
···231 for rows.Next() {
232 var profile models.Profile
233 var includeBluesky int
0234235- err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location)
236 if err != nil {
237 return nil, err
238 }
239240 if includeBluesky != 0 {
241 profile.IncludeBluesky = true
0000242 }
243244 profileMap[profile.Did] = &profile
···302303func GetProfile(e Execer, did string) (*models.Profile, error) {
304 var profile models.Profile
00305 profile.Did = did
306307 includeBluesky := 0
0308 err := e.QueryRow(
309- `select description, include_bluesky, location from profile where did = ?`,
310 did,
311- ).Scan(&profile.Description, &includeBluesky, &profile.Location)
312 if err == sql.ErrNoRows {
313 profile := models.Profile{}
314 profile.Did = did
···321322 if includeBluesky != 0 {
323 profile.IncludeBluesky = true
0000324 }
325326 rows, err := e.Query(`select link from profile_links where did = ?`, did)
···412 // ensure location is not too long
413 if len(profile.Location) > 40 {
414 return fmt.Errorf("Entered location is too long.")
00000415 }
416417 // ensure links are in order
···129 did,
130 description,
131 include_bluesky,
132+ location,
133+ pronouns
134 )
135+ values (?, ?, ?, ?, ?)`,
136 profile.Did,
137 profile.Description,
138 includeBskyValue,
139 profile.Location,
140+ profile.Pronouns,
141 )
142143 if err != nil {
···218 did,
219 description,
220 include_bluesky,
221+ location,
222+ pronouns
223 from
224 profile
225 %s`,
···234 for rows.Next() {
235 var profile models.Profile
236 var includeBluesky int
237+ var pronouns sql.Null[string]
238239+ err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location, &pronouns)
240 if err != nil {
241 return nil, err
242 }
243244 if includeBluesky != 0 {
245 profile.IncludeBluesky = true
246+ }
247+248+ if pronouns.Valid {
249+ profile.Pronouns = pronouns.V
250 }
251252 profileMap[profile.Did] = &profile
···310311func GetProfile(e Execer, did string) (*models.Profile, error) {
312 var profile models.Profile
313+ var pronouns sql.Null[string]
314+315 profile.Did = did
316317 includeBluesky := 0
318+319 err := e.QueryRow(
320+ `select description, include_bluesky, location, pronouns from profile where did = ?`,
321 did,
322+ ).Scan(&profile.Description, &includeBluesky, &profile.Location, &pronouns)
323 if err == sql.ErrNoRows {
324 profile := models.Profile{}
325 profile.Did = did
···332333 if includeBluesky != 0 {
334 profile.IncludeBluesky = true
335+ }
336+337+ if pronouns.Valid {
338+ profile.Pronouns = pronouns.V
339 }
340341 rows, err := e.Query(`select link from profile_links where did = ?`, did)
···427 // ensure location is not too long
428 if len(profile.Location) > 40 {
429 return fmt.Errorf("Entered location is too long.")
430+ }
431+432+ // ensure pronouns are not too long
433+ if len(profile.Pronouns) > 40 {
434+ return fmt.Errorf("Entered pronouns are too long.")
435 }
436437 // ensure links are in order
···1+// Copyright 2021 The Gitea Authors. All rights reserved.
2+// SPDX-License-Identifier: MIT
3+4+package bleveutil
5+6+import (
7+ "github.com/blevesearch/bleve/v2"
8+)
9+10+// FlushingBatch is a batch of operations that automatically flushes to the
11+// underlying index once it reaches a certain size.
12+type FlushingBatch struct {
13+ maxBatchSize int
14+ batch *bleve.Batch
15+ index bleve.Index
16+}
17+18+// NewFlushingBatch creates a new flushing batch for the specified index. Once
19+// the number of operations in the batch reaches the specified limit, the batch
20+// automatically flushes its operations to the index.
21+func NewFlushingBatch(index bleve.Index, maxBatchSize int) *FlushingBatch {
22+ return &FlushingBatch{
23+ maxBatchSize: maxBatchSize,
24+ batch: index.NewBatch(),
25+ index: index,
26+ }
27+}
28+29+// Index adds an index operation to the batch
30+func (b *FlushingBatch) Index(id string, data any) error {
31+ if err := b.batch.Index(id, data); err != nil {
32+ return err
33+ }
34+ return b.flushIfFull()
35+}
36+37+// Delete adds a delete operation to the batch
38+func (b *FlushingBatch) Delete(id string) error {
39+ b.batch.Delete(id)
40+ return b.flushIfFull()
41+}
42+43+func (b *FlushingBatch) flushIfFull() error {
44+ if b.batch.Size() < b.maxBatchSize {
45+ return nil
46+ }
47+ return b.Flush()
48+}
49+50+// Flush submits the batch and creates a new one
51+func (b *FlushingBatch) Flush() error {
52+ err := b.index.Batch(b.batch)
53+ if err != nil {
54+ return err
55+ }
56+ b.batch = b.index.NewBatch()
57+ return nil
58+}
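Since `FlushingBatch` is new in this change, a usage sketch may help: documents are indexed normally, the batch flushes itself every `maxBatchSize` operations, and one final `Flush` handles the remainder. The in-memory index, batch size, and the import path of the new package are assumptions for illustration, not taken from this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/blevesearch/bleve/v2"

	"tangled.sh/tangled.sh/core/bleveutil" // import path assumed; adjust to the real module path
)

func main() {
	// Throwaway in-memory index, just for the example.
	idx, err := bleve.NewMemOnly(bleve.NewIndexMapping())
	if err != nil {
		log.Fatal(err)
	}
	defer idx.Close()

	// Flushes to the index automatically after every 200 queued operations;
	// the batch size is an arbitrary choice here.
	batch := bleveutil.NewFlushingBatch(idx, 200)
	docs := []map[string]string{{"body": "hello"}, {"body": "world"}}
	for i, doc := range docs {
		if err := batch.Index(fmt.Sprintf("doc-%d", i), doc); err != nil {
			log.Fatal(err)
		}
	}
	// Flush whatever remains in the final partial batch.
	if err := batch.Flush(); err != nil {
		log.Fatal(err)
	}
}
```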
···40 commented on an issue
41 {{ else if eq .Type "issue_closed" }}
42 closed an issue
0043 {{ else if eq .Type "pull_created" }}
44 created a pull request
45 {{ else if eq .Type "pull_commented" }}
···48 merged a pull request
49 {{ else if eq .Type "pull_closed" }}
50 closed a pull request
0051 {{ else if eq .Type "followed" }}
52 followed you
0053 {{ else }}
54 {{ end }}
55{{ end }}
···40 commented on an issue
41 {{ else if eq .Type "issue_closed" }}
42 closed an issue
43+ {{ else if eq .Type "issue_reopen" }}
44+ reopened an issue
45 {{ else if eq .Type "pull_created" }}
46 created a pull request
47 {{ else if eq .Type "pull_commented" }}
···50 merged a pull request
51 {{ else if eq .Type "pull_closed" }}
52 closed a pull request
53+ {{ else if eq .Type "pull_reopen" }}
54+ reopened a pull request
55 {{ else if eq .Type "followed" }}
56 followed you
57+ {{ else if eq .Type "user_mentioned" }}
58+ mentioned you
59 {{ else }}
60 {{ end }}
61{{ end }}
···3536 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38- <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
39 <p><span class="{{$bullet}}">4</span>Push!</p>
40 </div>
41 </div>
···3536 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38+ <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
39 <p><span class="{{$bullet}}">4</span>Push!</p>
40 </div>
41 </div>
···4 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
56 <p class="text-lg">
7- tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
8 </p>
9 <p class="text-lg">
10- we envision a place where developers have complete ownership of their
11 code, open source communities can freely self-govern and most
12 importantly, coding can be social and fun again.
13 </p>
···4 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
56 <p class="text-lg">
7+ Tangled is a decentralized Git hosting and collaboration platform.
8 </p>
9 <p class="text-lg">
10+ We envision a place where developers have complete ownership of their
11 code, open source communities can freely self-govern and most
12 importantly, coding can be social and fun again.
13 </p>
···3738```
39# oauth jwks should already be set up by the nix devshell:
40-echo $TANGLED_OAUTH_JWKS
41-{"crv":"P-256","d":"tELKHYH-Dko6qo4ozYcVPE1ah6LvXHFV2wpcWpi8ab4","kid":"1753352226","kty":"EC","x":"mRzYpLzAGq74kJez9UbgGfV040DxgsXpMbaVsdy8RZs","y":"azqqXzUYywMlLb2Uc5AVG18nuLXyPnXr4kI4T39eeIc"}
0004243# if not, you can set it up yourself:
44-go build -o genjwks.out ./cmd/genjwks
45-export TANGLED_OAUTH_JWKS="$(./genjwks.out)"
4647# run redis in a new shell to store oauth sessions
48redis-server
···158159If for any reason you wish to disable either one of the
160services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
161-`services.tangled-spindle.enable` (or
162-`services.tangled-knot.enable`) to `false`.
···3738```
39# oauth jwks should already be set up by the nix devshell:
40+echo $TANGLED_OAUTH_CLIENT_SECRET
41+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42+43+echo $TANGLED_OAUTH_CLIENT_KID
44+1761667908
4546# if not, you can set it up yourself:
47+goat key generate -t P-256
48+Key Type: P-256 / secp256r1 / ES256 private key
49+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50+ z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52+ did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53+54+# the secret key from above
55+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
5657# run redis in a new shell to store oauth sessions
58redis-server
···168169If for any reason you wish to disable either one of the
170services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171+`services.tangled.spindle.enable` (or
172+`services.tangled.knot.enable`) to `false`.
+1-1
docs/migrations.md
···49latest revision, and change your config block like so:
5051```diff
52- services.tangled-knot = {
53 enable = true;
54 server = {
55- secretFile = /path/to/secret;
···49latest revision, and change your config block like so:
5051```diff
52+ services.tangled.knot = {
53 enable = true;
54 server = {
55- secretFile = /path/to/secret;
+19-1
docs/spindle/pipeline.md
···19 - `push`: The workflow should run every time a commit is pushed to the repository.
20 - `pull_request`: The workflow should run every time a pull request is made or updated.
21 - `manual`: The workflow can be triggered manually.
22-- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
02324For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
25···29 branch: ["main", "develop"]
30 - event: ["pull_request"]
31 branch: ["main"]
0000000000000000032```
3334## Engine
···19 - `push`: The workflow should run every time a commit is pushed to the repository.
20 - `pull_request`: The workflow should run every time a pull request is made or updated.
21 - `manual`: The workflow can be triggered manually.
22+- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23+- `tag`: Defines which tags the workflow should run for. Only used with the `push` event: when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
2425For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26···30 branch: ["main", "develop"]
31 - event: ["pull_request"]
32 branch: ["main"]
33+```
34+35+You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36+37+```yaml
38+when:
39+ - event: ["push"]
40+ tag: ["v*"]
41+```
42+43+You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44+45+```yaml
46+when:
47+ - event: ["push"]
48+ branch: ["main", "release-*"]
49+ tag: ["v*", "stable"]
50```
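For a concrete sense of how these constraints might be evaluated, here is a minimal Go sketch that tests a pushed ref against branch and tag patterns. It uses the standard library's `path.Match`, which covers single-star globs like `release-*`; real `**` support would need a dedicated matcher, and spindle's actual implementation may differ.

```go
package main

import (
	"fmt"
	"path"
)

// matchAny reports whether ref matches any of the glob patterns.
func matchAny(patterns []string, ref string) bool {
	for _, p := range patterns {
		if ok, _ := path.Match(p, ref); ok {
			return true
		}
	}
	return false
}

func main() {
	branches := []string{"main", "release-*"}
	tags := []string{"v*", "stable"}

	// A push triggers the workflow if either constraint matches.
	fmt.Println(matchAny(branches, "release-2.0")) // true
	fmt.Println(matchAny(tags, "v1.4.2"))          // true
	fmt.Println(matchAny(branches, "feature/x"))   // false
}
```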
5152## Engine
···4 sqlite-lib,
5 src,
6}: let
7- version = "1.9.1-alpha";
8in
9 buildGoApplication {
10 pname = "knot";
···4 sqlite-lib,
5 src,
6}: let
7+ version = "1.11.0-alpha";
8in
9 buildGoApplication {
10 pname = "knot";
+21-8
nix/vm.nix
···10 if var == ""
11 then throw "\$${name} must be defined, see docs/hacking.md for more details"
12 else var;
00000000013in
14 nixpkgs.lib.nixosSystem {
15 inherit system;
···73 time.timeZone = "Europe/London";
74 services.getty.autologinUser = "root";
75 environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
76- services.tangled-knot = {
77 enable = true;
78 motd = "Welcome to the development knot!\n";
79 server = {
80 owner = envVar "TANGLED_VM_KNOT_OWNER";
81- hostname = "localhost:6000";
0082 listenAddr = "0.0.0.0:6000";
83 };
84 };
85- services.tangled-spindle = {
86 enable = true;
87 server = {
88 owner = envVar "TANGLED_VM_SPINDLE_OWNER";
89- hostname = "localhost:6555";
0090 listenAddr = "0.0.0.0:6555";
91 dev = true;
92 queueSize = 100;
···99 users = {
100 # So we don't have to deal with permission clashing between
101 # blank disk VMs and existing state
102- users.${config.services.tangled-knot.gitUser}.uid = 666;
103- groups.${config.services.tangled-knot.gitUser}.gid = 666;
104105 # TODO: separate spindle user
106 };
···120 serviceConfig.PermissionsStartOnly = true;
121 };
122 in {
123- knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
124- spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
125 };
126 })
127 ];
···10 if var == ""
11 then throw "\$${name} must be defined, see docs/hacking.md for more details"
12 else var;
13+ envVarOr = name: default: let
14+ var = builtins.getEnv name;
15+ in
16+ if var != ""
17+ then var
18+ else default;
19+20+ plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
21+ jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
22in
23 nixpkgs.lib.nixosSystem {
24 inherit system;
···82 time.timeZone = "Europe/London";
83 services.getty.autologinUser = "root";
84 environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
85+ services.tangled.knot = {
86 enable = true;
87 motd = "Welcome to the development knot!\n";
88 server = {
89 owner = envVar "TANGLED_VM_KNOT_OWNER";
90+ hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6000";
91+ plcUrl = plcUrl;
92+ jetstreamEndpoint = jetstream;
93 listenAddr = "0.0.0.0:6000";
94 };
95 };
96+ services.tangled.spindle = {
97 enable = true;
98 server = {
99 owner = envVar "TANGLED_VM_SPINDLE_OWNER";
100+ hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
101+ plcUrl = plcUrl;
102+ jetstreamEndpoint = jetstream;
103 listenAddr = "0.0.0.0:6555";
104 dev = true;
105 queueSize = 100;
···112 users = {
113 # So we don't have to deal with permission clashing between
114 # blank disk VMs and existing state
115+ users.${config.services.tangled.knot.gitUser}.uid = 666;
116+ groups.${config.services.tangled.knot.gitUser}.gid = 666;
117118 # TODO: separate spindle user
119 };
···133 serviceConfig.PermissionsStartOnly = true;
134 };
135 in {
136+ knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
137+ spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
138 };
139 })
140 ];
-26
scripts/appview.sh
···1-#!/bin/bash
2-3-# Variables
4-BINARY_NAME="appview"
5-BINARY_PATH=".bin/app"
6-SERVER="95.111.206.63"
7-USER="appview"
8-9-# SCP the binary to root's home directory
10-scp "$BINARY_PATH" root@$SERVER:/root/"$BINARY_NAME"
11-12-# SSH into the server and perform the necessary operations
13-ssh root@$SERVER <<EOF
14- set -e # Exit on error
15-16- # Move binary to /usr/local/bin and set executable permissions
17- mv /root/$BINARY_NAME /usr/local/bin/$BINARY_NAME
18- chmod +x /usr/local/bin/$BINARY_NAME
19-20- su appview
21- cd ~
22- ./reset.sh
23-EOF
24-25-echo "Deployment complete."
26-
-5
scripts/generate-jwks.sh
···1-#! /usr/bin/env bash
2-3-set -e
4-5-go run ./cmd/genjwks/
···13)
1415type OpenBaoManager struct {
16- client *vault.Client
17- mountPath string
18- logger *slog.Logger
019}
2021type OpenBaoManagerOpt func(*OpenBaoManager)
···26 }
27}
2800000029// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
30// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
31// The proxy handles all authentication automatically via Auto-Auth
···43 }
4445 manager := &OpenBaoManager{
46- client: client,
47- mountPath: "spindle", // default KV v2 mount path
48- logger: logger,
049 }
5051 for _, opt := range opts {
···6263// testConnection verifies that we can connect to the proxy
64func (v *OpenBaoManager) testConnection() error {
65- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
66 defer cancel()
6768 // try token self-lookup as a quick way to verify proxy works
···13)
1415type OpenBaoManager struct {
16+ client *vault.Client
17+ mountPath string
18+ logger *slog.Logger
19+ connectionTimeout time.Duration
20}
2122type OpenBaoManagerOpt func(*OpenBaoManager)
···27 }
28}
2930+func WithConnectionTimeout(timeout time.Duration) OpenBaoManagerOpt {
31+ return func(v *OpenBaoManager) {
32+ v.connectionTimeout = timeout
33+ }
34+}
35+36// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
37// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
38// The proxy handles all authentication automatically via Auto-Auth
···50 }
5152 manager := &OpenBaoManager{
53+ client: client,
54+ mountPath: "spindle", // default KV v2 mount path
55+ logger: logger,
56+ connectionTimeout: 10 * time.Second, // default connection timeout
57 }
5859 for _, opt := range opts {
···7071// testConnection verifies that we can connect to the proxy
72func (v *OpenBaoManager) testConnection() error {
73+ ctx, cancel := context.WithTimeout(context.Background(), v.connectionTimeout)
74 defer cancel()
7576 // try token self-lookup as a quick way to verify proxy works
+5-2
spindle/secrets/openbao_test.go
···152 for _, tt := range tests {
153 t.Run(tt.name, func(t *testing.T) {
154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155- manager, err := NewOpenBaoManager(tt.proxyAddr, logger, tt.opts...)
00156157 if tt.expectError {
158 assert.Error(t, err)
···596597 // All these will fail because no real proxy is running
598 // but we can test that the configuration is properly accepted
599- manager, err := NewOpenBaoManager(tt.proxyAddr, logger)
0600 assert.Error(t, err) // Expected because no real proxy
601 assert.Nil(t, manager)
602 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
···152 for _, tt := range tests {
153 t.Run(tt.name, func(t *testing.T) {
154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155+ // Use shorter timeout for tests to avoid long waits
156+ opts := append(tt.opts, WithConnectionTimeout(1*time.Second))
157+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, opts...)
158159 if tt.expectError {
160 assert.Error(t, err)
···598599 // All these will fail because no real proxy is running
600 // but we can test that the configuration is properly accepted
601+ // Use shorter timeout for tests to avoid long waits
602+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, WithConnectionTimeout(1*time.Second))
603 assert.Error(t, err) // Expected because no real proxy
604 assert.Nil(t, manager)
605 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
+86-41
spindle/server.go
···49 vault secrets.Manager
50}
5152-func Run(ctx context.Context) error {
053 logger := log.FromContext(ctx)
54-55- cfg, err := config.Load(ctx)
56- if err != nil {
57- return fmt.Errorf("failed to load config: %w", err)
58- }
5960 d, err := db.Make(cfg.Server.DBPath)
61 if err != nil {
62- return fmt.Errorf("failed to setup db: %w", err)
63 }
6465 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
66 if err != nil {
67- return fmt.Errorf("failed to setup rbac enforcer: %w", err)
68 }
69 e.E.EnableAutoSave(true)
70···74 switch cfg.Server.Secrets.Provider {
75 case "openbao":
76 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
77- return fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
78 }
79 vault, err = secrets.NewOpenBaoManager(
80 cfg.Server.Secrets.OpenBao.ProxyAddr,
···82 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
83 )
84 if err != nil {
85- return fmt.Errorf("failed to setup openbao secrets provider: %w", err)
86 }
87 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
88 case "sqlite", "":
89 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
90 if err != nil {
91- return fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
92 }
93 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
94 default:
95- return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
96- }
97-98- nixeryEng, err := nixery.New(ctx, cfg)
99- if err != nil {
100- return err
101 }
102103 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···110 }
111 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
112 if err != nil {
113- return fmt.Errorf("failed to setup jetstream client: %w", err)
114 }
115 jc.AddDid(cfg.Server.Owner)
116117 // Check if the spindle knows about any Dids;
118 dids, err := d.GetAllDids()
119 if err != nil {
120- return fmt.Errorf("failed to get all dids: %w", err)
121 }
122 for _, d := range dids {
123 jc.AddDid(d)
124 }
125126- resolver := idresolver.DefaultResolver()
127128- spindle := Spindle{
129 jc: jc,
130 e: e,
131 db: d,
132 l: logger,
133 n: &n,
134- engs: map[string]models.Engine{"nixery": nixeryEng},
135 jq: jq,
136 cfg: cfg,
137 res: resolver,
···140141 err = e.AddSpindle(rbacDomain)
142 if err != nil {
143- return fmt.Errorf("failed to set rbac domain: %w", err)
144 }
145 err = spindle.configureOwner()
146 if err != nil {
147- return err
148 }
149 logger.Info("owner set", "did", cfg.Server.Owner)
150-151- // starts a job queue runner in the background
152- jq.Start()
153- defer jq.Stop()
154-155- // Stop vault token renewal if it implements Stopper
156- if stopper, ok := vault.(secrets.Stopper); ok {
157- defer stopper.Stop()
158- }
159160 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
161 if err != nil {
162- return fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
163 }
164165 err = jc.StartJetstream(ctx, spindle.ingest())
166 if err != nil {
167- return fmt.Errorf("failed to start jetstream consumer: %w", err)
168 }
169170 // for each incoming sh.tangled.pipeline, we execute
···177 ccfg.CursorStore = cursorStore
178 knownKnots, err := d.Knots()
179 if err != nil {
180- return err
181 }
182 for _, knot := range knownKnots {
183 logger.Info("adding source start", "knot", knot)
···185 }
186 spindle.ks = eventconsumer.NewConsumer(*ccfg)
18700000000000000000000000000000000000000000000188 go func() {
189- logger.Info("starting knot event consumer")
190- spindle.ks.Start(ctx)
191 }()
192193- logger.Info("starting spindle server", "address", cfg.Server.ListenAddr)
194- logger.Error("server error", "error", http.ListenAndServe(cfg.Server.ListenAddr, spindle.Router()))
0000000000000000000195196- return nil
197}
198199func (s *Spindle) Router() http.Handler {
···49 vault secrets.Manager
50}
5152+// New creates a new Spindle server with the provided configuration and engines.
53+func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
54 logger := log.FromContext(ctx)
000005556 d, err := db.Make(cfg.Server.DBPath)
57 if err != nil {
58+ return nil, fmt.Errorf("failed to setup db: %w", err)
59 }
6061 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
62 if err != nil {
63+ return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
64 }
65 e.E.EnableAutoSave(true)
66···70 switch cfg.Server.Secrets.Provider {
71 case "openbao":
72 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
73+ return nil, fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
74 }
75 vault, err = secrets.NewOpenBaoManager(
76 cfg.Server.Secrets.OpenBao.ProxyAddr,
···78 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
79 )
80 if err != nil {
81+ return nil, fmt.Errorf("failed to setup openbao secrets provider: %w", err)
82 }
83 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
84 case "sqlite", "":
85 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
86 if err != nil {
87+ return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
88 }
89 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
90 default:
91+ return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
0000092 }
9394 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···101 }
102 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
103 if err != nil {
104+ return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
105 }
106 jc.AddDid(cfg.Server.Owner)
107108 // Check if the spindle knows about any Dids;
109 dids, err := d.GetAllDids()
110 if err != nil {
111+ return nil, fmt.Errorf("failed to get all dids: %w", err)
112 }
113 for _, d := range dids {
114 jc.AddDid(d)
115 }
116117+ resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
118119+ spindle := &Spindle{
120 jc: jc,
121 e: e,
122 db: d,
123 l: logger,
124 n: &n,
125+ engs: engines,
126 jq: jq,
127 cfg: cfg,
128 res: resolver,
···131132 err = e.AddSpindle(rbacDomain)
133 if err != nil {
134+ return nil, fmt.Errorf("failed to set rbac domain: %w", err)
135 }
136 err = spindle.configureOwner()
137 if err != nil {
138+ return nil, err
139 }
140 logger.Info("owner set", "did", cfg.Server.Owner)
000000000141142 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
143 if err != nil {
144+ return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
145 }
146147 err = jc.StartJetstream(ctx, spindle.ingest())
148 if err != nil {
149+ return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
150 }
151152 // for each incoming sh.tangled.pipeline, we execute
···159 ccfg.CursorStore = cursorStore
160 knownKnots, err := d.Knots()
161 if err != nil {
162+ return nil, err
163 }
164 for _, knot := range knownKnots {
165 logger.Info("adding source start", "knot", knot)
···167 }
168 spindle.ks = eventconsumer.NewConsumer(*ccfg)
169170+ return spindle, nil
171+}
172+173+// DB returns the database instance.
174+func (s *Spindle) DB() *db.DB {
175+ return s.db
176+}
177+178+// Queue returns the job queue instance.
179+func (s *Spindle) Queue() *queue.Queue {
180+ return s.jq
181+}
182+183+// Engines returns the map of available engines.
184+func (s *Spindle) Engines() map[string]models.Engine {
185+ return s.engs
186+}
187+188+// Vault returns the secrets manager instance.
189+func (s *Spindle) Vault() secrets.Manager {
190+ return s.vault
191+}
192+193+// Notifier returns the notifier instance.
194+func (s *Spindle) Notifier() *notifier.Notifier {
195+ return s.n
196+}
197+198+// Enforcer returns the RBAC enforcer instance.
199+func (s *Spindle) Enforcer() *rbac.Enforcer {
200+ return s.e
201+}
202+203+// Start starts the Spindle server (blocking).
204+func (s *Spindle) Start(ctx context.Context) error {
205+ // starts a job queue runner in the background
206+ s.jq.Start()
207+ defer s.jq.Stop()
208+209+ // Stop vault token renewal if it implements Stopper
210+ if stopper, ok := s.vault.(secrets.Stopper); ok {
211+ defer stopper.Stop()
212+ }
213+214 go func() {
215+ s.l.Info("starting knot event consumer")
216+ s.ks.Start(ctx)
217 }()
218219+ s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
220+ return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
221+}
222+223+func Run(ctx context.Context) error {
224+ cfg, err := config.Load(ctx)
225+ if err != nil {
226+ return fmt.Errorf("failed to load config: %w", err)
227+ }
228+229+ nixeryEng, err := nixery.New(ctx, cfg)
230+ if err != nil {
231+ return err
232+ }
233+234+ s, err := New(ctx, cfg, map[string]models.Engine{
235+ "nixery": nixeryEng,
236+ })
237+ if err != nil {
238+ return err
239+ }
240241+ return s.Start(ctx)
242}
243244func (s *Spindle) Router() http.Handler {
+5
spindle/stream.go
···213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
214 return fmt.Errorf("failed to write to websocket: %w", err)
215 }
00000216 }
217 }
218}
···213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
214 return fmt.Errorf("failed to write to websocket: %w", err)
215 }
216+ case <-time.After(30 * time.Second):
217+ // send a keep-alive
218+ if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
219+ return fmt.Errorf("failed to write control: %w", err)
220+ }
221 }
222 }
223}
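On the client side, the new 30-second pings mean a consumer can use a read deadline to detect dead connections. A minimal sketch using gorilla/websocket; the endpoint URL and timeout values are illustrative assumptions only.

```go
package main

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	// Endpoint is an assumption for illustration.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:6555/logs", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The server now sends traffic (log lines or pings) at least every 30s;
	// allow some slack before declaring the connection dead.
	const deadline = 45 * time.Second
	conn.SetReadDeadline(time.Now().Add(deadline))
	conn.SetPingHandler(func(appData string) error {
		// Extend the deadline and answer the keep-alive with a pong.
		conn.SetReadDeadline(time.Now().Add(deadline))
		return conn.WriteControl(websocket.PongMessage, []byte(appData), time.Now().Add(time.Second))
	})

	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Fatal(err)
		}
		conn.SetReadDeadline(time.Now().Add(deadline))
		log.Printf("log line: %s", msg)
	}
}
```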