···27 Location *string `json:"location,omitempty" cborgen:"location,omitempty"`
28 // pinnedRepositories: Any ATURI, it is up to appviews to validate these fields.
29 PinnedRepositories []string `json:"pinnedRepositories,omitempty" cborgen:"pinnedRepositories,omitempty"`
30- Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
0031}
···27 Location *string `json:"location,omitempty" cborgen:"location,omitempty"`
28 // pinnedRepositories: Any ATURI, it is up to appviews to validate these fields.
29 PinnedRepositories []string `json:"pinnedRepositories,omitempty" cborgen:"pinnedRepositories,omitempty"`
30+ // pronouns: Preferred gender pronouns.
31+ Pronouns *string `json:"pronouns,omitempty" cborgen:"pronouns,omitempty"`
32+ Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
33}
···26 }
2728 cw := cbg.NewCborWriter(w)
29+ fieldCount := 8
3031 if t.Description == nil {
32 fieldCount--
···41 }
4243 if t.PinnedRepositories == nil {
44+ fieldCount--
45+ }
46+47+ if t.Pronouns == nil {
48 fieldCount--
49 }
50···190 return err
191 }
192 if _, err := cw.WriteString(string(*t.Location)); err != nil {
193+ return err
194+ }
195+ }
196+ }
197+198+ // t.Pronouns (string) (string)
199+ if t.Pronouns != nil {
200+201+ if len("pronouns") > 1000000 {
202+ return xerrors.Errorf("Value in field \"pronouns\" was too long")
203+ }
204+205+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pronouns"))); err != nil {
206+ return err
207+ }
208+ if _, err := cw.WriteString(string("pronouns")); err != nil {
209+ return err
210+ }
211+212+ if t.Pronouns == nil {
213+ if _, err := cw.Write(cbg.CborNull); err != nil {
214+ return err
215+ }
216+ } else {
217+ if len(*t.Pronouns) > 1000000 {
218+ return xerrors.Errorf("Value in field t.Pronouns was too long")
219+ }
220+221+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Pronouns))); err != nil {
222+ return err
223+ }
224+ if _, err := cw.WriteString(string(*t.Pronouns)); err != nil {
225 return err
226 }
227 }
···466 }
467468 t.Location = (*string)(&sval)
469+ }
470+ }
471+ // t.Pronouns (string) (string)
472+ case "pronouns":
473+474+ {
475+ b, err := cr.ReadByte()
476+ if err != nil {
477+ return err
478+ }
479+ if b != cbg.CborNull[0] {
480+ if err := cr.UnreadByte(); err != nil {
481+ return err
482+ }
483+484+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
485+ if err != nil {
486+ return err
487+ }
488+489+ t.Pronouns = (*string)(&sval)
490 }
491 }
492 // t.Description (string) (string)
···5863 }
58645865 cw := cbg.NewCborWriter(w)
5866+ fieldCount := 10
58675868 if t.Description == nil {
5869 fieldCount--
···5878 }
58795880 if t.Spindle == nil {
5881+ fieldCount--
5882+ }
5883+5884+ if t.Topics == nil {
5885+ fieldCount--
5886+ }
5887+5888+ if t.Website == nil {
5889 fieldCount--
5890 }
5891···6026 }
6027 }
60286029+ // t.Topics ([]string) (slice)
6030+ if t.Topics != nil {
6031+6032+ if len("topics") > 1000000 {
6033+ return xerrors.Errorf("Value in field \"topics\" was too long")
6034+ }
6035+6036+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("topics"))); err != nil {
6037+ return err
6038+ }
6039+ if _, err := cw.WriteString(string("topics")); err != nil {
6040+ return err
6041+ }
6042+6043+ if len(t.Topics) > 8192 {
6044+ return xerrors.Errorf("Slice value in field t.Topics was too long")
6045+ }
6046+6047+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Topics))); err != nil {
6048+ return err
6049+ }
6050+ for _, v := range t.Topics {
6051+ if len(v) > 1000000 {
6052+ return xerrors.Errorf("Value in field v was too long")
6053+ }
6054+6055+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
6056+ return err
6057+ }
6058+ if _, err := cw.WriteString(string(v)); err != nil {
6059+ return err
6060+ }
6061+6062+ }
6063+ }
6064+6065 // t.Spindle (string) (string)
6066 if t.Spindle != nil {
6067···6094 }
6095 }
60966097+ // t.Website (string) (string)
6098+ if t.Website != nil {
6099+6100+ if len("website") > 1000000 {
6101+ return xerrors.Errorf("Value in field \"website\" was too long")
6102+ }
6103+6104+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("website"))); err != nil {
6105+ return err
6106+ }
6107+ if _, err := cw.WriteString(string("website")); err != nil {
6108+ return err
6109+ }
6110+6111+ if t.Website == nil {
6112+ if _, err := cw.Write(cbg.CborNull); err != nil {
6113+ return err
6114+ }
6115+ } else {
6116+ if len(*t.Website) > 1000000 {
6117+ return xerrors.Errorf("Value in field t.Website was too long")
6118+ }
6119+6120+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Website))); err != nil {
6121+ return err
6122+ }
6123+ if _, err := cw.WriteString(string(*t.Website)); err != nil {
6124+ return err
6125+ }
6126+ }
6127+ }
6128+6129 // t.CreatedAt (string) (string)
6130 if len("createdAt") > 1000000 {
6131 return xerrors.Errorf("Value in field \"createdAt\" was too long")
···6318 t.Source = (*string)(&sval)
6319 }
6320 }
6321+ // t.Topics ([]string) (slice)
6322+ case "topics":
6323+6324+ maj, extra, err = cr.ReadHeader()
6325+ if err != nil {
6326+ return err
6327+ }
6328+6329+ if extra > 8192 {
6330+ return fmt.Errorf("t.Topics: array too large (%d)", extra)
6331+ }
6332+6333+ if maj != cbg.MajArray {
6334+ return fmt.Errorf("expected cbor array")
6335+ }
6336+6337+ if extra > 0 {
6338+ t.Topics = make([]string, extra)
6339+ }
6340+6341+ for i := 0; i < int(extra); i++ {
6342+ {
6343+ var maj byte
6344+ var extra uint64
6345+ var err error
6346+ _ = maj
6347+ _ = extra
6348+ _ = err
6349+6350+ {
6351+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
6352+ if err != nil {
6353+ return err
6354+ }
6355+6356+ t.Topics[i] = string(sval)
6357+ }
6358+6359+ }
6360+ }
6361 // t.Spindle (string) (string)
6362 case "spindle":
6363···6377 }
63786379 t.Spindle = (*string)(&sval)
6380+ }
6381+ }
6382+ // t.Website (string) (string)
6383+ case "website":
6384+6385+ {
6386+ b, err := cr.ReadByte()
6387+ if err != nil {
6388+ return err
6389+ }
6390+ if b != cbg.CborNull[0] {
6391+ if err := cr.UnreadByte(); err != nil {
6392+ return err
6393+ }
6394+6395+ sval, err := cbg.ReadStringWithMax(cr, 1000000)
6396+ if err != nil {
6397+ return err
6398+ }
6399+6400+ t.Website = (*string)(&sval)
6401 }
6402 }
6403 // t.CreatedAt (string) (string)
+13-1
api/tangled/repoblob.go
···30// RepoBlob_Output is the output of a sh.tangled.repo.blob call.
31type RepoBlob_Output struct {
32 // content: File content (base64 encoded for binary files)
33- Content string `json:"content" cborgen:"content"`
34 // encoding: Content encoding
35 Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
36 // isBinary: Whether the file is binary
···44 Ref string `json:"ref" cborgen:"ref"`
45 // size: File size in bytes
46 Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
0047}
4849// RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···54 Name string `json:"name" cborgen:"name"`
55 // when: Author timestamp
56 When string `json:"when" cborgen:"when"`
000000000057}
5859// RepoBlob calls the XRPC method "sh.tangled.repo.blob".
···30// RepoBlob_Output is the output of a sh.tangled.repo.blob call.
31type RepoBlob_Output struct {
32 // content: File content (base64 encoded for binary files)
33+ Content *string `json:"content,omitempty" cborgen:"content,omitempty"`
34 // encoding: Content encoding
35 Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
36 // isBinary: Whether the file is binary
···44 Ref string `json:"ref" cborgen:"ref"`
45 // size: File size in bytes
46 Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
47+ // submodule: Submodule information if path is a submodule
48+ Submodule *RepoBlob_Submodule `json:"submodule,omitempty" cborgen:"submodule,omitempty"`
49}
5051// RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···56 Name string `json:"name" cborgen:"name"`
57 // when: Author timestamp
58 When string `json:"when" cborgen:"when"`
59+}
60+61+// RepoBlob_Submodule is a "submodule" in the sh.tangled.repo.blob schema.
62+type RepoBlob_Submodule struct {
63+ // branch: Branch to track in the submodule
64+ Branch *string `json:"branch,omitempty" cborgen:"branch,omitempty"`
65+ // name: Submodule name
66+ Name string `json:"name" cborgen:"name"`
67+ // url: Submodule repository URL
68+ Url string `json:"url" cborgen:"url"`
69}
7071// RepoBlob calls the XRPC method "sh.tangled.repo.blob".
-4
api/tangled/repotree.go
···4748// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
49type RepoTree_TreeEntry struct {
50- // is_file: Whether this entry is a file
51- Is_file bool `json:"is_file" cborgen:"is_file"`
52- // is_subtree: Whether this entry is a directory/subtree
53- Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
54 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
55 // mode: File mode
56 Mode string `json:"mode" cborgen:"mode"`
···4748// RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
49type RepoTree_TreeEntry struct {
000050 Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
51 // mode: File mode
52 Mode string `json:"mode" cborgen:"mode"`
+4
api/tangled/tangledrepo.go
···30 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
31 // spindle: CI runner to send jobs to and receive results from
32 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
000033}
···30 Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
31 // spindle: CI runner to send jobs to and receive results from
32 Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
33+ // topics: Topics related to the repo
34+ Topics []string `json:"topics,omitempty" cborgen:"topics,omitempty"`
35+ // website: Any URI related to the repo
36+ Website *string `json:"website,omitempty" cborgen:"website,omitempty"`
37}
···569 -- indexes for better performance
570 create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
571 create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
572- create index if not exists idx_stars_created on stars(created);
573- create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
574 `)
575 if err != nil {
576 return nil, err
···1102 runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
1103 _, err := tx.Exec(`
1104 alter table pull_submissions add column combined text;
00000000000000000000000000000000000000000000000000000000000001105 `)
1106 return err
1107 })
···569 -- indexes for better performance
570 create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
571 create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
00572 `)
573 if err != nil {
574 return nil, err
···1100 runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
1101 _, err := tx.Exec(`
1102 alter table pull_submissions add column combined text;
1103+ `)
1104+ return err
1105+ })
1106+1107+ runMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
1108+ _, err := tx.Exec(`
1109+ alter table profile add column pronouns text;
1110+ `)
1111+ return err
1112+ })
1113+1114+ runMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
1115+ _, err := tx.Exec(`
1116+ alter table repos add column website text;
1117+ alter table repos add column topics text;
1118+ `)
1119+ return err
1120+ })
1121+1122+ runMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
1123+ _, err := tx.Exec(`
1124+ alter table notification_preferences add column user_mentioned integer not null default 1;
1125+ `)
1126+ return err
1127+ })
1128+1129+ // remove the foreign key constraints from stars.
1130+ runMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
1131+ _, err := tx.Exec(`
1132+ create table stars_new (
1133+ id integer primary key autoincrement,
1134+ did text not null,
1135+ rkey text not null,
1136+1137+ subject_at text not null,
1138+1139+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
1140+ unique(did, rkey),
1141+ unique(did, subject_at)
1142+ );
1143+1144+ insert into stars_new (
1145+ id,
1146+ did,
1147+ rkey,
1148+ subject_at,
1149+ created
1150+ )
1151+ select
1152+ id,
1153+ starred_by_did,
1154+ rkey,
1155+ repo_at,
1156+ created
1157+ from stars;
1158+1159+ drop table stars;
1160+ alter table stars_new rename to stars;
1161+1162+ create index if not exists idx_stars_created on stars(created);
1163+ create index if not exists idx_stars_subject_at_created on stars(subject_at, created);
1164 `)
1165 return err
1166 })
+15-5
appview/db/notifications.go
···134 select
135 n.id, n.recipient_did, n.actor_did, n.type, n.entity_type, n.entity_id,
136 n.read, n.created, n.repo_id, n.issue_id, n.pull_id,
137- r.id as r_id, r.did as r_did, r.name as r_name, r.description as r_description,
138 i.id as i_id, i.did as i_did, i.issue_id as i_issue_id, i.title as i_title, i.open as i_open,
139 p.id as p_id, p.owner_did as p_owner_did, p.pull_id as p_pull_id, p.title as p_title, p.state as p_state
140 from notifications n
···163 var issue models.Issue
164 var pull models.Pull
165 var rId, iId, pId sql.NullInt64
166- var rDid, rName, rDescription sql.NullString
167 var iDid sql.NullString
168 var iIssueId sql.NullInt64
169 var iTitle sql.NullString
···176 err := rows.Scan(
177 &n.ID, &n.RecipientDid, &n.ActorDid, &typeStr, &n.EntityType, &n.EntityId,
178 &n.Read, &createdStr, &n.RepoId, &n.IssueId, &n.PullId,
179- &rId, &rDid, &rName, &rDescription,
180 &iId, &iDid, &iIssueId, &iTitle, &iOpen,
181 &pId, &pOwnerDid, &pPullId, &pTitle, &pState,
182 )
···203 }
204 if rDescription.Valid {
205 repo.Description = rDescription.String
000000206 }
207 nwe.Repo = &repo
208 }
···394 pull_created,
395 pull_commented,
396 followed,
0397 pull_merged,
398 issue_closed,
399 email_notifications
···419 &prefs.PullCreated,
420 &prefs.PullCommented,
421 &prefs.Followed,
0422 &prefs.PullMerged,
423 &prefs.IssueClosed,
424 &prefs.EmailNotifications,
···440 query := `
441 INSERT OR REPLACE INTO notification_preferences
442 (user_did, repo_starred, issue_created, issue_commented, pull_created,
443- pull_commented, followed, pull_merged, issue_closed, email_notifications)
444- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
0445 `
446447 result, err := d.DB.ExecContext(ctx, query,
···452 prefs.PullCreated,
453 prefs.PullCommented,
454 prefs.Followed,
0455 prefs.PullMerged,
456 prefs.IssueClosed,
457 prefs.EmailNotifications,
···134 select
135 n.id, n.recipient_did, n.actor_did, n.type, n.entity_type, n.entity_id,
136 n.read, n.created, n.repo_id, n.issue_id, n.pull_id,
137+ r.id as r_id, r.did as r_did, r.name as r_name, r.description as r_description, r.website as r_website, r.topics as r_topics,
138 i.id as i_id, i.did as i_did, i.issue_id as i_issue_id, i.title as i_title, i.open as i_open,
139 p.id as p_id, p.owner_did as p_owner_did, p.pull_id as p_pull_id, p.title as p_title, p.state as p_state
140 from notifications n
···163 var issue models.Issue
164 var pull models.Pull
165 var rId, iId, pId sql.NullInt64
166+ var rDid, rName, rDescription, rWebsite, rTopicStr sql.NullString
167 var iDid sql.NullString
168 var iIssueId sql.NullInt64
169 var iTitle sql.NullString
···176 err := rows.Scan(
177 &n.ID, &n.RecipientDid, &n.ActorDid, &typeStr, &n.EntityType, &n.EntityId,
178 &n.Read, &createdStr, &n.RepoId, &n.IssueId, &n.PullId,
179+ &rId, &rDid, &rName, &rDescription, &rWebsite, &rTopicStr,
180 &iId, &iDid, &iIssueId, &iTitle, &iOpen,
181 &pId, &pOwnerDid, &pPullId, &pTitle, &pState,
182 )
···203 }
204 if rDescription.Valid {
205 repo.Description = rDescription.String
206+ }
207+ if rWebsite.Valid {
208+ repo.Website = rWebsite.String
209+ }
210+ if rTopicStr.Valid {
211+ repo.Topics = strings.Fields(rTopicStr.String)
212 }
213 nwe.Repo = &repo
214 }
···400 pull_created,
401 pull_commented,
402 followed,
403+ user_mentioned,
404 pull_merged,
405 issue_closed,
406 email_notifications
···426 &prefs.PullCreated,
427 &prefs.PullCommented,
428 &prefs.Followed,
429+ &prefs.UserMentioned,
430 &prefs.PullMerged,
431 &prefs.IssueClosed,
432 &prefs.EmailNotifications,
···448 query := `
449 INSERT OR REPLACE INTO notification_preferences
450 (user_did, repo_starred, issue_created, issue_commented, pull_created,
451+ pull_commented, followed, user_mentioned, pull_merged, issue_closed,
452+ email_notifications)
453+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
454 `
455456 result, err := d.DB.ExecContext(ctx, query,
···461 prefs.PullCreated,
462 prefs.PullCommented,
463 prefs.Followed,
464+ prefs.UserMentioned,
465 prefs.PullMerged,
466 prefs.IssueClosed,
467 prefs.EmailNotifications,
+4-2
appview/db/pipeline.go
···168169// this is a mega query, but the most useful one:
170// get N pipelines, for each one get the latest status of its N workflows
171-func GetPipelineStatuses(e Execer, filters ...filter) ([]models.Pipeline, error) {
172 var conditions []string
173 var args []any
174 for _, filter := range filters {
···205 join
206 triggers t ON p.trigger_id = t.id
207 %s
208- `, whereClause)
00209210 rows, err := e.Query(query, args...)
211 if err != nil {
···168169// this is a mega query, but the most useful one:
170// get N pipelines, for each one get the latest status of its N workflows
171+func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
172 var conditions []string
173 var args []any
174 for _, filter := range filters {
···205 join
206 triggers t ON p.trigger_id = t.id
207 %s
208+ order by p.created desc
209+ limit %d
210+ `, whereClause, limit)
211212 rows, err := e.Query(query, args...)
213 if err != nil {
+26-6
appview/db/profile.go
···129 did,
130 description,
131 include_bluesky,
132- location
0133 )
134- values (?, ?, ?, ?)`,
135 profile.Did,
136 profile.Description,
137 includeBskyValue,
138 profile.Location,
0139 )
140141 if err != nil {
···216 did,
217 description,
218 include_bluesky,
219- location
0220 from
221 profile
222 %s`,
···231 for rows.Next() {
232 var profile models.Profile
233 var includeBluesky int
0234235- err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location)
236 if err != nil {
237 return nil, err
238 }
239240 if includeBluesky != 0 {
241 profile.IncludeBluesky = true
0000242 }
243244 profileMap[profile.Did] = &profile
···302303func GetProfile(e Execer, did string) (*models.Profile, error) {
304 var profile models.Profile
00305 profile.Did = did
306307 includeBluesky := 0
0308 err := e.QueryRow(
309- `select description, include_bluesky, location from profile where did = ?`,
310 did,
311- ).Scan(&profile.Description, &includeBluesky, &profile.Location)
312 if err == sql.ErrNoRows {
313 profile := models.Profile{}
314 profile.Did = did
···321322 if includeBluesky != 0 {
323 profile.IncludeBluesky = true
0000324 }
325326 rows, err := e.Query(`select link from profile_links where did = ?`, did)
···412 // ensure description is not too long
413 if len(profile.Location) > 40 {
414 return fmt.Errorf("Entered location is too long.")
00000415 }
416417 // ensure links are in order
···129 did,
130 description,
131 include_bluesky,
132+ location,
133+ pronouns
134 )
135+ values (?, ?, ?, ?, ?)`,
136 profile.Did,
137 profile.Description,
138 includeBskyValue,
139 profile.Location,
140+ profile.Pronouns,
141 )
142143 if err != nil {
···218 did,
219 description,
220 include_bluesky,
221+ location,
222+ pronouns
223 from
224 profile
225 %s`,
···234 for rows.Next() {
235 var profile models.Profile
236 var includeBluesky int
237+ var pronouns sql.Null[string]
238239+ err = rows.Scan(&profile.ID, &profile.Did, &profile.Description, &includeBluesky, &profile.Location, &pronouns)
240 if err != nil {
241 return nil, err
242 }
243244 if includeBluesky != 0 {
245 profile.IncludeBluesky = true
246+ }
247+248+ if pronouns.Valid {
249+ profile.Pronouns = pronouns.V
250 }
251252 profileMap[profile.Did] = &profile
···310311func GetProfile(e Execer, did string) (*models.Profile, error) {
312 var profile models.Profile
313+ var pronouns sql.Null[string]
314+315 profile.Did = did
316317 includeBluesky := 0
318+319 err := e.QueryRow(
320+ `select description, include_bluesky, location, pronouns from profile where did = ?`,
321 did,
322+ ).Scan(&profile.Description, &includeBluesky, &profile.Location, &pronouns)
323 if err == sql.ErrNoRows {
324 profile := models.Profile{}
325 profile.Did = did
···332333 if includeBluesky != 0 {
334 profile.IncludeBluesky = true
335+ }
336+337+ if pronouns.Valid {
338+ profile.Pronouns = pronouns.V
339 }
340341 rows, err := e.Query(`select link from profile_links where did = ?`, did)
···427 // ensure description is not too long
428 if len(profile.Location) > 40 {
429 return fmt.Errorf("Entered location is too long.")
430+ }
431+432+ // ensure pronouns are not too long
433+ if len(profile.Pronouns) > 40 {
434+ return fmt.Errorf("Entered pronouns are too long.")
435 }
436437 // ensure links are in order
···23import (
4 "fmt"
5+ "strings"
6 "time"
78 "github.com/bluesky-social/indigo/atproto/syntax"
···18 Rkey string
19 Created time.Time
20 Description string
21+ Website string
22+ Topics []string
23 Spindle string
24 Labels []string
25···31}
3233func (r *Repo) AsRecord() tangled.Repo {
34+ var source, spindle, description, website *string
3536 if r.Source != "" {
37 source = &r.Source
···45 description = &r.Description
46 }
4748+ if r.Website != "" {
49+ website = &r.Website
50+ }
51+52 return tangled.Repo{
53 Knot: r.Knot,
54 Name: r.Name,
55 Description: description,
56+ Website: website,
57+ Topics: r.Topics,
58 CreatedAt: r.Created.Format(time.RFC3339),
59 Source: source,
60 Spindle: spindle,
···69func (r Repo) DidSlashRepo() string {
70 p, _ := securejoin.SecureJoin(r.Did, r.Name)
71 return p
72+}
73+74+func (r Repo) TopicStr() string {
75+ return strings.Join(r.Topics, " ")
76}
7778type RepoStats struct {
···104 Repo *Repo
105 Issues []Issue
106}
107+108+type BlobContentType int
109+110+const (
111+ BlobContentTypeCode BlobContentType = iota
112+ BlobContentTypeMarkup
113+ BlobContentTypeImage
114+ BlobContentTypeSvg
115+ BlobContentTypeVideo
116+ BlobContentTypeSubmodule
117+)
118+119+func (ty BlobContentType) IsCode() bool { return ty == BlobContentTypeCode }
120+func (ty BlobContentType) IsMarkup() bool { return ty == BlobContentTypeMarkup }
121+func (ty BlobContentType) IsImage() bool { return ty == BlobContentTypeImage }
122+func (ty BlobContentType) IsSvg() bool { return ty == BlobContentTypeSvg }
123+func (ty BlobContentType) IsVideo() bool { return ty == BlobContentTypeVideo }
124+func (ty BlobContentType) IsSubmodule() bool { return ty == BlobContentTypeSubmodule }
125+126+type BlobView struct {
127+ HasTextView bool // can show as code/text
128+ HasRenderedView bool // can show rendered (markup/image/video/submodule)
129+ HasRawView bool // can download raw (everything except submodule)
130+131+ // current display mode
132+ ShowingRendered bool // currently in rendered mode
133+ ShowingText bool // currently in text/code mode
134+135+ // content type flags
136+ ContentType BlobContentType
137+138+ // Content data
139+ Contents string
140+ ContentSrc string // URL for media files
141+ Lines int
142+ SizeHint uint64
143+}
144+145+// if both views are available, then show a toggle between them
146+func (b BlobView) ShowToggle() bool {
147+ return b.HasTextView && b.HasRenderedView
148+}
149+150+func (b BlobView) IsUnsupported() bool {
151+ // no view available, only raw
152+ return !(b.HasRenderedView || b.HasTextView)
153+}
+14-5
appview/models/star.go
···7)
89type Star struct {
10- StarredByDid string
11- RepoAt syntax.ATURI
12- Created time.Time
13- Rkey string
01415- // optionally, populate this when querying for reverse mappings
0016 Repo *Repo
17}
000000
···7)
89type Star struct {
10+ Did string
11+ RepoAt syntax.ATURI
12+ Created time.Time
13+ Rkey string
14+}
1516+// RepoStar is used for reverse mapping to repos
17+type RepoStar struct {
18+ Star
19 Repo *Repo
20}
21+22+// StringStar is used for reverse mapping to strings
23+type StringStar struct {
24+ Star
25+ String *String
26+}
···54 reopened a pull request
55 {{ else if eq .Type "followed" }}
56 followed you
0057 {{ else }}
58 {{ end }}
59{{ end }}
···54 reopened a pull request
55 {{ else if eq .Type "followed" }}
56 followed you
57+ {{ else if eq .Type "user_mentioned" }}
58+ mentioned you
59 {{ else }}
60 {{ end }}
61{{ end }}
···3536 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38- <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
39 <p><span class="{{$bullet}}">4</span>Push!</p>
40 </div>
41 </div>
···3536 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38+ <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
39 <p><span class="{{$bullet}}">4</span>Push!</p>
40 </div>
41 </div>
···4 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
56 <p class="text-lg">
7- tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
8 </p>
9 <p class="text-lg">
10- we envision a place where developers have complete ownership of their
11 code, open source communities can freely self-govern and most
12 importantly, coding can be social and fun again.
13 </p>
···4 <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
56 <p class="text-lg">
7+ Tangled is a decentralized Git hosting and collaboration platform.
8 </p>
9 <p class="text-lg">
10+ We envision a place where developers have complete ownership of their
11 code, open source communities can freely self-govern and most
12 importantly, coding can be social and fun again.
13 </p>
···52 did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
5354# the secret key from above
55-export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
5657# run redis in at a new shell to store oauth sessions
56 57# run redis in a new shell to store oauth sessions
58 redis-server
···168169If for any reason you wish to disable either one of the
170services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171-`services.tangled-spindle.enable` (or
172-`services.tangled-knot.enable`) to `false`.
···52 did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
5354# the secret key from above
55+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56 57# run redis in a new shell to store oauth sessions
58 redis-server
···168169If for any reason you wish to disable either one of the
170services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171+`services.tangled.spindle.enable` (or
172+`services.tangled.knot.enable`) to `false`.
+1-1
docs/migrations.md
···49latest revision, and change your config block like so:
5051```diff
52- services.tangled-knot = {
53 enable = true;
54 server = {
55- secretFile = /path/to/secret;
···49latest revision, and change your config block like so:
5051```diff
52+ services.tangled.knot = {
53 enable = true;
54 server = {
55- secretFile = /path/to/secret;
+19-1
docs/spindle/pipeline.md
···19 - `push`: The workflow should run every time a commit is pushed to the repository.
20 - `pull_request`: The workflow should run every time a pull request is made or updated.
21 - `manual`: The workflow can be triggered manually.
22-- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
02324For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
25···29 branch: ["main", "develop"]
30 - event: ["pull_request"]
31 branch: ["main"]
0000000000000000032```
3334## Engine
···19 - `push`: The workflow should run every time a commit is pushed to the repository.
20 - `pull_request`: The workflow should run every time a pull request is made or updated.
21 - `manual`: The workflow can be triggered manually.
22+- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23+- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
2425For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26···30 branch: ["main", "develop"]
31 - event: ["pull_request"]
32 branch: ["main"]
33+```
34+35+You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36+37+```yaml
38+when:
39+ - event: ["push"]
40+ tag: ["v*"]
41+```
42+43+You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44+45+```yaml
46+when:
47+ - event: ["push"]
48+ branch: ["main", "release-*"]
49+ tag: ["v*", "stable"]
50```
5152## Engine
···109 setup := &setupSteps{}
110111 setup.addStep(nixConfStep())
112- setup.addStep(cloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
113 // this step could be empty
114 if s := dependencyStep(dwf.Dependencies); s != nil {
115 setup.addStep(*s)
···109 setup := &setupSteps{}
110111 setup.addStep(nixConfStep())
112+ setup.addStep(models.BuildCloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
113 // this step could be empty
114 if s := dependencyStep(dwf.Dependencies); s != nil {
115 setup.addStep(*s)
-73
spindle/engines/nixery/setup_steps.go
···23import (
4 "fmt"
5- "path"
6 "strings"
7-8- "tangled.org/core/api/tangled"
9- "tangled.org/core/workflow"
10)
1112func nixConfStep() Step {
···17 command: setupCmd,
18 name: "Configure Nix",
19 }
20-}
21-22-// cloneOptsAsSteps processes clone options and adds corresponding steps
23-// to the beginning of the workflow's step list if cloning is not skipped.
24-//
25-// the steps to do here are:
26-// - git init
27-// - git remote add origin <url>
28-// - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha>
29-// - git checkout FETCH_HEAD
30-func cloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) Step {
31- if twf.Clone.Skip {
32- return Step{}
33- }
34-35- var commands []string
36-37- // initialize git repo in workspace
38- commands = append(commands, "git init")
39-40- // add repo as git remote
41- scheme := "https://"
42- if dev {
43- scheme = "http://"
44- tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal")
45- }
46- url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo)
47- commands = append(commands, fmt.Sprintf("git remote add origin %s", url))
48-49- // run git fetch
50- {
51- var fetchArgs []string
52-53- // default clone depth is 1
54- depth := 1
55- if twf.Clone.Depth > 1 {
56- depth = int(twf.Clone.Depth)
57- }
58- fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth))
59-60- // optionally recurse submodules
61- if twf.Clone.Submodules {
62- fetchArgs = append(fetchArgs, "--recurse-submodules=yes")
63- }
64-65- // set remote to fetch from
66- fetchArgs = append(fetchArgs, "origin")
67-68- // set revision to checkout
69- switch workflow.TriggerKind(tr.Kind) {
70- case workflow.TriggerKindManual:
71- // TODO: unimplemented
72- case workflow.TriggerKindPush:
73- fetchArgs = append(fetchArgs, tr.Push.NewSha)
74- case workflow.TriggerKindPullRequest:
75- fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha)
76- }
77-78- commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")))
79- }
80-81- // run git checkout
82- commands = append(commands, "git checkout FETCH_HEAD")
83-84- cloneStep := Step{
85- command: strings.Join(commands, "\n"),
86- name: "Clone repository into workspace",
87- }
88- return cloneStep
89}
9091// dependencyStep processes dependencies defined in the workflow.
···23import (
4 "fmt"
05 "strings"
0006)
78func nixConfStep() Step {
···13 command: setupCmd,
14 name: "Configure Nix",
15 }
00000000000000000000000000000000000000000000000000000000000000000000016}
1718// dependencyStep processes dependencies defined in the workflow.
+3-7
spindle/ingester.go
···910 "tangled.org/core/api/tangled"
11 "tangled.org/core/eventconsumer"
12- "tangled.org/core/idresolver"
13 "tangled.org/core/rbac"
14 "tangled.org/core/spindle/db"
15···142func (s *Spindle) ingestRepo(ctx context.Context, e *models.Event) error {
143 var err error
144 did := e.Did
145- resolver := idresolver.DefaultResolver()
146147 l := s.l.With("component", "ingester", "record", tangled.RepoNSID)
148···190 }
191192 // add collaborators to rbac
193- owner, err := resolver.ResolveIdent(ctx, did)
194 if err != nil || owner.Handle.IsInvalidHandle() {
195 return err
196 }
···225 return err
226 }
227228- resolver := idresolver.DefaultResolver()
229-230- subjectId, err := resolver.ResolveIdent(ctx, record.Subject)
231 if err != nil || subjectId.Handle.IsInvalidHandle() {
232 return err
233 }
···240241 // TODO: get rid of this entirely
242 // resolve this aturi to extract the repo record
243- owner, err := resolver.ResolveIdent(ctx, repoAt.Authority().String())
244 if err != nil || owner.Handle.IsInvalidHandle() {
245 return fmt.Errorf("failed to resolve handle: %w", err)
246 }
···910 "tangled.org/core/api/tangled"
11 "tangled.org/core/eventconsumer"
012 "tangled.org/core/rbac"
13 "tangled.org/core/spindle/db"
14···141func (s *Spindle) ingestRepo(ctx context.Context, e *models.Event) error {
142 var err error
143 did := e.Did
0144145 l := s.l.With("component", "ingester", "record", tangled.RepoNSID)
146···188 }
189190 // add collaborators to rbac
191+ owner, err := s.res.ResolveIdent(ctx, did)
192 if err != nil || owner.Handle.IsInvalidHandle() {
193 return err
194 }
···223 return err
224 }
225226+ subjectId, err := s.res.ResolveIdent(ctx, record.Subject)
00227 if err != nil || subjectId.Handle.IsInvalidHandle() {
228 return err
229 }
···236237 // TODO: get rid of this entirely
238 // resolve this aturi to extract the repo record
239+ owner, err := s.res.ResolveIdent(ctx, repoAt.Authority().String())
240 if err != nil || owner.Handle.IsInvalidHandle() {
241 return fmt.Errorf("failed to resolve handle: %w", err)
242 }
···13)
1415type OpenBaoManager struct {
16- client *vault.Client
17- mountPath string
18- logger *slog.Logger
019}
2021type OpenBaoManagerOpt func(*OpenBaoManager)
···26 }
27}
2800000029// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
30// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
31// The proxy handles all authentication automatically via Auto-Auth
···43 }
4445 manager := &OpenBaoManager{
46- client: client,
47- mountPath: "spindle", // default KV v2 mount path
48- logger: logger,
049 }
5051 for _, opt := range opts {
···6263// testConnection verifies that we can connect to the proxy
64func (v *OpenBaoManager) testConnection() error {
65- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
66 defer cancel()
6768 // try token self-lookup as a quick way to verify proxy works
···13)
1415type OpenBaoManager struct {
16+ client *vault.Client
17+ mountPath string
18+ logger *slog.Logger
19+ connectionTimeout time.Duration
20}
2122type OpenBaoManagerOpt func(*OpenBaoManager)
···27 }
28}
2930+func WithConnectionTimeout(timeout time.Duration) OpenBaoManagerOpt {
31+ return func(v *OpenBaoManager) {
32+ v.connectionTimeout = timeout
33+ }
34+}
35+36// NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy
37// The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200")
38// The proxy handles all authentication automatically via Auto-Auth
···50 }
5152 manager := &OpenBaoManager{
53+ client: client,
54+ mountPath: "spindle", // default KV v2 mount path
55+ logger: logger,
56+ connectionTimeout: 10 * time.Second, // default connection timeout
57 }
5859 for _, opt := range opts {
···7071// testConnection verifies that we can connect to the proxy
72func (v *OpenBaoManager) testConnection() error {
73+ ctx, cancel := context.WithTimeout(context.Background(), v.connectionTimeout)
74 defer cancel()
7576 // try token self-lookup as a quick way to verify proxy works
+5-2
spindle/secrets/openbao_test.go
···152 for _, tt := range tests {
153 t.Run(tt.name, func(t *testing.T) {
154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155- manager, err := NewOpenBaoManager(tt.proxyAddr, logger, tt.opts...)
00156157 if tt.expectError {
158 assert.Error(t, err)
···596597 // All these will fail because no real proxy is running
598 // but we can test that the configuration is properly accepted
599- manager, err := NewOpenBaoManager(tt.proxyAddr, logger)
0600 assert.Error(t, err) // Expected because no real proxy
601 assert.Nil(t, manager)
602 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
···152 for _, tt := range tests {
153 t.Run(tt.name, func(t *testing.T) {
154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
155+ // Use shorter timeout for tests to avoid long waits
156+ opts := append(tt.opts, WithConnectionTimeout(1*time.Second))
157+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, opts...)
158159 if tt.expectError {
160 assert.Error(t, err)
···598599 // All these will fail because no real proxy is running
600 // but we can test that the configuration is properly accepted
601+ // Use shorter timeout for tests to avoid long waits
602+ manager, err := NewOpenBaoManager(tt.proxyAddr, logger, WithConnectionTimeout(1*time.Second))
603 assert.Error(t, err) // Expected because no real proxy
604 assert.Nil(t, manager)
605 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
+86-41
spindle/server.go
···49 vault secrets.Manager
50}
5152-func Run(ctx context.Context) error {
053 logger := log.FromContext(ctx)
54-55- cfg, err := config.Load(ctx)
56- if err != nil {
57- return fmt.Errorf("failed to load config: %w", err)
58- }
5960 d, err := db.Make(cfg.Server.DBPath)
61 if err != nil {
62- return fmt.Errorf("failed to setup db: %w", err)
63 }
6465 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
66 if err != nil {
67- return fmt.Errorf("failed to setup rbac enforcer: %w", err)
68 }
69 e.E.EnableAutoSave(true)
70···74 switch cfg.Server.Secrets.Provider {
75 case "openbao":
76 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
77- return fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
78 }
79 vault, err = secrets.NewOpenBaoManager(
80 cfg.Server.Secrets.OpenBao.ProxyAddr,
···82 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
83 )
84 if err != nil {
85- return fmt.Errorf("failed to setup openbao secrets provider: %w", err)
86 }
87 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
88 case "sqlite", "":
89 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
90 if err != nil {
91- return fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
92 }
93 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
94 default:
95- return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
96- }
97-98- nixeryEng, err := nixery.New(ctx, cfg)
99- if err != nil {
100- return err
101 }
102103 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···110 }
111 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
112 if err != nil {
113- return fmt.Errorf("failed to setup jetstream client: %w", err)
114 }
115 jc.AddDid(cfg.Server.Owner)
116117 // Check if the spindle knows about any Dids;
118 dids, err := d.GetAllDids()
119 if err != nil {
120- return fmt.Errorf("failed to get all dids: %w", err)
121 }
122 for _, d := range dids {
123 jc.AddDid(d)
124 }
125126- resolver := idresolver.DefaultResolver()
127128- spindle := Spindle{
129 jc: jc,
130 e: e,
131 db: d,
132 l: logger,
133 n: &n,
134- engs: map[string]models.Engine{"nixery": nixeryEng},
135 jq: jq,
136 cfg: cfg,
137 res: resolver,
···140141 err = e.AddSpindle(rbacDomain)
142 if err != nil {
143- return fmt.Errorf("failed to set rbac domain: %w", err)
144 }
145 err = spindle.configureOwner()
146 if err != nil {
147- return err
148 }
149 logger.Info("owner set", "did", cfg.Server.Owner)
150-151- // starts a job queue runner in the background
152- jq.Start()
153- defer jq.Stop()
154-155- // Stop vault token renewal if it implements Stopper
156- if stopper, ok := vault.(secrets.Stopper); ok {
157- defer stopper.Stop()
158- }
159160 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
161 if err != nil {
162- return fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
163 }
164165 err = jc.StartJetstream(ctx, spindle.ingest())
166 if err != nil {
167- return fmt.Errorf("failed to start jetstream consumer: %w", err)
168 }
169170 // for each incoming sh.tangled.pipeline, we execute
···177 ccfg.CursorStore = cursorStore
178 knownKnots, err := d.Knots()
179 if err != nil {
180- return err
181 }
182 for _, knot := range knownKnots {
183 logger.Info("adding source start", "knot", knot)
···185 }
186 spindle.ks = eventconsumer.NewConsumer(*ccfg)
18700000000000000000000000000000000000000000000188 go func() {
189- logger.Info("starting knot event consumer")
190- spindle.ks.Start(ctx)
191 }()
192193- logger.Info("starting spindle server", "address", cfg.Server.ListenAddr)
194- logger.Error("server error", "error", http.ListenAndServe(cfg.Server.ListenAddr, spindle.Router()))
0000000000000000000195196- return nil
197}
198199func (s *Spindle) Router() http.Handler {
···49 vault secrets.Manager
50}
5152+// New creates a new Spindle server with the provided configuration and engines.
53+func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
54 logger := log.FromContext(ctx)
000005556 d, err := db.Make(cfg.Server.DBPath)
57 if err != nil {
58+ return nil, fmt.Errorf("failed to setup db: %w", err)
59 }
6061 e, err := rbac.NewEnforcer(cfg.Server.DBPath)
62 if err != nil {
63+ return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
64 }
65 e.E.EnableAutoSave(true)
66···70 switch cfg.Server.Secrets.Provider {
71 case "openbao":
72 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" {
73+ return nil, fmt.Errorf("openbao proxy address is required when using openbao secrets provider")
74 }
75 vault, err = secrets.NewOpenBaoManager(
76 cfg.Server.Secrets.OpenBao.ProxyAddr,
···78 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount),
79 )
80 if err != nil {
81+ return nil, fmt.Errorf("failed to setup openbao secrets provider: %w", err)
82 }
83 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
84 case "sqlite", "":
85 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
86 if err != nil {
87+ return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
88 }
89 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
90 default:
91+ return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
0000092 }
9394 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
···101 }
102 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
103 if err != nil {
104+ return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
105 }
106 jc.AddDid(cfg.Server.Owner)
107108 // Check if the spindle knows about any Dids;
109 dids, err := d.GetAllDids()
110 if err != nil {
111+ return nil, fmt.Errorf("failed to get all dids: %w", err)
112 }
113 for _, d := range dids {
114 jc.AddDid(d)
115 }
116117+ resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
118119+ spindle := &Spindle{
120 jc: jc,
121 e: e,
122 db: d,
123 l: logger,
124 n: &n,
125+ engs: engines,
126 jq: jq,
127 cfg: cfg,
128 res: resolver,
···131132 err = e.AddSpindle(rbacDomain)
133 if err != nil {
134+ return nil, fmt.Errorf("failed to set rbac domain: %w", err)
135 }
136 err = spindle.configureOwner()
137 if err != nil {
138+ return nil, err
139 }
140 logger.Info("owner set", "did", cfg.Server.Owner)
000000000141142 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
143 if err != nil {
144+ return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
145 }
146147 err = jc.StartJetstream(ctx, spindle.ingest())
148 if err != nil {
149+ return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
150 }
151152 // for each incoming sh.tangled.pipeline, we execute
···159 ccfg.CursorStore = cursorStore
160 knownKnots, err := d.Knots()
161 if err != nil {
162+ return nil, err
163 }
164 for _, knot := range knownKnots {
165 logger.Info("adding source start", "knot", knot)
···167 }
168 spindle.ks = eventconsumer.NewConsumer(*ccfg)
169170+ return spindle, nil
171+}
172+173+// DB returns the database instance.
174+func (s *Spindle) DB() *db.DB {
175+ return s.db
176+}
177+178+// Queue returns the job queue instance.
179+func (s *Spindle) Queue() *queue.Queue {
180+ return s.jq
181+}
182+183+// Engines returns the map of available engines.
184+func (s *Spindle) Engines() map[string]models.Engine {
185+ return s.engs
186+}
187+188+// Vault returns the secrets manager instance.
189+func (s *Spindle) Vault() secrets.Manager {
190+ return s.vault
191+}
192+193+// Notifier returns the notifier instance.
194+func (s *Spindle) Notifier() *notifier.Notifier {
195+ return s.n
196+}
197+198+// Enforcer returns the RBAC enforcer instance.
199+func (s *Spindle) Enforcer() *rbac.Enforcer {
200+ return s.e
201+}
202+203+// Start starts the Spindle server (blocking).
204+func (s *Spindle) Start(ctx context.Context) error {
205+ // starts a job queue runner in the background
206+ s.jq.Start()
207+ defer s.jq.Stop()
208+209+ // Stop vault token renewal if it implements Stopper
210+ if stopper, ok := s.vault.(secrets.Stopper); ok {
211+ defer stopper.Stop()
212+ }
213+214 go func() {
215+ s.l.Info("starting knot event consumer")
216+ s.ks.Start(ctx)
217 }()
218219+ s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
220+ return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
221+}
222+223+func Run(ctx context.Context) error {
224+ cfg, err := config.Load(ctx)
225+ if err != nil {
226+ return fmt.Errorf("failed to load config: %w", err)
227+ }
228+229+ nixeryEng, err := nixery.New(ctx, cfg)
230+ if err != nil {
231+ return err
232+ }
233+234+ s, err := New(ctx, cfg, map[string]models.Engine{
235+ "nixery": nixeryEng,
236+ })
237+ if err != nil {
238+ return err
239+ }
240241+ return s.Start(ctx)
242}
243244func (s *Spindle) Router() http.Handler {
+5
spindle/stream.go
···213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
214 return fmt.Errorf("failed to write to websocket: %w", err)
215 }
00000216 }
217 }
218}
···213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil {
214 return fmt.Errorf("failed to write to websocket: %w", err)
215 }
216+ case <-time.After(30 * time.Second):
217+ // send a keep-alive
218+ if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
219+ return fmt.Errorf("failed to write control: %w", err)
220+ }
221 }
222 }
223}