···7 "time"
89 "tangled.org/core/appview/models"
010)
1112-func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) {
13 var pipelines []models.Pipeline
1415 var conditions []string
···168169// this is a mega query, but the most useful one:
170// get N pipelines, for each one get the latest status of its N workflows
171-func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
172 var conditions []string
173 var args []any
174 for _, filter := range filters {
175- filter.key = "p." + filter.key // the table is aliased in the query to `p`
176 conditions = append(conditions, filter.Condition())
177 args = append(args, filter.Arg()...)
178 }
···264 conditions = nil
265 args = nil
266 for _, p := range pipelines {
267- knotFilter := FilterEq("pipeline_knot", p.Knot)
268- rkeyFilter := FilterEq("pipeline_rkey", p.Rkey)
269 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
270 args = append(args, p.Knot)
271 args = append(args, p.Rkey)
···7 "time"
89 "tangled.org/core/appview/models"
10+ "tangled.org/core/orm"
11)
1213+func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) {
14 var pipelines []models.Pipeline
1516 var conditions []string
···169170// this is a mega query, but the most useful one:
171// get N pipelines, for each one get the latest status of its N workflows
172+func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) {
173 var conditions []string
174 var args []any
175 for _, filter := range filters {
176+ filter.Key = "p." + filter.Key // the table is aliased in the query to `p`
177 conditions = append(conditions, filter.Condition())
178 args = append(args, filter.Arg()...)
179 }
···265 conditions = nil
266 args = nil
267 for _, p := range pipelines {
268+ knotFilter := orm.FilterEq("pipeline_knot", p.Knot)
269+ rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey)
270 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
271 args = append(args, p.Knot)
272 args = append(args, p.Rkey)
+11-5
appview/db/profile.go
···1112 "github.com/bluesky-social/indigo/atproto/syntax"
13 "tangled.org/core/appview/models"
014)
1516const TimeframeMonths = 7
···4445 issues, err := GetIssues(
46 e,
47- FilterEq("did", forDid),
48- FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
49 )
50 if err != nil {
51 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
···65 *items = append(*items, &issue)
66 }
6768- repos, err := GetRepos(e, 0, FilterEq("did", forDid))
69 if err != nil {
70 return nil, fmt.Errorf("error getting all repos by did: %w", err)
71 }
···199 return tx.Commit()
200}
201202-func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) {
203 var conditions []string
204 var args []any
205 for _, filter := range filters {
···229 if err != nil {
230 return nil, err
231 }
0232233 profileMap := make(map[string]*models.Profile)
234 for rows.Next() {
···269 if err != nil {
270 return nil, err
271 }
00272 idxs := make(map[string]int)
273 for did := range profileMap {
274 idxs[did] = 0
···289 if err != nil {
290 return nil, err
291 }
00292 idxs = make(map[string]int)
293 for did := range profileMap {
294 idxs[did] = 0
···441 }
442443 // ensure all pinned repos are either own repos or collaborating repos
444- repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
445 if err != nil {
446 log.Printf("getting repos for %s: %s", profile.Did, err)
447 }
···1112 "github.com/bluesky-social/indigo/atproto/syntax"
13 "tangled.org/core/appview/models"
14+ "tangled.org/core/orm"
15)
1617const TimeframeMonths = 7
···4546 issues, err := GetIssues(
47 e,
48+ orm.FilterEq("did", forDid),
49+ orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
50 )
51 if err != nil {
52 return nil, fmt.Errorf("error getting issues by owner did: %w", err)
···66 *items = append(*items, &issue)
67 }
6869+ repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid))
70 if err != nil {
71 return nil, fmt.Errorf("error getting all repos by did: %w", err)
72 }
···200 return tx.Commit()
201}
202203+func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) {
204 var conditions []string
205 var args []any
206 for _, filter := range filters {
···230 if err != nil {
231 return nil, err
232 }
233+ defer rows.Close()
234235 profileMap := make(map[string]*models.Profile)
236 for rows.Next() {
···271 if err != nil {
272 return nil, err
273 }
274+ defer rows.Close()
275+276 idxs := make(map[string]int)
277 for did := range profileMap {
278 idxs[did] = 0
···293 if err != nil {
294 return nil, err
295 }
296+ defer rows.Close()
297+298 idxs = make(map[string]int)
299 for did := range profileMap {
300 idxs[did] = 0
···447 }
448449 // ensure all pinned repos are either own repos or collaborating repos
450+ repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did))
451 if err != nil {
452 log.Printf("getting repos for %s: %s", profile.Did, err)
453 }
+69-24
appview/db/pulls.go
···1314 "github.com/bluesky-social/indigo/atproto/syntax"
15 "tangled.org/core/appview/models"
016)
1718func NewPull(tx *sql.Tx, pull *models.Pull) error {
···93 insert into pull_submissions (pull_at, round_number, patch, combined, source_rev)
94 values (?, ?, ?, ?, ?)
95 `, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev)
96- return err
0000000097}
9899func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) {
···110 return pullId - 1, err
111}
112113-func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) {
114 pulls := make(map[syntax.ATURI]*models.Pull)
115116 var conditions []string
···221 for _, p := range pulls {
222 pullAts = append(pullAts, p.AtUri())
223 }
224- submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts))
225 if err != nil {
226 return nil, fmt.Errorf("failed to get submissions: %w", err)
227 }
···233 }
234235 // collect allLabels for each issue
236- allLabels, err := GetLabels(e, FilterIn("subject", pullAts))
237 if err != nil {
238 return nil, fmt.Errorf("failed to query labels: %w", err)
239 }
···250 sourceAts = append(sourceAts, *p.PullSource.RepoAt)
251 }
252 }
253- sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts))
254 if err != nil && !errors.Is(err, sql.ErrNoRows) {
255 return nil, fmt.Errorf("failed to get source repos: %w", err)
256 }
···266 }
267 }
2680000000000269 orderedByPullId := []*models.Pull{}
270 for _, p := range pulls {
271 orderedByPullId = append(orderedByPullId, p)
···277 return orderedByPullId, nil
278}
279280-func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) {
281 return GetPullsWithLimit(e, 0, filters...)
282}
283284func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
285 var ids []int64
286287- var filters []filter
288- filters = append(filters, FilterEq("state", opts.State))
289 if opts.RepoAt != "" {
290- filters = append(filters, FilterEq("repo_at", opts.RepoAt))
291 }
292293 var conditions []string
···343}
344345func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
346- pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId))
347 if err != nil {
348 return nil, err
349 }
···355}
356357// mapping from pull -> pull submissions
358-func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
359 var conditions []string
360 var args []any
361 for _, filter := range filters {
···430431 // Get comments for all submissions using GetPullComments
432 submissionIds := slices.Collect(maps.Keys(submissionMap))
433- comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds))
434 if err != nil {
435- return nil, err
436 }
437 for _, comment := range comments {
438 if submission, ok := submissionMap[comment.SubmissionId]; ok {
···456 return m, nil
457}
458459-func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) {
460 var conditions []string
461 var args []any
462 for _, filter := range filters {
···492 }
493 defer rows.Close()
494495- var comments []models.PullComment
496 for rows.Next() {
497 var comment models.PullComment
498 var createdAt string
···514 comment.Created = t
515 }
516517- comments = append(comments, comment)
0518 }
519520 if err := rows.Err(); err != nil {
521 return nil, err
522 }
523000000000000000000000524 return comments, nil
525}
526···600 return pulls, nil
601}
602603-func NewPullComment(e Execer, comment *models.PullComment) (int64, error) {
604 query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)`
605- res, err := e.Exec(
606 query,
607 comment.OwnerDid,
608 comment.RepoAt,
···618 i, err := res.LastInsertId()
619 if err != nil {
620 return 0, err
0000621 }
622623 return i, nil
···664 return err
665}
666667-func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error {
668 var conditions []string
669 var args []any
670···688689// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
690// otherwise submissions are immutable
691-func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error {
692 var conditions []string
693 var args []any
694···746func GetStack(e Execer, stackId string) (models.Stack, error) {
747 unorderedPulls, err := GetPulls(
748 e,
749- FilterEq("stack_id", stackId),
750- FilterNotEq("state", models.PullDeleted),
751 )
752 if err != nil {
753 return nil, err
···791func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
792 pulls, err := GetPulls(
793 e,
794- FilterEq("stack_id", stackId),
795- FilterEq("state", models.PullDeleted),
796 )
797 if err != nil {
798 return nil, err
···1314 "github.com/bluesky-social/indigo/atproto/syntax"
15 "tangled.org/core/appview/models"
16+ "tangled.org/core/orm"
17)
1819func NewPull(tx *sql.Tx, pull *models.Pull) error {
···94 insert into pull_submissions (pull_at, round_number, patch, combined, source_rev)
95 values (?, ?, ?, ?, ?)
96 `, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev)
97+ if err != nil {
98+ return err
99+ }
100+101+ if err := putReferences(tx, pull.AtUri(), pull.References); err != nil {
102+ return fmt.Errorf("put reference_links: %w", err)
103+ }
104+105+ return nil
106}
107108func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) {
···119 return pullId - 1, err
120}
121122+func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) {
123 pulls := make(map[syntax.ATURI]*models.Pull)
124125 var conditions []string
···230 for _, p := range pulls {
231 pullAts = append(pullAts, p.AtUri())
232 }
233+ submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts))
234 if err != nil {
235 return nil, fmt.Errorf("failed to get submissions: %w", err)
236 }
···242 }
243244 // collect allLabels for each issue
245+ allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts))
246 if err != nil {
247 return nil, fmt.Errorf("failed to query labels: %w", err)
248 }
···259 sourceAts = append(sourceAts, *p.PullSource.RepoAt)
260 }
261 }
262+ sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts))
263 if err != nil && !errors.Is(err, sql.ErrNoRows) {
264 return nil, fmt.Errorf("failed to get source repos: %w", err)
265 }
···275 }
276 }
277278+ allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts))
279+ if err != nil {
280+ return nil, fmt.Errorf("failed to query reference_links: %w", err)
281+ }
282+ for pullAt, references := range allReferences {
283+ if pull, ok := pulls[pullAt]; ok {
284+ pull.References = references
285+ }
286+ }
287+288 orderedByPullId := []*models.Pull{}
289 for _, p := range pulls {
290 orderedByPullId = append(orderedByPullId, p)
···296 return orderedByPullId, nil
297}
298299+func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) {
300 return GetPullsWithLimit(e, 0, filters...)
301}
302303func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
304 var ids []int64
305306+ var filters []orm.Filter
307+ filters = append(filters, orm.FilterEq("state", opts.State))
308 if opts.RepoAt != "" {
309+ filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
310 }
311312 var conditions []string
···362}
363364func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
365+ pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId))
366 if err != nil {
367 return nil, err
368 }
···374}
375376// mapping from pull -> pull submissions
377+func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
378 var conditions []string
379 var args []any
380 for _, filter := range filters {
···449450 // Get comments for all submissions using GetPullComments
451 submissionIds := slices.Collect(maps.Keys(submissionMap))
452+ comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds))
453 if err != nil {
454+ return nil, fmt.Errorf("failed to get pull comments: %w", err)
455 }
456 for _, comment := range comments {
457 if submission, ok := submissionMap[comment.SubmissionId]; ok {
···475 return m, nil
476}
477478+func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) {
479 var conditions []string
480 var args []any
481 for _, filter := range filters {
···511 }
512 defer rows.Close()
513514+ commentMap := make(map[string]*models.PullComment)
515 for rows.Next() {
516 var comment models.PullComment
517 var createdAt string
···533 comment.Created = t
534 }
535536+ atUri := comment.AtUri().String()
537+ commentMap[atUri] = &comment
538 }
539540 if err := rows.Err(); err != nil {
541 return nil, err
542 }
543544+	// collect references for each comment
545+ commentAts := slices.Collect(maps.Keys(commentMap))
546+ allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
547+ if err != nil {
548+ return nil, fmt.Errorf("failed to query reference_links: %w", err)
549+ }
550+ for commentAt, references := range allReferencs {
551+ if comment, ok := commentMap[commentAt.String()]; ok {
552+ comment.References = references
553+ }
554+ }
555+556+ var comments []models.PullComment
557+ for _, c := range commentMap {
558+ comments = append(comments, *c)
559+ }
560+561+ sort.Slice(comments, func(i, j int) bool {
562+ return comments[i].Created.Before(comments[j].Created)
563+ })
564+565 return comments, nil
566}
567···641 return pulls, nil
642}
643644+func NewPullComment(tx *sql.Tx, comment *models.PullComment) (int64, error) {
645 query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)`
646+ res, err := tx.Exec(
647 query,
648 comment.OwnerDid,
649 comment.RepoAt,
···659 i, err := res.LastInsertId()
660 if err != nil {
661 return 0, err
662+ }
663+664+ if err := putReferences(tx, comment.AtUri(), comment.References); err != nil {
665+ return 0, fmt.Errorf("put reference_links: %w", err)
666 }
667668 return i, nil
···709 return err
710}
711712+func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error {
713 var conditions []string
714 var args []any
715···733734// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
735// otherwise submissions are immutable
736+func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error {
737 var conditions []string
738 var args []any
739···791func GetStack(e Execer, stackId string) (models.Stack, error) {
792 unorderedPulls, err := GetPulls(
793 e,
794+ orm.FilterEq("stack_id", stackId),
795+ orm.FilterNotEq("state", models.PullDeleted),
796 )
797 if err != nil {
798 return nil, err
···836func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
837 pulls, err := GetPulls(
838 e,
839+ orm.FilterEq("stack_id", stackId),
840+ orm.FilterEq("state", models.PullDeleted),
841 )
842 if err != nil {
843 return nil, err
···3536 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38- <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
39 <p><span class="{{$bullet}}">4</span>Push!</p>
40 </div>
41 </div>
···3536 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38+ <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ resolve .RepoInfo.OwnerDid }}/{{ .RepoInfo.Name }}</code></p>
39 <p><span class="{{$bullet}}">4</span>Push!</p>
40 </div>
41 </div>
+1-1
appview/pages/templates/repo/fork.html
···34 {{ end }}
35 </div>
36 </div>
37- <p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/knots" class="underline">Learn how to register your own knot.</a></p>
38 </fieldset>
3940 <div class="space-y-2">
···34 {{ end }}
35 </div>
36 </div>
37+ <p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/settings/knots" class="underline">Learn how to register your own knot.</a></p>
38 </fieldset>
3940 <div class="space-y-2">
···165 </div>
166 <p class="text-sm text-gray-500 dark:text-gray-400 mt-1">
167 A knot hosts repository data and handles Git operations.
168- You can also <a href="/knots" class="underline">register your own knot</a>.
169 </p>
170 </div>
171{{ end }}
···165 </div>
166 <p class="text-sm text-gray-500 dark:text-gray-400 mt-1">
167 A knot hosts repository data and handles Git operations.
168+ You can also <a href="/settings/knots" class="underline">register your own knot</a>.
169 </p>
170 </div>
171{{ end }}
···11 "tangled.org/core/appview/pages"
12 "tangled.org/core/appview/pagination"
13 "tangled.org/core/consts"
014)
1516func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···2021 goodFirstIssueLabel := s.config.Label.GoodFirstIssue
2223- gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel))
24 if err != nil {
25 log.Println("failed to get gfi label def", err)
26 s.pages.Error500(w)
27 return
28 }
2930- repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel))
31 if err != nil {
32 log.Println("failed to get repo labels", err)
33 s.pages.Error503(w)
···55 pagination.Page{
56 Limit: 500,
57 },
58- db.FilterIn("repo_at", repoUris),
59- db.FilterEq("open", 1),
60 )
61 if err != nil {
62 log.Println("failed to get issues", err)
···132 }
133134 if len(uriList) > 0 {
135- allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList))
136 if err != nil {
137 log.Println("failed to fetch labels", err)
138 }
···11 "tangled.org/core/appview/pages"
12 "tangled.org/core/appview/pagination"
13 "tangled.org/core/consts"
14+ "tangled.org/core/orm"
15)
1617func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···2122 goodFirstIssueLabel := s.config.Label.GoodFirstIssue
2324+ gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel))
25 if err != nil {
26 log.Println("failed to get gfi label def", err)
27 s.pages.Error500(w)
28 return
29 }
3031+ repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel))
32 if err != nil {
33 log.Println("failed to get repo labels", err)
34 s.pages.Error503(w)
···56 pagination.Page{
57 Limit: 500,
58 },
59+ orm.FilterIn("repo_at", repoUris),
60+ orm.FilterEq("open", 1),
61 )
62 if err != nil {
63 log.Println("failed to get issues", err)
···133 }
134135 if len(uriList) > 0 {
136+ allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList))
137 if err != nil {
138 log.Println("failed to fetch labels", err)
139 }
+17
appview/state/git_http.go
···2526}
270000000000000000028func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
29 user, ok := r.Context().Value("resolvedId").(identity.Identity)
30 if !ok {
···2526}
2728+func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
29+ user, ok := r.Context().Value("resolvedId").(identity.Identity)
30+ if !ok {
31+ http.Error(w, "failed to resolve user", http.StatusInternalServerError)
32+ return
33+ }
34+ repo := r.Context().Value("repo").(*models.Repo)
35+36+ scheme := "https"
37+ if s.config.Core.Dev {
38+ scheme = "http"
39+ }
40+41+ targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42+ s.proxyRequest(w, r, targetURL)
43+}
44+45func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
46 user, ok := r.Context().Value("resolvedId").(identity.Identity)
47 if !ok {
···67 "tangled.org/core/appview/db"
8 "tangled.org/core/appview/models"
09)
1011func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
12 // if comments have parents, only ingest ones that are 1 level deep
13 if comment.ReplyTo != nil {
14- parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo))
15 if err != nil {
16 return fmt.Errorf("failed to fetch parent comment: %w", err)
17 }
···67 "tangled.org/core/appview/db"
8 "tangled.org/core/appview/models"
9+ "tangled.org/core/orm"
10)
1112func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
13 // if comments have parents, only ingest ones that are 1 level deep
14 if comment.ReplyTo != nil {
15+ parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo))
16 if err != nil {
17 return fmt.Errorf("failed to fetch parent comment: %w", err)
18 }
+1-34
crypto/verify.go
···5 "crypto/sha256"
6 "encoding/base64"
7 "fmt"
8- "strings"
910 "github.com/hiddeco/sshsig"
11 "golang.org/x/crypto/ssh"
12- "tangled.org/core/types"
13)
1415func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···28 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
29 // to sha-512 for all key types anyway.
30 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
31- return err, err == nil
32-}
3334-// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
35-// essentially the git cat-file output but without the gpgsig header.
36-//
37-// Caveats: signature verification will fail on commits with more than one parent,
38-// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
39-// and we are unable to reconstruct the payload correctly.
40-//
41-// Ideally this should directly operate on an *object.Commit.
42-func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
43- signature := commit.Commit.PGPSignature
44-45- author := bytes.NewBuffer([]byte{})
46- committer := bytes.NewBuffer([]byte{})
47- commit.Commit.Author.Encode(author)
48- commit.Commit.Committer.Encode(committer)
49-50- payload := strings.Builder{}
51-52- fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
53- if commit.Commit.Parent != "" {
54- fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
55- }
56- fmt.Fprintf(&payload, "author %s\n", author.String())
57- fmt.Fprintf(&payload, "committer %s\n", committer.String())
58- if commit.Commit.ChangedId != "" {
59- fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
60- }
61- fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
62-63- return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
64}
6566// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
···5 "crypto/sha256"
6 "encoding/base64"
7 "fmt"
089 "github.com/hiddeco/sshsig"
10 "golang.org/x/crypto/ssh"
011)
1213func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···26 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
27 // to sha-512 for all key types anyway.
28 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
002930+ return err, err == nil
0000000000000000000000000000031}
3233// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+3-3
docs/hacking.md
···117# type `poweroff` at the shell to exit the VM
118```
119120-This starts a knot on port 6000, a spindle on port 6555
121with `ssh` exposed on port 2222.
122123Once the services are running, head to
124-http://localhost:3000/knots and hit verify. It should
125verify the ownership of the services instantly if everything
126went smoothly.
127···146### running a spindle
147148The above VM should already be running a spindle on
149-`localhost:6555`. Head to http://localhost:3000/spindles and
150hit verify. You can then configure each repository to use
151this spindle and run CI jobs.
152
···117# type `poweroff` at the shell to exit the VM
118```
119120+This starts a knot on port 6444, a spindle on port 6555
121with `ssh` exposed on port 2222.
122123Once the services are running, head to
124+http://localhost:3000/settings/knots and hit verify. It should
125verify the ownership of the services instantly if everything
126went smoothly.
127···146### running a spindle
147148The above VM should already be running a spindle on
149+`localhost:6555`. Head to http://localhost:3000/settings/spindles and
150hit verify. You can then configure each repository to use
151this spindle and run CI jobs.
152
+1-1
docs/knot-hosting.md
···131132You should now have a running knot server! You can finalize
133your registration by hitting the `verify` button on the
134-[/knots](https://tangled.org/knots) page. This simply creates
135a record on your PDS to announce the existence of the knot.
136137### custom paths
···131132You should now have a running knot server! You can finalize
133your registration by hitting the `verify` button on the
134+[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135a record on your PDS to announce the existence of the knot.
136137### custom paths
+3-3
docs/migrations.md
···14For knots:
1516- Upgrade to latest tag (v1.9.0 or above)
17-- Head to the [knot dashboard](https://tangled.org/knots) and
18 hit the "retry" button to verify your knot
1920For spindles:
2122- Upgrade to latest tag (v1.9.0 or above)
23- Head to the [spindle
24- dashboard](https://tangled.org/spindles) and hit the
25 "retry" button to verify your spindle
2627## Upgrading from v1.7.x
···41 [settings](https://tangled.org/settings) page.
42- Restart your knot once you have replaced the environment
43 variable
44-- Head to the [knot dashboard](https://tangled.org/knots) and
45 hit the "retry" button to verify your knot. This simply
46 writes a `sh.tangled.knot` record to your PDS.
47
···14For knots:
1516- Upgrade to latest tag (v1.9.0 or above)
17+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
18 hit the "retry" button to verify your knot
1920For spindles:
2122- Upgrade to latest tag (v1.9.0 or above)
23- Head to the [spindle
24+ dashboard](https://tangled.org/settings/spindles) and hit the
25 "retry" button to verify your spindle
2627## Upgrading from v1.7.x
···41 [settings](https://tangled.org/settings) page.
42- Restart your knot once you have replaced the environment
43 variable
44+- Head to the [knot dashboard](https://tangled.org/settings/knots) and
45 hit the "retry" button to verify your knot. This simply
46 writes a `sh.tangled.knot` record to your PDS.
47
···1+package db
2+3+import (
4+ "context"
5+ "database/sql"
6+ "log/slog"
7+ "strings"
8+9+ _ "github.com/mattn/go-sqlite3"
10+ "tangled.org/core/log"
11+)
12+13+type DB struct {
14+ db *sql.DB
15+ logger *slog.Logger
16+}
17+18+func Setup(ctx context.Context, dbPath string) (*DB, error) {
19+ // https://github.com/mattn/go-sqlite3#connection-string
20+ opts := []string{
21+ "_foreign_keys=1",
22+ "_journal_mode=WAL",
23+ "_synchronous=NORMAL",
24+ "_auto_vacuum=incremental",
25+ }
26+27+ logger := log.FromContext(ctx)
28+ logger = log.SubLogger(logger, "db")
29+30+ db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
31+ if err != nil {
32+ return nil, err
33+ }
34+35+ conn, err := db.Conn(ctx)
36+ if err != nil {
37+ return nil, err
38+ }
39+ defer conn.Close()
40+41+ _, err = conn.ExecContext(ctx, `
42+ create table if not exists known_dids (
43+ did text primary key
44+ );
45+46+ create table if not exists public_keys (
47+ id integer primary key autoincrement,
48+ did text not null,
49+ key text not null,
50+ created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
51+ unique(did, key),
52+ foreign key (did) references known_dids(did) on delete cascade
53+ );
54+55+ create table if not exists _jetstream (
56+ id integer primary key autoincrement,
57+ last_time_us integer not null
58+ );
59+60+ create table if not exists events (
61+ rkey text not null,
62+ nsid text not null,
63+ event text not null, -- json
64+ created integer not null default (strftime('%s', 'now')),
65+ primary key (rkey, nsid)
66+ );
67+68+ create table if not exists migrations (
69+ id integer primary key autoincrement,
70+ name text unique
71+ );
72+ `)
73+ if err != nil {
74+ return nil, err
75+ }
76+77+ return &DB{
78+ db: db,
79+ logger: logger,
80+ }, nil
81+}
-64
knotserver/db/init.go
···1-package db
2-3-import (
4- "database/sql"
5- "strings"
6-7- _ "github.com/mattn/go-sqlite3"
8-)
9-10-type DB struct {
11- db *sql.DB
12-}
13-14-func Setup(dbPath string) (*DB, error) {
15- // https://github.com/mattn/go-sqlite3#connection-string
16- opts := []string{
17- "_foreign_keys=1",
18- "_journal_mode=WAL",
19- "_synchronous=NORMAL",
20- "_auto_vacuum=incremental",
21- }
22-23- db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
24- if err != nil {
25- return nil, err
26- }
27-28- // NOTE: If any other migration is added here, you MUST
29- // copy the pattern in appview: use a single sql.Conn
30- // for every migration.
31-32- _, err = db.Exec(`
33- create table if not exists known_dids (
34- did text primary key
35- );
36-37- create table if not exists public_keys (
38- id integer primary key autoincrement,
39- did text not null,
40- key text not null,
41- created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
42- unique(did, key),
43- foreign key (did) references known_dids(did) on delete cascade
44- );
45-46- create table if not exists _jetstream (
47- id integer primary key autoincrement,
48- last_time_us integer not null
49- );
50-51- create table if not exists events (
52- rkey text not null,
53- nsid text not null,
54- event text not null, -- json
55- created integer not null default (strftime('%s', 'now')),
56- primary key (rkey, nsid)
57- );
58- `)
59- if err != nil {
60- return nil, err
61- }
62-63- return &DB{db: db}, nil
64-}