Compare changes


+3243 -1651
-2
api/tangled/repoblob.go
··· 21 Hash string `json:"hash" cborgen:"hash"` 22 // message: Commit message 23 Message string `json:"message" cborgen:"message"` 24 - // shortHash: Short commit hash 25 - ShortHash *string `json:"shortHash,omitempty" cborgen:"shortHash,omitempty"` 26 // when: Commit timestamp 27 When string `json:"when" cborgen:"when"` 28 }
··· 21 Hash string `json:"hash" cborgen:"hash"` 22 // message: Commit message 23 Message string `json:"message" cborgen:"message"` 24 // when: Commit timestamp 25 When string `json:"when" cborgen:"when"` 26 }
+33
api/tangled/repotag.go
···
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.repo.tag 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + RepoTagNSID = "sh.tangled.repo.tag" 16 + ) 17 + 18 + // RepoTag calls the XRPC method "sh.tangled.repo.tag". 19 + // 20 + // repo: Repository identifier in format 'did:plc:.../repoName' 21 + // tag: Name of tag, such as v1.3.0 22 + func RepoTag(ctx context.Context, c util.LexClient, repo string, tag string) ([]byte, error) { 23 + buf := new(bytes.Buffer) 24 + 25 + params := map[string]interface{}{} 26 + params["repo"] = repo 27 + params["tag"] = tag 28 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.repo.tag", params, nil, buf); err != nil { 29 + return nil, err 30 + } 31 + 32 + return buf.Bytes(), nil 33 + }
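A minimal usage sketch for the generated wrapper; the import path tangled.org/core/api/tangled and the concrete util.LexClient are assumptions, not part of this change:

    package example

    import (
        "context"

        "github.com/bluesky-social/indigo/lex/util"
        "tangled.org/core/api/tangled" // assumed import path for the generated package
    )

    // fetchTag is a hypothetical helper: it issues the sh.tangled.repo.tag query
    // through any util.LexClient and returns the raw response body.
    func fetchTag(ctx context.Context, client util.LexClient, repo, tag string) ([]byte, error) {
        // repo takes the form "did:plc:.../repoName"; tag is a tag name such as "v1.3.0"
        return tangled.RepoTag(ctx, client, repo, tag)
    }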
+14 -2
api/tangled/repotree.go
··· 16 17 // RepoTree_LastCommit is a "lastCommit" in the sh.tangled.repo.tree schema. 18 type RepoTree_LastCommit struct { 19 // hash: Commit hash 20 Hash string `json:"hash" cborgen:"hash"` 21 // message: Commit message ··· 27 // RepoTree_Output is the output of a sh.tangled.repo.tree call. 28 type RepoTree_Output struct { 29 // dotdot: Parent directory path 30 - Dotdot *string `json:"dotdot,omitempty" cborgen:"dotdot,omitempty"` 31 - Files []*RepoTree_TreeEntry `json:"files" cborgen:"files"` 32 // parent: The parent path in the tree 33 Parent *string `json:"parent,omitempty" cborgen:"parent,omitempty"` 34 // readme: Readme for this file tree ··· 43 Contents string `json:"contents" cborgen:"contents"` 44 // filename: Name of the readme file 45 Filename string `json:"filename" cborgen:"filename"` 46 } 47 48 // RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
··· 16 17 // RepoTree_LastCommit is a "lastCommit" in the sh.tangled.repo.tree schema. 18 type RepoTree_LastCommit struct { 19 + Author *RepoTree_Signature `json:"author,omitempty" cborgen:"author,omitempty"` 20 // hash: Commit hash 21 Hash string `json:"hash" cborgen:"hash"` 22 // message: Commit message ··· 28 // RepoTree_Output is the output of a sh.tangled.repo.tree call. 29 type RepoTree_Output struct { 30 // dotdot: Parent directory path 31 + Dotdot *string `json:"dotdot,omitempty" cborgen:"dotdot,omitempty"` 32 + Files []*RepoTree_TreeEntry `json:"files" cborgen:"files"` 33 + LastCommit *RepoTree_LastCommit `json:"lastCommit,omitempty" cborgen:"lastCommit,omitempty"` 34 // parent: The parent path in the tree 35 Parent *string `json:"parent,omitempty" cborgen:"parent,omitempty"` 36 // readme: Readme for this file tree ··· 45 Contents string `json:"contents" cborgen:"contents"` 46 // filename: Name of the readme file 47 Filename string `json:"filename" cborgen:"filename"` 48 + } 49 + 50 + // RepoTree_Signature is a "signature" in the sh.tangled.repo.tree schema. 51 + type RepoTree_Signature struct { 52 + // email: Author email 53 + Email string `json:"email" cborgen:"email"` 54 + // name: Author name 55 + Name string `json:"name" cborgen:"name"` 56 + // when: Author timestamp 57 + When string `json:"when" cborgen:"when"` 58 } 59 60 // RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
+24
appview/db/db.go
··· 1181 return err 1182 }) 1183 1184 return &DB{ 1185 db, 1186 logger,
··· 1181 return err 1182 }) 1183 1184 + orm.RunMigration(conn, logger, "remove-profile-stats-column-constraint", func(tx *sql.Tx) error { 1185 + _, err := tx.Exec(` 1186 + -- create new table without the check constraint 1187 + create table profile_stats_new ( 1188 + id integer primary key autoincrement, 1189 + did text not null, 1190 + kind text not null, -- no constraint this time 1191 + foreign key (did) references profile(did) on delete cascade 1192 + ); 1193 + 1194 + -- copy data from old table 1195 + insert into profile_stats_new (id, did, kind) 1196 + select id, did, kind 1197 + from profile_stats; 1198 + 1199 + -- drop old table 1200 + drop table profile_stats; 1201 + 1202 + -- rename new table 1203 + alter table profile_stats_new rename to profile_stats; 1204 + `) 1205 + return err 1206 + }) 1207 + 1208 return &DB{ 1209 db, 1210 logger,
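SQLite's ALTER TABLE cannot drop a CHECK constraint, which is why this migration rebuilds the table: create profile_stats_new without the constraint, copy the rows across, drop the old table, then rename the new one into place, keeping the foreign key to profile(did).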
+7
appview/db/profile.go
··· 450 case models.VanityStatRepositoryCount: 451 query = `select count(id) from repos where did = ?` 452 args = append(args, did) 453 } 454 455 var result uint64
··· 450 case models.VanityStatRepositoryCount: 451 query = `select count(id) from repos where did = ?` 452 args = append(args, did) 453 + case models.VanityStatStarCount: 454 + query = `select count(id) from stars where subject_at like 'at://' || ? || '%'` 455 + args = append(args, did) 456 + case models.VanityStatNone: 457 + return 0, nil 458 + default: 459 + return 0, fmt.Errorf("invalid vanity stat kind: %s", stat) 460 } 461 462 var result uint64
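The new star-count branch prefix-matches subject_at against at:// followed by the profile's DID, so it counts stars received on any record owned by that account; VanityStatNone short-circuits to zero and unrecognised kinds now return an explicit error.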
+1 -1
appview/ingester.go
··· 317 var stats [2]models.VanityStat 318 for i, s := range record.Stats { 319 if i < 2 { 320 - stats[i].Kind = models.VanityStatKind(s) 321 } 322 } 323
··· 317 var stats [2]models.VanityStat 318 for i, s := range record.Stats { 319 if i < 2 { 320 + stats[i].Kind = models.ParseVanityStatKind(s) 321 } 322 } 323
+18 -1
appview/issues/issues.go
··· 822 823 keyword := params.Get("q") 824 825 var issues []models.Issue 826 searchOpts := models.IssueSearchOptions{ 827 Keyword: keyword, ··· 837 } 838 l.Debug("searched issues with indexer", "count", len(res.Hits)) 839 totalIssues = int(res.Total) 840 841 issues, err = db.GetIssues( 842 rp.db, ··· 884 885 rp.pages.RepoIssues(w, pages.RepoIssuesParams{ 886 LoggedInUser: rp.oauth.GetMultiAccountUser(r), 887 - RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 888 Issues: issues, 889 IssueCount: totalIssues, 890 LabelDefs: defs,
··· 822 823 keyword := params.Get("q") 824 825 + repoInfo := rp.repoResolver.GetRepoInfo(r, user) 826 + 827 var issues []models.Issue 828 searchOpts := models.IssueSearchOptions{ 829 Keyword: keyword, ··· 839 } 840 l.Debug("searched issues with indexer", "count", len(res.Hits)) 841 totalIssues = int(res.Total) 842 + 843 + // count matching issues in the opposite state to display correct counts 844 + countRes, err := rp.indexer.Search(r.Context(), models.IssueSearchOptions{ 845 + Keyword: keyword, RepoAt: f.RepoAt().String(), IsOpen: !isOpen, 846 + Page: pagination.Page{Limit: 1}, 847 + }) 848 + if err == nil { 849 + if isOpen { 850 + repoInfo.Stats.IssueCount.Open = int(res.Total) 851 + repoInfo.Stats.IssueCount.Closed = int(countRes.Total) 852 + } else { 853 + repoInfo.Stats.IssueCount.Closed = int(res.Total) 854 + repoInfo.Stats.IssueCount.Open = int(countRes.Total) 855 + } 856 + } 857 858 issues, err = db.GetIssues( 859 rp.db, ··· 901 902 rp.pages.RepoIssues(w, pages.RepoIssuesParams{ 903 LoggedInUser: rp.oauth.GetMultiAccountUser(r), 904 + RepoInfo: repoInfo, 905 Issues: issues, 906 IssueCount: totalIssues, 907 LabelDefs: defs,
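The second indexer search requests only one hit (Limit: 1) purely to read the total for issues in the opposite open/closed state, so both tab counts reflect the active keyword filter rather than the repo-wide totals.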
+25 -17
appview/issues/opengraph.go
··· 124 } 125 126 // Split stats area: left side for status/comments (80%), right side for dolly (20%) 127 - statusCommentsArea, dollyArea := statsArea.Split(true, 80) 128 129 // Draw status and comment count in status/comments area 130 - statsBounds := statusCommentsArea.Img.Bounds() 131 statsX := statsBounds.Min.X + 60 // left padding 132 statsY := statsBounds.Min.Y 133 ··· 140 // Draw status (open/closed) with colored icon and text 141 var statusIcon string 142 var statusText string 143 - var statusBgColor color.RGBA 144 145 if issue.Open { 146 statusIcon = "circle-dot" 147 statusText = "open" 148 - statusBgColor = color.RGBA{34, 139, 34, 255} // green 149 } else { 150 statusIcon = "ban" 151 statusText = "closed" 152 - statusBgColor = color.RGBA{52, 58, 64, 255} // dark gray 153 } 154 155 - badgeIconSize := 36 156 157 - // Draw icon with status color (no background) 158 - err = statusCommentsArea.DrawLucideIcon(statusIcon, statsX, statsY+iconBaselineOffset-badgeIconSize/2+5, badgeIconSize, statusBgColor) 159 if err != nil { 160 log.Printf("failed to draw status icon: %v", err) 161 } 162 163 - // Draw text with status color (no background) 164 - textX := statsX + badgeIconSize + 12 165 - badgeTextSize := 32.0 166 - err = statusCommentsArea.DrawTextAt(statusText, textX, statsY+iconBaselineOffset, statusBgColor, badgeTextSize, ogcard.Middle, ogcard.Left) 167 if err != nil { 168 log.Printf("failed to draw status text: %v", err) 169 } 170 171 - statusTextWidth := len(statusText) * 20 172 - currentX := statsX + badgeIconSize + 12 + statusTextWidth + 50 173 174 // Draw comment count 175 - err = statusCommentsArea.DrawLucideIcon("message-square", currentX, statsY+iconBaselineOffset-iconSize/2+5, iconSize, iconColor) 176 if err != nil { 177 log.Printf("failed to draw comment icon: %v", err) 178 } ··· 182 if commentCount == 1 { 183 commentText = "1 comment" 184 } 185 - err = statusCommentsArea.DrawTextAt(commentText, currentX, statsY+iconBaselineOffset, iconColor, textSize, ogcard.Middle, ogcard.Left) 186 if err != nil { 187 log.Printf("failed to draw comment text: %v", err) 188 } ··· 205 openedDate := issue.Created.Format("Jan 2, 2006") 206 metaText := fmt.Sprintf("opened by %s ยท %s", authorHandle, openedDate) 207 208 - err = statusCommentsArea.DrawTextAt(metaText, statsX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Left) 209 if err != nil { 210 log.Printf("failed to draw metadata: %v", err) 211 }
··· 124 } 125 126 // Split stats area: left side for status/comments (80%), right side for dolly (20%) 127 + statusArea, dollyArea := statsArea.Split(true, 80) 128 129 // Draw status and comment count in status/comments area 130 + statsBounds := statusArea.Img.Bounds() 131 statsX := statsBounds.Min.X + 60 // left padding 132 statsY := statsBounds.Min.Y 133 ··· 140 // Draw status (open/closed) with colored icon and text 141 var statusIcon string 142 var statusText string 143 + var statusColor color.RGBA 144 145 if issue.Open { 146 statusIcon = "circle-dot" 147 statusText = "open" 148 + statusColor = color.RGBA{34, 139, 34, 255} // green 149 } else { 150 statusIcon = "ban" 151 statusText = "closed" 152 + statusColor = color.RGBA{52, 58, 64, 255} // dark gray 153 } 154 155 + statusTextWidth := statusArea.TextWidth(statusText, textSize) 156 + badgePadding := 12 157 + badgeHeight := int(textSize) + (badgePadding * 2) 158 + badgeWidth := iconSize + badgePadding + statusTextWidth + (badgePadding * 2) 159 + cornerRadius := 8 160 + badgeX := 60 161 + badgeY := 0 162 163 + statusArea.DrawRoundedRect(badgeX, badgeY, badgeWidth, badgeHeight, cornerRadius, statusColor) 164 + 165 + whiteColor := color.RGBA{255, 255, 255, 255} 166 + iconX := statsX + badgePadding 167 + iconY := statsY + (badgeHeight-iconSize)/2 168 + err = statusArea.DrawLucideIcon(statusIcon, iconX, iconY, iconSize, whiteColor) 169 if err != nil { 170 log.Printf("failed to draw status icon: %v", err) 171 } 172 173 + textX := statsX + badgePadding + iconSize + badgePadding 174 + textY := statsY + (badgeHeight-int(textSize))/2 - 5 175 + err = statusArea.DrawTextAt(statusText, textX, textY, whiteColor, textSize, ogcard.Top, ogcard.Left) 176 if err != nil { 177 log.Printf("failed to draw status text: %v", err) 178 } 179 180 + currentX := statsX + badgeWidth + 50 181 182 // Draw comment count 183 + err = statusArea.DrawLucideIcon("message-square", currentX, iconY, iconSize, iconColor) 184 if err != nil { 185 log.Printf("failed to draw comment icon: %v", err) 186 } ··· 190 if commentCount == 1 { 191 commentText = "1 comment" 192 } 193 + err = statusArea.DrawTextAt(commentText, currentX, textY, iconColor, textSize, ogcard.Top, ogcard.Left) 194 if err != nil { 195 log.Printf("failed to draw comment text: %v", err) 196 } ··· 213 openedDate := issue.Created.Format("Jan 2, 2006") 214 metaText := fmt.Sprintf("opened by %s ยท %s", authorHandle, openedDate) 215 216 + err = statusArea.DrawTextAt(metaText, statsX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Left) 217 if err != nil { 218 log.Printf("failed to draw metadata: %v", err) 219 }
+27 -1
appview/models/profile.go
··· 59 VanityStatOpenIssueCount VanityStatKind = "open-issue-count" 60 VanityStatClosedIssueCount VanityStatKind = "closed-issue-count" 61 VanityStatRepositoryCount VanityStatKind = "repository-count" 62 ) 63 64 func (v VanityStatKind) String() string { 65 switch v { 66 case VanityStatMergedPRCount: ··· 75 return "Closed Issues" 76 case VanityStatRepositoryCount: 77 return "Repositories" 78 } 79 - return "" 80 } 81 82 type VanityStat struct {
··· 59 VanityStatOpenIssueCount VanityStatKind = "open-issue-count" 60 VanityStatClosedIssueCount VanityStatKind = "closed-issue-count" 61 VanityStatRepositoryCount VanityStatKind = "repository-count" 62 + VanityStatStarCount VanityStatKind = "star-count" 63 + VanityStatNone VanityStatKind = "" 64 ) 65 66 + func ParseVanityStatKind(s string) VanityStatKind { 67 + switch s { 68 + case "merged-pull-request-count": 69 + return VanityStatMergedPRCount 70 + case "closed-pull-request-count": 71 + return VanityStatClosedPRCount 72 + case "open-pull-request-count": 73 + return VanityStatOpenPRCount 74 + case "open-issue-count": 75 + return VanityStatOpenIssueCount 76 + case "closed-issue-count": 77 + return VanityStatClosedIssueCount 78 + case "repository-count": 79 + return VanityStatRepositoryCount 80 + case "star-count": 81 + return VanityStatStarCount 82 + default: 83 + return VanityStatNone 84 + } 85 + } 86 + 87 func (v VanityStatKind) String() string { 88 switch v { 89 case VanityStatMergedPRCount: ··· 98 return "Closed Issues" 99 case VanityStatRepositoryCount: 100 return "Repositories" 101 + case VanityStatStarCount: 102 + return "Stars Received" 103 + default: 104 + return "" 105 } 106 } 107 108 type VanityStat struct {
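A small sketch of how the new parser behaves, assuming the models package import path tangled.org/core/appview/models; unrecognised values (including the empty string) fall back to VanityStatNone:

    package example

    import (
        "fmt"

        "tangled.org/core/appview/models"
    )

    func printVanityKinds() {
        for _, s := range []string{"star-count", "repository-count", "not-a-real-kind"} {
            kind := models.ParseVanityStatKind(s)
            // String() yields a display label such as "Stars Received"; VanityStatNone prints ""
            fmt.Printf("%q -> %q\n", s, kind.String())
        }
    }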
+42 -14
appview/notify/db/db.go
··· 2 3 import ( 4 "context" 5 - "log" 6 "slices" 7 8 "github.com/bluesky-social/indigo/atproto/syntax" ··· 11 "tangled.org/core/appview/models" 12 "tangled.org/core/appview/notify" 13 "tangled.org/core/idresolver" 14 "tangled.org/core/orm" 15 "tangled.org/core/sets" 16 ) ··· 38 } 39 40 func (n *databaseNotifier) NewStar(ctx context.Context, star *models.Star) { 41 if star.RepoAt.Collection().String() != tangled.RepoNSID { 42 // skip string stars for now 43 return ··· 45 var err error 46 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt))) 47 if err != nil { 48 - log.Printf("NewStar: failed to get repos: %v", err) 49 return 50 } 51 ··· 59 var pullId *int64 60 61 n.notifyEvent( 62 actorDid, 63 recipients, 64 eventType, ··· 75 } 76 77 func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 78 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 79 if err != nil { 80 - log.Printf("failed to fetch collaborators: %v", err) 81 return 82 } 83 ··· 101 var pullId *int64 102 103 n.notifyEvent( 104 actorDid, 105 recipients, 106 models.NotificationTypeIssueCreated, ··· 111 pullId, 112 ) 113 n.notifyEvent( 114 actorDid, 115 sets.Collect(slices.Values(mentions)), 116 models.NotificationTypeUserMentioned, ··· 123 } 124 125 func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 126 issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt)) 127 if err != nil { 128 - log.Printf("NewIssueComment: failed to get issues: %v", err) 129 return 130 } 131 if len(issues) == 0 { 132 - log.Printf("NewIssueComment: no issue found for %s", comment.IssueAt) 133 return 134 } 135 issue := issues[0] ··· 170 var pullId *int64 171 172 n.notifyEvent( 173 actorDid, 174 recipients, 175 models.NotificationTypeIssueCommented, ··· 180 pullId, 181 ) 182 n.notifyEvent( 183 actorDid, 184 sets.Collect(slices.Values(mentions)), 185 models.NotificationTypeUserMentioned, ··· 204 var repoId, issueId, pullId *int64 205 206 n.notifyEvent( 207 actorDid, 208 recipients, 209 eventType, ··· 220 } 221 222 func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) { 223 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 224 if err != nil { 225 - log.Printf("NewPull: failed to get repos: %v", err) 226 return 227 } 228 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 229 if err != nil { 230 - log.Printf("failed to fetch collaborators: %v", err) 231 return 232 } 233 ··· 249 pullId := &p 250 251 n.notifyEvent( 252 actorDid, 253 recipients, 254 eventType, ··· 261 } 262 263 func (n *databaseNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 264 pull, err := db.GetPull(n.db, 265 syntax.ATURI(comment.RepoAt), 266 comment.PullId, 267 ) 268 if err != nil { 269 - log.Printf("NewPullComment: failed to get pulls: %v", err) 270 return 271 } 272 273 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt)) 274 if err != nil { 275 - log.Printf("NewPullComment: failed to get repos: %v", err) 276 return 277 } 278 ··· 298 pullId := &p 299 300 n.notifyEvent( 301 actorDid, 302 recipients, 303 eventType, ··· 308 pullId, 309 ) 310 n.notifyEvent( 311 actorDid, 312 sets.Collect(slices.Values(mentions)), 313 models.NotificationTypeUserMentioned, ··· 336 } 337 338 func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue 
*models.Issue) { 339 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 340 if err != nil { 341 - log.Printf("failed to fetch collaborators: %v", err) 342 return 343 } 344 ··· 368 } 369 370 n.notifyEvent( 371 actor, 372 recipients, 373 eventType, ··· 380 } 381 382 func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 383 // Get repo details 384 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 385 if err != nil { 386 - log.Printf("NewPullState: failed to get repos: %v", err) 387 return 388 } 389 390 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 391 if err != nil { 392 - log.Printf("failed to fetch collaborators: %v", err) 393 return 394 } 395 ··· 417 case models.PullMerged: 418 eventType = models.NotificationTypePullMerged 419 default: 420 - log.Println("NewPullState: unexpected new PR state:", pull.State) 421 return 422 } 423 p := int64(pull.ID) 424 pullId := &p 425 426 n.notifyEvent( 427 actor, 428 recipients, 429 eventType, ··· 436 } 437 438 func (n *databaseNotifier) notifyEvent( 439 actorDid syntax.DID, 440 recipients sets.Set[syntax.DID], 441 eventType models.NotificationType, ··· 445 issueId *int64, 446 pullId *int64, 447 ) { 448 // if the user is attempting to mention >maxMentions users, this is probably spam, do not mention anybody 449 if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions { 450 return ··· 494 } 495 496 if err := db.CreateNotification(tx, notif); err != nil { 497 - log.Printf("notifyEvent: failed to create notification for %s: %v", recipientDid, err) 498 } 499 } 500
··· 2 3 import ( 4 "context" 5 "slices" 6 7 "github.com/bluesky-social/indigo/atproto/syntax" ··· 10 "tangled.org/core/appview/models" 11 "tangled.org/core/appview/notify" 12 "tangled.org/core/idresolver" 13 + "tangled.org/core/log" 14 "tangled.org/core/orm" 15 "tangled.org/core/sets" 16 ) ··· 38 } 39 40 func (n *databaseNotifier) NewStar(ctx context.Context, star *models.Star) { 41 + l := log.FromContext(ctx) 42 + 43 if star.RepoAt.Collection().String() != tangled.RepoNSID { 44 // skip string stars for now 45 return ··· 47 var err error 48 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt))) 49 if err != nil { 50 + l.Error("failed to get repos", "err", err) 51 return 52 } 53 ··· 61 var pullId *int64 62 63 n.notifyEvent( 64 + ctx, 65 actorDid, 66 recipients, 67 eventType, ··· 78 } 79 80 func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 81 + l := log.FromContext(ctx) 82 + 83 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 84 if err != nil { 85 + l.Error("failed to fetch collaborators", "err", err) 86 return 87 } 88 ··· 106 var pullId *int64 107 108 n.notifyEvent( 109 + ctx, 110 actorDid, 111 recipients, 112 models.NotificationTypeIssueCreated, ··· 117 pullId, 118 ) 119 n.notifyEvent( 120 + ctx, 121 actorDid, 122 sets.Collect(slices.Values(mentions)), 123 models.NotificationTypeUserMentioned, ··· 130 } 131 132 func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 133 + l := log.FromContext(ctx) 134 + 135 issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt)) 136 if err != nil { 137 + l.Error("failed to get issues", "err", err) 138 return 139 } 140 if len(issues) == 0 { 141 + l.Error("no issue found for", "err", comment.IssueAt) 142 return 143 } 144 issue := issues[0] ··· 179 var pullId *int64 180 181 n.notifyEvent( 182 + ctx, 183 actorDid, 184 recipients, 185 models.NotificationTypeIssueCommented, ··· 190 pullId, 191 ) 192 n.notifyEvent( 193 + ctx, 194 actorDid, 195 sets.Collect(slices.Values(mentions)), 196 models.NotificationTypeUserMentioned, ··· 215 var repoId, issueId, pullId *int64 216 217 n.notifyEvent( 218 + ctx, 219 actorDid, 220 recipients, 221 eventType, ··· 232 } 233 234 func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) { 235 + l := log.FromContext(ctx) 236 + 237 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 238 if err != nil { 239 + l.Error("failed to get repos", "err", err) 240 return 241 } 242 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 243 if err != nil { 244 + l.Error("failed to fetch collaborators", "err", err) 245 return 246 } 247 ··· 263 pullId := &p 264 265 n.notifyEvent( 266 + ctx, 267 actorDid, 268 recipients, 269 eventType, ··· 276 } 277 278 func (n *databaseNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 279 + l := log.FromContext(ctx) 280 + 281 pull, err := db.GetPull(n.db, 282 syntax.ATURI(comment.RepoAt), 283 comment.PullId, 284 ) 285 if err != nil { 286 + l.Error("failed to get pulls", "err", err) 287 return 288 } 289 290 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt)) 291 if err != nil { 292 + l.Error("failed to get repos", "err", err) 293 return 294 } 295 ··· 315 pullId := &p 316 317 n.notifyEvent( 318 + ctx, 319 actorDid, 320 recipients, 321 eventType, ··· 326 pullId, 327 ) 328 
n.notifyEvent( 329 + ctx, 330 actorDid, 331 sets.Collect(slices.Values(mentions)), 332 models.NotificationTypeUserMentioned, ··· 355 } 356 357 func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 358 + l := log.FromContext(ctx) 359 + 360 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 361 if err != nil { 362 + l.Error("failed to fetch collaborators", "err", err) 363 return 364 } 365 ··· 389 } 390 391 n.notifyEvent( 392 + ctx, 393 actor, 394 recipients, 395 eventType, ··· 402 } 403 404 func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 405 + l := log.FromContext(ctx) 406 + 407 // Get repo details 408 repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 409 if err != nil { 410 + l.Error("failed to get repos", "err", err) 411 return 412 } 413 414 collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 415 if err != nil { 416 + l.Error("failed to fetch collaborators", "err", err) 417 return 418 } 419 ··· 441 case models.PullMerged: 442 eventType = models.NotificationTypePullMerged 443 default: 444 + l.Error("unexpected new PR state", "state", pull.State) 445 return 446 } 447 p := int64(pull.ID) 448 pullId := &p 449 450 n.notifyEvent( 451 + ctx, 452 actor, 453 recipients, 454 eventType, ··· 461 } 462 463 func (n *databaseNotifier) notifyEvent( 464 + ctx context.Context, 465 actorDid syntax.DID, 466 recipients sets.Set[syntax.DID], 467 eventType models.NotificationType, ··· 471 issueId *int64, 472 pullId *int64, 473 ) { 474 + l := log.FromContext(ctx) 475 + 476 // if the user is attempting to mention >maxMentions users, this is probably spam, do not mention anybody 477 if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions { 478 return ··· 522 } 523 524 if err := db.CreateNotification(tx, notif); err != nil { 525 + l.Error("failed to create notification", "recipientDid", recipientDid, "err", err) 526 } 527 } 528
+105
appview/notify/logging_notifier.go
···
··· 1 + package notify 2 + 3 + import ( 4 + "context" 5 + "log/slog" 6 + 7 + "tangled.org/core/appview/models" 8 + tlog "tangled.org/core/log" 9 + 10 + "github.com/bluesky-social/indigo/atproto/syntax" 11 + ) 12 + 13 + type loggingNotifier struct { 14 + inner Notifier 15 + logger *slog.Logger 16 + } 17 + 18 + func NewLoggingNotifier(inner Notifier, logger *slog.Logger) Notifier { 19 + return &loggingNotifier{ 20 + inner, 21 + logger, 22 + } 23 + } 24 + 25 + var _ Notifier = &loggingNotifier{} 26 + 27 + func (l *loggingNotifier) NewRepo(ctx context.Context, repo *models.Repo) { 28 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewRepo")) 29 + l.inner.NewRepo(ctx, repo) 30 + } 31 + 32 + func (l *loggingNotifier) NewStar(ctx context.Context, star *models.Star) { 33 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewStar")) 34 + l.inner.NewStar(ctx, star) 35 + } 36 + 37 + func (l *loggingNotifier) DeleteStar(ctx context.Context, star *models.Star) { 38 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "DeleteStar")) 39 + l.inner.DeleteStar(ctx, star) 40 + } 41 + 42 + func (l *loggingNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 43 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewIssue")) 44 + l.inner.NewIssue(ctx, issue, mentions) 45 + } 46 + 47 + func (l *loggingNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 48 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewIssueComment")) 49 + l.inner.NewIssueComment(ctx, comment, mentions) 50 + } 51 + 52 + func (l *loggingNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 53 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewIssueState")) 54 + l.inner.NewIssueState(ctx, actor, issue) 55 + } 56 + 57 + func (l *loggingNotifier) DeleteIssue(ctx context.Context, issue *models.Issue) { 58 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "DeleteIssue")) 59 + l.inner.DeleteIssue(ctx, issue) 60 + } 61 + 62 + func (l *loggingNotifier) NewFollow(ctx context.Context, follow *models.Follow) { 63 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewFollow")) 64 + l.inner.NewFollow(ctx, follow) 65 + } 66 + 67 + func (l *loggingNotifier) DeleteFollow(ctx context.Context, follow *models.Follow) { 68 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "DeleteFollow")) 69 + l.inner.DeleteFollow(ctx, follow) 70 + } 71 + 72 + func (l *loggingNotifier) NewPull(ctx context.Context, pull *models.Pull) { 73 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewPull")) 74 + l.inner.NewPull(ctx, pull) 75 + } 76 + 77 + func (l *loggingNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 78 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewPullComment")) 79 + l.inner.NewPullComment(ctx, comment, mentions) 80 + } 81 + 82 + func (l *loggingNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 83 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewPullState")) 84 + l.inner.NewPullState(ctx, actor, pull) 85 + } 86 + 87 + func (l *loggingNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) { 88 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "UpdateProfile")) 89 + l.inner.UpdateProfile(ctx, profile) 90 + } 91 + 92 + func (l *loggingNotifier) NewString(ctx context.Context, s *models.String) { 93 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "NewString")) 94 + 
l.inner.NewString(ctx, s) 95 + } 96 + 97 + func (l *loggingNotifier) EditString(ctx context.Context, s *models.String) { 98 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "EditString")) 99 + l.inner.EditString(ctx, s) 100 + } 101 + 102 + func (l *loggingNotifier) DeleteString(ctx context.Context, did, rkey string) { 103 + ctx = tlog.IntoContext(ctx, tlog.SubLogger(l.logger, "DeleteString")) 104 + l.inner.DeleteString(ctx, did, rkey) 105 + }
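Paired with the database notifier changes above, this wrapper establishes the logging flow for notifications: each method stores a sub-logger named after the event in the context via tlog.IntoContext, and downstream notifiers pull it back out with log.FromContext instead of writing to the global standard-library logger.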
+20 -31
appview/notify/merged_notifier.go
··· 2 3 import ( 4 "context" 5 - "log/slog" 6 - "reflect" 7 "sync" 8 9 "github.com/bluesky-social/indigo/atproto/syntax" 10 "tangled.org/core/appview/models" 11 - "tangled.org/core/log" 12 ) 13 14 type mergedNotifier struct { 15 notifiers []Notifier 16 - logger *slog.Logger 17 } 18 19 - func NewMergedNotifier(notifiers []Notifier, logger *slog.Logger) Notifier { 20 - return &mergedNotifier{notifiers, logger} 21 } 22 23 var _ Notifier = &mergedNotifier{} 24 25 // fanout calls the same method on all notifiers concurrently 26 - func (m *mergedNotifier) fanout(method string, ctx context.Context, args ...any) { 27 - ctx = log.IntoContext(ctx, m.logger.With("method", method)) 28 var wg sync.WaitGroup 29 for _, n := range m.notifiers { 30 wg.Add(1) 31 go func(notifier Notifier) { 32 defer wg.Done() 33 - v := reflect.ValueOf(notifier).MethodByName(method) 34 - in := make([]reflect.Value, len(args)+1) 35 - in[0] = reflect.ValueOf(ctx) 36 - for i, arg := range args { 37 - in[i+1] = reflect.ValueOf(arg) 38 - } 39 - v.Call(in) 40 }(n) 41 } 42 } 43 44 func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) { 45 - m.fanout("NewRepo", ctx, repo) 46 } 47 48 func (m *mergedNotifier) NewStar(ctx context.Context, star *models.Star) { 49 - m.fanout("NewStar", ctx, star) 50 } 51 52 func (m *mergedNotifier) DeleteStar(ctx context.Context, star *models.Star) { 53 - m.fanout("DeleteStar", ctx, star) 54 } 55 56 func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 57 - m.fanout("NewIssue", ctx, issue, mentions) 58 } 59 60 func (m *mergedNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 61 - m.fanout("NewIssueComment", ctx, comment, mentions) 62 } 63 64 func (m *mergedNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 65 - m.fanout("NewIssueState", ctx, actor, issue) 66 } 67 68 func (m *mergedNotifier) DeleteIssue(ctx context.Context, issue *models.Issue) { 69 - m.fanout("DeleteIssue", ctx, issue) 70 } 71 72 func (m *mergedNotifier) NewFollow(ctx context.Context, follow *models.Follow) { 73 - m.fanout("NewFollow", ctx, follow) 74 } 75 76 func (m *mergedNotifier) DeleteFollow(ctx context.Context, follow *models.Follow) { 77 - m.fanout("DeleteFollow", ctx, follow) 78 } 79 80 func (m *mergedNotifier) NewPull(ctx context.Context, pull *models.Pull) { 81 - m.fanout("NewPull", ctx, pull) 82 } 83 84 func (m *mergedNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 85 - m.fanout("NewPullComment", ctx, comment, mentions) 86 } 87 88 func (m *mergedNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 89 - m.fanout("NewPullState", ctx, actor, pull) 90 } 91 92 func (m *mergedNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) { 93 - m.fanout("UpdateProfile", ctx, profile) 94 } 95 96 func (m *mergedNotifier) NewString(ctx context.Context, s *models.String) { 97 - m.fanout("NewString", ctx, s) 98 } 99 100 func (m *mergedNotifier) EditString(ctx context.Context, s *models.String) { 101 - m.fanout("EditString", ctx, s) 102 } 103 104 func (m *mergedNotifier) DeleteString(ctx context.Context, did, rkey string) { 105 - m.fanout("DeleteString", ctx, did, rkey) 106 }
··· 2 3 import ( 4 "context" 5 "sync" 6 7 "github.com/bluesky-social/indigo/atproto/syntax" 8 "tangled.org/core/appview/models" 9 ) 10 11 type mergedNotifier struct { 12 notifiers []Notifier 13 } 14 15 + func NewMergedNotifier(notifiers []Notifier) Notifier { 16 + return &mergedNotifier{notifiers} 17 } 18 19 var _ Notifier = &mergedNotifier{} 20 21 // fanout calls the same method on all notifiers concurrently 22 + func (m *mergedNotifier) fanout(callback func(Notifier)) { 23 var wg sync.WaitGroup 24 for _, n := range m.notifiers { 25 wg.Add(1) 26 go func(notifier Notifier) { 27 defer wg.Done() 28 + callback(n) 29 }(n) 30 } 31 } 32 33 func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) { 34 + m.fanout(func(n Notifier) { n.NewRepo(ctx, repo) }) 35 } 36 37 func (m *mergedNotifier) NewStar(ctx context.Context, star *models.Star) { 38 + m.fanout(func(n Notifier) { n.NewStar(ctx, star) }) 39 } 40 41 func (m *mergedNotifier) DeleteStar(ctx context.Context, star *models.Star) { 42 + m.fanout(func(n Notifier) { n.DeleteStar(ctx, star) }) 43 } 44 45 func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 46 + m.fanout(func(n Notifier) { n.NewIssue(ctx, issue, mentions) }) 47 } 48 49 func (m *mergedNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 50 + m.fanout(func(n Notifier) { n.NewIssueComment(ctx, comment, mentions) }) 51 } 52 53 func (m *mergedNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 54 + m.fanout(func(n Notifier) { n.NewIssueState(ctx, actor, issue) }) 55 } 56 57 func (m *mergedNotifier) DeleteIssue(ctx context.Context, issue *models.Issue) { 58 + m.fanout(func(n Notifier) { n.DeleteIssue(ctx, issue) }) 59 } 60 61 func (m *mergedNotifier) NewFollow(ctx context.Context, follow *models.Follow) { 62 + m.fanout(func(n Notifier) { n.NewFollow(ctx, follow) }) 63 } 64 65 func (m *mergedNotifier) DeleteFollow(ctx context.Context, follow *models.Follow) { 66 + m.fanout(func(n Notifier) { n.DeleteFollow(ctx, follow) }) 67 } 68 69 func (m *mergedNotifier) NewPull(ctx context.Context, pull *models.Pull) { 70 + m.fanout(func(n Notifier) { n.NewPull(ctx, pull) }) 71 } 72 73 func (m *mergedNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 74 + m.fanout(func(n Notifier) { n.NewPullComment(ctx, comment, mentions) }) 75 } 76 77 func (m *mergedNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 78 + m.fanout(func(n Notifier) { n.NewPullState(ctx, actor, pull) }) 79 } 80 81 func (m *mergedNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) { 82 + m.fanout(func(n Notifier) { n.UpdateProfile(ctx, profile) }) 83 } 84 85 func (m *mergedNotifier) NewString(ctx context.Context, s *models.String) { 86 + m.fanout(func(n Notifier) { n.NewString(ctx, s) }) 87 } 88 89 func (m *mergedNotifier) EditString(ctx context.Context, s *models.String) { 90 + m.fanout(func(n Notifier) { n.EditString(ctx, s) }) 91 } 92 93 func (m *mergedNotifier) DeleteString(ctx context.Context, did, rkey string) { 94 + m.fanout(func(n Notifier) { n.DeleteString(ctx, did, rkey) }) 95 }
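One possible composition of the two notifiers; dbNotifier and otherNotifier are placeholders for real Notifier implementations and are not names from this change:

    package example

    import (
        "log/slog"

        "tangled.org/core/appview/notify" // assumed import path for the notify package
    )

    func buildNotifier(dbNotifier, otherNotifier notify.Notifier, logger *slog.Logger) notify.Notifier {
        // the merged notifier fans each call out to every inner notifier concurrently
        merged := notify.NewMergedNotifier([]notify.Notifier{dbNotifier, otherNotifier})
        // the logging wrapper attaches a method-named sub-logger to ctx before delegating
        return notify.NewLoggingNotifier(merged, logger)
    }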
+56
appview/ogcard/card.go
··· 257 return textWidth, err 258 } 259 260 // DrawBoldText draws bold text by rendering multiple times with slight offsets 261 func (c *Card) DrawBoldText(text string, x, y int, textColor color.Color, sizePt float64, valign VAlign, halign HAlign) (int, error) { 262 // Draw the text multiple times with slight offsets to create bold effect ··· 582 func (c *Card) DrawRect(startX, startY, endX, endY int, color color.Color) { 583 draw.Draw(c.Img, image.Rect(startX, startY, endX, endY), &image.Uniform{color}, image.Point{}, draw.Src) 584 }
··· 257 return textWidth, err 258 } 259 260 + func (c *Card) FontHeight(sizePt float64) int { 261 + ft := freetype.NewContext() 262 + ft.SetDPI(72) 263 + ft.SetFont(c.Font) 264 + ft.SetFontSize(sizePt) 265 + return ft.PointToFixed(sizePt).Ceil() 266 + } 267 + 268 + func (c *Card) TextWidth(text string, sizePt float64) int { 269 + face := truetype.NewFace(c.Font, &truetype.Options{Size: sizePt, DPI: 72}) 270 + lineWidth := font.MeasureString(face, text) 271 + textWidth := lineWidth.Ceil() 272 + return textWidth 273 + } 274 + 275 // DrawBoldText draws bold text by rendering multiple times with slight offsets 276 func (c *Card) DrawBoldText(text string, x, y int, textColor color.Color, sizePt float64, valign VAlign, halign HAlign) (int, error) { 277 // Draw the text multiple times with slight offsets to create bold effect ··· 597 func (c *Card) DrawRect(startX, startY, endX, endY int, color color.Color) { 598 draw.Draw(c.Img, image.Rect(startX, startY, endX, endY), &image.Uniform{color}, image.Point{}, draw.Src) 599 } 600 + 601 + // drawRoundedRect draws a filled rounded rectangle on the given card 602 + func (card *Card) DrawRoundedRect(x, y, width, height, cornerRadius int, fillColor color.RGBA) { 603 + cardBounds := card.Img.Bounds() 604 + for py := y; py < y+height; py++ { 605 + for px := x; px < x+width; px++ { 606 + // calculate distance from corners 607 + dx := 0 608 + dy := 0 609 + 610 + // check which corner region we're in 611 + if px < x+cornerRadius && py < y+cornerRadius { 612 + // top-left corner 613 + dx = x + cornerRadius - px 614 + dy = y + cornerRadius - py 615 + } else if px >= x+width-cornerRadius && py < y+cornerRadius { 616 + // top-right corner 617 + dx = px - (x + width - cornerRadius - 1) 618 + dy = y + cornerRadius - py 619 + } else if px < x+cornerRadius && py >= y+height-cornerRadius { 620 + // bottom-left corner 621 + dx = x + cornerRadius - px 622 + dy = py - (y + height - cornerRadius - 1) 623 + } else if px >= x+width-cornerRadius && py >= y+height-cornerRadius { 624 + // Bottom-right corner 625 + dx = px - (x + width - cornerRadius - 1) 626 + dy = py - (y + height - cornerRadius - 1) 627 + } 628 + 629 + // if we're in a corner, check if we're within the radius 630 + inCorner := (dx > 0 || dy > 0) 631 + withinRadius := dx*dx+dy*dy <= cornerRadius*cornerRadius 632 + 633 + // draw pixel if not in corner, or in corner and within radius 634 + // check bounds relative to the card's image bounds 635 + if (!inCorner || withinRadius) && px >= 0 && px < cardBounds.Dx() && py >= 0 && py < cardBounds.Dy() { 636 + card.Img.Set(px+cardBounds.Min.X, py+cardBounds.Min.Y, fillColor) 637 + } 638 + } 639 + } 640 + }
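A minimal sketch of the new measuring and badge helpers, mirroring the opengraph usage above; the card is assumed to already have a font loaded, and the colours and coordinates are illustrative only:

    package example

    import (
        "image/color"

        "tangled.org/core/appview/ogcard" // assumed import path for the ogcard package
    )

    func drawStatusBadge(card *ogcard.Card) {
        label := "open"
        textSize := 32.0
        padding := 12
        // size the badge around the measured label
        badgeW := card.TextWidth(label, textSize) + 2*padding
        badgeH := int(textSize) + 2*padding // FontHeight(textSize) is an alternative measure
        card.DrawRoundedRect(60, 0, badgeW, badgeH, 8, color.RGBA{34, 139, 34, 255})
        _ = card.DrawTextAt(label, 60+padding, (badgeH-int(textSize))/2, color.RGBA{255, 255, 255, 255}, textSize, ogcard.Top, ogcard.Left)
    }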
+30 -12
appview/pages/pages.go
··· 764 } 765 766 type RepoTreeParams struct { 767 - LoggedInUser *oauth.MultiAccountUser 768 - RepoInfo repoinfo.RepoInfo 769 - Active string 770 - BreadCrumbs [][]string 771 - TreePath string 772 - Raw bool 773 - HTMLReadme template.HTML 774 types.RepoTreeResponse 775 } 776 ··· 844 return p.executeRepo("repo/tags", w, params) 845 } 846 847 type RepoArtifactParams struct { 848 LoggedInUser *oauth.MultiAccountUser 849 RepoInfo repoinfo.RepoInfo ··· 855 } 856 857 type RepoBlobParams struct { 858 - LoggedInUser *oauth.MultiAccountUser 859 - RepoInfo repoinfo.RepoInfo 860 - Active string 861 - BreadCrumbs [][]string 862 - BlobView models.BlobView 863 *tangled.RepoBlob_Output 864 } 865
··· 764 } 765 766 type RepoTreeParams struct { 767 + LoggedInUser *oauth.MultiAccountUser 768 + RepoInfo repoinfo.RepoInfo 769 + Active string 770 + BreadCrumbs [][]string 771 + TreePath string 772 + Raw bool 773 + HTMLReadme template.HTML 774 + EmailToDid map[string]string 775 + LastCommitInfo *types.LastCommitInfo 776 types.RepoTreeResponse 777 } 778 ··· 846 return p.executeRepo("repo/tags", w, params) 847 } 848 849 + type RepoTagParams struct { 850 + LoggedInUser *oauth.MultiAccountUser 851 + RepoInfo repoinfo.RepoInfo 852 + Active string 853 + types.RepoTagResponse 854 + ArtifactMap map[plumbing.Hash][]models.Artifact 855 + DanglingArtifacts []models.Artifact 856 + } 857 + 858 + func (p *Pages) RepoTag(w io.Writer, params RepoTagParams) error { 859 + params.Active = "overview" 860 + return p.executeRepo("repo/tag", w, params) 861 + } 862 + 863 type RepoArtifactParams struct { 864 LoggedInUser *oauth.MultiAccountUser 865 RepoInfo repoinfo.RepoInfo ··· 871 } 872 873 type RepoBlobParams struct { 874 + LoggedInUser *oauth.MultiAccountUser 875 + RepoInfo repoinfo.RepoInfo 876 + Active string 877 + BreadCrumbs [][]string 878 + BlobView models.BlobView 879 + EmailToDid map[string]string 880 + LastCommitInfo *types.LastCommitInfo 881 *tangled.RepoBlob_Output 882 } 883
+113
appview/pages/templates/fragments/resizeable.html
···
··· 1 + {{ define "fragments/resizable" }} 2 + <script> 3 + class ResizablePanel { 4 + constructor(resizerElement) { 5 + this.resizer = resizerElement; 6 + this.isResizing = false; 7 + this.type = resizerElement.dataset.resizer; 8 + this.targetId = resizerElement.dataset.target; 9 + this.target = document.getElementById(this.targetId); 10 + this.min = parseInt(resizerElement.dataset.min) || 100; 11 + this.max = parseInt(resizerElement.dataset.max) || Infinity; 12 + 13 + this.direction = resizerElement.dataset.direction || 'before'; // 'before' or 'after' 14 + 15 + this.handleMouseDown = this.handleMouseDown.bind(this); 16 + this.handleMouseMove = this.handleMouseMove.bind(this); 17 + this.handleMouseUp = this.handleMouseUp.bind(this); 18 + 19 + this.init(); 20 + } 21 + 22 + init() { 23 + this.resizer.addEventListener('mousedown', this.handleMouseDown); 24 + } 25 + 26 + handleMouseDown(e) { 27 + e.preventDefault(); 28 + this.isResizing = true; 29 + this.resizer.classList.add('resizing'); 30 + document.body.style.cursor = this.type === 'vertical' ? 'col-resize' : 'row-resize'; 31 + document.body.style.userSelect = 'none'; 32 + 33 + this.startX = e.clientX; 34 + this.startY = e.clientY; 35 + this.startWidth = this.target.offsetWidth; 36 + this.startHeight = this.target.offsetHeight; 37 + 38 + document.addEventListener('mousemove', this.handleMouseMove); 39 + document.addEventListener('mouseup', this.handleMouseUp); 40 + } 41 + 42 + handleMouseMove(e) { 43 + if (!this.isResizing) return; 44 + 45 + if (this.type === 'vertical') { 46 + let newWidth; 47 + 48 + if (this.direction === 'after') { 49 + const deltaX = this.startX - e.clientX; 50 + newWidth = this.startWidth + deltaX; 51 + } else { 52 + const deltaX = e.clientX - this.startX; 53 + newWidth = this.startWidth + deltaX; 54 + } 55 + 56 + if (newWidth >= this.min && newWidth <= this.max) { 57 + this.target.style.width = newWidth + 'px'; 58 + this.target.style.flexShrink = '0'; 59 + } 60 + } else { 61 + let newHeight; 62 + 63 + if (this.direction === 'after') { 64 + const deltaY = this.startY - e.clientY; 65 + newHeight = this.startHeight + deltaY; 66 + } else { 67 + const deltaY = e.clientY - this.startY; 68 + newHeight = this.startHeight + deltaY; 69 + } 70 + 71 + if (newHeight >= this.min && newHeight <= this.max) { 72 + this.target.style.height = newHeight + 'px'; 73 + } 74 + } 75 + } 76 + 77 + handleMouseUp() { 78 + if (!this.isResizing) return; 79 + 80 + this.isResizing = false; 81 + this.resizer.classList.remove('resizing'); 82 + document.body.style.cursor = ''; 83 + document.body.style.userSelect = ''; 84 + 85 + document.removeEventListener('mousemove', this.handleMouseMove); 86 + document.removeEventListener('mouseup', this.handleMouseUp); 87 + } 88 + 89 + destroy() { 90 + this.resizer.removeEventListener('mousedown', this.handleMouseDown); 91 + document.removeEventListener('mousemove', this.handleMouseMove); 92 + document.removeEventListener('mouseup', this.handleMouseUp); 93 + } 94 + } 95 + 96 + function initializeResizers() { 97 + const resizers = document.querySelectorAll('[data-resizer]'); 98 + const instances = []; 99 + 100 + resizers.forEach(resizer => { 101 + instances.push(new ResizablePanel(resizer)); 102 + }); 103 + 104 + return instances; 105 + } 106 + 107 + if (document.readyState === 'loading') { 108 + document.addEventListener('DOMContentLoaded', initializeResizers); 109 + } else { 110 + initializeResizers(); 111 + } 112 + </script> 113 + {{ end }}
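The script activates on any element carrying a data-resizer attribute ('vertical' resizes width, anything else resizes height), adjusting the element named by data-target within the optional data-min/data-max pixel bounds; data-direction ('before' or 'after') controls which side of the grip grows as the pointer moves, matching the resize-grip fragments added to the diff and pull templates below.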
+3 -3
appview/pages/templates/fragments/starBtn.html
··· 15 hx-disabled-elt="#starBtn" 16 > 17 {{ if .IsStarred }} 18 - {{ i "star" "w-4 h-4 fill-current" }} 19 {{ else }} 20 - {{ i "star" "w-4 h-4" }} 21 {{ end }} 22 <span class="text-sm"> 23 {{ .StarCount }} 24 </span> 25 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 26 </button> 27 {{ end }}
··· 15 hx-disabled-elt="#starBtn" 16 > 17 {{ if .IsStarred }} 18 + {{ i "star" "w-4 h-4 fill-current inline group-[.htmx-request]:hidden" }} 19 {{ else }} 20 + {{ i "star" "w-4 h-4 inline group-[.htmx-request]:hidden" }} 21 {{ end }} 22 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 23 <span class="text-sm"> 24 {{ .StarCount }} 25 </span> 26 </button> 27 {{ end }}
+6
appview/pages/templates/repo/blob.html
··· 12 13 {{ define "repoContent" }} 14 {{ $linkstyle := "no-underline hover:underline" }} 15 <div class="pb-2 mb-3 text-base border-b border-gray-200 dark:border-gray-700"> 16 <div class="flex flex-col md:flex-row md:justify-between gap-2"> 17 <div id="breadcrumbs" class="overflow-x-auto whitespace-nowrap text-gray-400 dark:text-gray-500"> ··· 57 </div> 58 </div> 59 </div> 60 {{ if .BlobView.IsUnsupported }} 61 <p class="text-center text-gray-400 dark:text-gray-500"> 62 Previews are not supported for this file type.
··· 12 13 {{ define "repoContent" }} 14 {{ $linkstyle := "no-underline hover:underline" }} 15 + 16 <div class="pb-2 mb-3 text-base border-b border-gray-200 dark:border-gray-700"> 17 <div class="flex flex-col md:flex-row md:justify-between gap-2"> 18 <div id="breadcrumbs" class="overflow-x-auto whitespace-nowrap text-gray-400 dark:text-gray-500"> ··· 58 </div> 59 </div> 60 </div> 61 + 62 + {{ if .LastCommitInfo }} 63 + {{ template "repo/fragments/lastCommitPanel" $ }} 64 + {{ end }} 65 + 66 {{ if .BlobView.IsUnsupported }} 67 <p class="text-center text-gray-400 dark:text-gray-500"> 68 Previews are not supported for this file type.
+3 -2
appview/pages/templates/repo/fragments/artifact.html
··· 19 {{ if and .LoggedInUser (eq .LoggedInUser.Did .Artifact.Did) }} 20 <button 21 id="delete-{{ $unique }}" 22 - class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2" 23 title="Delete artifact" 24 hx-delete="/{{ .RepoInfo.FullName }}/tags/{{ .Artifact.Tag.String }}/{{ .Artifact.Name | urlquery }}" 25 hx-swap="outerHTML" 26 hx-target="#artifact-{{ $unique }}" 27 hx-disabled-elt="#delete-{{ $unique }}" 28 hx-confirm="Are you sure you want to delete the artifact '{{ .Artifact.Name }}'?"> 29 - {{ i "trash-2" "w-4 h-4" }} 30 </button> 31 {{ end }} 32 </div>
··· 19 {{ if and .LoggedInUser (eq .LoggedInUser.Did .Artifact.Did) }} 20 <button 21 id="delete-{{ $unique }}" 22 + class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 23 title="Delete artifact" 24 hx-delete="/{{ .RepoInfo.FullName }}/tags/{{ .Artifact.Tag.String }}/{{ .Artifact.Name | urlquery }}" 25 hx-swap="outerHTML" 26 hx-target="#artifact-{{ $unique }}" 27 hx-disabled-elt="#delete-{{ $unique }}" 28 hx-confirm="Are you sure you want to delete the artifact '{{ .Artifact.Name }}'?"> 29 + {{ i "trash-2" "size-4 inline group-[.htmx-request]:hidden" }} 30 + {{ i "loader-circle" "size-4 animate-spin hidden group-[.htmx-request]:inline" }} 31 </button> 32 {{ end }} 33 </div>
+70
appview/pages/templates/repo/fragments/artifactList.html
···
··· 1 + {{ define "repo/fragments/artifactList" }} 2 + {{ $root := index . 0 }} 3 + {{ $tag := index . 1 }} 4 + {{ $isPushAllowed := $root.RepoInfo.Roles.IsPushAllowed }} 5 + {{ $artifacts := index $root.ArtifactMap $tag.Tag.Hash }} 6 + 7 + <h2 class="my-4 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold">artifacts</h2> 8 + <div class="flex flex-col rounded border border-gray-200 dark:border-gray-700"> 9 + {{ range $artifact := $artifacts }} 10 + {{ $args := dict "LoggedInUser" $root.LoggedInUser "RepoInfo" $root.RepoInfo "Artifact" $artifact }} 11 + {{ template "repo/fragments/artifact" $args }} 12 + {{ end }} 13 + <div id="artifact-git-source" class="flex items-center justify-between p-2 border-b border-gray-200 dark:border-gray-700"> 14 + <div id="left-side" class="flex items-center gap-2 min-w-0 max-w-[60%]"> 15 + {{ i "archive" "w-4 h-4" }} 16 + <a href="/{{ $root.RepoInfo.FullName }}/archive/{{ pathEscape (print "refs/tags/" $tag.Name) }}" class="no-underline hover:no-underline"> 17 + Source code (.tar.gz) 18 + </a> 19 + </div> 20 + </div> 21 + {{ if $isPushAllowed }} 22 + {{ template "uploadArtifact" (list $root $tag) }} 23 + {{ end }} 24 + </div> 25 + {{ end }} 26 + 27 + {{ define "uploadArtifact" }} 28 + {{ $root := index . 0 }} 29 + {{ $tag := index . 1 }} 30 + {{ $unique := $tag.Tag.Target.String }} 31 + <form 32 + id="upload-{{$unique}}" 33 + method="post" 34 + enctype="multipart/form-data" 35 + hx-post="/{{ $root.RepoInfo.FullName }}/tags/{{ $tag.Name | urlquery }}/upload" 36 + hx-on::after-request="if(event.detail.successful) this.reset()" 37 + hx-disabled-elt="#upload-btn-{{$unique}}" 38 + hx-swap="beforebegin" 39 + hx-target="#artifact-git-source" 40 + class="flex items-center gap-2 px-2 group"> 41 + <div class="flex-grow"> 42 + <input type="file" 43 + name="artifact" 44 + required 45 + class="block py-2 px-0 w-full border-none 46 + text-black dark:text-white 47 + bg-white dark:bg-gray-800 48 + file:mr-4 file:px-2 file:py-2 49 + file:rounded file:border-0 50 + file:text-sm file:font-medium 51 + file:text-gray-700 file:dark:text-gray-300 52 + file:bg-gray-200 file:dark:bg-gray-700 53 + file:hover:bg-gray-100 file:hover:dark:bg-gray-600 54 + "> 55 + </input> 56 + </div> 57 + <div class="flex justify-end"> 58 + <button 59 + type="submit" 60 + class="btn-create gap-2" 61 + id="upload-btn-{{$unique}}" 62 + title="Upload artifact"> 63 + {{ i "upload" "size-4 inline group-[.htmx-request]:hidden" }} 64 + {{ i "loader-circle" "size-4 animate-spin hidden group-[.htmx-request]:inline" }} 65 + <span class="hidden md:inline">upload</span> 66 + </button> 67 + </div> 68 + </form> 69 + {{ end }} 70 +
+19 -2
appview/pages/templates/repo/fragments/diff.html
··· 3 #filesToggle:checked ~ div label[for="filesToggle"] .show-text { display: none; } 4 #filesToggle:checked ~ div label[for="filesToggle"] .hide-text { display: inline; } 5 #filesToggle:not(:checked) ~ div label[for="filesToggle"] .hide-text { display: none; } 6 - #filesToggle:checked ~ div div#files { width: fit-content; max-width: 15vw; margin-right: 1rem; } 7 #filesToggle:not(:checked) ~ div div#files { width: 0; display: none; margin-right: 0; } 8 </style> 9 10 {{ template "diffTopbar" . }} 11 {{ block "diffLayout" . }} {{ end }} 12 {{ end }} 13 14 {{ define "diffTopbar" }} ··· 78 79 {{ end }} 80 81 {{ define "diffLayout" }} 82 {{ $diff := index . 0 }} 83 {{ $opts := index . 1 }} ··· 90 </section> 91 </div> 92 93 <!-- main content --> 94 - <div class="flex-1 min-w-0 sticky top-12 pb-12"> 95 {{ template "diffFiles" (list $diff $opts) }} 96 </div> 97
··· 3 #filesToggle:checked ~ div label[for="filesToggle"] .show-text { display: none; } 4 #filesToggle:checked ~ div label[for="filesToggle"] .hide-text { display: inline; } 5 #filesToggle:not(:checked) ~ div label[for="filesToggle"] .hide-text { display: none; } 6 + #filesToggle:checked ~ div div#files { width: fit-content; max-width: 15vw; } 7 #filesToggle:not(:checked) ~ div div#files { width: 0; display: none; margin-right: 0; } 8 + #filesToggle:not(:checked) ~ div div#resize-files { display: none; } 9 </style> 10 11 {{ template "diffTopbar" . }} 12 {{ block "diffLayout" . }} {{ end }} 13 + {{ template "fragments/resizable" }} 14 {{ end }} 15 16 {{ define "diffTopbar" }} ··· 80 81 {{ end }} 82 83 + {{ define "resize-grip" }} 84 + {{ $id := index . 0 }} 85 + {{ $target := index . 1 }} 86 + {{ $direction := index . 2 }} 87 + <div id="{{ $id }}" 88 + data-resizer="vertical" 89 + data-target="{{ $target }}" 90 + data-direction="{{ $direction }}" 91 + class="resizer-vertical hidden md:flex w-4 sticky top-12 max-h-screen flex-col items-center justify-center group"> 92 + <div class="w-1 h-16 group-hover:h-24 group-[.resizing]:h-24 transition-all rounded-full bg-gray-400 dark:bg-gray-500 group-hover:bg-gray-500 group-hover:dark:bg-gray-400"></div> 93 + </div> 94 + {{ end }} 95 + 96 {{ define "diffLayout" }} 97 {{ $diff := index . 0 }} 98 {{ $opts := index . 1 }} ··· 105 </section> 106 </div> 107 108 + {{ template "resize-grip" (list "resize-files" "files" "before") }} 109 + 110 <!-- main content --> 111 + <div id="diff-files" class="flex-1 min-w-0 sticky top-12 pb-12"> 112 {{ template "diffFiles" (list $diff $opts) }} 113 </div> 114
+29
appview/pages/templates/repo/fragments/lastCommitPanel.html
···
··· 1 + {{ define "repo/fragments/lastCommitPanel" }} 2 + {{ $messageParts := splitN .LastCommitInfo.Message "\n\n" 2 }} 3 + <div class="pb-2 mb-3 border-b border-gray-200 dark:border-gray-700 flex items-center justify-between text-sm"> 4 + <div class="flex items-center gap-1"> 5 + {{ if .LastCommitInfo.Author }} 6 + {{ $authorDid := index .EmailToDid .LastCommitInfo.Author.Email }} 7 + <span class="flex items-center gap-1"> 8 + {{ if $authorDid }} 9 + {{ template "user/fragments/picHandleLink" $authorDid }} 10 + {{ else }} 11 + {{ placeholderAvatar "tiny" }} 12 + <a href="mailto:{{ .LastCommitInfo.Author.Email }}" class="no-underline hover:underline">{{ .LastCommitInfo.Author.Name }}</a> 13 + {{ end }} 14 + </span> 15 + <span class="px-1 select-none before:content-['\00B7']"></span> 16 + {{ end }} 17 + <a href="/{{ .RepoInfo.FullName }}/commit/{{ .LastCommitInfo.Hash }}" 18 + class="inline no-underline hover:underline dark:text-white"> 19 + {{ index $messageParts 0 }} 20 + </a> 21 + <span class="px-1 select-none before:content-['\00B7']"></span> 22 + <span class="text-gray-400 dark:text-gray-500">{{ template "repo/fragments/time" .LastCommitInfo.When }}</span> 23 + </div> 24 + <a href="/{{ .RepoInfo.FullName }}/commit/{{ .LastCommitInfo.Hash.String }}" 25 + class="no-underline hover:underline text-gray-700 dark:text-gray-300 bg-gray-100 dark:bg-gray-900 px-2 py-1 rounded font-mono text-xs"> 26 + {{ slice .LastCommitInfo.Hash.String 0 8 }} 27 + </a> 28 + </div> 29 + {{ end }}
+67
appview/pages/templates/repo/fragments/singleTag.html
···
··· 1 + {{ define "repo/fragments/singleTag" }} 2 + {{ $root := index . 0 }} 3 + {{ $item := index . 1 }} 4 + {{ with $item }} 5 + <div class="md:grid md:grid-cols-12 md:items-start flex flex-col"> 6 + <!-- Header column (top on mobile, left on md+) --> 7 + <div class="md:col-span-2 md:border-r border-b md:border-b-0 border-gray-200 dark:border-gray-700 w-full md:h-full"> 8 + <!-- Mobile layout: horizontal --> 9 + <div class="flex md:hidden flex-col py-2 px-2 text-xl"> 10 + <a href="/{{ $root.RepoInfo.FullName }}/tags/{{ .Name | urlquery }}" class="no-underline hover:underline flex items-center gap-2 font-bold"> 11 + {{ i "tag" "w-4 h-4" }} 12 + {{ .Name }} 13 + </a> 14 + 15 + <div class="flex items-center gap-3 text-gray-500 dark:text-gray-400 text-sm"> 16 + {{ if .Tag }} 17 + <a href="/{{ $root.RepoInfo.FullName }}/commit/{{ .Tag.Target.String }}" 18 + class="no-underline hover:underline text-gray-500 dark:text-gray-400"> 19 + {{ slice .Tag.Target.String 0 8 }} 20 + </a> 21 + 22 + <span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท']"></span> 23 + <span>{{ .Tag.Tagger.Name }}</span> 24 + 25 + <span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท']"></span> 26 + {{ template "repo/fragments/shortTime" .Tag.Tagger.When }} 27 + {{ end }} 28 + </div> 29 + </div> 30 + 31 + <!-- Desktop layout: vertical and left-aligned --> 32 + <div class="hidden md:block text-left px-2 pb-6"> 33 + <a href="/{{ $root.RepoInfo.FullName }}/tags/{{ .Name | urlquery }}" class="no-underline hover:underline flex items-center gap-2 font-bold"> 34 + {{ i "tag" "w-4 h-4" }} 35 + {{ .Name }} 36 + </a> 37 + <div class="flex flex-grow flex-col text-gray-500 dark:text-gray-400 text-sm"> 38 + {{ if .Tag }} 39 + <a href="/{{ $root.RepoInfo.FullName }}/commit/{{ .Tag.Target.String }}" 40 + class="no-underline hover:underline text-gray-500 dark:text-gray-400 flex items-center gap-2"> 41 + {{ i "git-commit-horizontal" "w-4 h-4" }} 42 + {{ slice .Tag.Target.String 0 8 }} 43 + </a> 44 + <span>{{ .Tag.Tagger.Name }}</span> 45 + {{ template "repo/fragments/time" .Tag.Tagger.When }} 46 + {{ end }} 47 + </div> 48 + </div> 49 + </div> 50 + 51 + <!-- Content column (bottom on mobile, right on md+) --> 52 + <div class="md:col-span-10 px-2 py-3 md:py-0 md:pb-6"> 53 + {{ if .Tag }} 54 + {{ $messageParts := splitN .Tag.Message "\n\n" 2 }} 55 + <p class="font-bold text-lg">{{ index $messageParts 0 }}</p> 56 + {{ if gt (len $messageParts) 1 }} 57 + <p class="cursor-text py-2">{{ nl2br (index $messageParts 1) }}</p> 58 + {{ end }} 59 + {{ template "repo/fragments/artifactList" (list $root .) }} 60 + {{ else }} 61 + <p class="italic text-gray-500 dark:text-gray-400">no message</p> 62 + {{ end }} 63 + </div> 64 + </div> 65 + {{ end }} 66 + {{ end }} 67 +
+1 -1
appview/pages/templates/repo/index.html
··· 334 {{ with $tag }} 335 <div> 336 <div class="text-base flex items-center gap-2"> 337 - <a href="/{{ $.RepoInfo.FullName }}/tree/{{ .Reference.Name | urlquery }}" 338 class="inline no-underline hover:underline dark:text-white"> 339 {{ .Reference.Name }} 340 </a>
··· 334 {{ with $tag }} 335 <div> 336 <div class="text-base flex items-center gap-2"> 337 + <a href="/{{ $.RepoInfo.FullName }}/tags/{{ .Reference.Name | urlquery }}" 338 class="inline no-underline hover:underline dark:text-white"> 339 {{ .Reference.Name }} 340 </a>
+2 -2
appview/pages/templates/repo/pulls/fragments/pullActions.html
··· 38 hx-vals='{"branch": "{{ .BranchDeleteStatus.Branch }}" }' 39 hx-swap="none" 40 class="btn-flat p-2 flex items-center gap-2 no-underline hover:no-underline group text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300"> 41 - {{ i "git-branch" "w-4 h-4" }} 42 - <span>delete branch</span> 43 {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 44 </button> 45 {{ end }} 46 {{ if and $isPushAllowed $isOpen $isLastRound }}
··· 38 hx-vals='{"branch": "{{ .BranchDeleteStatus.Branch }}" }' 39 hx-swap="none" 40 class="btn-flat p-2 flex items-center gap-2 no-underline hover:no-underline group text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300"> 41 + {{ i "git-branch" "w-4 h-4 inline group-[.htmx-request]:hidden" }} 42 {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 43 + delete branch 44 </button> 45 {{ end }} 46 {{ if and $isPushAllowed $isOpen $isLastRound }}
+22 -5
appview/pages/templates/repo/pulls/pull.html
··· 111 {{ end }} 112 {{ end }} 113 114 {{ define "diffLayout" }} 115 {{ $diff := index . 0 }} 116 {{ $opts := index . 1 }} ··· 124 </section> 125 </div> 126 127 <!-- main content --> 128 - <div class="flex-1 min-w-0 sticky top-12 pb-12"> 129 {{ template "diffFiles" (list $diff $opts) }} 130 </div> 131 132 <!-- right panel --> 133 {{ template "subsPanel" $ }} ··· 187 188 {{ define "subsToggle" }} 189 <style> 190 - /* Mobile: full width */ 191 #subsToggle:checked ~ div div#subs { 192 width: 100%; 193 margin-left: 0; ··· 196 #subsToggle:checked ~ div label[for="subsToggle"] .hide-toggle { display: flex; } 197 #subsToggle:not(:checked) ~ div label[for="subsToggle"] .hide-toggle { display: none; } 198 199 - /* Desktop: 25vw with left margin */ 200 @media (min-width: 768px) { 201 #subsToggle:checked ~ div div#subs { 202 width: 25vw; 203 - margin-left: 1rem; 204 } 205 - /* Unchecked state */ 206 #subsToggle:not(:checked) ~ div div#subs { 207 width: 0; 208 display: none; 209 margin-left: 0; 210 } 211 } 212 </style>
··· 111 {{ end }} 112 {{ end }} 113 114 + {{ define "resize-grip" }} 115 + {{ $id := index . 0 }} 116 + {{ $target := index . 1 }} 117 + {{ $direction := index . 2 }} 118 + <div id="{{ $id }}" 119 + data-resizer="vertical" 120 + data-target="{{ $target }}" 121 + data-direction="{{ $direction }}" 122 + class="resizer-vertical hidden md:flex w-4 sticky top-12 max-h-screen flex-col items-center justify-center group"> 123 + <div class="w-1 h-16 group-hover:h-24 group-[.resizing]:h-24 transition-all rounded-full bg-gray-400 dark:bg-gray-500 group-hover:bg-gray-500 group-hover:dark:bg-gray-400"></div> 124 + </div> 125 + {{ end }} 126 + 127 {{ define "diffLayout" }} 128 {{ $diff := index . 0 }} 129 {{ $opts := index . 1 }} ··· 137 </section> 138 </div> 139 140 + {{ template "resize-grip" (list "resize-files" "files" "before") }} 141 + 142 <!-- main content --> 143 + <div id="diff-files" class="flex-1 min-w-0 sticky top-12 pb-12"> 144 {{ template "diffFiles" (list $diff $opts) }} 145 </div> 146 + 147 + {{ template "resize-grip" (list "resize-subs" "subs" "after") }} 148 149 <!-- right panel --> 150 {{ template "subsPanel" $ }} ··· 204 205 {{ define "subsToggle" }} 206 <style> 207 #subsToggle:checked ~ div div#subs { 208 width: 100%; 209 margin-left: 0; ··· 212 #subsToggle:checked ~ div label[for="subsToggle"] .hide-toggle { display: flex; } 213 #subsToggle:not(:checked) ~ div label[for="subsToggle"] .hide-toggle { display: none; } 214 215 @media (min-width: 768px) { 216 #subsToggle:checked ~ div div#subs { 217 width: 25vw; 218 + max-width: 50vw; 219 } 220 #subsToggle:not(:checked) ~ div div#subs { 221 width: 0; 222 display: none; 223 margin-left: 0; 224 + } 225 + #subsToggle:not(:checked) ~ div div#resize-subs { 226 + display: none; 227 } 228 } 229 </style>
+16
appview/pages/templates/repo/tag.html
···
··· 1 + {{ define "title" }}
2 +   tags · {{ .RepoInfo.FullName }}
3 + {{ end }}
4 +
5 + {{ define "extrameta" }}
6 +   {{ $title := printf "tags &middot; %s" .RepoInfo.FullName }}
7 +   {{ $url := printf "https://tangled.org/%s/tags/%s" .RepoInfo.FullName .Tag.Name }}
8 +
9 +   {{ template "repo/fragments/og" (dict "RepoInfo" .RepoInfo "Title" $title "Url" $url) }}
10 + {{ end }}
11 +
12 + {{ define "repoContent" }}
13 +   <section class="flex flex-col py-2 gap-12 md:gap-0">
14 +     {{ template "repo/fragments/singleTag" (list $ .Tag ) }}
15 +   </section>
16 + {{ end }}
+1 -129
appview/pages/templates/repo/tags.html
··· 14 <h2 class="mb-4 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold">tags</h2> 15 <div class="flex flex-col py-2 gap-12 md:gap-0"> 16 {{ range .Tags }} 17 - <div class="md:grid md:grid-cols-12 md:items-start flex flex-col"> 18 - <!-- Header column (top on mobile, left on md+) --> 19 - <div class="md:col-span-2 md:border-r border-b md:border-b-0 border-gray-200 dark:border-gray-700 w-full md:h-full"> 20 - <!-- Mobile layout: horizontal --> 21 - <div class="flex md:hidden flex-col py-2 px-2 text-xl"> 22 - <a href="/{{ $.RepoInfo.FullName }}/tree/{{ .Name | urlquery }}" class="no-underline hover:underline flex items-center gap-2 font-bold"> 23 - {{ i "tag" "w-4 h-4" }} 24 - {{ .Name }} 25 - </a> 26 - 27 - <div class="flex items-center gap-3 text-gray-500 dark:text-gray-400 text-sm"> 28 - {{ if .Tag }} 29 - <a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Tag.Target.String }}" 30 - class="no-underline hover:underline text-gray-500 dark:text-gray-400"> 31 - {{ slice .Tag.Target.String 0 8 }} 32 - </a> 33 - 34 - <span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท']"></span> 35 - <span>{{ .Tag.Tagger.Name }}</span> 36 - 37 - <span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท']"></span> 38 - {{ template "repo/fragments/shortTime" .Tag.Tagger.When }} 39 - {{ end }} 40 - </div> 41 - </div> 42 - 43 - <!-- Desktop layout: vertical and left-aligned --> 44 - <div class="hidden md:block text-left px-2 pb-6"> 45 - <a href="/{{ $.RepoInfo.FullName }}/tree/{{ .Name | urlquery }}" class="no-underline hover:underline flex items-center gap-2 font-bold"> 46 - {{ i "tag" "w-4 h-4" }} 47 - {{ .Name }} 48 - </a> 49 - <div class="flex flex-grow flex-col text-gray-500 dark:text-gray-400 text-sm"> 50 - {{ if .Tag }} 51 - <a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Tag.Target.String }}" 52 - class="no-underline hover:underline text-gray-500 dark:text-gray-400 flex items-center gap-2"> 53 - {{ i "git-commit-horizontal" "w-4 h-4" }} 54 - {{ slice .Tag.Target.String 0 8 }} 55 - </a> 56 - <span>{{ .Tag.Tagger.Name }}</span> 57 - {{ template "repo/fragments/time" .Tag.Tagger.When }} 58 - {{ end }} 59 - </div> 60 - </div> 61 - </div> 62 - 63 - <!-- Content column (bottom on mobile, right on md+) --> 64 - <div class="md:col-span-10 px-2 py-3 md:py-0 md:pb-6"> 65 - {{ if .Tag }} 66 - {{ $messageParts := splitN .Tag.Message "\n\n" 2 }} 67 - <p class="font-bold text-lg">{{ index $messageParts 0 }}</p> 68 - {{ if gt (len $messageParts) 1 }} 69 - <p class="cursor-text py-2">{{ nl2br (index $messageParts 1) }}</p> 70 - {{ end }} 71 - {{ block "artifacts" (list $ .) }} {{ end }} 72 - {{ else }} 73 - <p class="italic text-gray-500 dark:text-gray-400">no message</p> 74 - {{ end }} 75 - </div> 76 - </div> 77 {{ else }} 78 <p class="text-center text-gray-400 dark:text-gray-500 p-4"> 79 This repository does not contain any tags. ··· 89 {{ block "dangling" . }} {{ end }} 90 </section> 91 {{ end }} 92 - {{ end }} 93 - 94 - {{ define "artifacts" }} 95 - {{ $root := index . 0 }} 96 - {{ $tag := index . 
1 }} 97 - {{ $isPushAllowed := $root.RepoInfo.Roles.IsPushAllowed }} 98 - {{ $artifacts := index $root.ArtifactMap $tag.Tag.Hash }} 99 - 100 - <h2 class="my-4 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold">artifacts</h2> 101 - <div class="flex flex-col rounded border border-gray-200 dark:border-gray-700"> 102 - {{ range $artifact := $artifacts }} 103 - {{ $args := dict "LoggedInUser" $root.LoggedInUser "RepoInfo" $root.RepoInfo "Artifact" $artifact }} 104 - {{ template "repo/fragments/artifact" $args }} 105 - {{ end }} 106 - <div id="artifact-git-source" class="flex items-center justify-between p-2 border-b border-gray-200 dark:border-gray-700"> 107 - <div id="left-side" class="flex items-center gap-2 min-w-0 max-w-[60%]"> 108 - {{ i "archive" "w-4 h-4" }} 109 - <a href="/{{ $root.RepoInfo.FullName }}/archive/{{ pathEscape (print "refs/tags/" $tag.Name) }}" class="no-underline hover:no-underline"> 110 - Source code (.tar.gz) 111 - </a> 112 - </div> 113 - </div> 114 - {{ if $isPushAllowed }} 115 - {{ block "uploadArtifact" (list $root $tag) }} {{ end }} 116 - {{ end }} 117 - </div> 118 - {{ end }} 119 - 120 - {{ define "uploadArtifact" }} 121 - {{ $root := index . 0 }} 122 - {{ $tag := index . 1 }} 123 - {{ $unique := $tag.Tag.Target.String }} 124 - <form 125 - id="upload-{{$unique}}" 126 - method="post" 127 - enctype="multipart/form-data" 128 - hx-post="/{{ $root.RepoInfo.FullName }}/tags/{{ $tag.Name | urlquery }}/upload" 129 - hx-on::after-request="if(event.detail.successful) this.reset()" 130 - hx-disabled-elt="#upload-btn-{{$unique}}" 131 - hx-swap="beforebegin" 132 - hx-target="this" 133 - class="flex items-center gap-2 px-2"> 134 - <div class="flex-grow"> 135 - <input type="file" 136 - name="artifact" 137 - required 138 - class="block py-2 px-0 w-full border-none 139 - text-black dark:text-white 140 - bg-white dark:bg-gray-800 141 - file:mr-4 file:px-2 file:py-2 142 - file:rounded file:border-0 143 - file:text-sm file:font-medium 144 - file:text-gray-700 file:dark:text-gray-300 145 - file:bg-gray-200 file:dark:bg-gray-700 146 - file:hover:bg-gray-100 file:hover:dark:bg-gray-600 147 - "> 148 - </input> 149 - </div> 150 - <div class="flex justify-end"> 151 - <button 152 - type="submit" 153 - class="btn gap-2" 154 - id="upload-btn-{{$unique}}" 155 - title="Upload artifact"> 156 - {{ i "upload" "w-4 h-4" }} 157 - <span class="hidden md:inline">upload</span> 158 - </button> 159 - </div> 160 - </form> 161 {{ end }} 162 163 {{ define "dangling" }}
··· 14 <h2 class="mb-4 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold">tags</h2> 15 <div class="flex flex-col py-2 gap-12 md:gap-0"> 16 {{ range .Tags }} 17 + {{ template "repo/fragments/singleTag" (list $ . ) }} 18 {{ else }} 19 <p class="text-center text-gray-400 dark:text-gray-500 p-4"> 20 This repository does not contain any tags. ··· 30 {{ block "dangling" . }} {{ end }} 31 </section> 32 {{ end }} 33 {{ end }} 34 35 {{ define "dangling" }}
+4
appview/pages/templates/repo/tree.html
··· 52 </div> 53 </div> 54 55 {{ range .Files }} 56 <div class="grid grid-cols-12 gap-4 items-center py-1"> 57 <div class="col-span-8 md:col-span-4">
··· 52 </div> 53 </div> 54 55 + {{ if .LastCommitInfo }} 56 + {{ template "repo/fragments/lastCommitPanel" $ }} 57 + {{ end }} 58 + 59 {{ range .Files }} 60 <div class="grid grid-cols-12 gap-4 items-center py-1"> 61 <div class="col-span-8 md:col-span-4">
+2 -1
appview/pages/templates/user/fragments/editBio.html
··· 110 {{ $id := index . 0 }} 111 {{ $stat := index . 1 }} 112 <select class="stat-group w-full p-1 border border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700 text-sm" id="stat{{$id}}" name="stat{{$id}}"> 113 - <option value="">choose stat</option> 114 {{ $stats := assoc 115 "merged-pull-request-count" "Merged PR Count" 116 "closed-pull-request-count" "Closed PR Count" ··· 118 "open-issue-count" "Open Issue Count" 119 "closed-issue-count" "Closed Issue Count" 120 "repository-count" "Repository Count" 121 }} 122 {{ range $s := $stats }} 123 {{ $value := index $s 0 }}
··· 110 {{ $id := index . 0 }} 111 {{ $stat := index . 1 }} 112 <select class="stat-group w-full p-1 border border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700 text-sm" id="stat{{$id}}" name="stat{{$id}}"> 113 + <option value="">Choose Stat</option> 114 {{ $stats := assoc 115 "merged-pull-request-count" "Merged PR Count" 116 "closed-pull-request-count" "Closed PR Count" ··· 118 "open-issue-count" "Open Issue Count" 119 "closed-issue-count" "Closed Issue Count" 120 "repository-count" "Repository Count" 121 + "star-count" "Star Count" 122 }} 123 {{ range $s := $stats }} 124 {{ $value := index $s 0 }}
+6 -3
appview/pages/templates/user/fragments/follow.html
··· 13 hx-swap="outerHTML" 14 > 15 {{ if eq .FollowStatus.String "IsNotFollowing" }} 16 - {{ i "user-round-plus" "w-4 h-4" }} follow 17 {{ else }} 18 - {{ i "user-round-minus" "w-4 h-4" }} unfollow 19 {{ end }} 20 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 21 </button> 22 {{ end }}
··· 13 hx-swap="outerHTML" 14 > 15 {{ if eq .FollowStatus.String "IsNotFollowing" }} 16 + {{ i "user-round-plus" "size-4 inline group-[.htmx-request]:hidden" }} 17 + {{ i "loader-circle" "size-4 animate-spin hidden group-[.htmx-request]:inline" }} 18 + follow 19 {{ else }} 20 + {{ i "user-round-minus" "size-4 inline group-[.htmx-request]:hidden" }} 21 + {{ i "loader-circle" "size-4 animate-spin hidden group-[.htmx-request]:inline" }} 22 + unfollow 23 {{ end }} 24 </button> 25 {{ end }}
+27 -19
appview/pulls/opengraph.go
··· 128 } 129 130 // Split stats area: left side for status/stats (80%), right side for dolly (20%) 131 - statusStatsArea, dollyArea := statsArea.Split(true, 80) 132 133 // Draw status and stats 134 - statsBounds := statusStatsArea.Img.Bounds() 135 statsX := statsBounds.Min.X + 60 // left padding 136 statsY := statsBounds.Min.Y 137 ··· 157 } else { 158 statusIcon = "git-pull-request-closed" 159 statusText = "closed" 160 - statusColor = color.RGBA{128, 128, 128, 255} // gray 161 } 162 163 - statusIconSize := 36 164 165 - // Draw icon with status color 166 - err = statusStatsArea.DrawLucideIcon(statusIcon, statsX, statsY+iconBaselineOffset-statusIconSize/2+5, statusIconSize, statusColor) 167 if err != nil { 168 log.Printf("failed to draw status icon: %v", err) 169 } 170 171 - // Draw text with status color 172 - textX := statsX + statusIconSize + 12 173 - statusTextSize := 32.0 174 - err = statusStatsArea.DrawTextAt(statusText, textX, statsY+iconBaselineOffset, statusColor, statusTextSize, ogcard.Middle, ogcard.Left) 175 if err != nil { 176 log.Printf("failed to draw status text: %v", err) 177 } 178 179 - statusTextWidth := len(statusText) * 20 180 - currentX := statsX + statusIconSize + 12 + statusTextWidth + 40 181 182 // Draw comment count 183 - err = statusStatsArea.DrawLucideIcon("message-square", currentX, statsY+iconBaselineOffset-iconSize/2+5, iconSize, iconColor) 184 if err != nil { 185 log.Printf("failed to draw comment icon: %v", err) 186 } ··· 190 if commentCount == 1 { 191 commentText = "1 comment" 192 } 193 - err = statusStatsArea.DrawTextAt(commentText, currentX, statsY+iconBaselineOffset, iconColor, textSize, ogcard.Middle, ogcard.Left) 194 if err != nil { 195 log.Printf("failed to draw comment text: %v", err) 196 } ··· 199 currentX += commentTextWidth + 40 200 201 // Draw files changed 202 - err = statusStatsArea.DrawLucideIcon("file-diff", currentX, statsY+iconBaselineOffset-iconSize/2+5, iconSize, iconColor) 203 if err != nil { 204 log.Printf("failed to draw file diff icon: %v", err) 205 } ··· 209 if filesChanged == 1 { 210 filesText = "1 file" 211 } 212 - err = statusStatsArea.DrawTextAt(filesText, currentX, statsY+iconBaselineOffset, iconColor, textSize, ogcard.Middle, ogcard.Left) 213 if err != nil { 214 log.Printf("failed to draw files text: %v", err) 215 } ··· 220 // Draw additions (green +) 221 greenColor := color.RGBA{34, 139, 34, 255} 222 additionsText := fmt.Sprintf("+%d", diffStats.Insertions) 223 - err = statusStatsArea.DrawTextAt(additionsText, currentX, statsY+iconBaselineOffset, greenColor, textSize, ogcard.Middle, ogcard.Left) 224 if err != nil { 225 log.Printf("failed to draw additions text: %v", err) 226 } ··· 231 // Draw deletions (red -) right next to additions 232 redColor := color.RGBA{220, 20, 60, 255} 233 deletionsText := fmt.Sprintf("-%d", diffStats.Deletions) 234 - err = statusStatsArea.DrawTextAt(deletionsText, currentX, statsY+iconBaselineOffset, redColor, textSize, ogcard.Middle, ogcard.Left) 235 if err != nil { 236 log.Printf("failed to draw deletions text: %v", err) 237 } ··· 254 openedDate := pull.Created.Format("Jan 2, 2006") 255 metaText := fmt.Sprintf("opened by %s ยท %s", authorHandle, openedDate) 256 257 - err = statusStatsArea.DrawTextAt(metaText, statsX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Left) 258 if err != nil { 259 log.Printf("failed to draw metadata: %v", err) 260 }
··· 128 } 129 130 // Split stats area: left side for status/stats (80%), right side for dolly (20%) 131 + statusArea, dollyArea := statsArea.Split(true, 80) 132 133 // Draw status and stats 134 + statsBounds := statusArea.Img.Bounds() 135 statsX := statsBounds.Min.X + 60 // left padding 136 statsY := statsBounds.Min.Y 137 ··· 157 } else { 158 statusIcon = "git-pull-request-closed" 159 statusText = "closed" 160 + statusColor = color.RGBA{52, 58, 64, 255} // dark gray 161 } 162 163 + statusTextWidth := statusArea.TextWidth(statusText, textSize) 164 + badgePadding := 12 165 + badgeHeight := int(textSize) + (badgePadding * 2) 166 + badgeWidth := iconSize + badgePadding + statusTextWidth + (badgePadding * 2) 167 + cornerRadius := 8 168 + badgeX := 60 169 + badgeY := 0 170 + 171 + statusArea.DrawRoundedRect(badgeX, badgeY, badgeWidth, badgeHeight, cornerRadius, statusColor) 172 173 + whiteColor := color.RGBA{255, 255, 255, 255} 174 + iconX := statsX + badgePadding 175 + iconY := statsY + (badgeHeight-iconSize)/2 176 + err = statusArea.DrawLucideIcon(statusIcon, iconX, iconY, iconSize, whiteColor) 177 if err != nil { 178 log.Printf("failed to draw status icon: %v", err) 179 } 180 181 + textX := statsX + badgePadding + iconSize + badgePadding 182 + textY := statsY + (badgeHeight-int(textSize))/2 - 5 183 + err = statusArea.DrawTextAt(statusText, textX, textY, whiteColor, textSize, ogcard.Top, ogcard.Left) 184 if err != nil { 185 log.Printf("failed to draw status text: %v", err) 186 } 187 188 + currentX := statsX + badgeWidth + 50 189 190 // Draw comment count 191 + err = statusArea.DrawLucideIcon("message-square", currentX, iconY, iconSize, iconColor) 192 if err != nil { 193 log.Printf("failed to draw comment icon: %v", err) 194 } ··· 198 if commentCount == 1 { 199 commentText = "1 comment" 200 } 201 + err = statusArea.DrawTextAt(commentText, currentX, textY, iconColor, textSize, ogcard.Top, ogcard.Left) 202 if err != nil { 203 log.Printf("failed to draw comment text: %v", err) 204 } ··· 207 currentX += commentTextWidth + 40 208 209 // Draw files changed 210 + err = statusArea.DrawLucideIcon("file-diff", currentX, iconY, iconSize, iconColor) 211 if err != nil { 212 log.Printf("failed to draw file diff icon: %v", err) 213 } ··· 217 if filesChanged == 1 { 218 filesText = "1 file" 219 } 220 + err = statusArea.DrawTextAt(filesText, currentX, textY, iconColor, textSize, ogcard.Top, ogcard.Left) 221 if err != nil { 222 log.Printf("failed to draw files text: %v", err) 223 } ··· 228 // Draw additions (green +) 229 greenColor := color.RGBA{34, 139, 34, 255} 230 additionsText := fmt.Sprintf("+%d", diffStats.Insertions) 231 + err = statusArea.DrawTextAt(additionsText, currentX, textY, greenColor, textSize, ogcard.Top, ogcard.Left) 232 if err != nil { 233 log.Printf("failed to draw additions text: %v", err) 234 } ··· 239 // Draw deletions (red -) right next to additions 240 redColor := color.RGBA{220, 20, 60, 255} 241 deletionsText := fmt.Sprintf("-%d", diffStats.Deletions) 242 + err = statusArea.DrawTextAt(deletionsText, currentX, textY, redColor, textSize, ogcard.Top, ogcard.Left) 243 if err != nil { 244 log.Printf("failed to draw deletions text: %v", err) 245 } ··· 262 openedDate := pull.Created.Format("Jan 2, 2006") 263 metaText := fmt.Sprintf("opened by %s ยท %s", authorHandle, openedDate) 264 265 + err = statusArea.DrawTextAt(metaText, statsX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Left) 266 if err != nil { 267 log.Printf("failed to draw metadata: %v", err) 268 }
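The rewritten open-graph code replaces the baseline-offset text layout with a filled status badge sized from the rendered text width. The sizing arithmetic is easy to get wrong, so here it is pulled out as a pure function with a worked example; the numbers in `main` are placeholders, since the real `textSize` and `iconSize` are defined earlier in the file and not visible in this hunk.

```go
package main

import "fmt"

// badgeDims mirrors the badge sizing in opengraph.go: height is the text size
// plus symmetric vertical padding, width is icon + gap + text + horizontal
// padding on both sides.
func badgeDims(textWidth, iconSize, padding int, textSize float64) (w, h int) {
	h = int(textSize) + 2*padding
	w = iconSize + padding + textWidth + 2*padding
	return w, h
}

func main() {
	// Placeholder values standing in for the measured text width, iconSize,
	// badgePadding and textSize used in the handler.
	w, h := badgeDims(120, 36, 12, 32)
	fmt.Printf("badge: %dx%d\n", w, h) // badge: 192x56
}
```

The icon and text are then centered inside that rectangle, which is why the hunk computes `iconY` and `textY` from `badgeHeight` rather than from the old baseline offset.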
+39 -30
appview/repo/artifact.go
··· 5 "encoding/json" 6 "fmt" 7 "io" 8 - "log" 9 "net/http" 10 "net/url" 11 "time" ··· 31 32 // TODO: proper statuses here on early exit 33 func (rp *Repo) AttachArtifact(w http.ResponseWriter, r *http.Request) { 34 user := rp.oauth.GetMultiAccountUser(r) 35 tagParam := chi.URLParam(r, "tag") 36 f, err := rp.repoResolver.Resolve(r) 37 if err != nil { 38 - log.Println("failed to get repo and knot", err) 39 rp.pages.Notice(w, "upload", "failed to upload artifact, error in repo resolution") 40 return 41 } 42 43 tag, err := rp.resolveTag(r.Context(), f, tagParam) 44 if err != nil { 45 - log.Println("failed to resolve tag", err) 46 rp.pages.Notice(w, "upload", "failed to upload artifact, error in tag resolution") 47 return 48 } 49 50 file, header, err := r.FormFile("artifact") 51 if err != nil { 52 - log.Println("failed to upload artifact", err) 53 rp.pages.Notice(w, "upload", "failed to upload artifact") 54 return 55 } ··· 57 58 client, err := rp.oauth.AuthorizedClient(r) 59 if err != nil { 60 - log.Println("failed to get authorized client", err) 61 rp.pages.Notice(w, "upload", "failed to get authorized client") 62 return 63 } 64 65 uploadBlobResp, err := xrpc.RepoUploadBlob(r.Context(), client, file, header.Header.Get("Content-Type")) 66 if err != nil { 67 - log.Println("failed to upload blob", err) 68 rp.pages.Notice(w, "upload", "Failed to upload blob to your PDS. Try again later.") 69 return 70 } 71 72 - log.Println("uploaded blob", humanize.Bytes(uint64(uploadBlobResp.Blob.Size)), uploadBlobResp.Blob.Ref.String()) 73 74 rkey := tid.TID() 75 createdAt := time.Now() ··· 89 }, 90 }) 91 if err != nil { 92 - log.Println("failed to create record", err) 93 rp.pages.Notice(w, "upload", "Failed to create artifact record. Try again later.") 94 return 95 } 96 97 - log.Println(putRecordResp.Uri) 98 99 tx, err := rp.db.BeginTx(r.Context(), nil) 100 if err != nil { 101 - log.Println("failed to start tx") 102 rp.pages.Notice(w, "upload", "Failed to create artifact. Try again later.") 103 return 104 } ··· 118 119 err = db.AddArtifact(tx, artifact) 120 if err != nil { 121 - log.Println("failed to add artifact record to db", err) 122 rp.pages.Notice(w, "upload", "Failed to create artifact. Try again later.") 123 return 124 } 125 126 err = tx.Commit() 127 if err != nil { 128 - log.Println("failed to add artifact record to db") 129 rp.pages.Notice(w, "upload", "Failed to create artifact. 
Try again later.") 130 return 131 } ··· 138 } 139 140 func (rp *Repo) DownloadArtifact(w http.ResponseWriter, r *http.Request) { 141 f, err := rp.repoResolver.Resolve(r) 142 if err != nil { 143 - log.Println("failed to get repo and knot", err) 144 http.Error(w, "failed to resolve repo", http.StatusInternalServerError) 145 return 146 } ··· 150 151 tag, err := rp.resolveTag(r.Context(), f, tagParam) 152 if err != nil { 153 - log.Println("failed to resolve tag", err) 154 rp.pages.Notice(w, "upload", "failed to upload artifact, error in tag resolution") 155 return 156 } ··· 162 orm.FilterEq("name", filename), 163 ) 164 if err != nil { 165 - log.Println("failed to get artifacts", err) 166 http.Error(w, "failed to get artifact", http.StatusInternalServerError) 167 return 168 } 169 170 if len(artifacts) != 1 { 171 - log.Printf("too many or too few artifacts found") 172 http.Error(w, "artifact not found", http.StatusNotFound) 173 return 174 } ··· 177 178 ownerId, err := rp.idResolver.ResolveIdent(r.Context(), f.Did) 179 if err != nil { 180 - log.Println("failed to resolve repo owner did", f.Did, err) 181 http.Error(w, "repository owner not found", http.StatusNotFound) 182 return 183 } ··· 191 192 req, err := http.NewRequest(http.MethodGet, url.String(), nil) 193 if err != nil { 194 - log.Println("failed to create request", err) 195 http.Error(w, "failed to create request", http.StatusInternalServerError) 196 return 197 } ··· 199 200 resp, err := http.DefaultClient.Do(req) 201 if err != nil { 202 - log.Println("failed to make request", err) 203 http.Error(w, "failed to make request to PDS", http.StatusInternalServerError) 204 return 205 } ··· 215 216 // stream the body directly to the client 217 if _, err := io.Copy(w, resp.Body); err != nil { 218 - log.Println("error streaming response to client:", err) 219 } 220 } 221 222 // TODO: proper statuses here on early exit 223 func (rp *Repo) DeleteArtifact(w http.ResponseWriter, r *http.Request) { 224 user := rp.oauth.GetMultiAccountUser(r) 225 tagParam := chi.URLParam(r, "tag") 226 filename := chi.URLParam(r, "file") 227 f, err := rp.repoResolver.Resolve(r) 228 if err != nil { 229 - log.Println("failed to get repo and knot", err) 230 return 231 } 232 ··· 241 orm.FilterEq("name", filename), 242 ) 243 if err != nil { 244 - log.Println("failed to get artifacts", err) 245 rp.pages.Notice(w, "remove", "Failed to delete artifact. Try again later.") 246 return 247 } ··· 253 artifact := artifacts[0] 254 255 if user.Active.Did != artifact.Did { 256 - log.Println("user not authorized to delete artifact", err) 257 rp.pages.Notice(w, "remove", "Unauthorized deletion of artifact.") 258 return 259 } ··· 264 Rkey: artifact.Rkey, 265 }) 266 if err != nil { 267 - log.Println("failed to get blob from pds", err) 268 rp.pages.Notice(w, "remove", "Failed to remove blob from PDS.") 269 return 270 } 271 272 tx, err := rp.db.BeginTx(r.Context(), nil) 273 if err != nil { 274 - log.Println("failed to start tx") 275 rp.pages.Notice(w, "remove", "Failed to delete artifact. Try again later.") 276 return 277 } ··· 283 orm.FilterEq("name", filename), 284 ) 285 if err != nil { 286 - log.Println("failed to remove artifact record from db", err) 287 rp.pages.Notice(w, "remove", "Failed to delete artifact. Try again later.") 288 return 289 } 290 291 err = tx.Commit() 292 if err != nil { 293 - log.Println("failed to remove artifact record from db") 294 rp.pages.Notice(w, "remove", "Failed to delete artifact. 
Try again later.") 295 return 296 } 297 298 w.Write([]byte{}) 299 } 300 301 func (rp *Repo) resolveTag(ctx context.Context, f *models.Repo, tagParam string) (*types.TagReference, error) { 302 tagParam, err := url.QueryUnescape(tagParam) 303 if err != nil { 304 return nil, err ··· 317 xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 318 if err != nil { 319 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 320 - log.Println("failed to call XRPC repo.tags", xrpcerr) 321 return nil, xrpcerr 322 } 323 - log.Println("failed to reach knotserver", err) 324 return nil, err 325 } 326 327 var result types.RepoTagsResponse 328 if err := json.Unmarshal(xrpcBytes, &result); err != nil { 329 - log.Println("failed to decode XRPC tags response", err) 330 return nil, err 331 } 332
··· 5 "encoding/json" 6 "fmt" 7 "io" 8 "net/http" 9 "net/url" 10 "time" ··· 30 31 // TODO: proper statuses here on early exit 32 func (rp *Repo) AttachArtifact(w http.ResponseWriter, r *http.Request) { 33 + l := rp.logger.With("handler", "AttachArtifact") 34 + 35 user := rp.oauth.GetMultiAccountUser(r) 36 tagParam := chi.URLParam(r, "tag") 37 f, err := rp.repoResolver.Resolve(r) 38 if err != nil { 39 + l.Error("failed to get repo and knot", "err", err) 40 rp.pages.Notice(w, "upload", "failed to upload artifact, error in repo resolution") 41 return 42 } 43 44 tag, err := rp.resolveTag(r.Context(), f, tagParam) 45 if err != nil { 46 + l.Error("failed to resolve tag", "err", err) 47 rp.pages.Notice(w, "upload", "failed to upload artifact, error in tag resolution") 48 return 49 } 50 51 file, header, err := r.FormFile("artifact") 52 if err != nil { 53 + l.Error("failed to upload artifact", "err", err) 54 rp.pages.Notice(w, "upload", "failed to upload artifact") 55 return 56 } ··· 58 59 client, err := rp.oauth.AuthorizedClient(r) 60 if err != nil { 61 + l.Error("failed to get authorized client", "err", err) 62 rp.pages.Notice(w, "upload", "failed to get authorized client") 63 return 64 } 65 66 uploadBlobResp, err := xrpc.RepoUploadBlob(r.Context(), client, file, header.Header.Get("Content-Type")) 67 if err != nil { 68 + l.Error("failed to upload blob", "err", err) 69 rp.pages.Notice(w, "upload", "Failed to upload blob to your PDS. Try again later.") 70 return 71 } 72 73 + l.Info("uploaded blob", "size", humanize.Bytes(uint64(uploadBlobResp.Blob.Size)), "blobRef", uploadBlobResp.Blob.Ref.String()) 74 75 rkey := tid.TID() 76 createdAt := time.Now() ··· 90 }, 91 }) 92 if err != nil { 93 + l.Error("failed to create record", "err", err) 94 rp.pages.Notice(w, "upload", "Failed to create artifact record. Try again later.") 95 return 96 } 97 98 + l.Debug("created record for blob", "aturi", putRecordResp.Uri) 99 100 tx, err := rp.db.BeginTx(r.Context(), nil) 101 if err != nil { 102 + l.Error("failed to start tx") 103 rp.pages.Notice(w, "upload", "Failed to create artifact. Try again later.") 104 return 105 } ··· 119 120 err = db.AddArtifact(tx, artifact) 121 if err != nil { 122 + l.Error("failed to add artifact record to db", "err", err) 123 rp.pages.Notice(w, "upload", "Failed to create artifact. Try again later.") 124 return 125 } 126 127 err = tx.Commit() 128 if err != nil { 129 + l.Error("failed to add artifact record to db") 130 rp.pages.Notice(w, "upload", "Failed to create artifact. 
Try again later.") 131 return 132 } ··· 139 } 140 141 func (rp *Repo) DownloadArtifact(w http.ResponseWriter, r *http.Request) { 142 + l := rp.logger.With("handler", "DownloadArtifact") 143 + 144 f, err := rp.repoResolver.Resolve(r) 145 if err != nil { 146 + l.Error("failed to get repo and knot", "err", err) 147 http.Error(w, "failed to resolve repo", http.StatusInternalServerError) 148 return 149 } ··· 153 154 tag, err := rp.resolveTag(r.Context(), f, tagParam) 155 if err != nil { 156 + l.Error("failed to resolve tag", "err", err) 157 rp.pages.Notice(w, "upload", "failed to upload artifact, error in tag resolution") 158 return 159 } ··· 165 orm.FilterEq("name", filename), 166 ) 167 if err != nil { 168 + l.Error("failed to get artifacts", "err", err) 169 http.Error(w, "failed to get artifact", http.StatusInternalServerError) 170 return 171 } 172 173 if len(artifacts) != 1 { 174 + l.Error("too many or too few artifacts found") 175 http.Error(w, "artifact not found", http.StatusNotFound) 176 return 177 } ··· 180 181 ownerId, err := rp.idResolver.ResolveIdent(r.Context(), f.Did) 182 if err != nil { 183 + l.Error("failed to resolve repo owner did", "did", f.Did, "err", err) 184 http.Error(w, "repository owner not found", http.StatusNotFound) 185 return 186 } ··· 194 195 req, err := http.NewRequest(http.MethodGet, url.String(), nil) 196 if err != nil { 197 + l.Error("failed to create request", "err", err) 198 http.Error(w, "failed to create request", http.StatusInternalServerError) 199 return 200 } ··· 202 203 resp, err := http.DefaultClient.Do(req) 204 if err != nil { 205 + l.Error("failed to make request", "err", err) 206 http.Error(w, "failed to make request to PDS", http.StatusInternalServerError) 207 return 208 } ··· 218 219 // stream the body directly to the client 220 if _, err := io.Copy(w, resp.Body); err != nil { 221 + l.Error("error streaming response to client:", "err", err) 222 } 223 } 224 225 // TODO: proper statuses here on early exit 226 func (rp *Repo) DeleteArtifact(w http.ResponseWriter, r *http.Request) { 227 + l := rp.logger.With("handler", "DeleteArtifact") 228 + 229 user := rp.oauth.GetMultiAccountUser(r) 230 tagParam := chi.URLParam(r, "tag") 231 filename := chi.URLParam(r, "file") 232 f, err := rp.repoResolver.Resolve(r) 233 if err != nil { 234 + l.Error("failed to get repo and knot", "err", err) 235 return 236 } 237 ··· 246 orm.FilterEq("name", filename), 247 ) 248 if err != nil { 249 + l.Error("failed to get artifacts", "err", err) 250 rp.pages.Notice(w, "remove", "Failed to delete artifact. Try again later.") 251 return 252 } ··· 258 artifact := artifacts[0] 259 260 if user.Active.Did != artifact.Did { 261 + l.Error("user not authorized to delete artifact", "err", err) 262 rp.pages.Notice(w, "remove", "Unauthorized deletion of artifact.") 263 return 264 } ··· 269 Rkey: artifact.Rkey, 270 }) 271 if err != nil { 272 + l.Error("failed to get blob from pds", "err", err) 273 rp.pages.Notice(w, "remove", "Failed to remove blob from PDS.") 274 return 275 } 276 277 tx, err := rp.db.BeginTx(r.Context(), nil) 278 if err != nil { 279 + l.Error("failed to start tx") 280 rp.pages.Notice(w, "remove", "Failed to delete artifact. Try again later.") 281 return 282 } ··· 288 orm.FilterEq("name", filename), 289 ) 290 if err != nil { 291 + l.Error("failed to remove artifact record from db", "err", err) 292 rp.pages.Notice(w, "remove", "Failed to delete artifact. 
Try again later.") 293 return 294 } 295 296 err = tx.Commit() 297 if err != nil { 298 + l.Error("failed to remove artifact record from db") 299 rp.pages.Notice(w, "remove", "Failed to delete artifact. Try again later.") 300 return 301 } 302 + 303 + l.Info("successfully deleted artifact", "tag", tagParam, "file", filename) 304 305 w.Write([]byte{}) 306 } 307 308 func (rp *Repo) resolveTag(ctx context.Context, f *models.Repo, tagParam string) (*types.TagReference, error) { 309 + l := rp.logger.With("handler", "resolveTag") 310 + 311 tagParam, err := url.QueryUnescape(tagParam) 312 if err != nil { 313 return nil, err ··· 326 xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 327 if err != nil { 328 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 329 + l.Error("failed to call XRPC repo.tags", "err", xrpcerr) 330 return nil, xrpcerr 331 } 332 + l.Error("failed to reach knotserver", "err", err) 333 return nil, err 334 } 335 336 var result types.RepoTagsResponse 337 if err := json.Unmarshal(xrpcBytes, &result); err != nil { 338 + l.Error("failed to decode XRPC tags response", "err", err) 339 return nil, err 340 } 341
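Throughout artifact.go the refactor swaps the global `log` package for a handler-scoped structured logger, so every message carries `handler` and `err` fields instead of free-form strings. A minimal sketch of the pattern using the standard `log/slog` package; the repository's own `log`/`tlog` wrappers are assumed to behave similarly, and the example values are made up:

```go
package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	// One base logger per component; each handler derives a child with fixed context.
	base := slog.New(slog.NewTextHandler(os.Stderr, nil)).With("component", "repo")

	l := base.With("handler", "AttachArtifact")
	l.Error("failed to resolve tag", "err", errors.New("tag not found"))
	l.Info("uploaded blob", "size", "1.2 MB", "blobRef", "example-ref")
}
```

The payoff is that a single `handler=AttachArtifact` filter now groups every log line for that request path, which the old `log.Println` calls could not offer.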
+32
appview/repo/blob.go
··· 9 "path/filepath" 10 "slices" 11 "strings" 12 13 "tangled.org/core/api/tangled" 14 "tangled.org/core/appview/config" 15 "tangled.org/core/appview/models" 16 "tangled.org/core/appview/pages" 17 "tangled.org/core/appview/pages/markup" 18 "tangled.org/core/appview/reporesolver" 19 xrpcclient "tangled.org/core/appview/xrpcclient" 20 21 indigoxrpc "github.com/bluesky-social/indigo/xrpc" 22 "github.com/go-chi/chi/v5" 23 ) 24 25 // the content can be one of the following: ··· 78 79 user := rp.oauth.GetMultiAccountUser(r) 80 81 rp.pages.RepoBlob(w, pages.RepoBlobParams{ 82 LoggedInUser: user, 83 RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 84 BreadCrumbs: breadcrumbs, 85 BlobView: blobView, 86 RepoBlob_Output: resp, 87 }) 88 }
··· 9 "path/filepath" 10 "slices" 11 "strings" 12 + "time" 13 14 "tangled.org/core/api/tangled" 15 "tangled.org/core/appview/config" 16 + "tangled.org/core/appview/db" 17 "tangled.org/core/appview/models" 18 "tangled.org/core/appview/pages" 19 "tangled.org/core/appview/pages/markup" 20 "tangled.org/core/appview/reporesolver" 21 xrpcclient "tangled.org/core/appview/xrpcclient" 22 + "tangled.org/core/types" 23 24 indigoxrpc "github.com/bluesky-social/indigo/xrpc" 25 "github.com/go-chi/chi/v5" 26 + "github.com/go-git/go-git/v5/plumbing" 27 ) 28 29 // the content can be one of the following: ··· 82 83 user := rp.oauth.GetMultiAccountUser(r) 84 85 + // Get email to DID mapping for commit author 86 + var emails []string 87 + if resp.LastCommit != nil && resp.LastCommit.Author != nil { 88 + emails = append(emails, resp.LastCommit.Author.Email) 89 + } 90 + emailToDidMap, err := db.GetEmailToDid(rp.db, emails, true) 91 + if err != nil { 92 + l.Error("failed to get email to did mapping", "err", err) 93 + emailToDidMap = make(map[string]string) 94 + } 95 + 96 + var lastCommitInfo *types.LastCommitInfo 97 + if resp.LastCommit != nil { 98 + when, _ := time.Parse(time.RFC3339, resp.LastCommit.When) 99 + lastCommitInfo = &types.LastCommitInfo{ 100 + Hash: plumbing.NewHash(resp.LastCommit.Hash), 101 + Message: resp.LastCommit.Message, 102 + When: when, 103 + } 104 + if resp.LastCommit.Author != nil { 105 + lastCommitInfo.Author.Name = resp.LastCommit.Author.Name 106 + lastCommitInfo.Author.Email = resp.LastCommit.Author.Email 107 + lastCommitInfo.Author.When, _ = time.Parse(time.RFC3339, resp.LastCommit.Author.When) 108 + } 109 + } 110 + 111 rp.pages.RepoBlob(w, pages.RepoBlobParams{ 112 LoggedInUser: user, 113 RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 114 BreadCrumbs: breadcrumbs, 115 BlobView: blobView, 116 + EmailToDid: emailToDidMap, 117 + LastCommitInfo: lastCommitInfo, 118 RepoBlob_Output: resp, 119 }) 120 }
+1
appview/repo/router.go
··· 23 r.Route("/tags", func(r chi.Router) { 24 r.Get("/", rp.Tags) 25 r.Route("/{tag}", func(r chi.Router) { 26 r.Get("/download/{file}", rp.DownloadArtifact) 27 28 // require repo:push to upload or delete artifacts
··· 23 r.Route("/tags", func(r chi.Router) { 24 r.Get("/", rp.Tags) 25 r.Route("/{tag}", func(r chi.Router) { 26 + r.Get("/", rp.Tag) 27 r.Get("/download/{file}", rp.DownloadArtifact) 28 29 // require repo:push to upload or delete artifacts
+58
appview/repo/tags.go
··· 14 "tangled.org/core/types" 15 16 indigoxrpc "github.com/bluesky-social/indigo/xrpc" 17 "github.com/go-git/go-git/v5/plumbing" 18 ) 19 ··· 70 } 71 } 72 user := rp.oauth.GetMultiAccountUser(r) 73 rp.pages.RepoTags(w, pages.RepoTagsParams{ 74 LoggedInUser: user, 75 RepoInfo: rp.repoResolver.GetRepoInfo(r, user), ··· 78 DanglingArtifacts: danglingArtifacts, 79 }) 80 }
··· 14 "tangled.org/core/types" 15 16 indigoxrpc "github.com/bluesky-social/indigo/xrpc" 17 + "github.com/go-chi/chi/v5" 18 "github.com/go-git/go-git/v5/plumbing" 19 ) 20 ··· 71 } 72 } 73 user := rp.oauth.GetMultiAccountUser(r) 74 + 75 rp.pages.RepoTags(w, pages.RepoTagsParams{ 76 LoggedInUser: user, 77 RepoInfo: rp.repoResolver.GetRepoInfo(r, user), ··· 80 DanglingArtifacts: danglingArtifacts, 81 }) 82 } 83 + 84 + func (rp *Repo) Tag(w http.ResponseWriter, r *http.Request) { 85 + l := rp.logger.With("handler", "RepoTag") 86 + f, err := rp.repoResolver.Resolve(r) 87 + if err != nil { 88 + l.Error("failed to get repo and knot", "err", err) 89 + return 90 + } 91 + scheme := "http" 92 + if !rp.config.Core.Dev { 93 + scheme = "https" 94 + } 95 + host := fmt.Sprintf("%s://%s", scheme, f.Knot) 96 + xrpcc := &indigoxrpc.Client{ 97 + Host: host, 98 + } 99 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 100 + tag := chi.URLParam(r, "tag") 101 + 102 + xrpcBytes, err := tangled.RepoTag(r.Context(), xrpcc, repo, tag) 103 + if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 104 + l.Error("failed to call XRPC repo.tags", "err", xrpcerr) 105 + rp.pages.Error503(w) 106 + return 107 + } 108 + var result types.RepoTagResponse 109 + if err := json.Unmarshal(xrpcBytes, &result); err != nil { 110 + l.Error("failed to decode XRPC response", "err", err) 111 + rp.pages.Error503(w) 112 + return 113 + } 114 + 115 + filters := []orm.Filter{orm.FilterEq("repo_at", f.RepoAt())} 116 + if result.Tag.Tag != nil { 117 + filters = append(filters, orm.FilterEq("tag", result.Tag.Tag.Hash[:])) 118 + } 119 + 120 + artifacts, err := db.GetArtifact(rp.db, filters...) 121 + if err != nil { 122 + l.Error("failed grab artifacts", "err", err) 123 + return 124 + } 125 + // convert artifacts to map for easy UI building 126 + artifactMap := make(map[plumbing.Hash][]models.Artifact) 127 + for _, a := range artifacts { 128 + artifactMap[a.Tag] = append(artifactMap[a.Tag], a) 129 + } 130 + 131 + user := rp.oauth.GetMultiAccountUser(r) 132 + rp.pages.RepoTag(w, pages.RepoTagParams{ 133 + LoggedInUser: user, 134 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 135 + RepoTagResponse: result, 136 + ArtifactMap: artifactMap, 137 + }) 138 + }
+29
appview/repo/tree.go
··· 8 "time" 9 10 "tangled.org/core/api/tangled" 11 "tangled.org/core/appview/pages" 12 "tangled.org/core/appview/reporesolver" 13 xrpcclient "tangled.org/core/appview/xrpcclient" ··· 98 } 99 sortFiles(result.Files) 100 101 rp.pages.RepoTree(w, pages.RepoTreeParams{ 102 LoggedInUser: user, 103 BreadCrumbs: breadcrumbs, 104 TreePath: treePath, 105 RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 106 RepoTreeResponse: result, 107 }) 108 }
··· 8 "time" 9 10 "tangled.org/core/api/tangled" 11 + "tangled.org/core/appview/db" 12 "tangled.org/core/appview/pages" 13 "tangled.org/core/appview/reporesolver" 14 xrpcclient "tangled.org/core/appview/xrpcclient" ··· 99 } 100 sortFiles(result.Files) 101 102 + // Get email to DID mapping for commit author 103 + var emails []string 104 + if xrpcResp.LastCommit != nil && xrpcResp.LastCommit.Author != nil { 105 + emails = append(emails, xrpcResp.LastCommit.Author.Email) 106 + } 107 + emailToDidMap, err := db.GetEmailToDid(rp.db, emails, true) 108 + if err != nil { 109 + l.Error("failed to get email to did mapping", "err", err) 110 + emailToDidMap = make(map[string]string) 111 + } 112 + 113 + var lastCommitInfo *types.LastCommitInfo 114 + if xrpcResp.LastCommit != nil { 115 + when, _ := time.Parse(time.RFC3339, xrpcResp.LastCommit.When) 116 + lastCommitInfo = &types.LastCommitInfo{ 117 + Hash: plumbing.NewHash(xrpcResp.LastCommit.Hash), 118 + Message: xrpcResp.LastCommit.Message, 119 + When: when, 120 + } 121 + if xrpcResp.LastCommit.Author != nil { 122 + lastCommitInfo.Author.Name = xrpcResp.LastCommit.Author.Name 123 + lastCommitInfo.Author.Email = xrpcResp.LastCommit.Author.Email 124 + lastCommitInfo.Author.When, _ = time.Parse(time.RFC3339, xrpcResp.LastCommit.Author.When) 125 + } 126 + } 127 + 128 rp.pages.RepoTree(w, pages.RepoTreeParams{ 129 LoggedInUser: user, 130 BreadCrumbs: breadcrumbs, 131 TreePath: treePath, 132 RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 133 + EmailToDid: emailToDidMap, 134 + LastCommitInfo: lastCommitInfo, 135 RepoTreeResponse: result, 136 }) 137 }
+2 -7
appview/state/profile.go
··· 550 stat0 := r.FormValue("stat0") 551 stat1 := r.FormValue("stat1") 552 553 - if stat0 != "" { 554 - profile.Stats[0].Kind = models.VanityStatKind(stat0) 555 - } 556 - 557 - if stat1 != "" { 558 - profile.Stats[1].Kind = models.VanityStatKind(stat1) 559 - } 560 561 if err := db.ValidateProfile(s.db, profile); err != nil { 562 log.Println("invalid profile", err)
··· 550 stat0 := r.FormValue("stat0") 551 stat1 := r.FormValue("stat1") 552 553 + profile.Stats[0].Kind = models.ParseVanityStatKind(stat0) 554 + profile.Stats[1].Kind = models.ParseVanityStatKind(stat1) 555 556 if err := db.ValidateProfile(s.db, profile); err != nil { 557 log.Println("invalid profile", err)
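`ParseVanityStatKind` replaces the previous empty-string checks, so an unknown or empty form value now normalizes to a well-defined kind instead of leaving the prior value untouched. The function is not part of this diff; a plausible shape, with the kind list mirroring the options in editBio.html (one option is elided in that hunk), might be:

```go
package models

// VanityStatKind and ParseVanityStatKind as they might look; the real
// definitions live in appview/models and may differ.
type VanityStatKind string

var knownVanityStatKinds = map[VanityStatKind]bool{
	"merged-pull-request-count": true,
	"closed-pull-request-count": true,
	"open-issue-count":          true,
	"closed-issue-count":        true,
	"repository-count":          true,
	"star-count":                true,
}

// ParseVanityStatKind returns the kind for a known form value and the zero
// value otherwise, so stale or tampered submissions cannot set arbitrary kinds.
func ParseVanityStatKind(s string) VanityStatKind {
	if k := VanityStatKind(s); knownVanityStatKinds[k] {
		return k
	}
	return ""
}
```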
+2 -1
appview/state/state.go
··· 173 notifiers = append(notifiers, phnotify.NewPosthogNotifier(posthog)) 174 } 175 notifiers = append(notifiers, indexer) 176 - notifier := notify.NewMergedNotifier(notifiers, tlog.SubLogger(logger, "notify")) 177 178 state := &State{ 179 d,
··· 173 notifiers = append(notifiers, phnotify.NewPosthogNotifier(posthog)) 174 } 175 notifiers = append(notifiers, indexer) 176 + notifier := notify.NewMergedNotifier(notifiers) 177 + notifier = notify.NewLoggingNotifier(notifier, tlog.SubLogger(logger, "notify")) 178 179 state := &State{ 180 d,
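Splitting `NewMergedNotifier` and `NewLoggingNotifier` turns logging into a decorator that can wrap any notifier rather than being baked into the merger. The real Notifier interface in appview/notify has many event-specific methods; the sketch below collapses it to a single hypothetical method purely to show the wrapper shape:

```go
package notify

import "log/slog"

// Notifier is reduced to one method here for illustration only; the actual
// interface is event-specific.
type Notifier interface {
	Notify(event string, payload any) error
}

type loggingNotifier struct {
	inner  Notifier
	logger *slog.Logger
}

// NewLoggingNotifier wraps any Notifier and logs each dispatch before delegating.
func NewLoggingNotifier(inner Notifier, logger *slog.Logger) Notifier {
	return &loggingNotifier{inner: inner, logger: logger}
}

func (n *loggingNotifier) Notify(event string, payload any) error {
	n.logger.Info("dispatching notification", "event", event)
	return n.inner.Notify(event, payload)
}
```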
+1 -1
appview/validator/label.go
··· 4 "context" 5 "fmt" 6 "regexp" 7 "strings" 8 9 "github.com/bluesky-social/indigo/atproto/syntax" 10 - "golang.org/x/exp/slices" 11 "tangled.org/core/api/tangled" 12 "tangled.org/core/appview/models" 13 )
··· 4 "context" 5 "fmt" 6 "regexp" 7 + "slices" 8 "strings" 9 10 "github.com/bluesky-social/indigo/atproto/syntax" 11 "tangled.org/core/api/tangled" 12 "tangled.org/core/appview/models" 13 )
-50
cmd/knotmirror/main.go
··· 1 - package main 2 - 3 - import ( 4 - "context" 5 - "log/slog" 6 - "os" 7 - "os/signal" 8 - "syscall" 9 - 10 - "github.com/carlmjohnson/versioninfo" 11 - "github.com/urfave/cli/v3" 12 - "tangled.org/core/knotmirror" 13 - "tangled.org/core/log" 14 - ) 15 - 16 - func main() { 17 - if err := run(os.Args); err != nil { 18 - slog.Error("error running knotmirror", "err", err) 19 - os.Exit(-1) 20 - } 21 - } 22 - 23 - func run(args []string) error { 24 - ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) 25 - defer cancel() 26 - 27 - logger := log.New("knotmirror") 28 - slog.SetDefault(logger) 29 - ctx = log.IntoContext(ctx, logger) 30 - 31 - app := cli.Command{ 32 - Name: "knotmirror", 33 - Usage: "knot mirroring service", 34 - Version: versioninfo.Short(), 35 - } 36 - app.Flags = []cli.Flag{} 37 - app.Commands = []*cli.Command{ 38 - { 39 - Name: "serve", 40 - Usage: "run the knotmirror daemon", 41 - Action: runKnotMirror, 42 - Flags: []cli.Flag{}, 43 - }, 44 - } 45 - return app.Run(ctx, args) 46 - } 47 - 48 - func runKnotMirror(ctx context.Context, cmd *cli.Command) error { 49 - return knotmirror.Run(ctx) 50 - }
···
+83
docs/DOCS.md
··· 502 Note that you should add a newline at the end if setting a non-empty message 503 since the knot won't do this for you. 504 505 # Spindles 506 507 ## Pipelines ··· 1561 Refer to the [jujutsu 1562 documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 1563 for more information.
··· 502 Note that you should add a newline at the end if setting a non-empty message
503 since the knot won't do this for you.
504
505 + ## Troubleshooting
506 +
507 + If you run your own knot, you may run into some of these
508 + common issues. You can always join the
509 + [IRC](https://web.libera.chat/#tangled) or
510 + [Discord](https://chat.tangled.org/) if this section does
511 + not help.
512 +
513 + ### Unable to push
514 +
515 + If you are unable to push to your knot or repository:
516 +
517 + 1. First, ensure that you have added your SSH public key to
518 +    your account
519 + 2. Check to see that your knot has synced the key by running
520 +    `knot keys`
521 + 3. Check to see if git is supplying the correct private key
522 +    when pushing: `GIT_SSH_COMMAND="ssh -v" git push ...`
523 + 4. Check to see if `sshd` on the knot is rejecting the push
524 +    for some reason: `journalctl -xeu ssh` (or `sshd`,
525 +    depending on your machine). These logs are unavailable if
526 +    using Docker.
527 + 5. Check to see if the knot itself is rejecting the push;
528 +    depending on your setup, the logs might be in one of the
529 +    following paths:
530 +    * `/tmp/knotguard.log`
531 +    * `/home/git/log`
532 +    * `/home/git/guard.log`
533 +
534 # Spindles
535
536 ## Pipelines
··· 1590 Refer to the [jujutsu
1591 documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1592 for more information.
1593 +
1594 + # Troubleshooting guide
1595 +
1596 + ## Login issues
1597 +
1598 + Owing to the distributed nature of OAuth on AT Protocol, you
1599 + may run into issues with logging in. If you run a
1600 + self-hosted PDS:
1601 +
1602 + - You may need to ensure that your PDS is time-synced using
1603 +   NTP:
1604 +   * Enable the `ntpd` service
1605 +   * Run `ntpd -qg` to synchronize your clock
1606 + - You may need to increase the default request timeout:
1607 +   `NODE_OPTIONS="--network-family-autoselection-attempt-timeout=500"`
1608 +
1609 + ## Empty punchcard
1610 +
1611 + For Tangled to register commits that you make across the
1612 + network, you need to set up one of the following:
1613 +
1614 + - The committer email should be a verified email associated
1615 +   with your account. You can add and verify emails on the
1616 +   settings page.
1617 + - Or, the committer email should be set to your account's
1618 +   DID: `git config user.email "did:plc:foobar"`. You can find
1619 +   your account's DID on the settings page.
1620 +
1621 + ## Commit is not marked as verified
1622 +
1623 + Presently, Tangled only supports SSH commit signatures.
1624 +
1625 + To sign commits using an SSH key with git:
1626 +
1627 + ```
1628 + git config --global gpg.format ssh
1629 + git config --global user.signingkey ~/.ssh/tangled-key
1630 + ```
1631 +
1632 + To sign commits using an SSH key with jj, add this to your
1633 + config:
1634 +
1635 + ```
1636 + [signing]
1637 + behavior = "own"
1638 + backend = "ssh"
1639 + key = "~/.ssh/tangled-key"
1640 + ```
1641 +
1642 + ## Self-hosted knot issues
1643 +
1644 + If you need help troubleshooting a self-hosted knot, check
1645 + out the [knot troubleshooting
1646 + guide](/knot-self-hosting-guide.html#troubleshooting).
+1 -1
go.mod
··· 49 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 50 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 51 golang.org/x/crypto v0.40.0 52 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b 53 golang.org/x/image v0.31.0 54 golang.org/x/net v0.42.0 55 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da ··· 203 go.uber.org/atomic v1.11.0 // indirect 204 go.uber.org/multierr v1.11.0 // indirect 205 go.uber.org/zap v1.27.0 // indirect 206 golang.org/x/sync v0.17.0 // indirect 207 golang.org/x/sys v0.34.0 // indirect 208 golang.org/x/text v0.29.0 // indirect
··· 49 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 50 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 51 golang.org/x/crypto v0.40.0 52 golang.org/x/image v0.31.0 53 golang.org/x/net v0.42.0 54 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da ··· 202 go.uber.org/atomic v1.11.0 // indirect 203 go.uber.org/multierr v1.11.0 // indirect 204 go.uber.org/zap v1.27.0 // indirect 205 + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect 206 golang.org/x/sync v0.17.0 // indirect 207 golang.org/x/sys v0.34.0 // indirect 208 golang.org/x/text v0.29.0 // indirect
+9
input.css
··· 175 @apply opacity-70; 176 } 177 178 .prose a.footnote-backref { 179 @apply no-underline; 180 }
··· 175 @apply opacity-70; 176 } 177 178 + .prose h1:target, 179 + .prose h2:target, 180 + .prose h3:target, 181 + .prose h4:target, 182 + .prose h5:target, 183 + .prose h6:target { 184 + @apply bg-yellow-200/30 dark:bg-yellow-600/30; 185 + } 186 + 187 .prose a.footnote-backref { 188 @apply no-underline; 189 }
-23
knotmirror/config/config.go
··· 1 - package config 2 - 3 - import ( 4 - "context" 5 - 6 - "github.com/sethvargo/go-envconfig" 7 - ) 8 - 9 - type Config struct { 10 - TapUrl string `env:"MIRROR_TAP_URL, default=http://localhost:2481"` 11 - DbPath string `env:"MIRROR_DB_PATH, default=mirror.db"` 12 - GitRepoBasePath string `env:"MIRROR_GIT_BASEPATH, default=repos"` 13 - KnotUseSSL bool `env:"MIRROR_KNOT_USE_SSL, default=false"` // use SSL for Knot when not schema is not specified 14 - ResyncParallelism int `env:"MIRROR_RESYNC_PARALLELISM, default=5"` 15 - } 16 - 17 - func Load(ctx context.Context) (*Config, error) { 18 - var cfg Config 19 - if err := envconfig.Process(ctx, &cfg); err != nil { 20 - return nil, err 21 - } 22 - return &cfg, nil 23 - }
···
-25
knotmirror/crawler.go
··· 1 - package knotmirror 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "log/slog" 7 - 8 - "tangled.org/core/log" 9 - ) 10 - 11 - type Crawler struct { 12 - logger *slog.Logger 13 - db *sql.DB 14 - } 15 - 16 - func NewCrawler(l *slog.Logger, db *sql.DB) *Crawler { 17 - return &Crawler{ 18 - logger: log.SubLogger(l, "crawler"), 19 - db: db, 20 - } 21 - } 22 - 23 - func (c *Crawler) Run(ctx context.Context) { 24 - // TODO: repository crawler 25 - }
···
-67
knotmirror/db/db.go
··· 1 - package db 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "fmt" 7 - "strings" 8 - ) 9 - 10 - func Make(ctx context.Context, dbPath string) (*sql.DB, error) { 11 - // https://github.com/mattn/go-sqlite3#connection-string 12 - opts := []string{ 13 - "_foreign_keys=1", 14 - "_journal_mode=WAL", 15 - "_synchronous=NORMAL", 16 - "_auto_vacuum=incremental", 17 - } 18 - 19 - db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 20 - if err != nil { 21 - return nil, err 22 - } 23 - 24 - conn, err := db.Conn(ctx) 25 - if err != nil { 26 - return nil, err 27 - } 28 - defer conn.Close() 29 - 30 - _, err = conn.ExecContext(ctx, ` 31 - create table if not exists repos ( 32 - did text not null, 33 - rkey text not null, 34 - at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo' || '/' || rkey) stored, 35 - cid text not null, 36 - 37 - -- record content 38 - name text not null, 39 - knot_domain text not null, 40 - 41 - -- sync data 42 - git_rev text not null, 43 - repo_sha text not null, 44 - state text not null default 'pending', 45 - error_msg text, 46 - retry_count integer not null default 0, 47 - retry_after integer not null default 0, 48 - 49 - unique(did, rkey) 50 - ); 51 - 52 - -- knot hosts 53 - create table if not exists hosts ( 54 - hostname text not null, 55 - no_ssl integer not null default 0, 56 - status text not null default 'active', 57 - last_seq integer not null default -1, 58 - 59 - unique(hostname) 60 - ); 61 - `) 62 - if err != nil { 63 - return nil, fmt.Errorf("initializing db schema: %w", err) 64 - } 65 - 66 - return db, nil 67 - }
···
-38
knotmirror/db/hosts.go
··· 1 - package db 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "fmt" 7 - 8 - "tangled.org/core/knotmirror/models" 9 - ) 10 - 11 - func ListHosts(ctx context.Context, e *sql.DB) ([]models.Host, error) { 12 - rows, err := e.QueryContext(ctx, ` 13 - select hostname, no_ssl, status, last_seq from hosts 14 - where status = 'active'; 15 - `) 16 - if err != nil { 17 - return nil, fmt.Errorf("querying hosts: %w", err) 18 - } 19 - defer rows.Close() 20 - 21 - var hosts []models.Host 22 - for rows.Next() { 23 - var host models.Host 24 - if err := rows.Scan( 25 - &host.Hostname, 26 - &host.NoSSL, 27 - &host.Status, 28 - &host.LastSeq, 29 - ); err != nil { 30 - return nil, fmt.Errorf("scanning row: %w", err) 31 - } 32 - hosts = append(hosts, host) 33 - } 34 - if err := rows.Err(); err != nil { 35 - return nil, fmt.Errorf("scanning rows: %w ", err) 36 - } 37 - return hosts, nil 38 - }
···
-139
knotmirror/db/repos.go
··· 1 - package db 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "fmt" 7 - 8 - "github.com/bluesky-social/indigo/atproto/syntax" 9 - "tangled.org/core/knotmirror/models" 10 - ) 11 - 12 - func UpsertRepo(ctx context.Context, e *sql.DB, repo *models.Repo) error { 13 - if _, err := e.ExecContext(ctx, 14 - `insert into repos (did, rkey, cid, name, knot_domain, git_rev, repo_sha, state, error_msg, retry_count, retry_after) 15 - values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 16 - on conflict(did, rkey) do update set 17 - cid = excluded.cid, 18 - name = excluded.name, 19 - knot_domain = excluded.knot_domain, 20 - git_rev = excluded.git_rev, 21 - repo_sha = excluded.repo_sha, 22 - state = excluded.state, 23 - error_msg = excluded.error_msg, 24 - retry_count = excluded.retry_count, 25 - retry_after = excluded.retry_after 26 - on conflict(cid) do nothing`, 27 - repo.Did, 28 - repo.Rkey, 29 - repo.Cid, 30 - repo.Name, 31 - repo.KnotDomain, 32 - repo.GitRev, 33 - repo.RepoSha, 34 - repo.State, 35 - repo.ErrorMsg, 36 - repo.RetryCount, 37 - repo.RetryAfter, 38 - ); err != nil { 39 - return fmt.Errorf("upserting repo: %w", err) 40 - } 41 - return nil 42 - } 43 - 44 - func UpdateRepoState(ctx context.Context, e *sql.DB, did syntax.DID, rkey syntax.RecordKey, state models.RepoState) error { 45 - if _, err := e.ExecContext(ctx, 46 - `update repos 47 - set state = ? 48 - where did = ? and rkey = ?`, 49 - state, 50 - did, rkey, 51 - ); err != nil { 52 - return fmt.Errorf("updating repo: %w", err) 53 - } 54 - return nil 55 - } 56 - 57 - func DeleteRepo(ctx context.Context, e *sql.DB, did syntax.DID, rkey syntax.RecordKey) error { 58 - if _, err := e.ExecContext(ctx, 59 - `delete from repos where did = ? and rkey = ?`, 60 - did, 61 - rkey, 62 - ); err != nil { 63 - return fmt.Errorf("deleting repo: %w", err) 64 - } 65 - return nil 66 - } 67 - 68 - func GetRepoByName(ctx context.Context, e *sql.DB, did syntax.DID, name string) (*models.Repo, error) { 69 - var repo models.Repo 70 - if err := e.QueryRowContext(ctx, 71 - `select 72 - did, 73 - rkey, 74 - cid, 75 - name, 76 - knot_domain, 77 - git_rev, 78 - repo_sha, 79 - state, 80 - error_msg, 81 - retry_count, 82 - retry_after 83 - from repos 84 - where did = ? and name = ?`, 85 - did, 86 - name, 87 - ).Scan( 88 - &repo.Did, 89 - &repo.Rkey, 90 - &repo.Cid, 91 - &repo.Name, 92 - &repo.KnotDomain, 93 - &repo.GitRev, 94 - &repo.RepoSha, 95 - &repo.State, 96 - &repo.ErrorMsg, 97 - &repo.RetryCount, 98 - &repo.RetryAfter, 99 - ); err != nil { 100 - return nil, fmt.Errorf("querying repo: %w", err) 101 - } 102 - return &repo, nil 103 - } 104 - 105 - func GetRepoByAtUri(ctx context.Context, e *sql.DB, aturi syntax.ATURI) (*models.Repo, error) { 106 - var repo models.Repo 107 - if err := e.QueryRowContext(ctx, 108 - `select 109 - did, 110 - rkey, 111 - cid, 112 - name, 113 - knot_domain, 114 - git_rev, 115 - repo_sha, 116 - state, 117 - error_msg, 118 - retry_count, 119 - retry_after 120 - from repos 121 - where at_uri = ?`, 122 - aturi, 123 - ).Scan( 124 - &repo.Did, 125 - &repo.Rkey, 126 - &repo.Cid, 127 - &repo.Name, 128 - &repo.KnotDomain, 129 - &repo.GitRev, 130 - &repo.RepoSha, 131 - &repo.State, 132 - &repo.ErrorMsg, 133 - &repo.RetryCount, 134 - &repo.RetryAfter, 135 - ); err != nil { 136 - return nil, fmt.Errorf("querying repo: %w", err) 137 - } 138 - return &repo, nil 139 - }
···
-61
knotmirror/knotmirror.go
··· 1 - package knotmirror 2 - 3 - import ( 4 - "context" 5 - "fmt" 6 - "time" 7 - 8 - "tangled.org/core/knotmirror/config" 9 - "tangled.org/core/knotmirror/db" 10 - "tangled.org/core/knotmirror/knotstream" 11 - "tangled.org/core/log" 12 - ) 13 - 14 - func Run(ctx context.Context) error { 15 - logger := log.FromContext(ctx) 16 - cfg := loadConfig() 17 - 18 - db, err := db.Make(ctx, cfg.DbPath) 19 - if err != nil { 20 - return fmt.Errorf("initializing db: %w", err) 21 - } 22 - 23 - tap := NewTapClient(logger, cfg.TapUrl) 24 - knotstream := knotstream.NewKnotStream(logger, db) 25 - 26 - crawler := NewCrawler(logger, db) 27 - 28 - resyncer := NewResyncer(logger, db, cfg) 29 - 30 - // maintain repository list with tap 31 - // NOTE: this can be removed once we introduce did-for-repo because then we can just listen to KnotStream for #identity events. 32 - tap.Run(ctx) 33 - 34 - // listen to knotstream (currently we don't have relay for knots, so subscribe every known knots) 35 - if err := knotstream.ResubscribeAllHosts(ctx); err != nil { 36 - return fmt.Errorf("resubscribing known hosts: %w", err) 37 - } 38 - 39 - // periodically crawl the entire network to mirror the repositories 40 - crawler.Run(ctx) 41 - 42 - resyncer.Run(ctx) 43 - 44 - logger.Info("startup complete") 45 - 46 - <-ctx.Done() 47 - logger.Info("received shutdown signal", "reason", ctx.Err()) 48 - 49 - logger.Info("shutting down knotmirror") 50 - 51 - _, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) 52 - defer shutdownCancel() 53 - // TODO: shutdown logic 54 - 55 - logger.Info("shutdown complete") 56 - return nil 57 - } 58 - 59 - func loadConfig() *config.Config { 60 - panic("unimplemented") 61 - }
···
-45
knotmirror/knotstream/knotstream.go
··· 1 - package knotstream 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "fmt" 7 - "log/slog" 8 - "time" 9 - 10 - "tangled.org/core/knotmirror/db" 11 - "tangled.org/core/log" 12 - ) 13 - 14 - type KnotStream struct { 15 - logger *slog.Logger 16 - db *sql.DB 17 - slurper *KnotSlurper 18 - } 19 - 20 - func NewKnotStream(l *slog.Logger, db *sql.DB) *KnotStream { 21 - l = log.SubLogger(l, "knotstream") 22 - return &KnotStream{ 23 - logger: l, 24 - db: db, 25 - slurper: NewKnotSlurper(l, db), 26 - } 27 - } 28 - 29 - func (s *KnotStream) ResubscribeAllHosts(ctx context.Context) error { 30 - hosts, err := db.ListHosts(ctx, s.db) 31 - if err != nil { 32 - return fmt.Errorf("listing hosts: %w", err) 33 - } 34 - 35 - for _, host := range hosts { 36 - l := s.logger.With("hostname", host.Hostname) 37 - l.Info("re-subscribing to active host") 38 - if err := s.slurper.Subscribe(ctx, host); err != nil { 39 - l.Warn("failed to re-subscribe to host", "err", err) 40 - } 41 - // sleep for a very short period, so we don't open tons of sockets at the same time 42 - time.Sleep(1 * time.Millisecond) 43 - } 44 - return nil 45 - }
···
-247
knotmirror/knotstream/slurper.go
··· 1 - package knotstream 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "encoding/json" 7 - "fmt" 8 - "log/slog" 9 - "math/rand" 10 - "net/http" 11 - "time" 12 - 13 - "github.com/bluesky-social/indigo/atproto/syntax" 14 - "github.com/bluesky-social/indigo/util/ssrf" 15 - "github.com/carlmjohnson/versioninfo" 16 - "github.com/gorilla/websocket" 17 - "tangled.org/core/api/tangled" 18 - "tangled.org/core/knotmirror/db" 19 - "tangled.org/core/knotmirror/models" 20 - "tangled.org/core/log" 21 - ) 22 - 23 - type KnotSlurper struct { 24 - logger *slog.Logger 25 - db *sql.DB 26 - concurrency int 27 - tskMngr *TaskManager 28 - } 29 - 30 - func NewKnotSlurper(l *slog.Logger, db *sql.DB) *KnotSlurper { 31 - return &KnotSlurper{ 32 - logger: log.SubLogger(l, "slurper"), 33 - db: db, 34 - concurrency: 1000, 35 - tskMngr: NewTaskManager(l), 36 - } 37 - } 38 - 39 - func (s *KnotSlurper) Start(ctx context.Context) { 40 - // start parallel workers in background 41 - for i := 0; i < s.concurrency; i++ { 42 - go s.worker(ctx) 43 - } 44 - } 45 - 46 - func (s *KnotSlurper) worker(ctx context.Context) { 47 - s.tskMngr.ForEach(ctx, func(task *Task) error { 48 - var legacyMessage LegacyGitEvent 49 - if err := json.Unmarshal(task.message, &legacyMessage); err != nil { 50 - return fmt.Errorf("unmarshaling message: %w", err) 51 - } 52 - 53 - if err := s.ProcessLegacyGitRefUpdate(ctx, &legacyMessage); err != nil { 54 - return fmt.Errorf("processing gitRefUpdate: %w", err) 55 - } 56 - return nil 57 - }) 58 - } 59 - 60 - func (s *KnotSlurper) Subscribe(ctx context.Context, host models.Host) error { 61 - go s.subscribeWithRedialer(ctx, host) 62 - return nil 63 - } 64 - 65 - func (s *KnotSlurper) subscribeWithRedialer(ctx context.Context, host models.Host) { 66 - l := s.logger.With("host", host.Hostname) 67 - 68 - dialer := websocket.Dialer{ 69 - HandshakeTimeout: time.Second * 5, 70 - } 71 - 72 - // if this isn't a localhost / private connection, then we should enable SSRF protections 73 - if !host.NoSSL { 74 - netDialer := ssrf.PublicOnlyDialer() 75 - dialer.NetDialContext = netDialer.DialContext 76 - } 77 - 78 - cursor := host.LastSeq 79 - 80 - // connectedInbound.Inc() 81 - // defer connectedInbound.Dec() 82 - 83 - var backoff int 84 - for { 85 - select { 86 - case <-ctx.Done(): 87 - return 88 - default: 89 - } 90 - u := host.LegacyEventsURL(cursor) 91 - 92 - // NOTE: manual backoff retry implementation to explicitly handle fails 93 - hdr := make(http.Header) 94 - hdr.Add("User-Agent", userAgent()) 95 - conn, resp, err := dialer.DialContext(ctx, u, hdr) 96 - if err != nil { 97 - l.Warn("dialing failed", "err", err, "backoff", backoff) 98 - time.Sleep(sleepForBackoff(backoff)) 99 - backoff++ 100 - if backoff > 15 { 101 - l.Warn("host does not appear to be online, disabling for now") 102 - return 103 - } 104 - continue 105 - } 106 - 107 - l.Debug("knot event subscription response", "code", resp.StatusCode, "url", u) 108 - 109 - if err := s.handleConnection(ctx, conn, host); err != nil { 110 - // TODO: measure the last N connection error times and if they're coming too fast reconnect slower or don't reconnect and wait for requestCrawl 111 - l.Warn("host connection failed", "err", err, "backoff", backoff) 112 - } 113 - 114 - // retry 115 - // TODO: don't reset backoff when cursor haven't advanced..? 
116 - backoff = 0 117 - } 118 - } 119 - 120 - func (s *KnotSlurper) handleConnection(ctx context.Context, conn *websocket.Conn, host models.Host) error { 121 - // schedule ping on every 30s 122 - go func() { 123 - t := time.NewTicker(30 * time.Second) 124 - defer t.Stop() 125 - failcount := 0 126 - 127 - for { 128 - select { 129 - case <-t.C: 130 - if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second*10)); err != nil { 131 - s.logger.Warn("failed to ping", "err", err) 132 - failcount++ 133 - if failcount >= 4 { 134 - s.logger.Error("too many ping fails", "count", failcount) 135 - _ = conn.Close() 136 - return 137 - } 138 - } else { 139 - failcount = 0 // ok ping 140 - } 141 - case <-ctx.Done(): 142 - _ = conn.Close() 143 - return 144 - } 145 - } 146 - }() 147 - 148 - conn.SetPingHandler(func(message string) error { 149 - err := conn.WriteControl(websocket.PongMessage, []byte(message), time.Now().Add(time.Minute)) 150 - if err == websocket.ErrCloseSent { 151 - return nil 152 - } 153 - return err 154 - }) 155 - conn.SetPongHandler(func(_ string) error { 156 - if err := conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil { 157 - s.logger.Error("failed to set read deadline", "err", err) 158 - } 159 - return nil 160 - }) 161 - 162 - for { 163 - select { 164 - case <-ctx.Done(): 165 - return ctx.Err() 166 - default: 167 - } 168 - msgType, msg, err := conn.ReadMessage() 169 - if err != nil { 170 - return err 171 - } 172 - 173 - if msgType != websocket.TextMessage { 174 - continue 175 - } 176 - 177 - s.tskMngr.AddTask(ctx, &Task{ 178 - key: host.Hostname, // TODO: replace to repository AT-URI for better concurrency 179 - message: msg, 180 - }) 181 - } 182 - } 183 - 184 - type LegacyGitEvent struct { 185 - Rkey string 186 - Nsid string 187 - Event tangled.GitRefUpdate 188 - } 189 - 190 - func (s *KnotSlurper) ProcessLegacyGitRefUpdate(ctx context.Context, evt *LegacyGitEvent) error { 191 - curr, err := db.GetRepoByName(ctx, s.db, syntax.DID(evt.Event.RepoDid), evt.Event.RepoName) 192 - if err != nil { 193 - return err 194 - } 195 - repoAt := curr.AtUri() 196 - if curr == nil { 197 - // if repo doesn't exist in DB, just ignore the event. That repo is unknown. 198 - // 199 - // Normally did+name is already enough to perform git-fetch as that's 200 - // what needed to fetch the repository. 201 - // But we want to store that in did/rkey in knot-mirror. 202 - // Therefore, we should ignore when the repository is unknown. 203 - // Hopefully crawler will sync it later. 
204 - s.logger.Warn("skipping event from unknown repo", "repoAt", repoAt) 205 - return nil 206 - } 207 - 208 - // TODO: should plan resync to resyncBuffer on RepoStateResyncing 209 - if curr.State != models.RepoStateActive { 210 - s.logger.Debug("skipping non-active repo", "repoAt", repoAt) 211 - // firehoseEventsSkipped.Inc() 212 - return nil 213 - } 214 - 215 - if curr.GitRev != "" && evt.Rkey <= curr.GitRev.String() { 216 - s.logger.Debug("skipping replayed event", "repoAt", repoAt, "eventRev", evt.Rkey, "currentRev", curr.GitRev) 217 - // firehoseEventsSkipped.Inc() 218 - return nil 219 - } 220 - 221 - // if curr.State == models.RepoStateResyncing { 222 - // firehoseEventsSkipped.Inc() 223 - // return fp.events.addToResyncBuffer(ctx, commit) 224 - // } 225 - 226 - // can't skip anything, update repo state 227 - if err := db.UpdateRepoState(ctx, s.db, curr.Did, curr.Rkey, models.RepoStateDesynchronized); err != nil { 228 - return err 229 - } 230 - 231 - // firehoseEventsProcessed.Inc() 232 - return nil 233 - } 234 - 235 - func userAgent() string { 236 - return fmt.Sprintf("knotmirror/%s", versioninfo.Short()) 237 - } 238 - 239 - func sleepForBackoff(b int) time.Duration { 240 - if b == 0 { 241 - return 0 242 - } 243 - if b < 10 { 244 - return (time.Duration(b) * 2) + (time.Millisecond * time.Duration(rand.Intn(1000))) 245 - } 246 - return time.Second * 30 247 - }
···
-79
knotmirror/knotstream/taskmanager.go
··· 1 - package knotstream 2 - 3 - import ( 4 - "context" 5 - "log/slog" 6 - "sync" 7 - 8 - "tangled.org/core/log" 9 - ) 10 - 11 - type TaskManager struct { 12 - logger *slog.Logger 13 - lk sync.Mutex 14 - feeder chan *Task 15 - scheduled map[string][]*Task 16 - } 17 - 18 - type Task struct { 19 - key string 20 - message []byte 21 - } 22 - 23 - func NewTaskManager(l *slog.Logger) *TaskManager { 24 - return &TaskManager{ 25 - logger: log.SubLogger(l, "taskmanager"), 26 - feeder: make(chan *Task), 27 - scheduled: make(map[string][]*Task), 28 - } 29 - } 30 - 31 - func (m *TaskManager) AddTask(ctx context.Context, task *Task) { 32 - m.lk.Lock() 33 - if st, ok := m.scheduled[task.key]; ok { 34 - // schedule task 35 - m.scheduled[task.key] = append(st, task) 36 - m.lk.Unlock() 37 - return 38 - } 39 - m.scheduled[task.key] = []*Task{} 40 - m.lk.Unlock() 41 - 42 - select { 43 - case <-ctx.Done(): 44 - return 45 - case m.feeder <- task: 46 - return 47 - } 48 - } 49 - 50 - func (m *TaskManager) ForEach(ctx context.Context, fn func(task *Task) error) { 51 - for task := range m.feeder { 52 - for task != nil { 53 - select { 54 - case <-ctx.Done(): 55 - return 56 - default: 57 - } 58 - if err := fn(task); err != nil { 59 - m.logger.Error("event handler failed", "err", err) 60 - } 61 - 62 - m.lk.Lock() 63 - func() { 64 - rem, ok := m.scheduled[task.key] 65 - if !ok { 66 - m.logger.Error("should always have an 'active' entry if a worker is processing a job") 67 - } 68 - if len(rem) == 0 { 69 - delete(m.scheduled, task.key) 70 - task = nil 71 - } else { 72 - task = rem[0] 73 - m.scheduled[task.key] = rem[1:] 74 - } 75 - }() 76 - m.lk.Unlock() 77 - } 78 - } 79 - }
···
-79
knotmirror/models/models.go
··· 1 - package models 2 - 3 - import ( 4 - "fmt" 5 - 6 - "github.com/bluesky-social/indigo/atproto/syntax" 7 - "tangled.org/core/api/tangled" 8 - ) 9 - 10 - type Repo struct { 11 - Did syntax.DID 12 - Rkey syntax.RecordKey 13 - Cid *syntax.CID 14 - // content of tangled.Repo 15 - Name string 16 - KnotDomain string 17 - 18 - GitRev syntax.TID // last processed git.refUpdate revision 19 - RepoSha string // sha256 sum of git refs (to avoid no-op git fetch) 20 - State RepoState 21 - ErrorMsg string 22 - RetryCount int 23 - RetryAfter int64 // Unix timestamp (seconds) 24 - } 25 - 26 - func (r *Repo) AtUri() syntax.ATURI { 27 - return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey)) 28 - } 29 - 30 - type RepoState string 31 - 32 - const ( 33 - RepoStatePending RepoState = "pending" 34 - RepoStateDesynchronized RepoState = "desynchronized" 35 - RepoStateResyncing RepoState = "resyncing" 36 - RepoStateActive RepoState = "active" 37 - RepoStateError RepoState = "error" 38 - ) 39 - 40 - type Host struct { 41 - Hostname string 42 - NoSSL bool 43 - Status HostStatus 44 - LastSeq int64 45 - } 46 - 47 - type HostStatus string 48 - 49 - const ( 50 - HostStatusActive HostStatus = "active" 51 - HostStatusIdle HostStatus = "idle" 52 - HostStatusOffline HostStatus = "offline" 53 - HostStatusThrottled HostStatus = "throttled" 54 - HostStatusBanned HostStatus = "banned" 55 - ) 56 - 57 - // func (h *Host) SubscribeGitRefsURL(cursor int64) string { 58 - // scheme := "wss" 59 - // if h.NoSSL { 60 - // scheme = "ws" 61 - // } 62 - // u := fmt.Sprintf("%s://%s/xrpc/%s", scheme, h.Hostname, tangled.SubscribeGitRefsNSID) 63 - // if cursor > 0 { 64 - // u = fmt.Sprintf("%s?cursor=%d", u, h.LastSeq) 65 - // } 66 - // return u 67 - // } 68 - 69 - func (h *Host) LegacyEventsURL(cursor int64) string { 70 - scheme := "wss" 71 - if h.NoSSL { 72 - scheme = "ws" 73 - } 74 - u := fmt.Sprintf("%s://%s/events", scheme, h.Hostname) 75 - if cursor > 0 { 76 - u = fmt.Sprintf("%s?cursor=%d", u, h.LastSeq) 77 - } 78 - return u 79 - }
···
-8
knotmirror/readme.md
··· 1 - # KnotMirror 2 - 3 - Mirror of all known repos. Heavily inspired by [indigo/relay] and [indigo/tap]. 4 - 5 - KnotMirror syncs the repo list using tap and subscribes to all known knots via KnotStream. 6 - 7 - [indigo/relay]: https://github.com/bluesky-social/indigo/tree/main/cmd/relay 8 - [indigo/tap]: https://github.com/bluesky-social/indigo/tree/main/cmd/tap
···
-241
knotmirror/resyncer.go
··· 1 - package knotmirror 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "errors" 7 - "fmt" 8 - "log/slog" 9 - "math/rand" 10 - "net/url" 11 - "path" 12 - "sync" 13 - "time" 14 - 15 - "github.com/bluesky-social/indigo/atproto/syntax" 16 - "github.com/go-git/go-git/v5" 17 - gitconfig "github.com/go-git/go-git/v5/config" 18 - "tangled.org/core/knotmirror/config" 19 - "tangled.org/core/knotmirror/db" 20 - "tangled.org/core/knotmirror/models" 21 - "tangled.org/core/log" 22 - ) 23 - 24 - type Resyncer struct { 25 - logger *slog.Logger 26 - db *sql.DB 27 - 28 - claimJobMu sync.Mutex 29 - 30 - repoBasePath string 31 - repoFetchTimeout time.Duration 32 - knotUseSSL bool 33 - 34 - parallelism int 35 - } 36 - 37 - func NewResyncer(l *slog.Logger, db *sql.DB, cfg *config.Config) *Resyncer { 38 - return &Resyncer{ 39 - logger: log.SubLogger(l, "resyncer"), 40 - db: db, 41 - repoBasePath: cfg.GitRepoBasePath, 42 - repoFetchTimeout: 300 * time.Second, 43 - knotUseSSL: cfg.KnotUseSSL, 44 - parallelism: cfg.ResyncParallelism, 45 - } 46 - } 47 - 48 - func (r *Resyncer) Run(ctx context.Context) { 49 - for i := 0; i < r.parallelism; i++ { 50 - go r.runResyncWorker(ctx, i) 51 - } 52 - } 53 - 54 - func (r *Resyncer) runResyncWorker(ctx context.Context, workerID int) { 55 - l := r.logger.With("worker", workerID) 56 - for { 57 - select { 58 - case <-ctx.Done(): 59 - l.Info("resync worker shutting down", "error", ctx.Err()) 60 - return 61 - default: 62 - } 63 - repoAt, found, err := r.claimResyncJob(ctx) 64 - if err != nil { 65 - l.Error("failed to claim resync job", "error", err) 66 - time.Sleep(time.Second) 67 - continue 68 - } 69 - if !found { 70 - time.Sleep(time.Second) 71 - continue 72 - } 73 - l.Info("processing resync", "aturi", repoAt) 74 - if err := r.resyncRepo(ctx, repoAt); err != nil { 75 - l.Error("resync failed", "aturi", repoAt, "error", err) 76 - } 77 - } 78 - } 79 - 80 - func (r *Resyncer) claimResyncJob(ctx context.Context) (syntax.ATURI, bool, error) { 81 - // use mutex to prevent duplicated jobs 82 - r.claimJobMu.Lock() 83 - defer r.claimJobMu.Unlock() 84 - 85 - var repoAt syntax.ATURI 86 - now := time.Now().Unix() 87 - if err := r.db.QueryRowContext(ctx, 88 - `update repos 89 - set state = ? 90 - where at_uri = ( 91 - select at_uri from repos 92 - where state in (?, ?, ?) 93 - and (retry_after = 0 or retry_after < ?) 
94 - limit 1 95 - ) 96 - returning at_uri 97 - `, 98 - models.RepoStateResyncing, 99 - models.RepoStatePending, models.RepoStateDesynchronized, models.RepoStateError, 100 - now, 101 - ).Scan(&repoAt); err != nil { 102 - if errors.Is(err, sql.ErrNoRows) { 103 - return "", false, nil 104 - } 105 - return "", false, err 106 - } 107 - 108 - return repoAt, true, nil 109 - } 110 - 111 - func (r *Resyncer) resyncRepo(ctx context.Context, repoAt syntax.ATURI) error { 112 - // ctx, span := tracer.Start(ctx, "resyncDid") 113 - // span.SetAttributes(attribute.String("did", did)) 114 - // defer span.End() 115 - // 116 - // resyncsStarted.Inc() 117 - // startTime := time.Now() 118 - 119 - r.logger.Info("starting resync", "aturi", repoAt) 120 - 121 - success, err := r.doResync(ctx, repoAt) 122 - if !success { 123 - // resyncsFailed.Inc() 124 - // resyncDuration.Observe(time.Since(startTime).Seconds()) 125 - return r.handleResyncError(ctx, repoAt, err) 126 - } 127 - 128 - // resyncsCompleted.Inc() 129 - // resyncDuration.Observe(time.Since(startTime).Seconds()) 130 - 131 - return nil 132 - } 133 - 134 - func (r *Resyncer) doResync(ctx context.Context, repoAt syntax.ATURI) (bool, error) { 135 - // ctx, span := tracer.Start(ctx, "doResync") 136 - // span.SetAttributes(attribute.String("did", did)) 137 - // defer span.End() 138 - 139 - repo, err := db.GetRepoByAtUri(ctx, r.db, repoAt) 140 - if err != nil { 141 - return false, fmt.Errorf("failed to get repo: %w", err) 142 - } 143 - 144 - repoPath := r.repoPath(repo) 145 - remoteUrl := r.repoRemoteURL(repo) 146 - 147 - // TODO: check if Knot is on backoff list. If so, return (false, nil) 148 - // TODO: use r.repoFetchTimeout on fetch 149 - 150 - // TODO: detect rate limit error (http.StatusTooManyRequests) to put Knot in backoff list 151 - gr, err := git.PlainOpen(repoPath) 152 - if errors.Is(err, git.ErrRepositoryNotExists) { 153 - // if err := exec.Command("git", "clone", "--mirror", remoteUrl, repoPath).Run(); err != nil { 154 - // return false, fmt.Errorf("cloning repo: %w", err) 155 - // } 156 - _, err := git.PlainCloneContext(ctx, repoPath, true, &git.CloneOptions{ 157 - URL: remoteUrl, 158 - Mirror: true, 159 - }) 160 - if err != nil { 161 - return false, fmt.Errorf("cloning repo: %w", err) 162 - } 163 - } else { 164 - // if err := exec.Command("git", "-C", repoPath, "fetch", "--mirror", remoteUrl).Run(); err != nil { 165 - // return false, fmt.Errorf("fetching repo: %w", err) 166 - // } 167 - if err != nil { 168 - return false, fmt.Errorf("laoding repo: %w", err) 169 - } 170 - if err := gr.FetchContext(ctx, &git.FetchOptions{ 171 - RemoteURL: remoteUrl, 172 - RefSpecs: []gitconfig.RefSpec{gitconfig.RefSpec("+refs/*:refs/*")}, 173 - Force: true, 174 - Prune: true, 175 - }); err != nil { 176 - return false, fmt.Errorf("fetching reppo: %w", err) 177 - } 178 - } 179 - 180 - // repo.GitRev = <processed git.refUpdate revision> 181 - // repo.RepoSha = <sha256 sum of git refs> 182 - repo.State = models.RepoStateActive 183 - repo.ErrorMsg = "" 184 - repo.RetryCount = 0 185 - repo.RetryAfter = 0 186 - if err := db.UpsertRepo(ctx, r.db, repo); err != nil { 187 - return false, fmt.Errorf("updating repo state to active %w", err) 188 - } 189 - return true, nil 190 - } 191 - 192 - func (r *Resyncer) handleResyncError(ctx context.Context, repoAt syntax.ATURI, err error) error { 193 - var state models.RepoState 194 - var errMsg string 195 - if err == nil { 196 - state = models.RepoStateDesynchronized 197 - errMsg = "" 198 - } else { 199 - state = models.RepoStateError 
200 - errMsg = err.Error() 201 - } 202 - 203 - repo, err := db.GetRepoByAtUri(ctx, r.db, repoAt) 204 - if err != nil { 205 - return err 206 - } 207 - 208 - // start a 1 min & go up to 1 hr between retries 209 - retryAfter := time.Now().Add(backoff(repo.RetryCount, 60) * 60) 210 - 211 - repo.State = state 212 - repo.ErrorMsg = errMsg 213 - repo.RetryCount = repo.RetryCount + 1 214 - repo.RetryAfter = retryAfter.Unix() 215 - if dbErr := db.UpsertRepo(ctx, r.db, repo); dbErr != nil { 216 - return dbErr 217 - } 218 - return err 219 - } 220 - 221 - func (r *Resyncer) repoPath(repo *models.Repo) string { 222 - return path.Join(r.repoBasePath, repo.Did.String(), repo.Rkey.String()) 223 - } 224 - 225 - func (r *Resyncer) repoRemoteURL(repo *models.Repo) string { 226 - u, _ := url.Parse(repo.KnotDomain) 227 - if u.Scheme == "" { 228 - if r.knotUseSSL { 229 - u.Scheme = "https" 230 - } else { 231 - u.Scheme = "http" 232 - } 233 - } 234 - return u.String() 235 - } 236 - 237 - func backoff(retries int, max int) time.Duration { 238 - dur := min(1<<retries, max) 239 - jitter := time.Millisecond * time.Duration(rand.Intn(1000)) 240 - return time.Second*time.Duration(dur) + jitter 241 - }
···
-78
knotmirror/tapclient.go
··· 1 - package knotmirror 2 - 3 - import ( 4 - "context" 5 - "database/sql" 6 - "encoding/json" 7 - "fmt" 8 - "log/slog" 9 - 10 - "tangled.org/core/api/tangled" 11 - "tangled.org/core/knotmirror/db" 12 - "tangled.org/core/knotmirror/models" 13 - "tangled.org/core/log" 14 - "tangled.org/core/tap" 15 - ) 16 - 17 - type Tap struct { 18 - logger *slog.Logger 19 - tap tap.Client 20 - db *sql.DB 21 - } 22 - 23 - func NewTapClient(l *slog.Logger, tapUrl string) *Tap { 24 - return &Tap{ 25 - logger: log.SubLogger(l, "tapclient"), 26 - tap: tap.NewClient(tapUrl, ""), 27 - } 28 - } 29 - 30 - func (t *Tap) Run(ctx context.Context) { 31 - go t.tap.Connect(ctx, &tap.SimpleIndexer{ 32 - EventHandler: t.processEvent, 33 - }) 34 - } 35 - 36 - func (t *Tap) processEvent(ctx context.Context, evt tap.Event) error { 37 - l := t.logger.With("component", "tapIndexer") 38 - 39 - var err error 40 - switch evt.Type { 41 - case tap.EvtRecord: 42 - switch evt.Record.Collection.String() { 43 - case tangled.RepoNSID: 44 - err = t.processRepo(ctx, evt.Record) 45 - } 46 - } 47 - 48 - if err != nil { 49 - l.Error("failed to process message. will retry later", "event.ID", evt.ID, "err", err) 50 - return err 51 - } 52 - return nil 53 - } 54 - 55 - func (t *Tap) processRepo(ctx context.Context, evt *tap.RecordEventData) error { 56 - switch evt.Action { 57 - case tap.RecordCreateAction, tap.RecordUpdateAction: 58 - record := tangled.Repo{} 59 - if err := json.Unmarshal(evt.Record, &record); err != nil { 60 - return fmt.Errorf("parsing record: %w", err) 61 - } 62 - 63 - if err := db.UpsertRepo(ctx, t.db, &models.Repo{ 64 - Did: evt.Did, 65 - Rkey: evt.Rkey, 66 - Cid: evt.CID, 67 - Name: record.Name, 68 - KnotDomain: record.Knot, 69 - }); err != nil { 70 - return fmt.Errorf("upserting repo to db: %w", err) 71 - } 72 - case tap.RecordDeleteAction: 73 - if err := db.DeleteRepo(ctx, t.db, evt.Did, evt.Rkey); err != nil { 74 - return fmt.Errorf("deleting repo from db: %w", err) 75 - } 76 - } 77 - return nil 78 - }
···
+46 -3
knotserver/git/branch.go
··· 12 "tangled.org/core/types" 13 ) 14 15 - func (g *GitRepo) Branches() ([]types.Branch, error) { 16 fields := []string{ 17 "refname:short", 18 "objectname", ··· 33 if i != 0 { 34 outFormat.WriteString(fieldSeparator) 35 } 36 - outFormat.WriteString(fmt.Sprintf("%%(%s)", f)) 37 } 38 outFormat.WriteString("") 39 outFormat.WriteString(recordSeparator) 40 41 - output, err := g.forEachRef(outFormat.String(), "refs/heads") 42 if err != nil { 43 return nil, fmt.Errorf("failed to get branches: %w", err) 44 } ··· 48 return nil, nil 49 } 50 51 branches := make([]types.Branch, 0, len(records)) 52 53 // ignore errors here ··· 109 110 slices.Reverse(branches) 111 return branches, nil 112 } 113 114 func (g *GitRepo) DeleteBranch(branch string) error {
··· 12 "tangled.org/core/types" 13 ) 14 15 + type BranchesOptions struct { 16 + Limit int 17 + Offset int 18 + } 19 + 20 + func (g *GitRepo) Branches(opts *BranchesOptions) ([]types.Branch, error) { 21 + if opts == nil { 22 + opts = &BranchesOptions{} 23 + } 24 + 25 fields := []string{ 26 "refname:short", 27 "objectname", ··· 42 if i != 0 { 43 outFormat.WriteString(fieldSeparator) 44 } 45 + fmt.Fprintf(&outFormat, "%%(%s)", f) 46 } 47 outFormat.WriteString("") 48 outFormat.WriteString(recordSeparator) 49 50 + args := []string{outFormat.String(), "--sort=-creatordate"} 51 + 52 + // only add the count if the limit is a non-zero value, 53 + // if it is zero, get as many tags as we can 54 + if opts.Limit > 0 { 55 + args = append(args, fmt.Sprintf("--count=%d", opts.Offset+opts.Limit)) 56 + } 57 + 58 + args = append(args, "refs/heads") 59 + 60 + output, err := g.forEachRef(args...) 61 if err != nil { 62 return nil, fmt.Errorf("failed to get branches: %w", err) 63 } ··· 67 return nil, nil 68 } 69 70 + startIdx := opts.Offset 71 + if startIdx >= len(records) { 72 + return nil, nil 73 + } 74 + 75 + endIdx := len(records) 76 + if opts.Limit > 0 { 77 + endIdx = min(startIdx+opts.Limit, len(records)) 78 + } 79 + 80 + records = records[startIdx:endIdx] 81 branches := make([]types.Branch, 0, len(records)) 82 83 // ignore errors here ··· 139 140 slices.Reverse(branches) 141 return branches, nil 142 + } 143 + 144 + func (g *GitRepo) Branch(name string) (*plumbing.Reference, error) { 145 + ref, err := g.r.Reference(plumbing.NewBranchReferenceName(name), false) 146 + if err != nil { 147 + return nil, fmt.Errorf("branch: %w", err) 148 + } 149 + 150 + if !ref.Name().IsBranch() { 151 + return nil, fmt.Errorf("branch: %s is not a branch", ref.Name()) 152 + } 153 + 154 + return ref, nil 155 } 156 157 func (g *GitRepo) DeleteBranch(branch string) error {
+355
knotserver/git/branch_test.go
···
··· 1 + package git 2 + 3 + import ( 4 + "path/filepath" 5 + "slices" 6 + "testing" 7 + 8 + gogit "github.com/go-git/go-git/v5" 9 + "github.com/go-git/go-git/v5/plumbing" 10 + "github.com/stretchr/testify/assert" 11 + "github.com/stretchr/testify/require" 12 + "github.com/stretchr/testify/suite" 13 + 14 + "tangled.org/core/sets" 15 + ) 16 + 17 + type BranchSuite struct { 18 + suite.Suite 19 + *RepoSuite 20 + } 21 + 22 + func TestBranchSuite(t *testing.T) { 23 + t.Parallel() 24 + suite.Run(t, new(BranchSuite)) 25 + } 26 + 27 + func (s *BranchSuite) SetupTest() { 28 + s.RepoSuite = NewRepoSuite(s.T()) 29 + } 30 + 31 + func (s *BranchSuite) TearDownTest() { 32 + s.RepoSuite.cleanup() 33 + } 34 + 35 + func (s *BranchSuite) setupRepoWithBranches() { 36 + s.init() 37 + 38 + // get the initial commit on master 39 + head, err := s.repo.r.Head() 40 + require.NoError(s.T(), err) 41 + initialCommit := head.Hash() 42 + 43 + // create multiple branches with commits 44 + // branch-1 45 + s.createBranch("branch-1", initialCommit) 46 + s.checkoutBranch("branch-1") 47 + _ = s.commitFile("file1.txt", "content 1", "Add file1 on branch-1") 48 + 49 + // branch-2 50 + s.createBranch("branch-2", initialCommit) 51 + s.checkoutBranch("branch-2") 52 + _ = s.commitFile("file2.txt", "content 2", "Add file2 on branch-2") 53 + 54 + // branch-3 55 + s.createBranch("branch-3", initialCommit) 56 + s.checkoutBranch("branch-3") 57 + _ = s.commitFile("file3.txt", "content 3", "Add file3 on branch-3") 58 + 59 + // branch-4 60 + s.createBranch("branch-4", initialCommit) 61 + s.checkoutBranch("branch-4") 62 + s.commitFile("file4.txt", "content 4", "Add file4 on branch-4") 63 + 64 + // back to master and make a commit 65 + s.checkoutBranch("master") 66 + s.commitFile("master-file.txt", "master content", "Add file on master") 67 + 68 + // verify we have multiple branches 69 + refs, err := s.repo.r.References() 70 + require.NoError(s.T(), err) 71 + 72 + branchCount := 0 73 + err = refs.ForEach(func(ref *plumbing.Reference) error { 74 + if ref.Name().IsBranch() { 75 + branchCount++ 76 + } 77 + return nil 78 + }) 79 + require.NoError(s.T(), err) 80 + 81 + // we should have 5 branches: master, branch-1, branch-2, branch-3, branch-4 82 + assert.Equal(s.T(), 5, branchCount, "expected 5 branches") 83 + } 84 + 85 + func (s *BranchSuite) TestBranches_All() { 86 + s.setupRepoWithBranches() 87 + 88 + branches, err := s.repo.Branches(&BranchesOptions{}) 89 + require.NoError(s.T(), err) 90 + 91 + assert.Len(s.T(), branches, 5, "expected 5 branches") 92 + 93 + expectedBranches := sets.Collect(slices.Values([]string{ 94 + "master", 95 + "branch-1", 96 + "branch-2", 97 + "branch-3", 98 + "branch-4", 99 + })) 100 + 101 + for _, branch := range branches { 102 + assert.True(s.T(), expectedBranches.Contains(branch.Reference.Name), 103 + "unexpected branch: %s", branch.Reference.Name) 104 + assert.NotEmpty(s.T(), branch.Reference.Hash, "branch hash should not be empty") 105 + assert.NotNil(s.T(), branch.Commit, "branch commit should not be nil") 106 + } 107 + } 108 + 109 + func (s *BranchSuite) TestBranches_WithLimit() { 110 + s.setupRepoWithBranches() 111 + 112 + tests := []struct { 113 + name string 114 + limit int 115 + expectedCount int 116 + }{ 117 + { 118 + name: "limit 1", 119 + limit: 1, 120 + expectedCount: 1, 121 + }, 122 + { 123 + name: "limit 2", 124 + limit: 2, 125 + expectedCount: 2, 126 + }, 127 + { 128 + name: "limit 3", 129 + limit: 3, 130 + expectedCount: 3, 131 + }, 132 + { 133 + name: "limit 10 (more than available)", 134 + limit: 
10, 135 + expectedCount: 5, 136 + }, 137 + } 138 + 139 + for _, tt := range tests { 140 + s.Run(tt.name, func() { 141 + branches, err := s.repo.Branches(&BranchesOptions{ 142 + Limit: tt.limit, 143 + }) 144 + require.NoError(s.T(), err) 145 + assert.Len(s.T(), branches, tt.expectedCount, "expected %d branches", tt.expectedCount) 146 + }) 147 + } 148 + } 149 + 150 + func (s *BranchSuite) TestBranches_WithOffset() { 151 + s.setupRepoWithBranches() 152 + 153 + tests := []struct { 154 + name string 155 + offset int 156 + expectedCount int 157 + }{ 158 + { 159 + name: "offset 0", 160 + offset: 0, 161 + expectedCount: 5, 162 + }, 163 + { 164 + name: "offset 1", 165 + offset: 1, 166 + expectedCount: 4, 167 + }, 168 + { 169 + name: "offset 2", 170 + offset: 2, 171 + expectedCount: 3, 172 + }, 173 + { 174 + name: "offset 4", 175 + offset: 4, 176 + expectedCount: 1, 177 + }, 178 + { 179 + name: "offset 5 (all skipped)", 180 + offset: 5, 181 + expectedCount: 0, 182 + }, 183 + { 184 + name: "offset 10 (more than available)", 185 + offset: 10, 186 + expectedCount: 0, 187 + }, 188 + } 189 + 190 + for _, tt := range tests { 191 + s.Run(tt.name, func() { 192 + branches, err := s.repo.Branches(&BranchesOptions{ 193 + Offset: tt.offset, 194 + }) 195 + require.NoError(s.T(), err) 196 + assert.Len(s.T(), branches, tt.expectedCount, "expected %d branches", tt.expectedCount) 197 + }) 198 + } 199 + } 200 + 201 + func (s *BranchSuite) TestBranches_WithLimitAndOffset() { 202 + s.setupRepoWithBranches() 203 + 204 + tests := []struct { 205 + name string 206 + limit int 207 + offset int 208 + expectedCount int 209 + }{ 210 + { 211 + name: "limit 2, offset 0", 212 + limit: 2, 213 + offset: 0, 214 + expectedCount: 2, 215 + }, 216 + { 217 + name: "limit 2, offset 1", 218 + limit: 2, 219 + offset: 1, 220 + expectedCount: 2, 221 + }, 222 + { 223 + name: "limit 2, offset 3", 224 + limit: 2, 225 + offset: 3, 226 + expectedCount: 2, 227 + }, 228 + { 229 + name: "limit 2, offset 4", 230 + limit: 2, 231 + offset: 4, 232 + expectedCount: 1, 233 + }, 234 + { 235 + name: "limit 3, offset 2", 236 + limit: 3, 237 + offset: 2, 238 + expectedCount: 3, 239 + }, 240 + { 241 + name: "limit 10, offset 3", 242 + limit: 10, 243 + offset: 3, 244 + expectedCount: 2, 245 + }, 246 + } 247 + 248 + for _, tt := range tests { 249 + s.Run(tt.name, func() { 250 + branches, err := s.repo.Branches(&BranchesOptions{ 251 + Limit: tt.limit, 252 + Offset: tt.offset, 253 + }) 254 + require.NoError(s.T(), err) 255 + assert.Len(s.T(), branches, tt.expectedCount, "expected %d branches", tt.expectedCount) 256 + }) 257 + } 258 + } 259 + 260 + func (s *BranchSuite) TestBranches_EmptyRepo() { 261 + repoPath := filepath.Join(s.tempDir, "empty-repo") 262 + 263 + _, err := gogit.PlainInit(repoPath, false) 264 + require.NoError(s.T(), err) 265 + 266 + gitRepo, err := PlainOpen(repoPath) 267 + require.NoError(s.T(), err) 268 + 269 + branches, err := gitRepo.Branches(&BranchesOptions{}) 270 + require.NoError(s.T(), err) 271 + 272 + if branches != nil { 273 + assert.Empty(s.T(), branches, "expected no branches in empty repo") 274 + } 275 + } 276 + 277 + func (s *BranchSuite) TestBranches_Pagination() { 278 + s.setupRepoWithBranches() 279 + 280 + allBranches, err := s.repo.Branches(&BranchesOptions{}) 281 + require.NoError(s.T(), err) 282 + assert.Len(s.T(), allBranches, 5, "expected 5 branches") 283 + 284 + pageSize := 2 285 + var paginatedBranches []string 286 + 287 + for offset := 0; offset < len(allBranches); offset += pageSize { 288 + branches, err := 
s.repo.Branches(&BranchesOptions{ 289 + Limit: pageSize, 290 + Offset: offset, 291 + }) 292 + require.NoError(s.T(), err) 293 + for _, branch := range branches { 294 + paginatedBranches = append(paginatedBranches, branch.Reference.Name) 295 + } 296 + } 297 + 298 + assert.Len(s.T(), paginatedBranches, len(allBranches), "pagination should return all branches") 299 + 300 + // create sets to verify all branches are present 301 + allBranchNames := sets.New[string]() 302 + for _, branch := range allBranches { 303 + allBranchNames.Insert(branch.Reference.Name) 304 + } 305 + 306 + paginatedBranchNames := sets.New[string]() 307 + for _, name := range paginatedBranches { 308 + paginatedBranchNames.Insert(name) 309 + } 310 + 311 + assert.EqualValues(s.T(), allBranchNames, paginatedBranchNames, 312 + "pagination should return the same set of branches") 313 + } 314 + 315 + func (s *BranchSuite) TestBranches_VerifyBranchFields() { 316 + s.setupRepoWithBranches() 317 + 318 + branches, err := s.repo.Branches(&BranchesOptions{}) 319 + require.NoError(s.T(), err) 320 + 321 + found := false 322 + for i := range branches { 323 + if branches[i].Reference.Name == "master" { 324 + found = true 325 + assert.Equal(s.T(), "master", branches[i].Reference.Name) 326 + assert.NotEmpty(s.T(), branches[i].Reference.Hash) 327 + assert.NotNil(s.T(), branches[i].Commit) 328 + assert.NotEmpty(s.T(), branches[i].Commit.Author.Name) 329 + assert.NotEmpty(s.T(), branches[i].Commit.Author.Email) 330 + assert.False(s.T(), branches[i].Commit.Hash.IsZero()) 331 + break 332 + } 333 + } 334 + 335 + assert.True(s.T(), found, "master branch not found") 336 + } 337 + 338 + func (s *BranchSuite) TestBranches_NilOptions() { 339 + s.setupRepoWithBranches() 340 + 341 + branches, err := s.repo.Branches(nil) 342 + require.NoError(s.T(), err) 343 + assert.Len(s.T(), branches, 5, "nil options should return all branches") 344 + } 345 + 346 + func (s *BranchSuite) TestBranches_ZeroLimitAndOffset() { 347 + s.setupRepoWithBranches() 348 + 349 + branches, err := s.repo.Branches(&BranchesOptions{ 350 + Limit: 0, 351 + Offset: 0, 352 + }) 353 + require.NoError(s.T(), err) 354 + assert.Len(s.T(), branches, 5, "zero limit should return all branches") 355 + }
+1 -14
knotserver/git/git.go
··· 122 func (g *GitRepo) TotalCommits() (int, error) { 123 output, err := g.revList( 124 g.h.String(), 125 - fmt.Sprintf("--count"), 126 ) 127 if err != nil { 128 return 0, fmt.Errorf("failed to run rev-list: %w", err) ··· 250 251 // path is not a submodule 252 return nil, ErrNotSubmodule 253 - } 254 - 255 - func (g *GitRepo) Branch(name string) (*plumbing.Reference, error) { 256 - ref, err := g.r.Reference(plumbing.NewBranchReferenceName(name), false) 257 - if err != nil { 258 - return nil, fmt.Errorf("branch: %w", err) 259 - } 260 - 261 - if !ref.Name().IsBranch() { 262 - return nil, fmt.Errorf("branch: %s is not a branch", ref.Name()) 263 - } 264 - 265 - return ref, nil 266 } 267 268 func (g *GitRepo) SetDefaultBranch(branch string) error {
··· 122 func (g *GitRepo) TotalCommits() (int, error) { 123 output, err := g.revList( 124 g.h.String(), 125 + "--count", 126 ) 127 if err != nil { 128 return 0, fmt.Errorf("failed to run rev-list: %w", err) ··· 250 251 // path is not a submodule 252 return nil, ErrNotSubmodule 253 } 254 255 func (g *GitRepo) SetDefaultBranch(branch string) error {
+94 -31
knotserver/git/last_commit.go
··· 6 "crypto/sha256" 7 "fmt" 8 "io" 9 "os/exec" 10 "path" 11 "strings" 12 "time" 13 14 "github.com/dgraph-io/ristretto" 15 "github.com/go-git/go-git/v5/plumbing" 16 - "github.com/go-git/go-git/v5/plumbing/object" 17 ) 18 19 var ( ··· 72 type commit struct { 73 hash plumbing.Hash 74 when time.Time 75 - files []string 76 message string 77 } 78 79 func cacheKey(g *GitRepo, path string) string { 80 sep := byte(':') 81 hash := sha256.Sum256(fmt.Append([]byte{}, g.path, sep, g.h.String(), sep, path)) 82 return fmt.Sprintf("%x", hash) 83 } 84 85 - func (g *GitRepo) calculateCommitTimeIn(ctx context.Context, subtree *object.Tree, parent string, timeout time.Duration) (map[string]commit, error) { 86 ctx, cancel := context.WithTimeout(ctx, timeout) 87 defer cancel() 88 - return g.calculateCommitTime(ctx, subtree, parent) 89 } 90 91 - func (g *GitRepo) calculateCommitTime(ctx context.Context, subtree *object.Tree, parent string) (map[string]commit, error) { 92 - filesToDo := make(map[string]struct{}) 93 filesDone := make(map[string]commit) 94 - for _, e := range subtree.Entries { 95 - fpath := path.Clean(path.Join(parent, e.Name)) 96 - filesToDo[fpath] = struct{}{} 97 - } 98 99 - for _, e := range subtree.Entries { 100 - f := path.Clean(path.Join(parent, e.Name)) 101 - cacheKey := cacheKey(g, f) 102 if cached, ok := commitCache.Get(cacheKey); ok { 103 - filesDone[f] = cached.(commit) 104 - delete(filesToDo, f) 105 } else { 106 - filesToDo[f] = struct{}{} 107 } 108 } 109 110 - if len(filesToDo) == 0 { 111 return filesDone, nil 112 } 113 ··· 115 defer cancel() 116 117 pathSpec := "." 118 - if parent != "" { 119 - pathSpec = parent 120 } 121 - output, err := g.streamingGitLog(ctx, "--pretty=format:%H,%ad,%s", "--date=iso", "--name-only", "--", pathSpec) 122 if err != nil { 123 return nil, err 124 } 125 defer output.Close() // Ensure the git process is properly cleaned up 126 127 reader := bufio.NewReader(output) 128 - var current commit 129 for { 130 line, err := reader.ReadString('\n') 131 if err != nil && err != io.EOF { ··· 136 if line == "" { 137 if !current.hash.IsZero() { 138 // we have a fully parsed commit 139 - for _, f := range current.files { 140 - if _, ok := filesToDo[f]; ok { 141 filesDone[f] = current 142 - delete(filesToDo, f) 143 commitCache.Set(cacheKey(g, f), current, 0) 144 } 145 } 146 147 - if len(filesToDo) == 0 { 148 - cancel() 149 break 150 } 151 - current = commit{} 152 } 153 } else if current.hash.IsZero() { 154 parts := strings.SplitN(line, ",", 3) 155 if len(parts) == 3 { 156 current.hash = plumbing.NewHash(parts[0]) 157 - current.when, _ = time.Parse("2006-01-02 15:04:05 -0700", parts[1]) 158 current.message = parts[2] 159 } 160 } else { 161 // all ancestors along this path should also be included 162 file := path.Clean(line) 163 - ancestors := ancestors(file) 164 - current.files = append(current.files, file) 165 - current.files = append(current.files, ancestors...) 166 } 167 168 if err == io.EOF { ··· 171 } 172 173 return filesDone, nil 174 } 175 176 func ancestors(p string) []string {
··· 6 "crypto/sha256" 7 "fmt" 8 "io" 9 + "iter" 10 "os/exec" 11 "path" 12 + "strconv" 13 "strings" 14 "time" 15 16 "github.com/dgraph-io/ristretto" 17 "github.com/go-git/go-git/v5/plumbing" 18 + "tangled.org/core/sets" 19 + "tangled.org/core/types" 20 ) 21 22 var ( ··· 75 type commit struct { 76 hash plumbing.Hash 77 when time.Time 78 + files sets.Set[string] 79 message string 80 } 81 82 + func newCommit() commit { 83 + return commit{ 84 + files: sets.New[string](), 85 + } 86 + } 87 + 88 + type lastCommitDir struct { 89 + dir string 90 + entries []string 91 + } 92 + 93 + func (l lastCommitDir) children() iter.Seq[string] { 94 + return func(yield func(string) bool) { 95 + for _, child := range l.entries { 96 + if !yield(path.Join(l.dir, child)) { 97 + return 98 + } 99 + } 100 + } 101 + } 102 + 103 func cacheKey(g *GitRepo, path string) string { 104 sep := byte(':') 105 hash := sha256.Sum256(fmt.Append([]byte{}, g.path, sep, g.h.String(), sep, path)) 106 return fmt.Sprintf("%x", hash) 107 } 108 109 + func (g *GitRepo) lastCommitDirIn(ctx context.Context, parent lastCommitDir, timeout time.Duration) (map[string]commit, error) { 110 ctx, cancel := context.WithTimeout(ctx, timeout) 111 defer cancel() 112 + return g.lastCommitDir(ctx, parent) 113 } 114 115 + func (g *GitRepo) lastCommitDir(ctx context.Context, parent lastCommitDir) (map[string]commit, error) { 116 + filesToDo := sets.Collect(parent.children()) 117 filesDone := make(map[string]commit) 118 119 + for p := range filesToDo.All() { 120 + cacheKey := cacheKey(g, p) 121 if cached, ok := commitCache.Get(cacheKey); ok { 122 + filesDone[p] = cached.(commit) 123 + filesToDo.Remove(p) 124 } else { 125 + filesToDo.Insert(p) 126 } 127 } 128 129 + if filesToDo.IsEmpty() { 130 return filesDone, nil 131 } 132 ··· 134 defer cancel() 135 136 pathSpec := "." 
137 + if parent.dir != "" { 138 + pathSpec = parent.dir 139 + } 140 + if filesToDo.Len() == 1 { 141 + // this is an optimization for the scenario where we want to calculate 142 + // the last commit for just one path, we can directly set the pathspec to that path 143 + for s := range filesToDo.All() { 144 + pathSpec = s 145 + } 146 } 147 + 148 + output, err := g.streamingGitLog(ctx, "--pretty=format:%H,%ad,%s", "--date=unix", "--name-only", "--", pathSpec) 149 if err != nil { 150 return nil, err 151 } 152 defer output.Close() // Ensure the git process is properly cleaned up 153 154 reader := bufio.NewReader(output) 155 + current := newCommit() 156 for { 157 line, err := reader.ReadString('\n') 158 if err != nil && err != io.EOF { ··· 163 if line == "" { 164 if !current.hash.IsZero() { 165 // we have a fully parsed commit 166 + for f := range current.files.All() { 167 + if filesToDo.Contains(f) { 168 filesDone[f] = current 169 + filesToDo.Remove(f) 170 commitCache.Set(cacheKey(g, f), current, 0) 171 } 172 } 173 174 + if filesToDo.IsEmpty() { 175 break 176 } 177 + current = newCommit() 178 } 179 } else if current.hash.IsZero() { 180 parts := strings.SplitN(line, ",", 3) 181 if len(parts) == 3 { 182 current.hash = plumbing.NewHash(parts[0]) 183 + epochTime, _ := strconv.ParseInt(parts[1], 10, 64) 184 + current.when = time.Unix(epochTime, 0) 185 current.message = parts[2] 186 } 187 } else { 188 // all ancestors along this path should also be included 189 file := path.Clean(line) 190 + current.files.Insert(file) 191 + for _, a := range ancestors(file) { 192 + current.files.Insert(a) 193 + } 194 } 195 196 if err == io.EOF { ··· 199 } 200 201 return filesDone, nil 202 + } 203 + 204 + // LastCommitFile returns the last commit information for a specific file path 205 + func (g *GitRepo) LastCommitFile(ctx context.Context, filePath string) (*types.LastCommitInfo, error) { 206 + parent, child := path.Split(filePath) 207 + parent = path.Clean(parent) 208 + if parent == "." { 209 + parent = "" 210 + } 211 + 212 + lastCommitDir := lastCommitDir{ 213 + dir: parent, 214 + entries: []string{child}, 215 + } 216 + 217 + times, err := g.lastCommitDirIn(ctx, lastCommitDir, 2*time.Second) 218 + if err != nil { 219 + return nil, fmt.Errorf("calculate commit time: %w", err) 220 + } 221 + 222 + // extract the only element of the map, the commit info of the current path 223 + var commitInfo *commit 224 + for _, c := range times { 225 + commitInfo = &c 226 + } 227 + 228 + if commitInfo == nil { 229 + return nil, fmt.Errorf("no commit found for path: %s", filePath) 230 + } 231 + 232 + return &types.LastCommitInfo{ 233 + Hash: commitInfo.hash, 234 + Message: commitInfo.message, 235 + When: commitInfo.when, 236 + }, nil 237 } 238 239 func ancestors(p string) []string {
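A sketch of the new `LastCommitFile` helper in use, under the same inferred import path as above; the repository and file paths are illustrative, and the open call mirrors the package tests (knotserver may construct the handle differently). `types.LastCommitInfo` exposes `Hash`, `Message`, and `When` as set in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"tangled.org/core/knotserver/git"
)

func main() {
	repo, err := git.PlainOpen("/srv/repos/did:plc:example/repo") // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// last commit that touched this one path; internally the pathspec is
	// narrowed to the single file and results land in the per-path cache
	info, err := repo.LastCommitFile(context.Background(), "docs/guide.md")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%s %s %s\n", info.Hash, info.When.Format(time.RFC3339), info.Message)
}
```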
+30 -30
knotserver/git/merge.go
··· 107 return fmt.Sprintf("merge failed: %s", e.Message) 108 } 109 110 - func (g *GitRepo) createTempFileWithPatch(patchData string) (string, error) { 111 tmpFile, err := os.CreateTemp("", "git-patch-*.patch") 112 if err != nil { 113 return "", fmt.Errorf("failed to create temporary patch file: %w", err) 114 } 115 116 - if _, err := tmpFile.Write([]byte(patchData)); err != nil { 117 tmpFile.Close() 118 os.Remove(tmpFile.Name()) 119 return "", fmt.Errorf("failed to write patch data to temporary file: %w", err) ··· 127 return tmpFile.Name(), nil 128 } 129 130 - func (g *GitRepo) cloneRepository(targetBranch string) (string, error) { 131 tmpDir, err := os.MkdirTemp("", "git-clone-") 132 if err != nil { 133 return "", fmt.Errorf("failed to create temporary directory: %w", err) ··· 147 return tmpDir, nil 148 } 149 150 - func (g *GitRepo) checkPatch(tmpDir, patchFile string) error { 151 - var stderr bytes.Buffer 152 - 153 - cmd := exec.Command("git", "-C", tmpDir, "apply", "--check", "-v", patchFile) 154 - cmd.Stderr = &stderr 155 - 156 - if err := cmd.Run(); err != nil { 157 - conflicts := parseGitApplyErrors(stderr.String()) 158 - return &ErrMerge{ 159 - Message: "patch cannot be applied cleanly", 160 - Conflicts: conflicts, 161 - HasConflict: len(conflicts) > 0, 162 - OtherError: err, 163 - } 164 - } 165 - return nil 166 - } 167 - 168 func (g *GitRepo) applyPatch(patchData, patchFile string, opts MergeOptions) error { 169 var stderr bytes.Buffer 170 var cmd *exec.Cmd ··· 173 exec.Command("git", "-C", g.path, "config", "user.name", opts.CommitterName).Run() 174 exec.Command("git", "-C", g.path, "config", "user.email", opts.CommitterEmail).Run() 175 exec.Command("git", "-C", g.path, "config", "advice.mergeConflict", "false").Run() 176 177 // if patch is a format-patch, apply using 'git am' 178 if opts.FormatPatch { ··· 213 cmd.Stderr = &stderr 214 215 if err := cmd.Run(); err != nil { 216 - return fmt.Errorf("patch application failed: %s", stderr.String()) 217 } 218 219 return nil ··· 241 } 242 243 func (g *GitRepo) applySingleMailbox(singlePatch types.FormatPatch) (plumbing.Hash, error) { 244 - tmpPatch, err := g.createTempFileWithPatch(singlePatch.Raw) 245 if err != nil { 246 return plumbing.ZeroHash, fmt.Errorf("failed to create temporary patch file for singluar mailbox patch: %w", err) 247 } ··· 257 log.Println("head before apply", head.Hash().String()) 258 259 if err := cmd.Run(); err != nil { 260 - return plumbing.ZeroHash, fmt.Errorf("patch application failed: %s", stderr.String()) 261 } 262 263 if err := g.Refresh(); err != nil { ··· 324 return newHash, nil 325 } 326 327 - func (g *GitRepo) MergeCheck(patchData string, targetBranch string) error { 328 if val, ok := mergeCheckCache.Get(g, patchData, targetBranch); ok { 329 return val 330 } 331 332 - patchFile, err := g.createTempFileWithPatch(patchData) 333 if err != nil { 334 return &ErrMerge{ 335 Message: err.Error(), ··· 338 } 339 defer os.Remove(patchFile) 340 341 - tmpDir, err := g.cloneRepository(targetBranch) 342 if err != nil { 343 return &ErrMerge{ 344 Message: err.Error(), ··· 347 } 348 defer os.RemoveAll(tmpDir) 349 350 - result := g.checkPatch(tmpDir, patchFile) 351 mergeCheckCache.Set(g, patchData, targetBranch, result) 352 return result 353 } 354 355 func (g *GitRepo) MergeWithOptions(patchData string, targetBranch string, opts MergeOptions) error { 356 - patchFile, err := g.createTempFileWithPatch(patchData) 357 if err != nil { 358 return &ErrMerge{ 359 Message: err.Error(), ··· 362 } 363 defer os.Remove(patchFile) 364 
365 - tmpDir, err := g.cloneRepository(targetBranch) 366 if err != nil { 367 return &ErrMerge{ 368 Message: err.Error(),
··· 107 return fmt.Sprintf("merge failed: %s", e.Message) 108 } 109 110 + func createTemp(data string) (string, error) { 111 tmpFile, err := os.CreateTemp("", "git-patch-*.patch") 112 if err != nil { 113 return "", fmt.Errorf("failed to create temporary patch file: %w", err) 114 } 115 116 + if _, err := tmpFile.Write([]byte(data)); err != nil { 117 tmpFile.Close() 118 os.Remove(tmpFile.Name()) 119 return "", fmt.Errorf("failed to write patch data to temporary file: %w", err) ··· 127 return tmpFile.Name(), nil 128 } 129 130 + func (g *GitRepo) cloneTemp(targetBranch string) (string, error) { 131 tmpDir, err := os.MkdirTemp("", "git-clone-") 132 if err != nil { 133 return "", fmt.Errorf("failed to create temporary directory: %w", err) ··· 147 return tmpDir, nil 148 } 149 150 func (g *GitRepo) applyPatch(patchData, patchFile string, opts MergeOptions) error { 151 var stderr bytes.Buffer 152 var cmd *exec.Cmd ··· 155 exec.Command("git", "-C", g.path, "config", "user.name", opts.CommitterName).Run() 156 exec.Command("git", "-C", g.path, "config", "user.email", opts.CommitterEmail).Run() 157 exec.Command("git", "-C", g.path, "config", "advice.mergeConflict", "false").Run() 158 + exec.Command("git", "-C", g.path, "config", "advice.amWorkDir", "false").Run() 159 160 // if patch is a format-patch, apply using 'git am' 161 if opts.FormatPatch { ··· 196 cmd.Stderr = &stderr 197 198 if err := cmd.Run(); err != nil { 199 + conflicts := parseGitApplyErrors(stderr.String()) 200 + return &ErrMerge{ 201 + Message: "patch cannot be applied cleanly", 202 + Conflicts: conflicts, 203 + HasConflict: len(conflicts) > 0, 204 + OtherError: err, 205 + } 206 } 207 208 return nil ··· 230 } 231 232 func (g *GitRepo) applySingleMailbox(singlePatch types.FormatPatch) (plumbing.Hash, error) { 233 + tmpPatch, err := createTemp(singlePatch.Raw) 234 if err != nil { 235 return plumbing.ZeroHash, fmt.Errorf("failed to create temporary patch file for singluar mailbox patch: %w", err) 236 } ··· 246 log.Println("head before apply", head.Hash().String()) 247 248 if err := cmd.Run(); err != nil { 249 + conflicts := parseGitApplyErrors(stderr.String()) 250 + return plumbing.ZeroHash, &ErrMerge{ 251 + Message: "patch cannot be applied cleanly", 252 + Conflicts: conflicts, 253 + HasConflict: len(conflicts) > 0, 254 + OtherError: err, 255 + } 256 } 257 258 if err := g.Refresh(); err != nil { ··· 319 return newHash, nil 320 } 321 322 + func (g *GitRepo) MergeCheckWithOptions(patchData string, targetBranch string, mo MergeOptions) error { 323 if val, ok := mergeCheckCache.Get(g, patchData, targetBranch); ok { 324 return val 325 } 326 327 + patchFile, err := createTemp(patchData) 328 if err != nil { 329 return &ErrMerge{ 330 Message: err.Error(), ··· 333 } 334 defer os.Remove(patchFile) 335 336 + tmpDir, err := g.cloneTemp(targetBranch) 337 if err != nil { 338 return &ErrMerge{ 339 Message: err.Error(), ··· 342 } 343 defer os.RemoveAll(tmpDir) 344 345 + tmpRepo, err := PlainOpen(tmpDir) 346 + if err != nil { 347 + return err 348 + } 349 + 350 + result := tmpRepo.applyPatch(patchData, patchFile, mo) 351 mergeCheckCache.Set(g, patchData, targetBranch, result) 352 return result 353 } 354 355 func (g *GitRepo) MergeWithOptions(patchData string, targetBranch string, opts MergeOptions) error { 356 + patchFile, err := createTemp(patchData) 357 if err != nil { 358 return &ErrMerge{ 359 Message: err.Error(), ··· 362 } 363 defer os.Remove(patchFile) 364 365 + tmpDir, err := g.cloneTemp(targetBranch) 366 if err != nil { 367 return &ErrMerge{ 368 
Message: err.Error(),
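The dry-run entry point is now `MergeCheckWithOptions` and takes the same `MergeOptions` as the real merge, so format-patch series are checked with `git am` semantics as well. A hedged call-site sketch follows; the package name, commit message, and committer identity are illustrative, and the error handling relies on `ErrMerge` carrying the parsed conflict list as shown above.

```go
package example

import (
	"errors"
	"log"

	"tangled.org/core/knotserver/git"
)

// checkThenMerge dry-runs the patch against targetBranch and, if it applies
// cleanly, performs the real merge with the same options.
func checkThenMerge(repo *git.GitRepo, patchData, targetBranch string) error {
	opts := git.MergeOptions{
		CommitMessage:  "merge patch",          // illustrative
		CommitterName:  "knotserver",           // illustrative
		CommitterEmail: "noreply@knot.example", // illustrative
		FormatPatch:    true,                   // patchData is a `git format-patch` series
	}

	if err := repo.MergeCheckWithOptions(patchData, targetBranch, opts); err != nil {
		var me *git.ErrMerge
		if errors.As(err, &me) && me.HasConflict {
			for _, c := range me.Conflicts {
				log.Printf("conflict: %+v", c)
			}
		}
		return err
	}

	return repo.MergeWithOptions(patchData, targetBranch, opts)
}
```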
+706
knotserver/git/merge_test.go
···
··· 1 + package git 2 + 3 + import ( 4 + "os" 5 + "path/filepath" 6 + "strings" 7 + "testing" 8 + 9 + "github.com/go-git/go-git/v5" 10 + "github.com/go-git/go-git/v5/config" 11 + "github.com/go-git/go-git/v5/plumbing" 12 + "github.com/go-git/go-git/v5/plumbing/object" 13 + "github.com/stretchr/testify/assert" 14 + "github.com/stretchr/testify/require" 15 + ) 16 + 17 + type Helper struct { 18 + t *testing.T 19 + tempDir string 20 + repo *GitRepo 21 + } 22 + 23 + func helper(t *testing.T) *Helper { 24 + tempDir, err := os.MkdirTemp("", "git-merge-test-*") 25 + require.NoError(t, err) 26 + 27 + return &Helper{ 28 + t: t, 29 + tempDir: tempDir, 30 + } 31 + } 32 + 33 + func (h *Helper) cleanup() { 34 + if h.tempDir != "" { 35 + os.RemoveAll(h.tempDir) 36 + } 37 + } 38 + 39 + // initRepo initializes a git repository with an initial commit 40 + func (h *Helper) initRepo() *GitRepo { 41 + repoPath := filepath.Join(h.tempDir, "test-repo") 42 + 43 + // initialize repository 44 + r, err := git.PlainInit(repoPath, false) 45 + require.NoError(h.t, err) 46 + 47 + // configure git user 48 + cfg, err := r.Config() 49 + require.NoError(h.t, err) 50 + cfg.User.Name = "Test User" 51 + cfg.User.Email = "test@example.com" 52 + err = r.SetConfig(cfg) 53 + require.NoError(h.t, err) 54 + 55 + // create initial commit with a file 56 + w, err := r.Worktree() 57 + require.NoError(h.t, err) 58 + 59 + // create initial file 60 + initialFile := filepath.Join(repoPath, "README.md") 61 + err = os.WriteFile(initialFile, []byte("# Test Repository\n\nInitial content.\n"), 0644) 62 + require.NoError(h.t, err) 63 + 64 + _, err = w.Add("README.md") 65 + require.NoError(h.t, err) 66 + 67 + _, err = w.Commit("Initial commit", &git.CommitOptions{ 68 + Author: &object.Signature{ 69 + Name: "Test User", 70 + Email: "test@example.com", 71 + }, 72 + }) 73 + require.NoError(h.t, err) 74 + 75 + gitRepo, err := PlainOpen(repoPath) 76 + require.NoError(h.t, err) 77 + 78 + h.repo = gitRepo 79 + return gitRepo 80 + } 81 + 82 + // addFile creates a file in the repository 83 + func (h *Helper) addFile(filename, content string) { 84 + filePath := filepath.Join(h.repo.path, filename) 85 + dir := filepath.Dir(filePath) 86 + 87 + err := os.MkdirAll(dir, 0755) 88 + require.NoError(h.t, err) 89 + 90 + err = os.WriteFile(filePath, []byte(content), 0644) 91 + require.NoError(h.t, err) 92 + } 93 + 94 + // commitFile adds and commits a file 95 + func (h *Helper) commitFile(filename, content, message string) plumbing.Hash { 96 + h.addFile(filename, content) 97 + 98 + w, err := h.repo.r.Worktree() 99 + require.NoError(h.t, err) 100 + 101 + _, err = w.Add(filename) 102 + require.NoError(h.t, err) 103 + 104 + hash, err := w.Commit(message, &git.CommitOptions{ 105 + Author: &object.Signature{ 106 + Name: "Test User", 107 + Email: "test@example.com", 108 + }, 109 + }) 110 + require.NoError(h.t, err) 111 + 112 + return hash 113 + } 114 + 115 + // readFile reads a file from the repository 116 + func (h *Helper) readFile(filename string) string { 117 + content, err := os.ReadFile(filepath.Join(h.repo.path, filename)) 118 + require.NoError(h.t, err) 119 + return string(content) 120 + } 121 + 122 + // fileExists checks if a file exists in the repository 123 + func (h *Helper) fileExists(filename string) bool { 124 + _, err := os.Stat(filepath.Join(h.repo.path, filename)) 125 + return err == nil 126 + } 127 + 128 + func TestApplyPatch_Success(t *testing.T) { 129 + h := helper(t) 130 + defer h.cleanup() 131 + 132 + repo := h.initRepo() 133 + 134 + // modify 
README.md 135 + patch := `diff --git a/README.md b/README.md 136 + index 1234567..abcdefg 100644 137 + --- a/README.md 138 + +++ b/README.md 139 + @@ -1,3 +1,3 @@ 140 + # Test Repository 141 + 142 + -Initial content. 143 + +Modified content. 144 + ` 145 + 146 + patchFile, err := createTemp(patch) 147 + require.NoError(t, err) 148 + defer os.Remove(patchFile) 149 + 150 + opts := MergeOptions{ 151 + CommitMessage: "Apply test patch", 152 + CommitterName: "Test Committer", 153 + CommitterEmail: "committer@example.com", 154 + FormatPatch: false, 155 + } 156 + 157 + err = repo.applyPatch(patch, patchFile, opts) 158 + assert.NoError(t, err) 159 + 160 + // verify the file was modified 161 + content := h.readFile("README.md") 162 + assert.Contains(t, content, "Modified content.") 163 + } 164 + 165 + func TestApplyPatch_AddNewFile(t *testing.T) { 166 + h := helper(t) 167 + defer h.cleanup() 168 + 169 + repo := h.initRepo() 170 + 171 + // add a new file 172 + patch := `diff --git a/newfile.txt b/newfile.txt 173 + new file mode 100644 174 + index 0000000..ce01362 175 + --- /dev/null 176 + +++ b/newfile.txt 177 + @@ -0,0 +1 @@ 178 + +hello 179 + ` 180 + 181 + patchFile, err := createTemp(patch) 182 + require.NoError(t, err) 183 + defer os.Remove(patchFile) 184 + 185 + opts := MergeOptions{ 186 + CommitMessage: "Add new file", 187 + CommitterName: "Test Committer", 188 + CommitterEmail: "committer@example.com", 189 + FormatPatch: false, 190 + } 191 + 192 + err = repo.applyPatch(patch, patchFile, opts) 193 + assert.NoError(t, err) 194 + 195 + assert.True(t, h.fileExists("newfile.txt")) 196 + content := h.readFile("newfile.txt") 197 + assert.Equal(t, "hello\n", content) 198 + } 199 + 200 + func TestApplyPatch_DeleteFile(t *testing.T) { 201 + h := helper(t) 202 + defer h.cleanup() 203 + 204 + repo := h.initRepo() 205 + 206 + // add a file 207 + h.commitFile("deleteme.txt", "content to delete\n", "Add file to delete") 208 + 209 + // delete the file 210 + patch := `diff --git a/deleteme.txt b/deleteme.txt 211 + deleted file mode 100644 212 + index 1234567..0000000 213 + --- a/deleteme.txt 214 + +++ /dev/null 215 + @@ -1 +0,0 @@ 216 + -content to delete 217 + ` 218 + 219 + patchFile, err := createTemp(patch) 220 + require.NoError(t, err) 221 + defer os.Remove(patchFile) 222 + 223 + opts := MergeOptions{ 224 + CommitMessage: "Delete file", 225 + CommitterName: "Test Committer", 226 + CommitterEmail: "committer@example.com", 227 + FormatPatch: false, 228 + } 229 + 230 + err = repo.applyPatch(patch, patchFile, opts) 231 + assert.NoError(t, err) 232 + 233 + assert.False(t, h.fileExists("deleteme.txt")) 234 + } 235 + 236 + func TestApplyPatch_WithAuthor(t *testing.T) { 237 + h := helper(t) 238 + defer h.cleanup() 239 + 240 + repo := h.initRepo() 241 + 242 + patch := `diff --git a/README.md b/README.md 243 + index 1234567..abcdefg 100644 244 + --- a/README.md 245 + +++ b/README.md 246 + @@ -1,3 +1,4 @@ 247 + # Test Repository 248 + 249 + Initial content. 250 + +New line. 
251 + ` 252 + 253 + patchFile, err := createTemp(patch) 254 + require.NoError(t, err) 255 + defer os.Remove(patchFile) 256 + 257 + opts := MergeOptions{ 258 + CommitMessage: "Patch with author", 259 + AuthorName: "Patch Author", 260 + AuthorEmail: "author@example.com", 261 + CommitterName: "Test Committer", 262 + CommitterEmail: "committer@example.com", 263 + FormatPatch: false, 264 + } 265 + 266 + err = repo.applyPatch(patch, patchFile, opts) 267 + assert.NoError(t, err) 268 + 269 + head, err := repo.r.Head() 270 + require.NoError(t, err) 271 + 272 + commit, err := repo.r.CommitObject(head.Hash()) 273 + require.NoError(t, err) 274 + 275 + assert.Equal(t, "Patch Author", commit.Author.Name) 276 + assert.Equal(t, "author@example.com", commit.Author.Email) 277 + } 278 + 279 + func TestApplyPatch_MissingFile(t *testing.T) { 280 + h := helper(t) 281 + defer h.cleanup() 282 + 283 + repo := h.initRepo() 284 + 285 + // patch that modifies a non-existent file 286 + patch := `diff --git a/nonexistent.txt b/nonexistent.txt 287 + index 1234567..abcdefg 100644 288 + --- a/nonexistent.txt 289 + +++ b/nonexistent.txt 290 + @@ -1 +1 @@ 291 + -old content 292 + +new content 293 + ` 294 + 295 + patchFile, err := createTemp(patch) 296 + require.NoError(t, err) 297 + defer os.Remove(patchFile) 298 + 299 + opts := MergeOptions{ 300 + CommitMessage: "Should fail", 301 + CommitterName: "Test Committer", 302 + CommitterEmail: "committer@example.com", 303 + FormatPatch: false, 304 + } 305 + 306 + err = repo.applyPatch(patch, patchFile, opts) 307 + assert.Error(t, err) 308 + assert.Contains(t, err.Error(), "patch application failed") 309 + } 310 + 311 + func TestApplyPatch_Conflict(t *testing.T) { 312 + h := helper(t) 313 + defer h.cleanup() 314 + 315 + repo := h.initRepo() 316 + 317 + // modify the file to create a conflict 318 + h.commitFile("README.md", "# Test Repository\n\nDifferent content.\n", "Modify README") 319 + 320 + // patch that expects different content 321 + patch := `diff --git a/README.md b/README.md 322 + index 1234567..abcdefg 100644 323 + --- a/README.md 324 + +++ b/README.md 325 + @@ -1,3 +1,3 @@ 326 + # Test Repository 327 + 328 + -Initial content. 329 + +Modified content. 
330 + ` 331 + 332 + patchFile, err := createTemp(patch) 333 + require.NoError(t, err) 334 + defer os.Remove(patchFile) 335 + 336 + opts := MergeOptions{ 337 + CommitMessage: "Should conflict", 338 + CommitterName: "Test Committer", 339 + CommitterEmail: "committer@example.com", 340 + FormatPatch: false, 341 + } 342 + 343 + err = repo.applyPatch(patch, patchFile, opts) 344 + assert.Error(t, err) 345 + } 346 + 347 + func TestApplyPatch_MissingDirectory(t *testing.T) { 348 + h := helper(t) 349 + defer h.cleanup() 350 + 351 + repo := h.initRepo() 352 + 353 + // patch that adds a file in a non-existent directory 354 + patch := `diff --git a/subdir/newfile.txt b/subdir/newfile.txt 355 + new file mode 100644 356 + index 0000000..ce01362 357 + --- /dev/null 358 + +++ b/subdir/newfile.txt 359 + @@ -0,0 +1 @@ 360 + +content 361 + ` 362 + 363 + patchFile, err := createTemp(patch) 364 + require.NoError(t, err) 365 + defer os.Remove(patchFile) 366 + 367 + opts := MergeOptions{ 368 + CommitMessage: "Add file in subdir", 369 + CommitterName: "Test Committer", 370 + CommitterEmail: "committer@example.com", 371 + FormatPatch: false, 372 + } 373 + 374 + // git apply should create the directory automatically 375 + err = repo.applyPatch(patch, patchFile, opts) 376 + assert.NoError(t, err) 377 + 378 + // Verify the file and directory were created 379 + assert.True(t, h.fileExists("subdir/newfile.txt")) 380 + } 381 + 382 + func TestApplyMailbox_Single(t *testing.T) { 383 + h := helper(t) 384 + defer h.cleanup() 385 + 386 + repo := h.initRepo() 387 + 388 + // format-patch mailbox format 389 + patch := `From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 390 + From: Patch Author <author@example.com> 391 + Date: Mon, 1 Jan 2024 12:00:00 +0000 392 + Subject: [PATCH] Add new feature 393 + 394 + This is a test patch. 
395 + --- 396 + newfile.txt | 1 + 397 + 1 file changed, 1 insertion(+) 398 + create mode 100644 newfile.txt 399 + 400 + diff --git a/newfile.txt b/newfile.txt 401 + new file mode 100644 402 + index 0000000..ce01362 403 + --- /dev/null 404 + +++ b/newfile.txt 405 + @@ -0,0 +1 @@ 406 + +hello 407 + -- 408 + 2.40.0 409 + ` 410 + 411 + err := repo.applyMailbox(patch) 412 + assert.NoError(t, err) 413 + 414 + assert.True(t, h.fileExists("newfile.txt")) 415 + content := h.readFile("newfile.txt") 416 + assert.Equal(t, "hello\n", content) 417 + } 418 + 419 + func TestApplyMailbox_Multiple(t *testing.T) { 420 + h := helper(t) 421 + defer h.cleanup() 422 + 423 + repo := h.initRepo() 424 + 425 + // multiple patches in mailbox format 426 + patch := `From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 427 + From: Patch Author <author@example.com> 428 + Date: Mon, 1 Jan 2024 12:00:00 +0000 429 + Subject: [PATCH 1/2] Add first file 430 + 431 + --- 432 + file1.txt | 1 + 433 + 1 file changed, 1 insertion(+) 434 + create mode 100644 file1.txt 435 + 436 + diff --git a/file1.txt b/file1.txt 437 + new file mode 100644 438 + index 0000000..ce01362 439 + --- /dev/null 440 + +++ b/file1.txt 441 + @@ -0,0 +1 @@ 442 + +first 443 + -- 444 + 2.40.0 445 + 446 + From 1111111111111111111111111111111111111111 Mon Sep 17 00:00:00 2001 447 + From: Patch Author <author@example.com> 448 + Date: Mon, 1 Jan 2024 12:01:00 +0000 449 + Subject: [PATCH 2/2] Add second file 450 + 451 + --- 452 + file2.txt | 1 + 453 + 1 file changed, 1 insertion(+) 454 + create mode 100644 file2.txt 455 + 456 + diff --git a/file2.txt b/file2.txt 457 + new file mode 100644 458 + index 0000000..ce01362 459 + --- /dev/null 460 + +++ b/file2.txt 461 + @@ -0,0 +1 @@ 462 + +second 463 + -- 464 + 2.40.0 465 + ` 466 + 467 + err := repo.applyMailbox(patch) 468 + assert.NoError(t, err) 469 + 470 + assert.True(t, h.fileExists("file1.txt")) 471 + assert.True(t, h.fileExists("file2.txt")) 472 + 473 + content1 := h.readFile("file1.txt") 474 + assert.Equal(t, "first\n", content1) 475 + 476 + content2 := h.readFile("file2.txt") 477 + assert.Equal(t, "second\n", content2) 478 + } 479 + 480 + func TestApplyMailbox_Conflict(t *testing.T) { 481 + h := helper(t) 482 + defer h.cleanup() 483 + 484 + repo := h.initRepo() 485 + 486 + h.commitFile("README.md", "# Test Repository\n\nConflicting content.\n", "Create conflict") 487 + 488 + patch := `From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 489 + From: Patch Author <author@example.com> 490 + Date: Mon, 1 Jan 2024 12:00:00 +0000 491 + Subject: [PATCH] Modify README 492 + 493 + --- 494 + README.md | 2 +- 495 + 1 file changed, 1 insertion(+), 1 deletion(-) 496 + 497 + diff --git a/README.md b/README.md 498 + index 1234567..abcdefg 100644 499 + --- a/README.md 500 + +++ b/README.md 501 + @@ -1,3 +1,3 @@ 502 + # Test Repository 503 + 504 + -Initial content. 505 + +Different content. 
506 + -- 507 + 2.40.0 508 + ` 509 + 510 + err := repo.applyMailbox(patch) 511 + assert.Error(t, err) 512 + 513 + var mergeErr *ErrMerge 514 + assert.ErrorAs(t, err, &mergeErr) 515 + } 516 + 517 + func TestParseGitApplyErrors(t *testing.T) { 518 + tests := []struct { 519 + name string 520 + errorOutput string 521 + expectedCount int 522 + expectedReason string 523 + }{ 524 + { 525 + name: "file already exists", 526 + errorOutput: `error: path/to/file.txt: already exists in working directory`, 527 + expectedCount: 1, 528 + expectedReason: "file already exists", 529 + }, 530 + { 531 + name: "file does not exist", 532 + errorOutput: `error: path/to/file.txt: does not exist in working tree`, 533 + expectedCount: 1, 534 + expectedReason: "file does not exist", 535 + }, 536 + { 537 + name: "patch does not apply", 538 + errorOutput: `error: patch failed: file.txt:10 539 + error: file.txt: patch does not apply`, 540 + expectedCount: 1, 541 + expectedReason: "patch does not apply", 542 + }, 543 + { 544 + name: "multiple conflicts", 545 + errorOutput: `error: patch failed: file1.txt:5 546 + error: file1.txt:5: some error 547 + error: patch failed: file2.txt:10 548 + error: file2.txt:10: another error`, 549 + expectedCount: 2, 550 + }, 551 + } 552 + 553 + for _, tt := range tests { 554 + t.Run(tt.name, func(t *testing.T) { 555 + conflicts := parseGitApplyErrors(tt.errorOutput) 556 + assert.Len(t, conflicts, tt.expectedCount) 557 + 558 + if tt.expectedReason != "" && len(conflicts) > 0 { 559 + assert.Equal(t, tt.expectedReason, conflicts[0].Reason) 560 + } 561 + }) 562 + } 563 + } 564 + 565 + func TestErrMerge_Error(t *testing.T) { 566 + tests := []struct { 567 + name string 568 + err ErrMerge 569 + expectedMsg string 570 + }{ 571 + { 572 + name: "with conflicts", 573 + err: ErrMerge{ 574 + Message: "test merge failed", 575 + HasConflict: true, 576 + Conflicts: []ConflictInfo{ 577 + {Filename: "file1.txt", Reason: "conflict 1"}, 578 + {Filename: "file2.txt", Reason: "conflict 2"}, 579 + }, 580 + }, 581 + expectedMsg: "merge failed due to conflicts: test merge failed (2 conflicts)", 582 + }, 583 + { 584 + name: "with other error", 585 + err: ErrMerge{ 586 + Message: "command failed", 587 + OtherError: assert.AnError, 588 + }, 589 + expectedMsg: "merge failed: command failed:", 590 + }, 591 + { 592 + name: "message only", 593 + err: ErrMerge{ 594 + Message: "simple failure", 595 + }, 596 + expectedMsg: "merge failed: simple failure", 597 + }, 598 + } 599 + 600 + for _, tt := range tests { 601 + t.Run(tt.name, func(t *testing.T) { 602 + errMsg := tt.err.Error() 603 + assert.Contains(t, errMsg, tt.expectedMsg) 604 + }) 605 + } 606 + } 607 + 608 + func TestMergeWithOptions_Integration(t *testing.T) { 609 + h := helper(t) 610 + defer h.cleanup() 611 + 612 + // create a repository first with initial content 613 + workRepoPath := filepath.Join(h.tempDir, "work-repo") 614 + workRepo, err := git.PlainInit(workRepoPath, false) 615 + require.NoError(t, err) 616 + 617 + // configure git user 618 + cfg, err := workRepo.Config() 619 + require.NoError(t, err) 620 + cfg.User.Name = "Test User" 621 + cfg.User.Email = "test@example.com" 622 + err = workRepo.SetConfig(cfg) 623 + require.NoError(t, err) 624 + 625 + // Create initial commit 626 + w, err := workRepo.Worktree() 627 + require.NoError(t, err) 628 + 629 + err = os.WriteFile(filepath.Join(workRepoPath, "README.md"), []byte("# Initial\n"), 0644) 630 + require.NoError(t, err) 631 + 632 + _, err = w.Add("README.md") 633 + require.NoError(t, err) 634 + 635 + _, err = 
w.Commit("Initial commit", &git.CommitOptions{ 636 + Author: &object.Signature{ 637 + Name: "Test User", 638 + Email: "test@example.com", 639 + }, 640 + }) 641 + require.NoError(t, err) 642 + 643 + // create a bare repository (like production) 644 + bareRepoPath := filepath.Join(h.tempDir, "bare-repo") 645 + err = InitBare(bareRepoPath, "main") 646 + require.NoError(t, err) 647 + 648 + // add bare repo as remote and push to it 649 + _, err = workRepo.CreateRemote(&config.RemoteConfig{ 650 + Name: "origin", 651 + URLs: []string{"file://" + bareRepoPath}, 652 + }) 653 + require.NoError(t, err) 654 + 655 + err = workRepo.Push(&git.PushOptions{ 656 + RemoteName: "origin", 657 + RefSpecs: []config.RefSpec{"refs/heads/master:refs/heads/main"}, 658 + }) 659 + require.NoError(t, err) 660 + 661 + // now merge a patch into the bare repo 662 + gitRepo, err := PlainOpen(bareRepoPath) 663 + require.NoError(t, err) 664 + 665 + patch := `diff --git a/feature.txt b/feature.txt 666 + new file mode 100644 667 + index 0000000..5e1c309 668 + --- /dev/null 669 + +++ b/feature.txt 670 + @@ -0,0 +1 @@ 671 + +Hello World 672 + ` 673 + 674 + opts := MergeOptions{ 675 + CommitMessage: "Add feature", 676 + CommitterName: "Test Committer", 677 + CommitterEmail: "committer@example.com", 678 + FormatPatch: false, 679 + } 680 + 681 + err = gitRepo.MergeWithOptions(patch, "main", opts) 682 + assert.NoError(t, err) 683 + 684 + // Clone again and verify the changes were merged 685 + verifyRepoPath := filepath.Join(h.tempDir, "verify-repo") 686 + verifyRepo, err := git.PlainClone(verifyRepoPath, false, &git.CloneOptions{ 687 + URL: "file://" + bareRepoPath, 688 + }) 689 + require.NoError(t, err) 690 + 691 + // check that feature.txt exists 692 + featureFile := filepath.Join(verifyRepoPath, "feature.txt") 693 + assert.FileExists(t, featureFile) 694 + 695 + content, err := os.ReadFile(featureFile) 696 + require.NoError(t, err) 697 + assert.Equal(t, "Hello World\n", string(content)) 698 + 699 + // verify commit message 700 + head, err := verifyRepo.Head() 701 + require.NoError(t, err) 702 + 703 + commit, err := verifyRepo.CommitObject(head.Hash()) 704 + require.NoError(t, err) 705 + assert.Equal(t, "Add feature", strings.TrimSpace(commit.Message)) 706 + }
+1 -1
knotserver/git/post_receive.go
··· 95 // git rev-list <newsha> ^other-branches --not ^this-branch 96 args = append(args, line.NewSha.String()) 97 98 - branches, _ := g.Branches() 99 for _, b := range branches { 100 if !strings.Contains(line.Ref, b.Name) { 101 args = append(args, fmt.Sprintf("^%s", b.Name))
··· 95 // git rev-list <newsha> ^other-branches --not ^this-branch 96 args = append(args, line.NewSha.String()) 97 98 + branches, _ := g.Branches(nil) 99 for _, b := range branches { 100 if !strings.Contains(line.Ref, b.Name) { 101 args = append(args, fmt.Sprintf("^%s", b.Name))
+38 -3
knotserver/git/tag.go
··· 10 "github.com/go-git/go-git/v5/plumbing/object" 11 ) 12 13 - func (g *GitRepo) Tags() ([]object.Tag, error) { 14 fields := []string{ 15 "refname:short", 16 "objectname", ··· 29 if i != 0 { 30 outFormat.WriteString(fieldSeparator) 31 } 32 - outFormat.WriteString(fmt.Sprintf("%%(%s)", f)) 33 } 34 outFormat.WriteString("") 35 outFormat.WriteString(recordSeparator) 36 37 - output, err := g.forEachRef(outFormat.String(), "--sort=-creatordate", "refs/tags") 38 if err != nil { 39 return nil, fmt.Errorf("failed to get tags: %w", err) 40 } ··· 44 return nil, nil 45 } 46 47 tags := make([]object.Tag, 0, len(records)) 48 49 for _, line := range records {
··· 10 "github.com/go-git/go-git/v5/plumbing/object" 11 ) 12 13 + type TagsOptions struct { 14 + Limit int 15 + Offset int 16 + Pattern string 17 + } 18 + 19 + func (g *GitRepo) Tags(opts *TagsOptions) ([]object.Tag, error) { 20 + if opts == nil { 21 + opts = &TagsOptions{} 22 + } 23 + 24 + if opts.Pattern == "" { 25 + opts.Pattern = "refs/tags" 26 + } 27 + 28 fields := []string{ 29 "refname:short", 30 "objectname", ··· 43 if i != 0 { 44 outFormat.WriteString(fieldSeparator) 45 } 46 + fmt.Fprintf(&outFormat, "%%(%s)", f) 47 } 48 outFormat.WriteString("") 49 outFormat.WriteString(recordSeparator) 50 51 + args := []string{outFormat.String(), "--sort=-creatordate"} 52 + 53 + // only add the count if the limit is a non-zero value, 54 + // if it is zero, get as many tags as we can 55 + if opts.Limit > 0 { 56 + args = append(args, fmt.Sprintf("--count=%d", opts.Offset+opts.Limit)) 57 + } 58 + 59 + args = append(args, opts.Pattern) 60 + 61 + output, err := g.forEachRef(args...) 62 if err != nil { 63 return nil, fmt.Errorf("failed to get tags: %w", err) 64 } ··· 68 return nil, nil 69 } 70 71 + startIdx := opts.Offset 72 + if startIdx >= len(records) { 73 + return nil, nil 74 + } 75 + 76 + endIdx := len(records) 77 + if opts.Limit > 0 { 78 + endIdx = min(startIdx+opts.Limit, len(records)) 79 + } 80 + 81 + records = records[startIdx:endIdx] 82 tags := make([]object.Tag, 0, len(records)) 83 84 for _, line := range records {
+365
knotserver/git/tag_test.go
···
··· 1 + package git 2 + 3 + import ( 4 + "path/filepath" 5 + "testing" 6 + "time" 7 + 8 + gogit "github.com/go-git/go-git/v5" 9 + "github.com/go-git/go-git/v5/plumbing" 10 + "github.com/go-git/go-git/v5/plumbing/object" 11 + "github.com/stretchr/testify/assert" 12 + "github.com/stretchr/testify/require" 13 + "github.com/stretchr/testify/suite" 14 + ) 15 + 16 + type TagSuite struct { 17 + suite.Suite 18 + *RepoSuite 19 + } 20 + 21 + func TestTagSuite(t *testing.T) { 22 + t.Parallel() 23 + suite.Run(t, new(TagSuite)) 24 + } 25 + 26 + func (s *TagSuite) SetupTest() { 27 + s.RepoSuite = NewRepoSuite(s.T()) 28 + } 29 + 30 + func (s *TagSuite) TearDownTest() { 31 + s.RepoSuite.cleanup() 32 + } 33 + 34 + func (s *TagSuite) setupRepoWithTags() { 35 + s.init() 36 + 37 + // create commits for tagging 38 + commit1 := s.commitFile("file1.txt", "content 1", "Add file1") 39 + commit2 := s.commitFile("file2.txt", "content 2", "Add file2") 40 + commit3 := s.commitFile("file3.txt", "content 3", "Add file3") 41 + commit4 := s.commitFile("file4.txt", "content 4", "Add file4") 42 + commit5 := s.commitFile("file5.txt", "content 5", "Add file5") 43 + 44 + // create annotated tags 45 + s.createAnnotatedTag( 46 + "v1.0.0", 47 + commit1, 48 + "Tagger One", 49 + "tagger1@example.com", 50 + "Release version 1.0.0\n\nThis is the first stable release.", 51 + s.baseTime.Add(1*time.Hour), 52 + ) 53 + 54 + s.createAnnotatedTag( 55 + "v1.1.0", 56 + commit2, 57 + "Tagger Two", 58 + "tagger2@example.com", 59 + "Release version 1.1.0", 60 + s.baseTime.Add(2*time.Hour), 61 + ) 62 + 63 + // create lightweight tags 64 + s.createLightweightTag("v2.0.0", commit3) 65 + s.createLightweightTag("v2.1.0", commit4) 66 + 67 + // create another annotated tag 68 + s.createAnnotatedTag( 69 + "v3.0.0", 70 + commit5, 71 + "Tagger Three", 72 + "tagger3@example.com", 73 + "Major version 3.0.0\n\nBreaking changes included.", 74 + s.baseTime.Add(3*time.Hour), 75 + ) 76 + } 77 + 78 + func (s *TagSuite) TestTags_All() { 79 + s.setupRepoWithTags() 80 + 81 + tags, err := s.repo.Tags(nil) 82 + require.NoError(s.T(), err) 83 + 84 + // we created 5 tags total (3 annotated, 2 lightweight) 85 + assert.Len(s.T(), tags, 5, "expected 5 tags") 86 + 87 + // verify tags are sorted by creation date (newest first) 88 + expectedAnnotated := map[string]bool{ 89 + "v1.0.0": true, 90 + "v1.1.0": true, 91 + "v3.0.0": true, 92 + } 93 + 94 + expectedLightweight := map[string]bool{ 95 + "v2.0.0": true, 96 + "v2.1.0": true, 97 + } 98 + 99 + for _, tag := range tags { 100 + if expectedAnnotated[tag.Name] { 101 + // annotated tags should have tagger info 102 + assert.NotEmpty(s.T(), tag.Tagger.Name, "annotated tag %s should have tagger name", tag.Name) 103 + assert.NotEmpty(s.T(), tag.Message, "annotated tag %s should have message", tag.Name) 104 + } else if expectedLightweight[tag.Name] { 105 + // lightweight tags won't have tagger info or message (they'll have empty values) 106 + } else { 107 + s.T().Errorf("unexpected tag name: %s", tag.Name) 108 + } 109 + } 110 + } 111 + 112 + func (s *TagSuite) TestTags_WithLimit() { 113 + s.setupRepoWithTags() 114 + 115 + tests := []struct { 116 + name string 117 + limit int 118 + expectedCount int 119 + }{ 120 + { 121 + name: "limit 1", 122 + limit: 1, 123 + expectedCount: 1, 124 + }, 125 + { 126 + name: "limit 2", 127 + limit: 2, 128 + expectedCount: 2, 129 + }, 130 + { 131 + name: "limit 3", 132 + limit: 3, 133 + expectedCount: 3, 134 + }, 135 + { 136 + name: "limit 10 (more than available)", 137 + limit: 10, 138 + expectedCount: 
5, 139 + }, 140 + } 141 + 142 + for _, tt := range tests { 143 + s.Run(tt.name, func() { 144 + tags, err := s.repo.Tags(&TagsOptions{ 145 + Limit: tt.limit, 146 + }) 147 + require.NoError(s.T(), err) 148 + assert.Len(s.T(), tags, tt.expectedCount, "expected %d tags", tt.expectedCount) 149 + }) 150 + } 151 + } 152 + 153 + func (s *TagSuite) TestTags_WithOffset() { 154 + s.setupRepoWithTags() 155 + 156 + tests := []struct { 157 + name string 158 + offset int 159 + expectedCount int 160 + }{ 161 + { 162 + name: "offset 0", 163 + offset: 0, 164 + expectedCount: 5, 165 + }, 166 + { 167 + name: "offset 1", 168 + offset: 1, 169 + expectedCount: 4, 170 + }, 171 + { 172 + name: "offset 2", 173 + offset: 2, 174 + expectedCount: 3, 175 + }, 176 + { 177 + name: "offset 4", 178 + offset: 4, 179 + expectedCount: 1, 180 + }, 181 + { 182 + name: "offset 5 (all skipped)", 183 + offset: 5, 184 + expectedCount: 0, 185 + }, 186 + { 187 + name: "offset 10 (more than available)", 188 + offset: 10, 189 + expectedCount: 0, 190 + }, 191 + } 192 + 193 + for _, tt := range tests { 194 + s.Run(tt.name, func() { 195 + tags, err := s.repo.Tags(&TagsOptions{ 196 + Offset: tt.offset, 197 + }) 198 + require.NoError(s.T(), err) 199 + assert.Len(s.T(), tags, tt.expectedCount, "expected %d tags", tt.expectedCount) 200 + }) 201 + } 202 + } 203 + 204 + func (s *TagSuite) TestTags_WithLimitAndOffset() { 205 + s.setupRepoWithTags() 206 + 207 + tests := []struct { 208 + name string 209 + limit int 210 + offset int 211 + expectedCount int 212 + }{ 213 + { 214 + name: "limit 2, offset 0", 215 + limit: 2, 216 + offset: 0, 217 + expectedCount: 2, 218 + }, 219 + { 220 + name: "limit 2, offset 1", 221 + limit: 2, 222 + offset: 1, 223 + expectedCount: 2, 224 + }, 225 + { 226 + name: "limit 2, offset 3", 227 + limit: 2, 228 + offset: 3, 229 + expectedCount: 2, 230 + }, 231 + { 232 + name: "limit 2, offset 4", 233 + limit: 2, 234 + offset: 4, 235 + expectedCount: 1, 236 + }, 237 + { 238 + name: "limit 3, offset 2", 239 + limit: 3, 240 + offset: 2, 241 + expectedCount: 3, 242 + }, 243 + { 244 + name: "limit 10, offset 3", 245 + limit: 10, 246 + offset: 3, 247 + expectedCount: 2, 248 + }, 249 + } 250 + 251 + for _, tt := range tests { 252 + s.Run(tt.name, func() { 253 + tags, err := s.repo.Tags(&TagsOptions{ 254 + Limit: tt.limit, 255 + Offset: tt.offset, 256 + }) 257 + require.NoError(s.T(), err) 258 + assert.Len(s.T(), tags, tt.expectedCount, "expected %d tags", tt.expectedCount) 259 + }) 260 + } 261 + } 262 + 263 + func (s *TagSuite) TestTags_EmptyRepo() { 264 + repoPath := filepath.Join(s.tempDir, "empty-repo") 265 + 266 + _, err := gogit.PlainInit(repoPath, false) 267 + require.NoError(s.T(), err) 268 + 269 + gitRepo, err := PlainOpen(repoPath) 270 + require.NoError(s.T(), err) 271 + 272 + tags, err := gitRepo.Tags(nil) 273 + require.NoError(s.T(), err) 274 + 275 + if tags != nil { 276 + assert.Empty(s.T(), tags, "expected no tags in empty repo") 277 + } 278 + } 279 + 280 + func (s *TagSuite) TestTags_Pagination() { 281 + s.setupRepoWithTags() 282 + 283 + allTags, err := s.repo.Tags(nil) 284 + require.NoError(s.T(), err) 285 + assert.Len(s.T(), allTags, 5, "expected 5 tags") 286 + 287 + pageSize := 2 288 + var paginatedTags []object.Tag 289 + 290 + for offset := 0; offset < len(allTags); offset += pageSize { 291 + tags, err := s.repo.Tags(&TagsOptions{ 292 + Limit: pageSize, 293 + Offset: offset, 294 + }) 295 + require.NoError(s.T(), err) 296 + paginatedTags = append(paginatedTags, tags...) 
297 + } 298 + 299 + assert.Len(s.T(), paginatedTags, len(allTags), "pagination should return all tags") 300 + 301 + for i := range allTags { 302 + assert.Equal(s.T(), allTags[i].Name, paginatedTags[i].Name, 303 + "tag at index %d differs", i) 304 + } 305 + } 306 + 307 + func (s *TagSuite) TestTags_VerifyAnnotatedTagFields() { 308 + s.setupRepoWithTags() 309 + 310 + tags, err := s.repo.Tags(nil) 311 + require.NoError(s.T(), err) 312 + 313 + var v1Tag *object.Tag 314 + for i := range tags { 315 + if tags[i].Name == "v1.0.0" { 316 + v1Tag = &tags[i] 317 + break 318 + } 319 + } 320 + 321 + require.NotNil(s.T(), v1Tag, "v1.0.0 tag not found") 322 + 323 + assert.Equal(s.T(), "Tagger One", v1Tag.Tagger.Name, "tagger name should match") 324 + assert.Equal(s.T(), "tagger1@example.com", v1Tag.Tagger.Email, "tagger email should match") 325 + 326 + assert.Equal(s.T(), "Release version 1.0.0\n\nThis is the first stable release.", 327 + v1Tag.Message, "tag message should match") 328 + 329 + assert.Equal(s.T(), plumbing.TagObject, v1Tag.TargetType, 330 + "target type should be CommitObject") 331 + 332 + assert.False(s.T(), v1Tag.Hash.IsZero(), "tag hash should be set") 333 + 334 + assert.False(s.T(), v1Tag.Target.IsZero(), "target hash should be set") 335 + } 336 + 337 + func (s *TagSuite) TestTags_NilOptions() { 338 + s.setupRepoWithTags() 339 + 340 + tags, err := s.repo.Tags(nil) 341 + require.NoError(s.T(), err) 342 + assert.Len(s.T(), tags, 5, "nil options should return all tags") 343 + } 344 + 345 + func (s *TagSuite) TestTags_ZeroLimitAndOffset() { 346 + s.setupRepoWithTags() 347 + 348 + tags, err := s.repo.Tags(&TagsOptions{ 349 + Limit: 0, 350 + Offset: 0, 351 + }) 352 + require.NoError(s.T(), err) 353 + assert.Len(s.T(), tags, 5, "zero limit should return all tags") 354 + } 355 + 356 + func (s *TagSuite) TestTags_Pattern() { 357 + s.setupRepoWithTags() 358 + 359 + v1tag, err := s.repo.Tags(&TagsOptions{ 360 + Pattern: "refs/tags/v1.0.0", 361 + }) 362 + 363 + require.NoError(s.T(), err) 364 + assert.Len(s.T(), v1tag, 1, "expected 1 tag") 365 + }
+141
knotserver/git/test_common.go
···
··· 1 + package git 2 + 3 + import ( 4 + "os" 5 + "path/filepath" 6 + "testing" 7 + "time" 8 + 9 + gogit "github.com/go-git/go-git/v5" 10 + "github.com/go-git/go-git/v5/plumbing" 11 + "github.com/go-git/go-git/v5/plumbing/object" 12 + "github.com/stretchr/testify/require" 13 + ) 14 + 15 + type RepoSuite struct { 16 + t *testing.T 17 + tempDir string 18 + repo *GitRepo 19 + baseTime time.Time 20 + } 21 + 22 + func NewRepoSuite(t *testing.T) *RepoSuite { 23 + tempDir, err := os.MkdirTemp("", "git-test-*") 24 + require.NoError(t, err) 25 + 26 + return &RepoSuite{ 27 + t: t, 28 + tempDir: tempDir, 29 + baseTime: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), 30 + } 31 + } 32 + 33 + func (h *RepoSuite) cleanup() { 34 + if h.tempDir != "" { 35 + os.RemoveAll(h.tempDir) 36 + } 37 + } 38 + 39 + func (h *RepoSuite) init() *GitRepo { 40 + repoPath := filepath.Join(h.tempDir, "test-repo") 41 + 42 + // initialize repository 43 + r, err := gogit.PlainInit(repoPath, false) 44 + require.NoError(h.t, err) 45 + 46 + // configure git user 47 + cfg, err := r.Config() 48 + require.NoError(h.t, err) 49 + cfg.User.Name = "Test User" 50 + cfg.User.Email = "test@example.com" 51 + err = r.SetConfig(cfg) 52 + require.NoError(h.t, err) 53 + 54 + // create initial commit with a file 55 + w, err := r.Worktree() 56 + require.NoError(h.t, err) 57 + 58 + // create initial file 59 + initialFile := filepath.Join(repoPath, "README.md") 60 + err = os.WriteFile(initialFile, []byte("# Test Repository\n\nInitial content.\n"), 0644) 61 + require.NoError(h.t, err) 62 + 63 + _, err = w.Add("README.md") 64 + require.NoError(h.t, err) 65 + 66 + _, err = w.Commit("Initial commit", &gogit.CommitOptions{ 67 + Author: &object.Signature{ 68 + Name: "Test User", 69 + Email: "test@example.com", 70 + When: h.baseTime, 71 + }, 72 + }) 73 + require.NoError(h.t, err) 74 + 75 + gitRepo, err := PlainOpen(repoPath) 76 + require.NoError(h.t, err) 77 + 78 + h.repo = gitRepo 79 + return gitRepo 80 + } 81 + 82 + func (h *RepoSuite) commitFile(filename, content, message string) plumbing.Hash { 83 + filePath := filepath.Join(h.repo.path, filename) 84 + dir := filepath.Dir(filePath) 85 + 86 + err := os.MkdirAll(dir, 0755) 87 + require.NoError(h.t, err) 88 + 89 + err = os.WriteFile(filePath, []byte(content), 0644) 90 + require.NoError(h.t, err) 91 + 92 + w, err := h.repo.r.Worktree() 93 + require.NoError(h.t, err) 94 + 95 + _, err = w.Add(filename) 96 + require.NoError(h.t, err) 97 + 98 + hash, err := w.Commit(message, &gogit.CommitOptions{ 99 + Author: &object.Signature{ 100 + Name: "Test User", 101 + Email: "test@example.com", 102 + }, 103 + }) 104 + require.NoError(h.t, err) 105 + 106 + return hash 107 + } 108 + 109 + func (h *RepoSuite) createAnnotatedTag(name string, commit plumbing.Hash, taggerName, taggerEmail, message string, when time.Time) { 110 + _, err := h.repo.r.CreateTag(name, commit, &gogit.CreateTagOptions{ 111 + Tagger: &object.Signature{ 112 + Name: taggerName, 113 + Email: taggerEmail, 114 + When: when, 115 + }, 116 + Message: message, 117 + }) 118 + require.NoError(h.t, err) 119 + } 120 + 121 + func (h *RepoSuite) createLightweightTag(name string, commit plumbing.Hash) { 122 + ref := plumbing.NewReferenceFromStrings("refs/tags/"+name, commit.String()) 123 + err := h.repo.r.Storer.SetReference(ref) 124 + require.NoError(h.t, err) 125 + } 126 + 127 + func (h *RepoSuite) createBranch(name string, commit plumbing.Hash) { 128 + ref := plumbing.NewReferenceFromStrings("refs/heads/"+name, commit.String()) 129 + err := 
h.repo.r.Storer.SetReference(ref) 130 + require.NoError(h.t, err) 131 + } 132 + 133 + func (h *RepoSuite) checkoutBranch(name string) { 134 + w, err := h.repo.r.Worktree() 135 + require.NoError(h.t, err) 136 + 137 + err = w.Checkout(&gogit.CheckoutOptions{ 138 + Branch: plumbing.NewBranchReferenceName(name), 139 + }) 140 + require.NoError(h.t, err) 141 + }
+11 -1
knotserver/git/tree.go
··· 48 func (g *GitRepo) makeNiceTree(ctx context.Context, subtree *object.Tree, parent string) []types.NiceTree { 49 nts := []types.NiceTree{} 50 51 - times, err := g.calculateCommitTimeIn(ctx, subtree, parent, 2*time.Second) 52 if err != nil { 53 return nts 54 }
··· 48 func (g *GitRepo) makeNiceTree(ctx context.Context, subtree *object.Tree, parent string) []types.NiceTree {
49 nts := []types.NiceTree{}
50
51 + entries := make([]string, 0, len(subtree.Entries))
52 + for _, e := range subtree.Entries {
53 + entries = append(entries, e.Name)
54 + }
55 +
56 + lastCommitDir := lastCommitDir{
57 + dir: parent,
58 + entries: entries,
59 + }
60 +
61 + times, err := g.lastCommitDirIn(ctx, lastCommitDir, 2*time.Second)
62 if err != nil {
63 return nts
64 }
+25
knotserver/router.go
··· 5 "fmt" 6 "log/slog" 7 "net/http" 8 9 "github.com/go-chi/chi/v5" 10 "tangled.org/core/idresolver" ··· 79 }) 80 81 r.Route("/{did}", func(r chi.Router) { 82 r.Route("/{name}", func(r chi.Router) { 83 // routes for git operations 84 r.Get("/info/refs", h.InfoRefs) ··· 114 } 115 116 return xrpc.Router() 117 } 118 119 func (h *Knot) configureOwner() error {
··· 5 "fmt" 6 "log/slog" 7 "net/http" 8 + "strings" 9 10 "github.com/go-chi/chi/v5" 11 "tangled.org/core/idresolver" ··· 80 }) 81 82 r.Route("/{did}", func(r chi.Router) { 83 + r.Use(h.resolveDidRedirect) 84 r.Route("/{name}", func(r chi.Router) { 85 // routes for git operations 86 r.Get("/info/refs", h.InfoRefs) ··· 116 } 117 118 return xrpc.Router() 119 + } 120 + 121 + func (h *Knot) resolveDidRedirect(next http.Handler) http.Handler { 122 + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 123 + didOrHandle := chi.URLParam(r, "did") 124 + if strings.HasPrefix(didOrHandle, "did:") { 125 + next.ServeHTTP(w, r) 126 + return 127 + } 128 + 129 + trimmed := strings.TrimPrefix(didOrHandle, "@") 130 + id, err := h.resolver.ResolveIdent(r.Context(), trimmed) 131 + if err != nil { 132 + // invalid did or handle 133 + h.l.Error("failed to resolve did/handle", "handle", trimmed, "err", err) 134 + http.Error(w, fmt.Sprintf("failed to resolve did/handle: %s", trimmed), http.StatusInternalServerError) 135 + return 136 + } 137 + 138 + suffix := strings.TrimPrefix(r.URL.Path, "/"+didOrHandle) 139 + newPath := fmt.Sprintf("/%s/%s?%s", id.DID.String(), suffix, r.URL.RawQuery) 140 + http.Redirect(w, r, newPath, http.StatusTemporaryRedirect) 141 + }) 142 } 143 144 func (h *Knot) configureOwner() error {
+7 -1
knotserver/xrpc/merge_check.go
··· 9 securejoin "github.com/cyphar/filepath-securejoin" 10 "tangled.org/core/api/tangled" 11 "tangled.org/core/knotserver/git" 12 xrpcerr "tangled.org/core/xrpc/errors" 13 ) 14 ··· 51 return 52 } 53 54 - err = gr.MergeCheck(data.Patch, data.Branch) 55 56 response := tangled.RepoMergeCheck_Output{ 57 Is_conflicted: false,
··· 9 securejoin "github.com/cyphar/filepath-securejoin" 10 "tangled.org/core/api/tangled" 11 "tangled.org/core/knotserver/git" 12 + "tangled.org/core/patchutil" 13 xrpcerr "tangled.org/core/xrpc/errors" 14 ) 15 ··· 52 return 53 } 54 55 + mo := git.MergeOptions{} 56 + mo.CommitterName = x.Config.Git.UserName 57 + mo.CommitterEmail = x.Config.Git.UserEmail 58 + mo.FormatPatch = patchutil.IsFormatPatch(data.Patch) 59 + 60 + err = gr.MergeCheckWithOptions(data.Patch, data.Branch, mo) 61 62 response := tangled.RepoMergeCheck_Output{ 63 Is_conflicted: false,
+23
knotserver/xrpc/repo_blob.go
··· 1 package xrpc 2 3 import ( 4 "crypto/sha256" 5 "encoding/base64" 6 "fmt" ··· 8 "path/filepath" 9 "slices" 10 "strings" 11 12 "tangled.org/core/api/tangled" 13 "tangled.org/core/knotserver/git" ··· 140 141 if mimeType != "" { 142 response.MimeType = &mimeType 143 } 144 145 writeJson(w, response)
··· 1 package xrpc 2 3 import ( 4 + "context" 5 "crypto/sha256" 6 "encoding/base64" 7 "fmt" ··· 9 "path/filepath" 10 "slices" 11 "strings" 12 + "time" 13 14 "tangled.org/core/api/tangled" 15 "tangled.org/core/knotserver/git" ··· 142 143 if mimeType != "" { 144 response.MimeType = &mimeType 145 + } 146 + 147 + ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second) 148 + defer cancel() 149 + 150 + lastCommit, err := gr.LastCommitFile(ctx, treePath) 151 + if err == nil && lastCommit != nil { 152 + response.LastCommit = &tangled.RepoBlob_LastCommit{ 153 + Hash: lastCommit.Hash.String(), 154 + Message: lastCommit.Message, 155 + When: lastCommit.When.Format(time.RFC3339), 156 + } 157 + 158 + // try to get author information 159 + commit, err := gr.Commit(lastCommit.Hash) 160 + if err == nil { 161 + response.LastCommit.Author = &tangled.RepoBlob_Signature{ 162 + Name: commit.Author.Name, 163 + Email: commit.Author.Email, 164 + } 165 + } 166 } 167 168 writeJson(w, response)
+14 -21
knotserver/xrpc/repo_branches.go
··· 17 return 18 } 19 20 - cursor := r.URL.Query().Get("cursor") 21 22 - // limit := 50 // default 23 - // if limitStr := r.URL.Query().Get("limit"); limitStr != "" { 24 - // if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 100 { 25 - // limit = l 26 - // } 27 - // } 28 29 - limit := 500 30 31 gr, err := git.PlainOpen(repoPath) 32 if err != nil { ··· 34 return 35 } 36 37 - branches, _ := gr.Branches() 38 - 39 - offset := 0 40 - if cursor != "" { 41 - if o, err := strconv.Atoi(cursor); err == nil && o >= 0 && o < len(branches) { 42 - offset = o 43 - } 44 - } 45 - 46 - end := min(offset+limit, len(branches)) 47 - 48 - paginatedBranches := branches[offset:end] 49 50 // Create response using existing types.RepoBranchesResponse 51 response := types.RepoBranchesResponse{ 52 - Branches: paginatedBranches, 53 } 54 55 writeJson(w, response)
··· 17 return
18 }
19
20 + // default
21 + limit := 50
22 + offset := 0
23
24 + if l, err := strconv.Atoi(r.URL.Query().Get("limit")); err == nil && l > 0 && l <= 100 {
25 + limit = l
26 + }
27
28 + if o, err := strconv.Atoi(r.URL.Query().Get("cursor")); err == nil && o > 0 {
29 + offset = o
30 + }
31
32 gr, err := git.PlainOpen(repoPath)
33 if err != nil {
···
35 return
36 }
37
38 + branches, _ := gr.Branches(&git.BranchesOptions{
39 + Limit: limit,
40 + Offset: offset,
41 + })
42
43 // Create response using existing types.RepoBranchesResponse
44 response := types.RepoBranchesResponse{
45 + Branches: branches,
46 }
47
48 writeJson(w, response)
+85
knotserver/xrpc/repo_tag.go
···
··· 1 + package xrpc
2 +
3 + import (
4 + "fmt"
5 + "net/http"
6 +
7 + "github.com/go-git/go-git/v5/plumbing"
8 + "github.com/go-git/go-git/v5/plumbing/object"
9 +
10 + "tangled.org/core/knotserver/git"
11 + "tangled.org/core/types"
12 + xrpcerr "tangled.org/core/xrpc/errors"
13 + )
14 +
15 + func (x *Xrpc) RepoTag(w http.ResponseWriter, r *http.Request) {
16 + repo := r.URL.Query().Get("repo")
17 + repoPath, err := x.parseRepoParam(repo)
18 + if err != nil {
19 + writeError(w, err.(xrpcerr.XrpcError), http.StatusBadRequest)
20 + return
21 + }
22 +
23 + tagName := r.URL.Query().Get("tag")
24 + if tagName == "" {
25 + writeError(w, xrpcerr.NewXrpcError(
26 + xrpcerr.WithTag("InvalidRequest"),
27 + xrpcerr.WithMessage("missing tag parameter"),
28 + ), http.StatusBadRequest)
29 + return
30 + }
31 +
32 + gr, err := git.PlainOpen(repoPath)
33 + if err != nil {
34 + x.Logger.Error("failed to open", "error", err)
35 + writeError(w, xrpcerr.RepoNotFoundError, http.StatusNoContent)
36 + return
37 + }
38 +
39 + // if this is not already formatted as refs/tags/v0.1.0, then format it
40 + if !plumbing.ReferenceName(tagName).IsTag() {
41 + tagName = plumbing.NewTagReferenceName(tagName).String()
42 + }
43 +
44 + tags, err := gr.Tags(&git.TagsOptions{
45 + Pattern: tagName,
46 + })
47 +
48 + if err != nil {
49 + x.Logger.Warn("getting tags", "error", err.Error())
50 + tags = []object.Tag{}
51 + }
52 +
53 + if len(tags) != 1 {
54 + writeError(w, xrpcerr.NewXrpcError(
55 + xrpcerr.WithTag("TagNotFound"),
56 + xrpcerr.WithMessage(fmt.Sprintf("expected 1 tag to be returned, got %d tags", len(tags))),
57 + ), http.StatusBadRequest)
58 + return
59 + }
60 +
61 + tag := tags[0]
62 +
63 + var target *object.Tag
64 + if tag.Target != plumbing.ZeroHash {
65 + target = &tag
66 + }
67 + tr := types.TagReference{
68 + Tag: target,
69 + }
70 +
71 + tr.Reference = types.Reference{
72 + Name: tag.Name,
73 + Hash: tag.Hash.String(),
74 + }
75 +
76 + if tag.Message != "" {
77 + tr.Message = tag.Message
78 + }
79 +
80 + response := types.RepoTagResponse{
81 + Tag: &tr,
82 + }
83 +
84 + writeJson(w, response)
85 + }
+15 -22
knotserver/xrpc/repo_tags.go
··· 20 return 21 } 22 23 - cursor := r.URL.Query().Get("cursor") 24 25 - limit := 50 // default 26 - if limitStr := r.URL.Query().Get("limit"); limitStr != "" { 27 - if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 100 { 28 - limit = l 29 - } 30 } 31 32 gr, err := git.PlainOpen(repoPath) ··· 36 return 37 } 38 39 - tags, err := gr.Tags() 40 if err != nil { 41 x.Logger.Warn("getting tags", "error", err.Error()) 42 tags = []object.Tag{} ··· 64 rtags = append(rtags, &tr) 65 } 66 67 - // apply pagination manually 68 - offset := 0 69 - if cursor != "" { 70 - if o, err := strconv.Atoi(cursor); err == nil && o >= 0 && o < len(rtags) { 71 - offset = o 72 - } 73 - } 74 - 75 - // calculate end index 76 - end := min(offset+limit, len(rtags)) 77 - 78 - paginatedTags := rtags[offset:end] 79 - 80 - // Create response using existing types.RepoTagsResponse 81 response := types.RepoTagsResponse{ 82 - Tags: paginatedTags, 83 } 84 85 writeJson(w, response)
··· 20 return
21 }
22
23 + // default
24 + limit := 50
25 + offset := 0
26
27 + if l, err := strconv.Atoi(r.URL.Query().Get("limit")); err == nil && l > 0 && l <= 100 {
28 + limit = l
29 + }
30 +
31 + if o, err := strconv.Atoi(r.URL.Query().Get("cursor")); err == nil && o > 0 {
32 + offset = o
33 }
34
35 gr, err := git.PlainOpen(repoPath)
···
39 return
40 }
41
42 + tags, err := gr.Tags(&git.TagsOptions{
43 + Limit: limit,
44 + Offset: offset,
45 + })
46 +
47 if err != nil {
48 x.Logger.Warn("getting tags", "error", err.Error())
49 tags = []object.Tag{}
···
71 rtags = append(rtags, &tr)
72 }
73
74 response := types.RepoTagsResponse{
75 + Tags: rtags,
76 }
77
78 writeJson(w, response)
+35
knotserver/xrpc/repo_tree.go
··· 9 "tangled.org/core/api/tangled" 10 "tangled.org/core/appview/pages/markup" 11 "tangled.org/core/knotserver/git" 12 xrpcerr "tangled.org/core/xrpc/errors" 13 ) 14 ··· 105 Filename: readmeFileName, 106 Contents: readmeContents, 107 }, 108 } 109 110 writeJson(w, response)
··· 9 "tangled.org/core/api/tangled" 10 "tangled.org/core/appview/pages/markup" 11 "tangled.org/core/knotserver/git" 12 + "tangled.org/core/types" 13 xrpcerr "tangled.org/core/xrpc/errors" 14 ) 15 ··· 106 Filename: readmeFileName, 107 Contents: readmeContents, 108 }, 109 + } 110 + 111 + // calculate lastCommit for the directory as a whole 112 + var lastCommitTree *types.LastCommitInfo 113 + for _, e := range files { 114 + if e.LastCommit == nil { 115 + continue 116 + } 117 + 118 + if lastCommitTree == nil { 119 + lastCommitTree = e.LastCommit 120 + continue 121 + } 122 + 123 + if lastCommitTree.When.After(e.LastCommit.When) { 124 + lastCommitTree = e.LastCommit 125 + } 126 + } 127 + 128 + if lastCommitTree != nil { 129 + response.LastCommit = &tangled.RepoTree_LastCommit{ 130 + Hash: lastCommitTree.Hash.String(), 131 + Message: lastCommitTree.Message, 132 + When: lastCommitTree.When.Format(time.RFC3339), 133 + } 134 + 135 + // try to get author information 136 + commit, err := gr.Commit(lastCommitTree.Hash) 137 + if err == nil { 138 + response.LastCommit.Author = &tangled.RepoTree_Signature{ 139 + Name: commit.Author.Name, 140 + Email: commit.Author.Email, 141 + } 142 + } 143 } 144 145 writeJson(w, response)
+1
knotserver/xrpc/xrpc.go
··· 59 r.Get("/"+tangled.RepoLogNSID, x.RepoLog) 60 r.Get("/"+tangled.RepoBranchesNSID, x.RepoBranches) 61 r.Get("/"+tangled.RepoTagsNSID, x.RepoTags) 62 r.Get("/"+tangled.RepoBlobNSID, x.RepoBlob) 63 r.Get("/"+tangled.RepoDiffNSID, x.RepoDiff) 64 r.Get("/"+tangled.RepoCompareNSID, x.RepoCompare)
··· 59 r.Get("/"+tangled.RepoLogNSID, x.RepoLog) 60 r.Get("/"+tangled.RepoBranchesNSID, x.RepoBranches) 61 r.Get("/"+tangled.RepoTagsNSID, x.RepoTags) 62 + r.Get("/"+tangled.RepoTagNSID, x.RepoTag) 63 r.Get("/"+tangled.RepoBlobNSID, x.RepoBlob) 64 r.Get("/"+tangled.RepoDiffNSID, x.RepoDiff) 65 r.Get("/"+tangled.RepoCompareNSID, x.RepoCompare)
+2 -1
lexicons/actor/profile.json
··· 45 "open-pull-request-count", 46 "open-issue-count", 47 "closed-issue-count", 48 - "repository-count" 49 ] 50 } 51 },
··· 45 "open-pull-request-count", 46 "open-issue-count", 47 "closed-issue-count", 48 + "repository-count", 49 + "star-count" 50 ] 51 } 52 },
-4
lexicons/repo/blob.json
··· 115 "type": "string", 116 "description": "Commit hash" 117 }, 118 - "shortHash": { 119 - "type": "string", 120 - "description": "Short commit hash" 121 - }, 122 "message": { 123 "type": "string", 124 "description": "Commit message"
··· 115 "type": "string", 116 "description": "Commit hash" 117 }, 118 "message": { 119 "type": "string", 120 "description": "Commit message"
+43
lexicons/repo/tag.json
···
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.repo.tag", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": [ 10 + "repo", 11 + "tag" 12 + ], 13 + "properties": { 14 + "repo": { 15 + "type": "string", 16 + "description": "Repository identifier in format 'did:plc:.../repoName'" 17 + }, 18 + "tag": { 19 + "type": "string", 20 + "description": "Name of tag, such as v1.3.0" 21 + } 22 + } 23 + }, 24 + "output": { 25 + "encoding": "*/*" 26 + }, 27 + "errors": [ 28 + { 29 + "name": "RepoNotFound", 30 + "description": "Repository not found or access denied" 31 + }, 32 + { 33 + "name": "TagNotFound", 34 + "description": "Tag not found" 35 + }, 36 + { 37 + "name": "InvalidRequest", 38 + "description": "Invalid request parameters" 39 + } 40 + ] 41 + } 42 + } 43 + }
+53 -5
lexicons/repo/tree.json
··· 6 "type": "query", 7 "parameters": { 8 "type": "params", 9 - "required": ["repo", "ref"], 10 "properties": { 11 "repo": { 12 "type": "string", ··· 27 "encoding": "application/json", 28 "schema": { 29 "type": "object", 30 - "required": ["ref", "files"], 31 "properties": { 32 "ref": { 33 "type": "string", ··· 45 "type": "ref", 46 "ref": "#readme", 47 "description": "Readme for this file tree" 48 }, 49 "files": { 50 "type": "array", ··· 77 }, 78 "readme": { 79 "type": "object", 80 - "required": ["filename", "contents"], 81 "properties": { 82 "filename": { 83 "type": "string", ··· 91 }, 92 "treeEntry": { 93 "type": "object", 94 - "required": ["name", "mode", "size"], 95 "properties": { 96 "name": { 97 "type": "string", ··· 113 }, 114 "lastCommit": { 115 "type": "object", 116 - "required": ["hash", "message", "when"], 117 "properties": { 118 "hash": { 119 "type": "string", ··· 123 "type": "string", 124 "description": "Commit message" 125 }, 126 "when": { 127 "type": "string", 128 "format": "datetime", 129 "description": "Commit timestamp" 130 } 131 } 132 }
··· 6 "type": "query", 7 "parameters": { 8 "type": "params", 9 + "required": [ 10 + "repo", 11 + "ref" 12 + ], 13 "properties": { 14 "repo": { 15 "type": "string", ··· 30 "encoding": "application/json", 31 "schema": { 32 "type": "object", 33 + "required": [ 34 + "ref", 35 + "files" 36 + ], 37 "properties": { 38 "ref": { 39 "type": "string", ··· 51 "type": "ref", 52 "ref": "#readme", 53 "description": "Readme for this file tree" 54 + }, 55 + "lastCommit": { 56 + "type": "ref", 57 + "ref": "#lastCommit" 58 }, 59 "files": { 60 "type": "array", ··· 87 }, 88 "readme": { 89 "type": "object", 90 + "required": [ 91 + "filename", 92 + "contents" 93 + ], 94 "properties": { 95 "filename": { 96 "type": "string", ··· 104 }, 105 "treeEntry": { 106 "type": "object", 107 + "required": [ 108 + "name", 109 + "mode", 110 + "size" 111 + ], 112 "properties": { 113 "name": { 114 "type": "string", ··· 130 }, 131 "lastCommit": { 132 "type": "object", 133 + "required": [ 134 + "hash", 135 + "message", 136 + "when" 137 + ], 138 "properties": { 139 "hash": { 140 "type": "string", ··· 144 "type": "string", 145 "description": "Commit message" 146 }, 147 + "author": { 148 + "type": "ref", 149 + "ref": "#signature" 150 + }, 151 "when": { 152 "type": "string", 153 "format": "datetime", 154 "description": "Commit timestamp" 155 + } 156 + } 157 + }, 158 + "signature": { 159 + "type": "object", 160 + "required": [ 161 + "name", 162 + "email", 163 + "when" 164 + ], 165 + "properties": { 166 + "name": { 167 + "type": "string", 168 + "description": "Author name" 169 + }, 170 + "email": { 171 + "type": "string", 172 + "description": "Author email" 173 + }, 174 + "when": { 175 + "type": "string", 176 + "format": "datetime", 177 + "description": "Author timestamp" 178 } 179 } 180 }
+16 -14
spindle/engine/engine.go
··· 30 } 31 } 32 33 var wg sync.WaitGroup 34 for eng, wfs := range pipeline.Workflows { 35 workflowTimeout := eng.WorkflowTimeout() ··· 45 Name: w.Name, 46 } 47 48 - err := db.StatusRunning(wid, n) 49 if err != nil { 50 l.Error("failed to set workflow status to running", "wid", wid, "err", err) 51 return 52 } 53 54 - err = eng.SetupWorkflow(ctx, wid, &w) 55 if err != nil { 56 // TODO(winter): Should this always set StatusFailed? 57 // In the original, we only do in a subset of cases. ··· 69 return 70 } 71 defer eng.DestroyWorkflow(ctx, wid) 72 - 73 - secretValues := make([]string, len(allSecrets)) 74 - for i, s := range allSecrets { 75 - secretValues[i] = s.Value 76 - } 77 - wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues) 78 - if err != nil { 79 - l.Warn("failed to setup step logger; logs will not be persisted", "error", err) 80 - wfLogger = nil 81 - } else { 82 - defer wfLogger.Close() 83 - } 84 85 ctx, cancel := context.WithTimeout(ctx, workflowTimeout) 86 defer cancel()
··· 30 } 31 } 32 33 + secretValues := make([]string, len(allSecrets)) 34 + for i, s := range allSecrets { 35 + secretValues[i] = s.Value 36 + } 37 + 38 var wg sync.WaitGroup 39 for eng, wfs := range pipeline.Workflows { 40 workflowTimeout := eng.WorkflowTimeout() ··· 50 Name: w.Name, 51 } 52 53 + wfLogger, err := models.NewFileWorkflowLogger(cfg.Server.LogDir, wid, secretValues) 54 + if err != nil { 55 + l.Warn("failed to setup step logger; logs will not be persisted", "error", err) 56 + wfLogger = models.NullLogger{} 57 + } else { 58 + l.Info("setup step logger; logs will be persisted", "logDir", cfg.Server.LogDir, "wid", wid) 59 + defer wfLogger.Close() 60 + } 61 + 62 + err = db.StatusRunning(wid, n) 63 if err != nil { 64 l.Error("failed to set workflow status to running", "wid", wid, "err", err) 65 return 66 } 67 68 + err = eng.SetupWorkflow(ctx, wid, &w, wfLogger) 69 if err != nil { 70 // TODO(winter): Should this always set StatusFailed? 71 // In the original, we only do in a subset of cases. ··· 83 return 84 } 85 defer eng.DestroyWorkflow(ctx, wid) 86 87 ctx, cancel := context.WithTimeout(ctx, workflowTimeout) 88 defer cancel()
+52 -9
spindle/engines/nixery/engine.go
··· 1 package nixery 2 3 import ( 4 "context" 5 "errors" 6 "fmt" 7 "io" 8 "log/slog" 9 - "os" 10 "path" 11 "runtime" 12 "sync" ··· 169 return e, nil 170 } 171 172 - func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow) error { 173 - e.l.Info("setting up workflow", "workflow", wid) 174 175 _, err := e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{ 176 Driver: "bridge", 177 }) 178 if err != nil { 179 return err 180 } 181 e.registerCleanup(wid, func(ctx context.Context) error { 182 if err := e.docker.NetworkRemove(ctx, networkName(wid)); err != nil { 183 return fmt.Errorf("removing network: %w", err) ··· 185 return nil 186 }) 187 188 addl := wf.Data.(addlFields) 189 190 reader, err := e.docker.ImagePull(ctx, addl.image, image.PullOptions{}) 191 if err != nil { 192 - e.l.Error("pipeline image pull failed!", "image", addl.image, "workflowId", wid, "error", err.Error()) 193 - 194 return fmt.Errorf("pulling image: %w", err) 195 } 196 defer reader.Close() 197 - io.Copy(os.Stdout, reader) 198 199 resp, err := e.docker.ContainerCreate(ctx, &container.Config{ 200 Image: addl.image, ··· 229 ExtraHosts: []string{"host.docker.internal:host-gateway"}, 230 }, nil, nil, "") 231 if err != nil { 232 return fmt.Errorf("creating container: %w", err) 233 } 234 e.registerCleanup(wid, func(ctx context.Context) error { 235 if err := e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{}); err != nil { 236 return fmt.Errorf("stopping container: %w", err) ··· 244 if err != nil { 245 return fmt.Errorf("removing container: %w", err) 246 } 247 return nil 248 }) 249 250 if err := e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { 251 return fmt.Errorf("starting container: %w", err) 252 } ··· 273 return err 274 } 275 276 execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID) 277 if err != nil { 278 return err ··· 290 return nil 291 } 292 293 - func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error { 294 addl := w.Data.(addlFields) 295 workflowEnvs := ConstructEnvs(w.Environment) 296 // TODO(winter): should SetupWorkflow also have secret access? ··· 313 envs.AddEnv(k, v) 314 } 315 } 316 envs.AddEnv("HOME", homeDir) 317 318 mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{ 319 Cmd: []string{"bash", "-c", step.Command()}, ··· 328 // start tailing logs in background 329 tailDone := make(chan error, 1) 330 go func() { 331 - tailDone <- e.tailStep(ctx, wfLogger, mkExecResp.ID, wid, idx, step) 332 }() 333 334 select { ··· 374 return nil 375 } 376 377 - func (e *Engine) tailStep(ctx context.Context, wfLogger *models.WorkflowLogger, execID string, wid models.WorkflowId, stepIdx int, step models.Step) error { 378 if wfLogger == nil { 379 return nil 380 }
··· 1 package nixery 2 3 import ( 4 + "bufio" 5 "context" 6 "errors" 7 "fmt" 8 "io" 9 "log/slog" 10 "path" 11 "runtime" 12 "sync" ··· 169 return e, nil 170 } 171 172 + func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow, wfLogger models.WorkflowLogger) error { 173 + /// -------------------------INITIAL SETUP------------------------------------------ 174 + l := e.l.With("workflow", wid) 175 + l.Info("setting up workflow") 176 + 177 + setupStep := Step{ 178 + name: "nixery image pull", 179 + kind: models.StepKindSystem, 180 + } 181 + setupStepIdx := -1 182 + 183 + wfLogger.ControlWriter(setupStepIdx, setupStep, models.StepStatusStart).Write([]byte{0}) 184 + defer wfLogger.ControlWriter(setupStepIdx, setupStep, models.StepStatusEnd).Write([]byte{0}) 185 186 + /// -------------------------NETWORK CREATION--------------------------------------- 187 _, err := e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{ 188 Driver: "bridge", 189 }) 190 if err != nil { 191 return err 192 } 193 + 194 e.registerCleanup(wid, func(ctx context.Context) error { 195 if err := e.docker.NetworkRemove(ctx, networkName(wid)); err != nil { 196 return fmt.Errorf("removing network: %w", err) ··· 198 return nil 199 }) 200 201 + /// -------------------------IMAGE PULL--------------------------------------------- 202 addl := wf.Data.(addlFields) 203 + l.Info("pulling image", "image", addl.image) 204 + fmt.Fprintf( 205 + wfLogger.DataWriter(setupStepIdx, "stdout"), 206 + "pulling image: %s", 207 + addl.image, 208 + ) 209 210 reader, err := e.docker.ImagePull(ctx, addl.image, image.PullOptions{}) 211 if err != nil { 212 + l.Error("pipeline image pull failed!", "error", err.Error()) 213 + fmt.Fprintf(wfLogger.DataWriter(setupStepIdx, "stderr"), "image pull failed: %s", err) 214 return fmt.Errorf("pulling image: %w", err) 215 } 216 defer reader.Close() 217 + 218 + scanner := bufio.NewScanner(reader) 219 + for scanner.Scan() { 220 + line := scanner.Text() 221 + wfLogger.DataWriter(setupStepIdx, "stdout").Write([]byte(line)) 222 + l.Info("image pull progress", "stdout", line) 223 + } 224 + 225 + /// -------------------------CONTAINER CREATION------------------------------------- 226 + l.Info("creating container") 227 + wfLogger.DataWriter(setupStepIdx, "stdout").Write([]byte("creating container...")) 228 229 resp, err := e.docker.ContainerCreate(ctx, &container.Config{ 230 Image: addl.image, ··· 259 ExtraHosts: []string{"host.docker.internal:host-gateway"}, 260 }, nil, nil, "") 261 if err != nil { 262 + fmt.Fprintf( 263 + wfLogger.DataWriter(setupStepIdx, "stderr"), 264 + "container creation failed: %s", 265 + err, 266 + ) 267 return fmt.Errorf("creating container: %w", err) 268 } 269 + 270 e.registerCleanup(wid, func(ctx context.Context) error { 271 if err := e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{}); err != nil { 272 return fmt.Errorf("stopping container: %w", err) ··· 280 if err != nil { 281 return fmt.Errorf("removing container: %w", err) 282 } 283 + 284 return nil 285 }) 286 287 + /// -------------------------CONTAINER START---------------------------------------- 288 + wfLogger.DataWriter(setupStepIdx, "stdout").Write([]byte("starting container...")) 289 if err := e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { 290 return fmt.Errorf("starting container: %w", err) 291 } ··· 312 return err 313 } 314 315 + /// -----------------------------------FINISH--------------------------------------- 316 execInspectResp, err := 
e.docker.ContainerExecInspect(ctx, mkExecResp.ID) 317 if err != nil { 318 return err ··· 330 return nil 331 } 332 333 + func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger models.WorkflowLogger) error { 334 addl := w.Data.(addlFields) 335 workflowEnvs := ConstructEnvs(w.Environment) 336 // TODO(winter): should SetupWorkflow also have secret access? ··· 353 envs.AddEnv(k, v) 354 } 355 } 356 + 357 envs.AddEnv("HOME", homeDir) 358 + existingPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 359 + envs.AddEnv("PATH", fmt.Sprintf("%s/.nix-profile/bin:/nix/var/nix/profiles/default/bin:%s", homeDir, existingPath)) 360 361 mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{ 362 Cmd: []string{"bash", "-c", step.Command()}, ··· 371 // start tailing logs in background 372 tailDone := make(chan error, 1) 373 go func() { 374 + tailDone <- e.tailStep(ctx, wfLogger, mkExecResp.ID, idx) 375 }() 376 377 select { ··· 417 return nil 418 } 419 420 + func (e *Engine) tailStep(ctx context.Context, wfLogger models.WorkflowLogger, execID string, stepIdx int) error { 421 if wfLogger == nil { 422 return nil 423 }
+1 -1
spindle/engines/nixery/setup_steps.go
··· 37 } 38 39 if len(customPackages) > 0 { 40 - installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile install" 41 cmd := fmt.Sprintf("%s %s", installCmd, strings.Join(customPackages, " ")) 42 installStep := Step{ 43 command: cmd,
··· 37 } 38 39 if len(customPackages) > 0 { 40 + installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile add" 41 cmd := fmt.Sprintf("%s %s", installCmd, strings.Join(customPackages, " ")) 42 installStep := Step{ 43 command: cmd,
+2 -2
spindle/models/engine.go
··· 10 11 type Engine interface { 12 InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*Workflow, error) 13 - SetupWorkflow(ctx context.Context, wid WorkflowId, wf *Workflow) error 14 WorkflowTimeout() time.Duration 15 DestroyWorkflow(ctx context.Context, wid WorkflowId) error 16 - RunStep(ctx context.Context, wid WorkflowId, w *Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *WorkflowLogger) error 17 }
··· 10 11 type Engine interface { 12 InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*Workflow, error) 13 + SetupWorkflow(ctx context.Context, wid WorkflowId, wf *Workflow, wfLogger WorkflowLogger) error 14 WorkflowTimeout() time.Duration 15 DestroyWorkflow(ctx context.Context, wid WorkflowId) error 16 + RunStep(ctx context.Context, wid WorkflowId, w *Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger WorkflowLogger) error 17 }
+22 -10
spindle/models/logger.go
··· 9 "strings" 10 ) 11 12 - type WorkflowLogger struct { 13 file *os.File 14 encoder *json.Encoder 15 mask *SecretMask 16 } 17 18 - func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) { 19 path := LogFilePath(baseDir, wid) 20 - 21 file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) 22 if err != nil { 23 return nil, fmt.Errorf("creating log file: %w", err) 24 } 25 - 26 - return &WorkflowLogger{ 27 file: file, 28 encoder: json.NewEncoder(file), 29 mask: NewSecretMask(secretValues), ··· 35 return logFilePath 36 } 37 38 - func (l *WorkflowLogger) Close() error { 39 return l.file.Close() 40 } 41 42 - func (l *WorkflowLogger) DataWriter(idx int, stream string) io.Writer { 43 return &dataWriter{ 44 logger: l, 45 idx: idx, ··· 47 } 48 } 49 50 - func (l *WorkflowLogger) ControlWriter(idx int, step Step, stepStatus StepStatus) io.Writer { 51 return &controlWriter{ 52 logger: l, 53 idx: idx, ··· 57 } 58 59 type dataWriter struct { 60 - logger *WorkflowLogger 61 idx int 62 stream string 63 } ··· 75 } 76 77 type controlWriter struct { 78 - logger *WorkflowLogger 79 idx int 80 step Step 81 stepStatus StepStatus
··· 9 "strings" 10 ) 11 12 + type WorkflowLogger interface { 13 + Close() error 14 + DataWriter(idx int, stream string) io.Writer 15 + ControlWriter(idx int, step Step, stepStatus StepStatus) io.Writer 16 + } 17 + 18 + type NullLogger struct{} 19 + 20 + func (l NullLogger) Close() error { return nil } 21 + func (l NullLogger) DataWriter(idx int, stream string) io.Writer { return io.Discard } 22 + func (l NullLogger) ControlWriter(idx int, step Step, stepStatus StepStatus) io.Writer { 23 + return io.Discard 24 + } 25 + 26 + type FileWorkflowLogger struct { 27 file *os.File 28 encoder *json.Encoder 29 mask *SecretMask 30 } 31 32 + func NewFileWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (WorkflowLogger, error) { 33 path := LogFilePath(baseDir, wid) 34 file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) 35 if err != nil { 36 return nil, fmt.Errorf("creating log file: %w", err) 37 } 38 + return &FileWorkflowLogger{ 39 file: file, 40 encoder: json.NewEncoder(file), 41 mask: NewSecretMask(secretValues), ··· 47 return logFilePath 48 } 49 50 + func (l *FileWorkflowLogger) Close() error { 51 return l.file.Close() 52 } 53 54 + func (l *FileWorkflowLogger) DataWriter(idx int, stream string) io.Writer { 55 return &dataWriter{ 56 logger: l, 57 idx: idx, ··· 59 } 60 } 61 62 + func (l *FileWorkflowLogger) ControlWriter(idx int, step Step, stepStatus StepStatus) io.Writer { 63 return &controlWriter{ 64 logger: l, 65 idx: idx, ··· 69 } 70 71 type dataWriter struct { 72 + logger *FileWorkflowLogger 73 idx int 74 stream string 75 } ··· 87 } 88 89 type controlWriter struct { 90 + logger *FileWorkflowLogger 91 idx int 92 step Step 93 stepStatus StepStatus
+4 -10
types/repo.go
···
94 Tags []*TagReference `json:"tags,omitempty"`
95 }
96
97 type RepoBranchesResponse struct {
98 Branches []Branch `json:"branches,omitempty"`
99 }
···
104
105 type RepoDefaultBranchResponse struct {
106 Branch string `json:"branch,omitempty"`
107 - }
108 -
109 - type RepoBlobResponse struct {
110 - Contents string `json:"contents,omitempty"`
111 - Ref string `json:"ref,omitempty"`
112 - Path string `json:"path,omitempty"`
113 - IsBinary bool `json:"is_binary,omitempty"`
114 -
115 - Lines int `json:"lines,omitempty"`
116 - SizeHint uint64 `json:"size_hint,omitempty"`
117 }
118
119 type ForkStatus int

···
94 Tags []*TagReference `json:"tags,omitempty"`
95 }
96
97 + type RepoTagResponse struct {
98 + Tag *TagReference `json:"tag,omitempty"`
99 + }
100 +
101 type RepoBranchesResponse struct {
102 Branches []Branch `json:"branches,omitempty"`
103 }
···
108
109 type RepoDefaultBranchResponse struct {
110 Branch string `json:"branch,omitempty"`
111 }
112
113 type ForkStatus int
+5
types/tree.go
···
105 Hash plumbing.Hash
106 Message string
107 When time.Time
108 }

···
105 Hash plumbing.Hash
106 Message string
107 When time.Time
108 + Author struct {
109 + Email string
110 + Name string
111 + When time.Time
112 + }
113 }