Monorepo for Tangled (tangled.org)

knotserver|appview: add branch rules #1235

Open — opened by pyotr.bsky.social, targeting master from pyotr.bsky.social/core:adding-branch-rules
Labels

None yet.

Assignees

None yet.

Participants 1
AT URI
at://did:plc:uxmy3zztxyhfk6mxrkun5tpr/sh.tangled.repo.pull/3mi5xihwr6c22
+10407 -2130
Interdiff #0 #1
+2
.gitignore
··· 22 22 genjwks.out 23 23 /nix/vm-data 24 24 blog/build/ 25 + build/ 26 + .wrangler/
+4 -4
.tangled/workflows/deploy-blog.yml
··· 16 16 mkdir -p appview/pages/static 17 17 touch appview/pages/static/x 18 18 19 + - name: generate css 20 + command: | 21 + tailwindcss -i input.css -o appview/pages/static/tw.css 22 + 19 23 - name: build blog cmd 20 24 command: | 21 25 go build -o blog.out ./cmd/blog 22 - 23 - - name: generate css 24 - command: | 25 - tailwindcss -i input.css -o appview/pages/static/tw.css 26 26 27 27 - name: build static site 28 28 command: |
+34
api/tangled/knotsubscribeRepos.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.knot.subscribeRepos 6 + 7 + const ( 8 + KnotSubscribeReposNSID = "sh.tangled.knot.subscribeRepos" 9 + ) 10 + 11 + // KnotSubscribeRepos_GitSync1 is a "gitSync1" in the sh.tangled.knot.subscribeRepos schema. 12 + type KnotSubscribeRepos_GitSync1 struct { 13 + // did: Repository DID identifier 14 + Did string `json:"did" cborgen:"did"` 15 + // seq: The stream sequence number of this message. 16 + Seq int64 `json:"seq" cborgen:"seq"` 17 + } 18 + 19 + // KnotSubscribeRepos_GitSync2 is a "gitSync2" in the sh.tangled.knot.subscribeRepos schema. 20 + type KnotSubscribeRepos_GitSync2 struct { 21 + // did: Repository AT-URI identifier 22 + Did *string `json:"did,omitempty" cborgen:"did,omitempty"` 23 + // seq: The stream sequence number of this message. 24 + Seq int64 `json:"seq" cborgen:"seq"` 25 + } 26 + 27 + // KnotSubscribeRepos_Identity is a "identity" in the sh.tangled.knot.subscribeRepos schema. 28 + type KnotSubscribeRepos_Identity struct { 29 + // did: Repository DID identifier 30 + Did string `json:"did" cborgen:"did"` 31 + // seq: The stream sequence number of this message. 32 + Seq int64 `json:"seq" cborgen:"seq"` 33 + Time string `json:"time" cborgen:"time"` 34 + }
api/tangled/repoUpdateBranchRule.go

This file has not been changed.

api/tangled/repocreateBranchRule.go

This file has not been changed.

api/tangled/repodeleteBranchRule.go

This file has not been changed.

api/tangled/repolistBranchRules.go

This file has not been changed.

+32
api/tangled/syncrequestCrawl.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.sync.requestCrawl 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + SyncRequestCrawlNSID = "sh.tangled.sync.requestCrawl" 15 + ) 16 + 17 + // SyncRequestCrawl_Input is the input argument to a sh.tangled.sync.requestCrawl call. 18 + type SyncRequestCrawl_Input struct { 19 + // ensureRepo: specific repository to ensure crawling 20 + EnsureRepo *string `json:"ensureRepo,omitempty" cborgen:"ensureRepo,omitempty"` 21 + // hostname: Hostname of the current service (eg, Knot) that is requesting to be crawled. 22 + Hostname string `json:"hostname" cborgen:"hostname"` 23 + } 24 + 25 + // SyncRequestCrawl calls the XRPC method "sh.tangled.sync.requestCrawl". 26 + func SyncRequestCrawl(ctx context.Context, c util.LexClient, input *SyncRequestCrawl_Input) error { 27 + if err := c.LexDo(ctx, util.Procedure, "application/json", "sh.tangled.sync.requestCrawl", nil, input, nil); err != nil { 28 + return err 29 + } 30 + 31 + return nil 32 + }
+50
api/tangled/tempanalyzeMerge.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.analyzeMerge 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempAnalyzeMergeNSID = "sh.tangled.git.temp.analyzeMerge" 15 + ) 16 + 17 + // GitTempAnalyzeMerge_ConflictInfo is a "conflictInfo" in the sh.tangled.git.temp.analyzeMerge schema. 18 + type GitTempAnalyzeMerge_ConflictInfo struct { 19 + // filename: Name of the conflicted file 20 + Filename string `json:"filename" cborgen:"filename"` 21 + // reason: Reason for the conflict 22 + Reason string `json:"reason" cborgen:"reason"` 23 + } 24 + 25 + // GitTempAnalyzeMerge_Output is the output of a sh.tangled.git.temp.analyzeMerge call. 26 + type GitTempAnalyzeMerge_Output struct { 27 + // conflicts: List of files with merge conflicts 28 + Conflicts []*GitTempAnalyzeMerge_ConflictInfo `json:"conflicts,omitempty" cborgen:"conflicts,omitempty"` 29 + // is_conflicted: Whether the merge has conflicts 30 + Is_conflicted bool `json:"is_conflicted" cborgen:"is_conflicted"` 31 + } 32 + 33 + // GitTempAnalyzeMerge calls the XRPC method "sh.tangled.git.temp.analyzeMerge". 34 + // 35 + // branch: Target branch to merge into 36 + // patch: Patch or pull request to check for merge conflicts 37 + // repo: AT-URI of the repository 38 + func GitTempAnalyzeMerge(ctx context.Context, c util.LexClient, branch string, patch string, repo string) (*GitTempAnalyzeMerge_Output, error) { 39 + var out GitTempAnalyzeMerge_Output 40 + 41 + params := map[string]interface{}{} 42 + params["branch"] = branch 43 + params["patch"] = patch 44 + params["repo"] = repo 45 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.analyzeMerge", params, nil, &out); err != nil { 46 + return nil, err 47 + } 48 + 49 + return &out, nil 50 + }
+71
api/tangled/tempdefs.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.defs 6 + 7 + import ( 8 + "github.com/bluesky-social/indigo/lex/util" 9 + ) 10 + 11 + const () 12 + 13 + // GitTempDefs_Blob is a "blob" in the sh.tangled.git.temp.defs schema. 14 + // 15 + // blob metadata. This object doesn't include the blob content 16 + type GitTempDefs_Blob struct { 17 + LastCommit *GitTempDefs_Commit `json:"lastCommit" cborgen:"lastCommit"` 18 + Mode string `json:"mode" cborgen:"mode"` 19 + // name: The file name 20 + Name string `json:"name" cborgen:"name"` 21 + // size: File size in bytes 22 + Size int64 `json:"size" cborgen:"size"` 23 + // submodule: Submodule information if path is a submodule 24 + Submodule *GitTempDefs_Submodule `json:"submodule,omitempty" cborgen:"submodule,omitempty"` 25 + } 26 + 27 + // GitTempDefs_Branch is a "branch" in the sh.tangled.git.temp.defs schema. 28 + type GitTempDefs_Branch struct { 29 + // commit: hydrated commit object 30 + Commit *GitTempDefs_Commit `json:"commit" cborgen:"commit"` 31 + // name: branch name 32 + Name string `json:"name" cborgen:"name"` 33 + } 34 + 35 + // GitTempDefs_Commit is a "commit" in the sh.tangled.git.temp.defs schema. 36 + type GitTempDefs_Commit struct { 37 + Author *GitTempDefs_Signature `json:"author" cborgen:"author"` 38 + Committer *GitTempDefs_Signature `json:"committer" cborgen:"committer"` 39 + Hash *string `json:"hash" cborgen:"hash"` 40 + Message string `json:"message" cborgen:"message"` 41 + Tree *string `json:"tree" cborgen:"tree"` 42 + } 43 + 44 + // GitTempDefs_Signature is a "signature" in the sh.tangled.git.temp.defs schema. 
45 + type GitTempDefs_Signature struct { 46 + // email: Person email 47 + Email string `json:"email" cborgen:"email"` 48 + // name: Person name 49 + Name string `json:"name" cborgen:"name"` 50 + // when: Timestamp of the signature 51 + When string `json:"when" cborgen:"when"` 52 + } 53 + 54 + // GitTempDefs_Submodule is a "submodule" in the sh.tangled.git.temp.defs schema. 55 + type GitTempDefs_Submodule struct { 56 + // branch: Branch to track in the submodule 57 + Branch *string `json:"branch,omitempty" cborgen:"branch,omitempty"` 58 + // name: Submodule name 59 + Name string `json:"name" cborgen:"name"` 60 + // url: Submodule repository URL 61 + Url string `json:"url" cborgen:"url"` 62 + } 63 + 64 + // GitTempDefs_Tag is a "tag" in the sh.tangled.git.temp.defs schema. 65 + type GitTempDefs_Tag struct { 66 + Message *string `json:"message,omitempty" cborgen:"message,omitempty"` 67 + // name: tag name 68 + Name string `json:"name" cborgen:"name"` 69 + Tagger *GitTempDefs_Signature `json:"tagger" cborgen:"tagger"` 70 + Target *util.LexiconTypeDecoder `json:"target" cborgen:"target"` 71 + }
+41
api/tangled/tempgetArchive.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getArchive 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempGetArchiveNSID = "sh.tangled.git.temp.getArchive" 16 + ) 17 + 18 + // GitTempGetArchive calls the XRPC method "sh.tangled.git.temp.getArchive". 19 + // 20 + // format: Archive format 21 + // prefix: Prefix for files in the archive 22 + // ref: Git reference (branch, tag, or commit SHA) 23 + // repo: AT-URI of the repository 24 + func GitTempGetArchive(ctx context.Context, c util.LexClient, format string, prefix string, ref string, repo string) ([]byte, error) { 25 + buf := new(bytes.Buffer) 26 + 27 + params := map[string]interface{}{} 28 + if format != "" { 29 + params["format"] = format 30 + } 31 + if prefix != "" { 32 + params["prefix"] = prefix 33 + } 34 + params["ref"] = ref 35 + params["repo"] = repo 36 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getArchive", params, nil, buf); err != nil { 37 + return nil, err 38 + } 39 + 40 + return buf.Bytes(), nil 41 + }
+37
api/tangled/tempgetBlob.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getBlob 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempGetBlobNSID = "sh.tangled.git.temp.getBlob" 16 + ) 17 + 18 + // GitTempGetBlob calls the XRPC method "sh.tangled.git.temp.getBlob". 19 + // 20 + // path: Path within the repository tree 21 + // ref: Git reference (branch, tag, or commit SHA) 22 + // repo: AT-URI of the repository 23 + func GitTempGetBlob(ctx context.Context, c util.LexClient, path string, ref string, repo string) ([]byte, error) { 24 + buf := new(bytes.Buffer) 25 + 26 + params := map[string]interface{}{} 27 + params["path"] = path 28 + if ref != "" { 29 + params["ref"] = ref 30 + } 31 + params["repo"] = repo 32 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getBlob", params, nil, buf); err != nil { 33 + return nil, err 34 + } 35 + 36 + return buf.Bytes(), nil 37 + }
+45
api/tangled/tempgetBranch.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getBranch 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempGetBranchNSID = "sh.tangled.git.temp.getBranch" 15 + ) 16 + 17 + // GitTempGetBranch_Output is the output of a sh.tangled.git.temp.getBranch call. 18 + type GitTempGetBranch_Output struct { 19 + Author *GitTempDefs_Signature `json:"author,omitempty" cborgen:"author,omitempty"` 20 + // hash: Latest commit hash on this branch 21 + Hash string `json:"hash" cborgen:"hash"` 22 + // message: Latest commit message 23 + Message *string `json:"message,omitempty" cborgen:"message,omitempty"` 24 + // name: Branch name 25 + Name string `json:"name" cborgen:"name"` 26 + // when: Timestamp of latest commit 27 + When string `json:"when" cborgen:"when"` 28 + } 29 + 30 + // GitTempGetBranch calls the XRPC method "sh.tangled.git.temp.getBranch". 31 + // 32 + // name: Branch name to get information for 33 + // repo: AT-URI of the repository 34 + func GitTempGetBranch(ctx context.Context, c util.LexClient, name string, repo string) (*GitTempGetBranch_Output, error) { 35 + var out GitTempGetBranch_Output 36 + 37 + params := map[string]interface{}{} 38 + params["name"] = name 39 + params["repo"] = repo 40 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getBranch", params, nil, &out); err != nil { 41 + return nil, err 42 + } 43 + 44 + return &out, nil 45 + }
+32
api/tangled/tempgetCommit.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getCommit 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempGetCommitNSID = "sh.tangled.git.temp.getCommit" 15 + ) 16 + 17 + // GitTempGetCommit calls the XRPC method "sh.tangled.git.temp.getCommit". 18 + // 19 + // ref: reference name to resolve 20 + // repo: AT-URI of the repository 21 + func GitTempGetCommit(ctx context.Context, c util.LexClient, ref string, repo string) (*GitTempDefs_Commit, error) { 22 + var out GitTempDefs_Commit 23 + 24 + params := map[string]interface{}{} 25 + params["ref"] = ref 26 + params["repo"] = repo 27 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getCommit", params, nil, &out); err != nil { 28 + return nil, err 29 + } 30 + 31 + return &out, nil 32 + }
+35
api/tangled/tempgetDiff.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getDiff 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempGetDiffNSID = "sh.tangled.git.temp.getDiff" 16 + ) 17 + 18 + // GitTempGetDiff calls the XRPC method "sh.tangled.git.temp.getDiff". 19 + // 20 + // repo: AT-URI of the repository 21 + // rev1: First revision (commit, branch, or tag) 22 + // rev2: Second revision (commit, branch, or tag) 23 + func GitTempGetDiff(ctx context.Context, c util.LexClient, repo string, rev1 string, rev2 string) ([]byte, error) { 24 + buf := new(bytes.Buffer) 25 + 26 + params := map[string]interface{}{} 27 + params["repo"] = repo 28 + params["rev1"] = rev1 29 + params["rev2"] = rev2 30 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getDiff", params, nil, buf); err != nil { 31 + return nil, err 32 + } 33 + 34 + return buf.Bytes(), nil 35 + }
+36
api/tangled/tempgetEntity.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getEntity 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempGetEntityNSID = "sh.tangled.git.temp.getEntity" 15 + ) 16 + 17 + // GitTempGetEntity calls the XRPC method "sh.tangled.git.temp.getEntity". 18 + // 19 + // path: path of the entity 20 + // ref: Git reference (branch, tag, or commit SHA) 21 + // repo: AT-URI of the repository 22 + func GitTempGetEntity(ctx context.Context, c util.LexClient, path string, ref string, repo string) (*GitTempDefs_Blob, error) { 23 + var out GitTempDefs_Blob 24 + 25 + params := map[string]interface{}{} 26 + params["path"] = path 27 + if ref != "" { 28 + params["ref"] = ref 29 + } 30 + params["repo"] = repo 31 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getEntity", params, nil, &out); err != nil { 32 + return nil, err 33 + } 34 + 35 + return &out, nil 36 + }
+30
api/tangled/tempgetHead.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getHead 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempGetHeadNSID = "sh.tangled.git.temp.getHead" 15 + ) 16 + 17 + // GitTempGetHead calls the XRPC method "sh.tangled.git.temp.getHead". 18 + // 19 + // repo: AT-URI of the repository 20 + func GitTempGetHead(ctx context.Context, c util.LexClient, repo string) (*GitTempDefs_Branch, error) { 21 + var out GitTempDefs_Branch 22 + 23 + params := map[string]interface{}{} 24 + params["repo"] = repo 25 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getHead", params, nil, &out); err != nil { 26 + return nil, err 27 + } 28 + 29 + return &out, nil 30 + }
+33
api/tangled/tempgetTag.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getTag 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempGetTagNSID = "sh.tangled.git.temp.getTag" 16 + ) 17 + 18 + // GitTempGetTag calls the XRPC method "sh.tangled.git.temp.getTag". 19 + // 20 + // repo: AT-URI of the repository 21 + // tag: Name of tag, such as v1.3.0 22 + func GitTempGetTag(ctx context.Context, c util.LexClient, repo string, tag string) ([]byte, error) { 23 + buf := new(bytes.Buffer) 24 + 25 + params := map[string]interface{}{} 26 + params["repo"] = repo 27 + params["tag"] = tag 28 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getTag", params, nil, buf); err != nil { 29 + return nil, err 30 + } 31 + 32 + return buf.Bytes(), nil 33 + }
+90
api/tangled/tempgetTree.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.getTree 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempGetTreeNSID = "sh.tangled.git.temp.getTree" 15 + ) 16 + 17 + // GitTempGetTree_LastCommit is a "lastCommit" in the sh.tangled.git.temp.getTree schema. 18 + type GitTempGetTree_LastCommit struct { 19 + Author *GitTempGetTree_Signature `json:"author,omitempty" cborgen:"author,omitempty"` 20 + // hash: Commit hash 21 + Hash string `json:"hash" cborgen:"hash"` 22 + // message: Commit message 23 + Message string `json:"message" cborgen:"message"` 24 + // when: Commit timestamp 25 + When string `json:"when" cborgen:"when"` 26 + } 27 + 28 + // GitTempGetTree_Output is the output of a sh.tangled.git.temp.getTree call. 29 + type GitTempGetTree_Output struct { 30 + // dotdot: Parent directory path 31 + Dotdot *string `json:"dotdot,omitempty" cborgen:"dotdot,omitempty"` 32 + Files []*GitTempGetTree_TreeEntry `json:"files" cborgen:"files"` 33 + LastCommit *GitTempGetTree_LastCommit `json:"lastCommit,omitempty" cborgen:"lastCommit,omitempty"` 34 + // parent: The parent path in the tree 35 + Parent *string `json:"parent,omitempty" cborgen:"parent,omitempty"` 36 + // readme: Readme for this file tree 37 + Readme *GitTempGetTree_Readme `json:"readme,omitempty" cborgen:"readme,omitempty"` 38 + // ref: The git reference used 39 + Ref string `json:"ref" cborgen:"ref"` 40 + } 41 + 42 + // GitTempGetTree_Readme is a "readme" in the sh.tangled.git.temp.getTree schema. 43 + type GitTempGetTree_Readme struct { 44 + // contents: Contents of the readme file 45 + Contents string `json:"contents" cborgen:"contents"` 46 + // filename: Name of the readme file 47 + Filename string `json:"filename" cborgen:"filename"` 48 + } 49 + 50 + // GitTempGetTree_Signature is a "signature" in the sh.tangled.git.temp.getTree schema. 
51 + type GitTempGetTree_Signature struct { 52 + // email: Author email 53 + Email string `json:"email" cborgen:"email"` 54 + // name: Author name 55 + Name string `json:"name" cborgen:"name"` 56 + // when: Author timestamp 57 + When string `json:"when" cborgen:"when"` 58 + } 59 + 60 + // GitTempGetTree_TreeEntry is a "treeEntry" in the sh.tangled.git.temp.getTree schema. 61 + type GitTempGetTree_TreeEntry struct { 62 + Last_commit *GitTempGetTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"` 63 + // mode: File mode 64 + Mode string `json:"mode" cborgen:"mode"` 65 + // name: Relative file or directory name 66 + Name string `json:"name" cborgen:"name"` 67 + // size: File size in bytes 68 + Size int64 `json:"size" cborgen:"size"` 69 + } 70 + 71 + // GitTempGetTree calls the XRPC method "sh.tangled.git.temp.getTree". 72 + // 73 + // path: Path within the repository tree 74 + // ref: Git reference (branch, tag, or commit SHA) 75 + // repo: AT-URI of the repository 76 + func GitTempGetTree(ctx context.Context, c util.LexClient, path string, ref string, repo string) (*GitTempGetTree_Output, error) { 77 + var out GitTempGetTree_Output 78 + 79 + params := map[string]interface{}{} 80 + if path != "" { 81 + params["path"] = path 82 + } 83 + params["ref"] = ref 84 + params["repo"] = repo 85 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.getTree", params, nil, &out); err != nil { 86 + return nil, err 87 + } 88 + 89 + return &out, nil 90 + }
+39
api/tangled/templistBranches.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.listBranches 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempListBranchesNSID = "sh.tangled.git.temp.listBranches" 16 + ) 17 + 18 + // GitTempListBranches calls the XRPC method "sh.tangled.git.temp.listBranches". 19 + // 20 + // cursor: Pagination cursor 21 + // limit: Maximum number of branches to return 22 + // repo: AT-URI of the repository 23 + func GitTempListBranches(ctx context.Context, c util.LexClient, cursor string, limit int64, repo string) ([]byte, error) { 24 + buf := new(bytes.Buffer) 25 + 26 + params := map[string]interface{}{} 27 + if cursor != "" { 28 + params["cursor"] = cursor 29 + } 30 + if limit != 0 { 31 + params["limit"] = limit 32 + } 33 + params["repo"] = repo 34 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.listBranches", params, nil, buf); err != nil { 35 + return nil, err 36 + } 37 + 38 + return buf.Bytes(), nil 39 + }
+43
api/tangled/templistCommits.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.listCommits 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempListCommitsNSID = "sh.tangled.git.temp.listCommits" 16 + ) 17 + 18 + // GitTempListCommits calls the XRPC method "sh.tangled.git.temp.listCommits". 19 + // 20 + // cursor: Pagination cursor (commit SHA) 21 + // limit: Maximum number of commits to return 22 + // ref: Git reference (branch, tag, or commit SHA) 23 + // repo: AT-URI of the repository 24 + func GitTempListCommits(ctx context.Context, c util.LexClient, cursor string, limit int64, ref string, repo string) ([]byte, error) { 25 + buf := new(bytes.Buffer) 26 + 27 + params := map[string]interface{}{} 28 + if cursor != "" { 29 + params["cursor"] = cursor 30 + } 31 + if limit != 0 { 32 + params["limit"] = limit 33 + } 34 + if ref != "" { 35 + params["ref"] = ref 36 + } 37 + params["repo"] = repo 38 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.listCommits", params, nil, buf); err != nil { 39 + return nil, err 40 + } 41 + 42 + return buf.Bytes(), nil 43 + }
+61
api/tangled/templistLanguages.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.listLanguages 6 + 7 + import ( 8 + "context" 9 + 10 + "github.com/bluesky-social/indigo/lex/util" 11 + ) 12 + 13 + const ( 14 + GitTempListLanguagesNSID = "sh.tangled.git.temp.listLanguages" 15 + ) 16 + 17 + // GitTempListLanguages_Language is a "language" in the sh.tangled.git.temp.listLanguages schema. 18 + type GitTempListLanguages_Language struct { 19 + // color: Hex color code for this language 20 + Color *string `json:"color,omitempty" cborgen:"color,omitempty"` 21 + // extensions: File extensions associated with this language 22 + Extensions []string `json:"extensions,omitempty" cborgen:"extensions,omitempty"` 23 + // fileCount: Number of files in this language 24 + FileCount *int64 `json:"fileCount,omitempty" cborgen:"fileCount,omitempty"` 25 + // name: Programming language name 26 + Name string `json:"name" cborgen:"name"` 27 + // percentage: Percentage of total codebase (0-100) 28 + Percentage int64 `json:"percentage" cborgen:"percentage"` 29 + // size: Total size of files in this language (bytes) 30 + Size int64 `json:"size" cborgen:"size"` 31 + } 32 + 33 + // GitTempListLanguages_Output is the output of a sh.tangled.git.temp.listLanguages call. 34 + type GitTempListLanguages_Output struct { 35 + Languages []*GitTempListLanguages_Language `json:"languages" cborgen:"languages"` 36 + // ref: The git reference used 37 + Ref string `json:"ref" cborgen:"ref"` 38 + // totalFiles: Total number of files analyzed 39 + TotalFiles *int64 `json:"totalFiles,omitempty" cborgen:"totalFiles,omitempty"` 40 + // totalSize: Total size of all analyzed files in bytes 41 + TotalSize *int64 `json:"totalSize,omitempty" cborgen:"totalSize,omitempty"` 42 + } 43 + 44 + // GitTempListLanguages calls the XRPC method "sh.tangled.git.temp.listLanguages". 
45 + // 46 + // ref: Git reference (branch, tag, or commit SHA) 47 + // repo: AT-URI of the repository 48 + func GitTempListLanguages(ctx context.Context, c util.LexClient, ref string, repo string) (*GitTempListLanguages_Output, error) { 49 + var out GitTempListLanguages_Output 50 + 51 + params := map[string]interface{}{} 52 + if ref != "" { 53 + params["ref"] = ref 54 + } 55 + params["repo"] = repo 56 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.listLanguages", params, nil, &out); err != nil { 57 + return nil, err 58 + } 59 + 60 + return &out, nil 61 + }
+39
api/tangled/templistTags.go
··· 1 + // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT. 2 + 3 + package tangled 4 + 5 + // schema: sh.tangled.git.temp.listTags 6 + 7 + import ( 8 + "bytes" 9 + "context" 10 + 11 + "github.com/bluesky-social/indigo/lex/util" 12 + ) 13 + 14 + const ( 15 + GitTempListTagsNSID = "sh.tangled.git.temp.listTags" 16 + ) 17 + 18 + // GitTempListTags calls the XRPC method "sh.tangled.git.temp.listTags". 19 + // 20 + // cursor: Pagination cursor 21 + // limit: Maximum number of tags to return 22 + // repo: AT-URI of the repository 23 + func GitTempListTags(ctx context.Context, c util.LexClient, cursor string, limit int64, repo string) ([]byte, error) { 24 + buf := new(bytes.Buffer) 25 + 26 + params := map[string]interface{}{} 27 + if cursor != "" { 28 + params["cursor"] = cursor 29 + } 30 + if limit != 0 { 31 + params["limit"] = limit 32 + } 33 + params["repo"] = repo 34 + if err := c.LexDo(ctx, util.Query, "", "sh.tangled.git.temp.listTags", params, nil, buf); err != nil { 35 + return nil, err 36 + } 37 + 38 + return buf.Bytes(), nil 39 + }
+26 -16
appview/config/config.go
··· 46 46 PLCURL string `env:"URL, default=https://plc.directory"` 47 47 } 48 48 49 + type KnotMirrorConfig struct { 50 + Url string `env:"URL, default=https://mirror.tangled.network"` 51 + } 52 + 49 53 type JetstreamConfig struct { 50 54 Endpoint string `env:"ENDPOINT, default=wss://jetstream1.us-east.bsky.network/subscribe"` 51 55 } ··· 135 139 UpdateInterval time.Duration `env:"UPDATE_INTERVAL, default=1h"` 136 140 } 137 141 142 + type OgreConfig struct { 143 + Host string `env:"HOST, default=https://ogre.tangled.network"` 144 + } 145 + 138 146 func (cfg RedisConfig) ToURL() string { 139 147 u := &url.URL{ 140 148 Scheme: "redis", ··· 150 158 } 151 159 152 160 type Config struct { 161 + Core CoreConfig `env:",prefix=TANGLED_"` 162 + Jetstream JetstreamConfig `env:",prefix=TANGLED_JETSTREAM_"` 163 + Knotstream ConsumerConfig `env:",prefix=TANGLED_KNOTSTREAM_"` 164 + Spindlestream ConsumerConfig `env:",prefix=TANGLED_SPINDLESTREAM_"` 165 + Resend ResendConfig `env:",prefix=TANGLED_RESEND_"` 166 + Posthog PosthogConfig `env:",prefix=TANGLED_POSTHOG_"` 167 + Camo CamoConfig `env:",prefix=TANGLED_CAMO_"` 168 + Avatar AvatarConfig `env:",prefix=TANGLED_AVATAR_"` 169 + OAuth OAuthConfig `env:",prefix=TANGLED_OAUTH_"` 170 + Redis RedisConfig `env:",prefix=TANGLED_REDIS_"` 171 + Plc PlcConfig `env:",prefix=TANGLED_PLC_"` 172 + Pds PdsConfig `env:",prefix=TANGLED_PDS_"` 173 + Cloudflare Cloudflare `env:",prefix=TANGLED_CLOUDFLARE_"` 174 + Label LabelConfig `env:",prefix=TANGLED_LABEL_"` 175 + Bluesky BlueskyConfig `env:",prefix=TANGLED_BLUESKY_"` 176 + Sites SitesConfig `env:",prefix=TANGLED_SITES_"` 177 + KnotMirror KnotMirrorConfig `env:",prefix=TANGLED_KNOTMIRROR_"` 178 + Ogre OgreConfig `env:",prefix=TANGLED_OGRE_"` 153 - Core CoreConfig `env:",prefix=TANGLED_"` 154 - Jetstream JetstreamConfig `env:",prefix=TANGLED_JETSTREAM_"` 155 - Knotstream ConsumerConfig `env:",prefix=TANGLED_KNOTSTREAM_"` 156 - Spindlestream ConsumerConfig `env:",prefix=TANGLED_SPINDLESTREAM_"` 
157 - Resend ResendConfig `env:",prefix=TANGLED_RESEND_"` 158 - Posthog PosthogConfig `env:",prefix=TANGLED_POSTHOG_"` 159 - Camo CamoConfig `env:",prefix=TANGLED_CAMO_"` 160 - Avatar AvatarConfig `env:",prefix=TANGLED_AVATAR_"` 161 - OAuth OAuthConfig `env:",prefix=TANGLED_OAUTH_"` 162 - Redis RedisConfig `env:",prefix=TANGLED_REDIS_"` 163 - Plc PlcConfig `env:",prefix=TANGLED_PLC_"` 164 - Pds PdsConfig `env:",prefix=TANGLED_PDS_"` 165 - Cloudflare Cloudflare `env:",prefix=TANGLED_CLOUDFLARE_"` 166 - Label LabelConfig `env:",prefix=TANGLED_LABEL_"` 167 - Bluesky BlueskyConfig `env:",prefix=TANGLED_BLUESKY_"` 168 - Sites SitesConfig `env:",prefix=TANGLED_SITES_"` 169 179 } 170 180 171 181 func LoadConfig(ctx context.Context) (*Config, error) {
+1 -1
appview/db/collaborators.go
··· 59 59 return nil, nil 60 60 } 61 61 62 + return GetRepos(e, orm.FilterIn("at_uri", repoAts)) 62 - return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 63 63 } 64 64 65 65 func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) {
+1
appview/db/db.go
··· 34 34 "_journal_mode=WAL", 35 35 "_synchronous=NORMAL", 36 36 "_auto_vacuum=incremental", 37 + "_busy_timeout=5000", 37 38 } 38 39 39 40 logger := log.FromContext(ctx)
+1 -1
appview/db/issues.go
··· 206 206 repoAts = append(repoAts, string(issue.RepoAt)) 207 207 } 208 208 209 + repos, err := GetRepos(e, orm.FilterIn("at_uri", repoAts)) 209 - repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 210 210 if err != nil { 211 211 return nil, fmt.Errorf("failed to build repo mappings: %w", err) 212 212 }
+2 -2
appview/db/profile.go
··· 66 66 *items = append(*items, &issue) 67 67 } 68 68 69 + repos, err := GetRepos(e, orm.FilterEq("did", forDid)) 69 - repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid)) 70 70 if err != nil { 71 71 return nil, fmt.Errorf("error getting all repos by did: %w", err) 72 72 } ··· 489 489 } 490 490 491 491 // ensure all pinned repos are either own repos or collaborating repos 492 + repos, err := GetRepos(e, orm.FilterEq("did", profile.Did)) 492 - repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did)) 493 493 if err != nil { 494 494 log.Printf("getting repos for %s: %s", profile.Did, err) 495 495 }
+1 -1
appview/db/pulls.go
··· 264 264 sourceAts = append(sourceAts, *p.PullSource.RepoAt) 265 265 } 266 266 } 267 + sourceRepos, err := GetRepos(e, orm.FilterIn("at_uri", sourceAts)) 267 - sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts)) 268 268 if err != nil && !errors.Is(err, sql.ErrNoRows) { 269 269 return nil, fmt.Errorf("failed to get source repos: %w", err) 270 270 }
+13 -9
appview/db/reference.go
··· 253 253 func GetBacklinks(e Execer, target syntax.ATURI) ([]models.RichReferenceLink, error) { 254 254 rows, err := e.Query( 255 255 `select from_at from reference_links 256 + where to_at = ? and from_at <> to_at`, 256 - where to_at = ?`, 257 257 target, 258 258 ) 259 259 if err != nil { ··· 283 283 return nil, fmt.Errorf("get issue backlinks: %w", err) 284 284 } 285 285 backlinks = append(backlinks, ls...) 286 + ls, err = getIssueCommentBacklinks(e, target, backlinksMap[tangled.RepoIssueCommentNSID]) 286 - ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.RepoIssueCommentNSID]) 287 287 if err != nil { 288 288 return nil, fmt.Errorf("get issue_comment backlinks: %w", err) 289 289 } ··· 293 293 return nil, fmt.Errorf("get pull backlinks: %w", err) 294 294 } 295 295 backlinks = append(backlinks, ls...) 296 + ls, err = getPullCommentBacklinks(e, target, backlinksMap[tangled.RepoPullCommentNSID]) 296 - ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.RepoPullCommentNSID]) 297 297 if err != nil { 298 298 return nil, fmt.Errorf("get pull_comment backlinks: %w", err) 299 299 } ··· 344 344 return refLinks, nil 345 345 } 346 346 347 + func getIssueCommentBacklinks(e Execer, target syntax.ATURI, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 347 - func getIssueCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 348 348 if len(aturis) == 0 { 349 349 return nil, nil 350 350 } 351 351 filter := orm.FilterIn("c.at_uri", aturis) 352 + exclude := orm.FilterNotEq("i.at_uri", target) 352 353 rows, err := e.Query( 353 354 fmt.Sprintf( 354 355 `select r.did, r.name, i.issue_id, c.id, i.title, i.open ··· 357 358 on i.at_uri = c.issue_at 358 359 join repos r 359 360 on r.at_uri = i.repo_at 361 + where %s and %s`, 360 - where %s`, 361 362 filter.Condition(), 363 + exclude.Condition(), 362 364 ), 365 + append(filter.Arg(), exclude.Arg()...)..., 363 - filter.Arg()..., 364 366 ) 365 367 if err != nil { 366 368 return 
nil, err ··· 424 426 return refLinks, nil 425 427 } 426 428 429 + func getPullCommentBacklinks(e Execer, target syntax.ATURI, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 427 - func getPullCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 428 430 if len(aturis) == 0 { 429 431 return nil, nil 430 432 } 431 433 filter := orm.FilterIn("c.comment_at", aturis) 434 + exclude := orm.FilterNotEq("p.at_uri", target) 432 435 rows, err := e.Query( 433 436 fmt.Sprintf( 434 437 `select r.did, r.name, p.pull_id, c.id, p.title, p.state ··· 437 440 on r.at_uri = p.repo_at 438 441 join pull_comments c 439 442 on r.at_uri = c.repo_at and p.pull_id = c.pull_id 443 + where %s and %s`, 440 - where %s`, 441 444 filter.Condition(), 445 + exclude.Condition(), 442 446 ), 447 + append(filter.Arg(), exclude.Arg()...)..., 443 - filter.Arg()..., 444 448 ) 445 449 if err != nil { 446 450 return nil, err
+72 -67
appview/db/repos.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/appview/pagination" 14 15 "tangled.org/core/orm" 15 16 ) 16 17 18 + func GetRepos(e Execer, filters ...orm.Filter) ([]models.Repo, error) { 19 + return GetReposPaginated(e, pagination.Page{}, filters...) 20 + } 17 - func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) { 18 - repoMap := make(map[syntax.ATURI]*models.Repo) 19 21 22 + func GetReposPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Repo, error) { 20 23 var conditions []string 21 24 var args []any 22 25 for _, filter := range filters { ··· 29 32 whereClause = " where " + strings.Join(conditions, " and ") 30 33 } 31 34 35 + pageClause := "" 36 + if page.Limit != 0 { 37 + pageClause = fmt.Sprintf(" limit %d offset %d", page.Limit, page.Offset) 32 - limitClause := "" 33 - if limit != 0 { 34 - limitClause = fmt.Sprintf(" limit %d", limit) 35 38 } 36 39 40 + // main query to get repos with pagination 41 + query := fmt.Sprintf(` 42 + select 37 - repoQuery := fmt.Sprintf( 38 - `select 39 43 id, 40 44 did, 41 45 name, ··· 47 51 topics, 48 52 source, 49 53 spindle 54 + from repos 50 - from 51 - repos r 52 55 %s 53 56 order by created desc 57 + %s 58 + `, whereClause, pageClause) 59 + 60 + rows, err := e.Query(query, args...) 54 - %s`, 55 - whereClause, 56 - limitClause, 57 - ) 58 - rows, err := e.Query(repoQuery, args...) 
59 61 if err != nil { 62 + return nil, err 60 - return nil, fmt.Errorf("failed to execute repo query: %w ", err) 61 63 } 62 64 defer rows.Close() 63 65 66 + repoMap := make(map[syntax.ATURI]*models.Repo) 64 67 for rows.Next() { 65 68 var repo models.Repo 66 69 var createdAt string ··· 80 83 &spindle, 81 84 ) 82 85 if err != nil { 86 + return nil, err 83 - return nil, fmt.Errorf("failed to execute repo query: %w ", err) 84 87 } 85 88 89 + // parse created timestamp 86 90 if t, err := time.Parse(time.RFC3339, createdAt); err == nil { 87 91 repo.Created = t 88 92 } 93 + 94 + // handle nullable fields 89 95 if description.Valid { 90 96 repo.Description = description.String 91 97 } ··· 107 113 } 108 114 109 115 if err = rows.Err(); err != nil { 116 + return nil, err 117 + } 118 + 119 + // if no repos, return early 120 + if len(repoMap) == 0 { 121 + return nil, nil 110 - return nil, fmt.Errorf("failed to execute repo query: %w ", err) 111 122 } 112 123 124 + // build IN clause for related queries 113 125 inClause := strings.TrimSuffix(strings.Repeat("?, ", len(repoMap)), ", ") 114 126 args = make([]any, len(repoMap)) 115 - 116 127 i := 0 117 128 for _, r := range repoMap { 118 129 args[i] = r.RepoAt() 119 130 i++ 120 131 } 121 132 133 + // get labels for all repos 122 - // Get labels for all repos 123 134 labelsQuery := fmt.Sprintf( 124 135 `select repo_at, label_at from repo_labels where repo_at in (%s)`, 125 136 inClause, 126 137 ) 138 + 127 139 rows, err = e.Query(labelsQuery, args...) 
128 140 if err != nil { 141 + return nil, err 129 - return nil, fmt.Errorf("failed to execute labels query: %w ", err) 130 142 } 131 143 defer rows.Close() 132 144 133 145 for rows.Next() { 134 146 var repoat, labelat string 135 147 if err := rows.Scan(&repoat, &labelat); err != nil { 136 - log.Println("err", "err", err) 137 148 continue 138 149 } 139 150 if r, ok := repoMap[syntax.ATURI(repoat)]; ok { 140 151 r.Labels = append(r.Labels, labelat) 141 152 } 142 153 } 143 - if err = rows.Err(); err != nil { 144 - return nil, fmt.Errorf("failed to execute labels query: %w ", err) 145 - } 146 154 155 + // get primary language for all repos 156 + languageQuery := fmt.Sprintf(` 147 - languageQuery := fmt.Sprintf( 148 - ` 149 157 select repo_at, language 150 158 from ( 151 159 select 160 + repo_at, language, 161 + row_number() over ( 162 + partition by repo_at 163 + order by bytes desc 164 + ) as rn 152 - repo_at, 153 - language, 154 - row_number() over ( 155 - partition by repo_at 156 - order by bytes desc 157 - ) as rn 158 165 from repo_languages 159 166 where repo_at in (%s) 167 + and is_default_ref = 1 168 + and language <> '' 160 - and is_default_ref = 1 161 - and language <> '' 162 169 ) 163 170 where rn = 1 171 + `, inClause) 172 + 164 - `, 165 - inClause, 166 - ) 167 173 rows, err = e.Query(languageQuery, args...) 
168 174 if err != nil { 175 + return nil, fmt.Errorf("failed to execute lang query: %w", err) 169 - return nil, fmt.Errorf("failed to execute lang query: %w ", err) 170 176 } 171 177 defer rows.Close() 172 178 ··· 181 187 } 182 188 } 183 189 if err = rows.Err(); err != nil { 190 + return nil, fmt.Errorf("failed to execute lang query: %w", err) 184 - return nil, fmt.Errorf("failed to execute lang query: %w ", err) 185 191 } 186 192 193 + // get star counts 187 194 starCountQuery := fmt.Sprintf( 195 + `select subject_at, count(1) from stars where subject_at in (%s) group by subject_at`, 188 - `select 189 - subject_at, count(1) 190 - from stars 191 - where subject_at in (%s) 192 - group by subject_at`, 193 196 inClause, 194 197 ) 198 + 195 199 rows, err = e.Query(starCountQuery, args...) 196 200 if err != nil { 201 + return nil, fmt.Errorf("failed to execute star-count query: %w", err) 197 - return nil, fmt.Errorf("failed to execute star-count query: %w ", err) 198 202 } 199 203 defer rows.Close() 200 204 ··· 210 214 } 211 215 } 212 216 if err = rows.Err(); err != nil { 217 + return nil, fmt.Errorf("failed to execute star-count query: %w", err) 213 - return nil, fmt.Errorf("failed to execute star-count query: %w ", err) 214 218 } 215 219 220 + // get issue counts 221 + issueCountQuery := fmt.Sprintf(` 222 + select 216 - issueCountQuery := fmt.Sprintf( 217 - `select 218 223 repo_at, 219 224 count(case when open = 1 then 1 end) as open_count, 220 225 count(case when open = 0 then 1 end) as closed_count 221 226 from issues 222 227 where repo_at in (%s) 228 + group by repo_at 229 + `, inClause) 230 + 223 - group by repo_at`, 224 - inClause, 225 - ) 226 231 rows, err = e.Query(issueCountQuery, args...) 
227 232 if err != nil { 233 + return nil, fmt.Errorf("failed to execute issue-count query: %w", err) 228 - return nil, fmt.Errorf("failed to execute issue-count query: %w ", err) 229 234 } 230 235 defer rows.Close() 231 236 ··· 242 247 } 243 248 } 244 249 if err = rows.Err(); err != nil { 250 + return nil, fmt.Errorf("failed to execute issue-count query: %w", err) 245 - return nil, fmt.Errorf("failed to execute issue-count query: %w ", err) 246 251 } 247 252 253 + // get pull counts 254 + pullCountQuery := fmt.Sprintf(` 255 + select 248 - pullCountQuery := fmt.Sprintf( 249 - `select 250 256 repo_at, 251 257 count(case when state = ? then 1 end) as open_count, 252 258 count(case when state = ? then 1 end) as merged_count, ··· 254 260 count(case when state = ? then 1 end) as deleted_count 255 261 from pulls 256 262 where repo_at in (%s) 263 + group by repo_at 264 + `, inClause) 265 + 266 + pullArgs := append([]any{ 257 - group by repo_at`, 258 - inClause, 259 - ) 260 - args = append([]any{ 261 267 models.PullOpen, 262 268 models.PullMerged, 263 269 models.PullClosed, 264 270 models.PullDeleted, 265 271 }, args...) 272 + 273 + rows, err = e.Query(pullCountQuery, pullArgs...) 
266 - rows, err = e.Query( 267 - pullCountQuery, 268 - args..., 269 - ) 270 274 if err != nil { 275 + return nil, fmt.Errorf("failed to execute pulls-count query: %w", err) 271 - return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err) 272 276 } 273 277 defer rows.Close() 274 278 ··· 287 291 } 288 292 } 289 293 if err = rows.Err(); err != nil { 294 + return nil, fmt.Errorf("failed to execute pulls-count query: %w", err) 290 - return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err) 291 295 } 292 296 293 297 var repos []models.Repo ··· 295 299 repos = append(repos, *r) 296 300 } 297 301 302 + // sort by created timestamp (desc) 298 303 slices.SortFunc(repos, func(a, b models.Repo) int { 299 304 if a.Created.After(b.Created) { 300 305 return -1 ··· 307 312 308 313 // helper to get exactly one repo 309 314 func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) { 315 + repos, err := GetReposPaginated(e, pagination.Page{Limit: 1}, filters...) 310 - repos, err := GetRepos(e, 0, filters...) 311 316 if err != nil { 312 317 return nil, err 313 318 } ··· 317 322 } 318 323 319 324 if len(repos) != 1 { 325 + return nil, fmt.Errorf("too few rows returned") 320 - return nil, fmt.Errorf("too many rows returned") 321 326 } 322 327 323 328 return &repos[0], nil
+2 -2
appview/db/star.go
··· 197 197 return nil, nil 198 198 } 199 199 200 + repos, err := GetRepos(e, orm.FilterIn("at_uri", args)) 200 - repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args)) 201 201 if err != nil { 202 202 return nil, err 203 203 } ··· 300 300 } 301 301 302 302 // get full repo data 303 + repos, err := GetRepos(e, orm.FilterIn("at_uri", repoUris)) 303 - repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris)) 304 304 if err != nil { 305 305 return nil, err 306 306 }
+3 -2
appview/db/timeline.go
··· 5 5 6 6 "github.com/bluesky-social/indigo/atproto/syntax" 7 7 "tangled.org/core/appview/models" 8 + "tangled.org/core/appview/pagination" 8 9 "tangled.org/core/orm" 9 10 ) 10 11 ··· 90 91 filters = append(filters, orm.FilterIn("did", userIsFollowing)) 91 92 } 92 93 94 + repos, err := GetReposPaginated(e, pagination.Page{Limit: limit}, filters...) 93 - repos, err := GetRepos(e, limit, filters...) 94 95 if err != nil { 95 96 return nil, err 96 97 } ··· 105 106 106 107 var origRepos []models.Repo 107 108 if args != nil { 109 + origRepos, err = GetRepos(e, orm.FilterIn("at_uri", args)) 108 - origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args)) 109 110 } 110 111 if err != nil { 111 112 return nil, err
+27 -4
appview/email/email.go
··· 49 49 parts := strings.Split(addr.Address, "@") 50 50 domain := parts[1] 51 51 52 + canonical := coalesceToCanonicalName(domain) 53 + mx, err := net.LookupMX(canonical) 54 + 55 + // Don't check err here; mx will only contain valid mx records, and we should 56 + // only fallback to an implicit mx if there are no mx records defined (whether 57 + // they are valid or not). 58 + if len(mx) != 0 { 59 + return true 52 - mx, err := net.LookupMX(domain) 53 - if err != nil || len(mx) == 0 { 54 - return false 55 60 } 56 61 62 + if err != nil { 63 + // If the domain resolves to an address, assume it's an implicit mx. 64 + address, _ := net.LookupIP(canonical) 65 + if len(address) != 0 { 66 + return true 67 + } 68 + } 69 + 70 + return false 71 + } 72 + 73 + func coalesceToCanonicalName(domain string) string { 74 + canonical, err := net.LookupCNAME(domain) 75 + if err != nil { 76 + // net.LookupCNAME() returns an error if there is no cname record *and* no 77 + // a/aaaa records, but there may still be mx records. 78 + return domain 79 + } 80 + return canonical 57 - return true 58 81 }
+4
appview/indexer/indexer.go
··· 7 7 "tangled.org/core/appview/db" 8 8 issues_indexer "tangled.org/core/appview/indexer/issues" 9 9 pulls_indexer "tangled.org/core/appview/indexer/pulls" 10 + repos_indexer "tangled.org/core/appview/indexer/repos" 10 11 "tangled.org/core/appview/notify" 11 12 tlog "tangled.org/core/log" 12 13 ) ··· 14 15 type Indexer struct { 15 16 Issues *issues_indexer.Indexer 16 17 Pulls *pulls_indexer.Indexer 18 + Repos *repos_indexer.Indexer 17 19 logger *slog.Logger 18 20 notify.BaseNotifier 19 21 } ··· 22 24 return &Indexer{ 23 25 issues_indexer.NewIndexer("indexes/issues.bleve"), 24 26 pulls_indexer.NewIndexer("indexes/pulls.bleve"), 27 + repos_indexer.NewIndexer("indexes/repos.bleve"), 25 28 logger, 26 29 notify.BaseNotifier{}, 27 30 } ··· 32 35 ctx = tlog.IntoContext(ctx, ix.logger) 33 36 ix.Issues.Init(ctx, db) 34 37 ix.Pulls.Init(ctx, db) 38 + ix.Repos.Init(ctx, db) 35 39 return nil 36 40 }
+9
appview/indexer/notifier.go
··· 73 73 l.Error("failed to index a pr", "err", err) 74 74 } 75 75 } 76 + 77 + func (ix *Indexer) NewRepo(ctx context.Context, repo *models.Repo) { 78 + l := log.FromContext(ctx).With("notifier", "indexer", "repo", repo) 79 + l.Debug("indexing new repo") 80 + err := ix.Repos.Index(ctx, *repo) 81 + if err != nil { 82 + l.Error("failed to index a repo", "err", err) 83 + } 84 + }
+375
appview/indexer/repos/indexer.go
··· 1 + // heavily inspired by gitea's model (basically copy-pasted) 2 + package repos_indexer 3 + 4 + import ( 5 + "context" 6 + "errors" 7 + "log" 8 + "os" 9 + 10 + "github.com/blevesearch/bleve/v2" 11 + "github.com/blevesearch/bleve/v2/analysis/analyzer/custom" 12 + "github.com/blevesearch/bleve/v2/analysis/token/camelcase" 13 + "github.com/blevesearch/bleve/v2/analysis/token/lowercase" 14 + "github.com/blevesearch/bleve/v2/analysis/token/ngram" 15 + "github.com/blevesearch/bleve/v2/analysis/token/unicodenorm" 16 + "github.com/blevesearch/bleve/v2/analysis/tokenizer/unicode" 17 + "github.com/blevesearch/bleve/v2/index/upsidedown" 18 + "github.com/blevesearch/bleve/v2/mapping" 19 + "github.com/blevesearch/bleve/v2/search/query" 20 + "tangled.org/core/appview/db" 21 + "tangled.org/core/appview/indexer/base36" 22 + bleveutil "tangled.org/core/appview/indexer/bleve" 23 + "tangled.org/core/appview/models" 24 + "tangled.org/core/appview/pagination" 25 + tlog "tangled.org/core/log" 26 + ) 27 + 28 + const ( 29 + repoIndexerAnalyzer = "repoIndexer" 30 + repoIndexerDocType = "repoIndexerDocType" 31 + 32 + unicodeNormalizeName = "unicodeNormalize" 33 + 34 + // Bump this when the index mapping changes to trigger a rebuild. 
35 + repoIndexerVersion = 5 36 + ) 37 + 38 + type Indexer struct { 39 + indexer bleve.Index 40 + path string 41 + } 42 + 43 + func NewIndexer(indexDir string) *Indexer { 44 + return &Indexer{ 45 + path: indexDir, 46 + } 47 + } 48 + 49 + // Init initializes the indexer 50 + func (ix *Indexer) Init(ctx context.Context, e db.Execer) { 51 + l := tlog.FromContext(ctx) 52 + existed, err := ix.intialize(ctx) 53 + if err != nil { 54 + log.Fatalln("failed to initialize repo indexer", err) 55 + } 56 + if !existed { 57 + l.Debug("Populating the repo indexer") 58 + err := PopulateIndexer(ctx, ix, e) 59 + if err != nil { 60 + log.Fatalln("failed to populate repo indexer", err) 61 + } 62 + } 63 + 64 + count, _ := ix.indexer.DocCount() 65 + l.Info("Initialized the repo indexer", "docCount", count) 66 + } 67 + 68 + func generateRepoIndexMapping() (mapping.IndexMapping, error) { 69 + mapping := bleve.NewIndexMapping() 70 + docMapping := bleve.NewDocumentMapping() 71 + 72 + textFieldMapping := bleve.NewTextFieldMapping() 73 + textFieldMapping.Store = false 74 + textFieldMapping.IncludeInAll = false 75 + 76 + keywordFieldMapping := bleve.NewKeywordFieldMapping() 77 + keywordFieldMapping.Store = false 78 + keywordFieldMapping.IncludeInAll = false 79 + 80 + // case-insensitive keyword field for language and topics 81 + caseInsensitiveKeywordMapping := bleve.NewTextFieldMapping() 82 + caseInsensitiveKeywordMapping.Store = false 83 + caseInsensitiveKeywordMapping.IncludeInAll = false 84 + caseInsensitiveKeywordMapping.Analyzer = "keyword_lowercase" 85 + 86 + // trigram field for partial repo name matching 87 + trigramFieldMapping := bleve.NewTextFieldMapping() 88 + trigramFieldMapping.Store = false 89 + trigramFieldMapping.IncludeInAll = false 90 + trigramFieldMapping.Analyzer = "trigram" 91 + 92 + // text fields 93 + docMapping.AddFieldMappingsAt("name", textFieldMapping) 94 + docMapping.AddFieldMappingsAt("name_trigram", trigramFieldMapping) 95 + 
docMapping.AddFieldMappingsAt("description", textFieldMapping) 96 + docMapping.AddFieldMappingsAt("website", textFieldMapping) 97 + docMapping.AddFieldMappingsAt("topics", textFieldMapping) 98 + 99 + // keyword fields 100 + docMapping.AddFieldMappingsAt("language", caseInsensitiveKeywordMapping) 101 + docMapping.AddFieldMappingsAt("topics_exact", caseInsensitiveKeywordMapping) 102 + docMapping.AddFieldMappingsAt("did", keywordFieldMapping) 103 + docMapping.AddFieldMappingsAt("knot", keywordFieldMapping) 104 + docMapping.AddFieldMappingsAt("repo_at", keywordFieldMapping) 105 + 106 + err := mapping.AddCustomTokenFilter(unicodeNormalizeName, map[string]any{ 107 + "type": unicodenorm.Name, 108 + "form": unicodenorm.NFC, 109 + }) 110 + if err != nil { 111 + return nil, err 112 + } 113 + 114 + err = mapping.AddCustomTokenFilter("edgeNgram3", map[string]any{ 115 + "type": ngram.Name, 116 + "min": 2.0, 117 + "max": 3.0, 118 + }) 119 + if err != nil { 120 + return nil, err 121 + } 122 + 123 + err = mapping.AddCustomAnalyzer(repoIndexerAnalyzer, map[string]any{ 124 + "type": custom.Name, 125 + "char_filters": []string{}, 126 + "tokenizer": unicode.Name, 127 + "token_filters": []string{unicodeNormalizeName, camelcase.Name, lowercase.Name}, 128 + }) 129 + if err != nil { 130 + return nil, err 131 + } 132 + 133 + err = mapping.AddCustomAnalyzer("keyword_lowercase", map[string]any{ 134 + "type": custom.Name, 135 + "char_filters": []string{}, 136 + "tokenizer": "single", 137 + "token_filters": []string{lowercase.Name}, 138 + }) 139 + if err != nil { 140 + return nil, err 141 + } 142 + 143 + err = mapping.AddCustomAnalyzer("trigram", map[string]any{ 144 + "type": custom.Name, 145 + "char_filters": []string{}, 146 + "tokenizer": "single", 147 + "token_filters": []string{lowercase.Name, "edgeNgram3"}, 148 + }) 149 + if err != nil { 150 + return nil, err 151 + } 152 + 153 + mapping.DefaultAnalyzer = repoIndexerAnalyzer 154 + mapping.AddDocumentMapping(repoIndexerDocType, docMapping) 
155 + mapping.AddDocumentMapping("_all", bleve.NewDocumentDisabledMapping()) 156 + mapping.DefaultMapping = bleve.NewDocumentDisabledMapping() 157 + 158 + return mapping, nil 159 + } 160 + 161 + func (ix *Indexer) intialize(ctx context.Context) (bool, error) { 162 + if ix.indexer != nil { 163 + return false, errors.New("indexer is already initialized") 164 + } 165 + 166 + indexer, err := openIndexer(ctx, ix.path, repoIndexerVersion) 167 + if err != nil { 168 + return false, err 169 + } 170 + if indexer != nil { 171 + ix.indexer = indexer 172 + return true, nil 173 + } 174 + 175 + mapping, err := generateRepoIndexMapping() 176 + if err != nil { 177 + return false, err 178 + } 179 + indexer, err = bleve.New(ix.path, mapping) 180 + if err != nil { 181 + return false, err 182 + } 183 + indexer.SetInternal([]byte("mapping_version"), []byte{byte(repoIndexerVersion)}) 184 + 185 + ix.indexer = indexer 186 + 187 + return false, nil 188 + } 189 + 190 + func openIndexer(ctx context.Context, path string, version int) (bleve.Index, error) { 191 + l := tlog.FromContext(ctx) 192 + indexer, err := bleve.Open(path) 193 + if err != nil { 194 + if errors.Is(err, upsidedown.IncompatibleVersion) { 195 + l.Info("Indexer was built with a previous version of bleve, deleting and rebuilding") 196 + return nil, os.RemoveAll(path) 197 + } 198 + return nil, nil 199 + } 200 + 201 + storedVersion, _ := indexer.GetInternal([]byte("mapping_version")) 202 + if storedVersion == nil || int(storedVersion[0]) != version { 203 + l.Info("Indexer mapping version changed, deleting and rebuilding") 204 + indexer.Close() 205 + return nil, os.RemoveAll(path) 206 + } 207 + 208 + return indexer, nil 209 + } 210 + 211 + func PopulateIndexer(ctx context.Context, ix *Indexer, e db.Execer) error { 212 + l := tlog.FromContext(ctx) 213 + count := 0 214 + 215 + err := pagination.IterateAll( 216 + func(page pagination.Page) ([]models.Repo, error) { 217 + return db.GetReposPaginated(e, page) 218 + }, 219 + func(repos 
[]models.Repo) error { 220 + count += len(repos) 221 + return ix.Index(ctx, repos...) 222 + }, 223 + ) 224 + 225 + l.Info("repos indexed", "count", count) 226 + return err 227 + } 228 + 229 + type repoData struct { 230 + ID int64 `json:"id"` 231 + RepoAt string `json:"repo_at"` 232 + Did string `json:"did"` 233 + Name string `json:"name"` 234 + NameTrigram string `json:"name_trigram"` 235 + Description string `json:"description"` 236 + Website string `json:"website"` 237 + Topics []string `json:"topics"` 238 + TopicsExact []string `json:"topics_exact"` 239 + Knot string `json:"knot"` 240 + Language string `json:"language"` 241 + } 242 + 243 + func makeRepoData(repo *models.Repo) *repoData { 244 + var language string 245 + if repo.RepoStats != nil { 246 + language = repo.RepoStats.Language 247 + } 248 + return &repoData{ 249 + ID: repo.Id, 250 + RepoAt: repo.RepoAt().String(), 251 + Did: repo.Did, 252 + Name: repo.Name, 253 + NameTrigram: repo.Name, 254 + Description: repo.Description, 255 + Website: repo.Website, 256 + Topics: repo.Topics, 257 + TopicsExact: repo.Topics, 258 + Knot: repo.Knot, 259 + Language: language, 260 + } 261 + } 262 + 263 + // Type returns the document type, for bleve's mapping.Classifier interface. 
264 + func (r *repoData) Type() string { 265 + return repoIndexerDocType 266 + } 267 + 268 + type SearchResult struct { 269 + Hits []int64 270 + Total uint64 271 + } 272 + 273 + const maxBatchSize = 20 274 + 275 + func (ix *Indexer) Index(ctx context.Context, repos ...models.Repo) error { 276 + batch := bleveutil.NewFlushingBatch(ix.indexer, maxBatchSize) 277 + for _, repo := range repos { 278 + repoData := makeRepoData(&repo) 279 + if err := batch.Index(base36.Encode(repo.Id), repoData); err != nil { 280 + return err 281 + } 282 + } 283 + return batch.Flush() 284 + } 285 + 286 + func (ix *Indexer) Delete(ctx context.Context, repoID int64) error { 287 + return ix.indexer.Delete(base36.Encode(repoID)) 288 + } 289 + 290 + func (ix *Indexer) Search(ctx context.Context, opts models.RepoSearchOptions) (*SearchResult, error) { 291 + var musts []query.Query 292 + var mustNots []query.Query 293 + 294 + for _, keyword := range opts.Keywords { 295 + musts = append(musts, bleve.NewDisjunctionQuery( 296 + bleveutil.MatchAndQuery("name", keyword, repoIndexerAnalyzer, 0), 297 + bleveutil.MatchAndQuery("name_trigram", keyword, "trigram", 0), 298 + bleveutil.MatchAndQuery("description", keyword, repoIndexerAnalyzer, 0), 299 + bleveutil.MatchAndQuery("website", keyword, repoIndexerAnalyzer, 0), 300 + bleveutil.MatchAndQuery("topics", keyword, repoIndexerAnalyzer, 0), 301 + )) 302 + } 303 + 304 + for _, phrase := range opts.Phrases { 305 + musts = append(musts, bleve.NewDisjunctionQuery( 306 + bleveutil.MatchPhraseQuery("name", phrase, repoIndexerAnalyzer), 307 + bleveutil.MatchPhraseQuery("description", phrase, repoIndexerAnalyzer), 308 + bleveutil.MatchPhraseQuery("website", phrase, repoIndexerAnalyzer), 309 + bleveutil.MatchPhraseQuery("topics", phrase, repoIndexerAnalyzer), 310 + )) 311 + } 312 + 313 + for _, keyword := range opts.NegatedKeywords { 314 + mustNots = append(mustNots, bleve.NewDisjunctionQuery( 315 + bleveutil.MatchAndQuery("name", keyword, repoIndexerAnalyzer, 0), 
316 + bleveutil.MatchAndQuery("description", keyword, repoIndexerAnalyzer, 0), 317 + bleveutil.MatchAndQuery("website", keyword, repoIndexerAnalyzer, 0), 318 + bleveutil.MatchAndQuery("topics", keyword, repoIndexerAnalyzer, 0), 319 + )) 320 + } 321 + 322 + for _, phrase := range opts.NegatedPhrases { 323 + mustNots = append(mustNots, bleve.NewDisjunctionQuery( 324 + bleveutil.MatchPhraseQuery("name", phrase, repoIndexerAnalyzer), 325 + bleveutil.MatchPhraseQuery("description", phrase, repoIndexerAnalyzer), 326 + bleveutil.MatchPhraseQuery("website", phrase, repoIndexerAnalyzer), 327 + bleveutil.MatchPhraseQuery("topics", phrase, repoIndexerAnalyzer), 328 + )) 329 + } 330 + 331 + // keyword filters 332 + if opts.Language != "" { 333 + musts = append(musts, bleveutil.MatchAndQuery("language", opts.Language, "keyword_lowercase", 0)) 334 + } 335 + 336 + if opts.Knot != "" { 337 + musts = append(musts, bleveutil.KeywordFieldQuery("knot", opts.Knot)) 338 + } 339 + 340 + if opts.Did != "" { 341 + musts = append(musts, bleveutil.KeywordFieldQuery("did", opts.Did)) 342 + } 343 + 344 + for _, topic := range opts.Topics { 345 + musts = append(musts, bleveutil.MatchAndQuery("topics_exact", topic, "keyword_lowercase", 0)) 346 + } 347 + 348 + for _, topic := range opts.NegatedTopics { 349 + mustNots = append(mustNots, bleveutil.MatchAndQuery("topics_exact", topic, "keyword_lowercase", 0)) 350 + } 351 + 352 + indexerQuery := bleve.NewBooleanQuery() 353 + if len(musts) == 0 { 354 + musts = append(musts, bleve.NewMatchAllQuery()) 355 + } 356 + indexerQuery.AddMust(musts...) 357 + indexerQuery.AddMustNot(mustNots...) 
358 + searchReq := bleve.NewSearchRequestOptions(indexerQuery, opts.Page.Limit, opts.Page.Offset, false) 359 + res, err := ix.indexer.SearchInContext(ctx, searchReq) 360 + if err != nil { 361 + return nil, nil 362 + } 363 + ret := &SearchResult{ 364 + Total: res.Total, 365 + Hits: make([]int64, len(res.Hits)), 366 + } 367 + for i, hit := range res.Hits { 368 + id, err := base36.Decode(hit.ID) 369 + if err != nil { 370 + return nil, err 371 + } 372 + ret.Hits[i] = id 373 + } 374 + return ret, nil 375 + }
+639
appview/indexer/repos/indexer_test.go
··· 1 + package repos_indexer 2 + 3 + import ( 4 + "context" 5 + "os" 6 + "testing" 7 + 8 + "github.com/blevesearch/bleve/v2" 9 + "github.com/stretchr/testify/assert" 10 + "github.com/stretchr/testify/require" 11 + "tangled.org/core/appview/models" 12 + "tangled.org/core/appview/pagination" 13 + ) 14 + 15 + func setupTestIndexer(t *testing.T) (*Indexer, func()) { 16 + t.Helper() 17 + 18 + tmpDir, err := os.MkdirTemp("", "repo_indexer_test") 19 + require.NoError(t, err) 20 + 21 + ix := NewIndexer(tmpDir) 22 + 23 + mapping, err := generateRepoIndexMapping() 24 + require.NoError(t, err) 25 + 26 + indexer, err := bleve.New(tmpDir, mapping) 27 + require.NoError(t, err) 28 + ix.indexer = indexer 29 + 30 + cleanup := func() { 31 + ix.indexer.Close() 32 + os.RemoveAll(tmpDir) 33 + } 34 + 35 + return ix, cleanup 36 + } 37 + 38 + func TestBasicIndexingAndSearch(t *testing.T) { 39 + ix, cleanup := setupTestIndexer(t) 40 + defer cleanup() 41 + 42 + ctx := context.Background() 43 + 44 + err := ix.Index(ctx, 45 + models.Repo{ 46 + Id: 1, 47 + Did: "did:plc:alice", 48 + Name: "web-framework", 49 + Knot: "example.com", 50 + Description: "A modern web framework for Go", 51 + Website: "https://example.com/web-framework", 52 + Topics: []string{"web", "framework", "golang"}, 53 + RepoStats: &models.RepoStats{Language: "Go"}, 54 + }, 55 + models.Repo{ 56 + Id: 2, 57 + Did: "did:plc:bob", 58 + Name: "cli-tool", 59 + Knot: "example.com", 60 + Description: "Command line utility for developers", 61 + Website: "", 62 + Topics: []string{"cli", "tool"}, 63 + RepoStats: &models.RepoStats{Language: "Rust"}, 64 + }, 65 + models.Repo{ 66 + Id: 3, 67 + Did: "did:plc:alice", 68 + Name: "javascript-parser", 69 + Knot: "example.com", 70 + Description: "Fast JavaScript parser", 71 + Website: "", 72 + Topics: []string{"javascript", "parser"}, 73 + RepoStats: &models.RepoStats{Language: "JavaScript"}, 74 + }, 75 + ) 76 + require.NoError(t, err) 77 + 78 + // search by name 79 + result, err := 
ix.Search(ctx, models.RepoSearchOptions{ 80 + Keywords: []string{"framework"}, 81 + Page: pagination.Page{Limit: 10}, 82 + }) 83 + require.NoError(t, err) 84 + assert.Equal(t, uint64(1), result.Total) 85 + assert.Contains(t, result.Hits, int64(1)) 86 + 87 + // search by description 88 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 89 + Keywords: []string{"utility"}, 90 + Page: pagination.Page{Limit: 10}, 91 + }) 92 + require.NoError(t, err) 93 + assert.Equal(t, uint64(1), result.Total) 94 + assert.Contains(t, result.Hits, int64(2)) 95 + 96 + // search by website 97 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 98 + Keywords: []string{"example.com/web-framework"}, 99 + Page: pagination.Page{Limit: 10}, 100 + }) 101 + require.NoError(t, err) 102 + assert.Equal(t, uint64(1), result.Total) 103 + assert.Contains(t, result.Hits, int64(1)) 104 + } 105 + 106 + func TestLanguageFiltering(t *testing.T) { 107 + ix, cleanup := setupTestIndexer(t) 108 + defer cleanup() 109 + 110 + ctx := context.Background() 111 + 112 + err := ix.Index(ctx, 113 + models.Repo{ 114 + Id: 1, 115 + Did: "did:plc:alice", 116 + Name: "go-project", 117 + RepoStats: &models.RepoStats{Language: "Go"}, 118 + }, 119 + models.Repo{ 120 + Id: 2, 121 + Did: "did:plc:bob", 122 + Name: "rust-project", 123 + RepoStats: &models.RepoStats{Language: "Rust"}, 124 + }, 125 + models.Repo{ 126 + Id: 3, 127 + Did: "did:plc:alice", 128 + Name: "another-go-project", 129 + RepoStats: &models.RepoStats{Language: "Go"}, 130 + }, 131 + ) 132 + require.NoError(t, err) 133 + 134 + // filter by go language 135 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 136 + Language: "Go", 137 + Page: pagination.Page{Limit: 10}, 138 + }) 139 + require.NoError(t, err) 140 + assert.Equal(t, uint64(2), result.Total) 141 + assert.Contains(t, result.Hits, int64(1)) 142 + assert.Contains(t, result.Hits, int64(3)) 143 + 144 + // filter by rust language 145 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 
146 + Language: "Rust", 147 + Page: pagination.Page{Limit: 10}, 148 + }) 149 + require.NoError(t, err) 150 + assert.Equal(t, uint64(1), result.Total) 151 + assert.Contains(t, result.Hits, int64(2)) 152 + } 153 + 154 + func TestTopicExactMatching(t *testing.T) { 155 + ix, cleanup := setupTestIndexer(t) 156 + defer cleanup() 157 + 158 + ctx := context.Background() 159 + 160 + err := ix.Index(ctx, 161 + models.Repo{ 162 + Id: 1, 163 + Did: "did:plc:alice", 164 + Name: "js-tool", 165 + Topics: []string{"javascript", "tool"}, 166 + RepoStats: &models.RepoStats{}, 167 + }, 168 + models.Repo{ 169 + Id: 2, 170 + Did: "did:plc:bob", 171 + Name: "java-app", 172 + Topics: []string{"java", "application"}, 173 + RepoStats: &models.RepoStats{}, 174 + }, 175 + models.Repo{ 176 + Id: 3, 177 + Did: "did:plc:alice", 178 + Name: "cli-tool", 179 + Topics: []string{"cli", "tool"}, 180 + RepoStats: &models.RepoStats{}, 181 + }, 182 + ) 183 + require.NoError(t, err) 184 + 185 + // exact match for "javascript" topic 186 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 187 + Topics: []string{"javascript"}, 188 + Page: pagination.Page{Limit: 10}, 189 + }) 190 + require.NoError(t, err) 191 + assert.Equal(t, uint64(1), result.Total) 192 + assert.Contains(t, result.Hits, int64(1)) 193 + 194 + // exact match for "tool" topic (should match repos 1 and 3) 195 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 196 + Topics: []string{"tool"}, 197 + Page: pagination.Page{Limit: 10}, 198 + }) 199 + require.NoError(t, err) 200 + assert.Equal(t, uint64(2), result.Total) 201 + assert.Contains(t, result.Hits, int64(1)) 202 + assert.Contains(t, result.Hits, int64(3)) 203 + } 204 + 205 + func TestTopicTextSearch(t *testing.T) { 206 + ix, cleanup := setupTestIndexer(t) 207 + defer cleanup() 208 + 209 + ctx := context.Background() 210 + 211 + err := ix.Index(ctx, 212 + models.Repo{ 213 + Id: 1, 214 + Did: "did:plc:alice", 215 + Name: "js-tool", 216 + Topics: []string{"JavaScript"}, 217 + 
RepoStats: &models.RepoStats{}, 218 + }, 219 + models.Repo{ 220 + Id: 2, 221 + Did: "did:plc:bob", 222 + Name: "java-app", 223 + Topics: []string{"Java"}, 224 + RepoStats: &models.RepoStats{}, 225 + }, 226 + ) 227 + require.NoError(t, err) 228 + 229 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 230 + Keywords: []string{"Java"}, 231 + Page: pagination.Page{Limit: 10}, 232 + }) 233 + require.NoError(t, err) 234 + assert.Equal(t, uint64(2), result.Total) 235 + assert.Contains(t, result.Hits, int64(1)) 236 + assert.Contains(t, result.Hits, int64(2)) 237 + } 238 + 239 + func TestNegatedFilters(t *testing.T) { 240 + ix, cleanup := setupTestIndexer(t) 241 + defer cleanup() 242 + 243 + ctx := context.Background() 244 + 245 + err := ix.Index(ctx, 246 + models.Repo{ 247 + Id: 1, 248 + Did: "did:plc:alice", 249 + Name: "active-project", 250 + Description: "An active development project", 251 + Topics: []string{"active"}, 252 + RepoStats: &models.RepoStats{Language: "Go"}, 253 + }, 254 + models.Repo{ 255 + Id: 2, 256 + Did: "did:plc:bob", 257 + Name: "archived-project", 258 + Description: "An archived project", 259 + Topics: []string{"archived"}, 260 + RepoStats: &models.RepoStats{Language: "Python"}, 261 + }, 262 + models.Repo{ 263 + Id: 3, 264 + Did: "did:plc:alice", 265 + Name: "another-project", 266 + Description: "Another active project", 267 + Topics: []string{"active"}, 268 + RepoStats: &models.RepoStats{Language: "Go"}, 269 + }, 270 + ) 271 + require.NoError(t, err) 272 + 273 + // exclude archived topic 274 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 275 + NegatedTopics: []string{"archived"}, 276 + Page: pagination.Page{Limit: 10}, 277 + }) 278 + require.NoError(t, err) 279 + assert.Equal(t, uint64(2), result.Total) 280 + assert.Contains(t, result.Hits, int64(1)) 281 + assert.Contains(t, result.Hits, int64(3)) 282 + 283 + // exclude keyword "archived" 284 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 285 + NegatedKeywords: 
[]string{"archived"}, 286 + Page: pagination.Page{Limit: 10}, 287 + }) 288 + require.NoError(t, err) 289 + assert.Equal(t, uint64(2), result.Total) 290 + assert.Contains(t, result.Hits, int64(1)) 291 + assert.Contains(t, result.Hits, int64(3)) 292 + 293 + // exclude phrase 294 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 295 + NegatedPhrases: []string{"archived project"}, 296 + Page: pagination.Page{Limit: 10}, 297 + }) 298 + require.NoError(t, err) 299 + assert.Equal(t, uint64(2), result.Total) 300 + assert.Contains(t, result.Hits, int64(1)) 301 + assert.Contains(t, result.Hits, int64(3)) 302 + } 303 + 304 + func TestPagination(t *testing.T) { 305 + ix, cleanup := setupTestIndexer(t) 306 + defer cleanup() 307 + 308 + ctx := context.Background() 309 + 310 + // index multiple repos 311 + var repos []models.Repo 312 + for i := 1; i <= 25; i++ { 313 + repos = append(repos, models.Repo{ 314 + Id: int64(i), 315 + Did: "did:plc:alice", 316 + Name: "project", 317 + Topics: []string{"test"}, 318 + RepoStats: &models.RepoStats{}, 319 + }) 320 + } 321 + err := ix.Index(ctx, repos...) 
322 + require.NoError(t, err) 323 + 324 + // first page 325 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 326 + Topics: []string{"test"}, 327 + Page: pagination.Page{Limit: 10, Offset: 0}, 328 + }) 329 + require.NoError(t, err) 330 + assert.Equal(t, uint64(25), result.Total) 331 + assert.Len(t, result.Hits, 10) 332 + 333 + // second page 334 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 335 + Topics: []string{"test"}, 336 + Page: pagination.Page{Limit: 10, Offset: 10}, 337 + }) 338 + require.NoError(t, err) 339 + assert.Equal(t, uint64(25), result.Total) 340 + assert.Len(t, result.Hits, 10) 341 + 342 + // third page - 5 items 343 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 344 + Topics: []string{"test"}, 345 + Page: pagination.Page{Limit: 10, Offset: 20}, 346 + }) 347 + require.NoError(t, err) 348 + assert.Equal(t, uint64(25), result.Total) 349 + assert.Len(t, result.Hits, 5) 350 + } 351 + 352 + func TestUpdateReindex(t *testing.T) { 353 + ix, cleanup := setupTestIndexer(t) 354 + defer cleanup() 355 + 356 + ctx := context.Background() 357 + 358 + // initial index 359 + err := ix.Index(ctx, models.Repo{ 360 + Id: 1, 361 + Did: "did:plc:alice", 362 + Name: "my-project", 363 + Description: "Initial description", 364 + Topics: []string{"initial"}, 365 + RepoStats: &models.RepoStats{Language: "Go"}, 366 + }) 367 + require.NoError(t, err) 368 + 369 + // search for initial state 370 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 371 + Keywords: []string{"Initial"}, 372 + Page: pagination.Page{Limit: 10}, 373 + }) 374 + require.NoError(t, err) 375 + assert.Equal(t, uint64(1), result.Total) 376 + 377 + // update the repo 378 + err = ix.Index(ctx, models.Repo{ 379 + Id: 1, 380 + Did: "did:plc:alice", 381 + Name: "my-project", 382 + Description: "Updated description", 383 + Topics: []string{"updated"}, 384 + RepoStats: &models.RepoStats{Language: "Rust"}, 385 + }) 386 + require.NoError(t, err) 387 + 388 + // search for old 
description should return nothing 389 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 390 + Keywords: []string{"Initial"}, 391 + Page: pagination.Page{Limit: 10}, 392 + }) 393 + require.NoError(t, err) 394 + assert.Equal(t, uint64(0), result.Total) 395 + 396 + // search for new description should work 397 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 398 + Keywords: []string{"Updated"}, 399 + Page: pagination.Page{Limit: 10}, 400 + }) 401 + require.NoError(t, err) 402 + assert.Equal(t, uint64(1), result.Total) 403 + 404 + // language should be updated 405 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 406 + Language: "Rust", 407 + Page: pagination.Page{Limit: 10}, 408 + }) 409 + require.NoError(t, err) 410 + assert.Equal(t, uint64(1), result.Total) 411 + } 412 + 413 + func TestEmptyResults(t *testing.T) { 414 + ix, cleanup := setupTestIndexer(t) 415 + defer cleanup() 416 + 417 + ctx := context.Background() 418 + 419 + err := ix.Index(ctx, models.Repo{ 420 + Id: 1, 421 + Did: "did:plc:alice", 422 + Name: "my-project", 423 + RepoStats: &models.RepoStats{}, 424 + }) 425 + require.NoError(t, err) 426 + 427 + // search for non-existent keyword 428 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 429 + Keywords: []string{"nonexistent"}, 430 + Page: pagination.Page{Limit: 10}, 431 + }) 432 + require.NoError(t, err) 433 + assert.Equal(t, uint64(0), result.Total) 434 + assert.Empty(t, result.Hits) 435 + 436 + // search for non-existent language 437 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 438 + Language: "NonexistentLanguage", 439 + Page: pagination.Page{Limit: 10}, 440 + }) 441 + require.NoError(t, err) 442 + assert.Equal(t, uint64(0), result.Total) 443 + assert.Empty(t, result.Hits) 444 + } 445 + 446 + func TestCombinedFilters(t *testing.T) { 447 + ix, cleanup := setupTestIndexer(t) 448 + defer cleanup() 449 + 450 + ctx := context.Background() 451 + 452 + err := ix.Index(ctx, 453 + models.Repo{ 454 + Id: 1, 455 + Did: 
"did:plc:alice", 456 + Name: "web-server", 457 + Knot: "example.com", 458 + Description: "A web server in Go", 459 + Topics: []string{"web", "server"}, 460 + RepoStats: &models.RepoStats{Language: "Go"}, 461 + }, 462 + models.Repo{ 463 + Id: 2, 464 + Did: "did:plc:bob", 465 + Name: "web-client", 466 + Knot: "example.org", 467 + Description: "A web client in Rust", 468 + Topics: []string{"web", "client"}, 469 + RepoStats: &models.RepoStats{Language: "Rust"}, 470 + }, 471 + models.Repo{ 472 + Id: 3, 473 + Did: "did:plc:alice", 474 + Name: "cli-tool", 475 + Knot: "example.com", 476 + Description: "A CLI tool in Go", 477 + Topics: []string{"cli", "tool"}, 478 + RepoStats: &models.RepoStats{Language: "Go"}, 479 + }, 480 + ) 481 + require.NoError(t, err) 482 + 483 + // combine language + topic + keyword 484 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 485 + Language: "Go", 486 + Topics: []string{"web"}, 487 + Keywords: []string{"server"}, 488 + Page: pagination.Page{Limit: 10}, 489 + }) 490 + require.NoError(t, err) 491 + assert.Equal(t, uint64(1), result.Total) 492 + assert.Contains(t, result.Hits, int64(1)) 493 + 494 + // combine did + language 495 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 496 + Did: "did:plc:alice", 497 + Language: "Go", 498 + Page: pagination.Page{Limit: 10}, 499 + }) 500 + require.NoError(t, err) 501 + assert.Equal(t, uint64(2), result.Total) 502 + assert.Contains(t, result.Hits, int64(1)) 503 + assert.Contains(t, result.Hits, int64(3)) 504 + 505 + // combine knot + language 506 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 507 + Knot: "example.com", 508 + Language: "Go", 509 + Page: pagination.Page{Limit: 10}, 510 + }) 511 + require.NoError(t, err) 512 + assert.Equal(t, uint64(2), result.Total) 513 + assert.Contains(t, result.Hits, int64(1)) 514 + assert.Contains(t, result.Hits, int64(3)) 515 + } 516 + 517 + func TestRepoWithoutLanguage(t *testing.T) { 518 + ix, cleanup := setupTestIndexer(t) 519 + defer 
cleanup() 520 + 521 + ctx := context.Background() 522 + 523 + err := ix.Index(ctx, 524 + models.Repo{ 525 + Id: 1, 526 + Did: "did:plc:alice", 527 + Name: "project-with-language", 528 + RepoStats: &models.RepoStats{Language: "Go"}, 529 + }, 530 + models.Repo{ 531 + Id: 2, 532 + Did: "did:plc:bob", 533 + Name: "project-without-language", 534 + RepoStats: &models.RepoStats{Language: ""}, 535 + }, 536 + ) 537 + require.NoError(t, err) 538 + 539 + // search without language filter should return both 540 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 541 + Keywords: []string{"project"}, 542 + Page: pagination.Page{Limit: 10}, 543 + }) 544 + require.NoError(t, err) 545 + assert.Equal(t, uint64(2), result.Total) 546 + 547 + // language filter should only return repo with language 548 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 549 + Language: "Go", 550 + Page: pagination.Page{Limit: 10}, 551 + }) 552 + require.NoError(t, err) 553 + assert.Equal(t, uint64(1), result.Total) 554 + assert.Contains(t, result.Hits, int64(1)) 555 + } 556 + 557 + func TestRepoWithoutTopics(t *testing.T) { 558 + ix, cleanup := setupTestIndexer(t) 559 + defer cleanup() 560 + 561 + ctx := context.Background() 562 + 563 + err := ix.Index(ctx, 564 + models.Repo{ 565 + Id: 1, 566 + Did: "did:plc:alice", 567 + Name: "project-with-topics", 568 + Topics: []string{"cli", "tool"}, 569 + RepoStats: &models.RepoStats{}, 570 + }, 571 + models.Repo{ 572 + Id: 2, 573 + Did: "did:plc:bob", 574 + Name: "project-without-topics", 575 + Topics: []string{}, 576 + RepoStats: &models.RepoStats{}, 577 + }, 578 + ) 579 + require.NoError(t, err) 580 + 581 + // topic filter should only return repo with topics 582 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 583 + Topics: []string{"cli"}, 584 + Page: pagination.Page{Limit: 10}, 585 + }) 586 + require.NoError(t, err) 587 + assert.Equal(t, uint64(1), result.Total) 588 + assert.Contains(t, result.Hits, int64(1)) 589 + 590 + // general search 
should return both 591 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 592 + Keywords: []string{"project"}, 593 + Page: pagination.Page{Limit: 10}, 594 + }) 595 + require.NoError(t, err) 596 + assert.Equal(t, uint64(2), result.Total) 597 + } 598 + 599 + func TestDelete(t *testing.T) { 600 + ix, cleanup := setupTestIndexer(t) 601 + defer cleanup() 602 + 603 + ctx := context.Background() 604 + 605 + err := ix.Index(ctx, 606 + models.Repo{ 607 + Id: 1, 608 + Did: "did:plc:alice", 609 + Name: "to-delete", 610 + RepoStats: &models.RepoStats{}, 611 + }, 612 + models.Repo{ 613 + Id: 2, 614 + Did: "did:plc:bob", 615 + Name: "to-keep", 616 + RepoStats: &models.RepoStats{}, 617 + }, 618 + ) 619 + require.NoError(t, err) 620 + 621 + // verify both exist 622 + result, err := ix.Search(ctx, models.RepoSearchOptions{ 623 + Page: pagination.Page{Limit: 10}, 624 + }) 625 + require.NoError(t, err) 626 + assert.Equal(t, uint64(2), result.Total) 627 + 628 + // delete repo 1 629 + err = ix.Delete(ctx, 1) 630 + require.NoError(t, err) 631 + 632 + // verify only one remains 633 + result, err = ix.Search(ctx, models.RepoSearchOptions{ 634 + Page: pagination.Page{Limit: 10}, 635 + }) 636 + require.NoError(t, err) 637 + assert.Equal(t, uint64(1), result.Total) 638 + assert.Contains(t, result.Hits, int64(2)) 639 + }
+5 -2
appview/issues/issues.go
··· 10 10 "time" 11 11 12 12 comatproto "github.com/bluesky-social/indigo/api/atproto" 13 + "github.com/bluesky-social/indigo/atproto/atclient" 13 - atpclient "github.com/bluesky-social/indigo/atproto/client" 14 14 "github.com/bluesky-social/indigo/atproto/syntax" 15 15 lexutil "github.com/bluesky-social/indigo/lex/util" 16 16 "github.com/go-chi/chi/v5" ··· 30 30 "tangled.org/core/appview/searchquery" 31 31 "tangled.org/core/appview/validator" 32 32 "tangled.org/core/idresolver" 33 + "tangled.org/core/ogre" 33 34 "tangled.org/core/orm" 34 35 "tangled.org/core/rbac" 35 36 "tangled.org/core/tid" ··· 48 49 logger *slog.Logger 49 50 validator *validator.Validator 50 51 indexer *issues_indexer.Indexer 52 + ogreClient *ogre.Client 51 53 } 52 54 53 55 func New( ··· 77 79 logger: logger, 78 80 validator: validator, 79 81 indexer: indexer, 82 + ogreClient: ogre.NewClient(config.Ogre.Host), 80 83 } 81 84 } 82 85 ··· 1098 1101 // this is used to rollback changes made to the PDS 1099 1102 // 1100 1103 // it is a no-op if the provided ATURI is empty 1104 + func rollbackRecord(ctx context.Context, aturi string, client *atclient.APIClient) error { 1101 - func rollbackRecord(ctx context.Context, aturi string, client *atpclient.APIClient) error { 1102 1105 if aturi == "" { 1103 1106 return nil 1104 1107 }
+36 -230
appview/issues/opengraph.go
··· 1 1 package issues 2 2 3 3 import ( 4 - "bytes" 5 4 "context" 6 - "fmt" 7 - "image" 8 - "image/color" 9 - "image/png" 10 5 "log" 11 6 "net/http" 7 + "time" 12 8 13 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/ogre" 14 - "tangled.org/core/appview/ogcard" 15 11 ) 16 12 13 + func (rp *Issues) IssueOpenGraphSummary(w http.ResponseWriter, r *http.Request) { 14 + f, err := rp.repoResolver.Resolve(r) 17 - func (rp *Issues) drawIssueSummaryCard(issue *models.Issue, repo *models.Repo, commentCount int, ownerHandle string) (*ogcard.Card, error) { 18 - width, height := ogcard.DefaultSize() 19 - mainCard, err := ogcard.NewCard(width, height) 20 - if err != nil { 21 - return nil, err 22 - } 23 - 24 - // Split: content area (75%) and status/stats area (25%) 25 - contentCard, statsArea := mainCard.Split(false, 75) 26 - 27 - // Add padding to content 28 - contentCard.SetMargin(50) 29 - 30 - // Split content horizontally: main content (80%) and avatar area (20%) 31 - mainContent, avatarArea := contentCard.Split(true, 80) 32 - 33 - // Add margin to main content like repo card 34 - mainContent.SetMargin(10) 35 - 36 - // Use full main content area for repo name and title 37 - bounds := mainContent.Img.Bounds() 38 - startX := bounds.Min.X + mainContent.Margin 39 - startY := bounds.Min.Y + mainContent.Margin 40 - 41 - // Draw full repository name at top (owner/repo format) 42 - var repoOwner string 43 - owner, err := rp.idResolver.ResolveIdent(context.Background(), repo.Did) 44 - if err != nil { 45 - repoOwner = repo.Did 46 - } else { 47 - repoOwner = "@" + owner.Handle.String() 48 - } 49 - 50 - fullRepoName := repoOwner + " / " + repo.Name 51 - if len(fullRepoName) > 60 { 52 - fullRepoName = fullRepoName[:60] + "…" 53 - } 54 - 55 - grayColor := color.RGBA{88, 96, 105, 255} 56 - err = mainContent.DrawTextAt(fullRepoName, startX, startY, grayColor, 36, ogcard.Top, ogcard.Left) 57 15 if err != nil { 16 + log.Println("failed to get repo and knot", err) 17 + return 58 - 
return nil, err 59 - } 60 - 61 - // Draw issue title below repo name with wrapping 62 - titleY := startY + 60 63 - titleX := startX 64 - 65 - // Truncate title if too long 66 - issueTitle := issue.Title 67 - maxTitleLength := 80 68 - if len(issueTitle) > maxTitleLength { 69 - issueTitle = issueTitle[:maxTitleLength] + "…" 70 - } 71 - 72 - // Create a temporary card for the title area to enable wrapping 73 - titleBounds := mainContent.Img.Bounds() 74 - titleWidth := titleBounds.Dx() - (startX - titleBounds.Min.X) - 20 // Leave some margin 75 - titleHeight := titleBounds.Dy() - (titleY - titleBounds.Min.Y) - 100 // Leave space for issue ID 76 - 77 - titleRect := image.Rect(titleX, titleY, titleX+titleWidth, titleY+titleHeight) 78 - titleCard := &ogcard.Card{ 79 - Img: mainContent.Img.SubImage(titleRect).(*image.RGBA), 80 - Font: mainContent.Font, 81 - Margin: 0, 82 18 } 83 19 20 + issue, ok := r.Context().Value("issue").(*models.Issue) 21 + if !ok { 22 + log.Println("issue not found in context") 23 + http.Error(w, "issue not found", http.StatusNotFound) 24 + return 84 - // Draw wrapped title 85 - lines, err := titleCard.DrawText(issueTitle, color.Black, 54, ogcard.Top, ogcard.Left) 86 - if err != nil { 87 - return nil, err 88 25 } 89 26 27 + var ownerHandle string 28 + owner, err := rp.idResolver.ResolveIdent(context.Background(), f.Did) 90 - // Calculate where title ends (number of lines * line height) 91 - lineHeight := 60 // Approximate line height for 54pt font 92 - titleEndY := titleY + (len(lines) * lineHeight) + 10 93 - 94 - // Draw issue ID in gray below the title 95 - issueIdText := fmt.Sprintf("#%d", issue.IssueId) 96 - err = mainContent.DrawTextAt(issueIdText, startX, titleEndY, grayColor, 54, ogcard.Top, ogcard.Left) 97 29 if err != nil { 30 + ownerHandle = f.Did 31 + } else { 32 + ownerHandle = owner.Handle.String() 98 - return nil, err 99 33 } 100 34 101 - // Get issue author handle (needed for avatar and metadata) 102 35 var authorHandle string 103 36 
author, err := rp.idResolver.ResolveIdent(context.Background(), issue.Did) 104 37 if err != nil { ··· 107 40 authorHandle = "@" + author.Handle.String() 108 41 } 109 42 43 + avatarUrl := rp.pages.AvatarUrl(authorHandle, "256") 110 - // Draw avatar circle on the right side 111 - avatarBounds := avatarArea.Img.Bounds() 112 - avatarSize := min(avatarBounds.Dx(), avatarBounds.Dy()) - 20 // Leave some margin 113 - if avatarSize > 220 { 114 - avatarSize = 220 115 - } 116 - avatarX := avatarBounds.Min.X + (avatarBounds.Dx() / 2) - (avatarSize / 2) 117 - avatarY := avatarBounds.Min.Y + 20 118 44 45 + status := "closed" 119 - // Get avatar URL for issue author 120 - avatarURL := rp.pages.AvatarUrl(authorHandle, "256") 121 - err = avatarArea.DrawCircularExternalImage(avatarURL, avatarX, avatarY, avatarSize) 122 - if err != nil { 123 - log.Printf("failed to draw avatar (non-fatal): %v", err) 124 - } 125 - 126 - // Split stats area: left side for status/comments (80%), right side for dolly (20%) 127 - statusArea, dollyArea := statsArea.Split(true, 80) 128 - 129 - // Draw status and comment count in status/comments area 130 - statsBounds := statusArea.Img.Bounds() 131 - statsX := statsBounds.Min.X + 60 // left padding 132 - statsY := statsBounds.Min.Y 133 - 134 - iconColor := color.RGBA{88, 96, 105, 255} 135 - iconSize := 36 136 - textSize := 36.0 137 - labelSize := 28.0 138 - iconBaselineOffset := int(textSize) / 2 139 - 140 - // Draw status (open/closed) with colored icon and text 141 - var statusIcon string 142 - var statusText string 143 - var statusColor color.RGBA 144 - 145 46 if issue.Open { 47 + status = "open" 146 - statusIcon = "circle-dot" 147 - statusText = "open" 148 - statusColor = color.RGBA{34, 139, 34, 255} // green 149 - } else { 150 - statusIcon = "ban" 151 - statusText = "closed" 152 - statusColor = color.RGBA{52, 58, 64, 255} // dark gray 153 - } 154 - 155 - statusTextWidth := statusArea.TextWidth(statusText, textSize) 156 - badgePadding := 12 157 - 
badgeHeight := int(textSize) + (badgePadding * 2) 158 - badgeWidth := iconSize + badgePadding + statusTextWidth + (badgePadding * 2) 159 - cornerRadius := 8 160 - badgeX := 60 161 - badgeY := 0 162 - 163 - statusArea.DrawRoundedRect(badgeX, badgeY, badgeWidth, badgeHeight, cornerRadius, statusColor) 164 - 165 - whiteColor := color.RGBA{255, 255, 255, 255} 166 - iconX := statsX + badgePadding 167 - iconY := statsY + (badgeHeight-iconSize)/2 168 - err = statusArea.DrawLucideIcon(statusIcon, iconX, iconY, iconSize, whiteColor) 169 - if err != nil { 170 - log.Printf("failed to draw status icon: %v", err) 171 - } 172 - 173 - textX := statsX + badgePadding + iconSize + badgePadding 174 - textY := statsY + (badgeHeight-int(textSize))/2 - 5 175 - err = statusArea.DrawTextAt(statusText, textX, textY, whiteColor, textSize, ogcard.Top, ogcard.Left) 176 - if err != nil { 177 - log.Printf("failed to draw status text: %v", err) 178 - } 179 - 180 - currentX := statsX + badgeWidth + 50 181 - 182 - // Draw comment count 183 - err = statusArea.DrawLucideIcon("message-square", currentX, iconY, iconSize, iconColor) 184 - if err != nil { 185 - log.Printf("failed to draw comment icon: %v", err) 186 - } 187 - 188 - currentX += iconSize + 15 189 - commentText := fmt.Sprintf("%d comments", commentCount) 190 - if commentCount == 1 { 191 - commentText = "1 comment" 192 - } 193 - err = statusArea.DrawTextAt(commentText, currentX, textY, iconColor, textSize, ogcard.Top, ogcard.Left) 194 - if err != nil { 195 - log.Printf("failed to draw comment text: %v", err) 196 - } 197 - 198 - // Draw dolly logo on the right side 199 - dollyBounds := dollyArea.Img.Bounds() 200 - dollySize := 90 201 - dollyX := dollyBounds.Min.X + (dollyBounds.Dx() / 2) - (dollySize / 2) 202 - dollyY := statsY + iconBaselineOffset - dollySize/2 + 25 203 - dollyColor := color.RGBA{180, 180, 180, 255} // light gray 204 - err = dollyArea.DrawDolly(dollyX, dollyY, dollySize, dollyColor) 205 - if err != nil { 206 - 
log.Printf("dolly not available (this is ok): %v", err) 207 - } 208 - 209 - // Draw "opened by @author" and date at the bottom with more spacing 210 - labelY := statsY + iconSize + 30 211 - 212 - // Format the opened date 213 - openedDate := issue.Created.Format("Jan 2, 2006") 214 - metaText := fmt.Sprintf("opened by %s · %s", authorHandle, openedDate) 215 - 216 - err = statusArea.DrawTextAt(metaText, statsX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Left) 217 - if err != nil { 218 - log.Printf("failed to draw metadata: %v", err) 219 - } 220 - 221 - return mainCard, nil 222 - } 223 - 224 - func (rp *Issues) IssueOpenGraphSummary(w http.ResponseWriter, r *http.Request) { 225 - f, err := rp.repoResolver.Resolve(r) 226 - if err != nil { 227 - log.Println("failed to get repo and knot", err) 228 - return 229 48 } 230 49 231 - issue, ok := r.Context().Value("issue").(*models.Issue) 232 - if !ok { 233 - log.Println("issue not found in context") 234 - http.Error(w, "issue not found", http.StatusNotFound) 235 - return 236 - } 237 - 238 - // Get comment count 239 50 commentCount := len(issue.Comments) 240 51 52 + payload := ogre.IssueCardPayload{ 53 + Type: "issue", 54 + RepoName: f.Name, 55 + OwnerHandle: ownerHandle, 56 + AvatarUrl: avatarUrl, 57 + Title: issue.Title, 58 + IssueNumber: issue.IssueId, 59 + Status: status, 60 + Labels: []ogre.LabelData{}, 61 + CommentCount: commentCount, 62 + ReactionCount: 0, 63 + CreatedAt: issue.Created.Format(time.RFC3339), 241 - // Get owner handle for avatar 242 - var ownerHandle string 243 - owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Did) 244 - if err != nil { 245 - ownerHandle = f.Did 246 - } else { 247 - ownerHandle = "@" + owner.Handle.String() 248 - } 249 - 250 - card, err := rp.drawIssueSummaryCard(issue, f, commentCount, ownerHandle) 251 - if err != nil { 252 - log.Println("failed to draw issue summary card", err) 253 - http.Error(w, "failed to draw issue summary card", http.StatusInternalServerError) 254 - 
return 255 64 } 256 65 66 + imageBytes, err := rp.ogreClient.RenderIssueCard(r.Context(), payload) 257 - var imageBuffer bytes.Buffer 258 - err = png.Encode(&imageBuffer, card.Img) 259 67 if err != nil { 68 + log.Println("failed to render issue card", err) 69 + http.Error(w, "failed to render issue card", http.StatusInternalServerError) 260 - log.Println("failed to encode issue summary card", err) 261 - http.Error(w, "failed to encode issue summary card", http.StatusInternalServerError) 262 70 return 263 71 } 264 - 265 - imageBytes := imageBuffer.Bytes() 266 72 267 73 w.Header().Set("Content-Type", "image/png") 74 + w.Header().Set("Cache-Control", "public, max-age=3600") 268 - w.Header().Set("Cache-Control", "public, max-age=3600") // 1 hour 269 75 w.WriteHeader(http.StatusOK) 270 76 _, err = w.Write(imageBytes) 271 77 if err != nil { 78 + log.Println("failed to write issue card", err) 272 - log.Println("failed to write issue summary card", err) 273 79 return 274 80 } 275 81 }
-1
appview/knots/knots.go
··· 112 112 113 113 repos, err := db.GetRepos( 114 114 k.Db, 115 - 0, 116 115 orm.FilterEq("knot", domain), 117 116 ) 118 117 if err != nil {
+2 -2
appview/labels/labels.go
··· 22 22 "tangled.org/core/tid" 23 23 24 24 comatproto "github.com/bluesky-social/indigo/api/atproto" 25 + "github.com/bluesky-social/indigo/atproto/atclient" 25 - atpclient "github.com/bluesky-social/indigo/atproto/client" 26 26 "github.com/bluesky-social/indigo/atproto/syntax" 27 27 lexutil "github.com/bluesky-social/indigo/lex/util" 28 28 "github.com/go-chi/chi/v5" ··· 269 269 // this is used to rollback changes made to the PDS 270 270 // 271 271 // it is a no-op if the provided ATURI is empty 272 + func rollbackRecord(ctx context.Context, aturi string, client *atclient.APIClient) error { 272 - func rollbackRecord(ctx context.Context, aturi string, client *atpclient.APIClient) error { 273 273 if aturi == "" { 274 274 return nil 275 275 }
+23
appview/models/search.go
··· 53 53 len(o.LabelValues) > 0 || len(o.NegatedLabelValues) > 0 || 54 54 len(o.NegatedKeywords) > 0 || len(o.NegatedPhrases) > 0 55 55 } 56 + 57 + type RepoSearchOptions struct { 58 + Keywords []string // text search across name, description, website, topics 59 + Phrases []string // phrase search 60 + 61 + Language string // exact match on primary language 62 + Knot string // filter by knot domain 63 + Did string // filter by owner DID 64 + Topics []string // exact topic matches 65 + 66 + NegatedKeywords []string 67 + NegatedPhrases []string 68 + NegatedTopics []string 69 + 70 + Page pagination.Page 71 + } 72 + 73 + func (o *RepoSearchOptions) HasSearchFilters() bool { 74 + return len(o.Keywords) > 0 || len(o.Phrases) > 0 || 75 + o.Language != "" || o.Did != "" || 76 + len(o.Topics) > 0 || len(o.NegatedTopics) > 0 || 77 + len(o.NegatedKeywords) > 0 || len(o.NegatedPhrases) > 0 78 + }
+3 -3
appview/oauth/oauth.go
··· 11 11 "time" 12 12 13 13 comatproto "github.com/bluesky-social/indigo/api/atproto" 14 + "github.com/bluesky-social/indigo/atproto/atclient" 15 + "github.com/bluesky-social/indigo/atproto/atcrypto" 14 16 "github.com/bluesky-social/indigo/atproto/auth/oauth" 15 - atpclient "github.com/bluesky-social/indigo/atproto/client" 16 - atcrypto "github.com/bluesky-social/indigo/atproto/crypto" 17 17 "github.com/bluesky-social/indigo/atproto/syntax" 18 18 xrpc "github.com/bluesky-social/indigo/xrpc" 19 19 "github.com/gorilla/sessions" ··· 262 262 return "" 263 263 } 264 264 265 + func (o *OAuth) AuthorizedClient(r *http.Request) (*atclient.APIClient, error) { 265 - func (o *OAuth) AuthorizedClient(r *http.Request) (*atpclient.APIClient, error) { 266 266 session, err := o.ResumeSession(r) 267 267 if err != nil { 268 268 return nil, fmt.Errorf("error getting session: %w", err)
appview/oauth/scopes.go

This file has not been changed.

-640
appview/ogcard/card.go
··· 1 - // Copyright 2024 The Forgejo Authors. All rights reserved. 2 - // Copyright 2025 The Tangled Authors -- repurposed for Tangled use. 3 - // SPDX-License-Identifier: MIT 4 - 5 - package ogcard 6 - 7 - import ( 8 - "bytes" 9 - "fmt" 10 - "html/template" 11 - "image" 12 - "image/color" 13 - "io" 14 - "log" 15 - "math" 16 - "net/http" 17 - "strings" 18 - "sync" 19 - "time" 20 - 21 - "github.com/goki/freetype" 22 - "github.com/goki/freetype/truetype" 23 - "github.com/srwiley/oksvg" 24 - "github.com/srwiley/rasterx" 25 - "golang.org/x/image/draw" 26 - "golang.org/x/image/font" 27 - "tangled.org/core/appview/pages" 28 - 29 - _ "golang.org/x/image/webp" // for processing webp images 30 - ) 31 - 32 - type Card struct { 33 - Img *image.RGBA 34 - Font *truetype.Font 35 - Margin int 36 - Width int 37 - Height int 38 - } 39 - 40 - var fontCache = sync.OnceValues(func() (*truetype.Font, error) { 41 - interVar, err := pages.Files.ReadFile("static/fonts/InterVariable.ttf") 42 - if err != nil { 43 - return nil, err 44 - } 45 - return truetype.Parse(interVar) 46 - }) 47 - 48 - // DefaultSize returns the default size for a card 49 - func DefaultSize() (int, int) { 50 - return 1200, 630 51 - } 52 - 53 - // NewCard creates a new card with the given dimensions in pixels 54 - func NewCard(width, height int) (*Card, error) { 55 - img := image.NewRGBA(image.Rect(0, 0, width, height)) 56 - draw.Draw(img, img.Bounds(), image.NewUniform(color.White), image.Point{}, draw.Src) 57 - 58 - font, err := fontCache() 59 - if err != nil { 60 - return nil, err 61 - } 62 - 63 - return &Card{ 64 - Img: img, 65 - Font: font, 66 - Margin: 0, 67 - Width: width, 68 - Height: height, 69 - }, nil 70 - } 71 - 72 - // Split splits the card horizontally or vertically by a given percentage; the first card returned has the percentage 73 - // size, and the second card has the remainder. Both cards draw to a subsection of the same image buffer. 
74 - func (c *Card) Split(vertical bool, percentage int) (*Card, *Card) { 75 - bounds := c.Img.Bounds() 76 - bounds = image.Rect(bounds.Min.X+c.Margin, bounds.Min.Y+c.Margin, bounds.Max.X-c.Margin, bounds.Max.Y-c.Margin) 77 - if vertical { 78 - mid := (bounds.Dx() * percentage / 100) + bounds.Min.X 79 - subleft := c.Img.SubImage(image.Rect(bounds.Min.X, bounds.Min.Y, mid, bounds.Max.Y)).(*image.RGBA) 80 - subright := c.Img.SubImage(image.Rect(mid, bounds.Min.Y, bounds.Max.X, bounds.Max.Y)).(*image.RGBA) 81 - return &Card{Img: subleft, Font: c.Font, Width: subleft.Bounds().Dx(), Height: subleft.Bounds().Dy()}, 82 - &Card{Img: subright, Font: c.Font, Width: subright.Bounds().Dx(), Height: subright.Bounds().Dy()} 83 - } 84 - mid := (bounds.Dy() * percentage / 100) + bounds.Min.Y 85 - subtop := c.Img.SubImage(image.Rect(bounds.Min.X, bounds.Min.Y, bounds.Max.X, mid)).(*image.RGBA) 86 - subbottom := c.Img.SubImage(image.Rect(bounds.Min.X, mid, bounds.Max.X, bounds.Max.Y)).(*image.RGBA) 87 - return &Card{Img: subtop, Font: c.Font, Width: subtop.Bounds().Dx(), Height: subtop.Bounds().Dy()}, 88 - &Card{Img: subbottom, Font: c.Font, Width: subbottom.Bounds().Dx(), Height: subbottom.Bounds().Dy()} 89 - } 90 - 91 - // SetMargin sets the margins for the card 92 - func (c *Card) SetMargin(margin int) { 93 - c.Margin = margin 94 - } 95 - 96 - type ( 97 - VAlign int64 98 - HAlign int64 99 - ) 100 - 101 - const ( 102 - Top VAlign = iota 103 - Middle 104 - Bottom 105 - ) 106 - 107 - const ( 108 - Left HAlign = iota 109 - Center 110 - Right 111 - ) 112 - 113 - // DrawText draws text within the card, respecting margins and alignment 114 - func (c *Card) DrawText(text string, textColor color.Color, sizePt float64, valign VAlign, halign HAlign) ([]string, error) { 115 - ft := freetype.NewContext() 116 - ft.SetDPI(72) 117 - ft.SetFont(c.Font) 118 - ft.SetFontSize(sizePt) 119 - ft.SetClip(c.Img.Bounds()) 120 - ft.SetDst(c.Img) 121 - ft.SetSrc(image.NewUniform(textColor)) 122 - 123 - face 
:= truetype.NewFace(c.Font, &truetype.Options{Size: sizePt, DPI: 72}) 124 - fontHeight := ft.PointToFixed(sizePt).Ceil() 125 - 126 - bounds := c.Img.Bounds() 127 - bounds = image.Rect(bounds.Min.X+c.Margin, bounds.Min.Y+c.Margin, bounds.Max.X-c.Margin, bounds.Max.Y-c.Margin) 128 - boxWidth, boxHeight := bounds.Size().X, bounds.Size().Y 129 - // draw.Draw(c.Img, bounds, image.NewUniform(color.Gray{128}), image.Point{}, draw.Src) // Debug draw box 130 - 131 - // Try to apply wrapping to this text; we'll find the most text that will fit into one line, record that line, move 132 - // on. We precalculate each line before drawing so that we can support valign="middle" correctly which requires 133 - // knowing the total height, which is related to how many lines we'll have. 134 - lines := make([]string, 0) 135 - textWords := strings.Split(text, " ") 136 - currentLine := "" 137 - heightTotal := 0 138 - 139 - for { 140 - if len(textWords) == 0 { 141 - // Ran out of words. 142 - if currentLine != "" { 143 - heightTotal += fontHeight 144 - lines = append(lines, currentLine) 145 - } 146 - break 147 - } 148 - 149 - nextWord := textWords[0] 150 - proposedLine := currentLine 151 - if proposedLine != "" { 152 - proposedLine += " " 153 - } 154 - proposedLine += nextWord 155 - 156 - proposedLineWidth := font.MeasureString(face, proposedLine) 157 - if proposedLineWidth.Ceil() > boxWidth { 158 - // no, proposed line is too big; we'll use the last "currentLine" 159 - heightTotal += fontHeight 160 - if currentLine != "" { 161 - lines = append(lines, currentLine) 162 - currentLine = "" 163 - // leave nextWord in textWords and keep going 164 - } else { 165 - // just nextWord by itself doesn't fit on a line; well, we can't skip it, but we'll consume it 166 - // regardless as a line by itself. It will be clipped by the drawing routine. 
167 - lines = append(lines, nextWord) 168 - textWords = textWords[1:] 169 - } 170 - } else { 171 - // yes, it will fit 172 - currentLine = proposedLine 173 - textWords = textWords[1:] 174 - } 175 - } 176 - 177 - textY := 0 178 - switch valign { 179 - case Top: 180 - textY = fontHeight 181 - case Bottom: 182 - textY = boxHeight - heightTotal + fontHeight 183 - case Middle: 184 - textY = ((boxHeight - heightTotal) / 2) + fontHeight 185 - } 186 - 187 - for _, line := range lines { 188 - lineWidth := font.MeasureString(face, line) 189 - 190 - textX := 0 191 - switch halign { 192 - case Left: 193 - textX = 0 194 - case Right: 195 - textX = boxWidth - lineWidth.Ceil() 196 - case Center: 197 - textX = (boxWidth - lineWidth.Ceil()) / 2 198 - } 199 - 200 - pt := freetype.Pt(bounds.Min.X+textX, bounds.Min.Y+textY) 201 - _, err := ft.DrawString(line, pt) 202 - if err != nil { 203 - return nil, err 204 - } 205 - 206 - textY += fontHeight 207 - } 208 - 209 - return lines, nil 210 - } 211 - 212 - // DrawTextAt draws text at a specific position with the given alignment 213 - func (c *Card) DrawTextAt(text string, x, y int, textColor color.Color, sizePt float64, valign VAlign, halign HAlign) error { 214 - _, err := c.DrawTextAtWithWidth(text, x, y, textColor, sizePt, valign, halign) 215 - return err 216 - } 217 - 218 - // DrawTextAtWithWidth draws text at a specific position and returns the text width 219 - func (c *Card) DrawTextAtWithWidth(text string, x, y int, textColor color.Color, sizePt float64, valign VAlign, halign HAlign) (int, error) { 220 - ft := freetype.NewContext() 221 - ft.SetDPI(72) 222 - ft.SetFont(c.Font) 223 - ft.SetFontSize(sizePt) 224 - ft.SetClip(c.Img.Bounds()) 225 - ft.SetDst(c.Img) 226 - ft.SetSrc(image.NewUniform(textColor)) 227 - 228 - face := truetype.NewFace(c.Font, &truetype.Options{Size: sizePt, DPI: 72}) 229 - fontHeight := ft.PointToFixed(sizePt).Ceil() 230 - lineWidth := font.MeasureString(face, text) 231 - textWidth := lineWidth.Ceil() 232 - 233 
- // Adjust position based on alignment 234 - adjustedX := x 235 - adjustedY := y 236 - 237 - switch halign { 238 - case Left: 239 - // x is already at the left position 240 - case Right: 241 - adjustedX = x - textWidth 242 - case Center: 243 - adjustedX = x - textWidth/2 244 - } 245 - 246 - switch valign { 247 - case Top: 248 - adjustedY = y + fontHeight 249 - case Bottom: 250 - adjustedY = y 251 - case Middle: 252 - adjustedY = y + fontHeight/2 253 - } 254 - 255 - pt := freetype.Pt(adjustedX, adjustedY) 256 - _, err := ft.DrawString(text, pt) 257 - return textWidth, err 258 - } 259 - 260 - func (c *Card) FontHeight(sizePt float64) int { 261 - ft := freetype.NewContext() 262 - ft.SetDPI(72) 263 - ft.SetFont(c.Font) 264 - ft.SetFontSize(sizePt) 265 - return ft.PointToFixed(sizePt).Ceil() 266 - } 267 - 268 - func (c *Card) TextWidth(text string, sizePt float64) int { 269 - face := truetype.NewFace(c.Font, &truetype.Options{Size: sizePt, DPI: 72}) 270 - lineWidth := font.MeasureString(face, text) 271 - textWidth := lineWidth.Ceil() 272 - return textWidth 273 - } 274 - 275 - // DrawBoldText draws bold text by rendering multiple times with slight offsets 276 - func (c *Card) DrawBoldText(text string, x, y int, textColor color.Color, sizePt float64, valign VAlign, halign HAlign) (int, error) { 277 - // Draw the text multiple times with slight offsets to create bold effect 278 - offsets := []struct{ dx, dy int }{ 279 - {0, 0}, // original 280 - {1, 0}, // right 281 - {0, 1}, // down 282 - {1, 1}, // diagonal 283 - } 284 - 285 - var width int 286 - for _, offset := range offsets { 287 - w, err := c.DrawTextAtWithWidth(text, x+offset.dx, y+offset.dy, textColor, sizePt, valign, halign) 288 - if err != nil { 289 - return 0, err 290 - } 291 - if width == 0 { 292 - width = w 293 - } 294 - } 295 - return width, nil 296 - } 297 - 298 - func BuildSVGIconFromData(svgData []byte, iconColor color.Color) (*oksvg.SvgIcon, error) { 299 - // Convert color to hex string for SVG 300 - 
rgba, isRGBA := iconColor.(color.RGBA) 301 - if !isRGBA { 302 - r, g, b, a := iconColor.RGBA() 303 - rgba = color.RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} 304 - } 305 - colorHex := fmt.Sprintf("#%02x%02x%02x", rgba.R, rgba.G, rgba.B) 306 - 307 - // Replace currentColor with our desired color in the SVG 308 - svgString := string(svgData) 309 - svgString = strings.ReplaceAll(svgString, "currentColor", colorHex) 310 - 311 - // Make the stroke thicker 312 - svgString = strings.ReplaceAll(svgString, `stroke-width="2"`, `stroke-width="3"`) 313 - 314 - // Parse SVG 315 - icon, err := oksvg.ReadIconStream(strings.NewReader(svgString)) 316 - if err != nil { 317 - return nil, fmt.Errorf("failed to parse SVG: %w", err) 318 - } 319 - 320 - return icon, nil 321 - } 322 - 323 - func BuildSVGIconFromPath(svgPath string, iconColor color.Color) (*oksvg.SvgIcon, error) { 324 - svgData, err := pages.Files.ReadFile(svgPath) 325 - if err != nil { 326 - return nil, fmt.Errorf("failed to read SVG file %s: %w", svgPath, err) 327 - } 328 - 329 - icon, err := BuildSVGIconFromData(svgData, iconColor) 330 - if err != nil { 331 - return nil, fmt.Errorf("failed to build SVG icon %s: %w", svgPath, err) 332 - } 333 - 334 - return icon, nil 335 - } 336 - 337 - func BuildLucideIcon(name string, iconColor color.Color) (*oksvg.SvgIcon, error) { 338 - return BuildSVGIconFromPath(fmt.Sprintf("static/icons/%s.svg", name), iconColor) 339 - } 340 - 341 - func (c *Card) DrawLucideIcon(name string, x, y, size int, iconColor color.Color) error { 342 - icon, err := BuildSVGIconFromPath(fmt.Sprintf("static/icons/%s.svg", name), iconColor) 343 - if err != nil { 344 - return err 345 - } 346 - 347 - c.DrawSVGIcon(icon, x, y, size) 348 - 349 - return nil 350 - } 351 - 352 - func (c *Card) DrawDolly(x, y, size int, iconColor color.Color) error { 353 - tpl, err := template.New("dolly"). 
354 - ParseFS(pages.Files, "templates/fragments/dolly/logo.html") 355 - if err != nil { 356 - return fmt.Errorf("failed to read dolly template: %w", err) 357 - } 358 - 359 - var svgData bytes.Buffer 360 - if err = tpl.ExecuteTemplate(&svgData, "fragments/dolly/logo", nil); err != nil { 361 - return fmt.Errorf("failed to execute dolly template: %w", err) 362 - } 363 - 364 - icon, err := BuildSVGIconFromData(svgData.Bytes(), iconColor) 365 - if err != nil { 366 - return err 367 - } 368 - 369 - c.DrawSVGIcon(icon, x, y, size) 370 - 371 - return nil 372 - } 373 - 374 - // DrawSVGIcon draws an SVG icon from the embedded files at the specified position 375 - func (c *Card) DrawSVGIcon(icon *oksvg.SvgIcon, x, y, size int) { 376 - // Set the icon size 377 - w, h := float64(size), float64(size) 378 - icon.SetTarget(0, 0, w, h) 379 - 380 - // Create a temporary RGBA image for the icon 381 - iconImg := image.NewRGBA(image.Rect(0, 0, size, size)) 382 - 383 - // Create scanner and rasterizer 384 - scanner := rasterx.NewScannerGV(size, size, iconImg, iconImg.Bounds()) 385 - raster := rasterx.NewDasher(size, size, scanner) 386 - 387 - // Draw the icon 388 - icon.Draw(raster, 1.0) 389 - 390 - // Draw the icon onto the card at the specified position 391 - bounds := c.Img.Bounds() 392 - destRect := image.Rect(x, y, x+size, y+size) 393 - 394 - // Make sure we don't draw outside the card bounds 395 - if destRect.Max.X > bounds.Max.X { 396 - destRect.Max.X = bounds.Max.X 397 - } 398 - if destRect.Max.Y > bounds.Max.Y { 399 - destRect.Max.Y = bounds.Max.Y 400 - } 401 - 402 - draw.Draw(c.Img, destRect, iconImg, image.Point{}, draw.Over) 403 - } 404 - 405 - // DrawImage fills the card with an image, scaled to maintain the original aspect ratio and centered with respect to the non-filled dimension 406 - func (c *Card) DrawImage(img image.Image) { 407 - bounds := c.Img.Bounds() 408 - targetRect := image.Rect(bounds.Min.X+c.Margin, bounds.Min.Y+c.Margin, bounds.Max.X-c.Margin, 
bounds.Max.Y-c.Margin) 409 - srcBounds := img.Bounds() 410 - srcAspect := float64(srcBounds.Dx()) / float64(srcBounds.Dy()) 411 - targetAspect := float64(targetRect.Dx()) / float64(targetRect.Dy()) 412 - 413 - var scale float64 414 - if srcAspect > targetAspect { 415 - // Image is wider than target, scale by width 416 - scale = float64(targetRect.Dx()) / float64(srcBounds.Dx()) 417 - } else { 418 - // Image is taller or equal, scale by height 419 - scale = float64(targetRect.Dy()) / float64(srcBounds.Dy()) 420 - } 421 - 422 - newWidth := int(math.Round(float64(srcBounds.Dx()) * scale)) 423 - newHeight := int(math.Round(float64(srcBounds.Dy()) * scale)) 424 - 425 - // Center the image within the target rectangle 426 - offsetX := (targetRect.Dx() - newWidth) / 2 427 - offsetY := (targetRect.Dy() - newHeight) / 2 428 - 429 - scaledRect := image.Rect(targetRect.Min.X+offsetX, targetRect.Min.Y+offsetY, targetRect.Min.X+offsetX+newWidth, targetRect.Min.Y+offsetY+newHeight) 430 - draw.CatmullRom.Scale(c.Img, scaledRect, img, srcBounds, draw.Over, nil) 431 - } 432 - 433 - func fallbackImage() image.Image { 434 - // can't usage image.Uniform(color.White) because it's infinitely sized causing a panic in the scaler in DrawImage 435 - img := image.NewRGBA(image.Rect(0, 0, 1, 1)) 436 - img.Set(0, 0, color.White) 437 - return img 438 - } 439 - 440 - // As defensively as possible, attempt to load an image from a presumed external and untrusted URL 441 - func (c *Card) fetchExternalImage(url string) (image.Image, bool) { 442 - // Use a short timeout; in the event of any failure we'll be logging and returning a placeholder, but we don't want 443 - // this rendering process to be slowed down 444 - client := &http.Client{ 445 - Timeout: 1 * time.Second, // 1 second timeout 446 - } 447 - 448 - resp, err := client.Get(url) 449 - if err != nil { 450 - log.Printf("error when fetching external image from %s: %v", url, err) 451 - return nil, false 452 - } 453 - defer resp.Body.Close() 454 
- 455 - if resp.StatusCode != http.StatusOK { 456 - log.Printf("non-OK error code when fetching external image from %s: %s", url, resp.Status) 457 - return nil, false 458 - } 459 - 460 - contentType := resp.Header.Get("Content-Type") 461 - 462 - body := resp.Body 463 - bodyBytes, err := io.ReadAll(body) 464 - if err != nil { 465 - log.Printf("error when fetching external image from %s: %v", url, err) 466 - return nil, false 467 - } 468 - 469 - // Handle SVG separately 470 - if contentType == "image/svg+xml" || strings.HasSuffix(url, ".svg") { 471 - return convertSVGToPNG(bodyBytes) 472 - } 473 - 474 - // Support content types are in-sync with the allowed custom avatar file types 475 - if contentType != "image/png" && contentType != "image/jpeg" && contentType != "image/gif" && contentType != "image/webp" { 476 - log.Printf("fetching external image returned unsupported Content-Type which was ignored: %s", contentType) 477 - return nil, false 478 - } 479 - 480 - bodyBuffer := bytes.NewReader(bodyBytes) 481 - _, imgType, err := image.DecodeConfig(bodyBuffer) 482 - if err != nil { 483 - log.Printf("error when decoding external image from %s: %v", url, err) 484 - return nil, false 485 - } 486 - 487 - // Verify that we have a match between actual data understood in the image body and the reported Content-Type 488 - if (contentType == "image/png" && imgType != "png") || 489 - (contentType == "image/jpeg" && imgType != "jpeg") || 490 - (contentType == "image/gif" && imgType != "gif") || 491 - (contentType == "image/webp" && imgType != "webp") { 492 - log.Printf("while fetching external image, mismatched image body (%s) and Content-Type (%s)", imgType, contentType) 493 - return nil, false 494 - } 495 - 496 - _, err = bodyBuffer.Seek(0, io.SeekStart) // reset for actual decode 497 - if err != nil { 498 - log.Printf("error w/ bodyBuffer.Seek") 499 - return nil, false 500 - } 501 - img, _, err := image.Decode(bodyBuffer) 502 - if err != nil { 503 - log.Printf("error when 
decoding external image from %s: %v", url, err) 504 - return nil, false 505 - } 506 - 507 - return img, true 508 - } 509 - 510 - // convertSVGToPNG converts SVG data to a PNG image 511 - func convertSVGToPNG(svgData []byte) (image.Image, bool) { 512 - // Parse the SVG 513 - icon, err := oksvg.ReadIconStream(bytes.NewReader(svgData)) 514 - if err != nil { 515 - log.Printf("error parsing SVG: %v", err) 516 - return nil, false 517 - } 518 - 519 - // Set a reasonable size for the rasterized image 520 - width := 256 521 - height := 256 522 - icon.SetTarget(0, 0, float64(width), float64(height)) 523 - 524 - // Create an image to draw on 525 - rgba := image.NewRGBA(image.Rect(0, 0, width, height)) 526 - 527 - // Fill with white background 528 - draw.Draw(rgba, rgba.Bounds(), &image.Uniform{color.White}, image.Point{}, draw.Src) 529 - 530 - // Create a scanner and rasterize the SVG 531 - scanner := rasterx.NewScannerGV(width, height, rgba, rgba.Bounds()) 532 - raster := rasterx.NewDasher(width, height, scanner) 533 - 534 - icon.Draw(raster, 1.0) 535 - 536 - return rgba, true 537 - } 538 - 539 - func (c *Card) DrawExternalImage(url string) { 540 - image, ok := c.fetchExternalImage(url) 541 - if !ok { 542 - image = fallbackImage() 543 - } 544 - c.DrawImage(image) 545 - } 546 - 547 - // DrawCircularExternalImage draws an external image as a circle at the specified position 548 - func (c *Card) DrawCircularExternalImage(url string, x, y, size int) error { 549 - img, ok := c.fetchExternalImage(url) 550 - if !ok { 551 - img = fallbackImage() 552 - } 553 - 554 - // Create a circular mask 555 - circle := image.NewRGBA(image.Rect(0, 0, size, size)) 556 - center := size / 2 557 - radius := float64(size / 2) 558 - 559 - // Scale the source image to fit the circle 560 - srcBounds := img.Bounds() 561 - scaledImg := image.NewRGBA(image.Rect(0, 0, size, size)) 562 - draw.CatmullRom.Scale(scaledImg, scaledImg.Bounds(), img, srcBounds, draw.Src, nil) 563 - 564 - // Draw the image with 
circular clipping 565 - for cy := range size { 566 - for cx := range size { 567 - // Calculate distance from center 568 - dx := float64(cx - center) 569 - dy := float64(cy - center) 570 - distance := math.Sqrt(dx*dx + dy*dy) 571 - 572 - // Only draw pixels within the circle 573 - if distance <= radius { 574 - circle.Set(cx, cy, scaledImg.At(cx, cy)) 575 - } 576 - } 577 - } 578 - 579 - // Draw the circle onto the card 580 - bounds := c.Img.Bounds() 581 - destRect := image.Rect(x, y, x+size, y+size) 582 - 583 - // Make sure we don't draw outside the card bounds 584 - if destRect.Max.X > bounds.Max.X { 585 - destRect.Max.X = bounds.Max.X 586 - } 587 - if destRect.Max.Y > bounds.Max.Y { 588 - destRect.Max.Y = bounds.Max.Y 589 - } 590 - 591 - draw.Draw(c.Img, destRect, circle, image.Point{}, draw.Over) 592 - 593 - return nil 594 - } 595 - 596 - // DrawRect draws a rect with the given color 597 - func (c *Card) DrawRect(startX, startY, endX, endY int, color color.Color) { 598 - draw.Draw(c.Img, image.Rect(startX, startY, endX, endY), &image.Uniform{color}, image.Point{}, draw.Src) 599 - } 600 - 601 - // drawRoundedRect draws a filled rounded rectangle on the given card 602 - func (card *Card) DrawRoundedRect(x, y, width, height, cornerRadius int, fillColor color.RGBA) { 603 - cardBounds := card.Img.Bounds() 604 - for py := y; py < y+height; py++ { 605 - for px := x; px < x+width; px++ { 606 - // calculate distance from corners 607 - dx := 0 608 - dy := 0 609 - 610 - // check which corner region we're in 611 - if px < x+cornerRadius && py < y+cornerRadius { 612 - // top-left corner 613 - dx = x + cornerRadius - px 614 - dy = y + cornerRadius - py 615 - } else if px >= x+width-cornerRadius && py < y+cornerRadius { 616 - // top-right corner 617 - dx = px - (x + width - cornerRadius - 1) 618 - dy = y + cornerRadius - py 619 - } else if px < x+cornerRadius && py >= y+height-cornerRadius { 620 - // bottom-left corner 621 - dx = x + cornerRadius - px 622 - dy = py - (y + height 
- cornerRadius - 1) 623 - } else if px >= x+width-cornerRadius && py >= y+height-cornerRadius { 624 - // Bottom-right corner 625 - dx = px - (x + width - cornerRadius - 1) 626 - dy = py - (y + height - cornerRadius - 1) 627 - } 628 - 629 - // if we're in a corner, check if we're within the radius 630 - inCorner := (dx > 0 || dy > 0) 631 - withinRadius := dx*dx+dy*dy <= cornerRadius*cornerRadius 632 - 633 - // draw pixel if not in corner, or in corner and within radius 634 - // check bounds relative to the card's image bounds 635 - if (!inCorner || withinRadius) && px >= 0 && px < cardBounds.Dx() && py >= 0 && py < cardBounds.Dy() { 636 - card.Img.Set(px+cardBounds.Min.X, py+cardBounds.Min.Y, fillColor) 637 - } 638 - } 639 - } 640 - }
+24 -27
appview/pages/funcmap.go
··· 195 195 {D: math.MaxInt64, Format: "a long while %s", DivBy: 1}, 196 196 }) 197 197 }, 198 + "shortTimeFmt": func(t time.Time) string { 199 + return t.Format("Jan 2, 2006") 200 + }, 198 201 "longTimeFmt": func(t time.Time) string { 199 202 return t.Format("Jan 2, 2006, 3:04 PM MST") 200 203 }, ··· 209 212 return fmt.Sprintf("P%dD%dH%dM%dS", days, hours, minutes, seconds) 210 213 }, 211 214 "durationFmt": func(duration time.Duration) string { 212 - return durationFmt(duration, [4]string{"d", "hr", "min", "s"}) 215 + return durationFmt(duration, [4]string{"d", "h", "m", "s"}) 213 216 }, 214 217 "longDurationFmt": func(duration time.Duration) string { 215 218 return durationFmt(duration, [4]string{"days", "hours", "minutes", "seconds"}) ··· 262 265 return v.Slice(0, min(n, v.Len())).Interface() 263 266 }, 264 267 "markdown": func(text string) template.HTML { 265 - p.rctx.RendererType = markup.RendererTypeDefault 266 - htmlString := p.rctx.RenderMarkdown(text) 267 - sanitized := p.rctx.SanitizeDefault(htmlString) 268 + rctx := p.rctx.Clone() 269 + rctx.RendererType = markup.RendererTypeDefault 270 + htmlString := rctx.RenderMarkdown(text) 271 + sanitized := rctx.SanitizeDefault(htmlString) 268 272 return template.HTML(sanitized) 269 273 }, 270 274 "description": func(text string) template.HTML { 271 - p.rctx.RendererType = markup.RendererTypeDefault 272 - htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New( 275 + rctx := p.rctx.Clone() 276 + rctx.RendererType = markup.RendererTypeDefault 277 + htmlString := rctx.RenderMarkdownWith(text, goldmark.New( 273 278 goldmark.WithExtensions( 274 279 emoji.Emoji, 275 280 ), 276 281 )) 277 - sanitized := p.rctx.SanitizeDescription(htmlString) 282 + sanitized := rctx.SanitizeDescription(htmlString) 278 283 return template.HTML(sanitized) 279 284 }, 280 285 "readme": func(text string) template.HTML { 281 - p.rctx.RendererType = markup.RendererTypeRepoMarkdown 282 - htmlString := p.rctx.RenderMarkdown(text) 283 - 
sanitized := p.rctx.SanitizeDefault(htmlString) 286 + rctx := p.rctx.Clone() 287 + rctx.RendererType = markup.RendererTypeRepoMarkdown 288 + htmlString := rctx.RenderMarkdown(text) 289 + sanitized := rctx.SanitizeDefault(htmlString) 284 290 return template.HTML(sanitized) 285 291 }, 286 292 "code": func(content, path string) string { ··· 405 411 "placeholderAvatar": func(size string) template.HTML { 406 412 sizeClass := "size-6" 407 413 iconSize := "size-4" 408 - if size == "tiny" { 414 + switch size { 415 + case "tiny": 409 416 sizeClass = "size-6" 410 417 iconSize = "size-4" 411 - } else if size == "small" { 418 + case "small": 412 419 sizeClass = "size-8" 413 420 iconSize = "size-5" 414 - } else { 421 + default: 415 422 sizeClass = "size-12" 416 423 iconSize = "size-8" 417 424 } ··· 496 503 } 497 504 } 498 505 499 - func (p *Pages) resolveDid(did string) string { 500 - identity, err := p.resolver.ResolveIdent(context.Background(), did) 501 - 502 - if err != nil { 503 - return did 504 - } 505 - 506 - if identity.Handle.IsInvalidHandle() { 507 - return "handle.invalid" 508 - } 509 - 510 - return identity.Handle.String() 511 - } 512 - 513 506 func (p *Pages) AvatarUrl(actor, size string) string { 514 507 actor = strings.TrimPrefix(actor, "@") 515 508 ··· 522 515 } 523 516 524 517 secret := p.avatar.SharedSecret 518 + if secret == "" { 519 + return "" 520 + } 525 521 h := hmac.New(sha256.New, []byte(secret)) 526 522 h.Write([]byte(did)) 527 523 signature := hex.EncodeToString(h.Sum(nil)) ··· 550 546 if version != "" { 551 547 return fmt.Sprintf("%s?v=%s", baseUrl, version) 552 548 } 549 + 553 550 return baseUrl 554 551 } 555 552
+9
appview/pages/markup/markdown.go
··· 86 86 return md 87 87 } 88 88 89 + // Clone creates a shallow copy of the RenderContext 90 + func (rctx *RenderContext) Clone() *RenderContext { 91 + if rctx == nil { 92 + return nil 93 + } 94 + clone := *rctx 95 + return &clone 96 + } 97 + 89 98 // NewMarkdownWith is an alias for NewMarkdown with extra extensions. 90 99 func NewMarkdownWith(hostname string, extra ...goldmark.Extender) goldmark.Markdown { 91 100 return NewMarkdown(hostname, extra...)
+23 -21
appview/pages/pages.go
··· 350 350 return fmt.Errorf("failed to read %s: %w", filename, err) 351 351 } 352 352 353 - p.rctx.RendererType = markup.RendererTypeDefault 354 - htmlString := p.rctx.RenderMarkdown(string(markdownBytes)) 355 - sanitized := p.rctx.SanitizeDefault(htmlString) 353 + rctx := p.rctx.Clone() 354 + rctx.RendererType = markup.RendererTypeDefault 355 + htmlString := rctx.RenderMarkdown(string(markdownBytes)) 356 + sanitized := rctx.SanitizeDefault(htmlString) 356 357 params.Content = template.HTML(sanitized) 357 358 358 359 return p.execute("legal/terms", w, params) ··· 378 379 return fmt.Errorf("failed to read %s: %w", filename, err) 379 380 } 380 381 381 - p.rctx.RendererType = markup.RendererTypeDefault 382 - htmlString := p.rctx.RenderMarkdown(string(markdownBytes)) 383 - sanitized := p.rctx.SanitizeDefault(htmlString) 382 + rctx := p.rctx.Clone() 383 + rctx.RendererType = markup.RendererTypeDefault 384 + htmlString := rctx.RenderMarkdown(string(markdownBytes)) 385 + sanitized := rctx.SanitizeDefault(htmlString) 384 386 params.Content = template.HTML(sanitized) 385 387 386 388 return p.execute("legal/privacy", w, params) ··· 649 651 Repos []models.Repo 650 652 Card *ProfileCard 651 653 Active string 654 + Page pagination.Page 655 + RepoCount int 656 + FilterQuery string 652 657 } 653 658 654 659 func (p *Pages) ProfileRepos(w io.Writer, params ProfileReposParams) error { ··· 793 798 return p.executeRepo("repo/knotUnreachable", w, params) 794 799 } 795 800 796 - p.rctx.RepoInfo = params.RepoInfo 797 - p.rctx.RepoInfo.Ref = params.Ref 798 - p.rctx.RendererType = markup.RendererTypeRepoMarkdown 801 + rctx := p.rctx.Clone() 802 + rctx.RepoInfo = params.RepoInfo 803 + rctx.RepoInfo.Ref = params.Ref 804 + rctx.RendererType = markup.RendererTypeRepoMarkdown 799 805 800 806 if params.ReadmeFileName != "" { 801 807 ext := filepath.Ext(params.ReadmeFileName) 802 808 switch ext { 803 809 case ".md", ".markdown", ".mdown", ".mkdn", ".mkd": 804 810 params.Raw = false 805 - 
htmlString := p.rctx.RenderMarkdown(params.Readme) 806 - sanitized := p.rctx.SanitizeDefault(htmlString) 811 + htmlString := rctx.RenderMarkdown(params.Readme) 812 + sanitized := rctx.SanitizeDefault(htmlString) 807 813 params.HTMLReadme = template.HTML(sanitized) 808 814 default: 809 815 params.Raw = true ··· 886 892 func (p *Pages) RepoTree(w io.Writer, params RepoTreeParams) error { 887 893 params.Active = "overview" 888 894 889 - p.rctx.RepoInfo = params.RepoInfo 890 - p.rctx.RepoInfo.Ref = params.Ref 891 - p.rctx.RendererType = markup.RendererTypeRepoMarkdown 895 + rctx := p.rctx.Clone() 896 + rctx.RepoInfo = params.RepoInfo 897 + rctx.RepoInfo.Ref = params.Ref 898 + rctx.RendererType = markup.RendererTypeRepoMarkdown 892 899 893 900 if params.ReadmeFileName != "" { 894 901 ext := filepath.Ext(params.ReadmeFileName) 895 902 switch ext { 896 903 case ".md", ".markdown", ".mdown", ".mkdn", ".mkd": 897 904 params.Raw = false 898 - htmlString := p.rctx.RenderMarkdown(params.Readme) 899 - sanitized := p.rctx.SanitizeDefault(htmlString) 905 + htmlString := rctx.RenderMarkdown(params.Readme) 906 + sanitized := rctx.SanitizeDefault(htmlString) 900 907 params.HTMLReadme = template.HTML(sanitized) 901 908 default: 902 909 params.Raw = true ··· 968 975 } 969 976 970 977 func (p *Pages) RepoBlob(w io.Writer, params RepoBlobParams) error { 971 - switch params.BlobView.ContentType { 972 - case models.BlobContentTypeMarkup: 973 - p.rctx.RepoInfo = params.RepoInfo 974 - } 975 - 976 978 params.Active = "overview" 977 979 return p.executeRepo("repo/blob", w, params) 978 980 }
+3 -1
appview/pages/templates/fragments/line-quote-button.html
··· 227 227 ? firstAnchor 228 228 : `${firstAnchor}~${lastAnchor}`; 229 229 230 + const linkBase = document.getElementById('round-link-base')?.value 231 + || (window.location.pathname + window.location.search); 232 + const md = `[\`${label}\`](${linkBase}#${fragment})`; 230 - const md = `[\`${label}\`](${window.location.pathname}${window.location.search}#${fragment})`; 231 233 232 234 const { selectionStart: s, selectionEnd: end, value } = ta; 233 235 const before = value.slice(0, s);
+1
appview/pages/templates/layouts/fragments/footerMinimal.html
··· 10 10 <a href="https://blog.tangled.org" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline">blog</a> 11 11 <a href="https://docs.tangled.org" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline">docs</a> 12 12 <a href="https://tangled.org/tangled.org/core" hx-boost="true" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline">source</a> 13 + <a href="https://tangled.org/brand" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline">brand</a> 13 14 <a href="https://chat.tangled.org" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline" target="_blank" rel="noopener noreferrer">discord</a> 14 15 <a href="https://bsky.app/profile/tangled.org" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline" target="_blank" rel="noopener noreferrer">bluesky</a> 15 16 <a href="/terms" hx-boost="true" class="hover:text-gray-900 dark:hover:text-gray-200 hover:underline">terms</a>
+153 -3
appview/pages/templates/repo/fragments/diff.html
··· 14 14 {{ template "fragments/resizable" }} 15 15 {{ template "activeFileHighlight" }} 16 16 {{ template "fragments/line-quote-button" }} 17 + {{ template "reviewState" }} 17 18 </div> 18 19 {{ end }} 19 20 ··· 37 38 {{ $stat := $diff.Stats }} 38 39 {{ $count := len $diff.ChangedFiles }} 39 40 {{ template "repo/fragments/diffStatPill" $stat }} 41 + <span id="changed-files-label" class="text-xs text-gray-600 dark:text-gray-400 hidden md:inline-flex" data-total="{{ $count }}">{{ $count }} changed file{{ if ne $count 1 }}s{{ end }}</span> 40 - <span class="text-xs text-gray-600 dark:text-gray-400 hidden md:inline-flex">{{ $count }} changed file{{ if ne $count 1 }}s{{ end }}</span> 41 42 42 43 {{ if $root }} 43 44 {{ if $root.IsInterdiff }} ··· 142 143 {{ $file := index . 1 }} 143 144 {{ $isSplit := index . 2 }} 144 145 {{ $isGenerated := false }} 146 + {{ $isDeleted := false }} 145 147 {{ with $file }} 146 148 {{ $n := .Names }} 149 + {{ $isDeleted = and (eq $n.New "") (ne $n.Old "") }} 147 150 {{ if $n.New }} 148 151 {{ $isGenerated = isGenerated $n.New }} 149 152 {{ else if $n.Old }} 150 153 {{ $isGenerated = isGenerated $n.Old }} 151 154 {{ end }} 155 + <details {{ if and (not $isGenerated) (not $isDeleted) }}open{{ end }} id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 152 - <details {{ if not $isGenerated }}open{{ end }} id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 153 156 <summary class="list-none cursor-pointer sticky top-12 group-open:border-b border-gray-200 dark:border-gray-700"> 154 157 <div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between"> 155 158 <div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto"> ··· 165 168 {{ else }} 166 169 {{ 
$n.Old }} 167 170 {{ end }} 171 + {{ if $isDeleted }} 172 + <span class="text-gray-400 dark:text-gray-500" title="Deleted files are collapsed by default"> 173 + {{ i "circle-question-mark" "size-4" }} 174 + </span> 175 + {{ else if $isGenerated }} 168 - {{ if $isGenerated }} 169 176 <span class="text-gray-400 dark:text-gray-500" title="Generated files are collapsed by default"> 170 177 {{ i "circle-question-mark" "size-4" }} 171 178 </span> 172 179 {{ end }} 173 180 </div> 174 181 </div> 182 + <label 183 + data-review-btn="file-{{ .Id }}" 184 + onclick="event.stopPropagation()" 185 + class="review-btn hidden p-2 items-center gap-1 text-xs text-gray-400 dark:text-gray-500 hover:text-green-600 dark:hover:text-green-400 transition-colors cursor-pointer" 186 + title="Mark as reviewed" 187 + > 188 + <input 189 + type="checkbox" 190 + class="sr-only peer review-checkbox" 191 + data-file-id="file-{{ .Id }}" 192 + /> 193 + <span class="peer-checked:hidden">{{ i "circle" "size-4" }}</span> 194 + <span class="hidden peer-checked:inline text-green-600 dark:text-green-400">{{ i "circle-check" "size-4" }}</span> 195 + <span class="hidden md:inline">reviewed</span> 196 + </label> 175 197 </div> 176 198 </summary> 177 199 ··· 299 321 })(); 300 322 </script> 301 323 {{ end }} 324 + 325 + {{ define "reviewState" }} 326 + <script> 327 + (() => { 328 + const linkBase = document.getElementById('round-link-base'); 329 + if (!linkBase) return; 330 + 331 + const isInterdiff = !!document.getElementById('is-interdiff'); 332 + const basePath = linkBase.value.replace(/^\//, ''); 333 + const storageKey = 'reviewed:' + basePath + (isInterdiff ? '/interdiff' : ''); 334 + 335 + const REVIEWED_PREFIX = 'reviewed:'; 336 + const MAX_AGE_MS = 30 * 24 * 60 * 60 * 1000; 337 + 338 + const load = () => { 339 + try { 340 + const entry = JSON.parse(localStorage.getItem(storageKey) || '{}'); 341 + return new Set(Array.isArray(entry) ? 
entry : (entry.files || [])); 342 + } 343 + catch { return new Set(); } 344 + }; 345 + 346 + const save = (reviewed) => { 347 + const liveIds = new Set(Array.from(allFiles()).map(d => d.id)); 348 + localStorage.setItem(storageKey, JSON.stringify({ 349 + files: Array.from(reviewed).filter(id => liveIds.has(id)), 350 + ts: Date.now(), 351 + })); 352 + }; 353 + 354 + const pruneStale = () => { 355 + const now = Date.now(); 356 + Object.keys(localStorage) 357 + .filter(k => k.startsWith(REVIEWED_PREFIX) && k !== storageKey) 358 + .forEach(k => { 359 + try { 360 + const entry = JSON.parse(localStorage.getItem(k)); 361 + if (!entry.ts || now - entry.ts > MAX_AGE_MS) localStorage.removeItem(k); 362 + } catch { localStorage.removeItem(k); } 363 + }); 364 + }; 365 + if (Math.random() < 0.1) pruneStale(); 366 + 367 + const allFiles = () => 368 + document.querySelectorAll('details[id^="file-"]'); 369 + 370 + const applyOne = (fileId, isReviewed) => { 371 + const detail = document.getElementById(fileId); 372 + if (!detail) return; 373 + 374 + const btn = detail.querySelector('[data-review-btn]'); 375 + const checkbox = btn?.querySelector('input[type="checkbox"]'); 376 + const path = CSS.escape(fileId.replace('file-', '')); 377 + const treeLink = document.querySelector(`.filetree-link[data-path="${path}"]`); 378 + 379 + detail.classList.toggle('opacity-60', isReviewed); 380 + 381 + if (checkbox) checkbox.checked = isReviewed; 382 + 383 + if (treeLink) { 384 + const existing = treeLink.parentElement.querySelector('.review-indicator'); 385 + if (isReviewed && !existing) { 386 + const indicator = document.createElement('span'); 387 + indicator.className = 'review-indicator text-green-600 dark:text-green-400 flex-shrink-0'; 388 + indicator.innerHTML = '&#10003;'; 389 + treeLink.parentElement.appendChild(indicator); 390 + } else if (!isReviewed && existing) { 391 + existing.remove(); 392 + } 393 + } 394 + }; 395 + 396 + const updateProgress = (reviewed) => { 397 + const el = 
document.getElementById('changed-files-label'); 398 + if (!el) return; 399 + const total = parseInt(el.dataset.total, 10); 400 + const files = allFiles(); 401 + const count = Array.from(files).filter(d => reviewed.has(d.id)).length; 402 + const suffix = total === 1 ? 'file' : 'files'; 403 + const allDone = count === total; 404 + el.classList.toggle('text-green-600', allDone); 405 + el.classList.toggle('dark:text-green-400', allDone); 406 + el.classList.toggle('text-gray-600', !allDone); 407 + el.classList.toggle('dark:text-gray-400', !allDone); 408 + el.textContent = count > 0 409 + ? `${count}/${total} ${suffix} reviewed` 410 + : `${total} changed ${suffix}`; 411 + }; 412 + 413 + const reviewed = load(); 414 + 415 + const toggleReview = (fileId) => { 416 + const detail = document.getElementById(fileId); 417 + if (!detail) return; 418 + const isNowReviewed = !reviewed.has(fileId); 419 + if (isNowReviewed) { 420 + reviewed.add(fileId); 421 + detail.open = false; 422 + } else { 423 + reviewed.delete(fileId); 424 + } 425 + save(reviewed); 426 + applyOne(fileId, isNowReviewed); 427 + updateProgress(reviewed); 428 + }; 429 + 430 + document.getElementById('diff-area').addEventListener('change', (e) => { 431 + const checkbox = e.target.closest('.review-checkbox'); 432 + if (!checkbox) return; 433 + const fileId = checkbox.dataset.fileId; 434 + if (fileId) toggleReview(fileId); 435 + }); 436 + 437 + document.querySelectorAll('.review-btn').forEach(btn => { 438 + btn.classList.remove('hidden'); 439 + btn.classList.add('flex'); 440 + }); 441 + 442 + allFiles().forEach(detail => { 443 + if (reviewed.has(detail.id)) { 444 + applyOne(detail.id, true); 445 + detail.open = false; 446 + } 447 + }); 448 + updateProgress(reviewed); 449 + })(); 450 + </script> 451 + {{ end }}
+7 -3
appview/pages/templates/repo/pipelines/workflow.html
··· 48 48 {{ $lastStatus := $all.Latest }} 49 49 {{ $kind := $lastStatus.Status.String }} 50 50 51 + <div id="left" class="flex items-center gap-2 flex-1 min-w-0"> 52 + <div class="flex-shrink-0"> 53 + {{ template "repo/pipelines/fragments/workflowSymbol" $all }} 54 + </div> 55 + <span class="truncate" title="{{ $name }}"> 56 + {{ $name }} 57 + </span> 51 - <div id="left" class="flex items-center gap-2 flex-shrink-0"> 52 - {{ template "repo/pipelines/fragments/workflowSymbol" $all }} 53 - {{ $name }} 54 58 </div> 55 59 <div id="right" class="flex items-center gap-2 flex-shrink-0"> 56 60 <span class="font-bold">{{ $kind }}</span>
+4
appview/pages/templates/repo/pulls/pull.html
··· 99 99 {{ end }} 100 100 101 101 {{ define "contentAfter" }} 102 + <input type="hidden" id="round-link-base" value="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/round/{{ .ActiveRound }}" /> 103 + {{ if .IsInterdiff }} 104 + <input type="hidden" id="is-interdiff" value="1" /> 105 + {{ end }} 102 106 {{ template "repo/fragments/diff" (list .Diff .DiffOpts $) }} 103 107 {{ end }} 104 108
appview/pages/templates/repo/settings/branch_rules.html

This file has not been changed.

+39 -1
appview/pages/templates/user/repos.html
··· 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-8 order-2 md:order-2"> 5 + <div class="mb-4"> 6 + <form id="search-form" class="flex relative" method="GET"> 7 + <input type="hidden" name="tab" value="repos"> 8 + <div class="flex-1 flex relative"> 9 + <input 10 + id="search-q" 11 + class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none peer" 12 + type="text" 13 + name="q" 14 + value="{{ .FilterQuery }}" 15 + placeholder="search repos..." 16 + > 17 + <a 18 + {{ if .FilterQuery }}href="?tab=repos"{{ else }}href="#"{{ end }} 19 + class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 {{ if not .FilterQuery }}hidden{{ end }} peer-[:not(:placeholder-shown)]:block" 20 + > 21 + {{ i "x" "w-4 h-4" }} 22 + </a> 23 + </div> 24 + <button 25 + type="submit" 26 + class="p-2 text-gray-400 border rounded-r border-gray-300 dark:border-gray-600" 27 + > 28 + {{ i "search" "w-4 h-4" }} 29 + </button> 30 + </form> 31 + </div> 32 + 33 + {{ block "ownRepos" . }}{{ end }} 34 + 35 + {{if gt .RepoCount .Page.Limit }} 36 + {{ $handle := resolve .Card.UserDid }} 37 + {{ template "fragments/pagination" (dict 38 + "Page" .Page 39 + "TotalCount" .RepoCount 40 + "BasePath" (printf "/%s" $handle) 41 + "QueryParams" (queryParams "tab" "repos" "q" .FilterQuery) 42 + ) }} 43 + {{ end }} 5 - {{ block "ownRepos" . }}{{ end }} 6 44 </div> 7 45 {{ end }} 8 46
+53 -269
appview/pulls/opengraph.go
··· 1 1 package pulls 2 2 3 3 import ( 4 - "bytes" 5 4 "context" 6 - "fmt" 7 - "image" 8 - "image/color" 9 - "image/png" 10 5 "log" 11 6 "net/http" 7 + "time" 12 8 13 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/ogre" 14 - "tangled.org/core/appview/ogcard" 15 11 "tangled.org/core/patchutil" 16 - "tangled.org/core/types" 17 12 ) 18 13 14 + func (s *Pulls) PullOpenGraphSummary(w http.ResponseWriter, r *http.Request) { 15 + f, err := s.repoResolver.Resolve(r) 19 - func (s *Pulls) drawPullSummaryCard(pull *models.Pull, repo *models.Repo, diffStats types.DiffFileStat, filesChanged int) (*ogcard.Card, error) { 20 - width, height := ogcard.DefaultSize() 21 - mainCard, err := ogcard.NewCard(width, height) 22 - if err != nil { 23 - return nil, err 24 - } 25 - 26 - // Split: content area (75%) and status/stats area (25%) 27 - contentCard, statsArea := mainCard.Split(false, 75) 28 - 29 - // Add padding to content 30 - contentCard.SetMargin(50) 31 - 32 - // Split content horizontally: main content (80%) and avatar area (20%) 33 - mainContent, avatarArea := contentCard.Split(true, 80) 34 - 35 - // Add margin to main content 36 - mainContent.SetMargin(10) 37 - 38 - // Use full main content area for repo name and title 39 - bounds := mainContent.Img.Bounds() 40 - startX := bounds.Min.X + mainContent.Margin 41 - startY := bounds.Min.Y + mainContent.Margin 42 - 43 - // Draw full repository name at top (owner/repo format) 44 - var repoOwner string 45 - owner, err := s.idResolver.ResolveIdent(context.Background(), repo.Did) 46 - if err != nil { 47 - repoOwner = repo.Did 48 - } else { 49 - repoOwner = "@" + owner.Handle.String() 50 - } 51 - 52 - fullRepoName := repoOwner + " / " + repo.Name 53 - if len(fullRepoName) > 60 { 54 - fullRepoName = fullRepoName[:60] + "…" 55 - } 56 - 57 - grayColor := color.RGBA{88, 96, 105, 255} 58 - err = mainContent.DrawTextAt(fullRepoName, startX, startY, grayColor, 36, ogcard.Top, ogcard.Left) 59 16 if err != nil { 17 + 
log.Println("failed to get repo and knot", err) 18 + return 60 - return nil, err 61 19 } 62 20 21 + pull, ok := r.Context().Value("pull").(*models.Pull) 22 + if !ok { 23 + log.Println("pull not found in context") 24 + http.Error(w, "pull not found", http.StatusNotFound) 25 + return 63 - // Draw pull request title below repo name with wrapping 64 - titleY := startY + 60 65 - titleX := startX 66 - 67 - // Truncate title if too long 68 - pullTitle := pull.Title 69 - maxTitleLength := 80 70 - if len(pullTitle) > maxTitleLength { 71 - pullTitle = pullTitle[:maxTitleLength] + "…" 72 - } 73 - 74 - // Create a temporary card for the title area to enable wrapping 75 - titleBounds := mainContent.Img.Bounds() 76 - titleWidth := titleBounds.Dx() - (startX - titleBounds.Min.X) - 20 // Leave some margin 77 - titleHeight := titleBounds.Dy() - (titleY - titleBounds.Min.Y) - 100 // Leave space for pull ID 78 - 79 - titleRect := image.Rect(titleX, titleY, titleX+titleWidth, titleY+titleHeight) 80 - titleCard := &ogcard.Card{ 81 - Img: mainContent.Img.SubImage(titleRect).(*image.RGBA), 82 - Font: mainContent.Font, 83 - Margin: 0, 84 - } 85 - 86 - // Draw wrapped title 87 - lines, err := titleCard.DrawText(pullTitle, color.Black, 54, ogcard.Top, ogcard.Left) 88 - if err != nil { 89 - return nil, err 90 26 } 91 27 28 + var ownerHandle string 29 + owner, err := s.idResolver.ResolveIdent(context.Background(), f.Did) 92 - // Calculate where title ends (number of lines * line height) 93 - lineHeight := 60 // Approximate line height for 54pt font 94 - titleEndY := titleY + (len(lines) * lineHeight) + 10 95 - 96 - // Draw pull ID in gray below the title 97 - pullIdText := fmt.Sprintf("#%d", pull.PullId) 98 - err = mainContent.DrawTextAt(pullIdText, startX, titleEndY, grayColor, 54, ogcard.Top, ogcard.Left) 99 30 if err != nil { 31 + ownerHandle = f.Did 32 + } else { 33 + ownerHandle = owner.Handle.String() 100 - return nil, err 101 34 } 102 35 103 - // Get pull author handle (needed for 
avatar and metadata) 104 36 var authorHandle string 105 37 author, err := s.idResolver.ResolveIdent(context.Background(), pull.OwnerDid) 106 38 if err != nil { ··· 109 41 authorHandle = "@" + author.Handle.String() 110 42 } 111 43 44 + avatarUrl := s.pages.AvatarUrl(authorHandle, "256") 112 - // Draw avatar circle on the right side 113 - avatarBounds := avatarArea.Img.Bounds() 114 - avatarSize := min(avatarBounds.Dx(), avatarBounds.Dy()) - 20 // Leave some margin 115 - if avatarSize > 220 { 116 - avatarSize = 220 117 - } 118 - avatarX := avatarBounds.Min.X + (avatarBounds.Dx() / 2) - (avatarSize / 2) 119 - avatarY := avatarBounds.Min.Y + 20 120 45 46 + var status string 121 - // Get avatar URL for pull author 122 - avatarURL := s.pages.AvatarUrl(authorHandle, "256") 123 - err = avatarArea.DrawCircularExternalImage(avatarURL, avatarX, avatarY, avatarSize) 124 - if err != nil { 125 - log.Printf("failed to draw avatar (non-fatal): %v", err) 126 - } 127 - 128 - // Split stats area: left side for status/stats (80%), right side for dolly (20%) 129 - statusArea, dollyArea := statsArea.Split(true, 80) 130 - 131 - // Draw status and stats 132 - statsBounds := statusArea.Img.Bounds() 133 - statsX := statsBounds.Min.X + 60 // left padding 134 - statsY := statsBounds.Min.Y 135 - 136 - iconColor := color.RGBA{88, 96, 105, 255} 137 - iconSize := 36 138 - textSize := 36.0 139 - labelSize := 28.0 140 - iconBaselineOffset := int(textSize) / 2 141 - 142 - // Draw status (open/merged/closed) with colored icon and text 143 - var statusIcon string 144 - var statusText string 145 - var statusColor color.RGBA 146 - 147 47 if pull.State.IsOpen() { 48 + status = "open" 148 - statusIcon = "git-pull-request" 149 - statusText = "open" 150 - statusColor = color.RGBA{34, 139, 34, 255} // green 151 49 } else if pull.State.IsMerged() { 50 + status = "merged" 152 - statusIcon = "git-merge" 153 - statusText = "merged" 154 - statusColor = color.RGBA{138, 43, 226, 255} // purple 155 51 } else { 52 + 
status = "closed" 156 - statusIcon = "git-pull-request-closed" 157 - statusText = "closed" 158 - statusColor = color.RGBA{52, 58, 64, 255} // dark gray 159 53 } 160 54 55 + var filesChanged int 56 + var additions int64 57 + var deletions int64 161 - statusTextWidth := statusArea.TextWidth(statusText, textSize) 162 - badgePadding := 12 163 - badgeHeight := int(textSize) + (badgePadding * 2) 164 - badgeWidth := iconSize + badgePadding + statusTextWidth + (badgePadding * 2) 165 - cornerRadius := 8 166 - badgeX := 60 167 - badgeY := 0 168 58 59 + if len(pull.Submissions) > 0 { 60 + latestSubmission := pull.LatestSubmission() 61 + niceDiff := patchutil.AsNiceDiff(latestSubmission.Patch, pull.TargetBranch) 62 + filesChanged = niceDiff.Stat.FilesChanged 63 + additions = int64(niceDiff.Stat.Insertions) 64 + deletions = int64(niceDiff.Stat.Deletions) 169 - statusArea.DrawRoundedRect(badgeX, badgeY, badgeWidth, badgeHeight, cornerRadius, statusColor) 170 - 171 - whiteColor := color.RGBA{255, 255, 255, 255} 172 - iconX := statsX + badgePadding 173 - iconY := statsY + (badgeHeight-iconSize)/2 174 - err = statusArea.DrawLucideIcon(statusIcon, iconX, iconY, iconSize, whiteColor) 175 - if err != nil { 176 - log.Printf("failed to draw status icon: %v", err) 177 - } 178 - 179 - textX := statsX + badgePadding + iconSize + badgePadding 180 - textY := statsY + (badgeHeight-int(textSize))/2 - 5 181 - err = statusArea.DrawTextAt(statusText, textX, textY, whiteColor, textSize, ogcard.Top, ogcard.Left) 182 - if err != nil { 183 - log.Printf("failed to draw status text: %v", err) 184 - } 185 - 186 - currentX := statsX + badgeWidth + 50 187 - 188 - // Draw comment count 189 - err = statusArea.DrawLucideIcon("message-square", currentX, iconY, iconSize, iconColor) 190 - if err != nil { 191 - log.Printf("failed to draw comment icon: %v", err) 192 65 } 193 66 194 - currentX += iconSize + 15 195 67 commentCount := pull.TotalComments() 196 - commentText := fmt.Sprintf("%d comments", commentCount) 
197 - if commentCount == 1 { 198 - commentText = "1 comment" 199 - } 200 - err = statusArea.DrawTextAt(commentText, currentX, textY, iconColor, textSize, ogcard.Top, ogcard.Left) 201 - if err != nil { 202 - log.Printf("failed to draw comment text: %v", err) 203 - } 204 68 69 + rounds := len(pull.Submissions) 70 + if rounds == 0 { 71 + rounds = 1 205 - commentTextWidth := len(commentText) * 20 206 - currentX += commentTextWidth + 40 207 - 208 - // Draw files changed 209 - err = statusArea.DrawLucideIcon("file-diff", currentX, iconY, iconSize, iconColor) 210 - if err != nil { 211 - log.Printf("failed to draw file diff icon: %v", err) 212 - } 213 - 214 - currentX += iconSize + 15 215 - filesText := fmt.Sprintf("%d files", filesChanged) 216 - if filesChanged == 1 { 217 - filesText = "1 file" 218 - } 219 - err = statusArea.DrawTextAt(filesText, currentX, textY, iconColor, textSize, ogcard.Top, ogcard.Left) 220 - if err != nil { 221 - log.Printf("failed to draw files text: %v", err) 222 - } 223 - 224 - filesTextWidth := len(filesText) * 20 225 - currentX += filesTextWidth 226 - 227 - // Draw additions (green +) 228 - greenColor := color.RGBA{34, 139, 34, 255} 229 - additionsText := fmt.Sprintf("+%d", diffStats.Insertions) 230 - err = statusArea.DrawTextAt(additionsText, currentX, textY, greenColor, textSize, ogcard.Top, ogcard.Left) 231 - if err != nil { 232 - log.Printf("failed to draw additions text: %v", err) 233 - } 234 - 235 - additionsTextWidth := len(additionsText) * 20 236 - currentX += additionsTextWidth + 30 237 - 238 - // Draw deletions (red -) right next to additions 239 - redColor := color.RGBA{220, 20, 60, 255} 240 - deletionsText := fmt.Sprintf("-%d", diffStats.Deletions) 241 - err = statusArea.DrawTextAt(deletionsText, currentX, textY, redColor, textSize, ogcard.Top, ogcard.Left) 242 - if err != nil { 243 - log.Printf("failed to draw deletions text: %v", err) 244 - } 245 - 246 - // Draw dolly logo on the right side 247 - dollyBounds := 
dollyArea.Img.Bounds() 248 - dollySize := 90 249 - dollyX := dollyBounds.Min.X + (dollyBounds.Dx() / 2) - (dollySize / 2) 250 - dollyY := statsY + iconBaselineOffset - dollySize/2 + 25 251 - dollyColor := color.RGBA{180, 180, 180, 255} // light gray 252 - err = dollyArea.DrawDolly(dollyX, dollyY, dollySize, dollyColor) 253 - if err != nil { 254 - log.Printf("dolly silhouette not available (this is ok): %v", err) 255 72 } 256 73 74 + payload := ogre.PullRequestCardPayload{ 75 + Type: "pullRequest", 76 + RepoName: f.Name, 77 + OwnerHandle: ownerHandle, 78 + AvatarUrl: avatarUrl, 79 + Title: pull.Title, 80 + PullRequestNumber: pull.PullId, 81 + Status: status, 82 + FilesChanged: filesChanged, 83 + Additions: int(additions), 84 + Deletions: int(deletions), 85 + Rounds: rounds, 86 + CommentCount: commentCount, 87 + ReactionCount: 0, 88 + CreatedAt: pull.Created.Format(time.RFC3339), 257 - // Draw "opened by @author" and date at the bottom with more spacing 258 - labelY := statsY + iconSize + 30 259 - 260 - // Format the opened date 261 - openedDate := pull.Created.Format("Jan 2, 2006") 262 - metaText := fmt.Sprintf("opened by %s · %s", authorHandle, openedDate) 263 - 264 - err = statusArea.DrawTextAt(metaText, statsX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Left) 265 - if err != nil { 266 - log.Printf("failed to draw metadata: %v", err) 267 89 } 268 90 91 + imageBytes, err := s.ogreClient.RenderPullRequestCard(r.Context(), payload) 269 - return mainCard, nil 270 - } 271 - 272 - func (s *Pulls) PullOpenGraphSummary(w http.ResponseWriter, r *http.Request) { 273 - f, err := s.repoResolver.Resolve(r) 274 92 if err != nil { 93 + log.Println("failed to render pull request card", err) 94 + http.Error(w, "failed to render pull request card", http.StatusInternalServerError) 275 - log.Println("failed to get repo and knot", err) 276 95 return 277 96 } 278 97 279 - pull, ok := r.Context().Value("pull").(*models.Pull) 280 - if !ok { 281 - log.Println("pull not found in 
context") 282 - http.Error(w, "pull not found", http.StatusNotFound) 283 - return 284 - } 285 - 286 - // Calculate diff stats from latest submission using patchutil 287 - var diffStats types.DiffFileStat 288 - filesChanged := 0 289 - if len(pull.Submissions) > 0 { 290 - latestSubmission := pull.LatestSubmission() 291 - niceDiff := patchutil.AsNiceDiff(latestSubmission.Patch, pull.TargetBranch) 292 - diffStats.Insertions = int64(niceDiff.Stat.Insertions) 293 - diffStats.Deletions = int64(niceDiff.Stat.Deletions) 294 - filesChanged = niceDiff.Stat.FilesChanged 295 - } 296 - 297 - card, err := s.drawPullSummaryCard(pull, f, diffStats, filesChanged) 298 - if err != nil { 299 - log.Println("failed to draw pull summary card", err) 300 - http.Error(w, "failed to draw pull summary card", http.StatusInternalServerError) 301 - return 302 - } 303 - 304 - var imageBuffer bytes.Buffer 305 - err = png.Encode(&imageBuffer, card.Img) 306 - if err != nil { 307 - log.Println("failed to encode pull summary card", err) 308 - http.Error(w, "failed to encode pull summary card", http.StatusInternalServerError) 309 - return 310 - } 311 - 312 - imageBytes := imageBuffer.Bytes() 313 - 314 98 w.Header().Set("Content-Type", "image/png") 99 + w.Header().Set("Cache-Control", "public, max-age=3600") 315 - w.Header().Set("Cache-Control", "public, max-age=3600") // 1 hour 316 100 w.WriteHeader(http.StatusOK) 317 101 _, err = w.Write(imageBytes) 318 102 if err != nil { 103 + log.Println("failed to write pull request card", err) 319 - log.Println("failed to write pull summary card", err) 320 104 return 321 105 } 322 106 }
+19 -82
appview/pulls/pulls.go
··· 35 35 "tangled.org/core/appview/validator" 36 36 "tangled.org/core/appview/xrpcclient" 37 37 "tangled.org/core/idresolver" 38 + "tangled.org/core/ogre" 38 39 "tangled.org/core/orm" 39 40 "tangled.org/core/patchutil" 40 41 "tangled.org/core/rbac" ··· 65 66 logger *slog.Logger 66 67 validator *validator.Validator 67 68 indexer *pulls_indexer.Indexer 69 + ogreClient *ogre.Client 68 70 } 69 71 70 72 func New( ··· 94 96 logger: logger, 95 97 validator: validator, 96 98 indexer: indexer, 99 + ogreClient: ogre.NewClient(config.Ogre.Host), 97 100 } 98 101 } 99 102 ··· 411 414 return nil 412 415 } 413 416 417 + xrpcc := &indigoxrpc.Client{Host: s.config.KnotMirror.Url} 418 + resp, err := tangled.GitTempGetBranch(r.Context(), xrpcc, branch, repo.RepoAt().String()) 414 - scheme := "http" 415 - if !s.config.Core.Dev { 416 - scheme = "https" 417 - } 418 - host := fmt.Sprintf("%s://%s", scheme, repo.Knot) 419 - xrpcc := &indigoxrpc.Client{ 420 - Host: host, 421 - } 422 - 423 - resp, err := tangled.RepoBranch(r.Context(), xrpcc, branch, fmt.Sprintf("%s/%s", repo.Did, repo.Name)) 424 419 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 425 420 return nil 426 421 } ··· 436 431 return pages.Unknown 437 432 } 438 433 434 + var sourceRepo syntax.ATURI 439 - var knot, ownerDid, repoName string 440 - 441 435 if pull.PullSource.RepoAt != nil { 442 436 // fork-based pulls 437 + sourceRepo = *pull.PullSource.RepoAt 443 - sourceRepo, err := db.GetRepoByAtUri(s.db, pull.PullSource.RepoAt.String()) 444 - if err != nil { 445 - log.Println("failed to get source repo", err) 446 - return pages.Unknown 447 - } 448 - 449 - knot = sourceRepo.Knot 450 - ownerDid = sourceRepo.Did 451 - repoName = sourceRepo.Name 452 438 } else { 453 439 // pulls within the same repo 440 + sourceRepo = repo.RepoAt() 454 - knot = repo.Knot 455 - ownerDid = repo.Did 456 - repoName = repo.Name 457 - } 458 - 459 - scheme := "http" 460 - if !s.config.Core.Dev { 461 - scheme = "https" 462 - } 463 - host := 
fmt.Sprintf("%s://%s", scheme, knot) 464 - xrpcc := &indigoxrpc.Client{ 465 - Host: host, 466 441 } 467 442 443 + xrpcc := &indigoxrpc.Client{Host: s.config.KnotMirror.Url} 444 + branchResp, err := tangled.GitTempGetBranch(r.Context(), xrpcc, pull.PullSource.Branch, sourceRepo.String()) 468 - didSlashName := fmt.Sprintf("%s/%s", ownerDid, repoName) 469 - branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, didSlashName) 470 445 if err != nil { 471 446 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 472 447 log.Println("failed to call XRPC repo.branches", xrpcerr) ··· 904 879 905 880 switch r.Method { 906 881 case http.MethodGet: 882 + xrpcc := &indigoxrpc.Client{Host: s.config.KnotMirror.Url} 907 - scheme := "http" 908 - if !s.config.Core.Dev { 909 - scheme = "https" 910 - } 911 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 912 - xrpcc := &indigoxrpc.Client{ 913 - Host: host, 914 - } 915 883 884 + xrpcBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 916 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 917 - xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 918 885 if err != nil { 919 886 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 920 887 log.Println("failed to call XRPC repo.branches", xrpcerr) ··· 1535 1502 return 1536 1503 } 1537 1504 1505 + xrpcc := &indigoxrpc.Client{Host: s.config.KnotMirror.Url} 1538 - scheme := "http" 1539 - if !s.config.Core.Dev { 1540 - scheme = "https" 1541 - } 1542 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 1543 - xrpcc := &indigoxrpc.Client{ 1544 - Host: host, 1545 - } 1546 1506 1507 + xrpcBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 1547 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1548 - xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 1549 1508 if err != nil { 1550 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 1551 - 
log.Println("failed to call XRPC repo.branches", xrpcerr) 1552 - s.pages.Error503(w) 1553 - return 1554 - } 1555 1509 log.Println("failed to fetch branches", err) 1510 + s.pages.Error503(w) 1556 1511 return 1557 1512 } 1558 1513 ··· 1607 1562 return 1608 1563 } 1609 1564 1565 + xrpcc := &indigoxrpc.Client{Host: s.config.KnotMirror.Url} 1566 + 1610 1567 forkVal := r.URL.Query().Get("fork") 1611 1568 repoString := strings.SplitN(forkVal, "/", 2) 1612 1569 forkOwnerDid := repoString[0] ··· 1622 1579 return 1623 1580 } 1624 1581 1582 + sourceXrpcBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, repo.RepoAt().String()) 1625 - sourceScheme := "http" 1626 - if !s.config.Core.Dev { 1627 - sourceScheme = "https" 1628 - } 1629 - sourceHost := fmt.Sprintf("%s://%s", sourceScheme, repo.Knot) 1630 - sourceXrpcc := &indigoxrpc.Client{ 1631 - Host: sourceHost, 1632 - } 1633 - 1634 - sourceRepo := fmt.Sprintf("%s/%s", forkOwnerDid, repo.Name) 1635 - sourceXrpcBytes, err := tangled.RepoBranches(r.Context(), sourceXrpcc, "", 0, sourceRepo) 1636 1583 if err != nil { 1637 1584 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 1638 1585 log.Println("failed to call XRPC repo.branches for source", xrpcerr) ··· 1651 1598 return 1652 1599 } 1653 1600 1601 + targetXrpcBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 1654 - targetScheme := "http" 1655 - if !s.config.Core.Dev { 1656 - targetScheme = "https" 1657 - } 1658 - targetHost := fmt.Sprintf("%s://%s", targetScheme, f.Knot) 1659 - targetXrpcc := &indigoxrpc.Client{ 1660 - Host: targetHost, 1661 - } 1662 - 1663 - targetRepo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1664 - targetXrpcBytes, err := tangled.RepoBranches(r.Context(), targetXrpcc, "", 0, targetRepo) 1665 1602 if err != nil { 1666 1603 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 1667 1604 log.Println("failed to call XRPC repo.branches for target", xrpcerr)
+9 -19
appview/repo/archive.go
··· 8 8 "strings" 9 9 10 10 "github.com/go-chi/chi/v5" 11 + "tangled.org/core/api/tangled" 11 12 ) 12 13 13 14 func (rp *Repo) DownloadArchive(w http.ResponseWriter, r *http.Request) { ··· 20 21 l.Error("failed to get repo and knot", "err", err) 21 22 return 22 23 } 23 - scheme := "http" 24 - if !rp.config.Core.Dev { 25 - scheme = "https" 26 - } 27 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 28 - didSlashRepo := f.DidSlashRepo() 29 24 30 25 // build the xrpc url 31 - u, err := url.Parse(host) 32 - if err != nil { 33 - l.Error("failed to parse host URL", "err", err) 34 - rp.pages.Error503(w) 35 - return 36 - } 37 - 38 - u.Path = "/xrpc/sh.tangled.repo.archive" 39 26 query := url.Values{} 27 + query.Set("repo", f.RepoAt().String()) 28 + query.Set("ref", ref) 40 29 query.Set("format", "tar.gz") 41 30 query.Set("prefix", r.URL.Query().Get("prefix")) 31 + xrpcURL := fmt.Sprintf( 32 + "%s/xrpc/%s?%s", 33 + rp.config.KnotMirror.Url, 34 + tangled.GitTempGetArchiveNSID, 35 + query.Encode(), 36 + ) 42 - query.Set("ref", ref) 43 - query.Set("repo", didSlashRepo) 44 - u.RawQuery = query.Encode() 45 - 46 - xrpcURL := u.String() 47 37 48 38 // make the get request 49 39 resp, err := http.Get(xrpcURL)
+2 -10
appview/repo/artifact.go
··· 313 313 return nil, err 314 314 } 315 315 316 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 316 - scheme := "http" 317 - if !rp.config.Core.Dev { 318 - scheme = "https" 319 - } 320 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 321 - xrpcc := &indigoxrpc.Client{ 322 - Host: host, 323 - } 324 317 318 + xrpcBytes, err := tangled.GitTempListTags(ctx, xrpcc, "", 0, f.RepoAt().String()) 325 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 326 - xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 327 319 if err != nil { 328 320 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 329 321 l.Error("failed to call XRPC repo.tags", "err", xrpcerr)
+5 -12
appview/repo/branches.go
··· 21 21 l.Error("failed to get repo and knot", "err", err) 22 22 return 23 23 } 24 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 25 + 26 + xrpcBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 27 + if err != nil { 28 + l.Error("failed to call XRPC repo.branches", "err", err) 24 - scheme := "http" 25 - if !rp.config.Core.Dev { 26 - scheme = "https" 27 - } 28 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 29 - xrpcc := &indigoxrpc.Client{ 30 - Host: host, 31 - } 32 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 33 - xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 34 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 35 - l.Error("failed to call XRPC repo.branches", "err", xrpcerr) 36 29 rp.pages.Error503(w) 37 30 return 38 31 }
+3 -11
appview/repo/compare.go
··· 27 27 return 28 28 } 29 29 30 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 30 - scheme := "http" 31 - if !rp.config.Core.Dev { 32 - scheme = "https" 33 - } 34 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 35 - xrpcc := &indigoxrpc.Client{ 36 - Host: host, 37 - } 38 31 32 + branchBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 39 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 40 - branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 41 33 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 42 34 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) 43 35 rp.pages.Error503(w) ··· 74 66 head = queryHead 75 67 } 76 68 69 + tagBytes, err := tangled.GitTempListTags(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 77 - tagBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 0, repo) 78 70 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 79 71 l.Error("failed to call XRPC repo.tags", "err", xrpcerr) 80 72 rp.pages.Error503(w)
+34 -56
appview/repo/index.go
··· 22 22 "tangled.org/core/appview/db" 23 23 "tangled.org/core/appview/models" 24 24 "tangled.org/core/appview/pages" 25 - "tangled.org/core/appview/xrpcclient" 26 25 "tangled.org/core/orm" 27 26 "tangled.org/core/types" 28 27 ··· 42 41 return 43 42 } 44 43 45 - scheme := "http" 46 - if !rp.config.Core.Dev { 47 - scheme = "https" 48 - } 49 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 50 - xrpcc := &indigoxrpc.Client{ 51 - Host: host, 52 - } 53 - 54 44 user := rp.oauth.GetMultiAccountUser(r) 55 45 56 46 // Build index response from multiple XRPC calls 47 + result, err := rp.buildIndexResponse(r.Context(), f, ref) 48 + if err != nil { 49 + l.Error("failed to build index response", "err", err) 50 + rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 51 + LoggedInUser: user, 52 + KnotUnreachable: true, 53 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 54 + }) 55 + return 57 - result, err := rp.buildIndexResponse(r.Context(), xrpcc, f, ref) 58 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 59 - if errors.Is(xrpcerr, xrpcclient.ErrXrpcUnsupported) { 60 - l.Error("failed to call XRPC repo.index", "err", err) 61 - rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 62 - LoggedInUser: user, 63 - NeedsKnotUpgrade: true, 64 - RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 65 - }) 66 - return 67 - } else { 68 - l.Error("failed to build index response", "err", err) 69 - rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 70 - LoggedInUser: user, 71 - KnotUnreachable: true, 72 - RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 73 - }) 74 - return 75 - } 76 56 } 77 57 78 58 tagMap := make(map[string][]string) ··· 132 112 l.Error("failed to GetVerifiedObjectCommits", "err", err) 133 113 } 134 114 115 + var languageInfo []types.RepoLanguageDetails 116 + if !result.IsEmpty { 117 + // TODO: a bit dirty 118 + languageInfo, err = rp.getLanguageInfo(r.Context(), l, f, result.Ref, ref == "") 119 + if err != nil { 120 + l.Warn("failed to compute language percentages", 
"err", err) 121 + // non-fatal 122 + } 135 - // TODO: a bit dirty 136 - languageInfo, err := rp.getLanguageInfo(r.Context(), l, f, xrpcc, result.Ref, ref == "") 137 - if err != nil { 138 - l.Warn("failed to compute language percentages", "err", err) 139 - // non-fatal 140 123 } 141 124 142 125 var shas []string ··· 169 152 ctx context.Context, 170 153 l *slog.Logger, 171 154 repo *models.Repo, 172 - xrpcc *indigoxrpc.Client, 173 155 currentRef string, 174 156 isDefaultRef bool, 175 157 ) ([]types.RepoLanguageDetails, error) { ··· 182 164 183 165 if err != nil || langs == nil { 184 166 // non-fatal, fetch langs from ks via XRPC 167 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 168 + ls, err := tangled.GitTempListLanguages(ctx, xrpcc, currentRef, repo.RepoAt().String()) 185 - didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 186 - ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, didSlashRepo) 187 169 if err != nil { 170 + return nil, fmt.Errorf("calling knotmirror git.listLanguages: %w", err) 188 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 189 - l.Error("failed to call XRPC repo.languages", "err", xrpcerr) 190 - return nil, xrpcerr 191 - } 192 - return nil, err 193 171 } 194 172 195 173 if ls == nil || ls.Languages == nil { ··· 258 236 } 259 237 260 238 // buildIndexResponse creates a RepoIndexResponse by combining multiple xrpc calls in parallel 239 + func (rp *Repo) buildIndexResponse(ctx context.Context, repo *models.Repo, ref string) (*types.RepoIndexResponse, error) { 240 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 261 - func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, repo *models.Repo, ref string) (*types.RepoIndexResponse, error) { 262 - didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 263 241 264 242 // first get branches to determine the ref if not specified 243 + branchesBytes, err := tangled.GitTempListBranches(ctx, xrpcc, "", 0, 
repo.RepoAt().String()) 265 - branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, didSlashRepo) 266 244 if err != nil { 245 + return nil, fmt.Errorf("calling knotmirror git.listBranches: %w", err) 267 - return nil, fmt.Errorf("failed to call repoBranches: %w", err) 268 246 } 269 247 270 248 var branchesResp types.RepoBranchesResponse ··· 296 274 297 275 var ( 298 276 tagsResp types.RepoTagsResponse 277 + treeResp *tangled.GitTempGetTree_Output 299 - treeResp *tangled.RepoTree_Output 300 278 logResp types.RepoLogResponse 301 279 readmeContent string 302 280 readmeFileName string ··· 304 282 305 283 // tags 306 284 wg.Go(func() { 285 + tagsBytes, err := tangled.GitTempListTags(ctx, xrpcc, "", 0, repo.RepoAt().String()) 307 - tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, didSlashRepo) 308 286 if err != nil { 287 + errs = errors.Join(errs, fmt.Errorf("failed to call git.ListTags: %w", err)) 309 - errs = errors.Join(errs, fmt.Errorf("failed to call repoTags: %w", err)) 310 288 return 311 289 } 312 290 313 291 if err := json.Unmarshal(tagsBytes, &tagsResp); err != nil { 292 + errs = errors.Join(errs, fmt.Errorf("failed to unmarshal git.ListTags: %w", err)) 314 - errs = errors.Join(errs, fmt.Errorf("failed to unmarshal repoTags: %w", err)) 315 293 } 316 294 }) 317 295 318 296 // tree/files 319 297 wg.Go(func() { 298 + resp, err := tangled.GitTempGetTree(ctx, xrpcc, "", ref, repo.RepoAt().String()) 320 - resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, didSlashRepo) 321 299 if err != nil { 300 + errs = errors.Join(errs, fmt.Errorf("failed to call git.GetTree: %w", err)) 322 - errs = errors.Join(errs, fmt.Errorf("failed to call repoTree: %w", err)) 323 301 return 324 302 } 325 303 treeResp = resp ··· 327 305 328 306 // commits 329 307 wg.Go(func() { 308 + logBytes, err := tangled.GitTempListCommits(ctx, xrpcc, "", 50, ref, repo.RepoAt().String()) 330 - logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, didSlashRepo) 331 309 if err != nil { 310 
+ errs = errors.Join(errs, fmt.Errorf("failed to call git.ListCommits: %w", err)) 332 - errs = errors.Join(errs, fmt.Errorf("failed to call repoLog: %w", err)) 333 311 return 334 312 } 335 313 336 314 if err := json.Unmarshal(logBytes, &logResp); err != nil { 315 + errs = errors.Join(errs, fmt.Errorf("failed to unmarshal git.ListCommits: %w", err)) 337 - errs = errors.Join(errs, fmt.Errorf("failed to unmarshal repoLog: %w", err)) 338 316 } 339 317 }) 340 318 ··· 376 354 Readme: readmeContent, 377 355 ReadmeFileName: readmeFileName, 378 356 Commits: logResp.Commits, 357 + Description: "", 379 - Description: logResp.Description, 380 358 Files: files, 381 359 Branches: branchesResp.Branches, 382 360 Tags: tagsResp.Tags,
+10 -18
appview/repo/log.go
··· 40 40 ref := chi.URLParam(r, "ref") 41 41 ref, _ = url.PathUnescape(ref) 42 42 43 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 43 - scheme := "http" 44 - if !rp.config.Core.Dev { 45 - scheme = "https" 46 - } 47 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 48 - xrpcc := &indigoxrpc.Client{ 49 - Host: host, 50 - } 51 44 52 45 limit := int64(60) 53 46 cursor := "" ··· 57 50 cursor = strconv.Itoa(offset) 58 51 } 59 52 53 + xrpcBytes, err := tangled.GitTempListCommits(r.Context(), xrpcc, cursor, limit, ref, f.RepoAt().String()) 54 + if err != nil { 55 + l.Error("failed to call XRPC repo.log", "err", err) 60 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 61 - xrpcBytes, err := tangled.RepoLog(r.Context(), xrpcc, cursor, limit, "", ref, repo) 62 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 63 - l.Error("failed to call XRPC repo.log", "err", xrpcerr) 64 56 rp.pages.Error503(w) 65 57 return 66 58 } ··· 72 64 return 73 65 } 74 66 67 + tagBytes, err := tangled.GitTempListTags(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 68 + if err != nil { 69 + l.Error("failed to call XRPC repo.tags", "err", err) 75 - tagBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 0, repo) 76 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 77 - l.Error("failed to call XRPC repo.tags", "err", xrpcerr) 78 70 rp.pages.Error503(w) 79 71 return 80 72 } ··· 93 85 } 94 86 } 95 87 88 + branchBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 89 + if err != nil { 90 + l.Error("failed to call XRPC repo.branches", "err", err) 96 - branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 97 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 98 - l.Error("failed to call XRPC repo.branches", "err", xrpcerr) 99 91 rp.pages.Error503(w) 100 92 return 101 93 }
+36 -329
appview/repo/opengraph.go
··· 1 1 package repo 2 2 3 3 import ( 4 - "bytes" 5 4 "context" 6 - "encoding/hex" 7 - "fmt" 8 - "image/color" 9 - "image/png" 10 5 "log" 11 6 "net/http" 12 7 "sort" 8 + "time" 13 - "strings" 14 9 15 10 "github.com/go-enry/go-enry/v2" 16 11 "tangled.org/core/appview/db" 12 + "tangled.org/core/ogre" 17 - "tangled.org/core/appview/models" 18 - "tangled.org/core/appview/ogcard" 19 13 "tangled.org/core/orm" 20 14 "tangled.org/core/types" 21 15 ) 22 16 17 + func (rp *Repo) Opengraph(w http.ResponseWriter, r *http.Request) { 18 + f, err := rp.repoResolver.Resolve(r) 23 - func (rp *Repo) drawRepoSummaryCard(repo *models.Repo, languageStats []types.RepoLanguageDetails) (*ogcard.Card, error) { 24 - width, height := ogcard.DefaultSize() 25 - mainCard, err := ogcard.NewCard(width, height) 26 19 if err != nil { 20 + log.Println("failed to get repo and knot", err) 21 + return 27 - return nil, err 28 22 } 29 23 30 - // Split: content area (75%) and language bar + icons (25%) 31 - contentCard, bottomArea := mainCard.Split(false, 75) 32 - 33 - // Add padding to content 34 - contentCard.SetMargin(50) 35 - 36 - // Split content horizontally: main content (80%) and avatar area (20%) 37 - mainContent, avatarArea := contentCard.Split(true, 80) 38 - 39 - // Use main content area for both repo name and description to allow dynamic wrapping. 
40 - mainContent.SetMargin(10) 41 - 42 24 var ownerHandle string 25 + owner, err := rp.idResolver.ResolveIdent(context.Background(), f.Did) 43 - owner, err := rp.idResolver.ResolveIdent(context.Background(), repo.Did) 44 26 if err != nil { 27 + ownerHandle = f.Did 45 - ownerHandle = repo.Did 46 28 } else { 29 + ownerHandle = owner.Handle.String() 47 - ownerHandle = "@" + owner.Handle.String() 48 - } 49 - 50 - bounds := mainContent.Img.Bounds() 51 - startX := bounds.Min.X + mainContent.Margin 52 - startY := bounds.Min.Y + mainContent.Margin 53 - currentX := startX 54 - currentY := startY 55 - lineHeight := 64 // Font size 54 + padding 56 - textColor := color.RGBA{88, 96, 105, 255} 57 - 58 - // Draw owner handle 59 - ownerWidth, err := mainContent.DrawTextAtWithWidth(ownerHandle, currentX, currentY, textColor, 54, ogcard.Top, ogcard.Left) 60 - if err != nil { 61 - return nil, err 62 - } 63 - currentX += ownerWidth 64 - 65 - // Draw separator 66 - sepWidth, err := mainContent.DrawTextAtWithWidth(" / ", currentX, currentY, textColor, 54, ogcard.Top, ogcard.Left) 67 - if err != nil { 68 - return nil, err 69 - } 70 - currentX += sepWidth 71 - 72 - words := strings.Fields(repo.Name) 73 - spaceWidth, _ := mainContent.DrawTextAtWithWidth(" ", -1000, -1000, color.Black, 54, ogcard.Top, ogcard.Left) 74 - if spaceWidth == 0 { 75 - spaceWidth = 15 76 - } 77 - 78 - for _, word := range words { 79 - // estimate bold width by measuring regular width and adding a multiplier 80 - regularWidth, _ := mainContent.DrawTextAtWithWidth(word, -1000, -1000, color.Black, 54, ogcard.Top, ogcard.Left) 81 - estimatedBoldWidth := int(float64(regularWidth) * 1.15) // Heuristic for bold text 82 - 83 - if currentX+estimatedBoldWidth > (bounds.Max.X - mainContent.Margin) { 84 - currentX = startX 85 - currentY += lineHeight 86 - } 87 - 88 - _, err := mainContent.DrawBoldText(word, currentX, currentY, color.Black, 54, ogcard.Top, ogcard.Left) 89 - if err != nil { 90 - return nil, err 91 - } 92 - 
currentX += estimatedBoldWidth + spaceWidth 93 - } 94 - 95 - // update Y position for the description 96 - currentY += lineHeight 97 - 98 - // draw description 99 - if currentY < bounds.Max.Y-mainContent.Margin { 100 - totalHeight := float64(bounds.Dy()) 101 - repoNameHeight := float64(currentY - bounds.Min.Y) 102 - 103 - if totalHeight > 0 && repoNameHeight < totalHeight { 104 - repoNamePercent := (repoNameHeight / totalHeight) * 100 105 - if repoNamePercent < 95 { // Ensure there's space left for description 106 - _, descriptionCard := mainContent.Split(false, int(repoNamePercent)) 107 - descriptionCard.SetMargin(8) 108 - 109 - description := repo.Description 110 - if len(description) > 70 { 111 - description = description[:70] + "…" 112 - } 113 - 114 - _, err = descriptionCard.DrawText(description, color.RGBA{88, 96, 105, 255}, 36, ogcard.Top, ogcard.Left) 115 - if err != nil { 116 - log.Printf("failed to draw description: %v", err) 117 - } 118 - } 119 - } 120 - } 121 - 122 - // Draw avatar circle on the right side 123 - avatarBounds := avatarArea.Img.Bounds() 124 - avatarSize := min(avatarBounds.Dx(), avatarBounds.Dy()) - 20 // Leave some margin 125 - if avatarSize > 220 { 126 - avatarSize = 220 127 - } 128 - avatarX := avatarBounds.Min.X + (avatarBounds.Dx() / 2) - (avatarSize / 2) 129 - avatarY := avatarBounds.Min.Y + 20 130 - 131 - // Get avatar URL and draw it 132 - avatarURL := rp.pages.AvatarUrl(ownerHandle, "256") 133 - err = avatarArea.DrawCircularExternalImage(avatarURL, avatarX, avatarY, avatarSize) 134 - if err != nil { 135 - log.Printf("failed to draw avatar (non-fatal): %v", err) 136 - } 137 - 138 - // Split bottom area: icons area (65%) and language bar (35%) 139 - iconsArea, languageBarCard := bottomArea.Split(false, 75) 140 - 141 - // Split icons area: left side for stats (80%), right side for dolly (20%) 142 - statsArea, dollyArea := iconsArea.Split(true, 80) 143 - 144 - // Draw stats with icons in the stats area 145 - starsText := 
repo.RepoStats.StarCount 146 - issuesText := repo.RepoStats.IssueCount.Open 147 - pullRequestsText := repo.RepoStats.PullCount.Open 148 - 149 - iconColor := color.RGBA{88, 96, 105, 255} 150 - iconSize := 36 151 - textSize := 36.0 152 - 153 - // Position stats in the middle of the stats area 154 - statsBounds := statsArea.Img.Bounds() 155 - statsX := statsBounds.Min.X + 60 // left padding 156 - statsY := statsBounds.Min.Y 157 - currentX = statsX 158 - labelSize := 22.0 159 - // Draw star icon, count, and label 160 - // Align icon baseline with text baseline 161 - iconBaselineOffset := int(textSize) / 2 162 - err = statsArea.DrawLucideIcon("star", currentX, statsY+iconBaselineOffset-iconSize/2+5, iconSize, iconColor) 163 - if err != nil { 164 - log.Printf("failed to draw star icon: %v", err) 165 - } 166 - starIconX := currentX 167 - currentX += iconSize + 15 168 - 169 - starText := fmt.Sprintf("%d", starsText) 170 - err = statsArea.DrawTextAt(starText, currentX, statsY+iconBaselineOffset, iconColor, textSize, ogcard.Middle, ogcard.Left) 171 - if err != nil { 172 - log.Printf("failed to draw star text: %v", err) 173 - } 174 - starTextWidth := len(starText) * 20 175 - starGroupWidth := iconSize + 15 + starTextWidth 176 - 177 - // Draw "stars" label below and centered under the icon+text group 178 - labelY := statsY + iconSize + 15 179 - labelX := starIconX + starGroupWidth/2 180 - err = iconsArea.DrawTextAt("stars", labelX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Center) 181 - if err != nil { 182 - log.Printf("failed to draw stars label: %v", err) 183 - } 184 - 185 - currentX += starTextWidth + 50 186 - 187 - // Draw issues icon, count, and label 188 - issueStartX := currentX 189 - err = statsArea.DrawLucideIcon("circle-dot", currentX, statsY+iconBaselineOffset-iconSize/2+5, iconSize, iconColor) 190 - if err != nil { 191 - log.Printf("failed to draw circle-dot icon: %v", err) 192 - } 193 - currentX += iconSize + 15 194 - 195 - issueText := fmt.Sprintf("%d", 
issuesText) 196 - err = statsArea.DrawTextAt(issueText, currentX, statsY+iconBaselineOffset, iconColor, textSize, ogcard.Middle, ogcard.Left) 197 - if err != nil { 198 - log.Printf("failed to draw issue text: %v", err) 199 - } 200 - issueTextWidth := len(issueText) * 20 201 - issueGroupWidth := iconSize + 15 + issueTextWidth 202 - 203 - // Draw "issues" label below and centered under the icon+text group 204 - labelX = issueStartX + issueGroupWidth/2 205 - err = iconsArea.DrawTextAt("issues", labelX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Center) 206 - if err != nil { 207 - log.Printf("failed to draw issues label: %v", err) 208 - } 209 - 210 - currentX += issueTextWidth + 50 211 - 212 - // Draw pull request icon, count, and label 213 - prStartX := currentX 214 - err = statsArea.DrawLucideIcon("git-pull-request", currentX, statsY+iconBaselineOffset-iconSize/2+5, iconSize, iconColor) 215 - if err != nil { 216 - log.Printf("failed to draw git-pull-request icon: %v", err) 217 - } 218 - currentX += iconSize + 15 219 - 220 - prText := fmt.Sprintf("%d", pullRequestsText) 221 - err = statsArea.DrawTextAt(prText, currentX, statsY+iconBaselineOffset, iconColor, textSize, ogcard.Middle, ogcard.Left) 222 - if err != nil { 223 - log.Printf("failed to draw PR text: %v", err) 224 - } 225 - prTextWidth := len(prText) * 20 226 - prGroupWidth := iconSize + 15 + prTextWidth 227 - 228 - // Draw "pulls" label below and centered under the icon+text group 229 - labelX = prStartX + prGroupWidth/2 230 - err = iconsArea.DrawTextAt("pulls", labelX, labelY, iconColor, labelSize, ogcard.Top, ogcard.Center) 231 - if err != nil { 232 - log.Printf("failed to draw pulls label: %v", err) 233 - } 234 - 235 - dollyBounds := dollyArea.Img.Bounds() 236 - dollySize := 90 237 - dollyX := dollyBounds.Min.X + (dollyBounds.Dx() / 2) - (dollySize / 2) 238 - dollyY := statsY + iconBaselineOffset - dollySize/2 + 25 239 - dollyColor := color.RGBA{180, 180, 180, 255} // light gray 240 - err = 
dollyArea.DrawDolly(dollyX, dollyY, dollySize, dollyColor) 241 - if err != nil { 242 - log.Printf("dolly silhouette not available (this is ok): %v", err) 243 - } 244 - 245 - // Draw language bar at bottom 246 - err = drawLanguagesCard(languageBarCard, languageStats) 247 - if err != nil { 248 - log.Printf("failed to draw language bar: %v", err) 249 - return nil, err 250 - } 251 - 252 - return mainCard, nil 253 - } 254 - 255 - // hexToColor converts a hex color to a go color 256 - func hexToColor(colorStr string) (*color.RGBA, error) { 257 - colorStr = strings.TrimLeft(colorStr, "#") 258 - 259 - b, err := hex.DecodeString(colorStr) 260 - if err != nil { 261 - return nil, err 262 - } 263 - 264 - if len(b) < 3 { 265 - return nil, fmt.Errorf("expected at least 3 bytes from DecodeString, got %d", len(b)) 266 30 } 267 31 32 + avatarUrl := rp.pages.AvatarUrl(ownerHandle, "256") 268 - clr := color.RGBA{b[0], b[1], b[2], 255} 269 - 270 - return &clr, nil 271 - } 272 - 273 - func drawLanguagesCard(card *ogcard.Card, languageStats []types.RepoLanguageDetails) error { 274 - bounds := card.Img.Bounds() 275 - cardWidth := bounds.Dx() 276 33 277 - if len(languageStats) == 0 { 278 - // Draw a light gray bar if no languages detected 279 - card.DrawRect(bounds.Min.X, bounds.Min.Y, bounds.Max.X, bounds.Max.Y, color.RGBA{225, 228, 232, 255}) 280 - return nil 281 - } 282 - 283 - // Limit to top 5 languages for the visual bar 284 - displayLanguages := languageStats 285 - if len(displayLanguages) > 5 { 286 - displayLanguages = displayLanguages[:5] 287 - } 288 - 289 - currentX := bounds.Min.X 290 - 291 - for _, lang := range displayLanguages { 292 - var langColor *color.RGBA 293 - var err error 294 - 295 - if lang.Color != "" { 296 - langColor, err = hexToColor(lang.Color) 297 - if err != nil { 298 - // Fallback to a default color 299 - langColor = &color.RGBA{149, 157, 165, 255} 300 - } 301 - } else { 302 - // Default color if no color specified 303 - langColor = &color.RGBA{149, 157, 
165, 255} 304 - } 305 - 306 - langWidth := float32(cardWidth) * (lang.Percentage / 100) 307 - card.DrawRect(currentX, bounds.Min.Y, currentX+int(langWidth), bounds.Max.Y, langColor) 308 - currentX += int(langWidth) 309 - } 310 - 311 - // Fill remaining space with the last color (if any gap due to rounding) 312 - if currentX < bounds.Max.X && len(displayLanguages) > 0 { 313 - lastLang := displayLanguages[len(displayLanguages)-1] 314 - var lastColor *color.RGBA 315 - var err error 316 - 317 - if lastLang.Color != "" { 318 - lastColor, err = hexToColor(lastLang.Color) 319 - if err != nil { 320 - lastColor = &color.RGBA{149, 157, 165, 255} 321 - } 322 - } else { 323 - lastColor = &color.RGBA{149, 157, 165, 255} 324 - } 325 - card.DrawRect(currentX, bounds.Min.Y, bounds.Max.X, bounds.Max.Y, lastColor) 326 - } 327 - 328 - return nil 329 - } 330 - 331 - func (rp *Repo) Opengraph(w http.ResponseWriter, r *http.Request) { 332 - f, err := rp.repoResolver.Resolve(r) 333 - if err != nil { 334 - log.Println("failed to get repo and knot", err) 335 - return 336 - } 337 - 338 - // Get language stats directly from database 339 34 var languageStats []types.RepoLanguageDetails 340 35 langs, err := db.GetRepoLanguages( 341 36 rp.db, ··· 344 39 ) 345 40 if err != nil { 346 41 log.Printf("failed to get language stats from db: %v", err) 347 - // non-fatal, continue without language stats 348 42 } else if len(langs) > 0 { 349 43 var total int64 350 44 for _, l := range langs { ··· 375 69 }) 376 70 } 377 71 72 + ogLanguages := []ogre.LanguageData{} 73 + for _, lang := range languageStats { 74 + if len(ogLanguages) >= 5 { 75 + break 76 + } 77 + ogLanguages = append(ogLanguages, ogre.LanguageData{ 78 + Color: lang.Color, 79 + Percentage: lang.Percentage, 80 + }) 378 - card, err := rp.drawRepoSummaryCard(f, languageStats) 379 - if err != nil { 380 - log.Println("failed to draw repo summary card", err) 381 - http.Error(w, "failed to draw repo summary card", http.StatusInternalServerError) 382 
- return 383 81 } 384 82 83 + payload := ogre.RepositoryCardPayload{ 84 + Type: "repository", 85 + RepoName: f.Name, 86 + OwnerHandle: ownerHandle, 87 + Stars: f.RepoStats.StarCount, 88 + Pulls: f.RepoStats.PullCount.Open, 89 + Issues: f.RepoStats.IssueCount.Open, 90 + CreatedAt: f.Created.Format(time.RFC3339), 91 + AvatarUrl: avatarUrl, 92 + Languages: ogLanguages, 93 + } 94 + 95 + imageBytes, err := rp.ogreClient.RenderRepositoryCard(r.Context(), payload) 385 - var imageBuffer bytes.Buffer 386 - err = png.Encode(&imageBuffer, card.Img) 387 96 if err != nil { 97 + log.Println("failed to render repository card", err) 98 + http.Error(w, "failed to render repository card", http.StatusInternalServerError) 388 - log.Println("failed to encode repo summary card", err) 389 - http.Error(w, "failed to encode repo summary card", http.StatusInternalServerError) 390 99 return 391 100 } 392 101 393 - imageBytes := imageBuffer.Bytes() 394 - 395 102 w.Header().Set("Content-Type", "image/png") 103 + w.Header().Set("Cache-Control", "public, max-age=3600") 396 - w.Header().Set("Cache-Control", "public, max-age=3600") // 1 hour 397 104 w.WriteHeader(http.StatusOK) 398 105 _, err = w.Write(imageBytes) 399 106 if err != nil { 107 + log.Println("failed to write repository card", err) 400 - log.Println("failed to write repo summary card", err) 401 108 return 402 109 } 403 110 }
+5 -2
appview/repo/repo.go
··· 26 26 xrpcclient "tangled.org/core/appview/xrpcclient" 27 27 "tangled.org/core/eventconsumer" 28 28 "tangled.org/core/idresolver" 29 + "tangled.org/core/ogre" 29 30 "tangled.org/core/orm" 30 31 "tangled.org/core/rbac" 31 32 "tangled.org/core/tid" 32 33 "tangled.org/core/xrpc/serviceauth" 33 34 34 35 comatproto "github.com/bluesky-social/indigo/api/atproto" 36 + "github.com/bluesky-social/indigo/atproto/atclient" 35 - atpclient "github.com/bluesky-social/indigo/atproto/client" 36 37 "github.com/bluesky-social/indigo/atproto/syntax" 37 38 lexutil "github.com/bluesky-social/indigo/lex/util" 38 39 securejoin "github.com/cyphar/filepath-securejoin" ··· 53 54 serviceAuth *serviceauth.ServiceAuth 54 55 validator *validator.Validator 55 56 cfClient *cloudflare.Client 57 + ogreClient *ogre.Client 56 58 } 57 59 58 60 func New( ··· 82 84 logger: logger, 83 85 validator: validator, 84 86 cfClient: cfClient, 87 + ogreClient: ogre.NewClient(config.Ogre.Host), 85 88 } 86 89 } 87 90 ··· 1204 1207 // this is used to rollback changes made to the PDS 1205 1208 // 1206 1209 // it is a no-op if the provided ATURI is empty 1210 + func rollbackRecord(ctx context.Context, aturi string, client *atclient.APIClient) error { 1207 - func rollbackRecord(ctx context.Context, aturi string, client *atpclient.APIClient) error { 1208 1211 if aturi == "" { 1209 1212 return nil 1210 1213 }
appview/repo/router.go

This file has not been changed.

+2 -10
appview/repo/settings.go
··· 392 392 f, err := rp.repoResolver.Resolve(r) 393 393 user := rp.oauth.GetMultiAccountUser(r) 394 394 395 - scheme := "http" 396 - if !rp.config.Core.Dev { 397 - scheme = "https" 398 - } 399 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 400 - xrpcc := &indigoxrpc.Client{ 401 - Host: host, 402 - } 395 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 403 396 404 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 405 - xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 397 + xrpcBytes, err := tangled.GitTempListBranches(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 406 398 var result types.RepoBranchesResponse 407 399 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 408 400 l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
+8 -23
appview/repo/tags.go
··· 27 27 l.Error("failed to get repo and knot", "err", err) 28 28 return 29 29 } 30 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 31 + xrpcBytes, err := tangled.GitTempListTags(r.Context(), xrpcc, "", 0, f.RepoAt().String()) 32 + if err != nil { 33 + l.Error("failed to call XRPC repo.tags", "err", err) 30 - scheme := "http" 31 - if !rp.config.Core.Dev { 32 - scheme = "https" 33 - } 34 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 35 - xrpcc := &indigoxrpc.Client{ 36 - Host: host, 37 - } 38 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 39 - xrpcBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 0, repo) 40 - if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 41 - l.Error("failed to call XRPC repo.tags", "err", xrpcerr) 42 34 rp.pages.Error503(w) 43 35 return 44 36 } ··· 90 82 l.Error("failed to get repo and knot", "err", err) 91 83 return 92 84 } 93 - scheme := "http" 94 - if !rp.config.Core.Dev { 95 - scheme = "https" 96 - } 97 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 98 - xrpcc := &indigoxrpc.Client{ 99 - Host: host, 100 - } 101 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 102 85 tag := chi.URLParam(r, "tag") 103 86 87 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 88 + 89 + xrpcBytes, err := tangled.GitTempGetTag(r.Context(), xrpcc, f.RepoAt().String(), tag) 104 - xrpcBytes, err := tangled.RepoTag(r.Context(), xrpcc, repo, tag) 105 90 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 106 91 // if we don't match an existing tag, and the tag we're trying 107 92 // to match is "latest", resolve to the most recent tag 108 93 if tag == "latest" { 94 + tagsBytes, err := tangled.GitTempListTags(r.Context(), xrpcc, "", 1, f.RepoAt().String()) 109 - tagsBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 1, repo) 110 95 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 111 96 l.Error("failed to call XRPC repo.tags for latest", "err", xrpcerr) 112 97 rp.pages.Error503(w)
+3 -10
appview/repo/tree.go
··· 33 33 treePath := chi.URLParam(r, "*") 34 34 treePath, _ = url.PathUnescape(treePath) 35 35 treePath = strings.TrimSuffix(treePath, "/") 36 + 37 + xrpcc := &indigoxrpc.Client{Host: rp.config.KnotMirror.Url} 38 + xrpcResp, err := tangled.GitTempGetTree(r.Context(), xrpcc, treePath, ref, f.RepoAt().String()) 36 - scheme := "http" 37 - if !rp.config.Core.Dev { 38 - scheme = "https" 39 - } 40 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 41 - xrpcc := &indigoxrpc.Client{ 42 - Host: host, 43 - } 44 - repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 45 - xrpcResp, err := tangled.RepoTree(r.Context(), xrpcc, treePath, ref, repo) 46 39 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 47 40 l.Error("failed to call XRPC repo.tree", "err", xrpcerr) 48 41 rp.pages.Error503(w)
+2 -2
appview/settings/settings.go
··· 28 28 "tangled.org/core/tid" 29 29 30 30 comatproto "github.com/bluesky-social/indigo/api/atproto" 31 + "github.com/bluesky-social/indigo/atproto/atclient" 31 - atpclient "github.com/bluesky-social/indigo/atproto/client" 32 32 "github.com/bluesky-social/indigo/atproto/syntax" 33 33 lexutil "github.com/bluesky-social/indigo/lex/util" 34 34 "github.com/gliderlabs/ssh" ··· 816 816 817 817 log.Printf("failed to update handle: %s", err) 818 818 msg := err.Error() 819 + var apiErr *atclient.APIError 819 - var apiErr *atpclient.APIError 820 820 if errors.As(err, &apiErr) && apiErr.Message != "" { 821 821 msg = apiErr.Message 822 822 }
-1
appview/spindles/spindles.go
··· 109 109 110 110 repos, err := db.GetRepos( 111 111 s.Db, 112 - 0, 113 112 orm.FilterEq("spindle", instance), 114 113 ) 115 114 if err != nil {
-4
appview/state/knotstream.go
··· 107 107 var errWebhook error 108 108 repos, err := db.GetRepos( 109 109 d, 110 - 0, 111 110 orm.FilterEq("did", record.RepoDid), 112 111 orm.FilterEq("name", record.RepoName), 113 112 ) ··· 149 148 150 149 repos, err := db.GetRepos( 151 150 d, 152 - 0, 153 151 orm.FilterEq("did", record.RepoDid), 154 152 orm.FilterEq("name", record.RepoName), 155 153 ) ··· 241 239 242 240 repos, err := db.GetRepos( 243 241 d, 244 - 0, 245 242 orm.FilterEq("did", record.RepoDid), 246 243 orm.FilterEq("name", record.RepoName), 247 244 ) ··· 307 304 // does this repo have a spindle configured? 308 305 repos, err := db.GetRepos( 309 306 d, 310 - 0, 311 307 orm.FilterEq("did", record.TriggerMetadata.Repo.Did), 312 308 orm.FilterEq("name", record.TriggerMetadata.Repo.Repo), 313 309 )
+88 -12
appview/state/profile.go
··· 17 17 "github.com/gorilla/feeds" 18 18 "tangled.org/core/api/tangled" 19 19 "tangled.org/core/appview/db" 20 + "tangled.org/core/appview/middleware" 20 21 "tangled.org/core/appview/models" 21 22 "tangled.org/core/appview/pages" 23 + "tangled.org/core/appview/pagination" 24 + "tangled.org/core/appview/searchquery" 22 25 "tangled.org/core/orm" 23 26 "tangled.org/core/xrpc" 24 27 ) ··· 27 30 tabVal := r.URL.Query().Get("tab") 28 31 switch tabVal { 29 32 case "repos": 33 + middleware. 34 + Paginate(http.HandlerFunc(s.reposPage)). 35 + ServeHTTP(w, r) 30 - s.reposPage(w, r) 31 36 case "followers": 32 37 s.followersPage(w, r) 33 38 case "following": ··· 139 144 140 145 repos, err := db.GetRepos( 141 146 s.db, 142 - 0, 143 147 orm.FilterEq("did", profile.UserDid), 144 148 ) 145 149 if err != nil { ··· 229 233 } 230 234 l = l.With("profileDid", profile.UserDid) 231 235 236 + params := r.URL.Query() 237 + page := pagination.FromContext(r.Context()) 238 + 239 + query := searchquery.Parse(params.Get("q")) 240 + 241 + var language string 242 + if lang := query.Get("language"); lang != nil { 243 + language = *lang 244 + } 245 + 246 + tf := searchquery.ExtractTextFilters(query) 247 + 248 + searchOpts := models.RepoSearchOptions{ 249 + Keywords: tf.Keywords, 250 + Phrases: tf.Phrases, 251 + NegatedKeywords: tf.NegatedKeywords, 252 + NegatedPhrases: tf.NegatedPhrases, 253 + Did: profile.UserDid, 254 + Language: language, 255 + Page: page, 256 + } 257 + 258 + var repos []models.Repo 259 + var totalRepos int64 260 + 261 + if searchOpts.HasSearchFilters() { 262 + res, err := s.indexer.Repos.Search(r.Context(), searchOpts) 263 + if err != nil { 264 + l.Error("failed to search repos", "err", err) 265 + s.pages.Error500(w) 266 + return 267 + } 268 + 269 + if len(res.Hits) > 0 { 270 + repos, err = db.GetRepos(s.db, orm.FilterIn("id", res.Hits)) 271 + if err != nil { 272 + l.Error("failed to get repos by IDs", "err", err) 273 + s.pages.Error500(w) 274 + return 275 + } 276 + 277 + // 
sort repos to match search result order (by relevance) 278 + repoMap := make(map[int64]models.Repo, len(repos)) 279 + for _, repo := range repos { 280 + repoMap[repo.Id] = repo 281 + } 282 + repos = make([]models.Repo, 0, len(res.Hits)) 283 + for _, id := range res.Hits { 284 + if repo, ok := repoMap[id]; ok { 285 + repos = append(repos, repo) 286 + } 287 + } 288 + } 289 + totalRepos = int64(res.Total) 290 + } else { 291 + repos, err = db.GetReposPaginated( 292 + s.db, 293 + page, 294 + orm.FilterEq("did", profile.UserDid), 295 + ) 296 + if err != nil { 297 + l.Error("failed to get repos", "err", err) 298 + s.pages.Error500(w) 299 + return 300 + } 301 + 302 + totalRepos, err = db.CountRepos( 303 + s.db, 304 + orm.FilterEq("did", profile.UserDid), 305 + ) 306 + if err != nil { 307 + l.Error("failed to count repos", "err", err) 308 + s.pages.Error500(w) 309 + return 310 + } 232 - repos, err := db.GetRepos( 233 - s.db, 234 - 0, 235 - orm.FilterEq("did", profile.UserDid), 236 - ) 237 - if err != nil { 238 - l.Error("failed to get repos", "err", err) 239 - s.pages.Error500(w) 240 - return 241 311 } 242 312 243 313 err = s.pages.ProfileRepos(w, pages.ProfileReposParams{ 244 314 LoggedInUser: s.oauth.GetMultiAccountUser(r), 245 315 Repos: repos, 246 316 Card: profile, 317 + Page: page, 318 + RepoCount: int(totalRepos), 319 + FilterQuery: query.String(), 247 320 }) 321 + if err != nil { 322 + l.Error("failed to render page", "err", err) 323 + } 248 324 } 249 325 250 326 func (s *State) starredPage(w http.ResponseWriter, r *http.Request) { ··· 749 825 profile = &models.Profile{Did: user.Active.Did} 750 826 } 751 827 828 + repos, err := db.GetRepos(s.db, orm.FilterEq("did", user.Active.Did)) 752 - repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Active.Did)) 753 829 if err != nil { 754 830 log.Printf("getting repos for %s: %s", user.Active.Did, err) 755 831 }
+2 -2
appview/state/state.go
··· 38 38 "tangled.org/core/tid" 39 39 40 40 comatproto "github.com/bluesky-social/indigo/api/atproto" 41 + "github.com/bluesky-social/indigo/atproto/atclient" 41 - atpclient "github.com/bluesky-social/indigo/atproto/client" 42 42 "github.com/bluesky-social/indigo/atproto/syntax" 43 43 lexutil "github.com/bluesky-social/indigo/lex/util" 44 44 "github.com/bluesky-social/indigo/xrpc" ··· 588 588 // this is used to rollback changes made to the PDS 589 589 // 590 590 // it is a no-op if the provided ATURI is empty 591 + func rollbackRecord(ctx context.Context, aturi string, client *atclient.APIClient) error { 591 - func rollbackRecord(ctx context.Context, aturi string, client *atpclient.APIClient) error { 592 592 if aturi == "" { 593 593 return nil 594 594 }
+20 -17
avatar/src/index.js
··· 150 150 151 151 const size = searchParams.get("size"); 152 152 const resizeToTiny = size === "tiny"; 153 + const format = searchParams.get("format") || "webp"; 154 + const validFormats = ["webp", "jpeg", "png"]; 155 + const outputFormat = validFormats.includes(format) ? format : "webp"; 156 + 157 + const contentTypes = { 158 + webp: "image/webp", 159 + jpeg: "image/jpeg", 160 + png: "image/png", 161 + }; 153 162 154 163 const cache = caches.default; 155 164 let cacheKey = request.url; ··· 242 251 243 252 // Fetch and optionally resize the avatar 244 253 let avatarResponse; 254 + const cfOptions = outputFormat !== "webp" || resizeToTiny ? { 255 + cf: { 256 + image: { 257 + format: outputFormat, 258 + ...(resizeToTiny ? { width: 32, height: 32, fit: "cover" } : {}), 259 + }, 260 + }, 261 + }: {}; 262 + 263 + avatarResponse = await fetch(avatarUrl, cfOptions); 245 - if (resizeToTiny) { 246 - avatarResponse = await fetch(avatarUrl, { 247 - // cf: { 248 - // image: { 249 - // width: 32, 250 - // height: 32, 251 - // fit: "cover", 252 - // format: "webp", 253 - // }, 254 - // }, 255 - }); 256 - } else { 257 - avatarResponse = await fetch(avatarUrl); 258 - } 259 264 260 265 if (!avatarResponse.ok) { 261 266 return new Response(`failed to fetch avatar for ${actor}.`, { ··· 264 269 } 265 270 266 271 const avatarData = await avatarResponse.arrayBuffer(); 267 - const contentType = 268 - avatarResponse.headers.get("content-type") || "image/jpeg"; 269 272 270 273 response = new Response(avatarData, { 271 274 headers: { 275 + "Content-Type": contentTypes[outputFormat], 272 - "Content-Type": contentType, 273 276 "Cache-Control": "public, max-age=43200", 274 277 }, 275 278 });
+5 -7
blog/blog.go
··· 76 76 77 77 rctx := &markup.RenderContext{ 78 78 RendererType: markup.RendererTypeDefault, 79 - Sanitizer: markup.NewSanitizer(), 80 79 } 81 80 var posts []Post 82 81 for _, entry := range entries { ··· 100 99 } 101 100 102 101 htmlStr := rctx.RenderMarkdownWith(string(rest), markup.NewMarkdownWith("", textension.Dashes)) 103 - sanitized := rctx.SanitizeDefault(htmlStr) 104 102 105 103 posts = append(posts, Post{ 106 104 Meta: meta, 105 + Body: template.HTML(htmlStr), 107 - Body: template.HTML(sanitized), 108 106 }) 109 107 } 110 108 ··· 126 124 for _, p := range posts { 127 125 postURL := strings.TrimRight(baseURL, "/") + "/" + p.Meta.Slug 128 126 127 + var authorName strings.Builder 129 - var authorName string 130 128 for i, a := range p.Meta.Authors { 131 129 if i > 0 { 130 + authorName.WriteString(" & ") 132 - authorName += " & " 133 131 } 132 + authorName.WriteString(a.Name) 134 - authorName += a.Name 135 133 } 136 134 137 135 feed.Items = append(feed.Items, &feeds.Item{ 138 136 Title: p.Meta.Title, 139 137 Link: &feeds.Link{Href: postURL}, 140 138 Description: p.Meta.Subtitle, 139 + Author: &feeds.Author{Name: authorName.String()}, 141 - Author: &feeds.Author{Name: authorName}, 142 140 Created: p.ParsedDate(), 143 141 }) 144 142 }
+1 -1
blog/templates/fragments/footer.html
··· 1 1 {{ define "blog/fragments/footer" }} 2 + <footer class="mt-12 w-full px-6 py-4"> 2 - <footer class="mt-12 w-full px-6 py-4 bg-white dark:bg-gray-800 border-t border-gray-100 dark:border-gray-700"> 3 3 <div class="max-w-[90ch] mx-auto flex flex-wrap justify-center items-center gap-x-4 gap-y-2 text-sm text-gray-500 dark:text-gray-400"> 4 4 <div class="flex items-center justify-center gap-x-2 order-last sm:order-first w-full sm:w-auto"> 5 5 <a href="https://tangled.org" class="no-underline hover:no-underline flex items-center">
+40 -13
blog/templates/index.html
··· 10 10 11 11 {{ define "topbarLayout" }} 12 12 <header class="max-w-screen-xl mx-auto w-full" style="z-index: 20;"> 13 + <nav class="mx-auto space-x-4 px-6 py-2"> 14 + <div class="flex justify-between p-0 items-center"> 15 + <div id="left-items"> 16 + <a href="/" hx-boost="true" class="text-2xl no-underline hover:no-underline flex items-center gap-2"> 17 + {{ template "fragments/logotypeSmall" }} 18 + </a> 19 + </div> 20 + 21 + <div id="right-items" class="flex items-center gap-4"> 22 + <a href="https://tangled.org/login">login</a> 23 + <span class="text-gray-500 dark:text-gray-400">or</span> 24 + <a href="https://tangled.org/signup" class="btn-create py-0 hover:no-underline hover:text-white flex items-center gap-2"> 25 + join now {{ i "arrow-right" "size-4" }} 26 + </a> 27 + </div> 28 + </div> 29 + </nav> 13 - {{ template "layouts/fragments/topbar" . }} 14 30 </header> 15 31 {{ end }} 16 32 ··· 26 42 <div class="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-3 gap-6 mb-14"> 27 43 {{ range .Featured }} 28 44 <a href="/{{ .Meta.Slug }}" class="no-underline hover:no-underline group flex flex-col bg-white dark:bg-gray-800 rounded border border-gray-200 dark:border-gray-700 overflow-hidden hover:bg-gray-100/25 hover:dark:bg-gray-700/25 transition-colors"> 45 + <div class="overflow-hidden bg-gray-100 dark:bg-gray-700 md:h-48"> 29 - <div class="aspect-[16/9] overflow-hidden bg-gray-100 dark:bg-gray-700"> 30 46 <img src="{{ .Meta.Image }}" alt="{{ .Meta.Title }}" class="w-full h-full object-cover group-hover:scale-[1.02] transition-transform duration-300" /> 31 47 </div> 32 48 <div class="flex flex-col flex-1 px-5 py-4"> 33 49 <div class="text-xs text-gray-400 dark:text-gray-500 mb-2"> 50 + {{ $date := .ParsedDate }}{{ $date | shortTimeFmt}} 34 - {{ $date := .ParsedDate }}{{ $date.Format "Jan 2, 2006" }} 35 51 {{ if .Meta.Draft }}<span class="text-red-500">[draft]</span>{{ end }} 36 52 </div> 53 + <h2 class="font-bold text-gray-900 dark:text-white text-base leading-snug 
mb-1">{{ .Meta.Title }}</h2> 37 - <h2 class="font-bold text-gray-900 dark:text-white text-base leading-snug mb-1 group-hover:underline">{{ .Meta.Title }}</h2> 38 54 <p class="text-sm text-gray-500 dark:text-gray-400 line-clamp-2 flex-1">{{ .Meta.Subtitle }}</p> 55 + <div class="flex items-center mt-4 gap-2"> 56 + {{ $hasAvatar := false }}{{ range .Meta.Authors }}{{ if tinyAvatar .Handle }}{{ $hasAvatar = true }}{{ end }}{{ end }} 57 + {{ if $hasAvatar }} 39 - <div class="flex items-center mt-4"> 40 58 <div class="inline-flex items-center -space-x-2"> 41 59 {{ range .Meta.Authors }} 60 + {{ $av := tinyAvatar .Handle }}{{ if $av }}<img src="{{ $av }}" class="size-6 rounded-full border border-gray-300 dark:border-gray-700" alt="{{ .Name }}" title="{{ .Name }}" />{{ end }} 61 + {{ end }} 62 + </div> 63 + {{ end }} 64 + <div class="text-xs"> 65 + {{ $last := sub (len .Meta.Authors) 1 }} 66 + {{ range $i, $n := .Meta.Authors }} 67 + {{ $n.Handle }}{{ if ne $i $last }}, {{ end }} 42 - <img src="{{ tinyAvatar .Handle }}" class="size-6 rounded-full border border-gray-300 dark:border-gray-700" alt="{{ .Name }}" title="{{ .Name }}" /> 43 68 {{ end }} 44 69 </div> 45 70 </div> ··· 53 78 {{ range .Posts }} 54 79 <a href="/{{ .Meta.Slug }}" class="no-underline hover:no-underline group flex items-center justify-between gap-4 px-6 py-3 hover:bg-gray-100/25 hover:dark:bg-gray-700/25 transition-colors"> 55 80 <div class="flex items-center gap-3 min-w-0"> 81 + <span class="font-medium text-gray-900 dark:text-white truncate"> 82 + {{ .Meta.Title }} 83 + {{ if .Meta.Draft }}<span class="text-red-500 text-xs font-normal ml-1">[draft]</span>{{ end }} 84 + </span> 85 + </div> 86 + <div class="flex items-center gap-2"> 56 87 <div class="inline-flex items-center -space-x-2 shrink-0"> 57 88 {{ range .Meta.Authors }} 58 89 <img src="{{ tinyAvatar .Handle }}" class="size-5 rounded-full border border-gray-300 dark:border-gray-700" alt="{{ .Name }}" title="{{ .Name }}" /> 59 90 {{ end }} 60 91 
</div> 92 + <div class="text-sm text-gray-400 dark:text-gray-500 shrink-0"> 93 + {{ $date := .ParsedDate }}{{ $date | shortTimeFmt }} 94 + </div> 61 - <span class="font-medium text-gray-900 dark:text-white group-hover:underline truncate"> 62 - {{ .Meta.Title }} 63 - {{ if .Meta.Draft }}<span class="text-red-500 text-xs font-normal ml-1">[draft]</span>{{ end }} 64 - </span> 65 - </div> 66 - <div class="text-sm text-gray-400 dark:text-gray-500 shrink-0"> 67 - {{ $date := .ParsedDate }}{{ $date.Format "Jan 02, 2006" }} 68 95 </div> 69 96 </a> 70 97 {{ end }}
+5 -2
blog/templates/post.html
··· 35 35 {{ $authors := .Post.Meta.Authors }} 36 36 <p class="mb-1 text-sm text-gray-600 dark:text-gray-400"> 37 37 {{ $date := .Post.ParsedDate }} 38 + {{ $date | shortTimeFmt }} 38 - {{ $date.Format "02 Jan, 2006" }} 39 39 </p> 40 40 41 41 <h1 class="mb-0 text-2xl font-bold dark:text-white"> ··· 45 45 <p class="italic mt-1 mb-3 text-lg text-gray-600 dark:text-gray-400">{{ .Post.Meta.Subtitle }}</p> 46 46 47 47 <div class="flex items-center gap-3 not-prose"> 48 + {{ $hasAvatar := false }}{{ range $authors }}{{ if tinyAvatar .Handle }}{{ $hasAvatar = true }}{{ end }}{{ end }} 49 + {{ if $hasAvatar }} 48 50 <div class="inline-flex items-center -space-x-2"> 49 51 {{ range $authors }} 52 + {{ $av := tinyAvatar .Handle }}{{ if $av }}<img src="{{ $av }}" class="size-7 rounded-full border border-gray-300 dark:border-gray-700" alt="{{ .Handle }}" title="{{ .Handle }}" />{{ end }} 50 - <img src="{{ tinyAvatar .Handle }}" class="size-7 rounded-full border border-gray-300 dark:border-gray-700" alt="{{ .Handle }}" title="{{ .Handle }}" /> 51 53 {{ end }} 52 54 </div> 55 + {{ end }} 53 56 <div class="flex items-center gap-1 text-sm text-gray-700 dark:text-gray-300"> 54 57 {{ range $i, $a := $authors }} 55 58 {{ if gt $i 0 }}<span class="text-gray-400">&amp;</span>{{ end }}
+1 -1
blog/templates/text.html
··· 51 51 <p class="px-6 mb-0 text-sm text-gray-600 dark:text-gray-400"> 52 52 {{ $dateStr := index .Meta "date" }} 53 53 {{ $date := parsedate $dateStr }} 54 + {{ $date | shortTimeFmt }} 54 - {{ $date.Format "02 Jan, 2006" }} 55 55 56 56 <span class="mx-2 select-none">&middot;</span> 57 57
+32 -5
cmd/blog/main.go
··· 4 4 "context" 5 5 "fmt" 6 6 "io" 7 + "io/fs" 7 8 "log/slog" 8 9 "net/http" 9 10 "os" ··· 59 60 func runBuild(ctx context.Context, logger *slog.Logger) error { 60 61 cfg, err := config.LoadConfig(ctx) 61 62 if err != nil { 63 + return fmt.Errorf("failed to load config: %w", err) 62 - cfg = &config.Config{} 63 64 } 64 65 65 66 p, err := makePages(ctx, cfg, logger) ··· 84 85 return fmt.Errorf("rendering index: %w", err) 85 86 } 86 87 87 - // posts — each at build/<slug>/index.html directly (no /blog/ prefix) 88 88 for _, post := range posts { 89 - post := post 90 89 postDir := filepath.Join(outDir, post.Meta.Slug) 91 90 if err := os.MkdirAll(postDir, 0755); err != nil { 92 91 return err ··· 98 97 } 99 98 } 100 99 100 + // atom feed 101 - // atom feed — at build/feed.xml 102 101 baseURL := "https://blog.tangled.org" 103 102 atom, err := blog.AtomFeed(posts, baseURL) 104 103 if err != nil { ··· 108 107 return fmt.Errorf("writing feed: %w", err) 109 108 } 110 109 110 + // copy embedded static assets into build/static/ so Cloudflare Pages 111 + // can serve them from the same origin as the built HTML 112 + staticSrc, err := fs.Sub(pages.Files, "static") 113 + if err != nil { 114 + return fmt.Errorf("accessing embedded static dir: %w", err) 115 + } 116 + if err := copyFS(staticSrc, filepath.Join(outDir, "static")); err != nil { 117 + return fmt.Errorf("copying static assets: %w", err) 118 + } 119 + 111 120 logger.Info("build complete", "dir", outDir) 112 121 return nil 113 122 } 114 123 124 + // copyFS copies all files from src into destDir, preserving directory structure. 
125 + func copyFS(src fs.FS, destDir string) error { 126 + return fs.WalkDir(src, ".", func(path string, d fs.DirEntry, err error) error { 127 + if err != nil { 128 + return err 129 + } 130 + dest := filepath.Join(destDir, path) 131 + if d.IsDir() { 132 + return os.MkdirAll(dest, 0755) 133 + } 134 + data, err := fs.ReadFile(src, path) 135 + if err != nil { 136 + return err 137 + } 138 + return os.WriteFile(dest, data, 0644) 139 + }) 140 + } 141 + 115 142 func runServe(ctx context.Context, logger *slog.Logger, addr string) error { 116 143 cfg, err := config.LoadConfig(ctx) 117 144 if err != nil { 145 + return fmt.Errorf("failed to load config: %w", err) 118 - cfg = &config.Config{} 119 146 } 120 147 121 148 p, err := makePages(ctx, cfg, logger)
+58
cmd/knotmirror/main.go
··· 1 + package main 2 + 3 + import ( 4 + "context" 5 + "log/slog" 6 + "os" 7 + "os/signal" 8 + "syscall" 9 + 10 + "github.com/carlmjohnson/versioninfo" 11 + "github.com/urfave/cli/v3" 12 + "tangled.org/core/knotmirror" 13 + "tangled.org/core/knotmirror/config" 14 + "tangled.org/core/log" 15 + ) 16 + 17 + func main() { 18 + if err := run(os.Args); err != nil { 19 + slog.Error("error running knotmirror", "err", err) 20 + os.Exit(-1) 21 + } 22 + } 23 + 24 + func run(args []string) error { 25 + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) 26 + defer cancel() 27 + 28 + logger := log.New("knotmirror") 29 + slog.SetDefault(logger) 30 + ctx = log.IntoContext(ctx, logger) 31 + 32 + app := cli.Command{ 33 + Name: "knotmirror", 34 + Usage: "knot mirroring service", 35 + Version: versioninfo.Short(), 36 + } 37 + app.Flags = []cli.Flag{} 38 + app.Commands = []*cli.Command{ 39 + { 40 + Name: "serve", 41 + Usage: "run the knotmirror daemon", 42 + Action: runKnotMirror, 43 + Flags: []cli.Flag{}, 44 + }, 45 + } 46 + return app.Run(ctx, args) 47 + } 48 + 49 + func runKnotMirror(ctx context.Context, cmd *cli.Command) error { 50 + logger := log.FromContext(ctx) 51 + cfg, err := config.Load(ctx) 52 + if err != nil { 53 + return err 54 + } 55 + 56 + logger.Debug("config loaded:", "config", cfg) 57 + return knotmirror.Run(ctx, cfg) 58 + }
flake.lock

This file has not been changed.

+16 -1
flake.nix
··· 106 106 knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {}; 107 107 knot = self.callPackage ./nix/pkgs/knot.nix {}; 108 108 dolly = self.callPackage ./nix/pkgs/dolly.nix {}; 109 + tap = self.callPackage ./nix/pkgs/tap.nix {}; 110 + knotmirror = self.callPackage ./nix/pkgs/knotmirror.nix {}; 109 111 }); 110 112 in { 111 113 overlays.default = final: prev: { 112 - inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs dolly; 114 + inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs dolly tap knotmirror; 113 115 }; 114 116 115 117 packages = forAllSystems (system: let ··· 130 132 sqlite-lib 131 133 docs 132 134 dolly 135 + tap 136 + knotmirror 133 137 ; 134 138 135 139 pkgsStatic-appview = staticPackages.appview; ··· 204 208 pkgs.coreutils # for those of us who are on systems that use busybox (alpine) 205 209 packages'.lexgen 206 210 packages'.treefmt-wrapper 211 + packages'.tap 207 212 ]; 208 213 shellHook = '' 209 214 mkdir -p appview/pages/static ··· 350 355 351 356 services.tangled.appview.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.appview; 352 357 }; 358 + nixosModules.knotmirror = { 359 + lib, 360 + pkgs, 361 + ... 362 + }: { 363 + imports = [./nix/modules/knotmirror.nix]; 364 + 365 + services.tangled.knotmirror.tap-package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.tap; 366 + services.tangled.knotmirror.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.knotmirror; 367 + }; 353 368 nixosModules.knot = { 354 369 lib,
+18 -10
go.mod
··· 12 12 github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 13 13 github.com/blevesearch/bleve/v2 v2.5.3 14 14 github.com/bluekeyes/go-gitdiff v0.8.1 15 + github.com/bluesky-social/indigo v0.0.0-20260220055544-bf41e2ee75ab 16 + github.com/bluesky-social/jetstream v0.0.0-20260226214936-e0274250f654 15 - github.com/bluesky-social/indigo v0.0.0-20251003000214-3259b215110e 16 - github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1 17 17 github.com/bmatcuk/doublestar/v4 v4.9.1 18 18 github.com/carlmjohnson/versioninfo v0.22.5 19 19 github.com/casbin/casbin/v2 v2.103.0 ··· 35 35 github.com/hiddeco/sshsig v0.2.0 36 36 github.com/hpcloud/tail v1.0.0 37 37 github.com/ipfs/go-cid v0.5.0 38 + github.com/jackc/pgx/v5 v5.8.0 38 39 github.com/mattn/go-sqlite3 v1.14.24 39 40 github.com/microcosm-cc/bluemonday v1.0.27 40 41 github.com/openbao/openbao/api/v2 v2.3.0 41 42 github.com/posthog/posthog-go v1.5.5 43 + github.com/prometheus/client_golang v1.23.2 42 44 github.com/redis/go-redis/v9 v9.7.3 43 45 github.com/resend/resend-go/v2 v2.15.0 44 46 github.com/sethvargo/go-envconfig v1.1.0 45 47 github.com/srwiley/oksvg v0.0.0-20221011165216-be6e8873101c 46 48 github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef 49 + github.com/stretchr/testify v1.11.1 50 + github.com/urfave/cli/v3 v3.4.1 47 - github.com/stretchr/testify v1.10.0 48 - github.com/urfave/cli/v3 v3.3.3 49 51 github.com/whyrusleeping/cbor-gen v0.3.1 50 52 github.com/yuin/goldmark v1.7.13 51 53 github.com/yuin/goldmark-emoji v1.0.6 52 54 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 53 55 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 54 56 go.abhg.dev/goldmark/mermaid v0.6.0 57 + golang.org/x/crypto v0.41.0 55 - golang.org/x/crypto v0.40.0 56 58 golang.org/x/image v0.31.0 59 + golang.org/x/net v0.43.0 57 - golang.org/x/net v0.42.0 58 60 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 59 61 gopkg.in/yaml.v3 v3.0.1 60 62 ) ··· 116 118 
github.com/dlclark/regexp2 v1.11.5 // indirect 117 119 github.com/docker/go-connections v0.5.0 // indirect 118 120 github.com/docker/go-units v0.5.0 // indirect 121 + github.com/earthboundkid/versioninfo/v2 v2.24.1 // indirect 119 122 github.com/emirpasic/gods v1.18.1 // indirect 120 123 github.com/felixge/httpsnoop v1.0.4 // indirect 121 124 github.com/fsnotify/fsnotify v1.6.0 // indirect ··· 160 163 github.com/ipfs/go-log v1.0.5 // indirect 161 164 github.com/ipfs/go-log/v2 v2.6.0 // indirect 162 165 github.com/ipfs/go-metrics-interface v0.3.0 // indirect 166 + github.com/jackc/pgpassfile v1.0.0 // indirect 167 + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 168 + github.com/jackc/puddle/v2 v2.2.2 // indirect 163 169 github.com/json-iterator/go v1.1.12 // indirect 164 170 github.com/kevinburke/ssh_config v1.2.0 // indirect 165 171 github.com/klauspost/compress v1.18.0 // indirect ··· 192 198 github.com/pkg/errors v0.9.1 // indirect 193 199 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 194 200 github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f // indirect 195 - github.com/prometheus/client_golang v1.22.0 // indirect 196 201 github.com/prometheus/client_model v0.6.2 // indirect 202 + github.com/prometheus/common v0.66.1 // indirect 197 - github.com/prometheus/common v0.64.0 // indirect 198 203 github.com/prometheus/procfs v0.16.1 // indirect 199 204 github.com/rivo/uniseg v0.4.7 // indirect 200 205 github.com/ryanuber/go-glob v1.0.0 // indirect ··· 221 226 go.uber.org/atomic v1.11.0 // indirect 222 227 go.uber.org/multierr v1.11.0 // indirect 223 228 go.uber.org/zap v1.27.0 // indirect 229 + go.yaml.in/yaml/v2 v2.4.2 // indirect 224 230 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect 225 231 golang.org/x/sync v0.17.0 // indirect 232 + golang.org/x/sys v0.35.0 // indirect 226 - golang.org/x/sys v0.34.0 // indirect 227 233 golang.org/x/text v0.29.0 // indirect 228 234 
golang.org/x/time v0.12.0 // indirect 229 235 google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect 230 236 google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect 231 237 google.golang.org/grpc v1.73.0 // indirect 238 + google.golang.org/protobuf v1.36.8 // indirect 232 - google.golang.org/protobuf v1.36.6 // indirect 233 239 gopkg.in/fsnotify.v1 v1.4.7 // indirect 234 240 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 235 241 gopkg.in/warnings.v0 v0.1.2 // indirect ··· 245 251 replace github.com/bluekeyes/go-gitdiff => tangled.sh/oppi.li/go-gitdiff v0.8.2 246 252 247 253 replace github.com/alecthomas/chroma/v2 => github.com/oppiliappan/chroma/v2 v2.24.2 254 + 255 + replace github.com/bluesky-social/indigo => github.com/boltlessengineer/indigo v0.0.0-20260315101958-fb1dfa36fed2 248 256 249 257 // from bluesky-social/indigo 250 258 replace github.com/gocql/gocql => github.com/scylladb/gocql v1.14.4
+36 -22
go.sum
··· 94 94 github.com/blevesearch/zapx/v15 v15.4.2/go.mod h1:1pssev/59FsuWcgSnTa0OeEpOzmhtmr/0/11H0Z8+Nw= 95 95 github.com/blevesearch/zapx/v16 v16.2.4 h1:tGgfvleXTAkwsD5mEzgM3zCS/7pgocTCnO1oyAUjlww= 96 96 github.com/blevesearch/zapx/v16 v16.2.4/go.mod h1:Rti/REtuuMmzwsI8/C/qIzRaEoSK/wiFYw5e5ctUKKs= 97 + github.com/bluesky-social/jetstream v0.0.0-20260226214936-e0274250f654 h1:OK76FcHhZp8ohjRB0OMWgti0oYAWFlt3KDQcIkH1pfI= 98 + github.com/bluesky-social/jetstream v0.0.0-20260226214936-e0274250f654/go.mod h1:vt8kVRKtvrBspt9G38wDD8+BotjIMO8u8IYoVnyE4zY= 97 - github.com/bluesky-social/indigo v0.0.0-20251003000214-3259b215110e h1:IutKPwmbU0LrYqw03EuwJtMdAe67rDTrL1U8S8dicRU= 98 - github.com/bluesky-social/indigo v0.0.0-20251003000214-3259b215110e/go.mod h1:n6QE1NDPFoi7PRbMUZmc2y7FibCqiVU4ePpsvhHUBR8= 99 - github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1 h1:CFvRtYNSnWRAi/98M3O466t9dYuwtesNbu6FVPymRrA= 100 - github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1/go.mod h1:WiYEeyJSdUwqoaZ71KJSpTblemUCpwJfh5oVXplK6T4= 101 99 github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 102 100 github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 103 101 github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= 104 102 github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 103 + github.com/boltlessengineer/indigo v0.0.0-20260315101958-fb1dfa36fed2 h1:63+EsT7kltod8g1eA0eNuvq1q9ANJWRdxlLeJjJDVYY= 104 + github.com/boltlessengineer/indigo v0.0.0-20260315101958-fb1dfa36fed2/go.mod h1:VG/LeqLGNI3Ew7lsYixajnZGFfWPv144qbUddh+Oyag= 105 105 github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 106 106 github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 107 107 github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= ··· 178 
178 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 179 179 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 180 180 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 181 + github.com/earthboundkid/versioninfo/v2 v2.24.1 h1:SJTMHaoUx3GzjjnUO1QzP3ZXK6Ee/nbWyCm58eY3oUg= 182 + github.com/earthboundkid/versioninfo/v2 v2.24.1/go.mod h1:VcWEooDEuyUJnMfbdTh0uFN4cfEIg+kHMuWB2CDCLjw= 181 183 github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= 182 184 github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= 183 185 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= ··· 348 350 github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8= 349 351 github.com/ipfs/go-metrics-interface v0.3.0 h1:YwG7/Cy4R94mYDUuwsBfeziJCVm9pBMJ6q/JR9V40TU= 350 352 github.com/ipfs/go-metrics-interface v0.3.0/go.mod h1:OxxQjZDGocXVdyTPocns6cOLwHieqej/jos7H4POwoY= 353 + github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= 354 + github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= 355 + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= 356 + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= 357 + github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= 358 + github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= 359 + github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= 360 + github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= 351 361 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 352 362 
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 353 363 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= ··· 369 379 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 370 380 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 371 381 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 382 + github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 383 + github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 372 384 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= 373 385 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 374 386 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= ··· 470 482 github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= 471 483 github.com/posthog/posthog-go v1.5.5 h1:2o3j7IrHbTIfxRtj4MPaXKeimuTYg49onNzNBZbwksM= 472 484 github.com/posthog/posthog-go v1.5.5/go.mod h1:3RqUmSnPuwmeVj/GYrS75wNGqcAKdpODiwc83xZWgdE= 485 + github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= 486 + github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= 473 - github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 474 - github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 475 487 github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 476 488 github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= 489 + github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= 490 + github.com/prometheus/common 
v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= 477 - github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= 478 - github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= 479 491 github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= 480 492 github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 481 493 github.com/redis/go-redis/v9 v9.0.0-rc.4/go.mod h1:Vo3EsyWnicKnSKCA7HhgnvnyA74wOA69Cd2Meli5mmA= ··· 521 533 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 522 534 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 523 535 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 536 + github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 537 + github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 524 - github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 525 - github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 526 538 github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= 527 539 github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= 528 540 github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= ··· 535 547 github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= 536 548 github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= 537 549 github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 550 + github.com/urfave/cli/v3 v3.4.1 h1:1M9UOCy5bLmGnuu1yn3t3CB4rG79Rtoxuv1sPhnm6qM= 551 + github.com/urfave/cli/v3 v3.4.1/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo= 538 - github.com/urfave/cli/v3 v3.3.3 
h1:byCBaVdIXuLPIDm5CYZRVG6NvT7tv1ECqdU4YzlEa3I= 539 - github.com/urfave/cli/v3 v3.3.3/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo= 540 552 github.com/vmihailenco/go-tinylfu v0.2.2 h1:H1eiG6HM36iniK6+21n9LLpzx1G9R3DJa2UjUjbynsI= 541 553 github.com/vmihailenco/go-tinylfu v0.2.2/go.mod h1:CutYi2Q9puTxfcolkliPq4npPuofg9N9t8JVrjzwa3Q= 542 554 github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= ··· 606 618 go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= 607 619 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= 608 620 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= 621 + go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= 622 + go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= 609 623 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 610 624 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 611 625 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= ··· 613 627 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 614 628 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= 615 629 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= 630 + golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= 631 + golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= 616 - golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= 617 - golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= 618 632 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= 619 633 
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= 620 634 golang.org/x/image v0.31.0 h1:mLChjE2MV6g1S7oqbXC0/UcKijjm5fnJLUYKIYrLESA= ··· 649 663 golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= 650 664 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 651 665 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= 666 + golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= 667 + golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= 652 - golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= 653 - golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= 654 668 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 655 669 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 656 670 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= ··· 690 704 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 691 705 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 692 706 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 707 + golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= 708 + golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 693 - golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= 694 - golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 695 709 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 696 710 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 697 711 golang.org/x/term 
v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= ··· 701 715 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 702 716 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= 703 717 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= 718 + golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= 719 + golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= 704 - golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= 705 - golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= 706 720 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 707 721 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 708 722 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= ··· 755 769 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 756 770 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 757 771 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 772 + google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= 773 + google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= 758 - google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 759 - google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 760 774 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 761 775 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 762 776 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
hook/hook.go

This file has not been changed.

hook/setup.go

This file has not been changed.

+3 -3
idresolver/resolver.go
··· 60 60 base := BaseDirectory(plcUrl) 61 61 cached := identity.NewCacheDirectory(base, 250_000, time.Hour*24, time.Minute*2, time.Minute*5) 62 62 return &Resolver{ 63 + directory: cached, 63 - directory: &cached, 64 64 } 65 65 } 66 66 ··· 80 80 return nil, err 81 81 } 82 82 83 + return r.directory.Lookup(ctx, id) 83 - return r.directory.Lookup(ctx, *id) 84 84 } 85 85 86 86 func (r *Resolver) ResolveIdents(ctx context.Context, idents []string) []*identity.Identity { ··· 117 117 return err 118 118 } 119 119 120 + return r.directory.Purge(ctx, id) 120 - return r.directory.Purge(ctx, *id) 121 121 } 122 122 123 123 func (r *Resolver) Directory() identity.Directory {
+5
input.css
··· 151 151 overflow-wrap: anywhere; 152 152 } 153 153 154 + .prose td, 155 + .prose th { 156 + overflow-wrap: normal; 157 + } 158 + 154 159 .prose hr { 155 160 @apply my-2; 156 161 }
+185
knotmirror/adminpage.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "database/sql" 5 + "embed" 6 + "fmt" 7 + "html" 8 + "html/template" 9 + "log/slog" 10 + "net/http" 11 + "strconv" 12 + "time" 13 + 14 + "github.com/bluesky-social/indigo/atproto/syntax" 15 + "github.com/go-chi/chi/v5" 16 + "tangled.org/core/appview/pagination" 17 + "tangled.org/core/knotmirror/db" 18 + "tangled.org/core/knotmirror/models" 19 + ) 20 + 21 + //go:embed templates/*.html 22 + var templateFS embed.FS 23 + 24 + const repoPageSize = 20 25 + 26 + type AdminServer struct { 27 + db *sql.DB 28 + resyncer *Resyncer 29 + logger *slog.Logger 30 + } 31 + 32 + func NewAdminServer(l *slog.Logger, database *sql.DB, resyncer *Resyncer) *AdminServer { 33 + return &AdminServer{ 34 + db: database, 35 + resyncer: resyncer, 36 + logger: l, 37 + } 38 + } 39 + 40 + func (s *AdminServer) Router() http.Handler { 41 + r := chi.NewRouter() 42 + r.Get("/repos", s.handleRepos()) 43 + r.Get("/hosts", s.handleHosts()) 44 + 45 + r.Post("/api/triggerRepoResync", s.handleRepoResyncTrigger()) 46 + r.Post("/api/cancelRepoResync", s.handleRepoResyncCancel()) 47 + return r 48 + } 49 + 50 + func funcmap() template.FuncMap { 51 + return template.FuncMap{ 52 + "add": func(a, b int) int { return a + b }, 53 + "sub": func(a, b int) int { return a - b }, 54 + "readt": func(ts int64) string { 55 + if ts <= 0 { 56 + return "n/a" 57 + } 58 + return time.Unix(ts, 0).Format("2006-01-02 15:04") 59 + }, 60 + "const": func() map[string]any { 61 + return map[string]any{ 62 + "AllRepoStates": models.AllRepoStates, 63 + "AllHostStatuses": models.AllHostStatuses, 64 + } 65 + }, 66 + } 67 + } 68 + 69 + func (s *AdminServer) handleRepos() http.HandlerFunc { 70 + tpl := template.Must(template.New("").Funcs(funcmap()).ParseFS(templateFS, "templates/base.html", "templates/repos.html")) 71 + return func(w http.ResponseWriter, r *http.Request) { 72 + pageNum, _ := strconv.Atoi(r.URL.Query().Get("page")) 73 + if pageNum < 1 { 74 + pageNum = 1 75 + } 76 + page := 
pagination.Page{ 77 + Offset: (pageNum - 1) * repoPageSize, 78 + Limit: repoPageSize, 79 + } 80 + 81 + var ( 82 + did = r.URL.Query().Get("did") 83 + knot = r.URL.Query().Get("knot") 84 + state = r.URL.Query().Get("state") 85 + ) 86 + 87 + repos, err := db.ListRepos(r.Context(), s.db, page, did, knot, state) 88 + if err != nil { 89 + http.Error(w, err.Error(), http.StatusInternalServerError) 90 + return 91 + } 92 + counts, err := db.GetRepoCountsByState(r.Context(), s.db) 93 + if err != nil { 94 + http.Error(w, err.Error(), http.StatusInternalServerError) 95 + return 96 + } 97 + err = tpl.ExecuteTemplate(w, "base", map[string]any{ 98 + "Repos": repos, 99 + "RepoCounts": counts, 100 + "Page": pageNum, 101 + "FilterByDid": did, 102 + "FilterByKnot": knot, 103 + "FilterByState": models.RepoState(state), 104 + }) 105 + if err != nil { 106 + slog.Error("failed to render", "err", err) 107 + } 108 + } 109 + } 110 + 111 + func (s *AdminServer) handleHosts() http.HandlerFunc { 112 + tpl := template.Must(template.New("").Funcs(funcmap()).ParseFS(templateFS, "templates/base.html", "templates/hosts.html")) 113 + return func(w http.ResponseWriter, r *http.Request) { 114 + var status = models.HostStatus(r.URL.Query().Get("status")) 115 + if status == "" { 116 + status = models.HostStatusActive 117 + } 118 + 119 + hosts, err := db.ListHosts(r.Context(), s.db, status) 120 + if err != nil { 121 + http.Error(w, err.Error(), http.StatusInternalServerError) 122 + return 123 + } 124 + err = tpl.ExecuteTemplate(w, "base", map[string]any{ 125 + "Hosts": hosts, 126 + "FilterByStatus": models.HostStatus(status), 127 + }) 128 + if err != nil { 129 + slog.Error("failed to render", "err", err) 130 + } 131 + } 132 + } 133 + 134 + func (s *AdminServer) handleRepoResyncTrigger() http.HandlerFunc { 135 + return func(w http.ResponseWriter, r *http.Request) { 136 + var repoQuery = r.FormValue("repo") 137 + 138 + repo, err := syntax.ParseATURI(repoQuery) 139 + if err != nil || repo.RecordKey() == "" 
{ 140 + writeNotif(w, http.StatusBadRequest, fmt.Sprintf("repo parameter invalid: %s", repoQuery)) 141 + return 142 + } 143 + 144 + if err := s.resyncer.TriggerResyncJob(r.Context(), repo); err != nil { 145 + s.logger.Error("failed to trigger resync job", "err", err) 146 + writeNotif(w, http.StatusInternalServerError, fmt.Sprintf("repo parameter invalid: %s", repoQuery)) 147 + return 148 + } 149 + writeNotif(w, http.StatusOK, "success") 150 + } 151 + } 152 + 153 + func (s *AdminServer) handleRepoResyncCancel() http.HandlerFunc { 154 + return func(w http.ResponseWriter, r *http.Request) { 155 + var repoQuery = r.FormValue("repo") 156 + 157 + repo, err := syntax.ParseATURI(repoQuery) 158 + if err != nil || repo.RecordKey() == "" { 159 + writeNotif(w, http.StatusBadRequest, fmt.Sprintf("repo parameter invalid: %s", repoQuery)) 160 + return 161 + } 162 + 163 + s.resyncer.CancelResyncJob(repo) 164 + writeNotif(w, http.StatusOK, "success") 165 + } 166 + } 167 + 168 + func writeNotif(w http.ResponseWriter, status int, msg string) { 169 + w.Header().Set("Content-Type", "text/html") 170 + w.WriteHeader(status) 171 + 172 + class := "info" 173 + switch { 174 + case status >= 500: 175 + class = "error" 176 + case status >= 400: 177 + class = "warn" 178 + } 179 + 180 + fmt.Fprintf(w, 181 + `<div hx-swap-oob="beforeend:#notifications"><div class="notif %s">%s</div></div>`, 182 + class, 183 + html.EscapeString(msg), 184 + ) 185 + }
+45
knotmirror/config/config.go
··· 1 + package config 2 + 3 + import ( 4 + "context" 5 + "time" 6 + 7 + "github.com/sethvargo/go-envconfig" 8 + ) 9 + 10 + type Config struct { 11 + PlcUrl string `env:"MIRROR_PLC_URL, default=https://plc.directory"` 12 + TapUrl string `env:"MIRROR_TAP_URL, default=http://localhost:2480"` 13 + DbUrl string `env:"MIRROR_DB_URL, required"` 14 + KnotUseSSL bool `env:"MIRROR_KNOT_USE_SSL, default=false"` // use SSL for Knot when not scheme is not specified 15 + KnotSSRF bool `env:"MIRROR_KNOT_SSRF, default=false"` 16 + GitRepoBasePath string `env:"MIRROR_GIT_BASEPATH, default=repos"` 17 + GitRepoFetchTimeout time.Duration `env:"MIRROR_GIT_FETCH_TIMEOUT, default=600s"` 18 + ResyncParallelism int `env:"MIRROR_RESYNC_PARALLELISM, default=5"` 19 + Slurper SlurperConfig `env:",prefix=MIRROR_SLURPER_"` 20 + UseSSL bool `env:"MIRROR_USE_SSL, default=false"` 21 + Hostname string `env:"MIRROR_HOSTNAME, required"` 22 + Listen string `env:"MIRROR_LISTEN, default=:7000"` 23 + MetricsListen string `env:"MIRROR_METRICS_LISTEN, default=127.0.0.1:7100"` 24 + AdminListen string `env:"MIRROR_ADMIN_LISTEN, default=127.0.0.1:7200"` 25 + } 26 + 27 + func (c *Config) BaseUrl() string { 28 + if c.UseSSL { 29 + return "https://" + c.Hostname 30 + } 31 + return "http://" + c.Hostname 32 + } 33 + 34 + type SlurperConfig struct { 35 + PersistCursorPeriod time.Duration `env:"PERSIST_CURSOR_PERIOD, default=4s"` 36 + ConcurrencyPerHost int `env:"CONCURRENCY, default=4"` 37 + } 38 + 39 + func Load(ctx context.Context) (*Config, error) { 40 + var cfg Config 41 + if err := envconfig.Process(ctx, &cfg); err != nil { 42 + return nil, err 43 + } 44 + return &cfg, nil 45 + }
+25
knotmirror/crawler.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "log/slog" 7 + 8 + "tangled.org/core/log" 9 + ) 10 + 11 + type Crawler struct { 12 + logger *slog.Logger 13 + db *sql.DB 14 + } 15 + 16 + func NewCrawler(l *slog.Logger, db *sql.DB) *Crawler { 17 + return &Crawler{ 18 + logger: log.SubLogger(l, "crawler"), 19 + db: db, 20 + } 21 + } 22 + 23 + func (c *Crawler) Start(ctx context.Context) { 24 + // TODO: repository crawler 25 + }
+100
knotmirror/db/db.go
··· 1 + package db 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "fmt" 7 + "time" 8 + 9 + _ "github.com/jackc/pgx/v5/stdlib" 10 + ) 11 + 12 + func Make(ctx context.Context, dbUrl string, maxConns int) (*sql.DB, error) { 13 + db, err := sql.Open("pgx", dbUrl) 14 + if err != nil { 15 + return nil, fmt.Errorf("opening db: %w", err) 16 + } 17 + 18 + db.SetMaxOpenConns(maxConns) 19 + db.SetMaxIdleConns(maxConns) 20 + db.SetConnMaxIdleTime(time.Hour) 21 + 22 + pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) 23 + defer cancel() 24 + if err := db.PingContext(pingCtx); err != nil { 25 + db.Close() 26 + return nil, fmt.Errorf("ping db: %w", err) 27 + } 28 + 29 + conn, err := db.Conn(ctx) 30 + if err != nil { 31 + return nil, err 32 + } 33 + defer conn.Close() 34 + 35 + _, err = conn.ExecContext(ctx, ` 36 + create table if not exists repos ( 37 + did text not null, 38 + rkey text not null, 39 + at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo' || '/' || rkey) stored, 40 + cid text, 41 + 42 + -- record content 43 + name text not null, 44 + knot_domain text not null, 45 + 46 + -- sync info 47 + git_rev text not null, 48 + repo_sha text not null, 49 + state text not null default 'pending', 50 + error_msg text, 51 + retry_count integer not null default 0, 52 + retry_after integer not null default 0, 53 + db_created_at timestamptz not null default now(), 54 + db_updated_at timestamptz not null default now(), 55 + 56 + constraint repos_pkey primary key (did, rkey) 57 + ); 58 + 59 + -- knot hosts 60 + create table if not exists hosts ( 61 + hostname text not null, 62 + no_ssl boolean not null default false, 63 + status text not null default 'active', 64 + last_seq bigint not null default -1, 65 + db_created_at timestamptz not null default now(), 66 + db_updated_at timestamptz not null default now(), 67 + 68 + constraint hosts_pkey primary key (hostname) 69 + ); 70 + 71 + create index if not exists idx_repos_aturi on repos (at_uri); 72 + 
create index if not exists idx_repos_db_updated_at on repos (db_updated_at desc); 73 + create index if not exists idx_hosts_db_updated_at on hosts (db_updated_at desc); 74 + 75 + create or replace function set_updated_at() 76 + returns trigger as $$ 77 + begin 78 + new.db_updated_at = now(); 79 + return new; 80 + end; 81 + $$ language plpgsql; 82 + 83 + drop trigger if exists repos_set_updated_at on repos; 84 + create trigger repos_set_updated_at 85 + before update on repos 86 + for each row 87 + execute function set_updated_at(); 88 + 89 + drop trigger if exists hosts_set_updated_at on hosts; 90 + create trigger hosts_set_updated_at 91 + before update on hosts 92 + for each row 93 + execute function set_updated_at(); 94 + `) 95 + if err != nil { 96 + return nil, fmt.Errorf("initializing db schema: %w", err) 97 + } 98 + 99 + return db, nil 100 + }
+102
knotmirror/db/hosts.go
··· 1 + package db 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "errors" 7 + "fmt" 8 + "log" 9 + 10 + "tangled.org/core/knotmirror/models" 11 + ) 12 + 13 + func UpsertHost(ctx context.Context, e *sql.DB, host *models.Host) error { 14 + if _, err := e.ExecContext(ctx, 15 + `insert into hosts (hostname, no_ssl, status, last_seq) 16 + values ($1, $2, $3, $4) 17 + on conflict(hostname) do update set 18 + no_ssl = excluded.no_ssl, 19 + status = excluded.status, 20 + last_seq = excluded.last_seq 21 + `, 22 + host.Hostname, 23 + host.NoSSL, 24 + host.Status, 25 + host.LastSeq, 26 + ); err != nil { 27 + return fmt.Errorf("upserting host: %w", err) 28 + } 29 + return nil 30 + } 31 + 32 + func GetHost(ctx context.Context, e *sql.DB, hostname string) (*models.Host, error) { 33 + var host models.Host 34 + if err := e.QueryRowContext(ctx, 35 + `select hostname, no_ssl, status, last_seq 36 + from hosts where hostname = $1`, 37 + hostname, 38 + ).Scan( 39 + &host.Hostname, 40 + &host.NoSSL, 41 + &host.Status, 42 + &host.LastSeq, 43 + ); err != nil { 44 + if errors.Is(err, sql.ErrNoRows) { 45 + return nil, nil 46 + } 47 + return nil, err 48 + } 49 + return &host, nil 50 + } 51 + 52 + func StoreCursors(ctx context.Context, e *sql.DB, cursors []models.HostCursor) error { 53 + tx, err := e.BeginTx(ctx, nil) 54 + if err != nil { 55 + return fmt.Errorf("starting transaction: %w", err) 56 + } 57 + defer tx.Rollback() 58 + for _, cur := range cursors { 59 + if cur.LastSeq <= 0 { 60 + continue 61 + } 62 + if _, err := tx.ExecContext(ctx, 63 + `update hosts set last_seq = $1 where hostname = $2`, 64 + cur.LastSeq, 65 + cur.Hostname, 66 + ); err != nil { 67 + log.Println("failed to persist host cursor", "host", cur.Hostname, "lastSeq", cur.LastSeq, "err", err) 68 + } 69 + } 70 + return tx.Commit() 71 + } 72 + 73 + func ListHosts(ctx context.Context, e *sql.DB, status models.HostStatus) ([]models.Host, error) { 74 + rows, err := e.QueryContext(ctx, 75 + `select hostname, no_ssl, 
status, last_seq 76 + from hosts 77 + where status = $1`, 78 + status, 79 + ) 80 + if err != nil { 81 + return nil, fmt.Errorf("querying hosts: %w", err) 82 + } 83 + defer rows.Close() 84 + 85 + var hosts []models.Host 86 + for rows.Next() { 87 + var host models.Host 88 + if err := rows.Scan( 89 + &host.Hostname, 90 + &host.NoSSL, 91 + &host.Status, 92 + &host.LastSeq, 93 + ); err != nil { 94 + return nil, fmt.Errorf("scanning row: %w", err) 95 + } 96 + hosts = append(hosts, host) 97 + } 98 + if err := rows.Err(); err != nil { 99 + return nil, fmt.Errorf("scanning rows: %w ", err) 100 + } 101 + return hosts, nil 102 + }
+275
knotmirror/db/repos.go
··· 1 + package db 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "errors" 7 + "fmt" 8 + 9 + "github.com/bluesky-social/indigo/atproto/syntax" 10 + "tangled.org/core/appview/pagination" 11 + "tangled.org/core/knotmirror/models" 12 + ) 13 + 14 + func AddRepo(ctx context.Context, e *sql.DB, did syntax.DID, rkey syntax.RecordKey, cid syntax.CID, name, knot string) error { 15 + if _, err := e.ExecContext(ctx, 16 + `insert into repos (did, rkey, cid, name, knot_domain) 17 + values ($1, $2, $3, $4, $5)`, 18 + did, rkey, cid, name, knot, 19 + ); err != nil { 20 + return fmt.Errorf("inserting repo: %w", err) 21 + } 22 + return nil 23 + } 24 + 25 + func UpsertRepo(ctx context.Context, e *sql.DB, repo *models.Repo) error { 26 + if _, err := e.ExecContext(ctx, 27 + `insert into repos (did, rkey, cid, name, knot_domain, git_rev, repo_sha, state, error_msg, retry_count, retry_after) 28 + values ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) 29 + on conflict(did, rkey) do update set 30 + cid = excluded.cid, 31 + name = excluded.name, 32 + knot_domain = excluded.knot_domain, 33 + git_rev = excluded.git_rev, 34 + repo_sha = excluded.repo_sha, 35 + state = excluded.state, 36 + error_msg = excluded.error_msg, 37 + retry_count = excluded.retry_count, 38 + retry_after = excluded.retry_after`, 39 + // where repos.cid != excluded.cid`, 40 + repo.Did, 41 + repo.Rkey, 42 + repo.Cid, 43 + repo.Name, 44 + repo.KnotDomain, 45 + repo.GitRev, 46 + repo.RepoSha, 47 + repo.State, 48 + repo.ErrorMsg, 49 + repo.RetryCount, 50 + repo.RetryAfter, 51 + ); err != nil { 52 + return fmt.Errorf("upserting repo: %w", err) 53 + } 54 + return nil 55 + } 56 + 57 + func UpdateRepoState(ctx context.Context, e *sql.DB, did syntax.DID, rkey syntax.RecordKey, state models.RepoState) error { 58 + if _, err := e.ExecContext(ctx, 59 + `update repos 60 + set state = $1 61 + where did = $2 and rkey = $3`, 62 + state, 63 + did, rkey, 64 + ); err != nil { 65 + return fmt.Errorf("updating repo: %w", err) 66 + } 67 
+ return nil 68 + } 69 + 70 + func DeleteRepo(ctx context.Context, e *sql.DB, did syntax.DID, rkey syntax.RecordKey) error { 71 + if _, err := e.ExecContext(ctx, 72 + `delete from repos where did = $1 and rkey = $2`, 73 + did, 74 + rkey, 75 + ); err != nil { 76 + return fmt.Errorf("deleting repo: %w", err) 77 + } 78 + return nil 79 + } 80 + 81 + func GetRepoByName(ctx context.Context, e *sql.DB, did syntax.DID, name string) (*models.Repo, error) { 82 + var repo models.Repo 83 + if err := e.QueryRowContext(ctx, 84 + `select 85 + did, 86 + rkey, 87 + cid, 88 + name, 89 + knot_domain, 90 + git_rev, 91 + repo_sha, 92 + state, 93 + error_msg, 94 + retry_count, 95 + retry_after 96 + from repos 97 + where did = $1 and name = $2`, 98 + did, 99 + name, 100 + ).Scan( 101 + &repo.Did, 102 + &repo.Rkey, 103 + &repo.Cid, 104 + &repo.Name, 105 + &repo.KnotDomain, 106 + &repo.GitRev, 107 + &repo.RepoSha, 108 + &repo.State, 109 + &repo.ErrorMsg, 110 + &repo.RetryCount, 111 + &repo.RetryAfter, 112 + ); err != nil { 113 + if errors.Is(err, sql.ErrNoRows) { 114 + return nil, nil 115 + } 116 + return nil, fmt.Errorf("querying repo: %w", err) 117 + } 118 + return &repo, nil 119 + } 120 + 121 + func GetRepoByAtUri(ctx context.Context, e *sql.DB, aturi syntax.ATURI) (*models.Repo, error) { 122 + var repo models.Repo 123 + if err := e.QueryRowContext(ctx, 124 + `select 125 + did, 126 + rkey, 127 + cid, 128 + name, 129 + knot_domain, 130 + git_rev, 131 + repo_sha, 132 + state, 133 + error_msg, 134 + retry_count, 135 + retry_after 136 + from repos 137 + where at_uri = $1`, 138 + aturi, 139 + ).Scan( 140 + &repo.Did, 141 + &repo.Rkey, 142 + &repo.Cid, 143 + &repo.Name, 144 + &repo.KnotDomain, 145 + &repo.GitRev, 146 + &repo.RepoSha, 147 + &repo.State, 148 + &repo.ErrorMsg, 149 + &repo.RetryCount, 150 + &repo.RetryAfter, 151 + ); err != nil { 152 + if errors.Is(err, sql.ErrNoRows) { 153 + return nil, nil 154 + } 155 + return nil, fmt.Errorf("querying repo: %w", err) 156 + } 157 + return 
&repo, nil 158 + } 159 + 160 + func ListRepos(ctx context.Context, e *sql.DB, page pagination.Page, did, knot, state string) ([]models.Repo, error) { 161 + var conditions []string 162 + var args []any 163 + 164 + pageClause := "" 165 + if page.Limit > 0 { 166 + pageClause = " limit $1 offset $2 " 167 + args = append(args, page.Limit, page.Offset) 168 + } 169 + 170 + whereClause := "" 171 + if did != "" { 172 + conditions = append(conditions, fmt.Sprintf("did = $%d", len(args)+1)) 173 + args = append(args, did) 174 + } 175 + if knot != "" { 176 + conditions = append(conditions, fmt.Sprintf("knot_domain = $%d", len(args)+1)) 177 + args = append(args, knot) 178 + } 179 + if state != "" { 180 + conditions = append(conditions, fmt.Sprintf("state = $%d", len(args)+1)) 181 + args = append(args, state) 182 + } 183 + if len(conditions) > 0 { 184 + whereClause = "WHERE " + conditions[0] 185 + for _, condition := range conditions[1:] { 186 + whereClause += " AND " + condition 187 + } 188 + } 189 + 190 + query := ` 191 + select 192 + did, 193 + rkey, 194 + cid, 195 + name, 196 + knot_domain, 197 + git_rev, 198 + repo_sha, 199 + state, 200 + error_msg, 201 + retry_count, 202 + retry_after 203 + from repos 204 + ` + whereClause + pageClause 205 + rows, err := e.QueryContext(ctx, query, args...) 
206 + if err != nil { 207 + return nil, err 208 + } 209 + defer rows.Close() 210 + 211 + var repos []models.Repo 212 + for rows.Next() { 213 + var repo models.Repo 214 + if err := rows.Scan( 215 + &repo.Did, 216 + &repo.Rkey, 217 + &repo.Cid, 218 + &repo.Name, 219 + &repo.KnotDomain, 220 + &repo.GitRev, 221 + &repo.RepoSha, 222 + &repo.State, 223 + &repo.ErrorMsg, 224 + &repo.RetryCount, 225 + &repo.RetryAfter, 226 + ); err != nil { 227 + return nil, fmt.Errorf("scanning row: %w", err) 228 + } 229 + repos = append(repos, repo) 230 + } 231 + if err := rows.Err(); err != nil { 232 + return nil, fmt.Errorf("scanning rows: %w ", err) 233 + } 234 + 235 + return repos, nil 236 + } 237 + 238 + func GetRepoCountsByState(ctx context.Context, e *sql.DB) (map[models.RepoState]int64, error) { 239 + const q = ` 240 + SELECT state, COUNT(*) 241 + FROM repos 242 + GROUP BY state 243 + ` 244 + 245 + rows, err := e.QueryContext(ctx, q) 246 + if err != nil { 247 + return nil, err 248 + } 249 + defer rows.Close() 250 + 251 + counts := make(map[models.RepoState]int64) 252 + 253 + for rows.Next() { 254 + var state string 255 + var count int64 256 + 257 + if err := rows.Scan(&state, &count); err != nil { 258 + return nil, err 259 + } 260 + 261 + counts[models.RepoState(state)] = count 262 + } 263 + 264 + if err := rows.Err(); err != nil { 265 + return nil, err 266 + } 267 + 268 + for _, s := range models.AllRepoStates { 269 + if _, ok := counts[s]; !ok { 270 + counts[s] = 0 271 + } 272 + } 273 + 274 + return counts, nil 275 + }
+305
knotmirror/git.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "context" 5 + "errors" 6 + "fmt" 7 + "net/url" 8 + "os" 9 + "os/exec" 10 + "path/filepath" 11 + "regexp" 12 + "strings" 13 + 14 + "github.com/go-git/go-git/v5" 15 + gitconfig "github.com/go-git/go-git/v5/config" 16 + "github.com/go-git/go-git/v5/plumbing/transport" 17 + "tangled.org/core/knotmirror/models" 18 + ) 19 + 20 + type GitMirrorManager interface { 21 + Exist(repo *models.Repo) (bool, error) 22 + // RemoteSetUrl updates git repository 'origin' remote 23 + RemoteSetUrl(ctx context.Context, repo *models.Repo) error 24 + // Clone clones the repository as a mirror 25 + Clone(ctx context.Context, repo *models.Repo) error 26 + // Fetch fetches the repository 27 + Fetch(ctx context.Context, repo *models.Repo) error 28 + // Sync mirrors the repository. It will clone the repository if repository doesn't exist. 29 + Sync(ctx context.Context, repo *models.Repo) error 30 + } 31 + 32 + type CliGitMirrorManager struct { 33 + repoBasePath string 34 + knotUseSSL bool 35 + } 36 + 37 + func NewCliGitMirrorManager(repoBasePath string, knotUseSSL bool) *CliGitMirrorManager { 38 + return &CliGitMirrorManager{ 39 + repoBasePath, 40 + knotUseSSL, 41 + } 42 + } 43 + 44 + var _ GitMirrorManager = new(CliGitMirrorManager) 45 + 46 + func (c *CliGitMirrorManager) makeRepoPath(repo *models.Repo) string { 47 + return filepath.Join(c.repoBasePath, repo.Did.String(), repo.Rkey.String()) 48 + } 49 + 50 + func (c *CliGitMirrorManager) Exist(repo *models.Repo) (bool, error) { 51 + return isDir(c.makeRepoPath(repo)) 52 + } 53 + 54 + func (c *CliGitMirrorManager) RemoteSetUrl(ctx context.Context, repo *models.Repo) error { 55 + path := c.makeRepoPath(repo) 56 + url, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), c.knotUseSSL) 57 + if err != nil { 58 + return fmt.Errorf("constructing repo remote url: %w", err) 59 + } 60 + cmd := exec.CommandContext(ctx, "git", "-C", path, "remote", "set-url", "origin", url) 61 + if out, err := 
cmd.CombinedOutput(); err != nil { 62 + if ctx.Err() != nil { 63 + return ctx.Err() 64 + } 65 + msg := string(out) 66 + return fmt.Errorf("running 'git remote set-url origin %s': %w\n%s", url, err, msg) 67 + } 68 + return nil 69 + } 70 + 71 + func (c *CliGitMirrorManager) Clone(ctx context.Context, repo *models.Repo) error { 72 + path := c.makeRepoPath(repo) 73 + url, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), c.knotUseSSL) 74 + if err != nil { 75 + return fmt.Errorf("constructing repo remote url: %w", err) 76 + } 77 + return c.clone(ctx, path, url) 78 + } 79 + 80 + func (c *CliGitMirrorManager) clone(ctx context.Context, path, url string) error { 81 + cmd := exec.CommandContext(ctx, "git", "clone", "--mirror", url, path) 82 + if out, err := cmd.CombinedOutput(); err != nil { 83 + if ctx.Err() != nil { 84 + return ctx.Err() 85 + } 86 + msg := string(out) 87 + if classification := classifyCliError(msg); classification != nil { 88 + return classification 89 + } 90 + return fmt.Errorf("running 'git clone --mirror %s': %w\n%s", url, err, msg) 91 + } 92 + return nil 93 + } 94 + 95 + func (c *CliGitMirrorManager) Fetch(ctx context.Context, repo *models.Repo) error { 96 + path := c.makeRepoPath(repo) 97 + return c.fetch(ctx, path) 98 + } 99 + 100 + func (c *CliGitMirrorManager) fetch(ctx context.Context, path string) error { 101 + // TODO: use `repo.Knot` instead of depending on origin 102 + cmd := exec.CommandContext(ctx, "git", "-C", path, "fetch", "--prune", "origin") 103 + if out, err := cmd.CombinedOutput(); err != nil { 104 + if ctx.Err() != nil { 105 + return ctx.Err() 106 + } 107 + return fmt.Errorf("running 'git fetch': %w\n%s", err, string(out)) 108 + } 109 + return nil 110 + } 111 + 112 + func (c *CliGitMirrorManager) Sync(ctx context.Context, repo *models.Repo) error { 113 + path := c.makeRepoPath(repo) 114 + url, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), c.knotUseSSL) 115 + if err != nil { 116 + return 
fmt.Errorf("constructing repo remote url: %w", err) 117 + } 118 + 119 + exist, err := isDir(path) 120 + if err != nil { 121 + return fmt.Errorf("checking repo path: %w", err) 122 + } 123 + if !exist { 124 + if err := c.clone(ctx, path, url); err != nil { 125 + return fmt.Errorf("cloning repo: %w", err) 126 + } 127 + } else { 128 + if err := c.fetch(ctx, path); err != nil { 129 + return fmt.Errorf("fetching repo: %w", err) 130 + } 131 + } 132 + return nil 133 + } 134 + 135 + var ( 136 + ErrDNSFailure = errors.New("git: knot: dns failure (could not resolve host)") 137 + ErrCertExpired = errors.New("git: knot: certificate has expired") 138 + ErrCertMismatch = errors.New("git: knot: certificate hostname mismatch") 139 + ErrTLSHandshake = errors.New("git: knot: tls handshake failure") 140 + ErrHTTPStatus = errors.New("git: knot: request url returned error") 141 + ErrUnreachable = errors.New("git: knot: could not connect to server") 142 + ErrRepoNotFound = errors.New("git: repo: repository not found") 143 + ) 144 + 145 + var ( 146 + reDNSFailure = regexp.MustCompile(`Could not resolve host:`) 147 + reCertExpired = regexp.MustCompile(`SSL certificate OpenSSL verify result: certificate has expired`) 148 + reCertMismatch = regexp.MustCompile(`SSL: no alternative certificate subject name matches target hostname`) 149 + reTLSHandshake = regexp.MustCompile(`TLS connect error: (.*)`) 150 + reHTTPStatus = regexp.MustCompile(`The requested URL returned error: (\d\d\d)`) 151 + reUnreachable = regexp.MustCompile(`Could not connect to server`) 152 + reRepoNotFound = regexp.MustCompile(`repository '.*?' not found`) 153 + ) 154 + 155 + // classifyCliError classifies git cli error message. 
It will return nil for unknown error messages 156 + func classifyCliError(stderr string) error { 157 + msg := strings.TrimSpace(stderr) 158 + if m := reTLSHandshake.FindStringSubmatch(msg); len(m) > 1 { 159 + return fmt.Errorf("%w: %s", ErrTLSHandshake, m[1]) 160 + } 161 + if m := reHTTPStatus.FindStringSubmatch(msg); len(m) > 1 { 162 + return fmt.Errorf("%w: %s", ErrHTTPStatus, m[1]) 163 + } 164 + switch { 165 + case reDNSFailure.MatchString(msg): 166 + return ErrDNSFailure 167 + case reCertExpired.MatchString(msg): 168 + return ErrCertExpired 169 + case reCertMismatch.MatchString(msg): 170 + return ErrCertMismatch 171 + case reUnreachable.MatchString(msg): 172 + return ErrUnreachable 173 + case reRepoNotFound.MatchString(msg): 174 + return ErrRepoNotFound 175 + } 176 + return nil 177 + } 178 + 179 + type GoGitMirrorManager struct { 180 + repoBasePath string 181 + knotUseSSL bool 182 + } 183 + 184 + func NewGoGitMirrorClient(repoBasePath string, knotUseSSL bool) *GoGitMirrorManager { 185 + return &GoGitMirrorManager{ 186 + repoBasePath, 187 + knotUseSSL, 188 + } 189 + } 190 + 191 + var _ GitMirrorManager = new(GoGitMirrorManager) 192 + 193 + func (c *GoGitMirrorManager) makeRepoPath(repo *models.Repo) string { 194 + return filepath.Join(c.repoBasePath, repo.Did.String(), repo.Rkey.String()) 195 + } 196 + 197 + func (c *GoGitMirrorManager) Exist(repo *models.Repo) (bool, error) { 198 + return isDir(c.makeRepoPath(repo)) 199 + } 200 + 201 + func (c *GoGitMirrorManager) RemoteSetUrl(ctx context.Context, repo *models.Repo) error { 202 + panic("unimplemented") 203 + } 204 + 205 + func (c *GoGitMirrorManager) Clone(ctx context.Context, repo *models.Repo) error { 206 + path := c.makeRepoPath(repo) 207 + url, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), c.knotUseSSL) 208 + if err != nil { 209 + return fmt.Errorf("constructing repo remote url: %w", err) 210 + } 211 + return c.clone(ctx, path, url) 212 + } 213 + 214 + func (c *GoGitMirrorManager) clone(ctx 
context.Context, path, url string) error { 215 + _, err := git.PlainCloneContext(ctx, path, true, &git.CloneOptions{ 216 + URL: url, 217 + Mirror: true, 218 + }) 219 + if err != nil && !errors.Is(err, transport.ErrEmptyRemoteRepository) { 220 + return fmt.Errorf("cloning repo: %w", err) 221 + } 222 + return nil 223 + } 224 + 225 + func (c *GoGitMirrorManager) Fetch(ctx context.Context, repo *models.Repo) error { 226 + path := c.makeRepoPath(repo) 227 + url, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), c.knotUseSSL) 228 + if err != nil { 229 + return fmt.Errorf("constructing repo remote url: %w", err) 230 + } 231 + 232 + return c.fetch(ctx, path, url) 233 + } 234 + 235 + func (c *GoGitMirrorManager) fetch(ctx context.Context, path, url string) error { 236 + gr, err := git.PlainOpen(path) 237 + if err != nil { 238 + return fmt.Errorf("opening local repo: %w", err) 239 + } 240 + if err := gr.FetchContext(ctx, &git.FetchOptions{ 241 + RemoteURL: url, 242 + RefSpecs: []gitconfig.RefSpec{gitconfig.RefSpec("+refs/*:refs/*")}, 243 + Force: true, 244 + Prune: true, 245 + }); err != nil { 246 + return fmt.Errorf("fetching reppo: %w", err) 247 + } 248 + return nil 249 + } 250 + 251 + func (c *GoGitMirrorManager) Sync(ctx context.Context, repo *models.Repo) error { 252 + path := c.makeRepoPath(repo) 253 + url, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), c.knotUseSSL) 254 + if err != nil { 255 + return fmt.Errorf("constructing repo remote url: %w", err) 256 + } 257 + 258 + exist, err := isDir(path) 259 + if err != nil { 260 + return fmt.Errorf("checking repo path: %w", err) 261 + } 262 + if !exist { 263 + if err := c.clone(ctx, path, url); err != nil { 264 + return fmt.Errorf("cloning repo: %w", err) 265 + } 266 + } else { 267 + if err := c.fetch(ctx, path, url); err != nil { 268 + return fmt.Errorf("fetching repo: %w", err) 269 + } 270 + } 271 + return nil 272 + } 273 + 274 + func makeRepoRemoteUrl(knot, didSlashRepo string, knotUseSSL 
bool) (string, error) { 275 + if !strings.Contains(knot, "://") { 276 + if knotUseSSL { 277 + knot = "https://" + knot 278 + } else { 279 + knot = "http://" + knot 280 + } 281 + } 282 + 283 + u, err := url.Parse(knot) 284 + if err != nil { 285 + return "", err 286 + } 287 + 288 + if u.Scheme != "http" && u.Scheme != "https" { 289 + return "", fmt.Errorf("unsupported scheme: %s", u.Scheme) 290 + } 291 + 292 + u = u.JoinPath(didSlashRepo) 293 + return u.String(), nil 294 + } 295 + 296 + func isDir(path string) (bool, error) { 297 + info, err := os.Stat(path) 298 + if err == nil && info.IsDir() { 299 + return true, nil 300 + } 301 + if os.IsNotExist(err) { 302 + return false, nil 303 + } 304 + return false, err 305 + }
+56
knotmirror/hostutil/hostutil.go
··· 1 + package hostutil 2 + 3 + import ( 4 + "fmt" 5 + "net/url" 6 + "strings" 7 + 8 + "github.com/bluesky-social/indigo/atproto/syntax" 9 + ) 10 + 11 + func ParseHostname(raw string) (hostname string, noSSL bool, err error) { 12 + // handle case of bare hostname 13 + if !strings.Contains(raw, "://") { 14 + if strings.HasPrefix(raw, "localhost:") { 15 + raw = "http://" + raw 16 + } else { 17 + raw = "https://" + raw 18 + } 19 + } 20 + 21 + u, err := url.Parse(raw) 22 + if err != nil { 23 + return "", false, fmt.Errorf("not a valid host URL: %w", err) 24 + } 25 + 26 + switch u.Scheme { 27 + case "https", "wss": 28 + noSSL = false 29 + case "http", "ws": 30 + noSSL = true 31 + default: 32 + return "", false, fmt.Errorf("unsupported URL scheme: %s", u.Scheme) 33 + } 34 + 35 + // 'localhost' (exact string) is allowed *with* a required port number; SSL is optional 36 + if u.Hostname() == "localhost" { 37 + if u.Port() == "" || !strings.HasPrefix(u.Host, "localhost:") { 38 + return "", false, fmt.Errorf("port number is required for localhost") 39 + } 40 + return u.Host, noSSL, nil 41 + } 42 + 43 + // port numbers not allowed otherwise 44 + if u.Port() != "" { 45 + return "", false, fmt.Errorf("port number not allowed for non-local names") 46 + } 47 + 48 + // check it is a real hostname (eg, not IP address or single-word alias) 49 + h, err := syntax.ParseHandle(u.Host) 50 + if err != nil { 51 + return "", false, fmt.Errorf("not a public hostname") 52 + } 53 + 54 + // lower-case in response 55 + return h.Normalize().String(), noSSL, nil 56 + }
+138
knotmirror/knotmirror.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "net/http" 7 + _ "net/http/pprof" 8 + "time" 9 + 10 + "github.com/go-chi/chi/v5" 11 + "github.com/prometheus/client_golang/prometheus/promhttp" 12 + "tangled.org/core/idresolver" 13 + "tangled.org/core/knotmirror/config" 14 + "tangled.org/core/knotmirror/db" 15 + "tangled.org/core/knotmirror/knotstream" 16 + "tangled.org/core/knotmirror/models" 17 + "tangled.org/core/knotmirror/xrpc" 18 + "tangled.org/core/log" 19 + ) 20 + 21 + func Run(ctx context.Context, cfg *config.Config) error { 22 + // make sure every services are cleaned up on fast return 23 + ctx, cancel := context.WithCancel(ctx) 24 + defer cancel() 25 + 26 + logger := log.FromContext(ctx) 27 + 28 + db, err := db.Make(ctx, cfg.DbUrl, 32) 29 + if err != nil { 30 + return fmt.Errorf("initializing db: %w", err) 31 + } 32 + 33 + resolver := idresolver.DefaultResolver(cfg.PlcUrl) 34 + 35 + // NOTE: using plain git-cli for clone/fetch as go-git is too memory-intensive. 36 + gitm := NewCliGitMirrorManager(cfg.GitRepoBasePath, cfg.KnotUseSSL) 37 + 38 + res, err := db.ExecContext(ctx, 39 + `update repos set state = $1 where state = $2`, 40 + models.RepoStateDesynchronized, 41 + models.RepoStateResyncing, 42 + ) 43 + if err != nil { 44 + return fmt.Errorf("clearing resyning states: %w", err) 45 + } 46 + rows, err := res.RowsAffected() 47 + if err != nil { 48 + return fmt.Errorf("getting affected rows: %w", err) 49 + } 50 + logger.Info(fmt.Sprintf("clearing resyning states: %d records updated", rows)) 51 + 52 + knotstream := knotstream.NewKnotStream(logger, db, cfg) 53 + crawler := NewCrawler(logger, db) 54 + resyncer := NewResyncer(logger, db, gitm, cfg) 55 + adminpage := NewAdminServer(logger, db, resyncer) 56 + xrpc := xrpc.New(logger, cfg, db, resolver, knotstream) 57 + 58 + // maintain repository list with tap 59 + // NOTE: this can be removed once we introduce did-for-repo because then we can just listen to KnotStream for #identity events. 
60 + tap := NewTapClient(logger, cfg, db, gitm, knotstream) 61 + 62 + // start http server 63 + go func() { 64 + logger.Info("starting http server", "addr", cfg.Listen) 65 + 66 + mux := chi.NewRouter() 67 + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 68 + w.Write([]byte("Welcome to a knotmirror server.\n")) 69 + }) 70 + mux.Mount("/xrpc", xrpc.Router()) 71 + 72 + if err := http.ListenAndServe(cfg.Listen, mux); err != nil { 73 + logger.Error("xrpc server failed", "error", err) 74 + } 75 + }() 76 + 77 + // start metrics endpoint 78 + go func() { 79 + metricsAddr := cfg.MetricsListen 80 + logger.Info("starting metrics server", "addr", metricsAddr) 81 + http.Handle("/metrics", promhttp.Handler()) 82 + if err := http.ListenAndServe(metricsAddr, nil); err != nil { 83 + logger.Error("metrics server failed", "error", err) 84 + } 85 + }() 86 + 87 + // start admin page endpoint 88 + go func() { 89 + logger.Info("starting admin server", "addr", cfg.AdminListen) 90 + if err := http.ListenAndServe(cfg.AdminListen, adminpage.Router()); err != nil { 91 + logger.Error("admin server failed", "error", err) 92 + } 93 + }() 94 + 95 + tap.Start(ctx) 96 + 97 + resyncer.Start(ctx) 98 + 99 + // periodically crawl the entire network to mirror the repositories 100 + crawler.Start(ctx) 101 + 102 + // listen to knotstream (currently we don't have relay for knots, so subscribe every known knots) 103 + knotstream.Start(ctx) 104 + 105 + svcErr := make(chan error, 1) 106 + if err := knotstream.ResubscribeAllHosts(ctx); err != nil { 107 + svcErr <- fmt.Errorf("resubscribing known hosts: %w", err) 108 + } 109 + 110 + logger.Info("startup complete") 111 + select { 112 + case <-ctx.Done(): 113 + logger.Info("received shutdown signal", "reason", ctx.Err()) 114 + case err := <-svcErr: 115 + if err != nil { 116 + logger.Error("service error", "error", err) 117 + } 118 + cancel() 119 + } 120 + 121 + logger.Info("shutting down knotmirror") 122 + shutdownCtx, shutdownCancel := 
context.WithTimeout(context.Background(), 10*time.Second) 123 + defer shutdownCancel() 124 + 125 + var errs []error 126 + if err := knotstream.Shutdown(shutdownCtx); err != nil { 127 + errs = append(errs, err) 128 + } 129 + if err := db.Close(); err != nil { 130 + errs = append(errs, err) 131 + } 132 + for _, err := range errs { 133 + logger.Error("error during shutdown", "err", err) 134 + } 135 + 136 + logger.Info("shutdown complete") 137 + return nil 138 + }
+88
knotmirror/knotstream/knotstream.go
··· 1 + package knotstream 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "fmt" 7 + "log/slog" 8 + "time" 9 + 10 + "tangled.org/core/knotmirror/config" 11 + "tangled.org/core/knotmirror/db" 12 + "tangled.org/core/knotmirror/models" 13 + "tangled.org/core/log" 14 + ) 15 + 16 + type KnotStream struct { 17 + logger *slog.Logger 18 + db *sql.DB 19 + slurper *KnotSlurper 20 + } 21 + 22 + func NewKnotStream(l *slog.Logger, db *sql.DB, cfg *config.Config) *KnotStream { 23 + l = log.SubLogger(l, "knotstream") 24 + return &KnotStream{ 25 + logger: l, 26 + db: db, 27 + slurper: NewKnotSlurper(l, db, cfg.Slurper), 28 + } 29 + } 30 + 31 + func (s *KnotStream) Start(ctx context.Context) { 32 + go s.slurper.Run(ctx) 33 + } 34 + 35 + func (s *KnotStream) Shutdown(ctx context.Context) error { 36 + return s.slurper.Shutdown(ctx) 37 + } 38 + 39 + func (s *KnotStream) CheckIfSubscribed(hostname string) bool { 40 + return s.slurper.CheckIfSubscribed(hostname) 41 + } 42 + 43 + func (s *KnotStream) SubscribeHost(ctx context.Context, hostname string, noSSL bool) error { 44 + l := s.logger.With("hostname", hostname, "nossl", noSSL) 45 + l.Debug("subscribe") 46 + host, err := db.GetHost(ctx, s.db, hostname) 47 + if err != nil { 48 + return fmt.Errorf("loading host from db: %w", err) 49 + } 50 + 51 + if host == nil { 52 + host = &models.Host{ 53 + Hostname: hostname, 54 + NoSSL: noSSL, 55 + Status: models.HostStatusActive, 56 + LastSeq: 0, 57 + } 58 + 59 + if err := db.UpsertHost(ctx, s.db, host); err != nil { 60 + return fmt.Errorf("adding host to db: %w", err) 61 + } 62 + 63 + l.Info("adding new host subscription") 64 + } 65 + 66 + if host.Status == models.HostStatusBanned { 67 + return fmt.Errorf("cannot subscribe to banned knot") 68 + } 69 + return s.slurper.Subscribe(ctx, *host) 70 + } 71 + 72 + func (s *KnotStream) ResubscribeAllHosts(ctx context.Context) error { 73 + hosts, err := db.ListHosts(ctx, s.db, models.HostStatusActive) 74 + if err != nil { 75 + return 
fmt.Errorf("listing hosts: %w", err) 76 + } 77 + 78 + for _, host := range hosts { 79 + l := s.logger.With("hostname", host.Hostname) 80 + l.Info("re-subscribing to active host") 81 + if err := s.slurper.Subscribe(ctx, host); err != nil { 82 + l.Warn("failed to re-subscribe to host", "err", err) 83 + } 84 + // sleep for a very short period, so we don't open tons of sockets at the same time 85 + time.Sleep(1 * time.Millisecond) 86 + } 87 + return nil 88 + }
+28
knotmirror/knotstream/metrics.go
··· 1 + package knotstream 2 + 3 + import ( 4 + "github.com/prometheus/client_golang/prometheus" 5 + "github.com/prometheus/client_golang/prometheus/promauto" 6 + ) 7 + 8 + // KnotStream metrics 9 + var ( 10 + knotstreamEventsReceived = promauto.NewCounter(prometheus.CounterOpts{ 11 + Name: "knotmirror_knotstream_events_received_total", 12 + Help: "Total number of events received from knotstream", 13 + }) 14 + knotstreamEventsProcessed = promauto.NewCounter(prometheus.CounterOpts{ 15 + Name: "knotmirror_knotstream_events_processed_total", 16 + Help: "Total number of events successfully processed", 17 + }) 18 + knotstreamEventsSkipped = promauto.NewCounter(prometheus.CounterOpts{ 19 + Name: "knotmirror_knotstream_events_skipped_total", 20 + Help: "Total number of events skipped (not tracked)", 21 + }) 22 + ) 23 + 24 + // slurper metrics 25 + var connectedInbound = promauto.NewGauge(prometheus.GaugeOpts{ 26 + Name: "knotmirror_connected_inbound", 27 + Help: "Number of inbound knotstream we are consuming", 28 + })
+102
knotmirror/knotstream/scheduler.go
··· 1 + package knotstream 2 + 3 + import ( 4 + "context" 5 + "log/slog" 6 + "sync" 7 + "sync/atomic" 8 + "time" 9 + 10 + "tangled.org/core/log" 11 + ) 12 + 13 + type ParallelScheduler struct { 14 + concurrency int 15 + 16 + do func(ctx context.Context, task *Task) error 17 + 18 + feeder chan *Task 19 + lk sync.Mutex 20 + scheduled map[string][]*Task 21 + lastSeq atomic.Int64 22 + 23 + logger *slog.Logger 24 + } 25 + 26 + type Task struct { 27 + key string 28 + message []byte 29 + } 30 + 31 + func NewParallelScheduler(maxC int, ident string, do func(context.Context, *Task) error) *ParallelScheduler { 32 + return &ParallelScheduler{ 33 + concurrency: maxC, 34 + do: do, 35 + feeder: make(chan *Task), 36 + scheduled: make(map[string][]*Task), 37 + logger: log.New("parallel-scheduler"), 38 + } 39 + } 40 + 41 + func (s *ParallelScheduler) Start(ctx context.Context) { 42 + for range s.concurrency { 43 + go s.ForEach(ctx, s.do) 44 + } 45 + } 46 + 47 + func (s *ParallelScheduler) AddTask(ctx context.Context, task *Task) { 48 + s.lk.Lock() 49 + if st, ok := s.scheduled[task.key]; ok { 50 + // schedule task 51 + s.scheduled[task.key] = append(st, task) 52 + s.lk.Unlock() 53 + return 54 + } 55 + s.scheduled[task.key] = []*Task{} 56 + s.lk.Unlock() 57 + 58 + select { 59 + case <-ctx.Done(): 60 + return 61 + case s.feeder <- task: 62 + return 63 + } 64 + } 65 + 66 + func (s *ParallelScheduler) ForEach(ctx context.Context, fn func(context.Context, *Task) error) { 67 + for task := range s.feeder { 68 + for task != nil { 69 + select { 70 + case <-ctx.Done(): 71 + return 72 + default: 73 + } 74 + if err := fn(ctx, task); err != nil { 75 + s.logger.Error("event handler failed", "err", err) 76 + } 77 + 78 + s.lk.Lock() 79 + func() { 80 + rem, ok := s.scheduled[task.key] 81 + if !ok { 82 + s.logger.Error("should always have an 'active' entry if a worker is processing a job") 83 + } 84 + if len(rem) == 0 { 85 + delete(s.scheduled, task.key) 86 + task = nil 87 + } else { 88 + task = 
rem[0] 89 + s.scheduled[task.key] = rem[1:] 90 + } 91 + 92 + // TODO: update seq from received message 93 + s.lastSeq.Store(time.Now().UnixNano()) 94 + }() 95 + s.lk.Unlock() 96 + } 97 + } 98 + } 99 + 100 + func (s *ParallelScheduler) LastSeq() int64 { 101 + return s.lastSeq.Load() 102 + }
+336
knotmirror/knotstream/slurper.go
package knotstream

import (
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"log/slog"
	"math/rand"
	"net/http"
	"sync"
	"time"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"github.com/bluesky-social/indigo/util/ssrf"
	"github.com/carlmjohnson/versioninfo"
	"github.com/gorilla/websocket"
	"tangled.org/core/api/tangled"
	"tangled.org/core/knotmirror/config"
	"tangled.org/core/knotmirror/db"
	"tangled.org/core/knotmirror/models"
	"tangled.org/core/log"
)

// KnotSlurper maintains one websocket subscription per knot host and
// periodically persists per-host cursors so streams can resume after restart.
type KnotSlurper struct {
	logger *slog.Logger
	db     *sql.DB
	cfg    config.SlurperConfig

	// subsLk guards subs.
	subsLk sync.Mutex
	// subs maps hostname -> live subscription.
	subs map[string]*subscription
}

// NewKnotSlurper builds a slurper with no subscriptions yet.
func NewKnotSlurper(l *slog.Logger, db *sql.DB, cfg config.SlurperConfig) *KnotSlurper {
	return &KnotSlurper{
		logger: log.SubLogger(l, "slurper"),
		db:     db,
		cfg:    cfg,
		subs:   make(map[string]*subscription),
	}
}

// Run flushes cursors to the database every PersistCursorPeriod until ctx is
// cancelled.
func (s *KnotSlurper) Run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(s.cfg.PersistCursorPeriod):
			if err := s.persistCursors(ctx); err != nil {
				s.logger.Error("failed to flush cursors", "err", err)
			}
		}
	}
}

// CheckIfSubscribed reports whether hostname has a registered subscription.
// NOTE(review): entries are never removed from subs, even after
// subscribeWithRedialer gives up on an offline host — confirm intended.
func (s *KnotSlurper) CheckIfSubscribed(hostname string) bool {
	s.subsLk.Lock()
	defer s.subsLk.Unlock()

	_, ok := s.subs[hostname]
	return ok
}

// Shutdown performs a final cursor flush; errors are logged and returned.
func (s *KnotSlurper) Shutdown(ctx context.Context) error {
	s.logger.Info("starting shutdown host cursor flush")
	err := s.persistCursors(ctx)
	if err != nil {
		s.logger.Error("shutdown error", "err", err)
	}
	s.logger.Info("slurper shutdown complete")
	return err
}

// persistCursors snapshots every subscription's cursor under the lock, then
// writes the batch to the database outside of it.
func (s *KnotSlurper) persistCursors(ctx context.Context) error {
	// // gather cursor list from subscriptions and store them to DB
	// start := time.Now()

	s.subsLk.Lock()
	cursors := make([]models.HostCursor, len(s.subs))
	i := 0
	for _, sub := range s.subs {
		cursors[i] = sub.HostCursor()
		i++
	}
	s.subsLk.Unlock()

	err := db.StoreCursors(ctx, s.db, cursors)
	// s.logger.Info("finished persisting cursors", "count", len(cursors), "duration", time.Since(start).String(), "err", err)
	return err
}

// Subscribe registers a subscription for host and starts both its per-host
// scheduler and the redialing websocket loop. Errors if already subscribed.
func (s *KnotSlurper) Subscribe(ctx context.Context, host models.Host) error {
	s.subsLk.Lock()
	defer s.subsLk.Unlock()

	_, ok := s.subs[host.Hostname]
	if ok {
		return fmt.Errorf("already subscribed: %s", host.Hostname)
	}

	// TODO: include `cancel` function to kill subscription by hostname
	sub := &subscription{
		hostname: host.Hostname,
		scheduler: NewParallelScheduler(
			s.cfg.ConcurrencyPerHost,
			host.Hostname,
			s.ProcessEvent,
		),
	}
	s.subs[host.Hostname] = sub

	sub.scheduler.Start(ctx)
	go s.subscribeWithRedialer(ctx, host, sub)
	return nil
}

// subscribeWithRedialer dials the host's legacy events websocket and keeps
// reconnecting with backoff. After ~30 consecutive dial failures the host is
// marked offline in the database and the loop exits. The cursor only
// advances (and is persisted) when the subscription made progress.
func (s *KnotSlurper) subscribeWithRedialer(ctx context.Context, host models.Host, sub *subscription) {
	l := s.logger.With("host", host.Hostname)

	dialer := websocket.Dialer{
		HandshakeTimeout: time.Second * 5,
	}

	// if this isn't a localhost / private connection, then we should enable SSRF protections
	if !host.NoSSL {
		netDialer := ssrf.PublicOnlyDialer()
		dialer.NetDialContext = netDialer.DialContext
	}

	cursor := host.LastSeq

	connectedInbound.Inc()
	defer connectedInbound.Dec()

	var backoff int
	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		u := host.LegacyEventsURL(cursor)
		l.Debug("made url with cursor", "cursor", cursor, "url", u)

		// NOTE: manual backoff retry implementation to explicitly handle fails
		hdr := make(http.Header)
		hdr.Add("User-Agent", userAgent())
		conn, resp, err := dialer.DialContext(ctx, u, hdr)
		if err != nil {
			l.Warn("dialing failed", "err", err, "backoff", backoff)
			time.Sleep(sleepForBackoff(backoff))
			backoff++
			if backoff > 30 {
				l.Warn("host does not appear to be online, disabling for now")
				host.Status = models.HostStatusOffline
				if err := db.UpsertHost(ctx, s.db, &host); err != nil {
					l.Error("failed to update host status", "err", err)
				}
				return
			}
			continue
		}

		l.Debug("knot event subscription response", "code", resp.StatusCode, "url", u)

		// blocks until the connection drops or ctx is cancelled
		if err := s.handleConnection(ctx, conn, sub); err != nil {
			// TODO: measure the last N connection error times and if they're coming too fast reconnect slower or don't reconnect and wait for requestCrawl
			l.Warn("host connection failed", "err", err, "backoff", backoff)
		}

		updatedCursor := sub.LastSeq()
		didProgress := updatedCursor > cursor
		l.Debug("cursor compare", "cursor", cursor, "updatedCursor", updatedCursor, "didProgress", didProgress)
		if cursor == 0 || didProgress {
			cursor = updatedCursor
			backoff = 0

			batch := []models.HostCursor{sub.HostCursor()}
			if err := db.StoreCursors(ctx, s.db, batch); err != nil {
				l.Error("failed to store cursors", "err", err)
			}
		}
	}
}

// handleConnection handles websocket connection.
// Schedules task from received event and return when connection is closed.
// A background goroutine pings the peer every 30s and closes the connection
// after 4 consecutive ping failures or when ctx is cancelled; the read
// deadline is refreshed by incoming pongs.
func (s *KnotSlurper) handleConnection(ctx context.Context, conn *websocket.Conn, sub *subscription) error {
	// ping on every 30s
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // close the background ping job on connection close
	go func() {
		t := time.NewTicker(30 * time.Second)
		defer t.Stop()
		failcount := 0

		for {
			select {
			case <-t.C:
				if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second*10)); err != nil {
					s.logger.Warn("failed to ping", "err", err)
					failcount++
					if failcount >= 4 {
						s.logger.Error("too many ping fails", "count", failcount)
						_ = conn.Close()
						return
					}
				} else {
					failcount = 0 // ok ping
				}
			case <-ctx.Done():
				_ = conn.Close()
				return
			}
		}
	}()

	// answer the peer's pings; a close-already-sent error is not a failure
	conn.SetPingHandler(func(message string) error {
		err := conn.WriteControl(websocket.PongMessage, []byte(message), time.Now().Add(time.Minute))
		if err == websocket.ErrCloseSent {
			return nil
		}
		return err
	})
	// each pong from the peer extends the read deadline by a minute
	conn.SetPongHandler(func(_ string) error {
		if err := conn.SetReadDeadline(time.Now().Add(time.Minute)); err != nil {
			s.logger.Error("failed to set read deadline", "err", err)
		}
		return nil
	})

	// read loop: every text message becomes a scheduler task keyed by host
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		msgType, msg, err := conn.ReadMessage()
		if err != nil {
			return err
		}

		if msgType != websocket.TextMessage {
			continue
		}

		sub.scheduler.AddTask(ctx, &Task{
			key:     sub.hostname, // TODO: replace to repository AT-URI for better concurrency
			message: msg,
		})
	}
}

// LegacyGitEvent is the JSON envelope emitted on a knot's legacy /events
// stream: a record key, its NSID, and the git ref-update payload.
type LegacyGitEvent struct {
	Rkey  string
	Nsid  string
	Event tangled.GitRefUpdate
}
// ProcessEvent decodes a scheduler task as a LegacyGitEvent and forwards it
// to ProcessLegacyGitRefUpdate. task.key carries the source hostname.
func (s *KnotSlurper) ProcessEvent(ctx context.Context, task *Task) error {
	var legacyMessage LegacyGitEvent
	if err := json.Unmarshal(task.message, &legacyMessage); err != nil {
		return fmt.Errorf("unmarshaling message: %w", err)
	}

	if err := s.ProcessLegacyGitRefUpdate(ctx, task.key, &legacyMessage); err != nil {
		return fmt.Errorf("processing gitRefUpdate: %w", err)
	}
	return nil
}

// ProcessLegacyGitRefUpdate marks a known, active repo as desynchronized in
// response to a git ref-update event, so the resyncer will fetch it. Events
// for unknown repos, non-active repos, or already-seen rkeys are skipped
// (counted in the skipped metric) without error.
func (s *KnotSlurper) ProcessLegacyGitRefUpdate(ctx context.Context, source string, evt *LegacyGitEvent) error {
	knotstreamEventsReceived.Inc()

	l := s.logger.With("src", source)

	curr, err := db.GetRepoByName(ctx, s.db, syntax.DID(evt.Event.RepoDid), evt.Event.RepoName)
	if err != nil {
		return fmt.Errorf("failed to get repo '%s': %w", evt.Event.RepoDid+"/"+evt.Event.RepoName, err)
	}
	if curr == nil {
		// if repo doesn't exist in DB, just ignore the event. That repo is unknown.
		//
		// Normally did+name is already enough to perform git-fetch as that's
		// what needed to fetch the repository.
		// But we want to store that in did/rkey in knot-mirror.
		// Therefore, we should ignore when the repository is unknown.
		// Hopefully crawler will sync it later.
		l.Warn("skipping event from unknown repo", "did/name", evt.Event.RepoDid+"/"+evt.Event.RepoName)
		knotstreamEventsSkipped.Inc()
		return nil
	}
	l = l.With("repoAt", curr.AtUri())

	// TODO: should plan resync to resyncBuffer on RepoStateResyncing
	if curr.State != models.RepoStateActive {
		l.Debug("skipping non-active repo")
		knotstreamEventsSkipped.Inc()
		return nil
	}

	// replay guard: event rkeys compare lexically against the stored GitRev
	// (TIDs are lexically ordered), so an rkey <= GitRev was already handled
	if curr.GitRev != "" && evt.Rkey <= curr.GitRev.String() {
		l.Debug("skipping replayed event", "event.Rkey", evt.Rkey, "currentRev", curr.GitRev)
		knotstreamEventsSkipped.Inc()
		return nil
	}

	// if curr.State == models.RepoStateResyncing {
	// 	firehoseEventsSkipped.Inc()
	// 	return fp.events.addToResyncBuffer(ctx, commit)
	// }

	// can't skip anything, update repo state
	if err := db.UpdateRepoState(ctx, s.db, curr.Did, curr.Rkey, models.RepoStateDesynchronized); err != nil {
		return err
	}

	l.Info("event processed", "eventRev", evt.Rkey)

	knotstreamEventsProcessed.Inc()
	return nil
}

// userAgent is the User-Agent header value sent on websocket dials.
func userAgent() string {
	return fmt.Sprintf("knotmirror/%s", versioninfo.Short())
}

// sleepForBackoff maps a consecutive-failure count to a dial delay: zero for
// the first attempt, jittered 50ms-steps below 10 failures, then a flat 30s.
func sleepForBackoff(b int) time.Duration {
	if b == 0 {
		return 0
	}
	if b < 10 {
		return time.Millisecond * time.Duration((50*b)+rand.Intn(500))
	}
	return time.Second * 30
}
+22
knotmirror/knotstream/subscription.go
··· 1 + package knotstream 2 + 3 + import "tangled.org/core/knotmirror/models" 4 + 5 + // subscription represents websocket connection with that host 6 + type subscription struct { 7 + hostname string 8 + 9 + // embedded parallel job scheduler 10 + scheduler *ParallelScheduler 11 + } 12 + 13 + func (s *subscription) LastSeq() int64 { 14 + return s.scheduler.LastSeq() 15 + } 16 + 17 + func (s *subscription) HostCursor() models.HostCursor { 18 + return models.HostCursor{ 19 + Hostname: s.hostname, 20 + LastSeq: s.LastSeq(), 21 + } 22 + }
+29
knotmirror/metrics.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "github.com/prometheus/client_golang/prometheus" 5 + "github.com/prometheus/client_golang/prometheus/promauto" 6 + ) 7 + 8 + // Resync metrics 9 + var ( 10 + // TODO: 11 + // - working / waiting resycner counts 12 + resyncsStarted = promauto.NewCounter(prometheus.CounterOpts{ 13 + Name: "knotmirror_resyncs_started_total", 14 + Help: "Total number of repo resyncs started", 15 + }) 16 + resyncsCompleted = promauto.NewCounter(prometheus.CounterOpts{ 17 + Name: "knotmirror_resyncs_completed_total", 18 + Help: "Total number of repo resyncs completed", 19 + }) 20 + resyncsFailed = promauto.NewCounter(prometheus.CounterOpts{ 21 + Name: "knotmirror_resyncs_failed_total", 22 + Help: "Total number of repo resyncs failed", 23 + }) 24 + resyncDuration = promauto.NewHistogram(prometheus.HistogramOpts{ 25 + Name: "knotmirror_resync_duration_seconds", 26 + Help: "Duration of repo resync operations", 27 + Buckets: prometheus.ExponentialBuckets(0.1, 2, 12), 28 + }) 29 + )
+122
knotmirror/models/models.go
··· 1 + package models 2 + 3 + import ( 4 + "fmt" 5 + 6 + "github.com/bluesky-social/indigo/atproto/syntax" 7 + "tangled.org/core/api/tangled" 8 + ) 9 + 10 + type Repo struct { 11 + Did syntax.DID 12 + Rkey syntax.RecordKey 13 + Cid *syntax.CID 14 + // content of tangled.Repo 15 + Name string 16 + KnotDomain string 17 + 18 + GitRev syntax.TID // last processed git.refUpdate revision 19 + RepoSha string // sha256 sum of git refs (to avoid no-op git fetch) 20 + State RepoState 21 + ErrorMsg string 22 + RetryCount int 23 + RetryAfter int64 // Unix timestamp (seconds) 24 + } 25 + 26 + func (r *Repo) AtUri() syntax.ATURI { 27 + return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey)) 28 + } 29 + 30 + func (r *Repo) DidSlashRepo() string { 31 + return fmt.Sprintf("%s/%s", r.Did, r.Name) 32 + } 33 + 34 + type RepoState string 35 + 36 + const ( 37 + RepoStatePending RepoState = "pending" 38 + RepoStateDesynchronized RepoState = "desynchronized" 39 + RepoStateResyncing RepoState = "resyncing" 40 + RepoStateActive RepoState = "active" 41 + RepoStateSuspended RepoState = "suspended" 42 + RepoStateError RepoState = "error" 43 + ) 44 + 45 + var AllRepoStates = []RepoState{ 46 + RepoStatePending, 47 + RepoStateDesynchronized, 48 + RepoStateResyncing, 49 + RepoStateActive, 50 + RepoStateSuspended, 51 + RepoStateError, 52 + } 53 + 54 + func (s RepoState) IsResyncing() bool { 55 + return s == RepoStateResyncing 56 + } 57 + 58 + type HostCursor struct { 59 + Hostname string 60 + LastSeq int64 61 + } 62 + 63 + type Host struct { 64 + Hostname string 65 + NoSSL bool 66 + Status HostStatus 67 + LastSeq int64 68 + } 69 + 70 + type HostStatus string 71 + 72 + const ( 73 + HostStatusActive HostStatus = "active" 74 + HostStatusIdle HostStatus = "idle" 75 + HostStatusOffline HostStatus = "offline" 76 + HostStatusThrottled HostStatus = "throttled" 77 + HostStatusBanned HostStatus = "banned" 78 + ) 79 + 80 + var AllHostStatuses = []HostStatus{ 81 + HostStatusActive, 
82 + HostStatusIdle, 83 + HostStatusOffline, 84 + HostStatusThrottled, 85 + HostStatusBanned, 86 + } 87 + 88 + func (h *Host) URL() string { 89 + if h.NoSSL { 90 + return fmt.Sprintf("http://%s", h.Hostname) 91 + } else { 92 + return fmt.Sprintf("https://%s", h.Hostname) 93 + } 94 + } 95 + 96 + func (h *Host) WsURL() string { 97 + if h.NoSSL { 98 + return fmt.Sprintf("ws://%s", h.Hostname) 99 + } else { 100 + return fmt.Sprintf("wss://%s", h.Hostname) 101 + } 102 + } 103 + 104 + // func (h *Host) SubscribeGitRefsURL(cursor int64) string { 105 + // scheme := "wss" 106 + // if h.NoSSL { 107 + // scheme = "ws" 108 + // } 109 + // u := fmt.Sprintf("%s://%s/xrpc/%s", scheme, h.Hostname, tangled.SubscribeGitRefsNSID) 110 + // if cursor > 0 { 111 + // u = fmt.Sprintf("%s?cursor=%d", u, h.LastSeq) 112 + // } 113 + // return u 114 + // } 115 + 116 + func (h *Host) LegacyEventsURL(cursor int64) string { 117 + u := fmt.Sprintf("%s/events", h.WsURL()) 118 + if cursor > 0 { 119 + u = fmt.Sprintf("%s?cursor=%d", u, cursor) 120 + } 121 + return u 122 + }
+8
knotmirror/readme.md
# KnotMirror

KnotMirror is a git mirror service for all known repos. Heavily inspired by [indigo/relay] and [indigo/tap].

KnotMirror syncs the repo list using tap and subscribes to all known knots via KnotStream.

[indigo/relay]: https://github.com/bluesky-social/indigo/tree/main/cmd/relay
[indigo/tap]: https://github.com/bluesky-social/indigo/tree/main/cmd/tap
+360
knotmirror/resyncer.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "errors" 7 + "fmt" 8 + "log/slog" 9 + "math/rand" 10 + "net/http" 11 + "net/url" 12 + "strings" 13 + "sync" 14 + "time" 15 + 16 + "github.com/bluesky-social/indigo/atproto/syntax" 17 + "tangled.org/core/knotmirror/config" 18 + "tangled.org/core/knotmirror/db" 19 + "tangled.org/core/knotmirror/models" 20 + "tangled.org/core/log" 21 + ) 22 + 23 + type Resyncer struct { 24 + logger *slog.Logger 25 + db *sql.DB 26 + gitm GitMirrorManager 27 + cfg *config.Config 28 + 29 + claimJobMu sync.Mutex 30 + 31 + runningJobs map[syntax.ATURI]context.CancelFunc 32 + runningJobsMu sync.Mutex 33 + 34 + repoFetchTimeout time.Duration 35 + manualResyncTimeout time.Duration 36 + parallelism int 37 + 38 + knotBackoff map[string]time.Time 39 + knotBackoffMu sync.RWMutex 40 + } 41 + 42 + func NewResyncer(l *slog.Logger, db *sql.DB, gitm GitMirrorManager, cfg *config.Config) *Resyncer { 43 + return &Resyncer{ 44 + logger: log.SubLogger(l, "resyncer"), 45 + db: db, 46 + gitm: gitm, 47 + cfg: cfg, 48 + 49 + runningJobs: make(map[syntax.ATURI]context.CancelFunc), 50 + 51 + repoFetchTimeout: cfg.GitRepoFetchTimeout, 52 + manualResyncTimeout: 30 * time.Minute, 53 + parallelism: cfg.ResyncParallelism, 54 + 55 + knotBackoff: make(map[string]time.Time), 56 + } 57 + } 58 + 59 + func (r *Resyncer) Start(ctx context.Context) { 60 + for i := 0; i < r.parallelism; i++ { 61 + go r.runResyncWorker(ctx, i) 62 + } 63 + } 64 + 65 + func (r *Resyncer) runResyncWorker(ctx context.Context, workerID int) { 66 + l := r.logger.With("worker", workerID) 67 + for { 68 + select { 69 + case <-ctx.Done(): 70 + l.Info("resync worker shutting down", "error", ctx.Err()) 71 + return 72 + default: 73 + } 74 + repoAt, found, err := r.claimResyncJob(ctx) 75 + if err != nil { 76 + l.Error("failed to claim resync job", "error", err) 77 + time.Sleep(time.Second) 78 + continue 79 + } 80 + if !found { 81 + time.Sleep(time.Second) 82 + continue 83 + } 
84 + l.Info("processing resync", "aturi", repoAt) 85 + if err := r.resyncRepo(ctx, repoAt); err != nil { 86 + l.Error("resync failed", "aturi", repoAt, "error", err) 87 + } 88 + } 89 + } 90 + 91 + func (r *Resyncer) registerRunning(repo syntax.ATURI, cancel context.CancelFunc) { 92 + r.runningJobsMu.Lock() 93 + defer r.runningJobsMu.Unlock() 94 + 95 + if _, exists := r.runningJobs[repo]; exists { 96 + return 97 + } 98 + r.runningJobs[repo] = cancel 99 + } 100 + 101 + func (r *Resyncer) unregisterRunning(repo syntax.ATURI) { 102 + r.runningJobsMu.Lock() 103 + defer r.runningJobsMu.Unlock() 104 + 105 + delete(r.runningJobs, repo) 106 + } 107 + 108 + func (r *Resyncer) CancelResyncJob(repo syntax.ATURI) { 109 + r.runningJobsMu.Lock() 110 + defer r.runningJobsMu.Unlock() 111 + 112 + cancel, ok := r.runningJobs[repo] 113 + if !ok { 114 + return 115 + } 116 + delete(r.runningJobs, repo) 117 + cancel() 118 + } 119 + 120 + // TriggerResyncJob manually triggers the resync job 121 + func (r *Resyncer) TriggerResyncJob(ctx context.Context, repoAt syntax.ATURI) error { 122 + repo, err := db.GetRepoByAtUri(ctx, r.db, repoAt) 123 + if err != nil { 124 + return fmt.Errorf("failed to get repo: %w", err) 125 + } 126 + if repo == nil { 127 + return fmt.Errorf("repo not found: %s", repoAt) 128 + } 129 + 130 + if repo.State == models.RepoStateResyncing { 131 + return fmt.Errorf("repo already resyncing") 132 + } 133 + 134 + repo.State = models.RepoStatePending 135 + repo.RetryAfter = -1 // resyncer will prioritize this 136 + 137 + if err := db.UpsertRepo(ctx, r.db, repo); err != nil { 138 + return fmt.Errorf("updating repo state to pending %w", err) 139 + } 140 + return nil 141 + } 142 + 143 + func (r *Resyncer) claimResyncJob(ctx context.Context) (syntax.ATURI, bool, error) { 144 + // use mutex to prevent duplicated jobs 145 + r.claimJobMu.Lock() 146 + defer r.claimJobMu.Unlock() 147 + 148 + var repoAt syntax.ATURI 149 + now := time.Now().Unix() 150 + if err := 
r.db.QueryRowContext(ctx, 151 + `update repos 152 + set state = $1 153 + where at_uri = ( 154 + select at_uri from repos 155 + where state in ($2, $3, $4) 156 + and (retry_after = -1 or retry_after = 0 or retry_after < $5) 157 + order by 158 + (retry_after = -1) desc, 159 + (retry_after = 0) desc, 160 + retry_after 161 + limit 1 162 + ) 163 + returning at_uri 164 + `, 165 + models.RepoStateResyncing, 166 + models.RepoStatePending, models.RepoStateDesynchronized, models.RepoStateError, 167 + now, 168 + ).Scan(&repoAt); err != nil { 169 + if errors.Is(err, sql.ErrNoRows) { 170 + return "", false, nil 171 + } 172 + return "", false, err 173 + } 174 + 175 + return repoAt, true, nil 176 + } 177 + 178 + func (r *Resyncer) resyncRepo(ctx context.Context, repoAt syntax.ATURI) error { 179 + // ctx, span := tracer.Start(ctx, "resyncRepo") 180 + // span.SetAttributes(attribute.String("aturi", repoAt)) 181 + // defer span.End() 182 + 183 + resyncsStarted.Inc() 184 + startTime := time.Now() 185 + 186 + jobCtx, cancel := context.WithCancel(ctx) 187 + r.registerRunning(repoAt, cancel) 188 + defer r.unregisterRunning(repoAt) 189 + 190 + success, err := r.doResync(jobCtx, repoAt) 191 + if !success { 192 + resyncsFailed.Inc() 193 + resyncDuration.Observe(time.Since(startTime).Seconds()) 194 + return r.handleResyncFailure(ctx, repoAt, err) 195 + } 196 + 197 + resyncsCompleted.Inc() 198 + resyncDuration.Observe(time.Since(startTime).Seconds()) 199 + return nil 200 + } 201 + 202 + func (r *Resyncer) doResync(ctx context.Context, repoAt syntax.ATURI) (bool, error) { 203 + // ctx, span := tracer.Start(ctx, "doResync") 204 + // span.SetAttributes(attribute.String("aturi", repoAt)) 205 + // defer span.End() 206 + 207 + repo, err := db.GetRepoByAtUri(ctx, r.db, repoAt) 208 + if err != nil { 209 + return false, fmt.Errorf("failed to get repo: %w", err) 210 + } 211 + if repo == nil { // untracked repo, skip 212 + return false, nil 213 + } 214 + 215 + r.knotBackoffMu.RLock() 216 + 
backoffUntil, inBackoff := r.knotBackoff[repo.KnotDomain] 217 + r.knotBackoffMu.RUnlock() 218 + if inBackoff && time.Now().Before(backoffUntil) { 219 + return false, nil 220 + } 221 + 222 + // HACK: check knot reachability with short timeout before running actual fetch. 223 + // This is crucial as git-cli doesn't support http connection timeout. 224 + // `http.lowSpeedTime` is only applied _after_ the connection. 225 + if err := r.checkKnotReachability(ctx, repo); err != nil { 226 + if isRateLimitError(err) { 227 + r.knotBackoffMu.Lock() 228 + r.knotBackoff[repo.KnotDomain] = time.Now().Add(10 * time.Second) 229 + r.knotBackoffMu.Unlock() 230 + return false, nil 231 + } 232 + // TODO: suspend repo on 404. KnotStream updates will change the repo state back online 233 + return false, fmt.Errorf("knot unreachable: %w", err) 234 + } 235 + 236 + timeout := r.repoFetchTimeout 237 + if repo.RetryAfter == -1 { 238 + timeout = r.manualResyncTimeout 239 + } 240 + fetchCtx, cancel := context.WithTimeout(ctx, timeout) 241 + defer cancel() 242 + 243 + if err := r.gitm.Sync(fetchCtx, repo); err != nil { 244 + return false, err 245 + } 246 + 247 + // repo.GitRev = <processed git.refUpdate revision> 248 + // repo.RepoSha = <sha256 sum of git refs> 249 + repo.State = models.RepoStateActive 250 + repo.ErrorMsg = "" 251 + repo.RetryCount = 0 252 + repo.RetryAfter = 0 253 + if err := db.UpsertRepo(ctx, r.db, repo); err != nil { 254 + return false, fmt.Errorf("updating repo state to active %w", err) 255 + } 256 + return true, nil 257 + } 258 + 259 + type knotStatusError struct { 260 + StatusCode int 261 + } 262 + 263 + func (ke *knotStatusError) Error() string { 264 + return fmt.Sprintf("request failed with status code (HTTP %d)", ke.StatusCode) 265 + } 266 + 267 + func isRateLimitError(err error) bool { 268 + var knotErr *knotStatusError 269 + if errors.As(err, &knotErr) { 270 + return knotErr.StatusCode == http.StatusTooManyRequests 271 + } 272 + return false 273 + } 274 + 275 + // 
checkKnotReachability checks if Knot is reachable and is valid git remote server 276 + func (r *Resyncer) checkKnotReachability(ctx context.Context, repo *models.Repo) error { 277 + repoUrl, err := makeRepoRemoteUrl(repo.KnotDomain, repo.DidSlashRepo(), r.cfg.KnotUseSSL) 278 + if err != nil { 279 + return err 280 + } 281 + 282 + repoUrl += "/info/refs?service=git-upload-pack" 283 + 284 + r.logger.Debug("checking knot reachability", "url", repoUrl) 285 + 286 + client := http.Client{ 287 + Timeout: 30 * time.Second, 288 + } 289 + req, err := http.NewRequestWithContext(ctx, "GET", repoUrl, nil) 290 + if err != nil { 291 + return err 292 + } 293 + req.Header.Set("User-Agent", "git/2.x") 294 + req.Header.Set("Accept", "*/*") 295 + 296 + resp, err := client.Do(req) 297 + if err != nil { 298 + var uerr *url.Error 299 + if errors.As(err, &uerr) { 300 + return fmt.Errorf("request failed: %w", uerr.Unwrap()) 301 + } 302 + return fmt.Errorf("request failed: %w", err) 303 + } 304 + defer resp.Body.Close() 305 + 306 + if resp.StatusCode != http.StatusOK { 307 + return &knotStatusError{resp.StatusCode} 308 + } 309 + 310 + // check if target is git server 311 + ct := resp.Header.Get("Content-Type") 312 + if !strings.Contains(ct, "application/x-git-upload-pack-advertisement") { 313 + return fmt.Errorf("unexpected content-type: %s", ct) 314 + } 315 + 316 + return nil 317 + } 318 + 319 + func (r *Resyncer) handleResyncFailure(ctx context.Context, repoAt syntax.ATURI, err error) error { 320 + r.logger.Debug("handleResyncFailure", "at_uri", repoAt, "err", err) 321 + var state models.RepoState 322 + var errMsg string 323 + if err == nil { 324 + state = models.RepoStateDesynchronized 325 + errMsg = "" 326 + } else { 327 + state = models.RepoStateError 328 + errMsg = err.Error() 329 + } 330 + 331 + repo, err := db.GetRepoByAtUri(ctx, r.db, repoAt) 332 + if err != nil { 333 + return fmt.Errorf("failed to get repo: %w", err) 334 + } 335 + if repo == nil { 336 + return fmt.Errorf("failed to 
get repo. repo '%s' doesn't exist in db", repoAt) 337 + } 338 + 339 + // start a 1 min & go up to 1 hr between retries 340 + var retryCount = repo.RetryCount + 1 341 + var retryAfter = time.Now().Add(backoff(retryCount, 60) * 60).Unix() 342 + 343 + // remove null bytes 344 + errMsg = strings.ReplaceAll(errMsg, "\x00", "") 345 + 346 + repo.State = state 347 + repo.ErrorMsg = errMsg 348 + repo.RetryCount = retryCount 349 + repo.RetryAfter = retryAfter 350 + if err := db.UpsertRepo(ctx, r.db, repo); err != nil { 351 + return fmt.Errorf("failed to update repo state: %w", err) 352 + } 353 + return nil 354 + } 355 + 356 + func backoff(retries int, max int) time.Duration { 357 + dur := min(1<<retries, max) 358 + jitter := time.Millisecond * time.Duration(rand.Intn(1000)) 359 + return time.Second*time.Duration(dur) + jitter 360 + }
+167
knotmirror/tapclient.go
··· 1 + package knotmirror 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "encoding/json" 7 + "fmt" 8 + "log/slog" 9 + "net/netip" 10 + "net/url" 11 + "strings" 12 + "time" 13 + 14 + "tangled.org/core/api/tangled" 15 + "tangled.org/core/knotmirror/config" 16 + "tangled.org/core/knotmirror/db" 17 + "tangled.org/core/knotmirror/knotstream" 18 + "tangled.org/core/knotmirror/models" 19 + "tangled.org/core/log" 20 + "tangled.org/core/tapc" 21 + ) 22 + 23 + type Tap struct { 24 + logger *slog.Logger 25 + cfg *config.Config 26 + tap tapc.Client 27 + db *sql.DB 28 + gitm GitMirrorManager 29 + ks *knotstream.KnotStream 30 + } 31 + 32 + func NewTapClient(l *slog.Logger, cfg *config.Config, db *sql.DB, gitm GitMirrorManager, ks *knotstream.KnotStream) *Tap { 33 + return &Tap{ 34 + logger: log.SubLogger(l, "tapclient"), 35 + cfg: cfg, 36 + tap: tapc.NewClient(cfg.TapUrl, ""), 37 + db: db, 38 + gitm: gitm, 39 + ks: ks, 40 + } 41 + } 42 + 43 + func (t *Tap) Start(ctx context.Context) { 44 + // TODO: better reconnect logic 45 + go func() { 46 + for { 47 + t.tap.Connect(ctx, &tapc.SimpleIndexer{ 48 + EventHandler: t.processEvent, 49 + }) 50 + time.Sleep(time.Second) 51 + } 52 + }() 53 + } 54 + 55 + func (t *Tap) processEvent(ctx context.Context, evt tapc.Event) error { 56 + l := t.logger.With("component", "tapIndexer") 57 + 58 + var err error 59 + switch evt.Type { 60 + case tapc.EvtRecord: 61 + switch evt.Record.Collection.String() { 62 + case tangled.RepoNSID: 63 + err = t.processRepo(ctx, evt.Record) 64 + } 65 + } 66 + 67 + if err != nil { 68 + l.Error("failed to process message. 
will retry later", "event.ID", evt.ID, "err", err) 69 + return err 70 + } 71 + return nil 72 + } 73 + 74 + func (t *Tap) processRepo(ctx context.Context, evt *tapc.RecordEventData) error { 75 + switch evt.Action { 76 + case tapc.RecordCreateAction, tapc.RecordUpdateAction: 77 + record := tangled.Repo{} 78 + if err := json.Unmarshal(evt.Record, &record); err != nil { 79 + return fmt.Errorf("parsing record: %w", err) 80 + } 81 + 82 + knotUrl := record.Knot 83 + if !strings.Contains(record.Knot, "://") { 84 + if host, _ := db.GetHost(ctx, t.db, record.Knot); host != nil { 85 + knotUrl = host.URL() 86 + } else { 87 + t.logger.Warn("repo is from unknown knot") 88 + if t.cfg.KnotUseSSL { 89 + knotUrl = "https://" + knotUrl 90 + } else { 91 + knotUrl = "http://" + knotUrl 92 + } 93 + } 94 + } 95 + 96 + status := models.RepoStatePending 97 + errMsg := "" 98 + u, err := url.Parse(knotUrl) 99 + if err != nil { 100 + status = models.RepoStateSuspended 101 + errMsg = "failed to parse knot url" 102 + } else if t.cfg.KnotSSRF && isPrivate(u.Hostname()) { 103 + status = models.RepoStateSuspended 104 + errMsg = "suspending non-public knot" 105 + } 106 + 107 + repo := &models.Repo{ 108 + Did: evt.Did, 109 + Rkey: evt.Rkey, 110 + Cid: evt.CID, 111 + Name: record.Name, 112 + KnotDomain: knotUrl, 113 + State: status, 114 + ErrorMsg: errMsg, 115 + RetryAfter: 0, // clear retry info 116 + RetryCount: 0, 117 + } 118 + 119 + if evt.Action == tapc.RecordUpdateAction { 120 + exist, err := t.gitm.Exist(repo) 121 + if err != nil { 122 + return fmt.Errorf("checking git repo existance: %w", err) 123 + } 124 + if exist { 125 + // update git repo remote url 126 + if err := t.gitm.RemoteSetUrl(ctx, repo); err != nil { 127 + return fmt.Errorf("updating git repo remote url: %w", err) 128 + } 129 + } 130 + } 131 + 132 + if err := db.UpsertRepo(ctx, t.db, repo); err != nil { 133 + return fmt.Errorf("upserting repo to db: %w", err) 134 + } 135 + 136 + if !t.ks.CheckIfSubscribed(record.Knot) { 137 + if 
err := t.ks.SubscribeHost(ctx, record.Knot, !t.cfg.KnotUseSSL); err != nil { 138 + return fmt.Errorf("subscribing to knot: %w", err) 139 + } 140 + } 141 + 142 + case tapc.RecordDeleteAction: 143 + if err := db.DeleteRepo(ctx, t.db, evt.Did, evt.Rkey); err != nil { 144 + return fmt.Errorf("deleting repo from db: %w", err) 145 + } 146 + } 147 + return nil 148 + } 149 + 150 + // isPrivate checks if host is private network. It doesn't perform DNS resolution 151 + func isPrivate(host string) bool { 152 + if host == "localhost" { 153 + return true 154 + } 155 + addr, err := netip.ParseAddr(host) 156 + if err != nil { 157 + return false 158 + } 159 + return isPrivateAddr(addr) 160 + } 161 + 162 + func isPrivateAddr(addr netip.Addr) bool { 163 + return addr.IsLoopback() || 164 + addr.IsPrivate() || 165 + addr.IsLinkLocalUnicast() || 166 + addr.IsLinkLocalMulticast() 167 + }
+55
knotmirror/templates/base.html
··· 1 + {{define "base"}} 2 + <!DOCTYPE html> 3 + <html> 4 + <head> 5 + <title>KnotMirror Admin</title> 6 + <script src="https://cdn.jsdelivr.net/npm/htmx.org@2.0.8/dist/htmx.min.js" integrity="sha384-/TgkGk7p307TH7EXJDuUlgG3Ce1UVolAOFopFekQkkXihi5u/6OCvVKyz1W+idaz" crossorigin="anonymous"></script> 7 + <style> 8 + nav { margin-bottom: 20px; border-bottom: 1px solid #ccc; padding: 10px 0; } 9 + nav a { margin-right: 15px; } 10 + table { width: 100%; border-collapse: collapse; } 11 + th, td { text-align: left; padding: 8px; border: 1px solid #ddd; } 12 + .pagination { margin-top: 20px; } 13 + .filters { background: #f4f4f4; padding: 15px; margin-bottom: 20px; } 14 + #notifications { 15 + position: fixed; 16 + bottom: 8px; 17 + right: 8px; 18 + z-index: 1000; 19 + pointer-events: none; 20 + } 21 + .notif { 22 + pointer-events: auto; 23 + background: #333; 24 + color: #fff; 25 + padding: 2px 4px; 26 + margin: 4px 0; 27 + opacity: 0.95; 28 + } 29 + .notif.warn { background: #ed6c02 } 30 + .notif.error { background: #d32f2f } 31 + </style> 32 + </head> 33 + <body> 34 + <nav> 35 + <a href="/repos">Repositories</a> 36 + <a href="/hosts">Knot Hosts</a> 37 + </nav> 38 + <main id="main"> 39 + {{template "content" .}} 40 + </main> 41 + <div id="notifications"></div> 42 + <script> 43 + document.body.addEventListener("htmx:oobBeforeSwap", (evt) => { 44 + evt.detail.fragment.querySelectorAll(".notif").forEach((el) => { 45 + console.debug("set timeout to notif element", el) 46 + setTimeout(() => { 47 + console.debug("clearing notif element", el); 48 + el.remove(); 49 + }, 10 * 1000); 50 + }); 51 + }); 52 + </script> 53 + </body> 54 + </html> 55 + {{end}}
+44
knotmirror/templates/hosts.html
··· 1 + {{template "base" .}} 2 + {{define "content"}} 3 + <h2>Knot Hosts</h2> 4 + 5 + <div class="filters"> 6 + <form 7 + hx-get="" 8 + hx-target="#table" 9 + hx-select="#table" 10 + hx-swap="outerHTML" 11 + hx-trigger="every 10s" 12 + > 13 + <select name="status"> 14 + {{ range const.AllHostStatuses }} 15 + <option value="{{.}}" {{ if eq $.FilterByStatus . }}selected{{end}}>{{.}}</option> 16 + {{ end }} 17 + </select> 18 + <button type="submit">Filter</button> 19 + </form> 20 + </div> 21 + 22 + <table id="table"> 23 + <thead> 24 + <tr> 25 + <th>Hostname</th> 26 + <th>SSL</th> 27 + <th>Status</th> 28 + <th>Last Seq</th> 29 + </tr> 30 + </thead> 31 + <tbody> 32 + {{range .Hosts}} 33 + <tr> 34 + <td>{{.Hostname}}</td> 35 + <td>{{if .NoSSL}}False{{else}}True{{end}}</td> 36 + <td>{{.Status}}</td> 37 + <td>{{.LastSeq}}</td> 38 + </tr> 39 + {{else}} 40 + <tr><td colspan="4">No hosts registered.</td></tr> 41 + {{end}} 42 + </tbody> 43 + </table> 44 + {{end}}
+86
knotmirror/templates/repos.html
{{template "base" .}}
{{define "content"}}
{{/* Repository listing with per-state counts, filters, and resync controls.
     The filter form re-fetches #table every 10s via htmx. */}}
<h2>Repositories</h2>

<div class="filters">
  <form
    hx-get=""
    hx-target="#table"
    hx-select="#table"
    hx-swap="outerHTML"
    hx-trigger="every 10s"
  >
    <input type="text" name="did" placeholder="DID" value="{{.FilterByDid}}">
    <input type="text" name="knot" placeholder="Knot Domain" value="{{.FilterByKnot}}">
    <select name="state">
      <option value="">-- State --</option>
      {{ range const.AllRepoStates }}
      <option value="{{.}}" {{ if eq $.FilterByState . }}selected{{end}}>{{.}}</option>
      {{ end }}
    </select>
    <button type="submit">Filter</button>
    <a href="/repos">Clear</a>
  </form>
</div>

<div id="table">
  <div class="repo-state-indicators">
    {{range const.AllRepoStates}}
    <span class="state-pill state-{{.}}">
      {{.}}: {{index $.RepoCounts .}}
    </span>
    {{end}}
  </div>
  <table>
    <thead>
      <tr>
        <th>DID</th>
        <th>Name</th>
        <th>Knot</th>
        <th>State</th>
        <th>Retry</th>
        <th>Retry After</th>
        <th>Error Message</th>
        <th>Action</th>
      </tr>
    </thead>
    <tbody>
      {{range .Repos}}
      <tr>
        <td><code>{{.Did}}</code></td>
        <td>{{.Name}}</td>
        <td>{{.KnotDomain}}</td>
        <td><strong>{{.State}}</strong></td>
        <td>{{.RetryCount}}</td>
        <td>{{readt .RetryAfter}}</td>
        <td>{{.ErrorMsg}}</td>
        <td>
          {{/* toggles between cancel/trigger depending on whether a resync is
               already in flight; hx-disabled-elt guards double-submits */}}
          <form
            {{ if .State.IsResyncing -}}
            hx-post="/api/cancelRepoResync"
            {{- else -}}
            hx-post="/api/triggerRepoResync"
            {{- end }}
            hx-swap="none"
            hx-disabled-elt="find button"
          >
            <input type="hidden" name="repo" value="{{.AtUri}}">
            <button type="submit">{{ if .State.IsResyncing }}cancel{{ else }}resync{{ end }}</button>
          </form>
        </td>
      </tr>
      {{else}}
      {{/* span the full 8-column table (was colspan="99") */}}
      <tr><td colspan="8">No repositories found.</td></tr>
      {{end}}
    </tbody>
  </table>
</div>

<div class="pagination">
  {{if gt .Page 1}}
  <a href="?page={{sub .Page 1}}&did={{.FilterByDid}}&knot={{.FilterByKnot}}&state={{.FilterByState}}">« Previous</a>
  {{end}}
  <span>Page {{.Page}}</span>
  <a href="?page={{add .Page 1}}&did={{.FilterByDid}}&knot={{.FilterByKnot}}&state={{.FilterByState}}">Next »</a>
</div>
{{end}}
+106
knotmirror/xrpc/git_get_archive.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "compress/gzip" 5 + "fmt" 6 + "net/http" 7 + "net/url" 8 + "strings" 9 + 10 + "github.com/bluesky-social/indigo/atproto/atclient" 11 + "github.com/bluesky-social/indigo/atproto/syntax" 12 + "github.com/go-git/go-git/v5/plumbing" 13 + "tangled.org/core/api/tangled" 14 + "tangled.org/core/knotmirror/db" 15 + "tangled.org/core/knotserver/git" 16 + ) 17 + 18 + func (x *Xrpc) GetArchive(w http.ResponseWriter, r *http.Request) { 19 + var ( 20 + repoQuery = r.URL.Query().Get("repo") 21 + ref = r.URL.Query().Get("ref") 22 + format = r.URL.Query().Get("format") 23 + prefix = r.URL.Query().Get("prefix") 24 + ) 25 + 26 + repo, err := syntax.ParseATURI(repoQuery) 27 + if err != nil || repo.RecordKey() == "" { 28 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)}) 29 + return 30 + } 31 + 32 + if format != "tar.gz" { 33 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: "only tar.gz format is supported"}) 34 + return 35 + } 36 + if format == "" { 37 + format = "tar.gz" 38 + } 39 + 40 + l := x.logger.With("repo", repo, "ref", ref, "format", format, "prefix", prefix) 41 + ctx := r.Context() 42 + 43 + repoPath, err := x.makeRepoPath(ctx, repo) 44 + if err != nil { 45 + l.Error("failed to resolve repo at-uri", "err", err) 46 + writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to resolve repo"}) 47 + return 48 + } 49 + 50 + gr, err := git.Open(repoPath, ref) 51 + if err != nil { 52 + l.Error("failed to open git repo", "err", err) 53 + writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to open git repo"}) 54 + return 55 + } 56 + 57 + repoName, err := func() (string, error) { 58 + r, err := db.GetRepoByAtUri(ctx, x.db, repo) 59 + if err != nil { 60 + return "", err 61 + } 62 + if r == nil { 63 + return "", 
fmt.Errorf("repo not found: %s", repo) 64 + } 65 + return r.Name, nil 66 + }() 67 + if err != nil { 68 + l.Error("failed to get repo name", "err", err) 69 + writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to retrieve repo name"}) 70 + return 71 + } 72 + 73 + safeRefFilename := strings.ReplaceAll(plumbing.ReferenceName(ref).Short(), "/", "-") 74 + immutableLink := func() string { 75 + params := url.Values{} 76 + params.Set("repo", repo.String()) 77 + params.Set("ref", gr.Hash().String()) 78 + params.Set("format", format) 79 + params.Set("prefix", prefix) 80 + return fmt.Sprintf("%s/xrpc/%s?%s", x.cfg.BaseUrl(), tangled.GitTempGetArchiveNSID, params.Encode()) 81 + }() 82 + 83 + filename := fmt.Sprintf("%s-%s.tar.gz", repoName, safeRefFilename) 84 + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) 85 + w.Header().Set("Content-Type", "application/gzip") 86 + w.Header().Set("Link", fmt.Sprintf("<%s>; rel=\"immutable\"", immutableLink)) 87 + 88 + gw := gzip.NewWriter(w) 89 + defer gw.Close() 90 + 91 + if err := gr.WriteTar(gw, prefix); err != nil { 92 + // once we start writing to the body we can't report error anymore 93 + // so we are only left with logging the error 94 + l.Error("writing tar file", "err", err.Error()) 95 + w.WriteHeader(http.StatusInternalServerError) 96 + return 97 + } 98 + 99 + if err := gw.Flush(); err != nil { 100 + // once we start writing to the body we can't report error anymore 101 + // so we are only left with logging the error 102 + l.Error("flushing", "err", err.Error()) 103 + w.WriteHeader(http.StatusInternalServerError) 104 + return 105 + } 106 + }
+86
knotmirror/xrpc/git_get_blob.go
package xrpc

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"slices"

	"github.com/bluesky-social/indigo/atproto/atclient"
	"github.com/bluesky-social/indigo/atproto/syntax"
	"github.com/go-git/go-git/v5/plumbing/object"
	"tangled.org/core/knotserver/git"
)

// GetBlob streams the raw contents of a single file from a mirrored repo.
// Query params: repo (at-uri, required), ref (optional), path (required).
// The blob is always served as application/octet-stream.
func (x *Xrpc) GetBlob(w http.ResponseWriter, r *http.Request) {
	var (
		repoQuery = r.URL.Query().Get("repo")
		ref       = r.URL.Query().Get("ref") // ref can be empty (git.Open handles this)
		path      = r.URL.Query().Get("path")
	)

	repo, err := syntax.ParseATURI(repoQuery)
	if err != nil || repo.RecordKey() == "" {
		writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)})
		return
	}

	l := x.logger.With("repo", repo, "ref", ref, "path", path)

	if path == "" {
		writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: "missing path parameter"})
		return
	}

	file, err := x.getFile(r.Context(), repo, ref, path)
	if err != nil {
		// TODO: better error return
		l.Error("failed to get blob", "err", err)
		writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to get blob"})
		return
	}

	reader, err := file.Reader()
	if err != nil {
		l.Error("failed to read blob", "err", err)
		writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to read the blob"})
		return
	}
	defer reader.Close()

	// headers are committed here; a copy error mid-stream can only be logged
	w.Header().Set("Content-Type", "application/octet-stream")
	if _, err := io.Copy(w, reader); err != nil {
		l.Error("failed to serve the blob", "err", err)
	}
}

// getFile resolves the repo's on-disk path, opens it at ref, and looks up
// the file object at path.
func (x *Xrpc) getFile(ctx context.Context, repo syntax.ATURI, ref, path string) (*object.File, error) {
	repoPath, err := x.makeRepoPath(ctx, repo)
	if err != nil {
		return nil, fmt.Errorf("resolving repo at-uri: %w", err)
	}

	gr, err := git.Open(repoPath, ref)
	if err != nil {
		return nil, fmt.Errorf("opening git repo: %w", err)
	}

	return gr.File(path)
}

// MIME types that are textual despite an application/* prefix.
// NOTE(review): isTextualMimeType is not referenced by GetBlob yet (blobs are
// always served as octet-stream) — presumably kept for a sibling handler or
// future use; confirm before removing.
var textualMimeTypes = []string{
	"application/json",
	"application/xml",
	"application/yaml",
	"application/x-yaml",
	"application/toml",
	"application/javascript",
	"application/ecmascript",
}

// isTextualMimeType returns true if the MIME type represents textual content
// that should be served as text/plain for security reasons
func isTextualMimeType(mimeType string) bool {
	return slices.Contains(textualMimeTypes, mimeType)
}
+85
knotmirror/xrpc/git_get_branch.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "net/http" 7 + "net/url" 8 + "time" 9 + 10 + "github.com/bluesky-social/indigo/atproto/atclient" 11 + "github.com/bluesky-social/indigo/atproto/syntax" 12 + "tangled.org/core/api/tangled" 13 + "tangled.org/core/knotserver/git" 14 + ) 15 + 16 + // TODO: maybe rename to `sh.tangled.repo.temp.getCommit`? 17 + // then, we should ensure the given `ref` is valid 18 + func (x *Xrpc) GetBranch(w http.ResponseWriter, r *http.Request) { 19 + var ( 20 + repoQuery = r.URL.Query().Get("repo") 21 + nameQuery = r.URL.Query().Get("name") 22 + ) 23 + 24 + repo, err := syntax.ParseATURI(repoQuery) 25 + if err != nil || repo.RecordKey() == "" { 26 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)}) 27 + return 28 + } 29 + 30 + if nameQuery == "" { 31 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: "missing name parameter"}) 32 + return 33 + } 34 + branchName, _ := url.PathUnescape(nameQuery) 35 + 36 + l := x.logger.With("repo", repo, "branch", branchName) 37 + 38 + out, err := x.getBranch(r.Context(), repo, branchName) 39 + if err != nil { 40 + // TODO: better error return 41 + l.Error("failed to get branch", "err", err) 42 + writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to get branch"}) 43 + return 44 + } 45 + writeJson(w, http.StatusOK, out) 46 + } 47 + 48 + func (x *Xrpc) getBranch(ctx context.Context, repo syntax.ATURI, branchName string) (*tangled.GitTempGetBranch_Output, error) { 49 + repoPath, err := x.makeRepoPath(ctx, repo) 50 + if err != nil { 51 + return nil, fmt.Errorf("failed to resolve repo at-uri: %w", err) 52 + } 53 + 54 + gr, err := git.PlainOpen(repoPath) 55 + if err != nil { 56 + return nil, fmt.Errorf("failed to open git repo: %w", err) 57 + } 58 + 59 + ref, err := gr.Branch(branchName) 60 + if err != nil { 61 + 
return nil, fmt.Errorf("getting branch '%s': %w", branchName, err) 62 + } 63 + 64 + commit, err := gr.Commit(ref.Hash()) 65 + if err != nil { 66 + return nil, fmt.Errorf("getting commit '%s': %w", ref.Hash(), err) 67 + } 68 + 69 + out := tangled.GitTempGetBranch_Output{ 70 + Name: ref.Name().Short(), 71 + Hash: ref.Hash().String(), 72 + When: commit.Author.When.Format(time.RFC3339), 73 + Author: &tangled.GitTempDefs_Signature{ 74 + Name: commit.Author.Name, 75 + Email: commit.Author.Email, 76 + When: commit.Author.When.Format(time.RFC3339), 77 + }, 78 + } 79 + 80 + if commit.Message != "" { 81 + out.Message = &commit.Message 82 + } 83 + 84 + return &out, nil 85 + }
+92
knotmirror/xrpc/git_get_tag.go
package xrpc

import (
	"context"
	"fmt"
	"net/http"

	"github.com/bluesky-social/indigo/atproto/atclient"
	"github.com/bluesky-social/indigo/atproto/syntax"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"tangled.org/core/knotserver/git"
	"tangled.org/core/types"
)

// GetTag returns metadata for a single tag of a mirrored repo.
// Query params: repo (at-uri, required), tag (name or full refs/tags/...
// reference, required).
func (x *Xrpc) GetTag(w http.ResponseWriter, r *http.Request) {
	var (
		repoQuery = r.URL.Query().Get("repo")
		tagName   = r.URL.Query().Get("tag")
	)

	repo, err := syntax.ParseATURI(repoQuery)
	if err != nil || repo.RecordKey() == "" {
		writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)})
		return
	}

	if tagName == "" {
		writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: "missing 'tag' parameter"})
		return
	}

	l := x.logger.With("repo", repo, "tag", tagName)

	out, err := x.getTag(r.Context(), repo, tagName)
	if err != nil {
		// TODO: better error return
		l.Error("failed to get tag", "err", err)
		writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to get tag"})
		return
	}
	writeJson(w, http.StatusOK, out)
}

// getTag looks up exactly one tag matching tagName and shapes it into the
// RepoTagResponse. The annotated-tag object is only attached when the tag
// has a non-zero Target hash.
func (x *Xrpc) getTag(ctx context.Context, repo syntax.ATURI, tagName string) (*types.RepoTagResponse, error) {
	repoPath, err := x.makeRepoPath(ctx, repo)
	if err != nil {
		return nil, fmt.Errorf("failed to resolve repo at-uri: %w", err)
	}

	gr, err := git.PlainOpen(repoPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open git repo: %w", err)
	}

	// if this is not already formatted as refs/tags/v0.1.0, then format it
	if !plumbing.ReferenceName(tagName).IsTag() {
		tagName = plumbing.NewTagReferenceName(tagName).String()
	}

	// query via the Tags pattern API and insist on an exact single match
	tag, err := func() (object.Tag, error) {
		tags, err := gr.Tags(&git.TagsOptions{
			Pattern: tagName,
		})
		if err != nil {
			return object.Tag{}, err
		}
		if len(tags) != 1 {
			return object.Tag{}, fmt.Errorf("expected 1 tag to be returned, got %d tags", len(tags))
		}
		return tags[0], nil
	}()
	if err != nil {
		return nil, fmt.Errorf("getting tag: %w", err)
	}

	// NOTE(review): a non-zero Target is taken to mean an annotated tag —
	// confirm against git.TagsOptions' behavior for lightweight tags
	var target *object.Tag
	if tag.Target != plumbing.ZeroHash {
		target = &tag
	}

	return &types.RepoTagResponse{
		Tag: &types.TagReference{
			Tag: target,
			Reference: types.Reference{
				Name: tag.Name,
				Hash: tag.Hash.String(),
			},
			Message: tag.Message,
		},
	}, nil
}
+118
knotmirror/xrpc/git_get_tree.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "net/http" 7 + "path/filepath" 8 + "time" 9 + "unicode/utf8" 10 + 11 + "github.com/bluesky-social/indigo/atproto/atclient" 12 + "github.com/bluesky-social/indigo/atproto/syntax" 13 + "tangled.org/core/api/tangled" 14 + "tangled.org/core/appview/pages/markup" 15 + "tangled.org/core/knotserver/git" 16 + ) 17 + 18 + func (x *Xrpc) GetTree(w http.ResponseWriter, r *http.Request) { 19 + var ( 20 + repoQuery = r.URL.Query().Get("repo") 21 + ref = r.URL.Query().Get("ref") // ref can be empty (git.Open handles this) 22 + path = r.URL.Query().Get("path") // path can be empty (defaults to root) 23 + ) 24 + 25 + repo, err := syntax.ParseATURI(repoQuery) 26 + if err != nil || repo.RecordKey() == "" { 27 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)}) 28 + return 29 + } 30 + 31 + l := x.logger.With("repo", repo, "ref", ref, "path", path) 32 + 33 + out, err := x.getTree(r.Context(), repo, ref, path) 34 + if err != nil { 35 + // TODO: better error return 36 + l.Error("failed to get tree", "err", err) 37 + writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to get tree"}) 38 + return 39 + } 40 + writeJson(w, http.StatusOK, out) 41 + } 42 + 43 + func (x *Xrpc) getTree(ctx context.Context, repo syntax.ATURI, ref, path string) (*tangled.GitTempGetTree_Output, error) { 44 + repoPath, err := x.makeRepoPath(ctx, repo) 45 + if err != nil { 46 + return nil, fmt.Errorf("failed to resolve repo at-uri: %w", err) 47 + } 48 + 49 + gr, err := git.Open(repoPath, ref) 50 + if err != nil { 51 + return nil, fmt.Errorf("opening git repo: %w", err) 52 + } 53 + 54 + files, err := gr.FileTree(ctx, path) 55 + if err != nil { 56 + return nil, fmt.Errorf("reading file tree: %w", err) 57 + } 58 + 59 + // if any of these files are a readme candidate, pass along its blob contents too 60 + var 
readmeFileName string 61 + var readmeContents string 62 + for _, file := range files { 63 + if markup.IsReadmeFile(file.Name) { 64 + contents, err := gr.RawContent(filepath.Join(path, file.Name)) 65 + if err != nil { 66 + x.logger.Error("failed to read contents of file", "path", path, "file", file.Name) 67 + } 68 + 69 + if utf8.Valid(contents) { 70 + readmeFileName = file.Name 71 + readmeContents = string(contents) 72 + break 73 + } 74 + } 75 + } 76 + 77 + // convert NiceTree -> tangled.RepoTempGetTree_TreeEntry 78 + treeEntries := make([]*tangled.GitTempGetTree_TreeEntry, len(files)) 79 + for i, file := range files { 80 + entry := &tangled.GitTempGetTree_TreeEntry{ 81 + Name: file.Name, 82 + Mode: file.Mode, 83 + Size: file.Size, 84 + } 85 + if file.LastCommit != nil { 86 + entry.Last_commit = &tangled.GitTempGetTree_LastCommit{ 87 + Hash: file.LastCommit.Hash.String(), 88 + Message: file.LastCommit.Message, 89 + When: file.LastCommit.When.Format(time.RFC3339), 90 + } 91 + } 92 + treeEntries[i] = entry 93 + } 94 + 95 + var parentPtr *string 96 + if path != "" { 97 + parentPtr = &path 98 + } 99 + 100 + var dotdotPtr *string 101 + if path != "" { 102 + dotdot := filepath.Dir(path) 103 + if dotdot != "." { 104 + dotdotPtr = &dotdot 105 + } 106 + } 107 + 108 + return &tangled.GitTempGetTree_Output{ 109 + Ref: ref, 110 + Parent: parentPtr, 111 + Dotdot: dotdotPtr, 112 + Files: treeEntries, 113 + Readme: &tangled.GitTempGetTree_Readme{ 114 + Filename: readmeFileName, 115 + Contents: readmeContents, 116 + }, 117 + }, nil 118 + }
+95
knotmirror/xrpc/git_list_branches.go
package xrpc

import (
	"context"
	"fmt"
	"net/http"
	"path/filepath"
	"strconv"

	"github.com/bluesky-social/indigo/atproto/atclient"
	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/knotserver/git"
	"tangled.org/core/types"
)

// ListBranches lists branches of a mirrored repo with offset pagination.
// Query params: repo (at-uri, required), limit (1-1000, default 50),
// cursor (non-negative offset, default 0).
func (x *Xrpc) ListBranches(w http.ResponseWriter, r *http.Request) {
	var (
		repoQuery   = r.URL.Query().Get("repo")
		limitQuery  = r.URL.Query().Get("limit")
		cursorQuery = r.URL.Query().Get("cursor")
	)

	repo, err := syntax.ParseATURI(repoQuery)
	if err != nil || repo.RecordKey() == "" {
		writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)})
		return
	}

	limit := 50
	if limitQuery != "" {
		limit, err = strconv.Atoi(limitQuery)
		if err != nil || limit < 1 || limit > 1000 {
			writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("limit parameter invalid: %s", limitQuery)})
			return
		}
	}

	var cursor int64
	if cursorQuery != "" {
		cursor, err = strconv.ParseInt(cursorQuery, 10, 64)
		if err != nil || cursor < 0 {
			writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("cursor parameter invalid: %s", cursorQuery)})
			return
		}
	}

	l := x.logger.With("repo", repoQuery, "limit", limit, "cursor", cursor)

	out, err := x.listBranches(r.Context(), repo, limit, cursor)
	if err != nil {
		// TODO: better error return
		l.Error("failed to list branches", "err", err)
		writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to list branches"})
		return
	}
	writeJson(w, http.StatusOK, out)
}

// listBranches opens the mirrored repo and pages through its branches using
// the cursor as a plain offset.
func (x *Xrpc) listBranches(ctx context.Context, repo syntax.ATURI, limit int, cursor int64) (*types.RepoBranchesResponse, error) {
	repoPath, err := x.makeRepoPath(ctx, repo)
	if err != nil {
		return nil, fmt.Errorf("resolving repo at-uri: %w", err)
	}

	gr, err := git.PlainOpen(repoPath)
	if err != nil {
		return nil, fmt.Errorf("opening git repo: %w", err)
	}

	branches, err := gr.Branches(&git.BranchesOptions{
		Limit:  limit,
		Offset: int(cursor),
	})
	if err != nil {
		return nil, fmt.Errorf("listing git branches: %w", err)
	}

	return &types.RepoBranchesResponse{
		// TODO: include default branch and cursor
		Branches: branches,
	}, nil
}

// makeRepoPath maps a repo at-uri onto its on-disk mirror location:
// <GitRepoBasePath>/<did>/<rkey>. The at-uri authority (handle or did) is
// resolved to a DID so the path is stable across handle changes.
func (x *Xrpc) makeRepoPath(ctx context.Context, repo syntax.ATURI) (string, error) {
	id, err := x.resolver.ResolveIdent(ctx, repo.Authority().String())
	if err != nil {
		return "", err
	}

	return filepath.Join(
		x.cfg.GitRepoBasePath,
		id.DID.String(),
		repo.RecordKey().String(),
	), nil
}
+95
knotmirror/xrpc/git_list_commits.go
package xrpc

import (
	"context"
	"fmt"
	"net/http"
	"strconv"

	"github.com/bluesky-social/indigo/atproto/atclient"
	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/knotserver/git"
	"tangled.org/core/types"
)

// ListCommits lists commit history of a mirrored repo at ref with offset
// pagination. Query params: repo (at-uri, required), ref (optional),
// limit (1-1000, default 50), cursor (non-negative offset, default 0).
func (x *Xrpc) ListCommits(w http.ResponseWriter, r *http.Request) {
	var (
		repoQuery   = r.URL.Query().Get("repo")
		ref         = r.URL.Query().Get("ref") // ref can be empty (git.Open handles this)
		limitQuery  = r.URL.Query().Get("limit")
		cursorQuery = r.URL.Query().Get("cursor")
	)

	repo, err := syntax.ParseATURI(repoQuery)
	if err != nil || repo.RecordKey() == "" {
		writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)})
		return
	}

	limit := 50
	if limitQuery != "" {
		limit, err = strconv.Atoi(limitQuery)
		if err != nil || limit < 1 || limit > 1000 {
			writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("limit parameter invalid: %s", limitQuery)})
			return
		}
	}

	var cursor int64
	if cursorQuery != "" {
		cursor, err = strconv.ParseInt(cursorQuery, 10, 64)
		if err != nil || cursor < 0 {
			writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("cursor parameter invalid: %s", cursorQuery)})
			return
		}
	}

	l := x.logger.With("repo", repo, "ref", ref)

	out, err := x.listCommits(r.Context(), repo, ref, limit, cursor)
	if err != nil {
		// TODO: better error return
		l.Error("failed to list commits", "err", err)
		writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to list commits"})
		return
	}
	writeJson(w, http.StatusOK, out)
}

// listCommits pages through the repo's commit log (cursor is a plain offset)
// and converts each commit into the shared types.Commit shape, alongside the
// total commit count for pagination.
func (x *Xrpc) listCommits(ctx context.Context, repo syntax.ATURI, ref string, limit int, cursor int64) (*types.RepoLogResponse, error) {
	repoPath, err := x.makeRepoPath(ctx, repo)
	if err != nil {
		return nil, fmt.Errorf("resolving repo at-uri: %w", err)
	}

	gr, err := git.Open(repoPath, ref)
	if err != nil {
		return nil, fmt.Errorf("opening git repo: %w", err)
	}

	offset := int(cursor)

	commits, err := gr.Commits(offset, limit)
	if err != nil {
		return nil, fmt.Errorf("listing git commits: %w", err)
	}

	tcommits := make([]types.Commit, len(commits))
	for i, c := range commits {
		tcommits[i].FromGoGitCommit(c)
	}

	total, err := gr.TotalCommits()
	if err != nil {
		return nil, fmt.Errorf("counting total commits: %w", err)
	}

	return &types.RepoLogResponse{
		Commits: tcommits,
		Ref:     ref,
		// 1-based page derived from the offset; limit is validated >= 1 above
		Page:    (offset / limit) + 1,
		PerPage: limit,
		Total:   total,
		Log:     true,
	}, nil
}
+86
knotmirror/xrpc/git_list_languages.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "math" 7 + "net/http" 8 + "time" 9 + 10 + "github.com/bluesky-social/indigo/atproto/atclient" 11 + "github.com/bluesky-social/indigo/atproto/syntax" 12 + "tangled.org/core/api/tangled" 13 + "tangled.org/core/knotserver/git" 14 + ) 15 + 16 + func (x *Xrpc) ListLanguages(w http.ResponseWriter, r *http.Request) { 17 + var ( 18 + repoQuery = r.URL.Query().Get("repo") 19 + ref = r.URL.Query().Get("ref") 20 + ) 21 + l := x.logger.With("repo", repoQuery, "ref", ref) 22 + 23 + repo, err := syntax.ParseATURI(repoQuery) 24 + if err != nil || repo.RecordKey() == "" { 25 + l.Error("invalid repo at-uri", "err", err) 26 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)}) 27 + return 28 + } 29 + 30 + out, err := x.listLanguages(r.Context(), repo, ref) 31 + if err != nil { 32 + l.Error("failed to list languages", "err", err) 33 + writeErr(w, err) 34 + return 35 + } 36 + 37 + writeJson(w, http.StatusOK, out) 38 + } 39 + 40 + func (x *Xrpc) listLanguages(ctx context.Context, repo syntax.ATURI, ref string) (*tangled.GitTempListLanguages_Output, error) { 41 + repoPath, err := x.makeRepoPath(ctx, repo) 42 + if err != nil { 43 + return nil, fmt.Errorf("resolving repo at-uri: %w", err) 44 + } 45 + 46 + gr, err := git.Open(repoPath, ref) 47 + if err != nil { 48 + return nil, &atclient.APIError{StatusCode: http.StatusNotFound, Name: "RepoNotFound", Message: "failed to find git repo"} 49 + } 50 + 51 + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) 52 + defer cancel() 53 + 54 + sizes, err := gr.AnalyzeLanguages(ctx) 55 + if err != nil { 56 + return nil, fmt.Errorf("analyzing languages: %w", err) 57 + } 58 + 59 + return &tangled.GitTempListLanguages_Output{ 60 + Ref: ref, 61 + Languages: sizesToLanguages(sizes), 62 + }, nil 63 + } 64 + 65 + func sizesToLanguages(sizes git.LangBreakdown) []*tangled.GitTempListLanguages_Language { 66 
+ var apiLanguages []*tangled.GitTempListLanguages_Language 67 + var totalSize int64 68 + for _, size := range sizes { 69 + totalSize += size 70 + } 71 + 72 + for name, size := range sizes { 73 + percentagef64 := float64(size) / float64(totalSize) * 100 74 + percentage := math.Round(percentagef64) 75 + 76 + lang := &tangled.GitTempListLanguages_Language{ 77 + Name: name, 78 + Size: size, 79 + Percentage: int64(percentage), 80 + } 81 + 82 + apiLanguages = append(apiLanguages, lang) 83 + } 84 + 85 + return apiLanguages 86 + }
+98
knotmirror/xrpc/git_list_tags.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "net/http" 7 + "strconv" 8 + 9 + "github.com/bluesky-social/indigo/atproto/atclient" 10 + "github.com/bluesky-social/indigo/atproto/syntax" 11 + "github.com/go-git/go-git/v5/plumbing" 12 + "github.com/go-git/go-git/v5/plumbing/object" 13 + "tangled.org/core/knotserver/git" 14 + "tangled.org/core/types" 15 + ) 16 + 17 + func (x *Xrpc) ListTags(w http.ResponseWriter, r *http.Request) { 18 + var ( 19 + repoQuery = r.URL.Query().Get("repo") 20 + limitQuery = r.URL.Query().Get("limit") 21 + cursorQuery = r.URL.Query().Get("cursor") 22 + ) 23 + 24 + repo, err := syntax.ParseATURI(repoQuery) 25 + if err != nil || repo.RecordKey() == "" { 26 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", repoQuery)}) 27 + return 28 + } 29 + 30 + limit := 50 31 + if limitQuery != "" { 32 + limit, err = strconv.Atoi(limitQuery) 33 + if err != nil || limit < 1 || limit > 1000 { 34 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("limit parameter invalid: %s", limitQuery)}) 35 + return 36 + } 37 + } 38 + 39 + var cursor int64 40 + if cursorQuery != "" { 41 + cursor, err = strconv.ParseInt(cursorQuery, 10, 64) 42 + if err != nil || cursor < 0 { 43 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("cursor parameter invalid: %s", cursorQuery)}) 44 + return 45 + } 46 + } 47 + 48 + l := x.logger.With("repo", repo, "limit", limit, "cursor", cursor) 49 + 50 + out, err := x.listTags(r.Context(), repo, limit, cursor) 51 + if err != nil { 52 + // TODO: better error return 53 + l.Error("failed to list tags", "err", err) 54 + writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "failed to list tags"}) 55 + return 56 + } 57 + writeJson(w, http.StatusOK, out) 58 + } 59 + 60 + func (x *Xrpc) listTags(ctx context.Context, 
repo syntax.ATURI, limit int, cursor int64) (*types.RepoTagsResponse, error) { 61 + repoPath, err := x.makeRepoPath(ctx, repo) 62 + if err != nil { 63 + return nil, fmt.Errorf("failed to resolve repo at-uri: %w", err) 64 + } 65 + 66 + gr, err := git.PlainOpen(repoPath) 67 + if err != nil { 68 + return nil, fmt.Errorf("failed to open git repo: %w", err) 69 + } 70 + 71 + tags, err := gr.Tags(&git.TagsOptions{ 72 + Limit: limit, 73 + Offset: int(cursor), 74 + }) 75 + if err != nil { 76 + return nil, fmt.Errorf("failed to get git tags: %w", err) 77 + } 78 + 79 + rtags := make([]*types.TagReference, len(tags)) 80 + for i, tag := range tags { 81 + var target *object.Tag 82 + if tag.Target != plumbing.ZeroHash { 83 + target = &tag 84 + } 85 + rtags[i] = &types.TagReference{ 86 + Reference: types.Reference{ 87 + Name: tag.Name, 88 + Hash: tag.Hash.String(), 89 + }, 90 + Tag: target, 91 + Message: tag.Message, 92 + } 93 + } 94 + 95 + return &types.RepoTagsResponse{ 96 + Tags: rtags, 97 + }, nil 98 + }
+104
knotmirror/xrpc/sync_request_crawl.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "encoding/json" 5 + "fmt" 6 + "net/http" 7 + "strings" 8 + 9 + "github.com/bluesky-social/indigo/api/atproto" 10 + "github.com/bluesky-social/indigo/atproto/atclient" 11 + "github.com/bluesky-social/indigo/atproto/syntax" 12 + "github.com/bluesky-social/indigo/xrpc" 13 + "tangled.org/core/api/tangled" 14 + "tangled.org/core/knotmirror/db" 15 + "tangled.org/core/knotmirror/hostutil" 16 + "tangled.org/core/knotmirror/models" 17 + ) 18 + 19 + func (x *Xrpc) RequestCrawl(w http.ResponseWriter, r *http.Request) { 20 + var input tangled.SyncRequestCrawl_Input 21 + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { 22 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: "failed to decode json body"}) 23 + return 24 + } 25 + 26 + ctx := r.Context() 27 + 28 + l := x.logger.With("input", input) 29 + 30 + hostname, noSSL, err := hostutil.ParseHostname(input.Hostname) 31 + if err != nil { 32 + l.Error("invalid hostname", "err", err) 33 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("hostname field empty or invalid: %s", input.Hostname)}) 34 + return 35 + } 36 + 37 + // TODO: check if host is Knot with knot.describeServer 38 + 39 + // store given repoAt to db 40 + // this will allow knotmirror to ingest repo creation event bypassing tap. 
41 + // this step won't be needed once we introduce did-for-repo 42 + // TODO(boltless): remove this section 43 + if input.EnsureRepo != nil { 44 + repoAt, err := syntax.ParseATURI(*input.EnsureRepo) 45 + if err != nil { 46 + l.Error("invalid repo at-uri", "err", err) 47 + writeJson(w, http.StatusBadRequest, atclient.ErrorBody{Name: "BadRequest", Message: fmt.Sprintf("repo parameter invalid: %s", *input.EnsureRepo)}) 48 + return 49 + } 50 + owner, err := x.resolver.ResolveIdent(ctx, repoAt.Authority().String()) 51 + if err != nil || owner.Handle.IsInvalidHandle() { 52 + l.Error("failed to resolve ident", "err", err, "owner", repoAt.Authority().String()) 53 + writeErr(w, fmt.Errorf("failed to resolve repo owner")) 54 + return 55 + } 56 + xrpcc := xrpc.Client{Host: owner.PDSEndpoint()} 57 + out, err := atproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String()) 58 + if err != nil { 59 + l.Error("failed to get repo record", "err", err, "repo", repoAt) 60 + writeErr(w, fmt.Errorf("failed to get repo record")) 61 + return 62 + } 63 + record := out.Value.Val.(*tangled.Repo) 64 + 65 + knotUrl := record.Knot 66 + if !strings.Contains(record.Knot, "://") { 67 + if noSSL { 68 + knotUrl = "http://" + knotUrl 69 + } else { 70 + knotUrl = "https://" + knotUrl 71 + } 72 + } 73 + 74 + repo := &models.Repo{ 75 + Did: owner.DID, 76 + Rkey: repoAt.RecordKey(), 77 + Cid: (*syntax.CID)(out.Cid), 78 + Name: record.Name, 79 + KnotDomain: knotUrl, 80 + State: models.RepoStatePending, 81 + ErrorMsg: "", 82 + RetryAfter: 0, 83 + RetryCount: 0, 84 + } 85 + 86 + if err := db.UpsertRepo(ctx, x.db, repo); err != nil { 87 + l.Error("failed to upsert repo", "err", err) 88 + writeErr(w, err) 89 + return 90 + } 91 + } 92 + 93 + // subscribe to requested host 94 + if !x.ks.CheckIfSubscribed(hostname) { 95 + if err := x.ks.SubscribeHost(ctx, hostname, noSSL); err != nil { 96 + // TODO(boltless): return HostBanned on banned hosts 97 + 
l.Error("failed to subscribe host", "err", err) 98 + writeErr(w, err) 99 + return 100 + } 101 + } 102 + 103 + w.WriteHeader(http.StatusOK) 104 + }
+73
knotmirror/xrpc/xrpc.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "database/sql" 5 + "encoding/json" 6 + "errors" 7 + "log/slog" 8 + "net/http" 9 + 10 + "github.com/bluesky-social/indigo/atproto/atclient" 11 + "github.com/go-chi/chi/v5" 12 + "tangled.org/core/api/tangled" 13 + "tangled.org/core/idresolver" 14 + "tangled.org/core/knotmirror/config" 15 + "tangled.org/core/knotmirror/knotstream" 16 + "tangled.org/core/log" 17 + ) 18 + 19 + type Xrpc struct { 20 + cfg *config.Config 21 + db *sql.DB 22 + resolver *idresolver.Resolver 23 + ks *knotstream.KnotStream 24 + logger *slog.Logger 25 + } 26 + 27 + func New(logger *slog.Logger, cfg *config.Config, db *sql.DB, resolver *idresolver.Resolver, ks *knotstream.KnotStream) *Xrpc { 28 + return &Xrpc{ 29 + cfg, 30 + db, 31 + resolver, 32 + ks, 33 + log.SubLogger(logger, "xrpc"), 34 + } 35 + } 36 + 37 + func (x *Xrpc) Router() http.Handler { 38 + r := chi.NewRouter() 39 + 40 + r.Get("/"+tangled.GitTempGetArchiveNSID, x.GetArchive) 41 + r.Get("/"+tangled.GitTempGetBlobNSID, x.GetBlob) 42 + r.Get("/"+tangled.GitTempGetBranchNSID, x.GetBranch) 43 + // r.Get("/"+tangled.GitTempGetCommitNSID, x.GetCommit) // todo 44 + // r.Get("/"+tangled.GitTempGetDiffNSID, x.GetDiff) // todo 45 + // r.Get("/"+tangled.GitTempGetEntityNSID, x.GetEntity) // todo 46 + // r.Get("/"+tangled.GitTempGetHeadNSID, x.GetHead) // todo 47 + r.Get("/"+tangled.GitTempGetTagNSID, x.GetTag) // using types.Response 48 + r.Get("/"+tangled.GitTempGetTreeNSID, x.GetTree) 49 + r.Get("/"+tangled.GitTempListBranchesNSID, x.ListBranches) // wip, unknown output 50 + r.Get("/"+tangled.GitTempListCommitsNSID, x.ListCommits) 51 + r.Get("/"+tangled.GitTempListLanguagesNSID, x.ListLanguages) 52 + r.Get("/"+tangled.GitTempListTagsNSID, x.ListTags) 53 + r.Post("/"+tangled.SyncRequestCrawlNSID, x.RequestCrawl) 54 + 55 + return r 56 + } 57 + 58 + func writeJson(w http.ResponseWriter, status int, response any) error { 59 + w.Header().Set("Content-Type", "application/json") 60 + w.WriteHeader(status) 
61 + if err := json.NewEncoder(w).Encode(response); err != nil { 62 + return err 63 + } 64 + return nil 65 + } 66 + 67 + func writeErr(w http.ResponseWriter, err error) error { 68 + var apiErr *atclient.APIError 69 + if errors.As(err, &apiErr) { 70 + return writeJson(w, apiErr.StatusCode, atclient.ErrorBody{Name: apiErr.Name, Message: apiErr.Message}) 71 + } 72 + return writeJson(w, http.StatusInternalServerError, atclient.ErrorBody{Name: "InternalServerError", Message: "internal server error"}) 73 + }
+5 -4
knotserver/config/config.go
··· 39 39 } 40 40 41 41 type Config struct { 42 + Repo Repo `env:",prefix=KNOT_REPO_"` 43 + Server Server `env:",prefix=KNOT_SERVER_"` 44 + Git Git `env:",prefix=KNOT_GIT_"` 45 + AppViewEndpoint string `env:"APPVIEW_ENDPOINT, default=https://tangled.org"` 46 + KnotMirrors []string `env:"KNOT_MIRRORS, default=https://mirror.tangled.network"` 42 - Repo Repo `env:",prefix=KNOT_REPO_"` 43 - Server Server `env:",prefix=KNOT_SERVER_"` 44 - Git Git `env:",prefix=KNOT_GIT_"` 45 - AppViewEndpoint string `env:"APPVIEW_ENDPOINT, default=https://tangled.org"` 46 47 } 47 48 48 49 func Load(ctx context.Context) (*Config, error) {
knotserver/db/branch_rules.go

This file has not been changed.

+1
knotserver/db/db.go
··· 22 22 "_journal_mode=WAL", 23 23 "_synchronous=NORMAL", 24 24 "_auto_vacuum=incremental", 25 + "_busy_timeout=5000", 25 26 } 26 27 27 28 logger := log.FromContext(ctx)
+29
knotserver/events.go
··· 7 7 "strconv" 8 8 "time" 9 9 10 + "github.com/bluesky-social/indigo/xrpc" 10 11 "github.com/gorilla/websocket" 12 + "tangled.org/core/api/tangled" 11 13 "tangled.org/core/log" 12 14 ) 13 15 ··· 61 63 return 62 64 } 63 65 66 + // try request crawl when connection closed 67 + defer func() { 68 + go func() { 69 + retryCtx, retryCancel := context.WithTimeout(context.Background(), 10*time.Second) 70 + defer retryCancel() 71 + if err := h.requestCrawl(retryCtx); err != nil { 72 + l.Error("error requesting crawls", "err", err) 73 + } 74 + }() 75 + }() 76 + 64 77 for { 65 78 // wait for new data or timeout 66 79 select { ··· 118 131 119 132 return nil 120 133 } 134 + 135 + func (h *Knot) requestCrawl(ctx context.Context) error { 136 + h.l.Info("requesting crawl", "mirrors", h.c.KnotMirrors) 137 + input := &tangled.SyncRequestCrawl_Input{ 138 + Hostname: h.c.Server.Hostname, 139 + } 140 + for _, knotmirror := range h.c.KnotMirrors { 141 + xrpcc := xrpc.Client{Host: knotmirror} 142 + if err := tangled.SyncRequestCrawl(ctx, &xrpcc, input); err != nil { 143 + h.l.Error("error requesting crawl", "err", err) 144 + } else { 145 + h.l.Info("crawl requested successfully") 146 + } 147 + } 148 + return nil 149 + }
+14
knotserver/git/git.go
··· 199 199 return io.ReadAll(reader) 200 200 } 201 201 202 + func (g *GitRepo) File(path string) (*object.File, error) { 203 + c, err := g.r.CommitObject(g.h) 204 + if err != nil { 205 + return nil, fmt.Errorf("commit object: %w", err) 206 + } 207 + 208 + tree, err := c.Tree() 209 + if err != nil { 210 + return nil, fmt.Errorf("file tree: %w", err) 211 + } 212 + 213 + return tree.File(path) 214 + } 215 + 202 216 // read and parse .gitmodules 203 217 func (g *GitRepo) Submodules() (*config.Modules, error) { 204 218 c, err := g.r.CommitObject(g.h)
+3 -2
knotserver/git/merge.go
··· 22 22 } 23 23 24 24 var ( 25 + mergeCheckCache MergeCheckCache 26 + conflictErrorRegex = regexp.MustCompile(`^error: (.*):(\d+): (.*)$`) 25 - mergeCheckCache MergeCheckCache 26 27 ) 27 28 28 29 func init() { ··· 408 409 continue 409 410 } 410 411 412 + if match := conflictErrorRegex.FindStringSubmatch(line); len(match) >= 4 { 411 - if match := regexp.MustCompile(`^error: (.*):(\d+): (.*)$`).FindStringSubmatch(line); len(match) >= 4 { 412 413 if currentFile == "" { 413 414 currentFile = match[1] 414 415 }
knotserver/internal.go

This file has not been changed.

+3 -2
knotserver/router.go
··· 83 83 84 84 r.Route("/{did}", func(r chi.Router) { 85 85 r.Use(h.resolveDidRedirect) 86 - r.Use(h.resolveRepo) 87 86 r.Route("/{name}", func(r chi.Router) { 87 + r.Use(h.resolveRepo) 88 + 88 89 // routes for git operations 89 90 r.Get("/info/refs", h.InfoRefs) 90 91 r.Post("/git-upload-archive", h.UploadArchive) ··· 176 177 return 177 178 } 178 179 180 + ctx := context.WithValue(r.Context(), ctxRepoPathKey{}, repoPath) 179 - ctx := context.WithValue(r.Context(), "repoPath", repoPath) 180 181 next.ServeHTTP(w, r.WithContext(ctx)) 181 182 }) 182 183 }
+16
knotserver/server.go
··· 5 5 "fmt" 6 6 "net/http" 7 7 8 + "github.com/bluesky-social/indigo/xrpc" 8 9 "github.com/urfave/cli/v3" 9 10 "tangled.org/core/api/tangled" 10 11 "tangled.org/core/hook" ··· 97 98 98 99 logger.Info("starting internal server", "address", c.Server.InternalListenAddr) 99 100 go http.ListenAndServe(c.Server.InternalListenAddr, imux) 101 + 102 + // TODO(boltless): too lazy here. should clear this up 103 + go func() { 104 + input := &tangled.SyncRequestCrawl_Input{ 105 + Hostname: c.Server.Hostname, 106 + } 107 + for _, knotmirror := range c.KnotMirrors { 108 + xrpcc := xrpc.Client{Host: knotmirror} 109 + if err := tangled.SyncRequestCrawl(ctx, &xrpcc, input); err != nil { 110 + logger.Error("error requesting crawl", "err", err) 111 + } else { 112 + logger.Info("crawl requested successfully") 113 + } 114 + } 115 + }() 100 116 101 117 logger.Info("starting main server", "address", c.Server.ListenAddr) 102 118 logger.Error("server error", "error", http.ListenAndServe(c.Server.ListenAddr, mux))
+30
knotserver/xrpc/create_repo.go
··· 1 1 package xrpc 2 2 3 3 import ( 4 + "context" 4 5 "encoding/json" 5 6 "errors" 6 7 "fmt" 7 8 "net/http" 8 9 "path/filepath" 9 10 "strings" 11 + "time" 10 12 11 13 comatproto "github.com/bluesky-social/indigo/api/atproto" 12 14 "github.com/bluesky-social/indigo/atproto/syntax" ··· 120 122 repoPath, 121 123 ) 122 124 125 + // HACK: request crawl for this repository 126 + // Users won't want to sync entire network from their local knotmirror. 127 + // Therefore, to bypass the local tap, requestCrawl directly to the knotmirror. 128 + go func() { 129 + if h.Config.Server.Dev { 130 + repoAt := fmt.Sprintf("at://%s/%s/%s", actorDid, tangled.RepoNSID, rkey) 131 + rCtx, rCancel := context.WithTimeout(context.Background(), 10*time.Second) 132 + defer rCancel() 133 + h.requestCrawl(rCtx, &tangled.SyncRequestCrawl_Input{ 134 + Hostname: h.Config.Server.Hostname, 135 + EnsureRepo: &repoAt, 136 + }) 137 + } 138 + }() 139 + 123 140 w.WriteHeader(http.StatusOK) 141 + } 142 + 143 + func (h *Xrpc) requestCrawl(ctx context.Context, input *tangled.SyncRequestCrawl_Input) error { 144 + h.Logger.Info("requesting crawl", "mirrors", h.Config.KnotMirrors) 145 + for _, knotmirror := range h.Config.KnotMirrors { 146 + xrpcc := xrpc.Client{Host: knotmirror} 147 + if err := tangled.SyncRequestCrawl(ctx, &xrpcc, input); err != nil { 148 + h.Logger.Error("error requesting crawl", "err", err) 149 + } else { 150 + h.Logger.Info("crawl requested successfully") 151 + } 152 + } 153 + return nil 124 154 } 125 155 126 156 func validateRepoName(name string) error {
knotserver/xrpc/create_repo_branch_rule.go

This file has not been changed.

knotserver/xrpc/delete_repo_branch_rule.go

This file has not been changed.

knotserver/xrpc/list_branch_rules.go

This file has not been changed.

knotserver/xrpc/update_repo_branch_rule.go

This file has not been changed.

knotserver/xrpc/xrpc.go

This file has not been changed.

+64
lexicons/git/temp/analyzeMerge.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.analyzeMerge", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "description": "Check if a merge is possible between two branches", 8 + "parameters": { 9 + "type": "params", 10 + "required": ["repo", "patch", "branch"], 11 + "properties": { 12 + "repo": { 13 + "type": "string", 14 + "format": "at-uri", 15 + "description": "AT-URI of the repository" 16 + }, 17 + "patch": { 18 + "type": "string", 19 + "description": "Patch or pull request to check for merge conflicts" 20 + }, 21 + "branch": { 22 + "type": "string", 23 + "description": "Target branch to merge into" 24 + } 25 + } 26 + }, 27 + "output": { 28 + "encoding": "application/json", 29 + "schema": { 30 + "type": "object", 31 + "required": ["is_conflicted"], 32 + "properties": { 33 + "is_conflicted": { 34 + "type": "boolean", 35 + "description": "Whether the merge has conflicts" 36 + }, 37 + "conflicts": { 38 + "type": "array", 39 + "description": "List of files with merge conflicts", 40 + "items": { 41 + "type": "ref", 42 + "ref": "#conflictInfo" 43 + } 44 + } 45 + } 46 + } 47 + } 48 + }, 49 + "conflictInfo": { 50 + "type": "object", 51 + "required": ["filename", "reason"], 52 + "properties": { 53 + "filename": { 54 + "type": "string", 55 + "description": "Name of the conflicted file" 56 + }, 57 + "reason": { 58 + "type": "string", 59 + "description": "Reason for the conflict" 60 + } 61 + } 62 + } 63 + } 64 + }
+112
lexicons/git/temp/defs.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.defs", 4 + "defs": { 5 + "blob": { 6 + "type": "object", 7 + "description": "blob metadata. This object doesn't include the blob content", 8 + "required": ["name", "mode", "size", "lastCommit"], 9 + "properties": { 10 + "name": { 11 + "type": "string", 12 + "description": "The file name" 13 + }, 14 + "mode": { 15 + "type": "string" 16 + }, 17 + "size": { 18 + "type": "integer", 19 + "description": "File size in bytes" 20 + }, 21 + "lastCommit": { 22 + "type": "ref", 23 + "ref": "#commit" 24 + }, 25 + "submodule": { 26 + "type": "ref", 27 + "ref": "#submodule", 28 + "description": "Submodule information if path is a submodule" 29 + } 30 + } 31 + }, 32 + "branch": { 33 + "type": "object", 34 + "required": ["name", "commit"], 35 + "properties": { 36 + "name": { 37 + "type": "string", 38 + "description": "branch name" 39 + }, 40 + "commit": { 41 + "type": "ref", 42 + "ref": "#commit", 43 + "description": "hydrated commit object" 44 + } 45 + } 46 + }, 47 + "tag": { 48 + "type": "object", 49 + "required": ["name", "tagger", "target"], 50 + "properties": { 51 + "name": { 52 + "type": "string", 53 + "description": "tag name" 54 + }, 55 + "tagger": { "type": "ref", "ref": "#signature" }, 56 + "message": { "type": "string" }, 57 + "target": { "type": "unknown" } 58 + } 59 + }, 60 + "commit": { 61 + "type": "object", 62 + "required": ["hash", "author", "committer", "message", "tree"], 63 + "properties": { 64 + "hash": { "type": "ref", "ref": "#hash" }, 65 + "author": { "type": "ref", "ref": "#signature" }, 66 + "committer": { "type": "ref", "ref": "#signature" }, 67 + "message": { "type": "string" }, 68 + "tree": { "type": "ref", "ref": "#hash" } 69 + } 70 + }, 71 + "hash": { 72 + "type": "string" 73 + }, 74 + "signature": { 75 + "type": "object", 76 + "required": ["name", "email", "when"], 77 + "properties": { 78 + "name": { 79 + "type": "string", 80 + "description": "Person name" 81 + }, 82 + "email": { 83 + "type": 
"string", 84 + "description": "Person email" 85 + }, 86 + "when": { 87 + "type": "string", 88 + "format": "datetime", 89 + "description": "Timestamp of the signature" 90 + } 91 + } 92 + }, 93 + "submodule": { 94 + "type": "object", 95 + "required": ["name", "url"], 96 + "properties": { 97 + "name": { 98 + "type": "string", 99 + "description": "Submodule name" 100 + }, 101 + "url": { 102 + "type": "string", 103 + "description": "Submodule repository URL" 104 + }, 105 + "branch": { 106 + "type": "string", 107 + "description": "Branch to track in the submodule" 108 + } 109 + } 110 + } 111 + } 112 + }
+56
lexicons/git/temp/getArchive.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getArchive", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo", "ref"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "ref": { 17 + "type": "string", 18 + "description": "Git reference (branch, tag, or commit SHA)" 19 + }, 20 + "format": { 21 + "type": "string", 22 + "description": "Archive format", 23 + "enum": ["tar", "zip", "tar.gz", "tar.bz2", "tar.xz"], 24 + "default": "tar.gz" 25 + }, 26 + "prefix": { 27 + "type": "string", 28 + "description": "Prefix for files in the archive" 29 + } 30 + } 31 + }, 32 + "output": { 33 + "encoding": "*/*", 34 + "description": "Binary archive data" 35 + }, 36 + "errors": [ 37 + { 38 + "name": "RepoNotFound", 39 + "description": "Repository not found or access denied" 40 + }, 41 + { 42 + "name": "RefNotFound", 43 + "description": "Git reference not found" 44 + }, 45 + { 46 + "name": "InvalidRequest", 47 + "description": "Invalid request parameters" 48 + }, 49 + { 50 + "name": "ArchiveError", 51 + "description": "Failed to create archive" 52 + } 53 + ] 54 + } 55 + } 56 + }
+47
lexicons/git/temp/getBlob.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getBlob", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo", "path"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "ref": { 17 + "type": "string", 18 + "description": "Git reference (branch, tag, or commit SHA)", 19 + "default": "HEAD" 20 + }, 21 + "path": { 22 + "type": "string", 23 + "description": "Path within the repository tree" 24 + } 25 + } 26 + }, 27 + "output": { 28 + "encoding": "*/*", 29 + "description": "raw blob served in octet-stream" 30 + }, 31 + "errors": [ 32 + { 33 + "name": "RepoNotFound", 34 + "description": "Repository not found or access denied" 35 + }, 36 + { 37 + "name": "BlobNotFound", 38 + "description": "Blob not found" 39 + }, 40 + { 41 + "name": "InvalidRequest", 42 + "description": "Invalid request parameters" 43 + } 44 + ] 45 + } 46 + } 47 + }
+68
lexicons/git/temp/getBranch.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getBranch", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo", "name"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "name": { 17 + "type": "string", 18 + "description": "Branch name to get information for" 19 + } 20 + } 21 + }, 22 + "output": { 23 + "encoding": "application/json", 24 + "schema": { 25 + "type": "object", 26 + "required": ["name", "hash", "when"], 27 + "properties": { 28 + "name": { 29 + "type": "string", 30 + "description": "Branch name" 31 + }, 32 + "hash": { 33 + "type": "string", 34 + "description": "Latest commit hash on this branch" 35 + }, 36 + "when": { 37 + "type": "string", 38 + "format": "datetime", 39 + "description": "Timestamp of latest commit" 40 + }, 41 + "message": { 42 + "type": "string", 43 + "description": "Latest commit message" 44 + }, 45 + "author": { 46 + "type": "ref", 47 + "ref": "sh.tangled.git.temp.defs#signature" 48 + } 49 + } 50 + } 51 + }, 52 + "errors": [ 53 + { 54 + "name": "RepoNotFound", 55 + "description": "Repository not found or access denied" 56 + }, 57 + { 58 + "name": "BranchNotFound", 59 + "description": "Branch not found" 60 + }, 61 + { 62 + "name": "InvalidRequest", 63 + "description": "Invalid request parameters" 64 + } 65 + ] 66 + } 67 + } 68 + }
+46
lexicons/git/temp/getCommit.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getCommit", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "description": "resolve commit from given ref", 8 + "parameters": { 9 + "type": "params", 10 + "required": ["repo", "ref"], 11 + "properties": { 12 + "repo": { 13 + "type": "string", 14 + "format": "at-uri", 15 + "description": "AT-URI of the repository" 16 + }, 17 + "ref": { 18 + "type": "string", 19 + "description": "reference name to resolve" 20 + } 21 + } 22 + }, 23 + "output": { 24 + "encoding": "application/json", 25 + "schema": { 26 + "type": "ref", 27 + "ref": "sh.tangled.git.temp.defs#commit" 28 + } 29 + }, 30 + "errors": [ 31 + { 32 + "name": "RepoNotFound", 33 + "description": "Repository not found or access denied" 34 + }, 35 + { 36 + "name": "CommitNotFound", 37 + "description": "Commit not found" 38 + }, 39 + { 40 + "name": "InvalidRequest", 41 + "description": "Invalid request parameters" 42 + } 43 + ] 44 + } 45 + } 46 + }
+50
lexicons/git/temp/getDiff.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getDiff", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo", "rev1", "rev2"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "rev1": { 17 + "type": "string", 18 + "description": "First revision (commit, branch, or tag)" 19 + }, 20 + "rev2": { 21 + "type": "string", 22 + "description": "Second revision (commit, branch, or tag)" 23 + } 24 + } 25 + }, 26 + "output": { 27 + "encoding": "*/*", 28 + "description": "Compare output in application/json" 29 + }, 30 + "errors": [ 31 + { 32 + "name": "RepoNotFound", 33 + "description": "Repository not found or access denied" 34 + }, 35 + { 36 + "name": "RevisionNotFound", 37 + "description": "One or both revisions not found" 38 + }, 39 + { 40 + "name": "InvalidRequest", 41 + "description": "Invalid request parameters" 42 + }, 43 + { 44 + "name": "CompareError", 45 + "description": "Failed to compare revisions" 46 + } 47 + ] 48 + } 49 + } 50 + }
+51
lexicons/git/temp/getEntity.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getEntity", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "description": "get metadata of blob by ref and path", 8 + "parameters": { 9 + "type": "params", 10 + "required": ["repo", "path"], 11 + "properties": { 12 + "repo": { 13 + "type": "string", 14 + "format": "at-uri", 15 + "description": "AT-URI of the repository" 16 + }, 17 + "ref": { 18 + "type": "string", 19 + "description": "Git reference (branch, tag, or commit SHA)", 20 + "default": "HEAD" 21 + }, 22 + "path": { 23 + "type": "string", 24 + "description": "path of the entity" 25 + } 26 + } 27 + }, 28 + "output": { 29 + "encoding": "application/json", 30 + "schema": { 31 + "type": "ref", 32 + "ref": "sh.tangled.git.temp.defs#blob" 33 + } 34 + }, 35 + "errors": [ 36 + { 37 + "name": "RepoNotFound", 38 + "description": "Repository not found or access denied" 39 + }, 40 + { 41 + "name": "BlobNotFound", 42 + "description": "Blob not found" 43 + }, 44 + { 45 + "name": "InvalidRequest", 46 + "description": "Invalid request parameters" 47 + } 48 + ] 49 + } 50 + } 51 + }
+37
lexicons/git/temp/getHead.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getHead", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + } 16 + } 17 + }, 18 + "output": { 19 + "encoding": "application/json", 20 + "schema": { 21 + "type": "ref", 22 + "ref": "sh.tangled.git.temp.defs#branch" 23 + } 24 + }, 25 + "errors": [ 26 + { 27 + "name": "RepoNotFound", 28 + "description": "Repository not found or access denied" 29 + }, 30 + { 31 + "name": "InvalidRequest", 32 + "description": "Invalid request parameters" 33 + } 34 + ] 35 + } 36 + } 37 + }
+44
lexicons/git/temp/getTag.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getTag", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": [ 10 + "repo", 11 + "tag" 12 + ], 13 + "properties": { 14 + "repo": { 15 + "type": "string", 16 + "format": "at-uri", 17 + "description": "AT-URI of the repository" 18 + }, 19 + "tag": { 20 + "type": "string", 21 + "description": "Name of tag, such as v1.3.0" 22 + } 23 + } 24 + }, 25 + "output": { 26 + "encoding": "*/*" 27 + }, 28 + "errors": [ 29 + { 30 + "name": "RepoNotFound", 31 + "description": "Repository not found or access denied" 32 + }, 33 + { 34 + "name": "TagNotFound", 35 + "description": "Tag not found" 36 + }, 37 + { 38 + "name": "InvalidRequest", 39 + "description": "Invalid request parameters" 40 + } 41 + ] 42 + } 43 + } 44 + }
+183
lexicons/git/temp/getTree.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.getTree", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": [ 10 + "repo", 11 + "ref" 12 + ], 13 + "properties": { 14 + "repo": { 15 + "type": "string", 16 + "format": "at-uri", 17 + "description": "AT-URI of the repository" 18 + }, 19 + "ref": { 20 + "type": "string", 21 + "description": "Git reference (branch, tag, or commit SHA)" 22 + }, 23 + "path": { 24 + "type": "string", 25 + "description": "Path within the repository tree", 26 + "default": "" 27 + } 28 + } 29 + }, 30 + "output": { 31 + "encoding": "application/json", 32 + "schema": { 33 + "type": "object", 34 + "required": [ 35 + "ref", 36 + "files" 37 + ], 38 + "properties": { 39 + "ref": { 40 + "type": "string", 41 + "description": "The git reference used" 42 + }, 43 + "parent": { 44 + "type": "string", 45 + "description": "The parent path in the tree" 46 + }, 47 + "dotdot": { 48 + "type": "string", 49 + "description": "Parent directory path" 50 + }, 51 + "readme": { 52 + "type": "ref", 53 + "ref": "#readme", 54 + "description": "Readme for this file tree" 55 + }, 56 + "lastCommit": { 57 + "type": "ref", 58 + "ref": "#lastCommit" 59 + }, 60 + "files": { 61 + "type": "array", 62 + "items": { 63 + "type": "ref", 64 + "ref": "#treeEntry" 65 + } 66 + } 67 + } 68 + } 69 + }, 70 + "errors": [ 71 + { 72 + "name": "RepoNotFound", 73 + "description": "Repository not found or access denied" 74 + }, 75 + { 76 + "name": "RefNotFound", 77 + "description": "Git reference not found" 78 + }, 79 + { 80 + "name": "PathNotFound", 81 + "description": "Path not found in repository tree" 82 + }, 83 + { 84 + "name": "InvalidRequest", 85 + "description": "Invalid request parameters" 86 + } 87 + ] 88 + }, 89 + "readme": { 90 + "type": "object", 91 + "required": [ 92 + "filename", 93 + "contents" 94 + ], 95 + "properties": { 96 + "filename": { 97 + "type": "string", 98 + "description": "Name of the readme file" 99 + 
}, 100 + "contents": { 101 + "type": "string", 102 + "description": "Contents of the readme file" 103 + } 104 + } 105 + }, 106 + "treeEntry": { 107 + "type": "object", 108 + "required": [ 109 + "name", 110 + "mode", 111 + "size" 112 + ], 113 + "properties": { 114 + "name": { 115 + "type": "string", 116 + "description": "Relative file or directory name" 117 + }, 118 + "mode": { 119 + "type": "string", 120 + "description": "File mode" 121 + }, 122 + "size": { 123 + "type": "integer", 124 + "description": "File size in bytes" 125 + }, 126 + "last_commit": { 127 + "type": "ref", 128 + "ref": "#lastCommit" 129 + } 130 + } 131 + }, 132 + "lastCommit": { 133 + "type": "object", 134 + "required": [ 135 + "hash", 136 + "message", 137 + "when" 138 + ], 139 + "properties": { 140 + "hash": { 141 + "type": "string", 142 + "description": "Commit hash" 143 + }, 144 + "message": { 145 + "type": "string", 146 + "description": "Commit message" 147 + }, 148 + "author": { 149 + "type": "ref", 150 + "ref": "#signature" 151 + }, 152 + "when": { 153 + "type": "string", 154 + "format": "datetime", 155 + "description": "Commit timestamp" 156 + } 157 + } 158 + }, 159 + "signature": { 160 + "type": "object", 161 + "required": [ 162 + "name", 163 + "email", 164 + "when" 165 + ], 166 + "properties": { 167 + "name": { 168 + "type": "string", 169 + "description": "Author name" 170 + }, 171 + "email": { 172 + "type": "string", 173 + "description": "Author email" 174 + }, 175 + "when": { 176 + "type": "string", 177 + "format": "datetime", 178 + "description": "Author timestamp" 179 + } 180 + } 181 + } 182 + } 183 + }
+44
lexicons/git/temp/listBranches.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.listBranches", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "limit": { 17 + "type": "integer", 18 + "description": "Maximum number of branches to return", 19 + "minimum": 1, 20 + "maximum": 100, 21 + "default": 50 22 + }, 23 + "cursor": { 24 + "type": "string", 25 + "description": "Pagination cursor" 26 + } 27 + } 28 + }, 29 + "output": { 30 + "encoding": "*/*" 31 + }, 32 + "errors": [ 33 + { 34 + "name": "RepoNotFound", 35 + "description": "Repository not found or access denied" 36 + }, 37 + { 38 + "name": "InvalidRequest", 39 + "description": "Invalid request parameters" 40 + } 41 + ] 42 + } 43 + } 44 + }
+56
lexicons/git/temp/listCommits.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.listCommits", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "ref": { 17 + "type": "string", 18 + "description": "Git reference (branch, tag, or commit SHA)" 19 + }, 20 + "limit": { 21 + "type": "integer", 22 + "description": "Maximum number of commits to return", 23 + "minimum": 1, 24 + "maximum": 100, 25 + "default": 50 26 + }, 27 + "cursor": { 28 + "type": "string", 29 + "description": "Pagination cursor (commit SHA)" 30 + } 31 + } 32 + }, 33 + "output": { 34 + "encoding": "*/*" 35 + }, 36 + "errors": [ 37 + { 38 + "name": "RepoNotFound", 39 + "description": "Repository not found or access denied" 40 + }, 41 + { 42 + "name": "RefNotFound", 43 + "description": "Git reference not found" 44 + }, 45 + { 46 + "name": "PathNotFound", 47 + "description": "Path not found in repository" 48 + }, 49 + { 50 + "name": "InvalidRequest", 51 + "description": "Invalid request parameters" 52 + } 53 + ] 54 + } 55 + } 56 + }
+100
lexicons/git/temp/listLanguages.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.listLanguages", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "ref": { 17 + "type": "string", 18 + "description": "Git reference (branch, tag, or commit SHA)", 19 + "default": "HEAD" 20 + } 21 + } 22 + }, 23 + "output": { 24 + "encoding": "application/json", 25 + "schema": { 26 + "type": "object", 27 + "required": ["ref", "languages"], 28 + "properties": { 29 + "ref": { 30 + "type": "string", 31 + "description": "The git reference used" 32 + }, 33 + "languages": { 34 + "type": "array", 35 + "items": { 36 + "type": "ref", 37 + "ref": "#language" 38 + } 39 + }, 40 + "totalSize": { 41 + "type": "integer", 42 + "description": "Total size of all analyzed files in bytes" 43 + }, 44 + "totalFiles": { 45 + "type": "integer", 46 + "description": "Total number of files analyzed" 47 + } 48 + } 49 + } 50 + }, 51 + "errors": [ 52 + { 53 + "name": "RepoNotFound", 54 + "description": "Repository not found or access denied" 55 + }, 56 + { 57 + "name": "RefNotFound", 58 + "description": "Git reference not found" 59 + }, 60 + { 61 + "name": "InvalidRequest", 62 + "description": "Invalid request parameters" 63 + } 64 + ] 65 + }, 66 + "language": { 67 + "type": "object", 68 + "required": ["name", "size", "percentage"], 69 + "properties": { 70 + "name": { 71 + "type": "string", 72 + "description": "Programming language name" 73 + }, 74 + "size": { 75 + "type": "integer", 76 + "description": "Total size of files in this language (bytes)" 77 + }, 78 + "percentage": { 79 + "type": "integer", 80 + "description": "Percentage of total codebase (0-100)" 81 + }, 82 + "fileCount": { 83 + "type": "integer", 84 + "description": "Number of files in this language" 85 + }, 86 + "color": { 87 + "type": "string", 88 + "description": 
"Hex color code for this language" 89 + }, 90 + "extensions": { 91 + "type": "array", 92 + "items": { 93 + "type": "string" 94 + }, 95 + "description": "File extensions associated with this language" 96 + } 97 + } 98 + } 99 + } 100 + }
+44
lexicons/git/temp/listTags.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.git.temp.listTags", 4 + "defs": { 5 + "main": { 6 + "type": "query", 7 + "parameters": { 8 + "type": "params", 9 + "required": ["repo"], 10 + "properties": { 11 + "repo": { 12 + "type": "string", 13 + "format": "at-uri", 14 + "description": "AT-URI of the repository" 15 + }, 16 + "limit": { 17 + "type": "integer", 18 + "description": "Maximum number of tags to return", 19 + "minimum": 1, 20 + "maximum": 100, 21 + "default": 50 22 + }, 23 + "cursor": { 24 + "type": "string", 25 + "description": "Pagination cursor" 26 + } 27 + } 28 + }, 29 + "output": { 30 + "encoding": "*/*" 31 + }, 32 + "errors": [ 33 + { 34 + "name": "RepoNotFound", 35 + "description": "Repository not found or access denied" 36 + }, 37 + { 38 + "name": "InvalidRequest", 39 + "description": "Invalid request parameters" 40 + } 41 + ] 42 + } 43 + } 44 + }
+57
lexicons/knot/subscribeRepos.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.knot.subscribeRepos", 4 + "defs": { 5 + "main": { 6 + "type": "subscription", 7 + "description": "Repository event stream, aka Firehose endpoint. Outputs repo commits with diff data, and identity update events, for all repositories on the current server. See the atproto specifications for details around stream sequencing, repo versioning, CAR diff format, and more. Public and does not require auth; implemented by PDS and Relay.", 8 + "parameters": { 9 + "type": "params", 10 + "properties": { 11 + "cursor": { 12 + "type": "integer", 13 + "description": "The last known event seq number to backfill from." 14 + } 15 + } 16 + }, 17 + "message": { 18 + "schema": { 19 + "type": "union", 20 + "refs": ["#identity", "gitRefUpdate"] 21 + } 22 + }, 23 + "errors": [ 24 + { "name": "FutureCursor" }, 25 + { 26 + "name": "ConsumerTooSlow", 27 + "description": "If the consumer of the stream can not keep up with events, and a backlog gets too large, the server will drop the connection." 28 + } 29 + ] 30 + }, 31 + "identity": { 32 + "type": "object", 33 + "required": ["seq", "did", "time"], 34 + "properties": { 35 + "seq": { "type": "integer", "description": "The stream sequence number of this message." }, 36 + "did": { "type": "string", "format": "did", "description": "Repository DID identifier" }, 37 + "time": { "type": "string", "format": "datetime" } 38 + } 39 + }, 40 + "gitSync1": { 41 + "type": "object", 42 + "required": ["seq", "did"], 43 + "properties": { 44 + "seq": { "type": "integer", "description": "The stream sequence number of this message." }, 45 + "did": { "type": "string", "format": "did", "description": "Repository DID identifier" } 46 + } 47 + }, 48 + "gitSync2": { 49 + "type": "object", 50 + "required": ["seq", "repo"], 51 + "properties": { 52 + "seq": { "type": "integer", "description": "The stream sequence number of this message." 
}, 53 + "did": { "type": "string", "format": "at-uri", "description": "Repository AT-URI identifier" } 54 + } 55 + } 56 + } 57 + }
+29
lexicons/sync/requestCrawl.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.sync.requestCrawl", 4 + "defs": { 5 + "main": { 6 + "type": "procedure", 7 + "description": "Request a service to persistently crawl hosted repos. Does not require auth.", 8 + "input": { 9 + "encoding": "application/json", 10 + "schema": { 11 + "type": "object", 12 + "required": ["hostname"], 13 + "properties": { 14 + "hostname": { 15 + "type": "string", 16 + "description": "Hostname of the current service (eg, Knot) that is requesting to be crawled." 17 + }, 18 + "ensureRepo": { 19 + "type": "string", 20 + "format": "at-uri", 21 + "description": "specific repository to ensure crawling" 22 + } 23 + } 24 + } 25 + }, 26 + "errors": [{ "name": "HostBanned" }] 27 + } 28 + } 29 + }
+39 -20
nix/gomod2nix.toml
··· 139 139 hash = "sha256-GWm5i1ukuBukV0GMF1rffpbOSSXZdfg6/0pABMiGzLQ=" 140 140 replaced = "tangled.sh/oppi.li/go-gitdiff" 141 141 [mod."github.com/bluesky-social/indigo"] 142 + version = "v0.0.0-20260315101958-fb1dfa36fed2" 143 + hash = "sha256-R5Dmcsi1a5LquA/a30YyjLAh7Mjg17EuTNVCDxyw4JE=" 144 + replaced = "github.com/boltlessengineer/indigo" 142 - version = "v0.0.0-20251003000214-3259b215110e" 143 - hash = "sha256-qi/GrquJznbLnnHVpd7IqoryCESbi6xE4X1SiEM2qlo=" 144 145 [mod."github.com/bluesky-social/jetstream"] 146 + version = "v0.0.0-20260226214936-e0274250f654" 147 + hash = "sha256-VE93NvI3PreteLHnlv7WT6GgH2vSjtoFjMygCmrznfg=" 145 - version = "v0.0.0-20241210005130-ea96859b93d1" 146 - hash = "sha256-AiapbrkjXboIKc5QNiWH0KyNs0zKnn6UlGwWFlkUfm0=" 147 148 [mod."github.com/bmatcuk/doublestar/v4"] 148 149 version = "v4.9.1" 149 150 hash = "sha256-0iyHjyTAsfhgYSsE+NKxSNGBuM3Id615VWeQhssTShE=" ··· 225 226 [mod."github.com/dustin/go-humanize"] 226 227 version = "v1.0.1" 227 228 hash = "sha256-yuvxYYngpfVkUg9yAmG99IUVmADTQA0tMbBXe0Fq0Mc=" 229 + [mod."github.com/earthboundkid/versioninfo/v2"] 230 + version = "v2.24.1" 231 + hash = "sha256-nbRdiX9WN2y1aiw1CR/DQ6AYqztow8FazndwY3kByHM=" 228 232 [mod."github.com/emirpasic/gods"] 229 233 version = "v1.18.1" 230 234 hash = "sha256-hGDKddjLj+5dn2woHtXKUdd49/3xdsqnhx7VEdCu1m4=" ··· 394 398 [mod."github.com/ipfs/go-metrics-interface"] 395 399 version = "v0.3.0" 396 400 hash = "sha256-b3tp3jxecLmJEGx2kW7MiKGlAKPEWg/LJ7hXylSC8jQ=" 401 + [mod."github.com/jackc/pgpassfile"] 402 + version = "v1.0.0" 403 + hash = "sha256-H0nFbC34/3pZUFnuiQk9W7yvAMh6qJDrqvHp+akBPLM=" 404 + [mod."github.com/jackc/pgservicefile"] 405 + version = "v0.0.0-20240606120523-5a60cdf6a761" 406 + hash = "sha256-ETpGsLAA2wcm5xJBayr/mZrCE1YsWbnkbSSX3ptrFn0=" 407 + [mod."github.com/jackc/pgx/v5"] 408 + version = "v5.8.0" 409 + hash = "sha256-Mq5/A/Obcceu6kKxUv30DPC2ZaVvD8Iq/YtmLm1BVec=" 410 + [mod."github.com/jackc/puddle/v2"] 411 + version = "v2.2.2" 412 + hash = 
"sha256-IUxdu4JYfsCh/qlz2SiUWu7EVPHhyooiVA4oaS2Z6yk=" 397 413 [mod."github.com/json-iterator/go"] 398 414 version = "v1.1.12" 399 415 hash = "sha256-To8A0h+lbfZ/6zM+2PpRpY3+L6725OPC66lffq6fUoM=" ··· 503 519 version = "v1.5.5" 504 520 hash = "sha256-ouhfDUCXsfpcgaCLfJE9oYprAQHuV61OJzb/aEhT0j8=" 505 521 [mod."github.com/prometheus/client_golang"] 522 + version = "v1.23.2" 523 + hash = "sha256-3GD4fBFa1tJu8MS4TNP6r2re2eViUE+kWUaieIOQXCg=" 506 - version = "v1.22.0" 507 - hash = "sha256-OJ/9rlWG1DIPQJAZUTzjykkX0o+f+4IKLvW8YityaMQ=" 508 524 [mod."github.com/prometheus/client_model"] 509 525 version = "v0.6.2" 510 526 hash = "sha256-q6Fh6v8iNJN9ypD47LjWmx66YITa3FyRjZMRsuRTFeQ=" 511 527 [mod."github.com/prometheus/common"] 528 + version = "v0.66.1" 529 + hash = "sha256-bqHPaV9IV70itx63wqwgy2PtxMN0sn5ThVxDmiD7+Tk=" 512 - version = "v0.64.0" 513 - hash = "sha256-uy3KO60F2Cvhamz3fWQALGSsy13JiTk3NfpXgRuwqtI=" 514 530 [mod."github.com/prometheus/procfs"] 515 531 version = "v0.16.1" 516 532 hash = "sha256-OBCvKlLW2obct35p0L9Q+1ZrxZjpTmbgHMP2rng9hpo=" ··· 543 559 version = "v0.0.0-20220730225603-2ab79fcdd4ef" 544 560 hash = "sha256-/XmSE/J+f6FLWXGvljh6uBK71uoCAK3h82XQEQ1Ki68=" 545 561 [mod."github.com/stretchr/testify"] 562 + version = "v1.11.1" 563 + hash = "sha256-sWfjkuKJyDllDEtnM8sb/pdLzPQmUYWYtmeWz/5suUc=" 546 - version = "v1.10.0" 547 - hash = "sha256-fJ4gnPr0vnrOhjQYQwJ3ARDKPsOtA7d4olQmQWR+wpI=" 548 564 [mod."github.com/tidwall/gjson"] 549 565 version = "v1.18.0" 550 566 hash = "sha256-CO6hqDu8Y58Po6A01e5iTpwiUBQ5khUZsw7czaJHw0I=" ··· 558 574 version = "v1.2.5" 559 575 hash = "sha256-OYGNolkmL7E1Qs2qrQ3IVpQp5gkcHNU/AB/z2O+Myps=" 560 576 [mod."github.com/urfave/cli/v3"] 577 + version = "v3.4.1" 578 + hash = "sha256-cDMaQrIVMthUhdyI1mKXzDC5/wIK151073lzRl92RnA=" 561 - version = "v3.3.3" 562 - hash = "sha256-FdPiu7koY1qBinkfca4A05zCrX+Vu4eRz8wlRDZJyGg=" 563 579 [mod."github.com/vmihailenco/go-tinylfu"] 564 580 version = "v0.2.2" 565 581 hash = 
"sha256-ZHr4g7DJAV6rLcfrEWZwo9wJSeZcXB9KSP38UIOFfaM=" ··· 629 645 [mod."go.uber.org/zap"] 630 646 version = "v1.27.0" 631 647 hash = "sha256-8655KDrulc4Das3VRduO9MjCn8ZYD5WkULjCvruaYsU=" 648 + [mod."go.yaml.in/yaml/v2"] 649 + version = "v2.4.2" 650 + hash = "sha256-oC8RWdf1zbMYCtmR0ATy/kCkhIwPR9UqFZSMOKLVF/A=" 632 651 [mod."golang.org/x/crypto"] 652 + version = "v0.41.0" 653 + hash = "sha256-o5Di0lsFmYnXl7a5MBTqmN9vXMCRpE9ay71C1Ar8jEY=" 633 - version = "v0.40.0" 634 - hash = "sha256-I6p2fqvz63P9MwAuoQrljI7IUbfZQvCem0ii4Q2zZng=" 635 654 [mod."golang.org/x/exp"] 636 655 version = "v0.0.0-20250620022241-b7579e27df2b" 637 656 hash = "sha256-IsDTeuWLj4UkPO4NhWTvFeZ22WNtlxjoWiyAJh6zdig=" ··· 639 658 version = "v0.31.0" 640 659 hash = "sha256-ZFTlu9+4QToPPLA8C5UcG2eq/lQylq81RoG/WtYo9rg=" 641 660 [mod."golang.org/x/net"] 661 + version = "v0.43.0" 662 + hash = "sha256-bf3iQFrsC8BoarVaS0uSspEFAcr1zHp1uziTtBpwV34=" 642 - version = "v0.42.0" 643 - hash = "sha256-YxileisIIez+kcAI+21kY5yk0iRuEqti2YdmS8jvP2s=" 644 663 [mod."golang.org/x/sync"] 645 664 version = "v0.17.0" 646 665 hash = "sha256-M85lz4hK3/fzmcUViAp/CowHSxnr3BHSO7pjHp1O6i0=" 647 666 [mod."golang.org/x/sys"] 667 + version = "v0.35.0" 668 + hash = "sha256-ZKM8pesQE6NAFZeKQ84oPn5JMhGr8g4TSwLYAsHMGSI=" 648 - version = "v0.34.0" 649 - hash = "sha256-5rZ7p8IaGli5X1sJbfIKOcOEwY4c0yQhinJPh2EtK50=" 650 669 [mod."golang.org/x/text"] 651 670 version = "v0.29.0" 652 671 hash = "sha256-2cWBtJje+Yc+AnSgCANqBlIwnOMZEGkpQ2cFI45VfLI=" ··· 666 685 version = "v1.73.0" 667 686 hash = "sha256-LfVlwip++q2DX70RU6CxoXglx1+r5l48DwlFD05G11c=" 668 687 [mod."google.golang.org/protobuf"] 688 + version = "v1.36.8" 689 + hash = "sha256-yZN8ZON0b5HjUNUSubHst7zbvnMsOzd81tDPYQRtPgM=" 669 - version = "v1.36.6" 670 - hash = "sha256-lT5qnefI5FDJnowz9PEkAGylH3+fE+A3DJDkAyy9RMc=" 671 690 [mod."gopkg.in/fsnotify.v1"] 672 691 version = "v1.4.7" 673 692 hash = "sha256-j/Ts92oXa3k1MFU7Yd8/AqafRTsFn7V2pDKCyDJLah8="
+9
nix/modules/knot.nix
··· 115 115 ''; 116 116 }; 117 117 118 + knotmirrors = mkOption { 119 + type = types.listOf types.str; 120 + default = [ 121 + "https://mirror.tangled.network" 122 + ]; 123 + description = "List of knotmirror hosts to request crawl"; 124 + }; 125 + 118 126 server = { 119 127 listenAddr = mkOption { 120 128 type = types.str; ··· 263 271 "KNOT_SERVER_PLC_URL=${cfg.server.plcUrl}" 264 272 "KNOT_SERVER_JETSTREAM_ENDPOINT=${cfg.server.jetstreamEndpoint}" 265 273 "KNOT_SERVER_OWNER=${cfg.server.owner}" 274 + "KNOT_MIRRORS=${concatStringsSep "," cfg.knotmirrors}" 266 275 "KNOT_SERVER_LOG_DIDS=${ 267 276 if cfg.server.logDids 268 277 then "true"
+155
nix/modules/knotmirror.nix
··· 1 + { 2 + config, 3 + pkgs, 4 + lib, 5 + ... 6 + }: let 7 + cfg = config.services.tangled.knotmirror; 8 + in 9 + with lib; { 10 + options.services.tangled.knotmirror = { 11 + enable = mkOption { 12 + type = types.bool; 13 + default = false; 14 + description = "Enable a tangled knot"; 15 + }; 16 + 17 + package = mkOption { 18 + type = types.package; 19 + description = "Package to use for the knotmirror"; 20 + }; 21 + 22 + tap-package = mkOption { 23 + type = types.package; 24 + description = "tap package to use for the knotmirror"; 25 + }; 26 + 27 + listenAddr = mkOption { 28 + type = types.str; 29 + default = "0.0.0.0:7000"; 30 + description = "Address to listen on"; 31 + }; 32 + 33 + adminListenAddr = mkOption { 34 + type = types.str; 35 + default = "127.0.0.1:7200"; 36 + description = "Address to listen on"; 37 + }; 38 + 39 + hostname = mkOption { 40 + type = types.str; 41 + example = "my.knotmirror.com"; 42 + description = "Hostname for the server (required)"; 43 + }; 44 + 45 + dbUrl = mkOption { 46 + type = types.str; 47 + example = "postgresql://..."; 48 + description = "Database URL. 
postgresql expected (required)"; 49 + }; 50 + 51 + atpPlcUrl = mkOption { 52 + type = types.str; 53 + default = "https://plc.directory"; 54 + description = "atproto PLC directory"; 55 + }; 56 + 57 + atpRelayUrl = mkOption { 58 + type = types.str; 59 + default = "https://relay1.us-east.bsky.network"; 60 + description = "atproto relay"; 61 + }; 62 + 63 + fullNetwork = mkOption { 64 + type = types.bool; 65 + default = false; 66 + description = "Whether to automatically mirror from entire network"; 67 + }; 68 + 69 + knotUseSSL = mkOption { 70 + type = types.bool; 71 + default = true; 72 + description = "Use SSL for knot connection"; 73 + }; 74 + 75 + knotSSRF = mkOption { 76 + type = types.bool; 77 + default = true; 78 + description = "enable SSRF protection for knots"; 79 + }; 80 + 81 + tap = { 82 + port = mkOption { 83 + type = types.port; 84 + default = 7480; 85 + description = "Internal port to run the knotmirror tap"; 86 + }; 87 + 88 + dbUrl = mkOption { 89 + type = types.str; 90 + default = "sqlite:///var/lib/knotmirror-tap/tap.db"; 91 + description = "database connection string (sqlite://path or postgres://...)"; 92 + }; 93 + }; 94 + }; 95 + config = mkIf cfg.enable { 96 + environment.systemPackages = [ 97 + pkgs.git 98 + cfg.package 99 + ]; 100 + 101 + systemd.services.tap-knotmirror = { 102 + description = "knotmirror tap service"; 103 + after = ["network.target"]; 104 + wantedBy = ["multi-user.target"]; 105 + serviceConfig = { 106 + LogsDirectory = "knotmirror-tap"; 107 + StateDirectory = "knotmirror-tap"; 108 + Environment = [ 109 + "TAP_BIND=:${toString cfg.tap.port}" 110 + "TAP_PLC_URL=${cfg.atpPlcUrl}" 111 + "TAP_RELAY_URL=${cfg.atpRelayUrl}" 112 + "TAP_RESYNC_PARALLELISM=10" 113 + "TAP_DATABASE_URL=${cfg.tap.dbUrl}" 114 + "TAP_RETRY_TIMEOUT=60s" 115 + "TAP_COLLECTION_FILTERS=sh.tangled.repo" 116 + ( 117 + if cfg.fullNetwork 118 + then "TAP_SIGNAL_COLLECTION=sh.tangled.repo" 119 + else "TAP_FULL_NETWORK=false" 120 + ) 121 + ]; 122 + ExecStart = "${getExe 
cfg.tap-package} run"; 123 + }; 124 + }; 125 + 126 + systemd.services.knotmirror = { 127 + description = "knotmirror service"; 128 + after = ["network.target" "tap-knotmirror.service"]; 129 + wantedBy = ["multi-user.target"]; 130 + path = [ 131 + pkgs.git 132 + ]; 133 + serviceConfig = { 134 + LogsDirectory = "knotmirror"; 135 + StateDirectory = "knotmirror"; 136 + Environment = [ 137 + # TODO: add environment variables 138 + "MIRROR_LISTEN=${cfg.listenAddr}" 139 + "MIRROR_HOSTNAME=${cfg.hostname}" 140 + "MIRROR_TAP_URL=http://localhost:${toString cfg.tap.port}" 141 + "MIRROR_DB_URL=${cfg.dbUrl}" 142 + "MIRROR_GIT_BASEPATH=/var/lib/knotmirror/repos" 143 + "MIRROR_KNOT_USE_SSL=${boolToString cfg.knotUseSSL}" 144 + "MIRROR_KNOT_SSRF=${boolToString cfg.knotSSRF}" 145 + "MIRROR_RESYNC_PARALLELISM=12" 146 + "MIRROR_METRICS_LISTEN=127.0.0.1:7100" 147 + "MIRROR_ADMIN_LISTEN=${cfg.adminListenAddr}" 148 + "MIRROR_SLURPER_CONCURRENCY=4" 149 + ]; 150 + ExecStart = "${getExe cfg.package} serve"; 151 + Restart = "always"; 152 + }; 153 + }; 154 + }; 155 + }
+18
nix/pkgs/knotmirror.nix
··· 1 + { 2 + buildGoApplication, 3 + modules, 4 + src, 5 + }: 6 + buildGoApplication { 7 + pname = "knotmirror"; 8 + version = "0.1.0"; 9 + inherit src modules; 10 + 11 + doCheck = false; 12 + 13 + subPackages = ["cmd/knotmirror"]; 14 + 15 + meta = { 16 + mainProgram = "knotmirror"; 17 + }; 18 + }
+20
nix/pkgs/tap.nix
··· 1 + { 2 + buildGoModule, 3 + fetchFromGitHub, 4 + }: 5 + buildGoModule { 6 + pname = "tap"; 7 + version = "0.1.0"; 8 + src = fetchFromGitHub { 9 + owner = "bluesky-social"; 10 + repo = "indigo"; 11 + rev = "498ecb9693e8ae050f73234c86f340f51ad896a9"; 12 + sha256 = "sha256-KASCdwkg/hlKBt7RTW3e3R5J3hqJkphoarFbaMgtN1k="; 13 + }; 14 + subPackages = ["cmd/tap"]; 15 + vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8="; 16 + doCheck = false; 17 + meta = { 18 + mainProgram = "tap"; 19 + }; 20 + }
+47 -1
nix/vm.nix
··· 25 25 modules = [ 26 26 self.nixosModules.knot 27 27 self.nixosModules.spindle 28 + self.nixosModules.knotmirror 28 29 ({ 29 30 lib, 30 31 config, ··· 57 58 host.port = 6555; 58 59 guest.port = 6555; 59 60 } 61 + # knotmirror 62 + { 63 + from = "host"; 64 + host.port = 7007; # 7000 is deserved in macos for Airplay 65 + guest.port = 7000; 66 + } 67 + # knotmirror-tap 68 + { 69 + from = "host"; 70 + host.port = 7480; 71 + guest.port = 7480; 72 + } 73 + # knotmirror-admin 74 + { 75 + from = "host"; 76 + host.port = 7200; 77 + guest.port = 7200; 78 + } 60 79 ]; 61 80 sharedDirectories = { 62 81 # We can't use the 9p mounts directly for most of these ··· 81 100 networking.firewall.enable = false; 82 101 time.timeZone = "Europe/London"; 83 102 services.getty.autologinUser = "root"; 103 + environment.systemPackages = with pkgs; [curl vim git sqlite litecli postgresql_14]; 84 - environment.systemPackages = with pkgs; [curl vim git sqlite litecli]; 85 104 services.tangled.knot = { 86 105 enable = true; 87 106 motd = "Welcome to the development knot!\n"; ··· 91 110 plcUrl = plcUrl; 92 111 jetstreamEndpoint = jetstream; 93 112 listenAddr = "0.0.0.0:6444"; 113 + dev = true; 94 114 }; 115 + knotmirrors = [ 116 + "http://localhost:7000" 117 + ]; 95 118 }; 96 119 services.tangled.spindle = { 97 120 enable = true; ··· 109 132 }; 110 133 }; 111 134 }; 135 + services.postgresql = { 136 + enable = true; 137 + package = pkgs.postgresql_14; 138 + ensureDatabases = ["mirror" "tap"]; 139 + ensureUsers = [ 140 + {name = "tnglr";} 141 + ]; 142 + authentication = '' 143 + local all tnglr trust 144 + host all tnglr 127.0.0.1/32 trust 145 + ''; 146 + }; 147 + services.tangled.knotmirror = { 148 + enable = true; 149 + listenAddr = "0.0.0.0:7000"; 150 + adminListenAddr = "0.0.0.0:7200"; 151 + hostname = "localhost:7000"; 152 + dbUrl = "postgresql://tnglr@127.0.0.1:5432/mirror"; 153 + fullNetwork = false; 154 + tap.dbUrl = "postgresql://tnglr@127.0.0.1:5432/tap"; 155 + }; 112 156 users = { 
113 157 # So we don't have to deal with permission clashing between 114 158 # blank disk VMs and existing state ··· 135 179 in { 136 180 knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir; 137 181 spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath); 182 + knotmirror.after = ["postgresql.target"]; 183 + tap-knotmirror.after = ["postgresql.target"]; 138 184 }; 139 185 }) 140 186 ];
+4
ogre/.gitignore
··· 1 + node_modules/ 2 + output/ 3 + .wrangler/ 4 + .DS_Store
+492
ogre/bun.lock
··· 1 + { 2 + "lockfileVersion": 1, 3 + "configVersion": 1, 4 + "workspaces": { 5 + "": { 6 + "name": "@tangled/ogre-worker", 7 + "dependencies": { 8 + "@fontsource/inter": "^5.2.8", 9 + "@resvg/resvg-wasm": "^2.6.2", 10 + "@tangled/ogre-runtime": "*", 11 + "lucide-static": "^0.577.0", 12 + "preact": "^10.29.0", 13 + "satori": "0.25.0", 14 + "zod": "^4.3.6", 15 + }, 16 + "devDependencies": { 17 + "@cloudflare/workers-types": "^4.20260317.1", 18 + "@types/bun": "^1.3.11", 19 + "@types/node": "^25.5.0", 20 + "knip": "^6.0.1", 21 + "tsx": "^4.21.0", 22 + "typescript": "^5.9.3", 23 + "wrangler": "^4.75.0", 24 + }, 25 + }, 26 + "packages/runtime": { 27 + "name": "@tangled/ogre-runtime", 28 + "version": "1.0.0", 29 + }, 30 + }, 31 + "packages": { 32 + "@cloudflare/kv-asset-handler": ["@cloudflare/kv-asset-handler@0.4.2", "", {}, "sha512-SIOD2DxrRRwQ+jgzlXCqoEFiKOFqaPjhnNTGKXSRLvp1HiOvapLaFG2kEr9dYQTYe8rKrd9uvDUzmAITeNyaHQ=="], 33 + 34 + "@cloudflare/unenv-preset": ["@cloudflare/unenv-preset@2.15.0", "", { "peerDependencies": { "unenv": "2.0.0-rc.24", "workerd": "1.20260301.1 || ~1.20260302.1 || ~1.20260303.1 || ~1.20260304.1 || >1.20260305.0 <2.0.0-0" }, "optionalPeers": ["workerd"] }, "sha512-EGYmJaGZKWl+X8tXxcnx4v2bOZSjQeNI5dWFeXivgX9+YCT69AkzHHwlNbVpqtEUTbew8eQurpyOpeN8fg00nw=="], 35 + 36 + "@cloudflare/workerd-darwin-64": ["@cloudflare/workerd-darwin-64@1.20260317.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-8hjh3sPMwY8M/zedq3/sXoA2Q4BedlGufn3KOOleIG+5a4ReQKLlUah140D7J6zlKmYZAFMJ4tWC7hCuI/s79g=="], 37 + 38 + "@cloudflare/workerd-darwin-arm64": ["@cloudflare/workerd-darwin-arm64@1.20260317.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-M/MnNyvO5HMgoIdr3QHjdCj2T1ki9gt0vIUnxYxBu9ISXS/jgtMl6chUVPJ7zHYBn9MyYr8ByeN6frjYxj0MGg=="], 39 + 40 + "@cloudflare/workerd-linux-64": ["@cloudflare/workerd-linux-64@1.20260317.1", "", { "os": "linux", "cpu": "x64" }, "sha512-1ltuEjkRcS3fsVF7CxsKlWiRmzq2ZqMfqDN0qUOgbUwkpXsLVJsXmoblaLf5OP00ELlcgF0QsN0p2xPEua4Uug=="], 41 + 42 + 
"@cloudflare/workerd-linux-arm64": ["@cloudflare/workerd-linux-arm64@1.20260317.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-3QrNnPF1xlaNwkHpasvRvAMidOvQs2NhXQmALJrEfpIJ/IDL2la8g499yXp3eqhG3hVMCB07XVY149GTs42Xtw=="], 43 + 44 + "@cloudflare/workerd-windows-64": ["@cloudflare/workerd-windows-64@1.20260317.1", "", { "os": "win32", "cpu": "x64" }, "sha512-MfZTz+7LfuIpMGTa3RLXHX8Z/pnycZLItn94WRdHr8LPVet+C5/1Nzei399w/jr3+kzT4pDKk26JF/tlI5elpQ=="], 45 + 46 + "@cloudflare/workers-types": ["@cloudflare/workers-types@4.20260317.1", "", {}, "sha512-+G4eVwyCpm8Au1ex8vQBCuA9wnwqetz4tPNRoB/53qvktERWBRMQnrtvC1k584yRE3emMThtuY0gWshvSJ++PQ=="], 47 + 48 + "@cspotcode/source-map-support": ["@cspotcode/source-map-support@0.8.1", "", { "dependencies": { "@jridgewell/trace-mapping": "0.3.9" } }, "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw=="], 49 + 50 + "@emnapi/core": ["@emnapi/core@1.9.1", "", { "dependencies": { "@emnapi/wasi-threads": "1.2.0", "tslib": "^2.4.0" } }, "sha512-mukuNALVsoix/w1BJwFzwXBN/dHeejQtuVzcDsfOEsdpCumXb/E9j8w11h5S54tT1xhifGfbbSm/ICrObRb3KA=="], 51 + 52 + "@emnapi/runtime": ["@emnapi/runtime@1.9.0", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw=="], 53 + 54 + "@emnapi/wasi-threads": ["@emnapi/wasi-threads@1.2.0", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg=="], 55 + 56 + "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.4", "", { "os": "aix", "cpu": "ppc64" }, "sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q=="], 57 + 58 + "@esbuild/android-arm": ["@esbuild/android-arm@0.27.4", "", { "os": "android", "cpu": "arm" }, "sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ=="], 59 + 60 + "@esbuild/android-arm64": 
["@esbuild/android-arm64@0.27.4", "", { "os": "android", "cpu": "arm64" }, "sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw=="], 61 + 62 + "@esbuild/android-x64": ["@esbuild/android-x64@0.27.4", "", { "os": "android", "cpu": "x64" }, "sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw=="], 63 + 64 + "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ=="], 65 + 66 + "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw=="], 67 + 68 + "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.4", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw=="], 69 + 70 + "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.4", "", { "os": "freebsd", "cpu": "x64" }, "sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ=="], 71 + 72 + "@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.4", "", { "os": "linux", "cpu": "arm" }, "sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg=="], 73 + 74 + "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA=="], 75 + 76 + "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.4", "", { "os": "linux", "cpu": "ia32" }, "sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA=="], 77 + 78 + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.4", "", { "os": "linux", "cpu": "none" }, 
"sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA=="], 79 + 80 + "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.4", "", { "os": "linux", "cpu": "none" }, "sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw=="], 81 + 82 + "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.4", "", { "os": "linux", "cpu": "ppc64" }, "sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA=="], 83 + 84 + "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.4", "", { "os": "linux", "cpu": "none" }, "sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw=="], 85 + 86 + "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA=="], 87 + 88 + "@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.4", "", { "os": "linux", "cpu": "x64" }, "sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA=="], 89 + 90 + "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.4", "", { "os": "none", "cpu": "arm64" }, "sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q=="], 91 + 92 + "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.4", "", { "os": "none", "cpu": "x64" }, "sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg=="], 93 + 94 + "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.4", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow=="], 95 + 96 + "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.4", "", { "os": "openbsd", "cpu": "x64" }, "sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ=="], 97 + 98 + "@esbuild/openharmony-arm64": 
["@esbuild/openharmony-arm64@0.27.4", "", { "os": "none", "cpu": "arm64" }, "sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg=="], 99 + 100 + "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.4", "", { "os": "sunos", "cpu": "x64" }, "sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g=="], 101 + 102 + "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.4", "", { "os": "win32", "cpu": "arm64" }, "sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg=="], 103 + 104 + "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.4", "", { "os": "win32", "cpu": "ia32" }, "sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw=="], 105 + 106 + "@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.4", "", { "os": "win32", "cpu": "x64" }, "sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg=="], 107 + 108 + "@fontsource/inter": ["@fontsource/inter@5.2.8", "", {}, "sha512-P6r5WnJoKiNVV+zvW2xM13gNdFhAEpQ9dQJHt3naLvfg+LkF2ldgSLiF4T41lf1SQCM9QmkqPTn4TH568IRagg=="], 109 + 110 + "@img/colour": ["@img/colour@1.1.0", "", {}, "sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ=="], 111 + 112 + "@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.2.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w=="], 113 + 114 + "@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.2.4" }, "os": "darwin", "cpu": "x64" }, "sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw=="], 115 + 116 + "@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.2.4", "", { "os": "darwin", "cpu": 
"arm64" }, "sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g=="], 117 + 118 + "@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.2.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg=="], 119 + 120 + "@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.2.4", "", { "os": "linux", "cpu": "arm" }, "sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A=="], 121 + 122 + "@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw=="], 123 + 124 + "@img/sharp-libvips-linux-ppc64": ["@img/sharp-libvips-linux-ppc64@1.2.4", "", { "os": "linux", "cpu": "ppc64" }, "sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA=="], 125 + 126 + "@img/sharp-libvips-linux-riscv64": ["@img/sharp-libvips-linux-riscv64@1.2.4", "", { "os": "linux", "cpu": "none" }, "sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA=="], 127 + 128 + "@img/sharp-libvips-linux-s390x": ["@img/sharp-libvips-linux-s390x@1.2.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ=="], 129 + 130 + "@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw=="], 131 + 132 + "@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.2.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw=="], 133 + 134 + "@img/sharp-libvips-linuxmusl-x64": 
["@img/sharp-libvips-linuxmusl-x64@1.2.4", "", { "os": "linux", "cpu": "x64" }, "sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg=="], 135 + 136 + "@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.2.4" }, "os": "linux", "cpu": "arm" }, "sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw=="], 137 + 138 + "@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg=="], 139 + 140 + "@img/sharp-linux-ppc64": ["@img/sharp-linux-ppc64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-ppc64": "1.2.4" }, "os": "linux", "cpu": "ppc64" }, "sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA=="], 141 + 142 + "@img/sharp-linux-riscv64": ["@img/sharp-linux-riscv64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-riscv64": "1.2.4" }, "os": "linux", "cpu": "none" }, "sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw=="], 143 + 144 + "@img/sharp-linux-s390x": ["@img/sharp-linux-s390x@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-s390x": "1.2.4" }, "os": "linux", "cpu": "s390x" }, "sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg=="], 145 + 146 + "@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-MEzd8HPKxVxVenwAa+JRPwEC7QFjoPWuS5NZnBt6B3pu7EG2Ge0id1oLHZpPJdn3OQK+BQDiw9zStiHBTJQQQQ=="], 147 + 148 + "@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.34.5", "", { "optionalDependencies": { 
"@img/sharp-libvips-linuxmusl-arm64": "1.2.4" }, "os": "linux", "cpu": "arm64" }, "sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg=="], 149 + 150 + "@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.34.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.2.4" }, "os": "linux", "cpu": "x64" }, "sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q=="], 151 + 152 + "@img/sharp-wasm32": ["@img/sharp-wasm32@0.34.5", "", { "dependencies": { "@emnapi/runtime": "^1.7.0" }, "cpu": "none" }, "sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw=="], 153 + 154 + "@img/sharp-win32-arm64": ["@img/sharp-win32-arm64@0.34.5", "", { "os": "win32", "cpu": "arm64" }, "sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g=="], 155 + 156 + "@img/sharp-win32-ia32": ["@img/sharp-win32-ia32@0.34.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg=="], 157 + 158 + "@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.5", "", { "os": "win32", "cpu": "x64" }, "sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw=="], 159 + 160 + "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], 161 + 162 + "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="], 163 + 164 + "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.9", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.0.3", "@jridgewell/sourcemap-codec": "^1.4.10" } }, "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ=="], 165 + 166 + 
"@napi-rs/wasm-runtime": ["@napi-rs/wasm-runtime@1.1.1", "", { "dependencies": { "@emnapi/core": "^1.7.1", "@emnapi/runtime": "^1.7.1", "@tybys/wasm-util": "^0.10.1" } }, "sha512-p64ah1M1ld8xjWv3qbvFwHiFVWrq1yFvV4f7w+mzaqiR4IlSgkqhcRdHwsGgomwzBH51sRY4NEowLxnaBjcW/A=="], 167 + 168 + "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], 169 + 170 + "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], 171 + 172 + "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], 173 + 174 + "@oxc-parser/binding-android-arm-eabi": ["@oxc-parser/binding-android-arm-eabi@0.120.0", "", { "os": "android", "cpu": "arm" }, "sha512-WU3qtINx802wOl8RxAF1v0VvmC2O4D9M8Sv486nLeQ7iPHVmncYZrtBhB4SYyX+XZxj2PNnCcN+PW21jHgiOxg=="], 175 + 176 + "@oxc-parser/binding-android-arm64": ["@oxc-parser/binding-android-arm64@0.120.0", "", { "os": "android", "cpu": "arm64" }, "sha512-SEf80EHdhlbjZEgzeWm0ZA/br4GKMenDW3QB/gtyeTV1gStvvZeFi40ioHDZvds2m4Z9J1bUAUL8yn1/+A6iGg=="], 177 + 178 + "@oxc-parser/binding-darwin-arm64": ["@oxc-parser/binding-darwin-arm64@0.120.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-xVrrbCai8R8CUIBu3CjryutQnEYhZqs1maIqDvtUCFZb8vY33H7uh9mHpL3a0JBIKoBUKjPH8+rzyAeXnS2d6A=="], 179 + 180 + "@oxc-parser/binding-darwin-x64": ["@oxc-parser/binding-darwin-x64@0.120.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-xyHBbnJ6mydnQUH7MAcafOkkrNzQC6T+LXgDH/3InEq2BWl/g424IMRiJVSpVqGjB+p2bd0h0WRR8iIwzjU7rw=="], 181 + 182 + "@oxc-parser/binding-freebsd-x64": ["@oxc-parser/binding-freebsd-x64@0.120.0", "", { "os": "freebsd", "cpu": "x64" }, 
"sha512-UMnVRllquXUYTeNfFKmxTTEdZ/ix1nLl0ducDzMSREoWYGVIHnOOxoKMWlCOvRr9Wk/HZqo2rh1jeumbPGPV9A=="], 183 + 184 + "@oxc-parser/binding-linux-arm-gnueabihf": ["@oxc-parser/binding-linux-arm-gnueabihf@0.120.0", "", { "os": "linux", "cpu": "arm" }, "sha512-tkvn2CQ7QdcsMnpfiX3fd3wA3EFsWKYlcQzq9cFw/xc89Al7W6Y4O0FgLVkVQpo0Tnq/qtE1XfkJOnRRA9S/NA=="], 185 + 186 + "@oxc-parser/binding-linux-arm-musleabihf": ["@oxc-parser/binding-linux-arm-musleabihf@0.120.0", "", { "os": "linux", "cpu": "arm" }, "sha512-WN5y135Ic42gQDk9grbwY9++fDhqf8knN6fnP+0WALlAUh4odY/BDK1nfTJRSfpJD9P3r1BwU0m3pW2DU89whQ=="], 187 + 188 + "@oxc-parser/binding-linux-arm64-gnu": ["@oxc-parser/binding-linux-arm64-gnu@0.120.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-1GgQBCcXvFMw99EPdMy+4NZ3aYyXsxjf9kbUUg8HuAy3ZBXzOry5KfFEzT9nqmgZI1cuetvApkiJBZLAPo8uaw=="], 189 + 190 + "@oxc-parser/binding-linux-arm64-musl": ["@oxc-parser/binding-linux-arm64-musl@0.120.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-gmMQ70gsPdDBgpcErvJEoWNBr7bJooSLlvOBVBSGfOzlP5NvJ3bFvnUeZZ9d+dPrqSngtonf7nyzWUTUj/U+lw=="], 191 + 192 + "@oxc-parser/binding-linux-ppc64-gnu": ["@oxc-parser/binding-linux-ppc64-gnu@0.120.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-T/kZuU0ajop0xhzVMwH5r3srC9Nqup5HaIo+3uFjIN5uPxa0LvSxC1ZqP4aQGJVW5G0z8/nCkjIfSMS91P/wzw=="], 193 + 194 + "@oxc-parser/binding-linux-riscv64-gnu": ["@oxc-parser/binding-linux-riscv64-gnu@0.120.0", "", { "os": "linux", "cpu": "none" }, "sha512-vn21KXLAXzaI3N5CZWlBr1iWeXLl9QFIMor7S1hUjUGTeUuWCoE6JZB040/ZNDwf+JXPX8Ao9KbmJq9FMC2iGw=="], 195 + 196 + "@oxc-parser/binding-linux-riscv64-musl": ["@oxc-parser/binding-linux-riscv64-musl@0.120.0", "", { "os": "linux", "cpu": "none" }, "sha512-SUbUxlar007LTGmSLGIC5x/WJvwhdX+PwNzFJ9f/nOzZOrCFbOT4ikt7pJIRg1tXVsEfzk5mWpGO1NFiSs4PIw=="], 197 + 198 + "@oxc-parser/binding-linux-s390x-gnu": ["@oxc-parser/binding-linux-s390x-gnu@0.120.0", "", { "os": "linux", "cpu": "s390x" }, 
"sha512-hYiPJTxyfJY2+lMBFk3p2bo0R9GN+TtpPFlRqVchL1qvLG+pznstramHNvJlw9AjaoRUHwp9IKR7UZQnRPGjgQ=="], 199 + 200 + "@oxc-parser/binding-linux-x64-gnu": ["@oxc-parser/binding-linux-x64-gnu@0.120.0", "", { "os": "linux", "cpu": "x64" }, "sha512-q+5jSVZkprJCIy3dzJpApat0InJaoxQLsJuD6DkX8hrUS61z2lHQ1Fe9L2+TYbKHXCLWbL0zXe7ovkIdopBGMQ=="], 201 + 202 + "@oxc-parser/binding-linux-x64-musl": ["@oxc-parser/binding-linux-x64-musl@0.120.0", "", { "os": "linux", "cpu": "x64" }, "sha512-D9QDDZNnH24e7X4ftSa6ar/2hCavETfW3uk0zgcMIrZNy459O5deTbWrjGzZiVrSWigGtlQwzs2McBP0QsfV1w=="], 203 + 204 + "@oxc-parser/binding-openharmony-arm64": ["@oxc-parser/binding-openharmony-arm64@0.120.0", "", { "os": "none", "cpu": "arm64" }, "sha512-TBU8ZwOUWAOUWVfmI16CYWbvh4uQb9zHnGBHsw5Cp2JUVG044OIY1CSHODLifqzQIMTXvDvLzcL89GGdUIqNrA=="], 205 + 206 + "@oxc-parser/binding-wasm32-wasi": ["@oxc-parser/binding-wasm32-wasi@0.120.0", "", { "dependencies": { "@napi-rs/wasm-runtime": "^1.1.1" }, "cpu": "none" }, "sha512-WG/FOZgDJCpJnuF3ToG/K28rcOmSY7FmFmfBKYb2fmLyhDzPpUldFGV7/Fz4ru0Iz/v4KPmf8xVgO8N3lO4KHA=="], 207 + 208 + "@oxc-parser/binding-win32-arm64-msvc": ["@oxc-parser/binding-win32-arm64-msvc@0.120.0", "", { "os": "win32", "cpu": "arm64" }, "sha512-1T0HKGcsz/BKo77t7+89L8Qvu4f9DoleKWHp3C5sJEcbCjDOLx3m9m722bWZTY+hANlUEs+yjlK+lBFsA+vrVQ=="], 209 + 210 + "@oxc-parser/binding-win32-ia32-msvc": ["@oxc-parser/binding-win32-ia32-msvc@0.120.0", "", { "os": "win32", "cpu": "ia32" }, "sha512-L7vfLzbOXsjBXV0rv/6Y3Jd9BRjPeCivINZAqrSyAOZN3moCopDN+Psq9ZrGNZtJzP8946MtlRFZ0Als0wBCOw=="], 211 + 212 + "@oxc-parser/binding-win32-x64-msvc": ["@oxc-parser/binding-win32-x64-msvc@0.120.0", "", { "os": "win32", "cpu": "x64" }, "sha512-ys+upfqNtSu58huAhJMBKl3XCkGzyVFBlMlGPzHeFKgpFF/OdgNs1MMf8oaJIbgMH8ZxgGF7qfue39eJohmKIg=="], 213 + 214 + "@oxc-project/types": ["@oxc-project/types@0.120.0", "", {}, "sha512-k1YNu55DuvAip/MGE1FTsIuU3FUCn6v/ujG9V7Nq5Df/kX2CWb13hhwD0lmJGMGqE+bE1MXvv9SZVnMzEXlWcg=="], 215 + 216 + 
"@oxc-resolver/binding-android-arm-eabi": ["@oxc-resolver/binding-android-arm-eabi@11.19.1", "", { "os": "android", "cpu": "arm" }, "sha512-aUs47y+xyXHUKlbhqHUjBABjvycq6YSD7bpxSW7vplUmdzAlJ93yXY6ZR0c1o1x5A/QKbENCvs3+NlY8IpIVzg=="], 217 + 218 + "@oxc-resolver/binding-android-arm64": ["@oxc-resolver/binding-android-arm64@11.19.1", "", { "os": "android", "cpu": "arm64" }, "sha512-oolbkRX+m7Pq2LNjr/kKgYeC7bRDMVTWPgxBGMjSpZi/+UskVo4jsMU3MLheZV55jL6c3rNelPl4oD60ggYmqA=="], 219 + 220 + "@oxc-resolver/binding-darwin-arm64": ["@oxc-resolver/binding-darwin-arm64@11.19.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-nUC6d2i3R5B12sUW4O646qD5cnMXf2oBGPLIIeaRfU9doJRORAbE2SGv4eW6rMqhD+G7nf2Y8TTJTLiiO3Q/dQ=="], 221 + 222 + "@oxc-resolver/binding-darwin-x64": ["@oxc-resolver/binding-darwin-x64@11.19.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-cV50vE5+uAgNcFa3QY1JOeKDSkM/9ReIcc/9wn4TavhW/itkDGrXhw9jaKnkQnGbjJ198Yh5nbX/Gr2mr4Z5jQ=="], 223 + 224 + "@oxc-resolver/binding-freebsd-x64": ["@oxc-resolver/binding-freebsd-x64@11.19.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-xZOQiYGFxtk48PBKff+Zwoym7ScPAIVp4c14lfLxizO2LTTTJe5sx9vQNGrBymrf/vatSPNMD4FgsaaRigPkqw=="], 225 + 226 + "@oxc-resolver/binding-linux-arm-gnueabihf": ["@oxc-resolver/binding-linux-arm-gnueabihf@11.19.1", "", { "os": "linux", "cpu": "arm" }, "sha512-lXZYWAC6kaGe/ky2su94e9jN9t6M0/6c+GrSlCqL//XO1cxi5lpAhnJYdyrKfm0ZEr/c7RNyAx3P7FSBcBd5+A=="], 227 + 228 + "@oxc-resolver/binding-linux-arm-musleabihf": ["@oxc-resolver/binding-linux-arm-musleabihf@11.19.1", "", { "os": "linux", "cpu": "arm" }, "sha512-veG1kKsuK5+t2IsO9q0DErYVSw2azvCVvWHnfTOS73WE0STdLLB7Q1bB9WR+yHPQM76ASkFyRbogWo1GR1+WbQ=="], 229 + 230 + "@oxc-resolver/binding-linux-arm64-gnu": ["@oxc-resolver/binding-linux-arm64-gnu@11.19.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-heV2+jmXyYnUrpUXSPugqWDRpnsQcDm2AX4wzTuvgdlZfoNYO0O3W2AVpJYaDn9AG4JdM6Kxom8+foE7/BcSig=="], 231 + 232 + "@oxc-resolver/binding-linux-arm64-musl": 
["@oxc-resolver/binding-linux-arm64-musl@11.19.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-jvo2Pjs1c9KPxMuMPIeQsgu0mOJF9rEb3y3TdpsrqwxRM+AN6/nDDwv45n5ZrUnQMsdBy5gIabioMKnQfWo9ew=="], 233 + 234 + "@oxc-resolver/binding-linux-ppc64-gnu": ["@oxc-resolver/binding-linux-ppc64-gnu@11.19.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-vLmdNxWCdN7Uo5suays6A/+ywBby2PWBBPXctWPg5V0+eVuzsJxgAn6MMB4mPlshskYbppjpN2Zg83ArHze9gQ=="], 235 + 236 + "@oxc-resolver/binding-linux-riscv64-gnu": ["@oxc-resolver/binding-linux-riscv64-gnu@11.19.1", "", { "os": "linux", "cpu": "none" }, "sha512-/b+WgR+VTSBxzgOhDO7TlMXC1ufPIMR6Vj1zN+/x+MnyXGW7prTLzU9eW85Aj7Th7CCEG9ArCbTeqxCzFWdg2w=="], 237 + 238 + "@oxc-resolver/binding-linux-riscv64-musl": ["@oxc-resolver/binding-linux-riscv64-musl@11.19.1", "", { "os": "linux", "cpu": "none" }, "sha512-YlRdeWb9j42p29ROh+h4eg/OQ3dTJlpHSa+84pUM9+p6i3djtPz1q55yLJhgW9XfDch7FN1pQ/Vd6YP+xfRIuw=="], 239 + 240 + "@oxc-resolver/binding-linux-s390x-gnu": ["@oxc-resolver/binding-linux-s390x-gnu@11.19.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-EDpafVOQWF8/MJynsjOGFThcqhRHy417sRyLfQmeiamJ8qVhSKAn2Dn2VVKUGCjVB9C46VGjhNo7nOPUi1x6uA=="], 241 + 242 + "@oxc-resolver/binding-linux-x64-gnu": ["@oxc-resolver/binding-linux-x64-gnu@11.19.1", "", { "os": "linux", "cpu": "x64" }, "sha512-NxjZe+rqWhr+RT8/Ik+5ptA3oz7tUw361Wa5RWQXKnfqwSSHdHyrw6IdcTfYuml9dM856AlKWZIUXDmA9kkiBQ=="], 243 + 244 + "@oxc-resolver/binding-linux-x64-musl": ["@oxc-resolver/binding-linux-x64-musl@11.19.1", "", { "os": "linux", "cpu": "x64" }, "sha512-cM/hQwsO3ReJg5kR+SpI69DMfvNCp+A/eVR4b4YClE5bVZwz8rh2Nh05InhwI5HR/9cArbEkzMjcKgTHS6UaNw=="], 245 + 246 + "@oxc-resolver/binding-openharmony-arm64": ["@oxc-resolver/binding-openharmony-arm64@11.19.1", "", { "os": "none", "cpu": "arm64" }, "sha512-QF080IowFB0+9Rh6RcD19bdgh49BpQHUW5TajG1qvWHvmrQznTZZjYlgE2ltLXyKY+qs4F/v5xuX1XS7Is+3qA=="], 247 + 248 + "@oxc-resolver/binding-wasm32-wasi": ["@oxc-resolver/binding-wasm32-wasi@11.19.1", "", { 
"dependencies": { "@napi-rs/wasm-runtime": "^1.1.1" }, "cpu": "none" }, "sha512-w8UCKhX826cP/ZLokXDS6+milN8y4X7zidsAttEdWlVoamTNf6lhBJldaWr3ukTDiye7s4HRcuPEPOXNC432Vg=="], 249 + 250 + "@oxc-resolver/binding-win32-arm64-msvc": ["@oxc-resolver/binding-win32-arm64-msvc@11.19.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-nJ4AsUVZrVKwnU/QRdzPCCrO0TrabBqgJ8pJhXITdZGYOV28TIYystV1VFLbQ7DtAcaBHpocT5/ZJnF78YJPtQ=="], 251 + 252 + "@oxc-resolver/binding-win32-ia32-msvc": ["@oxc-resolver/binding-win32-ia32-msvc@11.19.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-EW+ND5q2Tl+a3pH81l1QbfgbF3HmqgwLfDfVithRFheac8OTcnbXt/JxqD2GbDkb7xYEqy1zNaVFRr3oeG8npA=="], 253 + 254 + "@oxc-resolver/binding-win32-x64-msvc": ["@oxc-resolver/binding-win32-x64-msvc@11.19.1", "", { "os": "win32", "cpu": "x64" }, "sha512-6hIU3RQu45B+VNTY4Ru8ppFwjVS/S5qwYyGhBotmjxfEKk41I2DlGtRfGJndZ5+6lneE2pwloqunlOyZuX/XAw=="], 255 + 256 + "@poppinss/colors": ["@poppinss/colors@4.1.6", "", { "dependencies": { "kleur": "^4.1.5" } }, "sha512-H9xkIdFswbS8n1d6vmRd8+c10t2Qe+rZITbbDHHkQixH5+2x1FDGmi/0K+WgWiqQFKPSlIYB7jlH6Kpfn6Fleg=="], 257 + 258 + "@poppinss/dumper": ["@poppinss/dumper@0.6.5", "", { "dependencies": { "@poppinss/colors": "^4.1.5", "@sindresorhus/is": "^7.0.2", "supports-color": "^10.0.0" } }, "sha512-NBdYIb90J7LfOI32dOewKI1r7wnkiH6m920puQ3qHUeZkxNkQiFnXVWoE6YtFSv6QOiPPf7ys6i+HWWecDz7sw=="], 259 + 260 + "@poppinss/exception": ["@poppinss/exception@1.2.3", "", {}, "sha512-dCED+QRChTVatE9ibtoaxc+WkdzOSjYTKi/+uacHWIsfodVfpsueo3+DKpgU5Px8qXjgmXkSvhXvSCz3fnP9lw=="], 261 + 262 + "@resvg/resvg-wasm": ["@resvg/resvg-wasm@2.6.2", "", {}, "sha512-FqALmHI8D4o6lk/LRWDnhw95z5eO+eAa6ORjVg09YRR7BkcM6oPHU9uyC0gtQG5vpFLvgpeU4+zEAz2H8APHNw=="], 263 + 264 + "@shuding/opentype.js": ["@shuding/opentype.js@1.4.0-beta.0", "", { "dependencies": { "fflate": "^0.7.3", "string.prototype.codepointat": "^0.2.1" }, "bin": { "ot": "bin/ot" } }, 
"sha512-3NgmNyH3l/Hv6EvsWJbsvpcpUba6R8IREQ83nH83cyakCw7uM1arZKNfHwv1Wz6jgqrF/j4x5ELvR6PnK9nTcA=="], 265 + 266 + "@sindresorhus/is": ["@sindresorhus/is@7.2.0", "", {}, "sha512-P1Cz1dWaFfR4IR+U13mqqiGsLFf1KbayybWwdd2vfctdV6hDpUkgCY0nKOLLTMSoRd/jJNjtbqzf13K8DCCXQw=="], 267 + 268 + "@speed-highlight/core": ["@speed-highlight/core@1.2.15", "", {}, "sha512-BMq1K3DsElxDWawkX6eLg9+CKJrTVGCBAWVuHXVUV2u0s2711qiChLSId6ikYPfxhdYocLNt3wWwSvDiTvFabw=="], 269 + 270 + "@tangled/ogre-runtime": ["@tangled/ogre-runtime@workspace:packages/runtime"], 271 + 272 + "@tybys/wasm-util": ["@tybys/wasm-util@0.10.1", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg=="], 273 + 274 + "@types/bun": ["@types/bun@1.3.11", "", { "dependencies": { "bun-types": "1.3.11" } }, "sha512-5vPne5QvtpjGpsGYXiFyycfpDF2ECyPcTSsFBMa0fraoxiQyMJ3SmuQIGhzPg2WJuWxVBoxWJ2kClYTcw/4fAg=="], 275 + 276 + "@types/node": ["@types/node@25.5.0", "", { "dependencies": { "undici-types": "~7.18.0" } }, "sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw=="], 277 + 278 + "base64-js": ["base64-js@0.0.8", "", {}, "sha512-3XSA2cR/h/73EzlXXdU6YNycmYI7+kicTxks4eJg2g39biHR84slg2+des+p7iHYhbRg/udIS4TD53WabcOUkw=="], 279 + 280 + "blake3-wasm": ["blake3-wasm@2.1.5", "", {}, "sha512-F1+K8EbfOZE49dtoPtmxUQrpXaBIl3ICvasLh+nJta0xkz+9kF/7uet9fLnwKqhDrmj6g+6K3Tw9yQPUg2ka5g=="], 281 + 282 + "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], 283 + 284 + "bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="], 285 + 286 + "camelize": ["camelize@1.0.1", "", {}, "sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ=="], 287 + 
288 + "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], 289 + 290 + "cookie": ["cookie@1.1.1", "", {}, "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ=="], 291 + 292 + "css-background-parser": ["css-background-parser@0.1.0", "", {}, "sha512-2EZLisiZQ+7m4wwur/qiYJRniHX4K5Tc9w93MT3AS0WS1u5kaZ4FKXlOTBhOjc+CgEgPiGY+fX1yWD8UwpEqUA=="], 293 + 294 + "css-box-shadow": ["css-box-shadow@1.0.0-3", "", {}, "sha512-9jaqR6e7Ohds+aWwmhe6wILJ99xYQbfmK9QQB9CcMjDbTxPZjwEmUQpU91OG05Xgm8BahT5fW+svbsQGjS/zPg=="], 295 + 296 + "css-color-keywords": ["css-color-keywords@1.0.0", "", {}, "sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg=="], 297 + 298 + "css-gradient-parser": ["css-gradient-parser@0.0.17", "", {}, "sha512-w2Xy9UMMwlKtou0vlRnXvWglPAceXCTtcmVSo8ZBUvqCV5aXEFP/PC6d+I464810I9FT++UACwTD5511bmGPUg=="], 299 + 300 + "css-to-react-native": ["css-to-react-native@3.2.0", "", { "dependencies": { "camelize": "^1.0.0", "css-color-keywords": "^1.0.0", "postcss-value-parser": "^4.0.2" } }, "sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ=="], 301 + 302 + "detect-libc": ["detect-libc@2.1.2", "", {}, "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ=="], 303 + 304 + "emoji-regex-xs": ["emoji-regex-xs@2.0.1", "", {}, "sha512-1QFuh8l7LqUcKe24LsPUNzjrzJQ7pgRwp1QMcZ5MX6mFplk2zQ08NVCM84++1cveaUUYtcCYHmeFEuNg16sU4g=="], 305 + 306 + "error-stack-parser-es": ["error-stack-parser-es@1.0.5", "", {}, "sha512-5qucVt2XcuGMcEGgWI7i+yZpmpByQ8J1lHhcL7PwqCwu9FPP3VUXzT4ltHe5i2z9dePwEHcDVOAfSnHsOlCXRA=="], 307 + 308 + "esbuild": ["esbuild@0.27.4", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.4", "@esbuild/android-arm": "0.27.4", "@esbuild/android-arm64": "0.27.4", "@esbuild/android-x64": "0.27.4", "@esbuild/darwin-arm64": 
"0.27.4", "@esbuild/darwin-x64": "0.27.4", "@esbuild/freebsd-arm64": "0.27.4", "@esbuild/freebsd-x64": "0.27.4", "@esbuild/linux-arm": "0.27.4", "@esbuild/linux-arm64": "0.27.4", "@esbuild/linux-ia32": "0.27.4", "@esbuild/linux-loong64": "0.27.4", "@esbuild/linux-mips64el": "0.27.4", "@esbuild/linux-ppc64": "0.27.4", "@esbuild/linux-riscv64": "0.27.4", "@esbuild/linux-s390x": "0.27.4", "@esbuild/linux-x64": "0.27.4", "@esbuild/netbsd-arm64": "0.27.4", "@esbuild/netbsd-x64": "0.27.4", "@esbuild/openbsd-arm64": "0.27.4", "@esbuild/openbsd-x64": "0.27.4", "@esbuild/openharmony-arm64": "0.27.4", "@esbuild/sunos-x64": "0.27.4", "@esbuild/win32-arm64": "0.27.4", "@esbuild/win32-ia32": "0.27.4", "@esbuild/win32-x64": "0.27.4" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-Rq4vbHnYkK5fws5NF7MYTU68FPRE1ajX7heQ/8QXXWqNgqqJ/GkmmyxIzUnf2Sr/bakf8l54716CcMGHYhMrrQ=="], 309 + 310 + "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], 311 + 312 + "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], 313 + 314 + "fastq": ["fastq@1.20.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw=="], 315 + 316 + "fd-package-json": ["fd-package-json@2.0.0", "", { "dependencies": { "walk-up-path": "^4.0.0" } }, "sha512-jKmm9YtsNXN789RS/0mSzOC1NUq9mkVd65vbSSVsKdjGvYXBuE4oWe2QOEoFeRmJg+lPuZxpmrfFclNhoRMneQ=="], 317 + 318 + "fflate": ["fflate@0.7.4", "", {}, "sha512-5u2V/CDW15QM1XbbgS+0DfPxVB+jUKhWEKuuFuHncbk3tEEqzmoXL+2KyOFuKGqOnmdIy0/davWF1CkuwtibCw=="], 319 + 320 + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, 
"sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], 321 + 322 + "formatly": ["formatly@0.3.0", "", { "dependencies": { "fd-package-json": "^2.0.0" }, "bin": { "formatly": "bin/index.mjs" } }, "sha512-9XNj/o4wrRFyhSMJOvsuyMwy8aUfBaZ1VrqHVfohyXf0Sw0e+yfKG+xZaY3arGCOMdwFsqObtzVOc1gU9KiT9w=="], 323 + 324 + "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], 325 + 326 + "get-tsconfig": ["get-tsconfig@4.13.6", "", { "dependencies": { "resolve-pkg-maps": "^1.0.0" } }, "sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw=="], 327 + 328 + "glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], 329 + 330 + "hex-rgb": ["hex-rgb@4.3.0", "", {}, "sha512-Ox1pJVrDCyGHMG9CFg1tmrRUMRPRsAWYc/PinY0XzJU4K7y7vjNoLKIQ7BR5UJMCxNN8EM1MNDmHWA/B3aZUuw=="], 331 + 332 + "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], 333 + 334 + "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], 335 + 336 + "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], 337 + 338 + "jiti": ["jiti@2.6.1", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ=="], 339 + 340 + "kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="], 341 + 342 + "knip": ["knip@6.0.1", "", { "dependencies": { "@nodelib/fs.walk": "^1.2.3", "fast-glob": 
"^3.3.3", "formatly": "^0.3.0", "get-tsconfig": "4.13.6", "jiti": "^2.6.0", "minimist": "^1.2.8", "oxc-parser": "^0.120.0", "oxc-resolver": "^11.19.1", "picocolors": "^1.1.1", "picomatch": "^4.0.1", "smol-toml": "^1.5.2", "strip-json-comments": "5.0.3", "unbash": "^2.2.0", "yaml": "^2.8.2", "zod": "^4.1.11" }, "bin": { "knip": "bin/knip.js", "knip-bun": "bin/knip-bun.js" } }, "sha512-qk5m+w6IYEqfRG5546DXZJYl5AXsgFfDD6ULaDvkubqNtLye79sokBg3usURrWFjASMeQtvX19TfldU3jHkMNA=="], 343 + 344 + "linebreak": ["linebreak@1.1.0", "", { "dependencies": { "base64-js": "0.0.8", "unicode-trie": "^2.0.0" } }, "sha512-MHp03UImeVhB7XZtjd0E4n6+3xr5Dq/9xI/5FptGk5FrbDR3zagPa2DS6U8ks/3HjbKWG9Q1M2ufOzxV2qLYSQ=="], 345 + 346 + "lucide-static": ["lucide-static@0.577.0", "", {}, "sha512-hx39J5Tq4JWF2ALY+5YRg+SxQLpeAmLJDXNcqiBJH/UuVwp43it9fyki/onZO7AVFgG5ZbB+fWwZR9mwGHE2XQ=="], 347 + 348 + "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], 349 + 350 + "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], 351 + 352 + "miniflare": ["miniflare@4.20260317.0", "", { "dependencies": { "@cspotcode/source-map-support": "0.8.1", "sharp": "^0.34.5", "undici": "7.24.4", "workerd": "1.20260317.1", "ws": "8.18.0", "youch": "4.1.0-beta.10" }, "bin": { "miniflare": "bootstrap.js" } }, "sha512-xuwk5Kjv+shi5iUBAdCrRl9IaWSGnTU8WuTQzsUS2GlSDIMCJuu8DiF/d9ExjMXYiQG5ml+k9SVKnMj8cRkq0w=="], 353 + 354 + "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], 355 + 356 + "oxc-parser": ["oxc-parser@0.120.0", "", { "dependencies": { "@oxc-project/types": "^0.120.0" }, "optionalDependencies": { "@oxc-parser/binding-android-arm-eabi": "0.120.0", "@oxc-parser/binding-android-arm64": "0.120.0", 
"@oxc-parser/binding-darwin-arm64": "0.120.0", "@oxc-parser/binding-darwin-x64": "0.120.0", "@oxc-parser/binding-freebsd-x64": "0.120.0", "@oxc-parser/binding-linux-arm-gnueabihf": "0.120.0", "@oxc-parser/binding-linux-arm-musleabihf": "0.120.0", "@oxc-parser/binding-linux-arm64-gnu": "0.120.0", "@oxc-parser/binding-linux-arm64-musl": "0.120.0", "@oxc-parser/binding-linux-ppc64-gnu": "0.120.0", "@oxc-parser/binding-linux-riscv64-gnu": "0.120.0", "@oxc-parser/binding-linux-riscv64-musl": "0.120.0", "@oxc-parser/binding-linux-s390x-gnu": "0.120.0", "@oxc-parser/binding-linux-x64-gnu": "0.120.0", "@oxc-parser/binding-linux-x64-musl": "0.120.0", "@oxc-parser/binding-openharmony-arm64": "0.120.0", "@oxc-parser/binding-wasm32-wasi": "0.120.0", "@oxc-parser/binding-win32-arm64-msvc": "0.120.0", "@oxc-parser/binding-win32-ia32-msvc": "0.120.0", "@oxc-parser/binding-win32-x64-msvc": "0.120.0" } }, "sha512-WyPWZlcIm+Fkte63FGfgFB8mAAk33aH9h5N9lphXVOHSXEBFFsmYdOBedVKly363aWABjZdaj/m9lBfEY4wt+w=="], 357 + 358 + "oxc-resolver": ["oxc-resolver@11.19.1", "", { "optionalDependencies": { "@oxc-resolver/binding-android-arm-eabi": "11.19.1", "@oxc-resolver/binding-android-arm64": "11.19.1", "@oxc-resolver/binding-darwin-arm64": "11.19.1", "@oxc-resolver/binding-darwin-x64": "11.19.1", "@oxc-resolver/binding-freebsd-x64": "11.19.1", "@oxc-resolver/binding-linux-arm-gnueabihf": "11.19.1", "@oxc-resolver/binding-linux-arm-musleabihf": "11.19.1", "@oxc-resolver/binding-linux-arm64-gnu": "11.19.1", "@oxc-resolver/binding-linux-arm64-musl": "11.19.1", "@oxc-resolver/binding-linux-ppc64-gnu": "11.19.1", "@oxc-resolver/binding-linux-riscv64-gnu": "11.19.1", "@oxc-resolver/binding-linux-riscv64-musl": "11.19.1", "@oxc-resolver/binding-linux-s390x-gnu": "11.19.1", "@oxc-resolver/binding-linux-x64-gnu": "11.19.1", "@oxc-resolver/binding-linux-x64-musl": "11.19.1", "@oxc-resolver/binding-openharmony-arm64": "11.19.1", "@oxc-resolver/binding-wasm32-wasi": "11.19.1", 
"@oxc-resolver/binding-win32-arm64-msvc": "11.19.1", "@oxc-resolver/binding-win32-ia32-msvc": "11.19.1", "@oxc-resolver/binding-win32-x64-msvc": "11.19.1" } }, "sha512-qE/CIg/spwrTBFt5aKmwe3ifeDdLfA2NESN30E42X/lII5ClF8V7Wt6WIJhcGZjp0/Q+nQ+9vgxGk//xZNX2hg=="], 359 + 360 + "pako": ["pako@0.2.9", "", {}, "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA=="], 361 + 362 + "parse-css-color": ["parse-css-color@0.2.1", "", { "dependencies": { "color-name": "^1.1.4", "hex-rgb": "^4.1.0" } }, "sha512-bwS/GGIFV3b6KS4uwpzCFj4w297Yl3uqnSgIPsoQkx7GMLROXfMnWvxfNkL0oh8HVhZA4hvJoEoEIqonfJ3BWg=="], 363 + 364 + "path-to-regexp": ["path-to-regexp@6.3.0", "", {}, "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ=="], 365 + 366 + "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="], 367 + 368 + "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], 369 + 370 + "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], 371 + 372 + "postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="], 373 + 374 + "preact": ["preact@10.29.0", "", {}, "sha512-wSAGyk2bYR1c7t3SZ3jHcM6xy0lcBcDel6lODcs9ME6Th++Dx2KU+6D3HD8wMMKGA8Wpw7OMd3/4RGzYRpzwRg=="], 375 + 376 + "queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], 377 + 378 + "resolve-pkg-maps": ["resolve-pkg-maps@1.0.0", "", {}, "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw=="], 379 + 380 + "reusify": ["reusify@1.1.0", "", {}, 
"sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], 381 + 382 + "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="], 383 + 384 + "satori": ["satori@0.25.0", "", { "dependencies": { "@shuding/opentype.js": "1.4.0-beta.0", "css-background-parser": "^0.1.0", "css-box-shadow": "1.0.0-3", "css-gradient-parser": "^0.0.17", "css-to-react-native": "^3.0.0", "emoji-regex-xs": "^2.0.1", "escape-html": "^1.0.3", "linebreak": "^1.1.0", "parse-css-color": "^0.2.1", "postcss-value-parser": "^4.2.0", "yoga-layout": "^3.2.1" } }, "sha512-utINfLxrYrmSnLvxFT4ZwgwWa8KOjrz7ans32V5wItgHVmzESl/9i33nE38uG0miycab8hUqQtDlOpqrIpB/iw=="], 385 + 386 + "semver": ["semver@7.7.4", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA=="], 387 + 388 + "sharp": ["sharp@0.34.5", "", { "dependencies": { "@img/colour": "^1.0.0", "detect-libc": "^2.1.2", "semver": "^7.7.3" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.34.5", "@img/sharp-darwin-x64": "0.34.5", "@img/sharp-libvips-darwin-arm64": "1.2.4", "@img/sharp-libvips-darwin-x64": "1.2.4", "@img/sharp-libvips-linux-arm": "1.2.4", "@img/sharp-libvips-linux-arm64": "1.2.4", "@img/sharp-libvips-linux-ppc64": "1.2.4", "@img/sharp-libvips-linux-riscv64": "1.2.4", "@img/sharp-libvips-linux-s390x": "1.2.4", "@img/sharp-libvips-linux-x64": "1.2.4", "@img/sharp-libvips-linuxmusl-arm64": "1.2.4", "@img/sharp-libvips-linuxmusl-x64": "1.2.4", "@img/sharp-linux-arm": "0.34.5", "@img/sharp-linux-arm64": "0.34.5", "@img/sharp-linux-ppc64": "0.34.5", "@img/sharp-linux-riscv64": "0.34.5", "@img/sharp-linux-s390x": "0.34.5", "@img/sharp-linux-x64": "0.34.5", "@img/sharp-linuxmusl-arm64": "0.34.5", "@img/sharp-linuxmusl-x64": "0.34.5", "@img/sharp-wasm32": "0.34.5", 
"@img/sharp-win32-arm64": "0.34.5", "@img/sharp-win32-ia32": "0.34.5", "@img/sharp-win32-x64": "0.34.5" } }, "sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg=="], 389 + 390 + "smol-toml": ["smol-toml@1.6.0", "", {}, "sha512-4zemZi0HvTnYwLfrpk/CF9LOd9Lt87kAt50GnqhMpyF9U3poDAP2+iukq2bZsO/ufegbYehBkqINbsWxj4l4cw=="], 391 + 392 + "string.prototype.codepointat": ["string.prototype.codepointat@0.2.1", "", {}, "sha512-2cBVCj6I4IOvEnjgO/hWqXjqBGsY+zwPmHl12Srk9IXSZ56Jwwmy+66XO5Iut/oQVR7t5ihYdLB0GMa4alEUcg=="], 393 + 394 + "strip-json-comments": ["strip-json-comments@5.0.3", "", {}, "sha512-1tB5mhVo7U+ETBKNf92xT4hrQa3pm0MZ0PQvuDnWgAAGHDsfp4lPSpiS6psrSiet87wyGPh9ft6wmhOMQ0hDiw=="], 395 + 396 + "supports-color": ["supports-color@10.2.2", "", {}, "sha512-SS+jx45GF1QjgEXQx4NJZV9ImqmO2NPz5FNsIHrsDjh2YsHnawpan7SNQ1o8NuhrbHZy9AZhIoCUiCeaW/C80g=="], 397 + 398 + "tiny-inflate": ["tiny-inflate@1.0.3", "", {}, "sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw=="], 399 + 400 + "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], 401 + 402 + "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], 403 + 404 + "tsx": ["tsx@4.21.0", "", { "dependencies": { "esbuild": "~0.27.0", "get-tsconfig": "^4.7.5" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "bin": { "tsx": "dist/cli.mjs" } }, "sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw=="], 405 + 406 + "typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="], 407 + 408 + "unbash": ["unbash@2.2.0", "", {}, 
"sha512-X2wH19RAPZE3+ldGicOkoj/SIA83OIxcJ6Cuaw23hf8Xc6fQpvZXY0SftE2JgS0QhYLUG4uwodSI3R53keyh7w=="], 409 + 410 + "undici": ["undici@7.24.4", "", {}, "sha512-BM/JzwwaRXxrLdElV2Uo6cTLEjhSb3WXboncJamZ15NgUURmvlXvxa6xkwIOILIjPNo9i8ku136ZvWV0Uly8+w=="], 411 + 412 + "undici-types": ["undici-types@7.18.2", "", {}, "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w=="], 413 + 414 + "unenv": ["unenv@2.0.0-rc.24", "", { "dependencies": { "pathe": "^2.0.3" } }, "sha512-i7qRCmY42zmCwnYlh9H2SvLEypEFGye5iRmEMKjcGi7zk9UquigRjFtTLz0TYqr0ZGLZhaMHl/foy1bZR+Cwlw=="], 415 + 416 + "unicode-trie": ["unicode-trie@2.0.0", "", { "dependencies": { "pako": "^0.2.5", "tiny-inflate": "^1.0.0" } }, "sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ=="], 417 + 418 + "walk-up-path": ["walk-up-path@4.0.0", "", {}, "sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A=="], 419 + 420 + "workerd": ["workerd@1.20260317.1", "", { "optionalDependencies": { "@cloudflare/workerd-darwin-64": "1.20260317.1", "@cloudflare/workerd-darwin-arm64": "1.20260317.1", "@cloudflare/workerd-linux-64": "1.20260317.1", "@cloudflare/workerd-linux-arm64": "1.20260317.1", "@cloudflare/workerd-windows-64": "1.20260317.1" }, "bin": { "workerd": "bin/workerd" } }, "sha512-ZuEq1OdrJBS+NV+L5HMYPCzVn49a2O60slQiiLpG44jqtlOo+S167fWC76kEXteXLLLydeuRrluRel7WdOUa4g=="], 421 + 422 + "wrangler": ["wrangler@4.75.0", "", { "dependencies": { "@cloudflare/kv-asset-handler": "0.4.2", "@cloudflare/unenv-preset": "2.15.0", "blake3-wasm": "2.1.5", "esbuild": "0.27.3", "miniflare": "4.20260317.0", "path-to-regexp": "6.3.0", "unenv": "2.0.0-rc.24", "workerd": "1.20260317.1" }, "optionalDependencies": { "fsevents": "~2.3.2" }, "peerDependencies": { "@cloudflare/workers-types": "^4.20260317.1" }, "optionalPeers": ["@cloudflare/workers-types"], "bin": { "wrangler": "bin/wrangler.js", "wrangler2": "bin/wrangler.js" } }, 
"sha512-Efk1tcnm4eduBYpH1sSjMYydXMnIFPns/qABI3+fsbDrUk5GksNYX8nYGVP4sFygvGPO7kJc36YJKB5ooA7JAg=="], 423 + 424 + "ws": ["ws@8.18.0", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": ">=5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw=="], 425 + 426 + "yaml": ["yaml@2.8.2", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A=="], 427 + 428 + "yoga-layout": ["yoga-layout@3.2.1", "", {}, "sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ=="], 429 + 430 + "youch": ["youch@4.1.0-beta.10", "", { "dependencies": { "@poppinss/colors": "^4.1.5", "@poppinss/dumper": "^0.6.4", "@speed-highlight/core": "^1.2.7", "cookie": "^1.0.2", "youch-core": "^0.3.3" } }, "sha512-rLfVLB4FgQneDr0dv1oddCVZmKjcJ6yX6mS4pU82Mq/Dt9a3cLZQ62pDBL4AUO+uVrCvtWz3ZFUL2HFAFJ/BXQ=="], 431 + 432 + "youch-core": ["youch-core@0.3.3", "", { "dependencies": { "@poppinss/exception": "^1.2.2", "error-stack-parser-es": "^1.0.5" } }, "sha512-ho7XuGjLaJ2hWHoK8yFnsUGy2Y5uDpqSTq1FkHLK4/oqKtyUU1AFbOOxY4IpC9f0fTLjwYbslUz0Po5BpD1wrA=="], 433 + 434 + "zod": ["zod@4.3.6", "", {}, "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg=="], 435 + 436 + "micromatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], 437 + 438 + "wrangler/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", "@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", 
"@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", "@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", "@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], 439 + 440 + "wrangler/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], 441 + 442 + "wrangler/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], 443 + 444 + "wrangler/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], 445 + 446 + "wrangler/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], 447 + 448 + "wrangler/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], 449 + 450 + "wrangler/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, 
"sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], 451 + 452 + "wrangler/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], 453 + 454 + "wrangler/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], 455 + 456 + "wrangler/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], 457 + 458 + "wrangler/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], 459 + 460 + "wrangler/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], 461 + 462 + "wrangler/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], 463 + 464 + "wrangler/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], 465 + 466 + "wrangler/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], 467 + 468 + "wrangler/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": 
"linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], 469 + 470 + "wrangler/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], 471 + 472 + "wrangler/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" }, "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], 473 + 474 + "wrangler/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], 475 + 476 + "wrangler/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], 477 + 478 + "wrangler/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], 479 + 480 + "wrangler/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], 481 + 482 + "wrangler/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], 483 + 484 + "wrangler/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], 485 + 486 + "wrangler/esbuild/@esbuild/win32-arm64": 
["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], 487 + 488 + "wrangler/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], 489 + 490 + "wrangler/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], 491 + } 492 + }
+117
ogre/client.go
// Package ogre provides an HTTP client for the ogre (open graph rendering
// engine) service, which renders repository, issue, and pull request cards.
package ogre

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// Client talks to a single ogre host over HTTP.
type Client struct {
	host   string
	client *http.Client
}

// NewClient returns a Client for the given host (scheme + authority, with or
// without a trailing slash). All requests share a 10-second timeout.
func NewClient(host string) *Client {
	return &Client{
		// Normalize here so doRequest can join host and path with a single "/".
		host: strings.TrimSuffix(host, "/"),
		client: &http.Client{
			Timeout: 10 * time.Second,
		},
	}
}

// LabelData is one issue label: display name and hex color.
type LabelData struct {
	Name  string `json:"name"`
	Color string `json:"color"`
}

// LanguageData is one language slice of a repository's language breakdown.
type LanguageData struct {
	Color      string  `json:"color"`
	Percentage float32 `json:"percentage"`
}

// RepositoryCardPayload is the request body for rendering a repository card.
type RepositoryCardPayload struct {
	Type        string         `json:"type"`
	RepoName    string         `json:"repoName"`
	OwnerHandle string         `json:"ownerHandle"`
	Stars       int            `json:"stars"`
	Pulls       int            `json:"pulls"`
	Issues      int            `json:"issues"`
	CreatedAt   string         `json:"createdAt"`
	AvatarUrl   string         `json:"avatarUrl"`
	Languages   []LanguageData `json:"languages"`
}

// IssueCardPayload is the request body for rendering an issue card.
type IssueCardPayload struct {
	Type          string      `json:"type"`
	RepoName      string      `json:"repoName"`
	OwnerHandle   string      `json:"ownerHandle"`
	AvatarUrl     string      `json:"avatarUrl"`
	Title         string      `json:"title"`
	IssueNumber   int         `json:"issueNumber"`
	Status        string      `json:"status"`
	Labels        []LabelData `json:"labels"`
	CommentCount  int         `json:"commentCount"`
	ReactionCount int         `json:"reactionCount"`
	CreatedAt     string      `json:"createdAt"`
}

// PullRequestCardPayload is the request body for rendering a pull request card.
type PullRequestCardPayload struct {
	Type              string `json:"type"`
	RepoName          string `json:"repoName"`
	OwnerHandle       string `json:"ownerHandle"`
	AvatarUrl         string `json:"avatarUrl"`
	Title             string `json:"title"`
	PullRequestNumber int    `json:"pullRequestNumber"`
	Status            string `json:"status"`
	FilesChanged      int    `json:"filesChanged"`
	Additions         int    `json:"additions"`
	Deletions         int    `json:"deletions"`
	Rounds            int    `json:"rounds"`
	CommentCount      int    `json:"commentCount"`
	ReactionCount     int    `json:"reactionCount"`
	CreatedAt         string `json:"createdAt"`
}

// doRequest POSTs payload as JSON to <host>/<path> and returns the raw
// response body. A non-200 status is reported as an error that includes a
// bounded, best-effort snippet of the response body for debugging.
func (c *Client) doRequest(ctx context.Context, path string, payload any) ([]byte, error) {
	body, err := json.Marshal(payload)
	if err != nil {
		return nil, fmt.Errorf("marshal payload: %w", err)
	}

	url := fmt.Sprintf("%s/%s", c.host, path)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := c.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// The status code alone fails the call; the (size-capped) body is
		// only extra context, so its read error is deliberately ignored.
		respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<10))
		return nil, fmt.Errorf("unexpected status: %d, body: %s", resp.StatusCode, string(respBody))
	}

	return io.ReadAll(resp.Body)
}

// RenderRepositoryCard renders a repository card and returns the raw bytes.
func (c *Client) RenderRepositoryCard(ctx context.Context, payload RepositoryCardPayload) ([]byte, error) {
	return c.doRequest(ctx, "repository", payload)
}

// RenderIssueCard renders an issue card and returns the raw bytes.
func (c *Client) RenderIssueCard(ctx context.Context, payload IssueCardPayload) ([]byte, error) {
	return c.doRequest(ctx, "issue", payload)
}

// RenderPullRequestCard renders a pull request card and returns the raw bytes.
func (c *Client) RenderPullRequestCard(ctx context.Context, payload PullRequestCardPayload) ([]byte, error) {
	return c.doRequest(ctx, "pullRequest", payload)
}
+4
ogre/knip.json
··· 1 + { 2 + "$schema": "https://unpkg.com/knip@5/schema.json", 3 + "tags": ["-lintignore"] 4 + }
+34
ogre/package.json
··· 1 + { 2 + "name": "@tangled/ogre-worker", 3 + "version": "1.0.0", 4 + "private": true, 5 + "type": "module", 6 + "workspaces": [ 7 + "packages/runtime" 8 + ], 9 + "scripts": { 10 + "dev": "wrangler dev", 11 + "deploy": "wrangler deploy", 12 + "typecheck": "tsc --noEmit", 13 + "test": "bun test", 14 + "knip": "knip" 15 + }, 16 + "dependencies": { 17 + "@fontsource/inter": "^5.2.8", 18 + "@resvg/resvg-wasm": "^2.6.2", 19 + "@tangled/ogre-runtime": "*", 20 + "lucide-static": "^0.577.0", 21 + "preact": "^10.29.0", 22 + "satori": "0.25.0", 23 + "zod": "^4.3.6" 24 + }, 25 + "devDependencies": { 26 + "@cloudflare/workers-types": "^4.20260317.1", 27 + "@types/bun": "^1.3.11", 28 + "@types/node": "^25.5.0", 29 + "knip": "^6.0.1", 30 + "tsx": "^4.21.0", 31 + "typescript": "^5.9.3", 32 + "wrangler": "^4.75.0" 33 + } 34 + }
+88
ogre/packages/runtime/index.ts
··· 1 + /** 2 + * Bun/Node.js runtime implementation 3 + * Uses filesystem APIs to load WASM and fonts 4 + */ 5 + import { readFile } from "node:fs/promises"; 6 + import { createRequire } from "node:module"; 7 + import type { FontData, SatoriFn, ResvgClass } from "./types"; 8 + 9 + const require = createRequire(import.meta.url); 10 + 11 + let satoriFn: SatoriFn | null = null; 12 + let resvgInitialized = false; 13 + let Resvg: ResvgClass | null = null; 14 + 15 + export async function initSatori(): Promise<SatoriFn> { 16 + if (satoriFn) return satoriFn; 17 + 18 + const { default: satori } = await import("satori"); 19 + satoriFn = satori; 20 + 21 + return satoriFn; 22 + } 23 + 24 + export async function initResvg(): Promise<ResvgClass> { 25 + if (resvgInitialized) return Resvg!; 26 + 27 + const { Resvg: ResvgClass, initWasm } = await import("@resvg/resvg-wasm"); 28 + const wasmPath = require.resolve("@resvg/resvg-wasm/index_bg.wasm"); 29 + const wasmBuffer = await readFile(wasmPath); 30 + await initWasm(wasmBuffer); 31 + 32 + Resvg = ResvgClass; 33 + resvgInitialized = true; 34 + return Resvg; 35 + } 36 + 37 + export async function loadFonts(): Promise<FontData[]> { 38 + // In Bun, .woff imports return a Module object with `default` being the file path 39 + const inter400Module = await import( 40 + "@fontsource/inter/files/inter-latin-400-normal.woff" 41 + ); 42 + const inter500Module = await import( 43 + "@fontsource/inter/files/inter-latin-500-normal.woff" 44 + ); 45 + const inter600Module = await import( 46 + "@fontsource/inter/files/inter-latin-600-normal.woff" 47 + ); 48 + 49 + const inter400Path = (inter400Module as { default: string }).default; 50 + const inter500Path = (inter500Module as { default: string }).default; 51 + const inter600Path = (inter600Module as { default: string }).default; 52 + 53 + const [buf400, buf500, buf600] = await Promise.all([ 54 + readFile(inter400Path), 55 + readFile(inter500Path), 56 + readFile(inter600Path), 57 + ]); 58 + 59 + 
return [ 60 + { 61 + name: "Inter", 62 + data: buf400.buffer.slice( 63 + buf400.byteOffset, 64 + buf400.byteOffset + buf400.byteLength, 65 + ), 66 + weight: 400, 67 + style: "normal", 68 + }, 69 + { 70 + name: "Inter", 71 + data: buf500.buffer.slice( 72 + buf500.byteOffset, 73 + buf500.byteOffset + buf500.byteLength, 74 + ), 75 + weight: 500, 76 + style: "normal", 77 + }, 78 + { 79 + name: "Inter", 80 + data: buf600.buffer.slice( 81 + buf600.byteOffset, 82 + buf600.byteOffset + buf600.byteLength, 83 + ), 84 + weight: 600, 85 + style: "normal", 86 + }, 87 + ]; 88 + }
+12
ogre/packages/runtime/package.json
··· 1 + { 2 + "name": "@tangled/ogre-runtime", 3 + "version": "1.0.0", 4 + "private": true, 5 + "type": "module", 6 + "exports": { 7 + "workerd": "./workerd.ts", 8 + "bun": "./index.ts", 9 + "default": "./index.ts" 10 + }, 11 + "types": "./types.ts" 12 + }
+10
ogre/packages/runtime/types.ts
/**
 * Shared types for the runtime adapters (Bun/Node in index.ts,
 * Cloudflare Workers in workerd.ts).
 */

/** One font face handed to satori for text layout and rendering. */
export interface FontData {
  name: string;
  data: ArrayBuffer;
  // Standard CSS font weights; the runtimes ship Inter 400/500/600.
  weight: 100 | 200 | 300 | 400 | 500 | 600 | 700 | 800 | 900;
  style: "normal" | "italic";
}

/** satori's default export: the render function. */
export type SatoriFn = typeof import("satori").default;

/** The Resvg class exported by @resvg/resvg-wasm. */
export type ResvgClass = typeof import("@resvg/resvg-wasm").Resvg;
+60
ogre/packages/runtime/workerd.ts
/**
 * Cloudflare Workers runtime implementation
 * Uses ?module suffix for WASM imports as required by Wrangler
 */
import type { FontData, SatoriFn, ResvgClass } from "./types";

import inter400 from "@fontsource/inter/files/inter-latin-400-normal.woff";
import inter500 from "@fontsource/inter/files/inter-latin-500-normal.woff";
import inter600 from "@fontsource/inter/files/inter-latin-600-normal.woff";

// Module-level caches so each isolate pays the import/WASM setup cost once.
let satoriFn: SatoriFn | null = null;
let resvgInitialized = false;
let Resvg: ResvgClass | null = null;

/**
 * Returns satori's render function. The standalone build is used so yoga's
 * WASM can be supplied explicitly via the ?module import.
 */
export async function initSatori(): Promise<SatoriFn> {
  if (satoriFn) return satoriFn;

  const { default: satori, init } = await import("satori/standalone");
  const wasmModule = (await import("satori/yoga.wasm?module")).default;
  await init(wasmModule);
  satoriFn = satori;

  return satoriFn;
}

/** Returns the Resvg class, instantiating its WASM module on first call. */
export async function initResvg(): Promise<ResvgClass> {
  if (resvgInitialized) return Resvg!;

  const { Resvg: ResvgClass, initWasm } = await import("@resvg/resvg-wasm");
  const wasmModule = (await import("@resvg/resvg-wasm/index_bg.wasm?module"))
    .default;
  await initWasm(wasmModule);

  Resvg = ResvgClass;
  resvgInitialized = true;
  return Resvg;
}

/**
 * Returns Inter 400/500/600 as satori FontData.
 * NOTE(review): the `as ArrayBuffer` casts assume Wrangler resolves .woff
 * imports to binary data in this build setup — confirm against wrangler.toml
 * module rules.
 */
export async function loadFonts(): Promise<FontData[]> {
  return [
    {
      name: "Inter",
      data: inter400 as ArrayBuffer,
      weight: 400,
      style: "normal",
    },
    {
      name: "Inter",
      data: inter500 as ArrayBuffer,
      weight: 500,
      style: "normal",
    },
    {
      name: "Inter",
      data: inter600 as ArrayBuffer,
      weight: 600,
      style: "normal",
    },
  ];
}
+3
ogre/readme.md
··· 1 + # ogre: open graph rendering engine 2 + 3 + 👹
ogre/src/__tests__/assets/avatar.jpg
+87
ogre/src/__tests__/fixtures.ts
import type {
  RepositoryCardData,
  IssueCardData,
  PullRequestCardData,
} from "../validation";

// Deliberately long title so tests can exercise the two-line title
// clamping in the issue / pull request cards.
const LONG_TITLE =
  "fix critical memory leak in WebSocket connection handler that causes server crashes under high load conditions in production environments";

/** Repository-card fixture; `avatarUrl` is supplied by the caller. */
export const createRepoData = (avatarUrl: string): RepositoryCardData => ({
  type: "repository",
  repoName: "core",
  ownerHandle: "tangled.org",
  stars: 746,
  pulls: 82,
  issues: 176,
  createdAt: "2026-01-29T00:00:00Z",
  avatarUrl,
  languages: [
    { color: "#00ADD8", percentage: 50 },
    { color: "#e34c26", percentage: 30 },
    { color: "#7e7eff", percentage: 10 },
    { color: "#663399", percentage: 5 },
    { color: "#f1e05a", percentage: 5 },
  ],
});

/** Issue-card fixture; any field can be overridden per test via `overrides`. */
export const createIssueData = (
  avatarUrl: string,
  overrides?: Partial<IssueCardData>,
): IssueCardData => ({
  type: "issue",
  repoName: "core",
  ownerHandle: "tangled.org",
  avatarUrl,
  title: "feature request: sync fork button",
  issueNumber: 8,
  status: "open",
  labels: [
    { name: "feature", color: "#4639d6" },
    { name: "help-wanted", color: "#008672" },
    { name: "enhancement", color: "#0052cc" },
  ],
  commentCount: 12,
  reactionCount: 5,
  createdAt: "2026-01-29T00:00:00Z",
  ...overrides,
});

/** Pull-request-card fixture; any field can be overridden via `overrides`. */
export const createPullRequestData = (
  avatarUrl: string,
  overrides?: Partial<PullRequestCardData>,
): PullRequestCardData => ({
  type: "pullRequest",
  repoName: "core",
  ownerHandle: "tangled.org",
  avatarUrl,
  title: "add author description to README.md",
  pullRequestNumber: 1,
  status: "open",
  filesChanged: 2,
  additions: 116,
  deletions: 59,
  rounds: 3,
  commentCount: 12,
  reactionCount: 31,
  createdAt: "2026-01-29T00:00:00Z",
  ...overrides,
});

/** Issue fixture pre-loaded with LONG_TITLE, for title-clamping tests. */
export const createLongTitleIssueData = (
  avatarUrl: string,
  overrides?: Partial<IssueCardData>,
): IssueCardData => ({
  ...createIssueData(avatarUrl),
  title: LONG_TITLE,
  ...overrides,
});

/** Pull request fixture pre-loaded with LONG_TITLE, for title-clamping tests. */
export const createLongTitlePullRequestData = (
  avatarUrl: string,
  overrides?: Partial<PullRequestCardData>,
): PullRequestCardData => ({
  ...createPullRequestData(avatarUrl),
  title: LONG_TITLE,
  ...overrides,
});
+132
ogre/src/__tests__/render.test.ts
/**
 * Render smoke tests: each test renders a card component to a PNG under
 * ./output. No pixel assertions are made — a test passes when rendering
 * completes, and the PNGs are kept for manual visual inspection.
 */
import { test, describe, beforeAll } from "bun:test";
import { writeFileSync, mkdirSync, readFileSync } from "fs";
import { join } from "path";
import { h, type VNode } from "preact";
import { renderCard } from "../lib/render";
import { RepositoryCard } from "../components/cards/repository";
import { IssueCard } from "../components/cards/issue";
import { PullRequestCard } from "../components/cards/pull-request";
import {
  repositoryCardSchema,
  issueCardSchema,
  pullRequestCardSchema,
} from "../validation";
import {
  createRepoData,
  createIssueData,
  createPullRequestData,
  createLongTitleIssueData,
  createLongTitlePullRequestData,
} from "./fixtures";

const outputDir = join(process.cwd(), "output");
let avatarDataUri: string;

// Reads the checked-in avatar and embeds it as a base64 data URI so the
// renderer never has to fetch an image over the network.
const loadAvatar = (): string => {
  const avatarPath = join(
    process.cwd(),
    "src",
    "__tests__",
    "assets",
    "avatar.jpg",
  );
  const avatarBase64 = readFileSync(avatarPath).toString("base64");
  return `data:image/jpeg;base64,${avatarBase64}`;
};

beforeAll(() => {
  mkdirSync(outputDir, { recursive: true });
  avatarDataUri = loadAvatar();
});

const savePng = (filename: string, buffer: Uint8Array) => {
  writeFileSync(join(outputDir, filename), buffer);
};

// Renders a card VNode and writes the resulting PNG into outputDir.
const renderAndSave = async <P>(component: VNode<P>, filename: string) => {
  const { png } = await renderCard(component as VNode);
  savePng(filename, png);
};

describe("repository card", () => {
  test("renders repository card", async () => {
    const data = createRepoData(avatarDataUri);
    const validated = repositoryCardSchema.parse(data);
    await renderAndSave(h(RepositoryCard, validated), "repository-card.png");
  });
});

describe("issue cards", () => {
  test("renders open issue", async () => {
    const data = createIssueData(avatarDataUri);
    const validated = issueCardSchema.parse(data);
    await renderAndSave(h(IssueCard, validated), "issue-card.png");
  });

  test("renders closed issue", async () => {
    const data = createIssueData(avatarDataUri, {
      issueNumber: 5,
      status: "closed",
      labels: [{ name: "wontfix", color: "#6a737d" }],
      reactionCount: 2,
    });
    const validated = issueCardSchema.parse(data);
    await renderAndSave(h(IssueCard, validated), "issue-card-closed.png");
  });

  test("renders issue with long title", async () => {
    const data = createLongTitleIssueData(avatarDataUri, {
      issueNumber: 42,
    });
    const validated = issueCardSchema.parse(data);
    await renderAndSave(h(IssueCard, validated), "issue-card-long-title.png");
  });
});

describe("pull request cards", () => {
  test("renders open pull request", async () => {
    const data = createPullRequestData(avatarDataUri);
    const validated = pullRequestCardSchema.parse(data);
    await renderAndSave(h(PullRequestCard, validated), "pull-request-card.png");
  });

  test("renders merged pull request", async () => {
    const data = createPullRequestData(avatarDataUri, {
      pullRequestNumber: 2,
      status: "merged",
      title: "Implement OAuth2 authentication flow",
      filesChanged: 5,
      additions: 342,
      deletions: 28,
    });
    const validated = pullRequestCardSchema.parse(data);
    await renderAndSave(
      h(PullRequestCard, validated),
      "pull-request-card-merged.png",
    );
  });

  test("renders closed pull request", async () => {
    const data = createPullRequestData(avatarDataUri, {
      pullRequestNumber: 3,
      status: "closed",
      title: "WIP: Experimental feature",
    });
    const validated = pullRequestCardSchema.parse(data);
    await renderAndSave(
      h(PullRequestCard, validated),
      "pull-request-card-closed.png",
    );
  });

  test("renders pull request with long title", async () => {
    const data = createLongTitlePullRequestData(avatarDataUri, {
      pullRequestNumber: 42,
    });
    const validated = pullRequestCardSchema.parse(data);
    await renderAndSave(
      h(PullRequestCard, validated),
      "pull-request-card-long-title.png",
    );
  });
});
+52
ogre/src/components/cards/issue.tsx
import { Card, Row, Col } from "../shared/layout";
import { TangledLogo } from "../shared/logo";
import { IssueStatusBadge } from "../shared/status-badge";
import { CardHeader } from "../shared/card-header";
import { LabelList } from "../shared/label-pill";
import { FooterStats } from "../shared/footer-stats";
import { TYPOGRAPHY } from "../shared/constants";
import type { IssueCardData } from "../../validation";

/**
 * Open Graph card for an issue: repo header and status badge on top,
 * a two-line clamped title, label pills, and a stats/logo footer.
 */
export function IssueCard(data: IssueCardData) {
  return (
    <Card style={{ justifyContent: "space-between" }}>
      <Col style={{ gap: 48 }}>
        <Col style={{ gap: 32 }}>
          <Row style={{ justifyContent: "space-between" }}>
            <CardHeader
              avatarUrl={data.avatarUrl}
              ownerHandle={data.ownerHandle}
              repoName={data.repoName}
            />
            <IssueStatusBadge status={data.status} />
          </Row>

          <div
            style={{
              ...TYPOGRAPHY.title,
              color: "#000000",
              display: "block",
              // Clamp to two lines; the custom ellipsis string appends the
              // issue number (e.g. `... #8`) when the title is truncated.
              lineClamp: `2 "... #${data.issueNumber}"`,
            }}>
            {data.title}
          </div>
        </Col>

        <LabelList labels={data.labels} />
      </Col>

      <Row
        style={{
          alignItems: "flex-end",
          justifyContent: "space-between",
        }}>
        <FooterStats
          createdAt={data.createdAt}
          reactionCount={data.reactionCount}
          commentCount={data.commentCount}
        />
        <TangledLogo />
      </Row>
    </Card>
  );
}
+137
ogre/src/components/cards/pull-request.tsx
import { Card, Row, Col } from "../shared/layout";
import { TangledLogo } from "../shared/logo";
import { StatusBadge } from "../shared/status-badge";
import { CardHeader } from "../shared/card-header";
import { FooterStats } from "../shared/footer-stats";
import { FileDiff, RefreshCw } from "../../icons/lucide";
import { COLORS, TYPOGRAPHY } from "../shared/constants";
import type { PullRequestCardData } from "../../validation";

interface FilesChangedPillProps {
  filesChanged: number;
  additions: number;
  deletions: number;
}

/**
 * Diffstat-style pill: "<n> files" next to colored +additions / -deletions
 * segments.
 */
function FilesChangedPill({
  filesChanged,
  additions,
  deletions,
}: FilesChangedPillProps) {
  return (
    <Row
      style={{
        // Clip the inner colored segments to the pill's rounded corners.
        overflow: "hidden",
        borderRadius: 18,
        backgroundColor: "#fff",
        border: `4px solid ${COLORS.label.border}`,
      }}>
      <Row
        style={{
          gap: 16,
          padding: "16px 28px",
        }}>
        <FileDiff size={34} color="#202020" />
        <span style={{ ...TYPOGRAPHY.body, color: "#202020" }}>
          {filesChanged} files
        </span>
      </Row>
      <Row style={{ gap: 0 }}>
        <Row
          style={{
            padding: "16px 10px 16px 11px",
            backgroundColor: COLORS.diff.additions.bg,
          }}>
          <span
            style={{ ...TYPOGRAPHY.body, color: COLORS.diff.additions.text }}>
            +{additions}
          </span>
        </Row>
        <Row
          style={{
            padding: "16px 16px 16px 11px",
            backgroundColor: COLORS.diff.deletions.bg,
          }}>
          <span
            style={{ ...TYPOGRAPHY.body, color: COLORS.diff.deletions.text }}>
            -{deletions}
          </span>
        </Row>
      </Row>
    </Row>
  );
}

interface MetricPillProps {
  value: number;
  label: string;
}

/**
 * Pill showing the number of review rounds.
 * NOTE(review): icon size 36 here vs 34 in FilesChangedPill — confirm the
 * mismatch is an intentional optical adjustment.
 */
function RoundsPill({ value, label }: MetricPillProps) {
  return (
    <Row
      style={{
        gap: 16,
        padding: "16px 28px",
        borderRadius: 18,
        backgroundColor: "#fff",
        border: `4px solid ${COLORS.label.border}`,
      }}>
      <RefreshCw size={36} color="#202020" />
      <span style={{ ...TYPOGRAPHY.body, color: "#202020" }}>
        {value} {label}
      </span>
    </Row>
  );
}

/**
 * Open Graph card for a pull request: repo header and status badge on top,
 * a two-line clamped title, diffstat + rounds pills, and a stats/logo footer.
 */
export function PullRequestCard(data: PullRequestCardData) {
  return (
    <Card style={{ justifyContent: "space-between" }}>
      <Col style={{ gap: 48 }}>
        <Col style={{ gap: 32 }}>
          <Row style={{ justifyContent: "space-between" }}>
            <CardHeader
              avatarUrl={data.avatarUrl}
              ownerHandle={data.ownerHandle}
              repoName={data.repoName}
            />
            <StatusBadge status={data.status} />
          </Row>

          <span
            style={{
              ...TYPOGRAPHY.title,
              color: "#000000",
              display: "block",
              // Clamp to two lines; the custom ellipsis string appends the
              // PR number (e.g. `... #1`) when the title is truncated.
              lineClamp: `2 "... #${data.pullRequestNumber}"`,
            }}>
            {data.title}
          </span>
        </Col>

        <Row style={{ gap: 16 }}>
          <FilesChangedPill
            filesChanged={data.filesChanged}
            additions={data.additions}
            deletions={data.deletions}
          />
          <RoundsPill value={data.rounds} label="rounds" />
        </Row>
      </Col>

      <Row
        style={{
          alignItems: "flex-end",
          justifyContent: "space-between",
        }}>
        <FooterStats
          createdAt={data.createdAt}
          reactionCount={data.reactionCount}
          commentCount={data.commentCount}
        />
        <TangledLogo />
      </Row>
    </Card>
  );
}
+53
ogre/src/components/cards/repository.tsx
··· 1 + import { Card, Row, Col } from "../shared/layout"; 2 + import { Avatar } from "../shared/avatar"; 3 + import { LanguageCircles } from "../shared/language-circles"; 4 + import { Metrics } from "../shared/metrics"; 5 + import { TangledLogo } from "../shared/logo"; 6 + import { FooterStats } from "../shared/footer-stats"; 7 + import { TYPOGRAPHY } from "../shared/constants"; 8 + import type { RepositoryCardData } from "../../validation"; 9 + 10 + function repoNameFontSize(name: string): number { 11 + // Available width ~1000px (1104px card content minus language circles area). 12 + // Inter 600 average char width is ~0.58× the font size. 13 + const maxSize = TYPOGRAPHY.repoName.fontSize; 14 + const fitted = Math.floor(1000 / (name.length * 0.58)); 15 + return Math.min(maxSize, Math.max(fitted, 48)); 16 + } 17 + 18 + export function RepositoryCard(data: RepositoryCardData) { 19 + const fontSize = repoNameFontSize(data.repoName); 20 + return ( 21 + <Card> 22 + <LanguageCircles languages={data.languages} /> 23 + 24 + <Col style={{ gap: 64 }}> 25 + <Col style={{ gap: 24 }}> 26 + <span style={{ ...TYPOGRAPHY.repoName, fontSize, color: "#000000" }}> 27 + {data.repoName} 28 + </span> 29 + 30 + <Row style={{ gap: 16 }}> 31 + <Avatar src={data.avatarUrl} size={64} /> 32 + <span style={{ ...TYPOGRAPHY.ownerHandle, color: "#000000" }}> 33 + {data.ownerHandle} 34 + </span> 35 + </Row> 36 + </Col> 37 + 38 + <Metrics stars={data.stars} pulls={data.pulls} issues={data.issues} /> 39 + </Col> 40 + 41 + <Row 42 + style={{ 43 + alignItems: "flex-end", 44 + justifyContent: "space-between", 45 + flexGrow: 1, 46 + }}> 47 + <FooterStats createdAt={data.createdAt} /> 48 + 49 + <TangledLogo /> 50 + </Row> 51 + </Card> 52 + ); 53 + }
+31
ogre/src/components/shared/avatar.tsx
··· 1 + interface AvatarProps { 2 + src: string; 3 + size?: number; 4 + } 5 + 6 + export function Avatar({ src, size = 64 }: AvatarProps) { 7 + const avatarSrc = src.includes("avatar.tangled.sh") 8 + ? src.replace(/[?&]format=\w+/, "").replace(/[?&]$/, "") + 9 + (src.includes("?") ? "&" : "?") + "format=jpeg" 10 + : src; 11 + 12 + return ( 13 + <div 14 + style={{ 15 + width: size, 16 + height: size, 17 + borderRadius: size / 2, 18 + overflow: "hidden", 19 + display: "flex", 20 + alignItems: "center", 21 + justifyContent: "center", 22 + }}> 23 + <img 24 + src={avatarSrc} 25 + width={size} 26 + height={size} 27 + style={{ objectFit: "cover" }} 28 + /> 29 + </div> 30 + ); 31 + }
+24
ogre/src/components/shared/card-header.tsx
··· 1 + import { Row } from "./layout"; 2 + import { Avatar } from "./avatar"; 3 + import { TYPOGRAPHY } from "./constants"; 4 + 5 + interface CardHeaderProps { 6 + avatarUrl: string; 7 + ownerHandle: string; 8 + repoName: string; 9 + } 10 + 11 + export function CardHeader({ 12 + avatarUrl, 13 + ownerHandle, 14 + repoName, 15 + }: CardHeaderProps) { 16 + return ( 17 + <Row style={{ gap: 16 }}> 18 + <Avatar src={avatarUrl} size={64} /> 19 + <span style={{ ...TYPOGRAPHY.cardHeader, color: "#000000" }}> 20 + {ownerHandle} / {repoName} 21 + </span> 22 + </Row> 23 + ); 24 + }
+30
ogre/src/components/shared/constants.ts
// Shared design tokens for all OG-card components. Values are sized for
// the 1200x630 render canvas.

export const COLORS = {
  text: "#000000",
  textSecondary: "#7D7D7D",
  icon: "#404040",
  // Status pill background/foreground pairs (see status-badge.tsx).
  status: {
    open: { bg: "#16A34A", text: "#ffffff" },
    closed: { bg: "#1f2937", text: "#ffffff" },
    merged: { bg: "#7C3AED", text: "#ffffff" },
  },
  // Chrome for label/metric pills.
  label: {
    text: "#202020",
    border: "#E6E6E6",
  },
  // Added/removed line counters in the pull-request card.
  diff: {
    additions: { bg: "#dcfce7", text: "#15803d" },
    deletions: { bg: "#fee2e2", text: "#b91c1c" },
  },
} as const;

// Type scale in pixels. Sizes are deliberately oversized because cards
// render at 1200x630 and are typically displayed scaled down.
export const TYPOGRAPHY = {
  title: { fontFamily: "Inter", fontSize: 64, fontWeight: 600 },
  repoName: { fontFamily: "Inter", fontSize: 144, fontWeight: 600 },
  ownerHandle: { fontFamily: "Inter", fontSize: 48, fontWeight: 500 },
  cardHeader: { fontFamily: "Inter", fontSize: 48, fontWeight: 500 },
  status: { fontFamily: "Inter", fontSize: 48, fontWeight: 500 },
  metricValue: { fontFamily: "Inter", fontSize: 48, fontWeight: 500 },
  body: { fontFamily: "Inter", fontSize: 36, fontWeight: 400 },
  meta: { fontFamily: "Inter", fontSize: 32, fontWeight: 400 },
  label: { fontFamily: "Inter", fontSize: 24, fontWeight: 400 },
} as const;
+33
ogre/src/components/shared/footer-stats.tsx
import { Row } from "./layout";
import { Calendar, MessageSquare, SmilePlus } from "../../icons/lucide";
import { StatItem } from "./stat-item";

interface FooterStatsProps {
  createdAt: string;
  reactionCount?: number;
  commentCount?: number;
}

/**
 * Bottom-of-card stat row: creation date, plus reaction/comment counts
 * when provided and non-zero.
 */
export function FooterStats(props: FooterStatsProps) {
  const { createdAt, reactionCount, commentCount } = props;

  const dateFormat = new Intl.DateTimeFormat("en-GB", {
    day: "numeric",
    month: "short",
    year: "numeric",
  });
  const created = dateFormat.format(new Date(createdAt));

  return (
    <Row style={{ gap: 64 }}>
      <StatItem Icon={Calendar} value={created} />
      {reactionCount ? <StatItem Icon={SmilePlus} value={reactionCount} /> : null}
      {commentCount ? <StatItem Icon={MessageSquare} value={commentCount} /> : null}
    </Row>
  );
}
+49
ogre/src/components/shared/label-pill.tsx
··· 1 + import { Row } from "./layout"; 2 + import { COLORS, TYPOGRAPHY } from "./constants"; 3 + 4 + interface LabelPillProps { 5 + name: string; 6 + color: string; 7 + } 8 + 9 + function LabelPill({ name, color }: LabelPillProps) { 10 + return ( 11 + <Row 12 + style={{ 13 + gap: 16, 14 + padding: "16px 28px", 15 + borderRadius: 18, 16 + backgroundColor: "#fff", 17 + border: `4px solid ${COLORS.label.border}`, 18 + }}> 19 + <div 20 + style={{ 21 + width: 24, 22 + height: 24, 23 + borderRadius: "50%", 24 + backgroundColor: color, 25 + }} 26 + /> 27 + <span style={{ ...TYPOGRAPHY.body, color: COLORS.label.text }}> 28 + {name} 29 + </span> 30 + </Row> 31 + ); 32 + } 33 + 34 + interface LabelListProps { 35 + labels: Array<{ name: string; color: string }>; 36 + max?: number; 37 + } 38 + 39 + export function LabelList({ labels, max = 5 }: LabelListProps) { 40 + if (labels.length === 0) return null; 41 + 42 + return ( 43 + <Row style={{ gap: 12 }}> 44 + {labels.slice(0, max).map((label, i) => ( 45 + <LabelPill key={i} name={label.name} color={label.color} /> 46 + ))} 47 + </Row> 48 + ); 49 + }
+56
ogre/src/components/shared/language-circles.tsx
··· 1 + import type { Language } from "../../validation"; 2 + 3 + interface LanguageCirclesProps { 4 + languages: Language[]; 5 + } 6 + 7 + const MAX_RADIUS = 380; 8 + 9 + function percentageToThickness(percentage: number): number { 10 + return (percentage / 100) * MAX_RADIUS; 11 + } 12 + 13 + export function LanguageCircles({ languages }: LanguageCirclesProps) { 14 + const sortedLanguages = [...languages] 15 + .sort((a, b) => b.percentage - a.percentage) 16 + .slice(0, 5) 17 + .reverse(); 18 + 19 + let cumulativeRadius = 0; 20 + 21 + return ( 22 + <div 23 + style={{ 24 + position: "absolute", 25 + right: -MAX_RADIUS, 26 + top: -MAX_RADIUS, 27 + width: MAX_RADIUS * 2, 28 + height: MAX_RADIUS * 2, 29 + display: "flex", 30 + }}> 31 + {sortedLanguages.map((lang, i) => { 32 + const thickness = percentageToThickness(lang.percentage); 33 + const contentSize = cumulativeRadius * 2; 34 + 35 + cumulativeRadius += thickness; 36 + 37 + return ( 38 + <div 39 + key={i} 40 + style={{ 41 + position: "absolute", 42 + left: "50%", 43 + top: "50%", 44 + transform: "translate(-50%, -50%)", 45 + width: contentSize, 46 + height: contentSize, 47 + borderRadius: "50%", 48 + border: `${thickness}px solid ${lang.color}`, 49 + boxSizing: "content-box", 50 + }} 51 + /> 52 + ); 53 + })} 54 + </div> 55 + ); 56 + }
+45
ogre/src/components/shared/layout.tsx
··· 1 + import type { ComponentChildren } from "preact"; 2 + 3 + interface StyleProps { 4 + style?: Record<string, string | number>; 5 + children?: ComponentChildren; 6 + } 7 + 8 + export function Card({ children, style }: StyleProps) { 9 + return ( 10 + <div 11 + style={{ 12 + width: 1200, 13 + height: 630, 14 + background: "white", 15 + display: "flex", 16 + flexDirection: "column", 17 + padding: 48, 18 + ...style, 19 + }}> 20 + {children} 21 + </div> 22 + ); 23 + } 24 + 25 + export function Row({ children, style }: StyleProps) { 26 + return ( 27 + <div 28 + style={{ 29 + display: "flex", 30 + flexDirection: "row", 31 + alignItems: "center", 32 + ...style, 33 + }}> 34 + {children} 35 + </div> 36 + ); 37 + } 38 + 39 + export function Col({ children, style }: StyleProps) { 40 + return ( 41 + <div style={{ display: "flex", flexDirection: "column", ...style }}> 42 + {children} 43 + </div> 44 + ); 45 + }
+68
ogre/src/components/shared/logo.tsx
// Tangled wordmark + mascot as a fixed-size (256x70) inline SVG.
// The path data is exported from the brand asset — do not hand-edit the
// coordinate strings.
export function TangledLogo() {
  return (
    <div
      style={{
        width: 256,
        height: 70,
        // NOTE(review): "contents" removes this wrapper's own box, so the
        // width/height above look inert — confirm the wrapper is needed by
        // the render pipeline.
        display: "contents",
      }}>
      <svg
        width="256"
        height="70"
        viewBox="0 0 256 70"
        fill="none"
        xmlns="http://www.w3.org/2000/svg">
        <path
          d="M38.6562 30.0449L39.168 30.2402L39.6807 30.4346L40.0234 30.8076L40.3672 31.1807L40.4922 31.7363L40.6162 32.292L39.9473 35.0566L39.7441 36.6553L39.5049 41.5752L39.3906 42.0312L39.2773 42.4863L38.9443 42.8193L38.6104 43.1514L37.9229 43.4463L37.3574 43.4414L36.793 43.4375L36.4531 43.2549L36.1143 43.0723L35.7949 42.6689L35.4756 42.2666L35.3438 41.7139L35.2109 41.1621L35.3564 37.832L35.5674 35.2646L36.002 33.0186L36.165 32.3838L36.3271 31.748L36.7324 30.9541L37.5293 30.2979L38.0928 30.1719L38.6562 30.0449Z"
          fill="black"
        />
        <path
          d="M30.5889 31.1201L30.8682 31.4277L31.1484 31.7354L31.2695 32.0986L31.3916 32.4619V33.3789L31.1904 33.9082L30.8477 35.5654L30.8486 38.1523L31.1074 40.5127L30.5547 41.7197L30.1074 42.0156L29.6611 42.3115L28.4502 42.3838L28.0098 42.1787L27.5693 41.9727L27.04 41.2539L26.9248 40.9336L26.8086 40.6123L26.6738 39.8008L26.4814 36.9756L26.6992 34.3018L26.8467 33.6553L26.9932 33.0078L27.4502 31.7852L28.0137 31.1162L28.8779 30.7236H29.7334L30.5889 31.1201Z"
          fill="black"
        />
        <path
          fill-rule="evenodd"
          clip-rule="evenodd"
          d="M45.4551 0L48.0215 0.143555L50.1611 0.571289L51.8721 1.12793L53.5869 1.91602L55.0186 2.78613L56.5781 3.96191L58.3262 5.74609L59.2383 6.91309L59.6455 7.55957L60.0518 8.20605L60.5176 9.16797L60.9824 10.1309L61.2656 10.9355L61.5479 11.7402L61.9658 13.7363L61.999 13.7607L62.0332 13.7852L64.707 15.0918L66.5674 16.3906L68.4297 18.1523L69.5566 19.5645L70.5576 21.1123L71.4766 23.0723L72.0156 24.6768L72.5146 27.0293L72.5107 30.9873L72.1279 32.8848L71.8564 33.7539L71.584 34.623L70.8457 36.3145L69.9648 37.832L68.8193 39.3867L67.2871 41L65.5625 42.3418L63.6367 43.4707L63.5068 43.5762L63.376 43.6816L63.5449 44.0723L63.7148 44.4629L63.9775 45.1045L64.2393 45.7461L65.3809 50.1318L65.5762 51.041L65.7725 51.9492L65.7617 56.0137L65.2803 58.2598L64.7393 59.8643L63.5947 62.2168L62.2529 64.1426L61.3398 65.0898L60.4277 66.0381L58.7158 67.3643L58.0547 67.7627L57.3926 68.1602L56.6641 68.4814L55.9355 68.8018L55.668 68.9443L55.4004 69.0859L53.9033 69.5225L51.9668 69.8672L50.2666 69.8496L49.8389 69.8301L48.7695 69.8105L48.3955 69.8877L48.0205 69.9639L47.8271 69.8506L47.6328 69.7373L46.418 69.6748L45.5889 69.5967L44.7607 69.5176V69.3584L44.1455 69.2383L43.5303 69.1172L43.0264 68.876L42.5225 68.6338L42.5146 68.3857L40.9121 67.5215L39.252 66.2988L37.6768 64.7842L36.3486 63.0723L35.3242 61.3613L35.2842 61.3184L35.2441 61.2744L34.3906 62.2441L33.0488 63.5322L31.9307 64.4111L31.1709 64.9014L30.4121 65.3926L28.915 66.0859L28.9307 66.1836L28.9473 66.2812L28.4307 66.4189L27.915 66.5576L25.8828 67.165L24.8135 67.4365L22.46 67.5928L20.3213 67.3818L20.209 67.29L20.0967 67.1992L19.6338 67.3203L19.4824 67.168L19.3301 67.0166L18.7031 66.8779L18.0752 66.7383L16.2891 66.1172L14.332 65.1357L12.3652 63.7656L10.5332 62.0312L9.18457 60.292L8.41992 59.0078L7.78125 57.7246L7.02539 55.6924L6.52441 53.3398L6.52734 49.1689L6.91406 47.251L7.56738 45.2109L8.51465 43.292L8.72559 42.9502L8.93652 42.6094L8.34277 42.2012L7.80762 41.8965L7.27246 41.5928L5.66895 40.416L4.09082 38.9014L2.95703 37.4893L2.47852 36.748L1.99902 36.0078L1.61035 35.208L1.2207 34.4092L0.859375 33.3926L0.49707 32.377L0 30.0244L0.0126953 25.8525L0.610352 23.291L1.40625 21.2383L1.9082 20.3164L2.41113 19.3955L3.06738 18.5059L3.72461 17.6172L5.24121 16.04L5.93555 15.4834L6.63086 14.9277L8.02148 14.0195L9.95117 13.0859L11.0166 12.6934L12.0859 10.665L13.3633 8.84766L15.252 6.89062L16.0215 6.30273L16.792 5.71582L18.6221 4.63867L20.5225 3.85156L21.7842 3.44922L24.1719 2.97266L27.7012 2.99023L30.375 3.5459L32.1934 4.21094L33.1846 4.70605L34.1768 5.2002L35.6152 3.80762L36.5781 3.05176L37.0596 2.74512L37.541 2.4375L37.9688 1.94238L39.0918 1.61523L39.7861 1.28711L41.3906 0.693359L43.3164 0.251953L45.4551 0ZM39.8984 21.0156L39.7324 21.3662L38.7773 22.457L37.7549 23.1094L37.0898 23.3311L36.4248 23.5537H34.7402L33.5654 23.1494L33.2246 22.9727L32.8848 22.7969L32.1631 22.1328L31.4424 21.4678L31.1543 21.2139L30.8672 20.959L29.5518 22.1494L29.0537 22.3848L28.5566 22.6211L28.0225 22.7646L27.4873 22.9092L26.8281 22.9102L26.1699 22.9121L25.4941 22.7012L24.8184 22.4912L24.5127 22.7812L24.207 23.0723L23.8438 23.5645L23.4814 24.0576L22.9707 24.248L22.4609 24.4375L21.7676 24.9531L21.4385 25.2959L21.1104 25.6387L18.9746 28.8174L16.3428 34.3018L15.4658 36.334L15.1318 37.6572L15.084 38.2793L15.0371 38.9014L15.1396 39.6318L15.2432 40.3613L15.5322 40.9531L15.8223 41.5439L16.2539 41.9209L16.6855 42.2969L17.8115 42.8223L19.4658 42.834L20.1611 42.54L20.8564 42.2471L21.498 41.7891L23.71 40.0234L23.791 40.0732L23.8711 40.123L24.0127 42.9297L24.5439 46.0146L25.0791 48.2061L25.8701 50.0244L26.1865 50.5049L26.502 50.9863L27.167 51.7002L28.8779 52.9375L29.7461 53.377L31.0381 53.7109L32.5146 53.8965L33.1289 53.8584L33.7441 53.8213L34.3896 53.7686L35.0361 53.7168L36.2783 53.3086L37.0127 52.9541L37.7471 52.6006L39.3975 51.3564L40.7764 49.8105L42.0938 48.0986L43.2275 46.3877L44.5176 44.1416L45.3223 42.2656L45.4082 42.2129L45.4941 42.1592L46.6807 43.7354L47.6641 44.6426L48.2715 44.9209L48.8779 45.2002L49.7871 45.2432L50.6963 45.2852L51.9795 44.8027L53.1855 43.5723L53.4639 42.7344L53.7412 41.8955L54.0879 40.0781L53.9785 37.5107L53.5176 34.9492L53.001 33.0186L52.1523 30.5996L51.1514 28.7402L50.4463 27.7783L50.0723 27.418L49.6982 27.0586L49.8115 25.96L49.4424 24.4629L48.6377 22.751L47.5391 21.5381L46.4971 20.7734L45.8828 21.1426L45.2686 21.5127L43.6631 21.8701L43.0449 21.8047L42.4268 21.7402L41.0137 21.209L40.6572 20.9375L40.3018 20.666H40.0645L39.8984 21.0156Z"
          fill="black"
        />
        <path
          fill-rule="evenodd"
          clip-rule="evenodd"
          d="M171.79 22.4316C173.259 22.4316 174.489 22.6826 175.479 23.1836C176.47 23.6732 177.268 24.2882 177.871 25.0283C178.486 25.7568 178.958 26.474 179.288 27.1797H179.562V22.7734H186.787V49.2646C186.787 51.4507 186.24 53.2782 185.147 54.7471C184.054 56.2273 182.539 57.3432 180.604 58.0947C178.679 58.8462 176.465 59.2226 173.96 59.2227C171.603 59.2227 169.582 58.9032 167.896 58.2656C166.223 57.628 164.89 56.7683 163.899 55.6865C162.909 54.6049 162.266 53.4037 161.97 52.083L168.699 51.1777C168.904 51.6559 169.229 52.1061 169.673 52.5273C170.117 52.9598 170.703 53.3127 171.432 53.5859C172.172 53.8592 173.071 53.9961 174.13 53.9961C175.713 53.9961 177.017 53.6197 178.042 52.8682C179.078 52.1166 179.596 50.8813 179.596 49.1621V44.3623H179.288C178.969 45.0911 178.491 45.7806 177.854 46.4297C177.216 47.0785 176.396 47.6077 175.395 48.0176C174.393 48.4275 173.197 48.6328 171.808 48.6328C169.838 48.6328 168.044 48.1775 166.427 47.2666C164.821 46.3443 163.54 44.9379 162.584 43.0479C161.639 41.1463 161.167 38.7434 161.167 35.8398C161.167 32.8679 161.65 30.3853 162.618 28.3926C163.586 26.3999 164.873 24.9086 166.479 23.918C168.095 22.9274 169.866 22.4317 171.79 22.4316ZM174.113 28.2217C172.918 28.2217 171.91 28.5463 171.09 29.1953C170.27 29.833 169.65 30.7217 169.229 31.8604C168.807 32.999 168.597 34.3141 168.597 35.8057C168.597 37.32 168.807 38.6299 169.229 39.7344C169.661 40.8273 170.281 41.6759 171.09 42.2793C171.91 42.8714 172.918 43.167 174.113 43.167C175.286 43.167 176.277 42.8765 177.085 42.2959C177.905 41.7039 178.531 40.8615 178.964 39.7686C179.408 38.6641 179.63 37.3428 179.63 35.8057C179.63 34.2685 179.414 32.9359 178.981 31.8086C178.549 30.67 177.922 29.7874 177.103 29.1611C176.283 28.5349 175.286 28.2217 174.113 28.2217Z"
          fill="black"
        />
        <path
          fill-rule="evenodd"
          clip-rule="evenodd"
          d="M215.798 22.4316C217.528 22.4317 219.139 22.7107 220.631 23.2686C222.134 23.8151 223.444 24.6407 224.56 25.7451C225.687 26.8496 226.564 28.2392 227.19 29.9131C227.817 31.5754 228.13 33.5224 228.13 35.7539V37.7529H210.264V37.7695C210.264 39.0676 210.503 40.1897 210.981 41.1348C211.471 42.0796 212.16 42.808 213.048 43.3203C213.936 43.8327 214.99 44.0889 216.208 44.0889C217.016 44.0888 217.756 43.9757 218.428 43.748C219.1 43.5203 219.675 43.1781 220.153 42.7227C220.631 42.2672 220.996 41.7091 221.246 41.0488L227.976 41.4932C227.634 43.1101 226.934 44.5225 225.875 45.7295C224.827 46.925 223.472 47.8585 221.81 48.5303C220.159 49.1906 218.251 49.5205 216.088 49.5205C213.389 49.5205 211.066 48.974 209.119 47.8809C207.184 46.7764 205.692 45.2165 204.645 43.2012C203.597 41.1744 203.073 38.7776 203.073 36.0107C203.073 33.3121 203.597 30.9435 204.645 28.9053C205.692 26.867 207.167 25.2783 209.068 24.1396C210.981 23.0011 213.225 22.4316 215.798 22.4316ZM215.917 27.8633C214.813 27.8633 213.833 28.1195 212.979 28.6318C212.137 29.1328 211.476 29.8102 210.998 30.6641C210.557 31.4413 210.317 32.3013 210.273 33.2432H221.28C221.28 32.1957 221.052 31.2674 220.597 30.459C220.141 29.6508 219.509 29.0189 218.701 28.5635C217.904 28.0967 216.976 27.8633 215.917 27.8633Z"
          fill="black"
        />
        <path
          fill-rule="evenodd"
          clip-rule="evenodd"
          d="M118.389 22.4316C119.846 22.4316 121.241 22.6028 122.573 22.9443C123.917 23.2859 125.107 23.8149 126.144 24.5322C127.191 25.2496 128.017 26.1725 128.62 27.2998C129.224 28.4157 129.525 29.7536 129.525 31.3135V49.0088H122.625V45.3701H122.42C121.999 46.1898 121.435 46.9129 120.729 47.5391C120.024 48.1539 119.175 48.6382 118.185 48.9912C117.194 49.3328 116.049 49.5039 114.751 49.5039C113.077 49.5039 111.586 49.2134 110.276 48.6328C108.967 48.0407 107.93 47.1696 107.167 46.0195C106.416 44.8581 106.04 43.4114 106.04 41.6807C106.04 40.2233 106.308 38.9993 106.843 38.0088C107.378 37.0181 108.107 36.2207 109.029 35.6172C109.952 35.0138 110.999 34.5584 112.172 34.251C113.356 33.9435 114.597 33.7278 115.896 33.6025C117.421 33.4431 118.651 33.2948 119.585 33.1582C120.519 33.0102 121.196 32.7934 121.617 32.5088C122.038 32.2241 122.249 31.803 122.249 31.2451V31.1426C122.249 30.0609 121.908 29.2239 121.225 28.6318C120.553 28.0397 119.596 27.7441 118.354 27.7441C117.045 27.7442 116.004 28.0346 115.229 28.6152C114.455 29.1845 113.943 29.9014 113.692 30.7666L106.963 30.2207C107.304 28.6266 107.976 27.2483 108.978 26.0869C109.98 24.9141 111.273 24.0149 112.855 23.3887C114.449 22.7511 116.294 22.4317 118.389 22.4316ZM122.301 36.8477C122.073 36.9956 121.76 37.1316 121.361 37.2568C120.974 37.3707 120.536 37.4796 120.046 37.582C119.556 37.6731 119.067 37.7582 118.577 37.8379C118.088 37.9062 117.644 37.9694 117.245 38.0264C116.391 38.1516 115.644 38.3507 115.007 38.624C114.369 38.8973 113.874 39.2676 113.521 39.7344C113.169 40.1898 112.992 40.7593 112.992 41.4424C112.992 42.4327 113.35 43.1902 114.067 43.7139C114.796 44.2263 115.719 44.4824 116.835 44.4824C117.905 44.4824 118.85 44.2718 119.67 43.8506C120.49 43.4179 121.134 42.8371 121.601 42.1084C122.067 41.3798 122.301 40.554 122.301 39.6318V36.8477Z"
          fill="black"
        />
        <path
          fill-rule="evenodd"
          clip-rule="evenodd"
          d="M256 14.0283V49.0088H248.826V44.8066H248.52C248.178 45.5353 247.694 46.2583 247.067 46.9756C246.453 47.6815 245.65 48.2685 244.659 48.7354C243.68 49.2022 242.484 49.4355 241.072 49.4355C239.08 49.4355 237.275 48.9231 235.658 47.8984C234.053 46.8623 232.777 45.3419 231.832 43.3379C230.898 41.3224 230.432 38.8512 230.432 35.9248C230.432 32.9188 230.915 30.4194 231.883 28.4268C232.851 26.4227 234.138 24.9252 235.743 23.9346C237.36 22.9326 239.131 22.4317 241.055 22.4316C242.524 22.4316 243.748 22.6826 244.728 23.1836C245.718 23.6732 246.515 24.2883 247.118 25.0283C247.733 25.757 248.201 26.4738 248.52 27.1797H248.741V14.0283H256ZM243.378 28.2217C242.182 28.2217 241.174 28.5463 240.354 29.1953C239.535 29.8444 238.914 30.7445 238.493 31.8945C238.072 33.0445 237.861 34.3764 237.861 35.8906C237.861 37.4163 238.072 38.7657 238.493 39.9385C238.926 41.0999 239.546 42.0114 240.354 42.6719C241.174 43.3209 242.182 43.6455 243.378 43.6455C244.551 43.6455 245.541 43.3261 246.35 42.6885C247.169 42.0394 247.796 41.1341 248.229 39.9727C248.673 38.8113 248.895 37.4505 248.895 35.8906C248.895 34.3309 248.679 32.9761 248.246 31.8262C247.813 30.6761 247.187 29.7874 246.367 29.1611C245.547 28.5349 244.551 28.2217 243.378 28.2217Z"
          fill="black"
        />
        <path
          d="M99.0752 16.4883V22.7734H104.012V28.2393H99.0752V40.9463C99.0752 41.6179 99.178 42.1418 99.3828 42.5176C99.5878 42.882 99.8729 43.1381 100.237 43.2861C100.613 43.4341 101.046 43.5088 101.535 43.5088C101.877 43.5088 102.218 43.4798 102.56 43.4229C102.901 43.3545 103.164 43.3037 103.346 43.2695L104.49 48.6836C104.126 48.7974 103.613 48.9292 102.953 49.0771C102.293 49.2366 101.489 49.333 100.544 49.3672C98.7906 49.4354 97.2534 49.2021 95.9326 48.667C94.6232 48.1318 93.6037 47.3001 92.875 46.1729C92.1464 45.0457 91.7885 43.6224 91.7998 41.9033V28.2393H88.2129V22.7734H91.7998V16.4883H99.0752Z"
          fill="black"
        />
        <path
          d="M198.397 41.1514C198.409 41.9824 198.556 42.5861 198.841 42.9619C199.137 43.3263 199.639 43.5088 200.345 43.5088C200.709 43.4974 200.993 43.4745 201.198 43.4404C201.403 43.4063 201.574 43.3607 201.711 43.3037L202.872 48.5986C202.496 48.7125 202.035 48.8318 201.488 48.957C200.953 49.0709 200.23 49.1446 199.319 49.1787C196.53 49.2812 194.469 48.7575 193.137 47.6074C191.804 46.446 191.132 44.6187 191.121 42.125V14.0283H198.397V41.1514Z"
          fill="black"
        />
        <path
          d="M148.839 22.4316C150.661 22.4316 152.249 22.8299 153.604 23.627C154.959 24.4239 156.012 25.5629 156.764 27.043C157.515 28.5118 157.892 30.2656 157.892 32.3037V49.0088H150.615V33.6025C150.627 31.9972 150.217 30.7443 149.386 29.8447C148.555 28.9338 147.41 28.4785 145.952 28.4785C144.973 28.4785 144.108 28.6891 143.356 29.1104C142.616 29.5317 142.036 30.1466 141.614 30.9551C141.204 31.752 140.994 32.7138 140.982 33.8408V49.0088H133.706V22.7734H140.641V27.4023H140.948C141.529 25.8766 142.502 24.6694 143.868 23.7812C145.235 22.8817 146.892 22.4316 148.839 22.4316Z"
          fill="black"
        />
      </svg>
    </div>
  );
}
+43
ogre/src/components/shared/metrics.tsx
··· 1 + import { Row, Col } from "./layout"; 2 + import { TYPOGRAPHY } from "./constants"; 3 + import { 4 + Star, 5 + GitPullRequest, 6 + CircleDot, 7 + type LucideIcon, 8 + } from "../../icons/lucide"; 9 + 10 + interface MetricsProps { 11 + stars: number; 12 + pulls: number; 13 + issues: number; 14 + } 15 + 16 + // Display stars, pulls, issues with Lucide icons 17 + export function Metrics({ stars, pulls, issues }: MetricsProps) { 18 + return ( 19 + <Row style={{ gap: 56, alignItems: "flex-start" }}> 20 + <MetricItem value={stars} label="stars" Icon={Star} /> 21 + <MetricItem value={pulls} label="pulls" Icon={GitPullRequest} /> 22 + <MetricItem value={issues} label="issues" Icon={CircleDot} /> 23 + </Row> 24 + ); 25 + } 26 + 27 + interface MetricItemProps { 28 + value: number; 29 + label: string; 30 + Icon: LucideIcon; 31 + } 32 + 33 + function MetricItem({ value, label, Icon }: MetricItemProps) { 34 + return ( 35 + <Col style={{ gap: 12 }}> 36 + <Row style={{ gap: 12, alignItems: "center" }}> 37 + <span style={TYPOGRAPHY.metricValue}>{value}</span> 38 + <Icon size={48} /> 39 + </Row> 40 + <span style={{ ...TYPOGRAPHY.label, opacity: 0.75 }}>{label}</span> 41 + </Col> 42 + ); 43 + }
+17
ogre/src/components/shared/stat-item.tsx
··· 1 + import { Row } from "./layout"; 2 + import { TYPOGRAPHY } from "./constants"; 3 + import type { LucideIcon } from "../../icons/lucide"; 4 + 5 + interface StatItemProps { 6 + Icon: LucideIcon; 7 + value: string | number; 8 + } 9 + 10 + export function StatItem({ Icon, value }: StatItemProps) { 11 + return ( 12 + <Row style={{ gap: 16 }}> 13 + <Icon size={36} color="#404040" /> 14 + <span style={{ ...TYPOGRAPHY.body, color: "#404040" }}>{value}</span> 15 + </Row> 16 + ); 17 + }
+73
ogre/src/components/shared/status-badge.tsx
··· 1 + import { Row } from "./layout"; 2 + import { 3 + CircleDot, 4 + Ban, 5 + GitPullRequest, 6 + GitPullRequestClosed, 7 + GitMerge, 8 + } from "../../icons/lucide"; 9 + import { COLORS, TYPOGRAPHY } from "./constants"; 10 + 11 + const STATUS_CONFIG = { 12 + open: { 13 + Icon: CircleDot, 14 + bg: COLORS.status.open.bg, 15 + text: COLORS.status.open.text, 16 + }, 17 + closed: { 18 + Icon: Ban, 19 + bg: COLORS.status.closed.bg, 20 + text: COLORS.status.closed.text, 21 + }, 22 + merged: { 23 + Icon: GitMerge, 24 + bg: COLORS.status.merged.bg, 25 + text: COLORS.status.merged.text, 26 + }, 27 + } as const; 28 + 29 + interface StatusBadgeProps { 30 + status: "open" | "closed" | "merged"; 31 + } 32 + 33 + export function StatusBadge({ status }: StatusBadgeProps) { 34 + const config = 35 + status === "merged" 36 + ? STATUS_CONFIG.merged 37 + : status === "closed" 38 + ? STATUS_CONFIG.closed 39 + : STATUS_CONFIG.open; 40 + const Icon = config.Icon; 41 + 42 + return ( 43 + <Row 44 + style={{ 45 + gap: 12, 46 + padding: "14px 26px 14px 24px", 47 + borderRadius: 18, 48 + backgroundColor: config.bg, 49 + }}> 50 + <Icon size={48} color={config.text} /> 51 + <span style={{ ...TYPOGRAPHY.status, color: config.text }}>{status}</span> 52 + </Row> 53 + ); 54 + } 55 + 56 + export function IssueStatusBadge({ status }: { status: "open" | "closed" }) { 57 + const config = 58 + status === "closed" ? STATUS_CONFIG.closed : STATUS_CONFIG.open; 59 + const Icon = config.Icon; 60 + 61 + return ( 62 + <Row 63 + style={{ 64 + gap: 12, 65 + padding: "14px 26px 14px 24px", 66 + borderRadius: 18, 67 + backgroundColor: config.bg, 68 + }}> 69 + <Icon size={48} color={config.text} /> 70 + <span style={{ ...TYPOGRAPHY.status, color: config.text }}>{status}</span> 71 + </Row> 72 + ); 73 + }
+52
ogre/src/icons/lucide.tsx
··· 1 + import { h } from "preact"; 2 + import iconNodes from "lucide-static/icon-nodes.json"; 3 + 4 + interface IconProps { 5 + size?: number; 6 + color?: string; 7 + strokeWidth?: number; 8 + } 9 + 10 + type IconNodeEntry = [string, Record<string, string | number>]; 11 + 12 + function createIcon(name: string) { 13 + const nodes = (iconNodes as unknown as Record<string, IconNodeEntry[]>)[name]; 14 + if (!nodes) throw new Error(`Icon "${name}" not found`); 15 + 16 + return function Icon({ 17 + size = 24, 18 + color = "currentColor", 19 + strokeWidth = 2, 20 + }: IconProps = {}) { 21 + return h( 22 + "svg", 23 + { 24 + xmlns: "http://www.w3.org/2000/svg", 25 + width: size, 26 + height: size, 27 + viewBox: "0 0 24 24", 28 + fill: "none", 29 + stroke: color, 30 + strokeWidth, 31 + strokeLinecap: "round" as const, 32 + strokeLinejoin: "round" as const, 33 + }, 34 + nodes.map(([tag, attrs], i) => h(tag, { key: i, ...attrs })), 35 + ); 36 + }; 37 + } 38 + 39 + export const Star = createIcon("star"); 40 + export const GitPullRequest = createIcon("git-pull-request"); 41 + export const GitPullRequestClosed = createIcon("git-pull-request-closed"); 42 + export const GitMerge = createIcon("git-merge"); 43 + export const CircleDot = createIcon("circle-dot"); 44 + export const Calendar = createIcon("calendar"); 45 + export const MessageSquare = createIcon("message-square"); 46 + export const MessageSquareCode = createIcon("message-square-code"); 47 + export const Ban = createIcon("ban"); 48 + export const SmilePlus = createIcon("smile-plus"); 49 + export const FileDiff = createIcon("file-diff"); 50 + export const RefreshCw = createIcon("refresh-cw"); 51 + 52 + export type LucideIcon = typeof Star;
+96
ogre/src/index.tsx
import { cardPayloadSchema } from "./validation";
import { renderCard } from "./lib/render";
import { RepositoryCard } from "./components/cards/repository";
import { IssueCard } from "./components/cards/issue";
import { PullRequestCard } from "./components/cards/pull-request";
import { z } from "zod";

declare global {
  interface CacheStorage {
    default: Cache;
  }
}

interface Env {
  ENVIRONMENT: string;
}

export default {
  /**
   * POST-only endpoint: validates the JSON card payload, renders the
   * matching card component to PNG, and caches the result keyed on the
   * normalized payload.
   *
   * Fixes vs. previous revision:
   * - removed unused `url` / `cardType` locals (route type comes from the
   *   validated payload, not the path);
   * - stack traces are no longer returned to clients in production —
   *   leaking internals from a handler that accepts untrusted input is an
   *   information-disclosure risk. `env.ENVIRONMENT` was already bound but
   *   unused.
   */
  async fetch(request: Request, env: Env): Promise<Response> {
    if (request.method !== "POST") {
      return new Response("Method not allowed", { status: 405 });
    }

    try {
      const body = await request.json();
      const payload = cardPayloadSchema.parse(body);

      let component;
      switch (payload.type) {
        case "repository":
          component = <RepositoryCard {...payload} />;
          break;
        case "issue":
          component = <IssueCard {...payload} />;
          break;
        case "pullRequest":
          component = <PullRequestCard {...payload} />;
          break;
        default:
          // Defensive: unreachable while the schema is a closed union.
          return new Response("Unknown card type", { status: 400 });
      }

      // Cache key: a GET request with the validated payload serialized into
      // the query string (the Cache API does not cache POST requests).
      const cacheKeyUrl = new URL(request.url);
      cacheKeyUrl.searchParams.set("payload", JSON.stringify(payload));
      const cacheKey = new Request(cacheKeyUrl.toString(), { method: "GET" });
      const cache = caches.default;
      const cached = await cache.match(cacheKey);
      if (cached) {
        return cached;
      }

      const { png: pngBuffer } = await renderCard(component);

      const response = new Response(pngBuffer as any, {
        headers: {
          "Content-Type": "image/png",
          "Cache-Control": "public, max-age=3600",
        },
      });

      await cache.put(cacheKey, response.clone());

      return response;
    } catch (error) {
      // Schema violations are client errors; `instanceof` already narrows,
      // so no cast is needed to read `.issues`.
      if (error instanceof z.ZodError) {
        return new Response(JSON.stringify({ errors: error.issues }), {
          status: 400,
          headers: { "Content-Type": "application/json" },
        });
      }

      console.error("Error generating card:", error);
      const errorMessage =
        error instanceof Error ? error.message : String(error);
      const errorStack = error instanceof Error ? error.stack : "";
      console.error("Error stack:", errorStack);

      // Only expose the message/stack outside production; clients get a
      // generic error otherwise (details remain in the logs above).
      const responseBody =
        env.ENVIRONMENT === "production"
          ? { error: "Internal server error" }
          : { error: errorMessage, stack: errorStack };
      return new Response(JSON.stringify(responseBody), {
        status: 500,
        headers: { "Content-Type": "application/json" },
      });
    }
  },
};
+46
ogre/src/lib/render.ts
··· 1 + import type { VNode } from "preact"; 2 + import { initSatori, initResvg, loadFonts } from "@tangled/ogre-runtime"; 3 + import type { ResvgClass } from "@tangled/ogre-runtime/types"; 4 + 5 + let satoriFn: typeof import("satori").default | null = null; 6 + let Resvg: ResvgClass | null = null; 7 + let fontsLoaded = false; 8 + let cachedFonts: Awaited<ReturnType<typeof loadFonts>> | null = null; 9 + 10 + export interface RenderResult { 11 + svg: string; 12 + png: Uint8Array; 13 + } 14 + 15 + export async function renderCard(component: VNode): Promise<RenderResult> { 16 + if (!satoriFn) { 17 + satoriFn = await initSatori(); 18 + } 19 + 20 + if (!Resvg) { 21 + Resvg = await initResvg(); 22 + } 23 + 24 + if (!fontsLoaded) { 25 + cachedFonts = await loadFonts(); 26 + fontsLoaded = true; 27 + } 28 + 29 + const svg = await satoriFn(component as any, { 30 + width: 1200, 31 + height: 630, 32 + fonts: cachedFonts!, 33 + embedFont: true, 34 + }); 35 + 36 + const resvg = new Resvg!(svg, { 37 + fitTo: { mode: "width", value: 1200 }, 38 + }); 39 + 40 + const pngData = resvg.render(); 41 + 42 + return { 43 + svg, 44 + png: pngData.asPng(), 45 + }; 46 + }
+9
ogre/src/types.d.ts
··· 1 + declare module "*.wasm?module" { 2 + const value: WebAssembly.Module; 3 + export default value; 4 + } 5 + 6 + declare module "*.woff" { 7 + const value: ArrayBuffer; 8 + export default value; 9 + }
+70
ogre/src/validation.ts
··· 1 + import { z } from "zod"; 2 + 3 + const hexColor = /^#[0-9A-Fa-f]{6}$/; 4 + 5 + const languageSchema = z.object({ 6 + color: z.string().regex(hexColor), 7 + percentage: z.number().min(0).max(100), 8 + }); 9 + 10 + export const repositoryCardSchema = z.object({ 11 + type: z.literal("repository"), 12 + repoName: z.string().min(1).max(100), 13 + ownerHandle: z.string().min(1).max(100), 14 + stars: z.number().int().min(0).max(1000000), 15 + pulls: z.number().int().min(0).max(100000), 16 + issues: z.number().int().min(0).max(100000), 17 + createdAt: z.string().max(100), 18 + avatarUrl: z.string().url(), 19 + languages: z.array(languageSchema).max(5), 20 + }); 21 + 22 + export const issueCardSchema = z.object({ 23 + type: z.literal("issue"), 24 + repoName: z.string().min(1).max(100), 25 + ownerHandle: z.string().min(1).max(100), 26 + avatarUrl: z.string().url(), 27 + title: z.string().min(1).max(500), 28 + issueNumber: z.number().int().positive(), 29 + status: z.enum(["open", "closed"]), 30 + labels: z 31 + .array( 32 + z.object({ 33 + name: z.string().max(50), 34 + color: z.string().regex(hexColor), 35 + }), 36 + ) 37 + .max(10), 38 + commentCount: z.number().int().min(0), 39 + reactionCount: z.number().int().min(0), 40 + createdAt: z.string(), 41 + }); 42 + 43 + export const pullRequestCardSchema = z.object({ 44 + type: z.literal("pullRequest"), 45 + repoName: z.string().min(1).max(100), 46 + ownerHandle: z.string().min(1).max(100), 47 + avatarUrl: z.string().url(), 48 + title: z.string().min(1).max(500), 49 + pullRequestNumber: z.number().int().positive(), 50 + status: z.enum(["open", "closed", "merged"]), 51 + filesChanged: z.number().int().min(0), 52 + additions: z.number().int().min(0), 53 + deletions: z.number().int().min(0), 54 + rounds: z.number().int().min(1), 55 + // reviews: z.number().int().min(0), // TODO: implement review tracking 56 + commentCount: z.number().int().min(0), 57 + reactionCount: z.number().int().min(0), 58 + createdAt: z.string(), 59 
+ }); 60 + 61 + export const cardPayloadSchema = z.discriminatedUnion("type", [ 62 + repositoryCardSchema, 63 + issueCardSchema, 64 + pullRequestCardSchema, 65 + ]); 66 + 67 + export type Language = z.infer<typeof languageSchema>; 68 + export type RepositoryCardData = z.infer<typeof repositoryCardSchema>; 69 + export type IssueCardData = z.infer<typeof issueCardSchema>; 70 + export type PullRequestCardData = z.infer<typeof pullRequestCardSchema>;
+19
ogre/tsconfig.json
··· 1 + { 2 + "compilerOptions": { 3 + "target": "ES2022", 4 + "module": "ES2022", 5 + "lib": ["ES2022"], 6 + "moduleResolution": "bundler", 7 + "types": ["@cloudflare/workers-types", "node", "bun"], 8 + "jsx": "react-jsx", 9 + "jsxImportSource": "preact", 10 + "strict": true, 11 + "esModuleInterop": true, 12 + "skipLibCheck": true, 13 + "forceConsistentCasingInFileNames": true, 14 + "resolveJsonModule": true, 15 + "noEmit": true 16 + }, 17 + "include": ["src/**/*"], 18 + "exclude": ["node_modules"] 19 + }
+25
ogre/wrangler.jsonc
··· 1 + { 2 + "$schema": "node_modules/wrangler/config-schema.json", 3 + "name": "ogre", 4 + "main": "src/index.tsx", 5 + "compatibility_date": "2026-03-07", 6 + "observability": { 7 + "enabled": true, 8 + }, 9 + "routes": [ 10 + { 11 + "pattern": "ogre.tangled.network", 12 + "custom_domain": true, 13 + }, 14 + ], 15 + "vars": { 16 + "ENVIRONMENT": "production", 17 + }, 18 + "rules": [ 19 + { 20 + "type": "Data", 21 + "globs": ["**/*.woff"], 22 + "fallthrough": true, 23 + }, 24 + ], 25 + }
+1
package.json
··· 1 + {}
pr.png
+1
spindle/db/db.go
··· 18 18 "_journal_mode=WAL", 19 19 "_synchronous=NORMAL", 20 20 "_auto_vacuum=incremental", 21 + "_busy_timeout=5000", 21 22 } 22 23 23 24 db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
+1 -1
spindle/server.go
··· 298 298 tpl := tangled.Pipeline{} 299 299 err := json.Unmarshal(msg.EventJson, &tpl) 300 300 if err != nil { 301 + s.l.Error("failed to unmarshal pipeline event", "err", err) 301 - fmt.Println("error unmarshalling", err) 302 302 return err 303 303 } 304 304
-97
spindle/xrpc/pipeline_cancelPipeline.go
··· 1 - package xrpc 2 - 3 - import ( 4 - "encoding/json" 5 - "fmt" 6 - "net/http" 7 - "strings" 8 - 9 - "github.com/bluesky-social/indigo/api/atproto" 10 - "github.com/bluesky-social/indigo/atproto/syntax" 11 - "github.com/bluesky-social/indigo/xrpc" 12 - securejoin "github.com/cyphar/filepath-securejoin" 13 - "tangled.org/core/api/tangled" 14 - "tangled.org/core/rbac" 15 - "tangled.org/core/spindle/models" 16 - xrpcerr "tangled.org/core/xrpc/errors" 17 - ) 18 - 19 - func (x *Xrpc) CancelPipeline(w http.ResponseWriter, r *http.Request) { 20 - l := x.Logger 21 - fail := func(e xrpcerr.XrpcError) { 22 - l.Error("failed", "kind", e.Tag, "error", e.Message) 23 - writeError(w, e, http.StatusBadRequest) 24 - } 25 - l.Debug("cancel pipeline") 26 - 27 - actorDid, ok := r.Context().Value(ActorDid).(syntax.DID) 28 - if !ok { 29 - fail(xrpcerr.MissingActorDidError) 30 - return 31 - } 32 - 33 - var input tangled.PipelineCancelPipeline_Input 34 - if err := json.NewDecoder(r.Body).Decode(&input); err != nil { 35 - fail(xrpcerr.GenericError(err)) 36 - return 37 - } 38 - 39 - aturi := syntax.ATURI(input.Pipeline) 40 - wid := models.WorkflowId{ 41 - PipelineId: models.PipelineId{ 42 - Knot: strings.TrimPrefix(aturi.Authority().String(), "did:web:"), 43 - Rkey: aturi.RecordKey().String(), 44 - }, 45 - Name: input.Workflow, 46 - } 47 - l.Debug("cancel pipeline", "wid", wid) 48 - 49 - // unfortunately we have to resolve repo-at here 50 - repoAt, err := syntax.ParseATURI(input.Repo) 51 - if err != nil { 52 - fail(xrpcerr.InvalidRepoError(input.Repo)) 53 - return 54 - } 55 - 56 - ident, err := x.Resolver.ResolveIdent(r.Context(), repoAt.Authority().String()) 57 - if err != nil || ident.Handle.IsInvalidHandle() { 58 - fail(xrpcerr.GenericError(fmt.Errorf("failed to resolve handle: %w", err))) 59 - return 60 - } 61 - 62 - xrpcc := xrpc.Client{Host: ident.PDSEndpoint()} 63 - resp, err := atproto.RepoGetRecord(r.Context(), &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), 
repoAt.RecordKey().String()) 64 - if err != nil { 65 - fail(xrpcerr.GenericError(err)) 66 - return 67 - } 68 - 69 - repo := resp.Value.Val.(*tangled.Repo) 70 - didSlashRepo, err := securejoin.SecureJoin(ident.DID.String(), repo.Name) 71 - if err != nil { 72 - fail(xrpcerr.GenericError(err)) 73 - return 74 - } 75 - 76 - // TODO: fine-grained role based control 77 - isRepoOwner, err := x.Enforcer.IsRepoOwner(actorDid.String(), rbac.ThisServer, didSlashRepo) 78 - if err != nil || !isRepoOwner { 79 - fail(xrpcerr.AccessControlError(actorDid.String())) 80 - return 81 - } 82 - for _, engine := range x.Engines { 83 - l.Debug("destorying workflow", "wid", wid) 84 - err = engine.DestroyWorkflow(r.Context(), wid) 85 - if err != nil { 86 - fail(xrpcerr.GenericError(fmt.Errorf("failed to destroy workflow: %w", err))) 87 - return 88 - } 89 - err = x.Db.StatusCancelled(wid, "User canceled the workflow", -1, x.Notifier) 90 - if err != nil { 91 - fail(xrpcerr.GenericError(fmt.Errorf("failed to emit status failed: %w", err))) 92 - return 93 - } 94 - } 95 - 96 - w.WriteHeader(http.StatusOK) 97 - }
+97
spindle/xrpc/pipeline_cancel_pipeline.go
··· 1 + package xrpc 2 + 3 + import ( 4 + "encoding/json" 5 + "fmt" 6 + "net/http" 7 + "strings" 8 + 9 + "github.com/bluesky-social/indigo/api/atproto" 10 + "github.com/bluesky-social/indigo/atproto/syntax" 11 + "github.com/bluesky-social/indigo/xrpc" 12 + securejoin "github.com/cyphar/filepath-securejoin" 13 + "tangled.org/core/api/tangled" 14 + "tangled.org/core/rbac" 15 + "tangled.org/core/spindle/models" 16 + xrpcerr "tangled.org/core/xrpc/errors" 17 + ) 18 + 19 + func (x *Xrpc) CancelPipeline(w http.ResponseWriter, r *http.Request) { 20 + l := x.Logger 21 + fail := func(e xrpcerr.XrpcError) { 22 + l.Error("failed", "kind", e.Tag, "error", e.Message) 23 + writeError(w, e, http.StatusBadRequest) 24 + } 25 + l.Debug("cancel pipeline") 26 + 27 + actorDid, ok := r.Context().Value(ActorDid).(syntax.DID) 28 + if !ok { 29 + fail(xrpcerr.MissingActorDidError) 30 + return 31 + } 32 + 33 + var input tangled.PipelineCancelPipeline_Input 34 + if err := json.NewDecoder(r.Body).Decode(&input); err != nil { 35 + fail(xrpcerr.GenericError(err)) 36 + return 37 + } 38 + 39 + aturi := syntax.ATURI(input.Pipeline) 40 + wid := models.WorkflowId{ 41 + PipelineId: models.PipelineId{ 42 + Knot: strings.TrimPrefix(aturi.Authority().String(), "did:web:"), 43 + Rkey: aturi.RecordKey().String(), 44 + }, 45 + Name: input.Workflow, 46 + } 47 + l.Debug("cancel pipeline", "wid", wid) 48 + 49 + // unfortunately we have to resolve repo-at here 50 + repoAt, err := syntax.ParseATURI(input.Repo) 51 + if err != nil { 52 + fail(xrpcerr.InvalidRepoError(input.Repo)) 53 + return 54 + } 55 + 56 + ident, err := x.Resolver.ResolveIdent(r.Context(), repoAt.Authority().String()) 57 + if err != nil || ident.Handle.IsInvalidHandle() { 58 + fail(xrpcerr.GenericError(fmt.Errorf("failed to resolve handle: %w", err))) 59 + return 60 + } 61 + 62 + xrpcc := xrpc.Client{Host: ident.PDSEndpoint()} 63 + resp, err := atproto.RepoGetRecord(r.Context(), &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), 
repoAt.RecordKey().String()) 64 + if err != nil { 65 + fail(xrpcerr.GenericError(err)) 66 + return 67 + } 68 + 69 + repo := resp.Value.Val.(*tangled.Repo) 70 + didSlashRepo, err := securejoin.SecureJoin(ident.DID.String(), repo.Name) 71 + if err != nil { 72 + fail(xrpcerr.GenericError(err)) 73 + return 74 + } 75 + 76 + // TODO: fine-grained role based control 77 + isRepoOwner, err := x.Enforcer.IsRepoOwner(actorDid.String(), rbac.ThisServer, didSlashRepo) 78 + if err != nil || !isRepoOwner { 79 + fail(xrpcerr.AccessControlError(actorDid.String())) 80 + return 81 + } 82 + for _, engine := range x.Engines { 83 + l.Debug("destorying workflow", "wid", wid) 84 + err = engine.DestroyWorkflow(r.Context(), wid) 85 + if err != nil { 86 + fail(xrpcerr.GenericError(fmt.Errorf("failed to destroy workflow: %w", err))) 87 + return 88 + } 89 + err = x.Db.StatusCancelled(wid, "User canceled the workflow", -1, x.Notifier) 90 + if err != nil { 91 + fail(xrpcerr.GenericError(fmt.Errorf("failed to emit status failed: %w", err))) 92 + return 93 + } 94 + } 95 + 96 + w.WriteHeader(http.StatusOK) 97 + }
+3
tapc/readme.md
··· 1 + Basic tap client package. 2 + 3 + Replace this with the official indigo package once <https://github.com/bluesky-social/indigo/pull/1241> is merged.
+24
tapc/simple_indexer.go
··· 1 + package tapc 2 + 3 + import "context" 4 + 5 + type SimpleIndexer struct { 6 + EventHandler func(ctx context.Context, evt Event) error 7 + ErrorHandler func(ctx context.Context, err error) 8 + } 9 + 10 + var _ Handler = (*SimpleIndexer)(nil) 11 + 12 + func (i *SimpleIndexer) OnEvent(ctx context.Context, evt Event) error { 13 + if i.EventHandler == nil { 14 + return nil 15 + } 16 + return i.EventHandler(ctx, evt) 17 + } 18 + 19 + func (i *SimpleIndexer) OnError(ctx context.Context, err error) { 20 + if i.ErrorHandler == nil { 21 + return 22 + } 23 + i.ErrorHandler(ctx, err) 24 + }
+170
tapc/tap.go
··· 1 + /// heavily inspired by <https://github.com/bluesky-social/atproto/blob/c7f5a868837d3e9b3289f988fee2267789327b06/packages/tap/README.md> 2 + 3 + package tapc 4 + 5 + import ( 6 + "bytes" 7 + "context" 8 + "encoding/json" 9 + "fmt" 10 + "net/http" 11 + "net/url" 12 + "time" 13 + 14 + "github.com/bluesky-social/indigo/atproto/syntax" 15 + "github.com/gorilla/websocket" 16 + "tangled.org/core/log" 17 + ) 18 + 19 + type Handler interface { 20 + OnEvent(ctx context.Context, evt Event) error 21 + OnError(ctx context.Context, err error) 22 + } 23 + 24 + type Client struct { 25 + Url string 26 + AdminPassword string 27 + HTTPClient *http.Client 28 + } 29 + 30 + func NewClient(url, adminPassword string) Client { 31 + return Client{ 32 + Url: url, 33 + AdminPassword: adminPassword, 34 + HTTPClient: &http.Client{}, 35 + } 36 + } 37 + 38 + func (c *Client) AddRepos(ctx context.Context, dids []syntax.DID) error { 39 + body, err := json.Marshal(map[string][]syntax.DID{"dids": dids}) 40 + if err != nil { 41 + return err 42 + } 43 + req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/add", bytes.NewReader(body)) 44 + if err != nil { 45 + return err 46 + } 47 + req.SetBasicAuth("admin", c.AdminPassword) 48 + req.Header.Set("Content-Type", "application/json") 49 + 50 + resp, err := c.HTTPClient.Do(req) 51 + if err != nil { 52 + return err 53 + } 54 + defer resp.Body.Close() 55 + if resp.StatusCode != http.StatusOK { 56 + return fmt.Errorf("tap: /repos/add failed with status %d", resp.StatusCode) 57 + } 58 + return nil 59 + } 60 + 61 + func (c *Client) RemoveRepos(ctx context.Context, dids []syntax.DID) error { 62 + body, err := json.Marshal(map[string][]syntax.DID{"dids": dids}) 63 + if err != nil { 64 + return err 65 + } 66 + req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/remove", bytes.NewReader(body)) 67 + if err != nil { 68 + return err 69 + } 70 + req.SetBasicAuth("admin", c.AdminPassword) 71 + req.Header.Set("Content-Type", 
"application/json") 72 + 73 + resp, err := c.HTTPClient.Do(req) 74 + if err != nil { 75 + return err 76 + } 77 + defer resp.Body.Close() 78 + if resp.StatusCode != http.StatusOK { 79 + return fmt.Errorf("tap: /repos/remove failed with status %d", resp.StatusCode) 80 + } 81 + return nil 82 + } 83 + 84 + func (c *Client) Connect(ctx context.Context, handler Handler) error { 85 + l := log.FromContext(ctx) 86 + 87 + u, err := url.Parse(c.Url) 88 + if err != nil { 89 + return err 90 + } 91 + if u.Scheme == "https" { 92 + u.Scheme = "wss" 93 + } else { 94 + u.Scheme = "ws" 95 + } 96 + u.Path = "/channel" 97 + 98 + // TODO: set auth on dial 99 + 100 + url := u.String() 101 + 102 + var backoff int 103 + for { 104 + select { 105 + case <-ctx.Done(): 106 + return ctx.Err() 107 + default: 108 + } 109 + 110 + header := http.Header{ 111 + "Authorization": []string{""}, 112 + } 113 + conn, res, err := websocket.DefaultDialer.DialContext(ctx, url, header) 114 + if err != nil { 115 + l.Warn("dialing failed", "url", url, "err", err, "backoff", backoff) 116 + time.Sleep(time.Duration(5+backoff) * time.Second) 117 + backoff++ 118 + 119 + continue 120 + } 121 + l.Info("connected to tap service") 122 + 123 + l.Info("tap event subscription response", "code", res.StatusCode) 124 + 125 + if err = c.handleConnection(ctx, conn, handler); err != nil { 126 + l.Warn("tap connection failed", "err", err, "backoff", backoff) 127 + } 128 + } 129 + } 130 + 131 + func (c *Client) handleConnection(ctx context.Context, conn *websocket.Conn, handler Handler) error { 132 + l := log.FromContext(ctx) 133 + 134 + defer func() { 135 + conn.Close() 136 + l.Warn("closed tap conection") 137 + }() 138 + l.Info("established tap conection") 139 + 140 + for { 141 + select { 142 + case <-ctx.Done(): 143 + return ctx.Err() 144 + default: 145 + } 146 + _, message, err := conn.ReadMessage() 147 + if err != nil { 148 + return err 149 + } 150 + 151 + var ev Event 152 + if err := json.Unmarshal(message, &ev); err != nil 
{ 153 + handler.OnError(ctx, fmt.Errorf("failed to parse message: %w", err)) 154 + continue 155 + } 156 + if err := handler.OnEvent(ctx, ev); err != nil { 157 + handler.OnError(ctx, fmt.Errorf("failed to process event %d: %w", ev.ID, err)) 158 + continue 159 + } 160 + 161 + ack := map[string]any{ 162 + "type": "ack", 163 + "id": ev.ID, 164 + } 165 + if err := conn.WriteJSON(ack); err != nil { 166 + l.Warn("failed to send ack", "err", err) 167 + continue 168 + } 169 + } 170 + }
+62
tapc/types.go
··· 1 + package tapc 2 + 3 + import ( 4 + "encoding/json" 5 + "fmt" 6 + 7 + "github.com/bluesky-social/indigo/atproto/syntax" 8 + ) 9 + 10 + type EventType string 11 + 12 + const ( 13 + EvtRecord EventType = "record" 14 + EvtIdentity EventType = "identity" 15 + ) 16 + 17 + type Event struct { 18 + ID int64 `json:"id"` 19 + Type EventType `json:"type"` 20 + Record *RecordEventData `json:"record,omitempty"` 21 + Identity *IdentityEventData `json:"identity,omitempty"` 22 + } 23 + 24 + type RecordEventData struct { 25 + Live bool `json:"live"` 26 + Did syntax.DID `json:"did"` 27 + Rev string `json:"rev"` 28 + Collection syntax.NSID `json:"collection"` 29 + Rkey syntax.RecordKey `json:"rkey"` 30 + Action RecordAction `json:"action"` 31 + Record json.RawMessage `json:"record,omitempty"` 32 + CID *syntax.CID `json:"cid,omitempty"` 33 + } 34 + 35 + func (r *RecordEventData) AtUri() syntax.ATURI { 36 + return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, r.Collection, r.Rkey)) 37 + } 38 + 39 + type RecordAction string 40 + 41 + const ( 42 + RecordCreateAction RecordAction = "create" 43 + RecordUpdateAction RecordAction = "update" 44 + RecordDeleteAction RecordAction = "delete" 45 + ) 46 + 47 + type IdentityEventData struct { 48 + DID syntax.DID `json:"did"` 49 + Handle string `json:"handle"` 50 + IsActive bool `json:"is_active"` 51 + Status RepoStatus `json:"status"` 52 + } 53 + 54 + type RepoStatus string 55 + 56 + const ( 57 + RepoStatusActive RepoStatus = "active" 58 + RepoStatusTakendown RepoStatus = "takendown" 59 + RepoStatusSuspended RepoStatus = "suspended" 60 + RepoStatusDeactivated RepoStatus = "deactivated" 61 + RepoStatusDeleted RepoStatus = "deleted" 62 + )
+1 -6
types/diff.go
··· 84 84 func (d NiceDiff) FileTree() *filetree.FileTreeNode { 85 85 fs := make([]string, len(d.Diff)) 86 86 for i, s := range d.Diff { 87 + fs[i] = s.Id() 87 - n := s.Names() 88 - if n.New == "" { 89 - fs[i] = n.Old 90 - } else { 91 - fs[i] = n.New 92 - } 93 88 } 94 89 return filetree.FileTree(fs) 95 90 }

History

2 rounds 0 comments
sign up or login to add to the discussion
5 commits
expand
api/tangled: add types for branch rules
knotserver/xrpc,db: add table and crud endpoints for branch rules
appview/pages,oauth: add ux and validation for managing branch rules in settings
flake: add git as knot dependency
knotserver/hook: add git hook for checking branch rules
merge conflicts detected
expand
  • knotserver/internal.go:7
expand 0 comments
5 commits
expand
api/tangled: add types for branch rules
knotserver/xrpc,db: add table and crud endpoints for branch rules
appview/pages,oauth: add ux and validation for managing branch rules in settings
flake: add git as knot dependency
knotserver/hook: add git hook for checking branch rules
expand 0 comments