lexicons: pulls: add stack information to pull records #405

open
opened by nel.pet targeting master from nel.pet/core: push-kyupnpkvqmsy
Changed files
+306 -20
api
appview
cmd
lexicons
pulls
+202 -1
api/tangled/cbor_gen.go
··· 6939 6939 6940 6940 return nil 6941 6941 } 6942 + func (t *RepoPull_StackInfo) MarshalCBOR(w io.Writer) error { 6943 + if t == nil { 6944 + _, err := w.Write(cbg.CborNull) 6945 + return err 6946 + } 6947 + 6948 + cw := cbg.NewCborWriter(w) 6949 + fieldCount := 2 6950 + 6951 + if t.Parent == nil { 6952 + fieldCount-- 6953 + } 6954 + 6955 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 6956 + return err 6957 + } 6958 + 6959 + // t.Parent (string) (string) 6960 + if t.Parent != nil { 6961 + 6962 + if len("parent") > 1000000 { 6963 + return xerrors.Errorf("Value in field \"parent\" was too long") 6964 + } 6965 + 6966 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("parent"))); err != nil { 6967 + return err 6968 + } 6969 + if _, err := cw.WriteString(string("parent")); err != nil { 6970 + return err 6971 + } 6972 + 6973 + if t.Parent == nil { 6974 + if _, err := cw.Write(cbg.CborNull); err != nil { 6975 + return err 6976 + } 6977 + } else { 6978 + if len(*t.Parent) > 1000000 { 6979 + return xerrors.Errorf("Value in field t.Parent was too long") 6980 + } 6981 + 6982 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Parent))); err != nil { 6983 + return err 6984 + } 6985 + if _, err := cw.WriteString(string(*t.Parent)); err != nil { 6986 + return err 6987 + } 6988 + } 6989 + } 6990 + 6991 + // t.ChangeId (string) (string) 6992 + if len("changeId") > 1000000 { 6993 + return xerrors.Errorf("Value in field \"changeId\" was too long") 6994 + } 6995 + 6996 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("changeId"))); err != nil { 6997 + return err 6998 + } 6999 + if _, err := cw.WriteString(string("changeId")); err != nil { 7000 + return err 7001 + } 7002 + 7003 + if len(t.ChangeId) > 1000000 { 7004 + return xerrors.Errorf("Value in field t.ChangeId was too long") 7005 + } 7006 + 7007 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ChangeId))); err != 
nil { 7008 + return err 7009 + } 7010 + if _, err := cw.WriteString(string(t.ChangeId)); err != nil { 7011 + return err 7012 + } 7013 + return nil 7014 + } 7015 + 7016 + func (t *RepoPull_StackInfo) UnmarshalCBOR(r io.Reader) (err error) { 7017 + *t = RepoPull_StackInfo{} 7018 + 7019 + cr := cbg.NewCborReader(r) 7020 + 7021 + maj, extra, err := cr.ReadHeader() 7022 + if err != nil { 7023 + return err 7024 + } 7025 + defer func() { 7026 + if err == io.EOF { 7027 + err = io.ErrUnexpectedEOF 7028 + } 7029 + }() 7030 + 7031 + if maj != cbg.MajMap { 7032 + return fmt.Errorf("cbor input should be of type map") 7033 + } 7034 + 7035 + if extra > cbg.MaxLength { 7036 + return fmt.Errorf("RepoPull_StackInfo: map struct too large (%d)", extra) 7037 + } 7038 + 7039 + n := extra 7040 + 7041 + nameBuf := make([]byte, 8) 7042 + for i := uint64(0); i < n; i++ { 7043 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 7044 + if err != nil { 7045 + return err 7046 + } 7047 + 7048 + if !ok { 7049 + // Field doesn't exist on this type, so ignore it 7050 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 7051 + return err 7052 + } 7053 + continue 7054 + } 7055 + 7056 + switch string(nameBuf[:nameLen]) { 7057 + // t.Parent (string) (string) 7058 + case "parent": 7059 + 7060 + { 7061 + b, err := cr.ReadByte() 7062 + if err != nil { 7063 + return err 7064 + } 7065 + if b != cbg.CborNull[0] { 7066 + if err := cr.UnreadByte(); err != nil { 7067 + return err 7068 + } 7069 + 7070 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 7071 + if err != nil { 7072 + return err 7073 + } 7074 + 7075 + t.Parent = (*string)(&sval) 7076 + } 7077 + } 7078 + // t.ChangeId (string) (string) 7079 + case "changeId": 7080 + 7081 + { 7082 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 7083 + if err != nil { 7084 + return err 7085 + } 7086 + 7087 + t.ChangeId = string(sval) 7088 + } 7089 + 7090 + default: 7091 + // Field doesn't exist on this type, so ignore it 7092 + if err := 
cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 7093 + return err 7094 + } 7095 + } 7096 + } 7097 + 7098 + return nil 7099 + } 6942 7100 func (t *RepoPull_Target) MarshalCBOR(w io.Writer) error { 6943 7101 if t == nil { 6944 7102 _, err := w.Write(cbg.CborNull) ··· 7080 7238 } 7081 7239 7082 7240 cw := cbg.NewCborWriter(w) 7083 - fieldCount := 7 7241 + fieldCount := 8 7084 7242 7085 7243 if t.Body == nil { 7086 7244 fieldCount-- ··· 7090 7248 fieldCount-- 7091 7249 } 7092 7250 7251 + if t.StackInfo == nil { 7252 + fieldCount-- 7253 + } 7254 + 7093 7255 if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 7094 7256 return err 7095 7257 } ··· 7248 7410 if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 7249 7411 return err 7250 7412 } 7413 + 7414 + // t.StackInfo (tangled.RepoPull_StackInfo) (struct) 7415 + if t.StackInfo != nil { 7416 + 7417 + if len("stackInfo") > 1000000 { 7418 + return xerrors.Errorf("Value in field \"stackInfo\" was too long") 7419 + } 7420 + 7421 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("stackInfo"))); err != nil { 7422 + return err 7423 + } 7424 + if _, err := cw.WriteString(string("stackInfo")); err != nil { 7425 + return err 7426 + } 7427 + 7428 + if err := t.StackInfo.MarshalCBOR(cw); err != nil { 7429 + return err 7430 + } 7431 + } 7251 7432 return nil 7252 7433 } 7253 7434 ··· 7397 7578 7398 7579 t.CreatedAt = string(sval) 7399 7580 } 7581 + // t.StackInfo (tangled.RepoPull_StackInfo) (struct) 7582 + case "stackInfo": 7583 + 7584 + { 7585 + 7586 + b, err := cr.ReadByte() 7587 + if err != nil { 7588 + return err 7589 + } 7590 + if b != cbg.CborNull[0] { 7591 + if err := cr.UnreadByte(); err != nil { 7592 + return err 7593 + } 7594 + t.StackInfo = new(RepoPull_StackInfo) 7595 + if err := t.StackInfo.UnmarshalCBOR(cr); err != nil { 7596 + return xerrors.Errorf("unmarshaling t.StackInfo pointer: %w", err) 7597 + } 7598 + } 7599 + 7600 + } 7400 7601 7401 7602 
default: 7402 7603 // Field doesn't exist on this type, so ignore it
+16 -7
api/tangled/repopull.go
··· 17 17 } // 18 18 // RECORDTYPE: RepoPull 19 19 type RepoPull struct { 20 - LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"` 21 - Body *string `json:"body,omitempty" cborgen:"body,omitempty"` 22 - CreatedAt string `json:"createdAt" cborgen:"createdAt"` 23 - Patch string `json:"patch" cborgen:"patch"` 24 - Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"` 25 - Target *RepoPull_Target `json:"target" cborgen:"target"` 26 - Title string `json:"title" cborgen:"title"` 20 + LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"` 21 + Body *string `json:"body,omitempty" cborgen:"body,omitempty"` 22 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 23 + Patch string `json:"patch" cborgen:"patch"` 24 + Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"` 25 + StackInfo *RepoPull_StackInfo `json:"stackInfo,omitempty" cborgen:"stackInfo,omitempty"` 26 + Target *RepoPull_Target `json:"target" cborgen:"target"` 27 + Title string `json:"title" cborgen:"title"` 27 28 } 28 29 29 30 // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema. ··· 33 34 Sha string `json:"sha" cborgen:"sha"` 34 35 } 35 36 37 + // RepoPull_StackInfo is a "stackInfo" in the sh.tangled.repo.pull schema. 38 + type RepoPull_StackInfo struct { 39 + // changeId: Change ID of this commit/change. Principally also available in the patch itself as a line in the commit footer. 40 + ChangeId string `json:"changeId" cborgen:"changeId"` 41 + // parent: AT-URI of the PR for the parent commit/change in the change stack. 42 + Parent *string `json:"parent,omitempty" cborgen:"parent,omitempty"` 43 + } 44 + 36 45 // RepoPull_Target is a "target" in the sh.tangled.repo.pull schema. 37 46 type RepoPull_Target struct { 38 47 Branch string `json:"branch" cborgen:"branch"`
+8
appview/db/db.go
··· 655 655 return err 656 656 }) 657 657 658 + 659 + runMigration(db, "add-parent-at-for-stacks-to-pulls", func(tx *sql.Tx) error { 660 + _, err := tx.Exec(` 661 + alter table pulls add column parent_at text; 662 + `) 663 + return err 664 + }) 665 + 658 666 return &DB{db}, nil 659 667 } 660 668
+37 -10
appview/db/pulls.go
··· 72 72 // stacking 73 73 StackId string // nullable string 74 74 ChangeId string // nullable string 75 + ParentAt *syntax.ATURI 75 76 ParentChangeId string // nullable string 76 77 77 78 // meta ··· 91 92 } 92 93 93 94 record := tangled.RepoPull{ 94 - Title: p.Title, 95 - Body: &p.Body, 96 - CreatedAt: p.Created.Format(time.RFC3339), 95 + Title: p.Title, 96 + Body: &p.Body, 97 + CreatedAt: p.Created.Format(time.RFC3339), 97 98 Target: &tangled.RepoPull_Target{ 98 99 Repo: p.RepoAt.String(), 99 100 Branch: p.TargetBranch, 100 101 }, 101 - Patch: p.LatestPatch(), 102 - Source: source, 102 + Patch: p.LatestPatch(), 103 + Source: source, 104 + StackInfo: &tangled.RepoPull_StackInfo{ 105 + ChangeId: p.ChangeId, 106 + Parent: (*string)(p.ParentAt), 107 + }, 103 108 } 104 109 return record 105 110 } ··· 255 260 } 256 261 } 257 262 258 - var stackId, changeId, parentChangeId *string 263 + var stackId, changeId, parentAt, parentChangeId *string 259 264 if pull.StackId != "" { 260 265 stackId = &pull.StackId 261 266 } 262 267 if pull.ChangeId != "" { 263 268 changeId = &pull.ChangeId 264 269 } 270 + if pull.ParentAt != nil { 271 + parentAt = (*string)(pull.ParentAt) 272 + } 265 273 if pull.ParentChangeId != "" { 266 274 parentChangeId = &pull.ParentChangeId 267 275 } ··· 269 277 _, err = tx.Exec( 270 278 ` 271 279 insert into pulls ( 272 - repo_at, owner_did, pull_id, title, target_branch, body, rkey, state, source_branch, source_repo_at, stack_id, change_id, parent_change_id 280 + repo_at, owner_did, pull_id, title, target_branch, body, rkey, state, source_branch, source_repo_at, stack_id, change_id, parent_at, parent_change_id 273 281 ) 274 - values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, 282 + values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, 275 283 pull.RepoAt, 276 284 pull.OwnerDid, 277 285 pull.PullId, ··· 284 292 sourceRepoAt, 285 293 stackId, 286 294 changeId, 295 + parentAt, 287 296 parentChangeId, 288 297 ) 289 298 if err != nil { ··· 341 350 
source_repo_at, 342 351 stack_id, 343 352 change_id, 353 + parent_at, 344 354 parent_change_id 345 355 from 346 356 pulls ··· 356 366 for rows.Next() { 357 367 var pull Pull 358 368 var createdAt string 359 - var sourceBranch, sourceRepoAt, stackId, changeId, parentChangeId sql.NullString 369 + var sourceBranch, sourceRepoAt, stackId, changeId, parentAt, parentChangeId sql.NullString 360 370 err := rows.Scan( 361 371 &pull.OwnerDid, 362 372 &pull.RepoAt, ··· 371 381 &sourceRepoAt, 372 382 &stackId, 373 383 &changeId, 384 + &parentAt, 374 385 &parentChangeId, 375 386 ) 376 387 if err != nil { ··· 402 413 if changeId.Valid { 403 414 pull.ChangeId = changeId.String 404 415 } 416 + if parentAt.Valid { 417 + parentAtParsed, err := syntax.ParseATURI(parentAt.String) 418 + if err != nil { 419 + return nil, err 420 + } 421 + pull.ParentAt = &parentAtParsed 422 + } 405 423 if parentChangeId.Valid { 406 424 pull.ParentChangeId = parentChangeId.String 407 425 } ··· 530 548 source_repo_at, 531 549 stack_id, 532 550 change_id, 551 + parent_at, 533 552 parent_change_id 534 553 from 535 554 pulls ··· 540 559 541 560 var pull Pull 542 561 var createdAt string 543 - var sourceBranch, sourceRepoAt, stackId, changeId, parentChangeId sql.NullString 562 + var sourceBranch, sourceRepoAt, stackId, changeId, parentAt, parentChangeId sql.NullString 544 563 err := row.Scan( 545 564 &pull.OwnerDid, 546 565 &pull.PullId, ··· 555 574 &sourceRepoAt, 556 575 &stackId, 557 576 &changeId, 577 + &parentAt, 558 578 &parentChangeId, 559 579 ) 560 580 if err != nil { ··· 587 607 if changeId.Valid { 588 608 pull.ChangeId = changeId.String 589 609 } 610 + if parentAt.Valid { 611 + parsedParentAt, err := syntax.ParseATURI(parentAt.String) 612 + if err != nil { 613 + return nil, err 614 + } 615 + pull.ParentAt = &parsedParentAt 616 + } 590 617 if parentChangeId.Valid { 591 618 pull.ParentChangeId = parentChangeId.String 592 619 }
+21 -2
appview/pulls/pulls.go
··· 1693 1693 newStack, err := newStack(f, user, targetBranch, patch, pull.PullSource, stackId) 1694 1694 if err != nil { 1695 1695 log.Println("failed to create resubmitted stack", err) 1696 - s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.") 1696 + s.pages.Notice(w, "pull-resubmit-error", "Failed to merge pull request. Try again later.") 1697 1697 return 1698 1698 } 1699 1699 1700 1700 // find the diff between the stacks, first, map them by changeId 1701 1701 origById := make(map[string]*db.Pull) 1702 1702 newById := make(map[string]*db.Pull) 1703 + chIdToAtUri := make(map[string]*syntax.ATURI) 1703 1704 for _, p := range origStack { 1704 1705 origById[p.ChangeId] = p 1706 + 1707 + // build map from change id to existing at uris (ignore error as it shouldn't be possible here) 1708 + pAtUri, _ := syntax.ParseATURI(fmt.Sprintf("at://%s/%s/%s", user.Did, tangled.RepoPullNSID, p.Rkey)) 1709 + chIdToAtUri[p.ChangeId] = &pAtUri 1705 1710 } 1706 1711 for _, p := range newStack { 1712 + // if change id has already been given a PR use its at uri instead of the newly created (and thus incorrect) 1713 + // one made by newStack 1714 + if ppAt, ok := chIdToAtUri[p.ParentChangeId]; ok { 1715 + p.ParentAt = ppAt 1716 + } 1717 + 1707 1718 newById[p.ChangeId] = p 1708 1719 } 1709 1720 ··· 1751 1762 // we still need to update the hash in submission.Patch and submission.SourceRev 1752 1763 if patchutil.Equal(newFiles, origFiles) && 1753 1764 origHeader.Title == newHeader.Title && 1754 - origHeader.Body == newHeader.Body { 1765 + origHeader.Body == newHeader.Body && 1766 + op.ParentChangeId == np.ParentChangeId { 1755 1767 unchanged[op.ChangeId] = struct{}{} 1756 1768 } else { 1757 1769 updated[op.ChangeId] = struct{}{} ··· 1825 1837 1826 1838 record := op.AsRecord() 1827 1839 record.Patch = submission.Patch 1840 + record.StackInfo.Parent = (*string)(np.ParentAt) 1828 1841 1829 1842 writes = append(writes, 
&comatproto.RepoApplyWrites_Input_Writes_Elem{ 1830 1843 RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{ ··· 2173 2186 // the stack is identified by a UUID 2174 2187 var stack db.Stack 2175 2188 parentChangeId := "" 2189 + var parentAt *syntax.ATURI = nil 2176 2190 for _, fp := range formatPatches { 2177 2191 // all patches must have a jj change-id 2178 2192 changeId, err := fp.ChangeId() ··· 2203 2217 2204 2218 StackId: stackId, 2205 2219 ChangeId: changeId, 2220 + ParentAt: parentAt, 2206 2221 ParentChangeId: parentChangeId, 2207 2222 } 2208 2223 2209 2224 stack = append(stack, &pull) 2210 2225 2211 2226 parentChangeId = changeId 2227 + // this is a bit of an ugly way to create the ATURI but it's the best we can do with the data flow here 2228 + // (ignore error as it shouldn't be possible here) 2229 + parsedParentAt, _ := syntax.ParseATURI(fmt.Sprintf("at://%s/%s/%s", user.Did, tangled.RepoPullNSID, pull.Rkey)); 2230 + parentAt = &parsedParentAt 2212 2231 } 2213 2232 2214 2233 return stack, nil
+1
cmd/gen.go
··· 46 46 tangled.RepoIssueState{}, 47 47 tangled.RepoPull{}, 48 48 tangled.RepoPullComment{}, 49 + tangled.RepoPull_StackInfo{}, 49 50 tangled.RepoPull_Source{}, 50 51 tangled.RepoPull_Target{}, 51 52 tangled.RepoPullStatus{},
+21
lexicons/pulls/pull.json
··· 29 29 "patch": { 30 30 "type": "string" 31 31 }, 32 + "stackInfo": { 33 + "type": "ref", 34 + "ref": "#stackInfo" 35 + }, 32 36 "source": { 33 37 "type": "ref", 34 38 "ref": "#source" ··· 76 80 "format": "at-uri" 77 81 } 78 82 } 83 + }, 84 + "stackInfo": { 85 + "type": "object", 86 + "required": [ 87 + "changeId" 88 + ], 89 + "properties": { 90 + "changeId": { 91 + "type": "string", 92 + "description": "Change ID of this commit/change." 93 + }, 94 + "parent": { 95 + "type": "string", 96 + "description": "AT-URI of the PR for the parent commit/change in the change stack.", 97 + "format": "at-uri" 98 + } 99 + } 79 100 } 80 101 } 81 102 }