Monorepo for Tangled (tangled.org)

Compare changes


Changed files
+8066 -3477
api
appview
commitverify
db
issues
knots
labels
mentions
middleware
models
notifications
notify
oauth
pages
pipelines
pulls
repo
reporesolver
serververify
spindles
state
strings
validator
cmd
cborgen
crypto
docs
hook
jetstream
knotserver
lexicons
nix
orm
patchutil
rbac
sets
spindle
types
+1143 -27
api/tangled/cbor_gen.go
···
 	return nil
 }
+func (t *Comment) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+
+	cw := cbg.NewCborWriter(w)
+	fieldCount := 7
+
+	if t.Mentions == nil {
+		fieldCount--
+	}
+
+	if t.References == nil {
+		fieldCount--
+	}
+
+	if t.ReplyTo == nil {
+		fieldCount--
+	}
+
+	if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
+		return err
+	}
+
+	// t.Body (string) (string)
+	if len("body") > 1000000 {
+		return xerrors.Errorf("Value in field \"body\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("body"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("body")); err != nil {
+		return err
+	}
+
+	if len(t.Body) > 1000000 {
+		return xerrors.Errorf("Value in field t.Body was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Body))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string(t.Body)); err != nil {
+		return err
+	}
+
··· (the "$type" constant "sh.tangled.comment", the optional "replyTo", "subject", the optional "mentions" slice, "createdAt", and the optional "references" slice are written with the same generated string and slice patterns; slices are capped at 8192 entries, strings at 1000000 bytes) ···
+	return nil
+}
+
+func (t *Comment) UnmarshalCBOR(r io.Reader) (err error) {
+	*t = Comment{}
+
+	cr := cbg.NewCborReader(r)
+
+	maj, extra, err := cr.ReadHeader()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+	}()
+
+	if maj != cbg.MajMap {
+		return fmt.Errorf("cbor input should be of type map")
+	}
+
+	if extra > cbg.MaxLength {
+		return fmt.Errorf("Comment: map struct too large (%d)", extra)
+	}
+
+	n := extra
+
+	nameBuf := make([]byte, 10)
+	for i := uint64(0); i < n; i++ {
+		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
+		if err != nil {
+			return err
+		}
+
+		if !ok {
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
+				return err
+			}
+			continue
+		}
+
+		switch string(nameBuf[:nameLen]) {
+		// t.Body (string) (string)
+		case "body":
+
+			{
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.Body = string(sval)
+			}
··· (the "$type", "replyTo", and "subject" cases decode strings the same way) ···
+		// t.Mentions ([]string) (slice)
+		case "mentions":
+
+			maj, extra, err = cr.ReadHeader()
+			if err != nil {
+				return err
+			}
+
+			if extra > 8192 {
+				return fmt.Errorf("t.Mentions: array too large (%d)", extra)
+			}
+
+			if maj != cbg.MajArray {
+				return fmt.Errorf("expected cbor array")
+			}
+
+			if extra > 0 {
+				t.Mentions = make([]string, extra)
+			}
+
+			for i := 0; i < int(extra); i++ {
+				{
+					var maj byte
+					var extra uint64
+					var err error
+					_ = maj
+					_ = extra
+					_ = err
+
+					{
+						sval, err := cbg.ReadStringWithMax(cr, 1000000)
+						if err != nil {
+							return err
+						}
+
+						t.Mentions[i] = string(sval)
+					}
+
+				}
+			}
··· (the "createdAt" and "references" cases follow the same string and slice patterns) ···
+
+		default:
+			// Field doesn't exist on this type, so ignore it
+			if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
 func (t *FeedReaction) MarshalCBOR(w io.Writer) error {
 	if t == nil {
 		_, err := w.Write(cbg.CborNull)
···
 	}
 
 	cw := cbg.NewCborWriter(w)
-	fieldCount := 5
+	fieldCount := 7
 
 	if t.Body == nil {
+		fieldCount--
+	}
+
+	if t.Mentions == nil {
+		fieldCount--
+	}
+
+	if t.References == nil {
 		fieldCount--
 	}
···
-	nameBuf := make([]byte, 9)
+	nameBuf := make([]byte, 10)
 	for i := uint64(0); i < n; i++ {
 		nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
 		if err != nil {
··· (the same pattern repeats across the RepoIssue, RepoIssueComment, RepoPull, and RepoPullComment codecs: each marshaler gains "mentions" and "references" write blocks plus the matching fieldCount bookkeeping, each unmarshaler gains "mentions" and "references" switch cases, and every nameBuf grows from 9 to 10 bytes) ···
 	cw := cbg.NewCborWriter(w)
-	fieldCount := 7
+	fieldCount := 10
 
 	if t.Body == nil {
+		fieldCount--
+	}
+
+	if t.Mentions == nil {
+		fieldCount--
+	}
+
+	if t.Patch == nil {
+		fieldCount--
+	}
+
+	if t.References == nil {
 		fieldCount--
 	}
···
 	// t.Patch (string) (string)
-	if len("patch") > 1000000 {
-		return xerrors.Errorf("Value in field \"patch\" was too long")
-	}
+	if t.Patch != nil {
 
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
-		return err
-	}
-	if _, err := cw.WriteString(string("patch")); err != nil {
-		return err
-	}
+		if len("patch") > 1000000 {
+			return xerrors.Errorf("Value in field \"patch\" was too long")
+		}
 
-	if len(t.Patch) > 1000000 {
-		return xerrors.Errorf("Value in field t.Patch was too long")
-	}
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
+			return err
+		}
+		if _, err := cw.WriteString(string("patch")); err != nil {
+			return err
+		}
 
-	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil {
-		return err
-	}
-	if _, err := cw.WriteString(string(t.Patch)); err != nil {
-		return err
+		if t.Patch == nil {
+			if _, err := cw.Write(cbg.CborNull); err != nil {
+				return err
+			}
+		} else {
+			if len(*t.Patch) > 1000000 {
+				return xerrors.Errorf("Value in field t.Patch was too long")
+			}
+
+			if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil {
+				return err
+			}
+			if _, err := cw.WriteString(string(*t.Patch)); err != nil {
+				return err
+			}
+		}
 	}
···
+	// t.PatchBlob (util.LexBlob) (struct)
+	if len("patchBlob") > 1000000 {
+		return xerrors.Errorf("Value in field \"patchBlob\" was too long")
+	}
+
+	if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil {
+		return err
+	}
+	if _, err := cw.WriteString(string("patchBlob")); err != nil {
+		return err
+	}
+
+	if err := t.PatchBlob.MarshalCBOR(cw); err != nil {
+		return err
+	}
···
 	// t.Patch (string) (string)
 	case "patch":
 
 		{
-			sval, err := cbg.ReadStringWithMax(cr, 1000000)
+			b, err := cr.ReadByte()
 			if err != nil {
 				return err
 			}
+			if b != cbg.CborNull[0] {
+				if err := cr.UnreadByte(); err != nil {
+					return err
+				}
 
-			t.Patch = string(sval)
+				sval, err := cbg.ReadStringWithMax(cr, 1000000)
+				if err != nil {
+					return err
+				}
+
+				t.Patch = (*string)(&sval)
+			}
 		}
···
+	// t.PatchBlob (util.LexBlob) (struct)
+	case "patchBlob":
+
+		{
+
+			b, err := cr.ReadByte()
+			if err != nil {
+				return err
+			}
+			if b != cbg.CborNull[0] {
+				if err := cr.UnreadByte(); err != nil {
+					return err
+				}
+				t.PatchBlob = new(util.LexBlob)
+				if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil {
+					return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err)
+				}
+			}
+
+		}
···
 	}
 
 	cw := cbg.NewCborWriter(w)
+	fieldCount := 6
 
-	if _, err := cw.Write([]byte{164}); err != nil {
+	if t.Mentions == nil {
+		fieldCount--
+	}
+
+	if t.References == nil {
+		fieldCount--
+	}
+
+	if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
 		return err
 	}
···
+7 -5
api/tangled/issuecomment.go
···
 } //
 // RECORDTYPE: RepoIssueComment
 type RepoIssueComment struct {
-	LexiconTypeID string  `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
-	Body          string  `json:"body" cborgen:"body"`
-	CreatedAt     string  `json:"createdAt" cborgen:"createdAt"`
-	Issue         string  `json:"issue" cborgen:"issue"`
-	ReplyTo       *string `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
+	Body          string   `json:"body" cborgen:"body"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Issue         string   `json:"issue" cborgen:"issue"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+	ReplyTo       *string  `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
 }
+6 -4
api/tangled/pullcomment.go
···
 } //
 // RECORDTYPE: RepoPullComment
 type RepoPullComment struct {
-	LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
-	Body          string `json:"body" cborgen:"body"`
-	CreatedAt     string `json:"createdAt" cborgen:"createdAt"`
-	Pull          string `json:"pull" cborgen:"pull"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
+	Body          string   `json:"body" cborgen:"body"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	Pull          string   `json:"pull" cborgen:"pull"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
 }
+7 -5
api/tangled/repoissue.go
···
 } //
 // RECORDTYPE: RepoIssue
 type RepoIssue struct {
-	LexiconTypeID string  `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
-	Body          *string `json:"body,omitempty" cborgen:"body,omitempty"`
-	CreatedAt     string  `json:"createdAt" cborgen:"createdAt"`
-	Repo          string  `json:"repo" cborgen:"repo"`
-	Title         string  `json:"title" cborgen:"title"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
+	Body          *string  `json:"body,omitempty" cborgen:"body,omitempty"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+	Repo          string   `json:"repo" cborgen:"repo"`
+	Title         string   `json:"title" cborgen:"title"`
 }
+12 -7
api/tangled/repopull.go
···
 } //
 // RECORDTYPE: RepoPull
 type RepoPull struct {
-	LexiconTypeID string           `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
-	Body          *string          `json:"body,omitempty" cborgen:"body,omitempty"`
-	CreatedAt     string           `json:"createdAt" cborgen:"createdAt"`
-	Patch         string           `json:"patch" cborgen:"patch"`
-	Source        *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
-	Target        *RepoPull_Target `json:"target" cborgen:"target"`
-	Title         string           `json:"title" cborgen:"title"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
+	Body          *string  `json:"body,omitempty" cborgen:"body,omitempty"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	// patch: (deprecated) use patchBlob instead
+	Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
+	// patchBlob: patch content
+	PatchBlob  *util.LexBlob    `json:"patchBlob" cborgen:"patchBlob"`
+	References []string         `json:"references,omitempty" cborgen:"references,omitempty"`
+	Source     *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
+	Target     *RepoPull_Target `json:"target" cborgen:"target"`
+	Title      string           `json:"title" cborgen:"title"`
 }
 
 // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
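The patch field on sh.tangled.repo.pull becomes an optional, deprecated string, and the patch content moves into the patchBlob reference. A minimal reader-side sketch, not part of this change set, of coping with both record shapes; the generated-package import path and the fetchBlob helper are assumptions:

package example

import (
	"context"
	"fmt"

	"github.com/bluesky-social/indigo/lex/util"
	"tangled.org/core/api/tangled" // assumed import path for the generated types
)

// pullPatch returns the patch text for a pull record, preferring the new
// patchBlob reference and falling back to the deprecated inline patch field.
// fetchBlob is a hypothetical helper that resolves a blob from the author's PDS.
func pullPatch(ctx context.Context, p *tangled.RepoPull, fetchBlob func(context.Context, *util.LexBlob) ([]byte, error)) (string, error) {
	if p.PatchBlob != nil {
		raw, err := fetchBlob(ctx, p.PatchBlob)
		if err != nil {
			return "", err
		}
		return string(raw), nil
	}
	// older records may still carry the inline (now deprecated) patch
	if p.Patch != nil {
		return *p.Patch, nil
	}
	return "", fmt.Errorf("pull record has neither patchBlob nor patch")
}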
+27
api/tangled/tangledcomment.go
+// Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.
+
+package tangled
+
+// schema: sh.tangled.comment
+
+import (
+	"github.com/bluesky-social/indigo/lex/util"
+)
+
+const (
+	CommentNSID = "sh.tangled.comment"
+)
+
+func init() {
+	util.RegisterType("sh.tangled.comment", &Comment{})
+} //
+// RECORDTYPE: Comment
+type Comment struct {
+	LexiconTypeID string   `json:"$type,const=sh.tangled.comment" cborgen:"$type,const=sh.tangled.comment"`
+	Body          string   `json:"body" cborgen:"body"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+	ReplyTo       *string  `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
+	Subject       string   `json:"subject" cborgen:"subject"`
+}
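The new sh.tangled.comment record gets generated CBOR marshal/unmarshal methods in cbor_gen.go above. A small round-trip sketch, not from the change set, of how those methods fit together; the import path and the AT-URIs are placeholders:

package example

import (
	"bytes"
	"fmt"

	"tangled.org/core/api/tangled" // assumed import path for the generated types
)

func roundTripComment() error {
	replyTo := "at://did:plc:alice/sh.tangled.comment/3kexample" // placeholder AT-URI
	in := tangled.Comment{
		Body:      "looks good to me",
		CreatedAt: "2025-01-01T00:00:00Z",
		Subject:   "at://did:plc:bob/sh.tangled.repo.issue/3kexample", // placeholder AT-URI
		ReplyTo:   &replyTo,
		Mentions:  []string{"did:plc:bob"},
	}

	var buf bytes.Buffer
	// MarshalCBOR writes the "$type" key as the constant sh.tangled.comment
	// and omits the optional mentions/references/replyTo fields when nil.
	if err := in.MarshalCBOR(&buf); err != nil {
		return err
	}

	var out tangled.Comment
	if err := out.UnmarshalCBOR(&buf); err != nil {
		return err
	}
	fmt.Println(out.LexiconTypeID, out.Body)
	return nil
}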
+6 -45
appview/commitverify/verify.go
···
 import (
 	"log"
 
-	"github.com/go-git/go-git/v5/plumbing/object"
 	"tangled.org/core/appview/db"
 	"tangled.org/core/appview/models"
 	"tangled.org/core/crypto"
···
 	return ""
 }
 
-func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
-	ndCommits := []types.NiceDiff{}
-	for _, commit := range commits {
-		ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
-	}
-	return GetVerifiedCommits(e, emailToDid, ndCommits)
-}
-
-func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
+func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
 	vcs := VerifiedCommits{}
 
 	didPubkeyCache := make(map[string][]models.PublicKey)
 
 	for _, commit := range ndCommits {
-		c := commit.Commit
-
-		committerEmail := c.Committer.Email
+		committerEmail := commit.Committer.Email
 		if did, exists := emailToDid[committerEmail]; exists {
 			// check if we've already fetched public keys for this did
 			pubKeys, ok := didPubkeyCache[did]
···
 			}
 
 			// try to verify with any associated pubkeys
+			payload := commit.Payload()
+			signature := commit.PGPSignature
 			for _, pk := range pubKeys {
-				if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
+				if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
 
 					fp, err := crypto.SSHFingerprint(pk.Key)
 					if err != nil {
 						log.Println("error computing ssh fingerprint:", err)
 					}
 
-					vc := verifiedCommit{fingerprint: fp, hash: c.This}
+					vc := verifiedCommit{fingerprint: fp, hash: commit.This}
 					vcs[vc] = struct{}{}
 					break
 				}
···
 
 	return vcs, nil
 }
-
-// ObjectCommitToNiceDiff is a compatibility function to convert a
-// commit object into a NiceDiff structure.
-func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
-	var niceDiff types.NiceDiff
-
-	// set commit information
-	niceDiff.Commit.Message = c.Message
-	niceDiff.Commit.Author = c.Author
-	niceDiff.Commit.This = c.Hash.String()
-	niceDiff.Commit.Committer = c.Committer
-	niceDiff.Commit.Tree = c.TreeHash.String()
-	niceDiff.Commit.PGPSignature = c.PGPSignature
-
-	changeId, ok := c.ExtraHeaders["change-id"]
-	if ok {
-		niceDiff.Commit.ChangedId = string(changeId)
-	}
-
-	// set parent hash if available
-	if len(c.ParentHashes) > 0 {
-		niceDiff.Commit.Parent = c.ParentHashes[0].String()
-	}
-
-	// XXX: Stats and Diff fields are typically populated
-	// after fetching the actual diff information, which isn't
-	// directly available in the commit object itself.
-
-	return niceDiff
-}
+3 -2
appview/db/artifact.go
··· 8 8 "github.com/go-git/go-git/v5/plumbing" 9 9 "github.com/ipfs/go-cid" 10 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 11 12 ) 12 13 13 14 func AddArtifact(e Execer, artifact models.Artifact) error { ··· 37 38 return err 38 39 } 39 40 40 - func GetArtifact(e Execer, filters ...filter) ([]models.Artifact, error) { 41 + func GetArtifact(e Execer, filters ...orm.Filter) ([]models.Artifact, error) { 41 42 var artifacts []models.Artifact 42 43 43 44 var conditions []string ··· 109 110 return artifacts, nil 110 111 } 111 112 112 - func DeleteArtifact(e Execer, filters ...filter) error { 113 + func DeleteArtifact(e Execer, filters ...orm.Filter) error { 113 114 var conditions []string 114 115 var args []any 115 116 for _, filter := range filters {
+4 -3
appview/db/collaborators.go
··· 6 6 "time" 7 7 8 8 "tangled.org/core/appview/models" 9 + "tangled.org/core/orm" 9 10 ) 10 11 11 12 func AddCollaborator(e Execer, c models.Collaborator) error { ··· 16 17 return err 17 18 } 18 19 19 - func DeleteCollaborator(e Execer, filters ...filter) error { 20 + func DeleteCollaborator(e Execer, filters ...orm.Filter) error { 20 21 var conditions []string 21 22 var args []any 22 23 for _, filter := range filters { ··· 58 59 return nil, nil 59 60 } 60 61 61 - return GetRepos(e, 0, FilterIn("at_uri", repoAts)) 62 + return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 62 63 } 63 64 64 - func GetCollaborators(e Execer, filters ...filter) ([]models.Collaborator, error) { 65 + func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) { 65 66 var collaborators []models.Collaborator 66 67 var conditions []string 67 68 var args []any
+199
appview/db/comments.go
+package db
+
+import (
+	"database/sql"
+	"fmt"
+	"maps"
+	"slices"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/bluesky-social/indigo/atproto/syntax"
+	"tangled.org/core/appview/models"
+	"tangled.org/core/orm"
+)
+
+func PutComment(tx *sql.Tx, c *models.Comment) error {
+	result, err := tx.Exec(
+		`insert into comments (
+			did,
+			rkey,
+			subject_at,
+			reply_to,
+			body,
+			pull_submission_id,
+			created
+		)
+		values (?, ?, ?, ?, ?, ?, ?)
+		on conflict(did, rkey) do update set
+			subject_at = excluded.subject_at,
+			reply_to = excluded.reply_to,
+			body = excluded.body,
+			edited = case
+				when
+					comments.subject_at != excluded.subject_at
+					or comments.body != excluded.body
+					or comments.reply_to != excluded.reply_to
+				then ?
+				else comments.edited
+			end`,
+		c.Did,
+		c.Rkey,
+		c.Subject,
+		c.ReplyTo,
+		c.Body,
+		c.PullSubmissionId,
+		c.Created.Format(time.RFC3339),
+		time.Now().Format(time.RFC3339),
+	)
+	if err != nil {
+		return err
+	}
+
+	c.Id, err = result.LastInsertId()
+	if err != nil {
+		return err
+	}
+
+	if err := putReferences(tx, c.AtUri(), c.References); err != nil {
+		return fmt.Errorf("put reference_links: %w", err)
+	}
+
+	return nil
+}
+
+func DeleteComments(e Execer, filters ...orm.Filter) error {
+	var conditions []string
+	var args []any
+	for _, filter := range filters {
+		conditions = append(conditions, filter.Condition())
+		args = append(args, filter.Arg()...)
+	}
+
+	whereClause := ""
+	if conditions != nil {
+		whereClause = " where " + strings.Join(conditions, " and ")
+	}
+
+	query := fmt.Sprintf(`update comments set body = "", deleted = strftime('%%Y-%%m-%%dT%%H:%%M:%%SZ', 'now') %s`, whereClause)
+
+	_, err := e.Exec(query, args...)
+	return err
+}
+
+func GetComments(e Execer, filters ...orm.Filter) ([]models.Comment, error) {
+	commentMap := make(map[string]*models.Comment)
+
+	var conditions []string
+	var args []any
+	for _, filter := range filters {
+		conditions = append(conditions, filter.Condition())
+		args = append(args, filter.Arg()...)
+	}
+
+	whereClause := ""
+	if conditions != nil {
+		whereClause = " where " + strings.Join(conditions, " and ")
+	}
+
+	query := fmt.Sprintf(`
+		select
+			id,
+			did,
+			rkey,
+			subject_at,
+			reply_to,
+			body,
+			pull_submission_id,
+			created,
+			edited,
+			deleted
+		from
+			comments
+		%s
+	`, whereClause)
+
+	rows, err := e.Query(query, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	for rows.Next() {
+		var comment models.Comment
+		var created string
+		var rkey, edited, deleted, replyTo sql.Null[string]
+		err := rows.Scan(
+			&comment.Id,
+			&comment.Did,
+			&rkey,
+			&comment.Subject,
+			&replyTo,
+			&comment.Body,
+			&comment.PullSubmissionId,
+			&created,
+			&edited,
+			&deleted,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		// this is a remnant from old times, newer comments always have rkey
+		if rkey.Valid {
+			comment.Rkey = rkey.V
+		}
+
+		if t, err := time.Parse(time.RFC3339, created); err == nil {
+			comment.Created = t
+		}
+
+		if edited.Valid {
+			if t, err := time.Parse(time.RFC3339, edited.V); err == nil {
+				comment.Edited = &t
+			}
+		}
+
+		if deleted.Valid {
+			if t, err := time.Parse(time.RFC3339, deleted.V); err == nil {
+				comment.Deleted = &t
+			}
+		}
+
+		if replyTo.Valid {
+			rt := syntax.ATURI(replyTo.V)
+			comment.ReplyTo = &rt
+		}
+
+		atUri := comment.AtUri().String()
+		commentMap[atUri] = &comment
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	// collect references from each comments
+	commentAts := slices.Collect(maps.Keys(commentMap))
+	allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
+	if err != nil {
+		return nil, fmt.Errorf("failed to query reference_links: %w", err)
+	}
+	for commentAt, references := range allReferencs {
+		if comment, ok := commentMap[commentAt.String()]; ok {
+			comment.References = references
+		}
+	}
+
+	var comments []models.Comment
+	for _, c := range commentMap {
+		comments = append(comments, *c)
+	}
+
+	sort.Slice(comments, func(i, j int) bool {
+		return comments[i].Created.After(comments[j].Created)
+	})
+
+	return comments, nil
+}
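A short usage sketch, not from the change set, of the new comment helpers; the caller-managed transaction and the filter column are illustrative, and orm.FilterIn is the only filter constructor visible in this diff:

package example

import (
	"database/sql"

	"tangled.org/core/appview/db"
	"tangled.org/core/appview/models"
	"tangled.org/core/orm"
)

// storeAndReload upserts a comment inside the caller's transaction and then
// reads back every comment sharing its rkey. Filtering by rkey here is only
// illustrative; any comments column can appear in a filter condition.
func storeAndReload(tx *sql.Tx, e db.Execer, c *models.Comment) ([]models.Comment, error) {
	if err := db.PutComment(tx, c); err != nil {
		return nil, err
	}
	if err := tx.Commit(); err != nil {
		return nil, err
	}
	return db.GetComments(e, orm.FilterIn("rkey", []string{c.Rkey}))
}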
+110 -133
appview/db/db.go
···
 import (
 	"context"
 	"database/sql"
-	"fmt"
 	"log/slog"
-	"reflect"
 	"strings"
 
 	_ "github.com/mattn/go-sqlite3"
 	"tangled.org/core/log"
+	"tangled.org/core/orm"
 )
 
 type DB struct {
···
 		email_notifications integer not null default 0
 	);
 
+	create table if not exists reference_links (
+		id integer primary key autoincrement,
+		from_at text not null,
+		to_at text not null,
+		unique (from_at, to_at)
+	);
+
 	create table if not exists migrations (
 		id integer primary key autoincrement,
 		name text unique
···
 	-- indexes for better performance
 	create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
 	create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
+	create index if not exists idx_references_from_at on reference_links(from_at);
+	create index if not exists idx_references_to_at on reference_links(to_at);
 	`)
 	if err != nil {
 		return nil, err
 	}
 
 	// run migrations
-	runMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
 		tx.Exec(`
 			alter table repos add column description text check (length(description) <= 200);
 		`)
 		return nil
 	})
··· (every remaining runMigration call site changes the same way, to orm.RunMigration, for the add-rkey-to-pubkeys, add-rkey-to-comments, add-deleted-and-edited-to-issue-comments, add-source-info-to-pulls-and-submissions, add-source-to-repos, recreate-pulls-column-for-stacking-support, add-spindle-to-repos, no-more-secrets, rework-collaborators-table, add-rkey-to-issues, rename-registrations-read-only-to-needs-upgrade, migrate-knots-to-total-xrpc, migrate-spindles-to-xrpc-owner, remove-issue-at-from-issues, rework-issue-comments, add-at-uri-to-pulls, remove-repo-at-pull-id-from-pull-submissions, add-combined-column-submissions, add-pronouns-profile, add-meta-column-repos, add-usermentioned-preference, and generalize-stars-subject migrations) ···
 		return err
 	})
 
-	return &DB{
-		db,
-		logger,
-	}, nil
-}
-
-type migrationFn = func(*sql.Tx) error
-
-func runMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
-	logger = logger.With("migration", name)
-
-	tx, err := c.BeginTx(context.Background(), nil)
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
-
-	var exists bool
-	err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
-	if err != nil {
-		return err
-	}
-
-	if !exists {
-		// run migration
-		err = migrationFn(tx)
-		if err != nil {
-			logger.Error("failed to run migration", "err", err)
-			return err
-		}
-
-		// mark migration as complete
-		_, err = tx.Exec("insert into migrations (name) values (?)", name)
-		if err != nil {
-			logger.Error("failed to mark migration as complete", "err", err)
-			return err
-		}
-
-		// commit the transaction
-		if err := tx.Commit(); err != nil {
-			return err
-		}
-
-		logger.Info("migration applied successfully")
-	} else {
-		logger.Warn("skipped migration, already applied")
-	}
+	orm.RunMigration(conn, logger, "add-comments-table", func(tx *sql.Tx) error {
+		_, err := tx.Exec(`
+		drop table if exists comments;
+
+		create table comments (
+			-- identifiers
+			id integer primary key autoincrement,
+			did text not null,
+			collection text not null default 'sh.tangled.comment',
+			rkey text not null,
+			at_uri text generated always as ('at://' || did || '/' || collection || '/' || rkey) stored,
+
+			-- at identifiers
+			subject_at text not null,
+			reply_to text, -- at_uri of parent comment
+
+			pull_submission_id integer, -- dirty fix until we atprotate the pull-rounds
+
+			-- content
+			body text not null,
+			created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
+			edited text,
+			deleted text,
+
+			-- constraints
+			unique(did, rkey)
+		);
+
+		insert into comments (
+			did,
+			collection,
+			rkey,
+			subject_at,
+			reply_to,
+			body,
+			created,
+			edited,
+			deleted
+		)
+		select
+			did,
+			'sh.tangled.repo.issue.comment',
+			rkey,
+			issue_at,
+			reply_to,
+			body,
+			created,
+			edited,
+			deleted
+		from issue_comments
+		where rkey is not null;
+
+		insert into comments (
+			did,
+			collection,
+			rkey,
+			subject_at,
+			pull_submission_id,
+			body,
+			created
+		)
+		select
+			c.owner_did,
+			'sh.tangled.repo.pull.comment',
+			substr(
+				substr(c.comment_at, 6 + instr(substr(c.comment_at, 6), '/')), -- nsid/rkey
+				instr(
+					substr(c.comment_at, 6 + instr(substr(c.comment_at, 6), '/')), -- nsid/rkey
+					'/'
+				) + 1
+			), -- rkey
+			p.at_uri,
+			c.submission_id,
+			c.body,
+			c.created
+		from pull_comments c
+		join pulls p on c.repo_at = p.repo_at and c.pull_id
= p.pull_id; 1253 + `) 1254 + return err 1255 + }) 1215 1256 1216 - return nil 1257 + return &DB{ 1258 + db, 1259 + logger, 1260 + }, nil 1217 1261 } 1218 1262 1219 1263 func (d *DB) Close() error { 1220 1264 return d.DB.Close() 1221 1265 } 1222 - 1223 - type filter struct { 1224 - key string 1225 - arg any 1226 - cmp string 1227 - } 1228 - 1229 - func newFilter(key, cmp string, arg any) filter { 1230 - return filter{ 1231 - key: key, 1232 - arg: arg, 1233 - cmp: cmp, 1234 - } 1235 - } 1236 - 1237 - func FilterEq(key string, arg any) filter { return newFilter(key, "=", arg) } 1238 - func FilterNotEq(key string, arg any) filter { return newFilter(key, "<>", arg) } 1239 - func FilterGte(key string, arg any) filter { return newFilter(key, ">=", arg) } 1240 - func FilterLte(key string, arg any) filter { return newFilter(key, "<=", arg) } 1241 - func FilterIs(key string, arg any) filter { return newFilter(key, "is", arg) } 1242 - func FilterIsNot(key string, arg any) filter { return newFilter(key, "is not", arg) } 1243 - func FilterIn(key string, arg any) filter { return newFilter(key, "in", arg) } 1244 - func FilterLike(key string, arg any) filter { return newFilter(key, "like", arg) } 1245 - func FilterNotLike(key string, arg any) filter { return newFilter(key, "not like", arg) } 1246 - func FilterContains(key string, arg any) filter { 1247 - return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg)) 1248 - } 1249 - 1250 - func (f filter) Condition() string { 1251 - rv := reflect.ValueOf(f.arg) 1252 - kind := rv.Kind() 1253 - 1254 - // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)` 1255 - if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 1256 - if rv.Len() == 0 { 1257 - // always false 1258 - return "1 = 0" 1259 - } 1260 - 1261 - placeholders := make([]string, rv.Len()) 1262 - for i := range placeholders { 1263 - placeholders[i] = "?" 1264 - } 1265 - 1266 - return fmt.Sprintf("%s %s (%s)", f.key, f.cmp, strings.Join(placeholders, ", ")) 1267 - } 1268 - 1269 - return fmt.Sprintf("%s %s ?", f.key, f.cmp) 1270 - } 1271 - 1272 - func (f filter) Arg() []any { 1273 - rv := reflect.ValueOf(f.arg) 1274 - kind := rv.Kind() 1275 - if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 1276 - if rv.Len() == 0 { 1277 - return nil 1278 - } 1279 - 1280 - out := make([]any, rv.Len()) 1281 - for i := range rv.Len() { 1282 - out[i] = rv.Index(i).Interface() 1283 - } 1284 - return out 1285 - } 1286 - 1287 - return []any{f.arg} 1288 - }
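The migration runner now lives in the orm package, and the new unified comments table keys every comment by a generated at_uri column while backfilling rows from issue_comments and pull_comments. For context, a minimal sketch of registering a further migration with the relocated helper, assuming orm.RunMigration keeps the shape of the removed runMigration (a *sql.Conn, a *slog.Logger, a name, and a func(*sql.Tx) error); the migration name and column below are hypothetical:

    orm.RunMigration(conn, logger, "add-example-column", func(tx *sql.Tx) error {
        // assumed to behave like the removed runMigration: the name is recorded
        // in the migrations table and the function is skipped on later runs
        _, err := tx.Exec(`alter table repos add column example text;`)
        return err
    })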
+6 -3
appview/db/follow.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 13 func AddFollow(e Execer, follow *models.Follow) error { ··· 134 135 return result, nil 135 136 } 136 137 137 - func GetFollows(e Execer, limit int, filters ...filter) ([]models.Follow, error) { 138 + func GetFollows(e Execer, limit int, filters ...orm.Filter) ([]models.Follow, error) { 138 139 var follows []models.Follow 139 140 140 141 var conditions []string ··· 166 167 if err != nil { 167 168 return nil, err 168 169 } 170 + defer rows.Close() 171 + 169 172 for rows.Next() { 170 173 var follow models.Follow 171 174 var followedAt string ··· 191 194 } 192 195 193 196 func GetFollowers(e Execer, did string) ([]models.Follow, error) { 194 - return GetFollows(e, 0, FilterEq("subject_did", did)) 197 + return GetFollows(e, 0, orm.FilterEq("subject_did", did)) 195 198 } 196 199 197 200 func GetFollowing(e Execer, did string) ([]models.Follow, error) { 198 - return GetFollows(e, 0, FilterEq("user_did", did)) 201 + return GetFollows(e, 0, orm.FilterEq("user_did", did)) 199 202 } 200 203 201 204 func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
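Query helpers across appview/db now take the exported orm.Filter instead of the package-local filter type. A short sketch of how callers compose them, assuming orm.Filter keeps the Condition()/Arg() behaviour of the old type; the wrapper function is hypothetical:

    func listFollowsExample(e Execer, did string) error {
        // a single equality filter compiles to "user_did = ?"
        following, err := GetFollows(e, 0, orm.FilterEq("user_did", did))
        if err != nil {
            return err
        }
        // FilterIn expands a slice into placeholders: "subject_did in (?, ?)"
        followers, err := GetFollows(e, 0, orm.FilterIn("subject_did", []string{did, "did:plc:example"}))
        if err != nil {
            return err
        }
        _, _ = following, followers
        return nil
    }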
+61 -184
appview/db/issues.go
··· 10 10 "time" 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 + "tangled.org/core/api/tangled" 13 14 "tangled.org/core/appview/models" 14 15 "tangled.org/core/appview/pagination" 16 + "tangled.org/core/orm" 15 17 ) 16 18 17 19 func PutIssue(tx *sql.Tx, issue *models.Issue) error { ··· 26 28 27 29 issues, err := GetIssues( 28 30 tx, 29 - FilterEq("did", issue.Did), 30 - FilterEq("rkey", issue.Rkey), 31 + orm.FilterEq("did", issue.Did), 32 + orm.FilterEq("rkey", issue.Rkey), 31 33 ) 32 34 switch { 33 35 case err != nil: ··· 69 71 returning rowid, issue_id 70 72 `, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body) 71 73 72 - return row.Scan(&issue.Id, &issue.IssueId) 74 + err = row.Scan(&issue.Id, &issue.IssueId) 75 + if err != nil { 76 + return fmt.Errorf("scan row: %w", err) 77 + } 78 + 79 + if err := putReferences(tx, issue.AtUri(), issue.References); err != nil { 80 + return fmt.Errorf("put reference_links: %w", err) 81 + } 82 + return nil 73 83 } 74 84 75 85 func updateIssue(tx *sql.Tx, issue *models.Issue) error { ··· 79 89 set title = ?, body = ?, edited = ? 80 90 where did = ? and rkey = ? 81 91 `, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey) 82 - return err 92 + if err != nil { 93 + return err 94 + } 95 + 96 + if err := putReferences(tx, issue.AtUri(), issue.References); err != nil { 97 + return fmt.Errorf("put reference_links: %w", err) 98 + } 99 + return nil 83 100 } 84 101 85 - func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]models.Issue, error) { 86 - issueMap := make(map[string]*models.Issue) // at-uri -> issue 102 + func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) { 103 + issueMap := make(map[syntax.ATURI]*models.Issue) // at-uri -> issue 87 104 88 105 var conditions []string 89 106 var args []any ··· 98 115 whereClause = " where " + strings.Join(conditions, " and ") 99 116 } 100 117 101 - pLower := FilterGte("row_num", page.Offset+1) 102 - pUpper := FilterLte("row_num", page.Offset+page.Limit) 118 + pLower := orm.FilterGte("row_num", page.Offset+1) 119 + pUpper := orm.FilterLte("row_num", page.Offset+page.Limit) 103 120 104 121 pageClause := "" 105 122 if page.Limit > 0 { ··· 179 196 } 180 197 } 181 198 182 - atUri := issue.AtUri().String() 183 - issueMap[atUri] = &issue 199 + issueMap[issue.AtUri()] = &issue 184 200 } 185 201 186 202 // collect reverse repos ··· 189 205 repoAts = append(repoAts, string(issue.RepoAt)) 190 206 } 191 207 192 - repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts)) 208 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 193 209 if err != nil { 194 210 return nil, fmt.Errorf("failed to build repo mappings: %w", err) 195 211 } ··· 212 228 // collect comments 213 229 issueAts := slices.Collect(maps.Keys(issueMap)) 214 230 215 - comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts)) 231 + comments, err := GetComments(e, orm.FilterIn("subject_at", issueAts)) 216 232 if err != nil { 217 233 return nil, fmt.Errorf("failed to query comments: %w", err) 218 234 } 219 235 for i := range comments { 220 - issueAt := comments[i].IssueAt 236 + issueAt := comments[i].Subject 221 237 if issue, ok := issueMap[issueAt]; ok { 222 238 issue.Comments = append(issue.Comments, comments[i]) 223 239 } 224 240 } 225 241 226 242 // collect allLabels for each issue 227 - allLabels, err := GetLabels(e, FilterIn("subject", issueAts)) 243 + allLabels, err := GetLabels(e, 
orm.FilterIn("subject", issueAts)) 228 244 if err != nil { 229 245 return nil, fmt.Errorf("failed to query labels: %w", err) 230 246 } 231 247 for issueAt, labels := range allLabels { 232 - if issue, ok := issueMap[issueAt.String()]; ok { 248 + if issue, ok := issueMap[issueAt]; ok { 233 249 issue.Labels = labels 234 250 } 235 251 } 236 252 253 + // collect references for each issue 254 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", issueAts)) 255 + if err != nil { 256 + return nil, fmt.Errorf("failed to query reference_links: %w", err) 257 + } 258 + for issueAt, references := range allReferencs { 259 + if issue, ok := issueMap[issueAt]; ok { 260 + issue.References = references 261 + } 262 + } 263 + 237 264 var issues []models.Issue 238 265 for _, i := range issueMap { 239 266 issues = append(issues, *i) ··· 250 277 issues, err := GetIssuesPaginated( 251 278 e, 252 279 pagination.Page{}, 253 - FilterEq("repo_at", repoAt), 254 - FilterEq("issue_id", issueId), 280 + orm.FilterEq("repo_at", repoAt), 281 + orm.FilterEq("issue_id", issueId), 255 282 ) 256 283 if err != nil { 257 284 return nil, err ··· 263 290 return &issues[0], nil 264 291 } 265 292 266 - func GetIssues(e Execer, filters ...filter) ([]models.Issue, error) { 293 + func GetIssues(e Execer, filters ...orm.Filter) ([]models.Issue, error) { 267 294 return GetIssuesPaginated(e, pagination.Page{}, filters...) 268 295 } 269 296 ··· 271 298 func GetIssueIDs(e Execer, opts models.IssueSearchOptions) ([]int64, error) { 272 299 var ids []int64 273 300 274 - var filters []filter 301 + var filters []orm.Filter 275 302 openValue := 0 276 303 if opts.IsOpen { 277 304 openValue = 1 278 305 } 279 - filters = append(filters, FilterEq("open", openValue)) 306 + filters = append(filters, orm.FilterEq("open", openValue)) 280 307 if opts.RepoAt != "" { 281 - filters = append(filters, FilterEq("repo_at", opts.RepoAt)) 308 + filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt)) 282 309 } 283 310 284 311 var conditions []string ··· 323 350 return ids, nil 324 351 } 325 352 326 - func AddIssueComment(e Execer, c models.IssueComment) (int64, error) { 327 - result, err := e.Exec( 328 - `insert into issue_comments ( 329 - did, 330 - rkey, 331 - issue_at, 332 - body, 333 - reply_to, 334 - created, 335 - edited 336 - ) 337 - values (?, ?, ?, ?, ?, ?, null) 338 - on conflict(did, rkey) do update set 339 - issue_at = excluded.issue_at, 340 - body = excluded.body, 341 - edited = case 342 - when 343 - issue_comments.issue_at != excluded.issue_at 344 - or issue_comments.body != excluded.body 345 - or issue_comments.reply_to != excluded.reply_to 346 - then ? 347 - else issue_comments.edited 348 - end`, 349 - c.Did, 350 - c.Rkey, 351 - c.IssueAt, 352 - c.Body, 353 - c.ReplyTo, 354 - c.Created.Format(time.RFC3339), 355 - time.Now().Format(time.RFC3339), 353 + func DeleteIssues(tx *sql.Tx, did, rkey string) error { 354 + _, err := tx.Exec( 355 + `delete from issues 356 + where did = ? 
and rkey = ?`, 357 + did, 358 + rkey, 356 359 ) 357 360 if err != nil { 358 - return 0, err 361 + return fmt.Errorf("delete issue: %w", err) 359 362 } 360 363 361 - id, err := result.LastInsertId() 364 + uri := syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", did, tangled.RepoIssueNSID, rkey)) 365 + err = deleteReferences(tx, uri) 362 366 if err != nil { 363 - return 0, err 367 + return fmt.Errorf("delete reference_links: %w", err) 364 368 } 365 369 366 - return id, nil 370 + return nil 367 371 }
484 - } 485 - 486 - whereClause := "" 487 - if conditions != nil { 488 - whereClause = " where " + strings.Join(conditions, " and ") 489 - } 490 - 491 - query := fmt.Sprintf(`delete from issues %s`, whereClause) 492 - _, err := e.Exec(query, args...) 493 - return err 494 - } 495 - 496 - func CloseIssues(e Execer, filters ...filter) error { 373 + func CloseIssues(e Execer, filters ...orm.Filter) error { 497 374 var conditions []string 498 375 var args []any 499 376 for _, filter := range filters { ··· 511 388 return err 512 389 } 513 390 514 - func ReopenIssues(e Execer, filters ...filter) error { 391 + func ReopenIssues(e Execer, filters ...orm.Filter) error { 515 392 var conditions []string 516 393 var args []any 517 394 for _, filter := range filters {
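Issues loaded through these helpers now carry their reference_links and unified comments alongside labels. A rough sketch of a read path; the wrapper function is hypothetical, and the field names follow the models used above:

    func openIssuesExample(e Execer, repoAt syntax.ATURI) error {
        issues, err := GetIssues(e, orm.FilterEq("repo_at", repoAt))
        if err != nil {
            return err
        }
        for _, issue := range issues {
            // References: outgoing at-uris stored in reference_links
            // Comments:   rows from the unified comments table for this issue
            fmt.Println(issue.Title, len(issue.References), len(issue.Comments))
        }
        return nil
    }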
+8 -7
appview/db/label.go
··· 10 10 11 11 "github.com/bluesky-social/indigo/atproto/syntax" 12 12 "tangled.org/core/appview/models" 13 + "tangled.org/core/orm" 13 14 ) 14 15 15 16 // no updating type for now ··· 59 60 return id, nil 60 61 } 61 62 62 - func DeleteLabelDefinition(e Execer, filters ...filter) error { 63 + func DeleteLabelDefinition(e Execer, filters ...orm.Filter) error { 63 64 var conditions []string 64 65 var args []any 65 66 for _, filter := range filters { ··· 75 76 return err 76 77 } 77 78 78 - func GetLabelDefinitions(e Execer, filters ...filter) ([]models.LabelDefinition, error) { 79 + func GetLabelDefinitions(e Execer, filters ...orm.Filter) ([]models.LabelDefinition, error) { 79 80 var labelDefinitions []models.LabelDefinition 80 81 var conditions []string 81 82 var args []any ··· 167 168 } 168 169 169 170 // helper to get exactly one label def 170 - func GetLabelDefinition(e Execer, filters ...filter) (*models.LabelDefinition, error) { 171 + func GetLabelDefinition(e Execer, filters ...orm.Filter) (*models.LabelDefinition, error) { 171 172 labels, err := GetLabelDefinitions(e, filters...) 172 173 if err != nil { 173 174 return nil, err ··· 227 228 return id, nil 228 229 } 229 230 230 - func GetLabelOps(e Execer, filters ...filter) ([]models.LabelOp, error) { 231 + func GetLabelOps(e Execer, filters ...orm.Filter) ([]models.LabelOp, error) { 231 232 var labelOps []models.LabelOp 232 233 var conditions []string 233 234 var args []any ··· 302 303 } 303 304 304 305 // get labels for a given list of subject URIs 305 - func GetLabels(e Execer, filters ...filter) (map[syntax.ATURI]models.LabelState, error) { 306 + func GetLabels(e Execer, filters ...orm.Filter) (map[syntax.ATURI]models.LabelState, error) { 306 307 ops, err := GetLabelOps(e, filters...) 307 308 if err != nil { 308 309 return nil, err ··· 322 323 } 323 324 labelAts := slices.Collect(maps.Keys(labelAtSet)) 324 325 325 - actx, err := NewLabelApplicationCtx(e, FilterIn("at_uri", labelAts)) 326 + actx, err := NewLabelApplicationCtx(e, orm.FilterIn("at_uri", labelAts)) 326 327 if err != nil { 327 328 return nil, err 328 329 } ··· 338 339 return results, nil 339 340 } 340 341 341 - func NewLabelApplicationCtx(e Execer, filters ...filter) (*models.LabelApplicationCtx, error) { 342 + func NewLabelApplicationCtx(e Execer, filters ...orm.Filter) (*models.LabelApplicationCtx, error) { 342 343 labels, err := GetLabelDefinitions(e, filters...) 343 344 if err != nil { 344 345 return nil, err
+6 -5
appview/db/language.go
··· 7 7 8 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetRepoLanguages(e Execer, filters ...filter) ([]models.RepoLanguage, error) { 13 + func GetRepoLanguages(e Execer, filters ...orm.Filter) ([]models.RepoLanguage, error) { 13 14 var conditions []string 14 15 var args []any 15 16 for _, filter := range filters { ··· 27 28 whereClause, 28 29 ) 29 30 rows, err := e.Query(query, args...) 30 - 31 31 if err != nil { 32 32 return nil, fmt.Errorf("failed to execute query: %w ", err) 33 33 } 34 + defer rows.Close() 34 35 35 36 var langs []models.RepoLanguage 36 37 for rows.Next() { ··· 85 86 return nil 86 87 } 87 88 88 - func DeleteRepoLanguages(e Execer, filters ...filter) error { 89 + func DeleteRepoLanguages(e Execer, filters ...orm.Filter) error { 89 90 var conditions []string 90 91 var args []any 91 92 for _, filter := range filters { ··· 107 108 func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error { 108 109 err := DeleteRepoLanguages( 109 110 tx, 110 - FilterEq("repo_at", repoAt), 111 - FilterEq("ref", ref), 111 + orm.FilterEq("repo_at", repoAt), 112 + orm.FilterEq("ref", ref), 112 113 ) 113 114 if err != nil { 114 115 return fmt.Errorf("failed to delete existing languages: %w", err)
+14 -13
appview/db/notifications.go
··· 11 11 "github.com/bluesky-social/indigo/atproto/syntax" 12 12 "tangled.org/core/appview/models" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func CreateNotification(e Execer, notification *models.Notification) error { ··· 44 45 } 45 46 46 47 // GetNotificationsPaginated retrieves notifications with filters and pagination 47 - func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...filter) ([]*models.Notification, error) { 48 + func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.Notification, error) { 48 49 var conditions []string 49 50 var args []any 50 51 ··· 113 114 } 114 115 115 116 // GetNotificationsWithEntities retrieves notifications with their related entities 116 - func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...filter) ([]*models.NotificationWithEntity, error) { 117 + func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.NotificationWithEntity, error) { 117 118 var conditions []string 118 119 var args []any 119 120 ··· 256 257 } 257 258 258 259 // GetNotifications retrieves notifications with filters 259 - func GetNotifications(e Execer, filters ...filter) ([]*models.Notification, error) { 260 + func GetNotifications(e Execer, filters ...orm.Filter) ([]*models.Notification, error) { 260 261 return GetNotificationsPaginated(e, pagination.FirstPage(), filters...) 261 262 } 262 263 263 - func CountNotifications(e Execer, filters ...filter) (int64, error) { 264 + func CountNotifications(e Execer, filters ...orm.Filter) (int64, error) { 264 265 var conditions []string 265 266 var args []any 266 267 for _, filter := range filters { ··· 285 286 } 286 287 287 288 func MarkNotificationRead(e Execer, notificationID int64, userDID string) error { 288 - idFilter := FilterEq("id", notificationID) 289 - recipientFilter := FilterEq("recipient_did", userDID) 289 + idFilter := orm.FilterEq("id", notificationID) 290 + recipientFilter := orm.FilterEq("recipient_did", userDID) 290 291 291 292 query := fmt.Sprintf(` 292 293 UPDATE notifications ··· 314 315 } 315 316 316 317 func MarkAllNotificationsRead(e Execer, userDID string) error { 317 - recipientFilter := FilterEq("recipient_did", userDID) 318 - readFilter := FilterEq("read", 0) 318 + recipientFilter := orm.FilterEq("recipient_did", userDID) 319 + readFilter := orm.FilterEq("read", 0) 319 320 320 321 query := fmt.Sprintf(` 321 322 UPDATE notifications ··· 334 335 } 335 336 336 337 func DeleteNotification(e Execer, notificationID int64, userDID string) error { 337 - idFilter := FilterEq("id", notificationID) 338 - recipientFilter := FilterEq("recipient_did", userDID) 338 + idFilter := orm.FilterEq("id", notificationID) 339 + recipientFilter := orm.FilterEq("recipient_did", userDID) 339 340 340 341 query := fmt.Sprintf(` 341 342 DELETE FROM notifications ··· 362 363 } 363 364 364 365 func GetNotificationPreference(e Execer, userDid string) (*models.NotificationPreferences, error) { 365 - prefs, err := GetNotificationPreferences(e, FilterEq("user_did", userDid)) 366 + prefs, err := GetNotificationPreferences(e, orm.FilterEq("user_did", userDid)) 366 367 if err != nil { 367 368 return nil, err 368 369 } ··· 375 376 return p, nil 376 377 } 377 378 378 - func GetNotificationPreferences(e Execer, filters ...filter) (map[syntax.DID]*models.NotificationPreferences, error) { 379 + func GetNotificationPreferences(e Execer, filters ...orm.Filter) 
(map[syntax.DID]*models.NotificationPreferences, error) { 379 380 prefsMap := make(map[syntax.DID]*models.NotificationPreferences) 380 381 381 382 var conditions []string ··· 483 484 484 485 func (d *DB) ClearOldNotifications(ctx context.Context, olderThan time.Duration) error { 485 486 cutoff := time.Now().Add(-olderThan) 486 - createdFilter := FilterLte("created", cutoff) 487 + createdFilter := orm.FilterLte("created", cutoff) 487 488 488 489 query := fmt.Sprintf(` 489 490 DELETE FROM notifications
+6 -5
appview/db/pipeline.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) { 13 + func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) { 13 14 var pipelines []models.Pipeline 14 15 15 16 var conditions []string ··· 168 169 169 170 // this is a mega query, but the most useful one: 170 171 // get N pipelines, for each one get the latest status of its N workflows 171 - func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) { 172 + func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) { 172 173 var conditions []string 173 174 var args []any 174 175 for _, filter := range filters { 175 - filter.key = "p." + filter.key // the table is aliased in the query to `p` 176 + filter.Key = "p." + filter.Key // the table is aliased in the query to `p` 176 177 conditions = append(conditions, filter.Condition()) 177 178 args = append(args, filter.Arg()...) 178 179 } ··· 264 265 conditions = nil 265 266 args = nil 266 267 for _, p := range pipelines { 267 - knotFilter := FilterEq("pipeline_knot", p.Knot) 268 - rkeyFilter := FilterEq("pipeline_rkey", p.Rkey) 268 + knotFilter := orm.FilterEq("pipeline_knot", p.Knot) 269 + rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey) 269 270 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition())) 270 271 args = append(args, p.Knot) 271 272 args = append(args, p.Rkey)
+29 -16
appview/db/profile.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 const TimeframeMonths = 7 ··· 19 20 timeline := models.ProfileTimeline{ 20 21 ByMonth: make([]models.ByMonth, TimeframeMonths), 21 22 } 22 - currentMonth := time.Now().Month() 23 + now := time.Now() 23 24 timeframe := fmt.Sprintf("-%d months", TimeframeMonths) 24 25 25 26 pulls, err := GetPullsByOwnerDid(e, forDid, timeframe) ··· 29 30 30 31 // group pulls by month 31 32 for _, pull := range pulls { 32 - pullMonth := pull.Created.Month() 33 + monthsAgo := monthsBetween(pull.Created, now) 33 34 34 - if currentMonth-pullMonth >= TimeframeMonths { 35 + if monthsAgo >= TimeframeMonths { 35 36 // shouldn't happen; but times are weird 36 37 continue 37 38 } 38 39 39 - idx := currentMonth - pullMonth 40 + idx := monthsAgo 40 41 items := &timeline.ByMonth[idx].PullEvents.Items 41 42 42 43 *items = append(*items, &pull) ··· 44 45 45 46 issues, err := GetIssues( 46 47 e, 47 - FilterEq("did", forDid), 48 - FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)), 48 + orm.FilterEq("did", forDid), 49 + orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)), 49 50 ) 50 51 if err != nil { 51 52 return nil, fmt.Errorf("error getting issues by owner did: %w", err) 52 53 } 53 54 54 55 for _, issue := range issues { 55 - issueMonth := issue.Created.Month() 56 + monthsAgo := monthsBetween(issue.Created, now) 56 57 57 - if currentMonth-issueMonth >= TimeframeMonths { 58 + if monthsAgo >= TimeframeMonths { 58 59 // shouldn't happen; but times are weird 59 60 continue 60 61 } 61 62 62 - idx := currentMonth - issueMonth 63 + idx := monthsAgo 63 64 items := &timeline.ByMonth[idx].IssueEvents.Items 64 65 65 66 *items = append(*items, &issue) 66 67 } 67 68 68 - repos, err := GetRepos(e, 0, FilterEq("did", forDid)) 69 + repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid)) 69 70 if err != nil { 70 71 return nil, fmt.Errorf("error getting all repos by did: %w", err) 71 72 } ··· 76 77 if repo.Source != "" { 77 78 sourceRepo, err = GetRepoByAtUri(e, repo.Source) 78 79 if err != nil { 79 - return nil, err 80 + // the source repo was not found, skip this bit 81 + log.Println("profile", "err", err) 80 82 } 81 83 } 82 84 83 - repoMonth := repo.Created.Month() 85 + monthsAgo := monthsBetween(repo.Created, now) 84 86 85 - if currentMonth-repoMonth >= TimeframeMonths { 87 + if monthsAgo >= TimeframeMonths { 86 88 // shouldn't happen; but times are weird 87 89 continue 88 90 } 89 91 90 - idx := currentMonth - repoMonth 92 + idx := monthsAgo 91 93 92 94 items := &timeline.ByMonth[idx].RepoEvents 93 95 *items = append(*items, models.RepoEvent{ ··· 99 101 return &timeline, nil 100 102 } 101 103 104 + func monthsBetween(from, to time.Time) int { 105 + years := to.Year() - from.Year() 106 + months := int(to.Month() - from.Month()) 107 + return years*12 + months 108 + } 109 + 102 110 func UpsertProfile(tx *sql.Tx, profile *models.Profile) error { 103 111 defer tx.Rollback() 104 112 ··· 199 207 return tx.Commit() 200 208 } 201 209 202 - func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) { 210 + func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) { 203 211 var conditions []string 204 212 var args []any 205 213 for _, filter := range filters { ··· 229 237 if err != nil { 230 238 return nil, err 231 239 } 240 + defer rows.Close() 232 241 233 242 profileMap := make(map[string]*models.Profile) 234 
243 for rows.Next() { ··· 269 278 if err != nil { 270 279 return nil, err 271 280 } 281 + defer rows.Close() 282 + 272 283 idxs := make(map[string]int) 273 284 for did := range profileMap { 274 285 idxs[did] = 0 ··· 289 300 if err != nil { 290 301 return nil, err 291 302 } 303 + defer rows.Close() 304 + 292 305 idxs = make(map[string]int) 293 306 for did := range profileMap { 294 307 idxs[did] = 0 ··· 441 454 } 442 455 443 456 // ensure all pinned repos are either own repos or collaborating repos 444 - repos, err := GetRepos(e, 0, FilterEq("did", profile.Did)) 457 + repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did)) 445 458 if err != nil { 446 459 log.Printf("getting repos for %s: %s", profile.Did, err) 447 460 }
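monthsBetween measures calendar-month distance, so spans that cross a year boundary stay non-negative. A standalone illustration, not part of the diff:

    from := time.Date(2024, time.November, 5, 0, 0, 0, 0, time.UTC)
    to := time.Date(2025, time.January, 20, 0, 0, 0, 0, time.UTC)
    fmt.Println(monthsBetween(from, to)) // 2: Nov 2024 -> Jan 2025
    fmt.Println(monthsBetween(to, to))   // 0: same month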
+43 -113
appview/db/pulls.go
··· 13 13 14 14 "github.com/bluesky-social/indigo/atproto/syntax" 15 15 "tangled.org/core/appview/models" 16 + "tangled.org/core/orm" 16 17 ) 17 18 18 19 func NewPull(tx *sql.Tx, pull *models.Pull) error { ··· 93 94 insert into pull_submissions (pull_at, round_number, patch, combined, source_rev) 94 95 values (?, ?, ?, ?, ?) 95 96 `, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev) 96 - return err 97 + if err != nil { 98 + return err 99 + } 100 + 101 + if err := putReferences(tx, pull.AtUri(), pull.References); err != nil { 102 + return fmt.Errorf("put reference_links: %w", err) 103 + } 104 + 105 + return nil 97 106 } 98 107 99 108 func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) { ··· 110 119 return pullId - 1, err 111 120 } 112 121 113 - func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) { 122 + func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) { 114 123 pulls := make(map[syntax.ATURI]*models.Pull) 115 124 116 125 var conditions []string ··· 221 230 for _, p := range pulls { 222 231 pullAts = append(pullAts, p.AtUri()) 223 232 } 224 - submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts)) 233 + submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts)) 225 234 if err != nil { 226 235 return nil, fmt.Errorf("failed to get submissions: %w", err) 227 236 } ··· 233 242 } 234 243 235 244 // collect allLabels for each issue 236 - allLabels, err := GetLabels(e, FilterIn("subject", pullAts)) 245 + allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts)) 237 246 if err != nil { 238 247 return nil, fmt.Errorf("failed to query labels: %w", err) 239 248 } ··· 250 259 sourceAts = append(sourceAts, *p.PullSource.RepoAt) 251 260 } 252 261 } 253 - sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts)) 262 + sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts)) 254 263 if err != nil && !errors.Is(err, sql.ErrNoRows) { 255 264 return nil, fmt.Errorf("failed to get source repos: %w", err) 256 265 } ··· 266 275 } 267 276 } 268 277 278 + allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts)) 279 + if err != nil { 280 + return nil, fmt.Errorf("failed to query reference_links: %w", err) 281 + } 282 + for pullAt, references := range allReferences { 283 + if pull, ok := pulls[pullAt]; ok { 284 + pull.References = references 285 + } 286 + } 287 + 269 288 orderedByPullId := []*models.Pull{} 270 289 for _, p := range pulls { 271 290 orderedByPullId = append(orderedByPullId, p) ··· 277 296 return orderedByPullId, nil 278 297 } 279 298 280 - func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) { 299 + func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) { 281 300 return GetPullsWithLimit(e, 0, filters...) 
282 301 } 283 302 284 303 func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) { 285 304 var ids []int64 286 305 287 - var filters []filter 288 - filters = append(filters, FilterEq("state", opts.State)) 306 + var filters []orm.Filter 307 + filters = append(filters, orm.FilterEq("state", opts.State)) 289 308 if opts.RepoAt != "" { 290 - filters = append(filters, FilterEq("repo_at", opts.RepoAt)) 309 + filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt)) 291 310 } 292 311 293 312 var conditions []string ··· 343 362 } 344 363 345 364 func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) { 346 - pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId)) 365 + pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId)) 347 366 if err != nil { 348 367 return nil, err 349 368 } ··· 355 374 } 356 375 357 376 // mapping from pull -> pull submissions 358 - func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) { 377 + func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) { 359 378 var conditions []string 360 379 var args []any 361 380 for _, filter := range filters { ··· 428 447 return nil, err 429 448 } 430 449 431 - // Get comments for all submissions using GetPullComments 450 + // Get comments for all submissions using GetComments 432 451 submissionIds := slices.Collect(maps.Keys(submissionMap)) 433 - comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds)) 452 + comments, err := GetComments(e, orm.FilterIn("pull_submission_id", submissionIds)) 434 453 if err != nil { 435 - return nil, err 454 + return nil, fmt.Errorf("failed to get pull comments: %w", err) 436 455 } 437 456 for _, comment := range comments { 438 - if submission, ok := submissionMap[comment.SubmissionId]; ok { 439 - submission.Comments = append(submission.Comments, comment) 457 + if comment.PullSubmissionId != nil { 458 + if submission, ok := submissionMap[*comment.PullSubmissionId]; ok { 459 + submission.Comments = append(submission.Comments, comment) 460 + } 440 461 } 441 462 } 442 463 ··· 456 477 return m, nil 457 478 } 458 479 459 - func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) { 460 - var conditions []string 461 - var args []any 462 - for _, filter := range filters { 463 - conditions = append(conditions, filter.Condition()) 464 - args = append(args, filter.Arg()...) 465 - } 466 - 467 - whereClause := "" 468 - if conditions != nil { 469 - whereClause = " where " + strings.Join(conditions, " and ") 470 - } 471 - 472 - query := fmt.Sprintf(` 473 - select 474 - id, 475 - pull_id, 476 - submission_id, 477 - repo_at, 478 - owner_did, 479 - comment_at, 480 - body, 481 - created 482 - from 483 - pull_comments 484 - %s 485 - order by 486 - created asc 487 - `, whereClause) 488 - 489 - rows, err := e.Query(query, args...) 
490 - if err != nil { 491 - return nil, err 492 - } 493 - defer rows.Close() 494 - 495 - var comments []models.PullComment 496 - for rows.Next() { 497 - var comment models.PullComment 498 - var createdAt string 499 - err := rows.Scan( 500 - &comment.ID, 501 - &comment.PullId, 502 - &comment.SubmissionId, 503 - &comment.RepoAt, 504 - &comment.OwnerDid, 505 - &comment.CommentAt, 506 - &comment.Body, 507 - &createdAt, 508 - ) 509 - if err != nil { 510 - return nil, err 511 - } 512 - 513 - if t, err := time.Parse(time.RFC3339, createdAt); err == nil { 514 - comment.Created = t 515 - } 516 - 517 - comments = append(comments, comment) 518 - } 519 - 520 - if err := rows.Err(); err != nil { 521 - return nil, err 522 - } 523 - 524 - return comments, nil 525 - } 526 - 527 480 // timeframe here is directly passed into the sql query filter, and any 528 481 // timeframe in the past should be negative; e.g.: "-3 months" 529 482 func GetPullsByOwnerDid(e Execer, did, timeframe string) ([]models.Pull, error) { ··· 600 553 return pulls, nil 601 554 } 602 555 603 - func NewPullComment(e Execer, comment *models.PullComment) (int64, error) { 604 - query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)` 605 - res, err := e.Exec( 606 - query, 607 - comment.OwnerDid, 608 - comment.RepoAt, 609 - comment.SubmissionId, 610 - comment.CommentAt, 611 - comment.PullId, 612 - comment.Body, 613 - ) 614 - if err != nil { 615 - return 0, err 616 - } 617 - 618 - i, err := res.LastInsertId() 619 - if err != nil { 620 - return 0, err 621 - } 622 - 623 - return i, nil 624 - } 625 - 626 556 func SetPullState(e Execer, repoAt syntax.ATURI, pullId int, pullState models.PullState) error { 627 557 _, err := e.Exec( 628 558 `update pulls set state = ? where repo_at = ? and pull_id = ? and (state <> ? or state <> ?)`, ··· 664 594 return err 665 595 } 666 596 667 - func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error { 597 + func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error { 668 598 var conditions []string 669 599 var args []any 670 600 ··· 688 618 689 619 // Only used when stacking to update contents in the event of a rebase (the interdiff should be empty). 690 620 // otherwise submissions are immutable 691 - func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error { 621 + func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error { 692 622 var conditions []string 693 623 var args []any 694 624 ··· 746 676 func GetStack(e Execer, stackId string) (models.Stack, error) { 747 677 unorderedPulls, err := GetPulls( 748 678 e, 749 - FilterEq("stack_id", stackId), 750 - FilterNotEq("state", models.PullDeleted), 679 + orm.FilterEq("stack_id", stackId), 680 + orm.FilterNotEq("state", models.PullDeleted), 751 681 ) 752 682 if err != nil { 753 683 return nil, err ··· 791 721 func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) { 792 722 pulls, err := GetPulls( 793 723 e, 794 - FilterEq("stack_id", stackId), 795 - FilterEq("state", models.PullDeleted), 724 + orm.FilterEq("stack_id", stackId), 725 + orm.FilterEq("state", models.PullDeleted), 796 726 ) 797 727 if err != nil { 798 728 return nil, err
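Pull review comments are now read from the unified comments table keyed by pull_submission_id. A sketch of fetching one submission's thread, assuming GetComments (added elsewhere in this change) takes the same orm.Filter variadics as the other helpers and that the model fields mirror the comments table columns:

    func submissionCommentsExample(e Execer, submissionID int64) error {
        comments, err := GetComments(e, orm.FilterIn("pull_submission_id", []int64{submissionID}))
        if err != nil {
            return err
        }
        for _, c := range comments {
            fmt.Println(c.Did, c.Body) // did and body columns of the comments table
        }
        return nil
    }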
+3 -2
appview/db/punchcard.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 13 // this adds to the existing count ··· 20 21 return err 21 22 } 22 23 23 - func MakePunchcard(e Execer, filters ...filter) (*models.Punchcard, error) { 24 + func MakePunchcard(e Execer, filters ...orm.Filter) (*models.Punchcard, error) { 24 25 punchcard := &models.Punchcard{} 25 26 now := time.Now() 26 27 startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC) ··· 77 78 punch.Count = int(count.Int64) 78 79 } 79 80 80 - punchcard.Punches[punch.Date.YearDay()] = punch 81 + punchcard.Punches[punch.Date.YearDay()-1] = punch 81 82 punchcard.Total += punch.Count 82 83 } 83 84
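time.Time.YearDay is 1-based (January 1st is day 1), so the zero-based Punches slice is indexed with YearDay()-1. A standalone check:

    jan1 := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)
    fmt.Println(jan1.YearDay())     // 1
    fmt.Println(jan1.YearDay() - 1) // 0, the first slot of punchcard.Punches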
+451
appview/db/reference.go
··· 1 + package db 2 + 3 + import ( 4 + "database/sql" 5 + "fmt" 6 + "strings" 7 + 8 + "github.com/bluesky-social/indigo/atproto/syntax" 9 + "tangled.org/core/api/tangled" 10 + "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 12 + ) 13 + 14 + // ValidateReferenceLinks resolves refLinks to Issue/PR/Comment ATURIs. 15 + // It will ignore missing refLinks. 16 + func ValidateReferenceLinks(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) { 17 + var ( 18 + issueRefs []models.ReferenceLink 19 + pullRefs []models.ReferenceLink 20 + ) 21 + for _, ref := range refLinks { 22 + switch ref.Kind { 23 + case models.RefKindIssue: 24 + issueRefs = append(issueRefs, ref) 25 + case models.RefKindPull: 26 + pullRefs = append(pullRefs, ref) 27 + } 28 + } 29 + issueUris, err := findIssueReferences(e, issueRefs) 30 + if err != nil { 31 + return nil, fmt.Errorf("find issue references: %w", err) 32 + } 33 + pullUris, err := findPullReferences(e, pullRefs) 34 + if err != nil { 35 + return nil, fmt.Errorf("find pull references: %w", err) 36 + } 37 + 38 + return append(issueUris, pullUris...), nil 39 + } 40 + 41 + func findIssueReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) { 42 + if len(refLinks) == 0 { 43 + return nil, nil 44 + } 45 + vals := make([]string, len(refLinks)) 46 + args := make([]any, 0, len(refLinks)*4) 47 + for i, ref := range refLinks { 48 + vals[i] = "(?, ?, ?, ?)" 49 + args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId) 50 + } 51 + query := fmt.Sprintf( 52 + `with input(owner_did, name, issue_id, comment_id) as ( 53 + values %s 54 + ) 55 + select 56 + i.at_uri, c.at_uri 57 + from input inp 58 + join repos r 59 + on r.did = inp.owner_did 60 + and r.name = inp.name 61 + join issues i 62 + on i.repo_at = r.at_uri 63 + and i.issue_id = inp.issue_id 64 + left join comments c 65 + on inp.comment_id is not null 66 + and c.subject_at = i.at_uri 67 + and c.id = inp.comment_id 68 + `, 69 + strings.Join(vals, ","), 70 + ) 71 + rows, err := e.Query(query, args...) 
72 + if err != nil { 73 + return nil, err 74 + } 75 + defer rows.Close() 76 + 77 + var uris []syntax.ATURI 78 + 79 + for rows.Next() { 80 + // Scan rows 81 + var issueUri string 82 + var commentUri sql.NullString 83 + var uri syntax.ATURI 84 + if err := rows.Scan(&issueUri, &commentUri); err != nil { 85 + return nil, err 86 + } 87 + if commentUri.Valid { 88 + uri = syntax.ATURI(commentUri.String) 89 + } else { 90 + uri = syntax.ATURI(issueUri) 91 + } 92 + uris = append(uris, uri) 93 + } 94 + if err := rows.Err(); err != nil { 95 + return nil, fmt.Errorf("iterate rows: %w", err) 96 + } 97 + 98 + return uris, nil 99 + } 100 + 101 + func findPullReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) { 102 + if len(refLinks) == 0 { 103 + return nil, nil 104 + } 105 + vals := make([]string, len(refLinks)) 106 + args := make([]any, 0, len(refLinks)*4) 107 + for i, ref := range refLinks { 108 + vals[i] = "(?, ?, ?, ?)" 109 + args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId) 110 + } 111 + query := fmt.Sprintf( 112 + `with input(owner_did, name, pull_id, comment_id) as ( 113 + values %s 114 + ) 115 + select 116 + p.owner_did, p.rkey, c.at_uri 117 + from input inp 118 + join repos r 119 + on r.did = inp.owner_did 120 + and r.name = inp.name 121 + join pulls p 122 + on p.repo_at = r.at_uri 123 + and p.pull_id = inp.pull_id 124 + left join comments c 125 + on inp.comment_id is not null 126 + and c.subject_at = ('at://' || p.owner_did || '/' || 'sh.tangled.repo.pull' || '/' || p.rkey) 127 + and c.id = inp.comment_id 128 + `, 129 + strings.Join(vals, ","), 130 + ) 131 + rows, err := e.Query(query, args...) 132 + if err != nil { 133 + return nil, err 134 + } 135 + defer rows.Close() 136 + 137 + var uris []syntax.ATURI 138 + 139 + for rows.Next() { 140 + // Scan rows 141 + var pullOwner, pullRkey string 142 + var commentUri sql.NullString 143 + var uri syntax.ATURI 144 + if err := rows.Scan(&pullOwner, &pullRkey, &commentUri); err != nil { 145 + return nil, err 146 + } 147 + if commentUri.Valid { 148 + // no-op 149 + uri = syntax.ATURI(commentUri.String) 150 + } else { 151 + uri = syntax.ATURI(fmt.Sprintf( 152 + "at://%s/%s/%s", 153 + pullOwner, 154 + tangled.RepoPullNSID, 155 + pullRkey, 156 + )) 157 + } 158 + uris = append(uris, uri) 159 + } 160 + return uris, nil 161 + } 162 + 163 + func putReferences(tx *sql.Tx, fromAt syntax.ATURI, references []syntax.ATURI) error { 164 + err := deleteReferences(tx, fromAt) 165 + if err != nil { 166 + return fmt.Errorf("delete old reference_links: %w", err) 167 + } 168 + if len(references) == 0 { 169 + return nil 170 + } 171 + 172 + values := make([]string, 0, len(references)) 173 + args := make([]any, 0, len(references)*2) 174 + for _, ref := range references { 175 + values = append(values, "(?, ?)") 176 + args = append(args, fromAt, ref) 177 + } 178 + _, err = tx.Exec( 179 + fmt.Sprintf( 180 + `insert into reference_links (from_at, to_at) 181 + values %s`, 182 + strings.Join(values, ","), 183 + ), 184 + args..., 185 + ) 186 + if err != nil { 187 + return fmt.Errorf("insert new reference_links: %w", err) 188 + } 189 + return nil 190 + } 191 + 192 + func deleteReferences(tx *sql.Tx, fromAt syntax.ATURI) error { 193 + _, err := tx.Exec(`delete from reference_links where from_at = ?`, fromAt) 194 + return err 195 + } 196 + 197 + func GetReferencesAll(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]syntax.ATURI, error) { 198 + var ( 199 + conditions []string 200 + args []any 201 + ) 202 + for _, filter := range filters { 
203 + conditions = append(conditions, filter.Condition()) 204 + args = append(args, filter.Arg()...) 205 + } 206 + 207 + whereClause := "" 208 + if conditions != nil { 209 + whereClause = " where " + strings.Join(conditions, " and ") 210 + } 211 + 212 + rows, err := e.Query( 213 + fmt.Sprintf( 214 + `select from_at, to_at from reference_links %s`, 215 + whereClause, 216 + ), 217 + args..., 218 + ) 219 + if err != nil { 220 + return nil, fmt.Errorf("query reference_links: %w", err) 221 + } 222 + defer rows.Close() 223 + 224 + result := make(map[syntax.ATURI][]syntax.ATURI) 225 + 226 + for rows.Next() { 227 + var from, to syntax.ATURI 228 + if err := rows.Scan(&from, &to); err != nil { 229 + return nil, fmt.Errorf("scan row: %w", err) 230 + } 231 + 232 + result[from] = append(result[from], to) 233 + } 234 + if err := rows.Err(); err != nil { 235 + return nil, fmt.Errorf("iterate rows: %w", err) 236 + } 237 + 238 + return result, nil 239 + } 240 + 241 + func GetBacklinks(e Execer, target syntax.ATURI) ([]models.RichReferenceLink, error) { 242 + rows, err := e.Query( 243 + `select from_at from reference_links 244 + where to_at = ?`, 245 + target, 246 + ) 247 + if err != nil { 248 + return nil, fmt.Errorf("query backlinks: %w", err) 249 + } 250 + defer rows.Close() 251 + 252 + var ( 253 + backlinks []models.RichReferenceLink 254 + backlinksMap = make(map[string][]syntax.ATURI) 255 + ) 256 + for rows.Next() { 257 + var from syntax.ATURI 258 + if err := rows.Scan(&from); err != nil { 259 + return nil, fmt.Errorf("scan row: %w", err) 260 + } 261 + nsid := from.Collection().String() 262 + backlinksMap[nsid] = append(backlinksMap[nsid], from) 263 + } 264 + if err := rows.Err(); err != nil { 265 + return nil, fmt.Errorf("iterate rows: %w", err) 266 + } 267 + 268 + var ls []models.RichReferenceLink 269 + ls, err = getIssueBacklinks(e, backlinksMap[tangled.RepoIssueNSID]) 270 + if err != nil { 271 + return nil, fmt.Errorf("get issue backlinks: %w", err) 272 + } 273 + backlinks = append(backlinks, ls...) 274 + ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.CommentNSID]) 275 + if err != nil { 276 + return nil, fmt.Errorf("get issue_comment backlinks: %w", err) 277 + } 278 + backlinks = append(backlinks, ls...) 279 + ls, err = getPullBacklinks(e, backlinksMap[tangled.RepoPullNSID]) 280 + if err != nil { 281 + return nil, fmt.Errorf("get pull backlinks: %w", err) 282 + } 283 + backlinks = append(backlinks, ls...) 284 + ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.CommentNSID]) 285 + if err != nil { 286 + return nil, fmt.Errorf("get pull_comment backlinks: %w", err) 287 + } 288 + backlinks = append(backlinks, ls...) 
289 + 290 + return backlinks, nil 291 + } 292 + 293 + func getIssueBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 294 + if len(aturis) == 0 { 295 + return nil, nil 296 + } 297 + vals := make([]string, len(aturis)) 298 + args := make([]any, 0, len(aturis)*2) 299 + for i, aturi := range aturis { 300 + vals[i] = "(?, ?)" 301 + did := aturi.Authority().String() 302 + rkey := aturi.RecordKey().String() 303 + args = append(args, did, rkey) 304 + } 305 + rows, err := e.Query( 306 + fmt.Sprintf( 307 + `select r.did, r.name, i.issue_id, i.title, i.open 308 + from issues i 309 + join repos r 310 + on r.at_uri = i.repo_at 311 + where (i.did, i.rkey) in (%s)`, 312 + strings.Join(vals, ","), 313 + ), 314 + args..., 315 + ) 316 + if err != nil { 317 + return nil, err 318 + } 319 + defer rows.Close() 320 + var refLinks []models.RichReferenceLink 321 + for rows.Next() { 322 + var l models.RichReferenceLink 323 + l.Kind = models.RefKindIssue 324 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil { 325 + return nil, err 326 + } 327 + refLinks = append(refLinks, l) 328 + } 329 + if err := rows.Err(); err != nil { 330 + return nil, fmt.Errorf("iterate rows: %w", err) 331 + } 332 + return refLinks, nil 333 + } 334 + 335 + func getIssueCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 336 + if len(aturis) == 0 { 337 + return nil, nil 338 + } 339 + filter := orm.FilterIn("c.at_uri", aturis) 340 + rows, err := e.Query( 341 + fmt.Sprintf( 342 + `select r.did, r.name, i.issue_id, c.id, i.title, i.open 343 + from comments c 344 + join issues i 345 + on i.at_uri = c.subject_at 346 + join repos r 347 + on r.at_uri = i.repo_at 348 + where %s`, 349 + filter.Condition(), 350 + ), 351 + filter.Arg()..., 352 + ) 353 + if err != nil { 354 + return nil, err 355 + } 356 + defer rows.Close() 357 + var refLinks []models.RichReferenceLink 358 + for rows.Next() { 359 + var l models.RichReferenceLink 360 + l.Kind = models.RefKindIssue 361 + l.CommentId = new(int) 362 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil { 363 + return nil, err 364 + } 365 + refLinks = append(refLinks, l) 366 + } 367 + if err := rows.Err(); err != nil { 368 + return nil, fmt.Errorf("iterate rows: %w", err) 369 + } 370 + return refLinks, nil 371 + } 372 + 373 + func getPullBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 374 + if len(aturis) == 0 { 375 + return nil, nil 376 + } 377 + vals := make([]string, len(aturis)) 378 + args := make([]any, 0, len(aturis)*2) 379 + for i, aturi := range aturis { 380 + vals[i] = "(?, ?)" 381 + did := aturi.Authority().String() 382 + rkey := aturi.RecordKey().String() 383 + args = append(args, did, rkey) 384 + } 385 + rows, err := e.Query( 386 + fmt.Sprintf( 387 + `select r.did, r.name, p.pull_id, p.title, p.state 388 + from pulls p 389 + join repos r 390 + on r.at_uri = p.repo_at 391 + where (p.owner_did, p.rkey) in (%s)`, 392 + strings.Join(vals, ","), 393 + ), 394 + args..., 395 + ) 396 + if err != nil { 397 + return nil, err 398 + } 399 + defer rows.Close() 400 + var refLinks []models.RichReferenceLink 401 + for rows.Next() { 402 + var l models.RichReferenceLink 403 + l.Kind = models.RefKindPull 404 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil { 405 + return nil, err 406 + } 407 + refLinks = append(refLinks, l) 408 + } 409 + if err := rows.Err(); err != nil { 410 + return nil, 
fmt.Errorf("iterate rows: %w", err) 411 + } 412 + return refLinks, nil 413 + } 414 + 415 + func getPullCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 416 + if len(aturis) == 0 { 417 + return nil, nil 418 + } 419 + filter := orm.FilterIn("c.at_uri", aturis) 420 + rows, err := e.Query( 421 + fmt.Sprintf( 422 + `select r.did, r.name, p.pull_id, c.id, p.title, p.state 423 + from repos r 424 + join pulls p 425 + on r.at_uri = p.repo_at 426 + join comments c 427 + on ('at://' || p.owner_did || '/' || 'sh.tangled.repo.pull' || '/' || p.rkey) = c.subject_at 428 + where %s`, 429 + filter.Condition(), 430 + ), 431 + filter.Arg()..., 432 + ) 433 + if err != nil { 434 + return nil, err 435 + } 436 + defer rows.Close() 437 + var refLinks []models.RichReferenceLink 438 + for rows.Next() { 439 + var l models.RichReferenceLink 440 + l.Kind = models.RefKindPull 441 + l.CommentId = new(int) 442 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil { 443 + return nil, err 444 + } 445 + refLinks = append(refLinks, l) 446 + } 447 + if err := rows.Err(); err != nil { 448 + return nil, fmt.Errorf("iterate rows: %w", err) 449 + } 450 + return refLinks, nil 451 + }
+5 -3
appview/db/registration.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetRegistrations(e Execer, filters ...filter) ([]models.Registration, error) { 13 + func GetRegistrations(e Execer, filters ...orm.Filter) ([]models.Registration, error) { 13 14 var registrations []models.Registration 14 15 15 16 var conditions []string ··· 37 38 if err != nil { 38 39 return nil, err 39 40 } 41 + defer rows.Close() 40 42 41 43 for rows.Next() { 42 44 var createdAt string ··· 69 71 return registrations, nil 70 72 } 71 73 72 - func MarkRegistered(e Execer, filters ...filter) error { 74 + func MarkRegistered(e Execer, filters ...orm.Filter) error { 73 75 var conditions []string 74 76 var args []any 75 77 for _, filter := range filters { ··· 94 96 return err 95 97 } 96 98 97 - func DeleteKnot(e Execer, filters ...filter) error { 99 + func DeleteKnot(e Execer, filters ...orm.Filter) error { 98 100 var conditions []string 99 101 var args []any 100 102 for _, filter := range filters {
+29 -34
appview/db/repos.go
··· 10 10 "time" 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 - securejoin "github.com/cyphar/filepath-securejoin" 14 - "tangled.org/core/api/tangled" 15 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 16 15 ) 17 16 18 - type Repo struct { 19 - Id int64 20 - Did string 21 - Name string 22 - Knot string 23 - Rkey string 24 - Created time.Time 25 - Description string 26 - Spindle string 27 - 28 - // optionally, populate this when querying for reverse mappings 29 - RepoStats *models.RepoStats 30 - 31 - // optional 32 - Source string 33 - } 34 - 35 - func (r Repo) RepoAt() syntax.ATURI { 36 - return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey)) 37 - } 38 - 39 - func (r Repo) DidSlashRepo() string { 40 - p, _ := securejoin.SecureJoin(r.Did, r.Name) 41 - return p 42 - } 43 - 44 - func GetRepos(e Execer, limit int, filters ...filter) ([]models.Repo, error) { 17 + func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) { 45 18 repoMap := make(map[syntax.ATURI]*models.Repo) 46 19 47 20 var conditions []string ··· 83 56 limitClause, 84 57 ) 85 58 rows, err := e.Query(repoQuery, args...) 86 - 87 59 if err != nil { 88 60 return nil, fmt.Errorf("failed to execute repo query: %w ", err) 89 61 } 62 + defer rows.Close() 90 63 91 64 for rows.Next() { 92 65 var repo models.Repo ··· 155 128 if err != nil { 156 129 return nil, fmt.Errorf("failed to execute labels query: %w ", err) 157 130 } 131 + defer rows.Close() 132 + 158 133 for rows.Next() { 159 134 var repoat, labelat string 160 135 if err := rows.Scan(&repoat, &labelat); err != nil { ··· 183 158 from repo_languages 184 159 where repo_at in (%s) 185 160 and is_default_ref = 1 161 + and language <> '' 186 162 ) 187 163 where rn = 1 188 164 `, ··· 192 168 if err != nil { 193 169 return nil, fmt.Errorf("failed to execute lang query: %w ", err) 194 170 } 171 + defer rows.Close() 172 + 195 173 for rows.Next() { 196 174 var repoat, lang string 197 175 if err := rows.Scan(&repoat, &lang); err != nil { ··· 218 196 if err != nil { 219 197 return nil, fmt.Errorf("failed to execute star-count query: %w ", err) 220 198 } 199 + defer rows.Close() 200 + 221 201 for rows.Next() { 222 202 var repoat string 223 203 var count int ··· 247 227 if err != nil { 248 228 return nil, fmt.Errorf("failed to execute issue-count query: %w ", err) 249 229 } 230 + defer rows.Close() 231 + 250 232 for rows.Next() { 251 233 var repoat string 252 234 var open, closed int ··· 288 270 if err != nil { 289 271 return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err) 290 272 } 273 + defer rows.Close() 274 + 291 275 for rows.Next() { 292 276 var repoat string 293 277 var open, merged, closed, deleted int ··· 322 306 } 323 307 324 308 // helper to get exactly one repo 325 - func GetRepo(e Execer, filters ...filter) (*models.Repo, error) { 309 + func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) { 326 310 repos, err := GetRepos(e, 0, filters...) 
327 311 if err != nil { 328 312 return nil, err ··· 339 323 return &repos[0], nil 340 324 } 341 325 342 - func CountRepos(e Execer, filters ...filter) (int64, error) { 326 + func CountRepos(e Execer, filters ...orm.Filter) (int64, error) { 343 327 var conditions []string 344 328 var args []any 345 329 for _, filter := range filters { ··· 439 423 return nullableSource.String, nil 440 424 } 441 425 426 + func GetRepoSourceRepo(e Execer, repoAt syntax.ATURI) (*models.Repo, error) { 427 + source, err := GetRepoSource(e, repoAt) 428 + if source == "" || errors.Is(err, sql.ErrNoRows) { 429 + return nil, nil 430 + } 431 + if err != nil { 432 + return nil, err 433 + } 434 + return GetRepoByAtUri(e, source) 435 + } 436 + 442 437 func GetForksByDid(e Execer, did string) ([]models.Repo, error) { 443 438 var repos []models.Repo 444 439 ··· 559 554 return err 560 555 } 561 556 562 - func UnsubscribeLabel(e Execer, filters ...filter) error { 557 + func UnsubscribeLabel(e Execer, filters ...orm.Filter) error { 563 558 var conditions []string 564 559 var args []any 565 560 for _, filter := range filters { ··· 577 572 return err 578 573 } 579 574 580 - func GetRepoLabels(e Execer, filters ...filter) ([]models.RepoLabel, error) { 575 + func GetRepoLabels(e Execer, filters ...orm.Filter) ([]models.RepoLabel, error) { 581 576 var conditions []string 582 577 var args []any 583 578 for _, filter := range filters {
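With the Repo model consolidated under models and the same filter refactor applied here, a lookup by owner and name followed by the new fork-source helper might look like the sketch below; it assumes models.Repo keeps the RepoAt() accessor used elsewhere in this changeset, and the wrapper itself is illustrative:

package example

import (
	"tangled.org/core/appview/db"
	"tangled.org/core/appview/models"
	"tangled.org/core/orm"
)

// resolveRepoAndSource fetches exactly one repo by owner DID and name,
// then follows its fork source with GetRepoSourceRepo; a nil source
// means the repo is not a fork.
func resolveRepoAndSource(e db.Execer, ownerDid, name string) (*models.Repo, *models.Repo, error) {
	repo, err := db.GetRepo(
		e,
		orm.FilterEq("did", ownerDid),
		orm.FilterEq("name", name),
	)
	if err != nil {
		return nil, nil, err
	}

	source, err := db.GetRepoSourceRepo(e, repo.RepoAt())
	if err != nil {
		return nil, nil, err
	}
	return repo, source, nil
}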
+6 -5
appview/db/spindle.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetSpindles(e Execer, filters ...filter) ([]models.Spindle, error) { 13 + func GetSpindles(e Execer, filters ...orm.Filter) ([]models.Spindle, error) { 13 14 var spindles []models.Spindle 14 15 15 16 var conditions []string ··· 91 92 return err 92 93 } 93 94 94 - func VerifySpindle(e Execer, filters ...filter) (int64, error) { 95 + func VerifySpindle(e Execer, filters ...orm.Filter) (int64, error) { 95 96 var conditions []string 96 97 var args []any 97 98 for _, filter := range filters { ··· 114 115 return res.RowsAffected() 115 116 } 116 117 117 - func DeleteSpindle(e Execer, filters ...filter) error { 118 + func DeleteSpindle(e Execer, filters ...orm.Filter) error { 118 119 var conditions []string 119 120 var args []any 120 121 for _, filter := range filters { ··· 144 145 return err 145 146 } 146 147 147 - func RemoveSpindleMember(e Execer, filters ...filter) error { 148 + func RemoveSpindleMember(e Execer, filters ...orm.Filter) error { 148 149 var conditions []string 149 150 var args []any 150 151 for _, filter := range filters { ··· 163 164 return err 164 165 } 165 166 166 - func GetSpindleMembers(e Execer, filters ...filter) ([]models.SpindleMember, error) { 167 + func GetSpindleMembers(e Execer, filters ...orm.Filter) ([]models.SpindleMember, error) { 167 168 var members []models.SpindleMember 168 169 169 170 var conditions []string
+6 -4
appview/db/star.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func AddStar(e Execer, star *models.Star) error { ··· 133 134 134 135 // GetRepoStars return a list of stars each holding target repository. 135 136 // If there isn't known repo with starred at-uri, those stars will be ignored. 136 - func GetRepoStars(e Execer, limit int, filters ...filter) ([]models.RepoStar, error) { 137 + func GetRepoStars(e Execer, limit int, filters ...orm.Filter) ([]models.RepoStar, error) { 137 138 var conditions []string 138 139 var args []any 139 140 for _, filter := range filters { ··· 164 165 if err != nil { 165 166 return nil, err 166 167 } 168 + defer rows.Close() 167 169 168 170 starMap := make(map[string][]models.Star) 169 171 for rows.Next() { ··· 195 197 return nil, nil 196 198 } 197 199 198 - repos, err := GetRepos(e, 0, FilterIn("at_uri", args)) 200 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args)) 199 201 if err != nil { 200 202 return nil, err 201 203 } ··· 225 227 return repoStars, nil 226 228 } 227 229 228 - func CountStars(e Execer, filters ...filter) (int64, error) { 230 + func CountStars(e Execer, filters ...orm.Filter) (int64, error) { 229 231 var conditions []string 230 232 var args []any 231 233 for _, filter := range filters { ··· 298 300 } 299 301 300 302 // get full repo data 301 - repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris)) 303 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris)) 302 304 if err != nil { 303 305 return nil, err 304 306 }
+4 -3
appview/db/strings.go
··· 8 8 "time" 9 9 10 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 11 12 ) 12 13 13 14 func AddString(e Execer, s models.String) error { ··· 44 45 return err 45 46 } 46 47 47 - func GetStrings(e Execer, limit int, filters ...filter) ([]models.String, error) { 48 + func GetStrings(e Execer, limit int, filters ...orm.Filter) ([]models.String, error) { 48 49 var all []models.String 49 50 50 51 var conditions []string ··· 127 128 return all, nil 128 129 } 129 130 130 - func CountStrings(e Execer, filters ...filter) (int64, error) { 131 + func CountStrings(e Execer, filters ...orm.Filter) (int64, error) { 131 132 var conditions []string 132 133 var args []any 133 134 for _, filter := range filters { ··· 151 152 return count, nil 152 153 } 153 154 154 - func DeleteString(e Execer, filters ...filter) error { 155 + func DeleteString(e Execer, filters ...orm.Filter) error { 155 156 var conditions []string 156 157 var args []any 157 158 for _, filter := range filters {
+9 -8
appview/db/timeline.go
··· 5 5 6 6 "github.com/bluesky-social/indigo/atproto/syntax" 7 7 "tangled.org/core/appview/models" 8 + "tangled.org/core/orm" 8 9 ) 9 10 10 11 // TODO: this gathers heterogenous events from different sources and aggregates ··· 84 85 } 85 86 86 87 func getTimelineRepos(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 87 - filters := make([]filter, 0) 88 + filters := make([]orm.Filter, 0) 88 89 if userIsFollowing != nil { 89 - filters = append(filters, FilterIn("did", userIsFollowing)) 90 + filters = append(filters, orm.FilterIn("did", userIsFollowing)) 90 91 } 91 92 92 93 repos, err := GetRepos(e, limit, filters...) ··· 104 105 105 106 var origRepos []models.Repo 106 107 if args != nil { 107 - origRepos, err = GetRepos(e, 0, FilterIn("at_uri", args)) 108 + origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args)) 108 109 } 109 110 if err != nil { 110 111 return nil, err ··· 144 145 } 145 146 146 147 func getTimelineStars(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 147 - filters := make([]filter, 0) 148 + filters := make([]orm.Filter, 0) 148 149 if userIsFollowing != nil { 149 - filters = append(filters, FilterIn("did", userIsFollowing)) 150 + filters = append(filters, orm.FilterIn("did", userIsFollowing)) 150 151 } 151 152 152 153 stars, err := GetRepoStars(e, limit, filters...) ··· 180 181 } 181 182 182 183 func getTimelineFollows(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 183 - filters := make([]filter, 0) 184 + filters := make([]orm.Filter, 0) 184 185 if userIsFollowing != nil { 185 - filters = append(filters, FilterIn("user_did", userIsFollowing)) 186 + filters = append(filters, orm.FilterIn("user_did", userIsFollowing)) 186 187 } 187 188 188 189 follows, err := GetFollows(e, limit, filters...) ··· 199 200 return nil, nil 200 201 } 201 202 202 - profiles, err := GetProfiles(e, FilterIn("did", subjects)) 203 + profiles, err := GetProfiles(e, orm.FilterIn("did", subjects)) 203 204 if err != nil { 204 205 return nil, err 205 206 }
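The timeline queries build their filter slice conditionally, so a nil following list means "no restriction" rather than "no results". The same pattern, pulled out into a small illustrative helper:

package example

import (
	"tangled.org/core/appview/db"
	"tangled.org/core/appview/models"
	"tangled.org/core/orm"
)

// timelineRepos restricts the repo query to the followed DIDs only when a
// following list is present, mirroring getTimelineRepos above.
func timelineRepos(e db.Execer, limit int, following []string) ([]models.Repo, error) {
	filters := make([]orm.Filter, 0)
	if following != nil {
		filters = append(filters, orm.FilterIn("did", following))
	}
	return db.GetRepos(e, limit, filters...)
}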
+65 -39
appview/ingester.go
··· 21 21 "tangled.org/core/appview/serververify" 22 22 "tangled.org/core/appview/validator" 23 23 "tangled.org/core/idresolver" 24 + "tangled.org/core/orm" 24 25 "tangled.org/core/rbac" 25 26 ) 26 27 ··· 78 79 err = i.ingestString(e) 79 80 case tangled.RepoIssueNSID: 80 81 err = i.ingestIssue(ctx, e) 81 - case tangled.RepoIssueCommentNSID: 82 - err = i.ingestIssueComment(e) 82 + case tangled.CommentNSID: 83 + err = i.ingestComment(e) 83 84 case tangled.LabelDefinitionNSID: 84 85 err = i.ingestLabelDefinition(e) 85 86 case tangled.LabelOpNSID: ··· 253 254 254 255 err = db.AddArtifact(i.Db, artifact) 255 256 case jmodels.CommitOperationDelete: 256 - err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey)) 257 + err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey)) 257 258 } 258 259 259 260 if err != nil { ··· 350 351 351 352 err = db.UpsertProfile(tx, &profile) 352 353 case jmodels.CommitOperationDelete: 353 - err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey)) 354 + err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey)) 354 355 } 355 356 356 357 if err != nil { ··· 424 425 // get record from db first 425 426 members, err := db.GetSpindleMembers( 426 427 ddb, 427 - db.FilterEq("did", did), 428 - db.FilterEq("rkey", rkey), 428 + orm.FilterEq("did", did), 429 + orm.FilterEq("rkey", rkey), 429 430 ) 430 431 if err != nil || len(members) != 1 { 431 432 return fmt.Errorf("failed to get member: %w, len(members) = %d", err, len(members)) ··· 440 441 // remove record by rkey && update enforcer 441 442 if err = db.RemoveSpindleMember( 442 443 tx, 443 - db.FilterEq("did", did), 444 - db.FilterEq("rkey", rkey), 444 + orm.FilterEq("did", did), 445 + orm.FilterEq("rkey", rkey), 445 446 ); err != nil { 446 447 return fmt.Errorf("failed to remove from db: %w", err) 447 448 } ··· 523 524 // get record from db first 524 525 spindles, err := db.GetSpindles( 525 526 ddb, 526 - db.FilterEq("owner", did), 527 - db.FilterEq("instance", instance), 527 + orm.FilterEq("owner", did), 528 + orm.FilterEq("instance", instance), 528 529 ) 529 530 if err != nil || len(spindles) != 1 { 530 531 return fmt.Errorf("failed to get spindles: %w, len(spindles) = %d", err, len(spindles)) ··· 543 544 // remove spindle members first 544 545 err = db.RemoveSpindleMember( 545 546 tx, 546 - db.FilterEq("owner", did), 547 - db.FilterEq("instance", instance), 547 + orm.FilterEq("owner", did), 548 + orm.FilterEq("instance", instance), 548 549 ) 549 550 if err != nil { 550 551 return err ··· 552 553 553 554 err = db.DeleteSpindle( 554 555 tx, 555 - db.FilterEq("owner", did), 556 - db.FilterEq("instance", instance), 556 + orm.FilterEq("owner", did), 557 + orm.FilterEq("instance", instance), 557 558 ) 558 559 if err != nil { 559 560 return err ··· 621 622 case jmodels.CommitOperationDelete: 622 623 if err := db.DeleteString( 623 624 ddb, 624 - db.FilterEq("did", did), 625 - db.FilterEq("rkey", rkey), 625 + orm.FilterEq("did", did), 626 + orm.FilterEq("rkey", rkey), 626 627 ); err != nil { 627 628 l.Error("failed to delete", "err", err) 628 629 return fmt.Errorf("failed to delete string record: %w", err) ··· 740 741 // get record from db first 741 742 registrations, err := db.GetRegistrations( 742 743 ddb, 743 - db.FilterEq("domain", domain), 744 - db.FilterEq("did", did), 744 + orm.FilterEq("domain", domain), 745 + orm.FilterEq("did", did), 745 746 ) 746 747 if err != nil { 747 748 return 
fmt.Errorf("failed to get registration: %w", err) ··· 762 763 763 764 err = db.DeleteKnot( 764 765 tx, 765 - db.FilterEq("did", did), 766 - db.FilterEq("domain", domain), 766 + orm.FilterEq("did", did), 767 + orm.FilterEq("domain", domain), 767 768 ) 768 769 if err != nil { 769 770 return err ··· 841 842 return nil 842 843 843 844 case jmodels.CommitOperationDelete: 845 + tx, err := ddb.BeginTx(ctx, nil) 846 + if err != nil { 847 + l.Error("failed to begin transaction", "err", err) 848 + return err 849 + } 850 + defer tx.Rollback() 851 + 844 852 if err := db.DeleteIssues( 845 - ddb, 846 - db.FilterEq("did", did), 847 - db.FilterEq("rkey", rkey), 853 + tx, 854 + did, 855 + rkey, 848 856 ); err != nil { 849 857 l.Error("failed to delete", "err", err) 850 858 return fmt.Errorf("failed to delete issue record: %w", err) 851 859 } 860 + if err := tx.Commit(); err != nil { 861 + l.Error("failed to commit txn", "err", err) 862 + return err 863 + } 852 864 853 865 return nil 854 866 } ··· 856 868 return nil 857 869 } 858 870 859 - func (i *Ingester) ingestIssueComment(e *jmodels.Event) error { 871 + func (i *Ingester) ingestComment(e *jmodels.Event) error { 860 872 did := e.Did 861 873 rkey := e.Commit.RKey 862 874 863 875 var err error 864 876 865 - l := i.Logger.With("handler", "ingestIssueComment", "nsid", e.Commit.Collection, "did", did, "rkey", rkey) 877 + l := i.Logger.With("handler", "ingestComment", "nsid", e.Commit.Collection, "did", did, "rkey", rkey) 866 878 l.Info("ingesting record") 867 879 868 880 ddb, ok := i.Db.Execer.(*db.DB) ··· 873 885 switch e.Commit.Operation { 874 886 case jmodels.CommitOperationCreate, jmodels.CommitOperationUpdate: 875 887 raw := json.RawMessage(e.Commit.Record) 876 - record := tangled.RepoIssueComment{} 888 + record := tangled.Comment{} 877 889 err = json.Unmarshal(raw, &record) 878 890 if err != nil { 879 891 return fmt.Errorf("invalid record: %w", err) 880 892 } 881 893 882 - comment, err := models.IssueCommentFromRecord(did, rkey, record) 894 + comment, err := models.CommentFromRecord(did, rkey, record) 883 895 if err != nil { 884 896 return fmt.Errorf("failed to parse comment from record: %w", err) 885 897 } 886 898 887 - if err := i.Validator.ValidateIssueComment(comment); err != nil { 899 + // TODO: ingest pull comments 900 + // we aren't ingesting pull comments yet because pull itself isn't fully atprotated. 
901 + // so we cannot know which round this comment is pointing to 902 + if comment.Subject.Collection().String() == tangled.RepoPullNSID { 903 + l.Info("skip ingesting pull comments") 904 + return nil 905 + } 906 + 907 + if err := comment.Validate(); err != nil { 888 908 return fmt.Errorf("failed to validate comment: %w", err) 889 909 } 890 910 891 - _, err = db.AddIssueComment(ddb, *comment) 911 + tx, err := ddb.Begin() 892 912 if err != nil { 893 - return fmt.Errorf("failed to create issue comment: %w", err) 913 + return fmt.Errorf("failed to start transaction: %w", err) 914 + } 915 + defer tx.Rollback() 916 + 917 + err = db.PutComment(tx, comment) 918 + if err != nil { 919 + return fmt.Errorf("failed to create comment: %w", err) 894 920 } 895 921 896 - return nil 922 + return tx.Commit() 897 923 898 924 case jmodels.CommitOperationDelete: 899 - if err := db.DeleteIssueComments( 925 + if err := db.DeleteComments( 900 926 ddb, 901 - db.FilterEq("did", did), 902 - db.FilterEq("rkey", rkey), 927 + orm.FilterEq("did", did), 928 + orm.FilterEq("rkey", rkey), 903 929 ); err != nil { 904 - return fmt.Errorf("failed to delete issue comment record: %w", err) 930 + return fmt.Errorf("failed to delete comment record: %w", err) 905 931 } 906 932 907 933 return nil ··· 952 978 case jmodels.CommitOperationDelete: 953 979 if err := db.DeleteLabelDefinition( 954 980 ddb, 955 - db.FilterEq("did", did), 956 - db.FilterEq("rkey", rkey), 981 + orm.FilterEq("did", did), 982 + orm.FilterEq("rkey", rkey), 957 983 ); err != nil { 958 984 return fmt.Errorf("failed to delete labeldef record: %w", err) 959 985 } ··· 993 1019 var repo *models.Repo 994 1020 switch collection { 995 1021 case tangled.RepoIssueNSID: 996 - i, err := db.GetIssues(ddb, db.FilterEq("at_uri", subject)) 1022 + i, err := db.GetIssues(ddb, orm.FilterEq("at_uri", subject)) 997 1023 if err != nil || len(i) != 1 { 998 1024 return fmt.Errorf("failed to find subject: %w || subject count %d", err, len(i)) 999 1025 } ··· 1002 1028 return fmt.Errorf("unsupport label subject: %s", collection) 1003 1029 } 1004 1030 1005 - actx, err := db.NewLabelApplicationCtx(ddb, db.FilterIn("at_uri", repo.Labels)) 1031 + actx, err := db.NewLabelApplicationCtx(ddb, orm.FilterIn("at_uri", repo.Labels)) 1006 1032 if err != nil { 1007 1033 return fmt.Errorf("failed to build label application ctx: %w", err) 1008 1034 }
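ingestComment skips any record whose subject points at a pull, since pulls are not yet fully represented in atproto and the submission round cannot be recovered. A small sketch of that subject check, using only ParseATURI and the NSID constant referenced in this hunk; the helper name is illustrative:

package example

import (
	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/api/tangled"
)

// isPullComment reports whether a sh.tangled.comment record targets a pull
// request, by inspecting the collection segment of its subject at-uri.
func isPullComment(subject string) (bool, error) {
	uri, err := syntax.ParseATURI(subject)
	if err != nil {
		return false, err
	}
	return uri.Collection().String() == tangled.RepoPullNSID, nil
}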
+168 -158
appview/issues/issues.go
··· 7 7 "fmt" 8 8 "log/slog" 9 9 "net/http" 10 - "slices" 11 10 "time" 12 11 13 12 comatproto "github.com/bluesky-social/indigo/api/atproto" ··· 20 19 "tangled.org/core/appview/config" 21 20 "tangled.org/core/appview/db" 22 21 issues_indexer "tangled.org/core/appview/indexer/issues" 22 + "tangled.org/core/appview/mentions" 23 23 "tangled.org/core/appview/models" 24 24 "tangled.org/core/appview/notify" 25 25 "tangled.org/core/appview/oauth" 26 26 "tangled.org/core/appview/pages" 27 - "tangled.org/core/appview/pages/markup" 27 + "tangled.org/core/appview/pages/repoinfo" 28 28 "tangled.org/core/appview/pagination" 29 29 "tangled.org/core/appview/reporesolver" 30 30 "tangled.org/core/appview/validator" 31 31 "tangled.org/core/idresolver" 32 + "tangled.org/core/orm" 33 + "tangled.org/core/rbac" 32 34 "tangled.org/core/tid" 33 35 ) 34 36 35 37 type Issues struct { 36 - oauth *oauth.OAuth 37 - repoResolver *reporesolver.RepoResolver 38 - pages *pages.Pages 39 - idResolver *idresolver.Resolver 40 - db *db.DB 41 - config *config.Config 42 - notifier notify.Notifier 43 - logger *slog.Logger 44 - validator *validator.Validator 45 - indexer *issues_indexer.Indexer 38 + oauth *oauth.OAuth 39 + repoResolver *reporesolver.RepoResolver 40 + enforcer *rbac.Enforcer 41 + pages *pages.Pages 42 + idResolver *idresolver.Resolver 43 + mentionsResolver *mentions.Resolver 44 + db *db.DB 45 + config *config.Config 46 + notifier notify.Notifier 47 + logger *slog.Logger 48 + validator *validator.Validator 49 + indexer *issues_indexer.Indexer 46 50 } 47 51 48 52 func New( 49 53 oauth *oauth.OAuth, 50 54 repoResolver *reporesolver.RepoResolver, 55 + enforcer *rbac.Enforcer, 51 56 pages *pages.Pages, 52 57 idResolver *idresolver.Resolver, 58 + mentionsResolver *mentions.Resolver, 53 59 db *db.DB, 54 60 config *config.Config, 55 61 notifier notify.Notifier, ··· 58 64 logger *slog.Logger, 59 65 ) *Issues { 60 66 return &Issues{ 61 - oauth: oauth, 62 - repoResolver: repoResolver, 63 - pages: pages, 64 - idResolver: idResolver, 65 - db: db, 66 - config: config, 67 - notifier: notifier, 68 - logger: logger, 69 - validator: validator, 70 - indexer: indexer, 67 + oauth: oauth, 68 + repoResolver: repoResolver, 69 + enforcer: enforcer, 70 + pages: pages, 71 + idResolver: idResolver, 72 + mentionsResolver: mentionsResolver, 73 + db: db, 74 + config: config, 75 + notifier: notifier, 76 + logger: logger, 77 + validator: validator, 78 + indexer: indexer, 71 79 } 72 80 } 73 81 ··· 97 105 userReactions = db.GetReactionStatusMap(rp.db, user.Did, issue.AtUri()) 98 106 } 99 107 108 + backlinks, err := db.GetBacklinks(rp.db, issue.AtUri()) 109 + if err != nil { 110 + l.Error("failed to fetch backlinks", "err", err) 111 + rp.pages.Error503(w) 112 + return 113 + } 114 + 100 115 labelDefs, err := db.GetLabelDefinitions( 101 116 rp.db, 102 - db.FilterIn("at_uri", f.Repo.Labels), 103 - db.FilterContains("scope", tangled.RepoIssueNSID), 117 + orm.FilterIn("at_uri", f.Labels), 118 + orm.FilterContains("scope", tangled.RepoIssueNSID), 104 119 ) 105 120 if err != nil { 106 121 l.Error("failed to fetch labels", "err", err) ··· 115 130 116 131 rp.pages.RepoSingleIssue(w, pages.RepoSingleIssueParams{ 117 132 LoggedInUser: user, 118 - RepoInfo: f.RepoInfo(user), 133 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 119 134 Issue: issue, 120 135 CommentList: issue.CommentList(), 136 + Backlinks: backlinks, 121 137 OrderedReactionKinds: models.OrderedReactionKinds, 122 138 Reactions: reactionMap, 123 139 UserReacted: userReactions, ··· 128 144 func (rp 
*Issues) EditIssue(w http.ResponseWriter, r *http.Request) { 129 145 l := rp.logger.With("handler", "EditIssue") 130 146 user := rp.oauth.GetUser(r) 131 - f, err := rp.repoResolver.Resolve(r) 132 - if err != nil { 133 - l.Error("failed to get repo and knot", "err", err) 134 - return 135 - } 136 147 137 148 issue, ok := r.Context().Value("issue").(*models.Issue) 138 149 if !ok { ··· 145 156 case http.MethodGet: 146 157 rp.pages.EditIssueFragment(w, pages.EditIssueParams{ 147 158 LoggedInUser: user, 148 - RepoInfo: f.RepoInfo(user), 159 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 149 160 Issue: issue, 150 161 }) 151 162 case http.MethodPost: ··· 153 164 newIssue := issue 154 165 newIssue.Title = r.FormValue("title") 155 166 newIssue.Body = r.FormValue("body") 167 + newIssue.Mentions, newIssue.References = rp.mentionsResolver.Resolve(r.Context(), newIssue.Body) 156 168 157 169 if err := rp.validator.ValidateIssue(newIssue); err != nil { 158 170 l.Error("validation error", "err", err) ··· 222 234 l := rp.logger.With("handler", "DeleteIssue") 223 235 noticeId := "issue-actions-error" 224 236 225 - user := rp.oauth.GetUser(r) 226 - 227 237 f, err := rp.repoResolver.Resolve(r) 228 238 if err != nil { 229 239 l.Error("failed to get repo and knot", "err", err) ··· 238 248 } 239 249 l = l.With("did", issue.Did, "rkey", issue.Rkey) 240 250 251 + tx, err := rp.db.Begin() 252 + if err != nil { 253 + l.Error("failed to start transaction", "err", err) 254 + rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.") 255 + return 256 + } 257 + defer tx.Rollback() 258 + 241 259 // delete from PDS 242 260 client, err := rp.oauth.AuthorizedClient(r) 243 261 if err != nil { ··· 258 276 } 259 277 260 278 // delete from db 261 - if err := db.DeleteIssues(rp.db, db.FilterEq("id", issue.Id)); err != nil { 279 + if err := db.DeleteIssues(tx, issue.Did, issue.Rkey); err != nil { 262 280 l.Error("failed to delete issue", "err", err) 263 281 rp.pages.Notice(w, noticeId, "Failed to delete issue.") 264 282 return 265 283 } 284 + tx.Commit() 266 285 267 286 rp.notifier.DeleteIssue(r.Context(), issue) 268 287 269 288 // return to all issues page 270 - rp.pages.HxRedirect(w, "/"+f.RepoInfo(user).FullName()+"/issues") 289 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 290 + rp.pages.HxRedirect(w, "/"+ownerSlashRepo+"/issues") 271 291 } 272 292 273 293 func (rp *Issues) CloseIssue(w http.ResponseWriter, r *http.Request) { ··· 286 306 return 287 307 } 288 308 289 - collaborators, err := f.Collaborators(r.Context()) 290 - if err != nil { 291 - l.Error("failed to fetch repo collaborators", "err", err) 292 - } 293 - isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool { 294 - return user.Did == collab.Did 295 - }) 309 + roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 310 + isRepoOwner := roles.IsOwner() 311 + isCollaborator := roles.IsCollaborator() 296 312 isIssueOwner := user.Did == issue.Did 297 313 298 314 // TODO: make this more granular 299 - if isIssueOwner || isCollaborator { 315 + if isIssueOwner || isRepoOwner || isCollaborator { 300 316 err = db.CloseIssues( 301 317 rp.db, 302 - db.FilterEq("id", issue.Id), 318 + orm.FilterEq("id", issue.Id), 303 319 ) 304 320 if err != nil { 305 321 l.Error("failed to close issue", "err", err) ··· 312 328 // notify about the issue closure 313 329 rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue) 314 330 315 - rp.pages.HxLocation(w, 
fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId)) 331 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 332 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId)) 316 333 return 317 334 } else { 318 335 l.Error("user is not permitted to close issue") ··· 337 354 return 338 355 } 339 356 340 - collaborators, err := f.Collaborators(r.Context()) 341 - if err != nil { 342 - l.Error("failed to fetch repo collaborators", "err", err) 343 - } 344 - isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool { 345 - return user.Did == collab.Did 346 - }) 357 + roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 358 + isRepoOwner := roles.IsOwner() 359 + isCollaborator := roles.IsCollaborator() 347 360 isIssueOwner := user.Did == issue.Did 348 361 349 - if isCollaborator || isIssueOwner { 362 + if isCollaborator || isRepoOwner || isIssueOwner { 350 363 err := db.ReopenIssues( 351 364 rp.db, 352 - db.FilterEq("id", issue.Id), 365 + orm.FilterEq("id", issue.Id), 353 366 ) 354 367 if err != nil { 355 368 l.Error("failed to reopen issue", "err", err) ··· 362 375 // notify about the issue reopen 363 376 rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue) 364 377 365 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId)) 378 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 379 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId)) 366 380 return 367 381 } else { 368 382 l.Error("user is not the owner of the repo") ··· 389 403 390 404 body := r.FormValue("body") 391 405 if body == "" { 392 - rp.pages.Notice(w, "issue", "Body is required") 406 + rp.pages.Notice(w, "issue-comment", "Body is required") 393 407 return 394 408 } 395 409 396 - replyToUri := r.FormValue("reply-to") 397 - var replyTo *string 398 - if replyToUri != "" { 399 - replyTo = &replyToUri 410 + var replyTo *syntax.ATURI 411 + replyToRaw := r.FormValue("reply-to") 412 + if replyToRaw != "" { 413 + aturi, err := syntax.ParseATURI(r.FormValue("reply-to")) 414 + if err != nil { 415 + rp.pages.Notice(w, "issue-comment", "reply-to should be valid AT-URI") 416 + return 417 + } 418 + replyTo = &aturi 400 419 } 401 420 402 - comment := models.IssueComment{ 403 - Did: user.Did, 404 - Rkey: tid.TID(), 405 - IssueAt: issue.AtUri().String(), 406 - ReplyTo: replyTo, 407 - Body: body, 408 - Created: time.Now(), 421 + mentions, references := rp.mentionsResolver.Resolve(r.Context(), body) 422 + 423 + comment := models.Comment{ 424 + Did: syntax.DID(user.Did), 425 + Rkey: tid.TID(), 426 + Subject: issue.AtUri(), 427 + ReplyTo: replyTo, 428 + Body: body, 429 + Created: time.Now(), 430 + Mentions: mentions, 431 + References: references, 409 432 } 410 - if err = rp.validator.ValidateIssueComment(&comment); err != nil { 433 + if err = comment.Validate(); err != nil { 411 434 l.Error("failed to validate comment", "err", err) 412 435 rp.pages.Notice(w, "issue-comment", "Failed to create comment.") 413 436 return ··· 423 446 424 447 // create a record first 425 448 resp, err := comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 426 - Collection: tangled.RepoIssueCommentNSID, 427 - Repo: comment.Did, 449 + Collection: tangled.CommentNSID, 450 + Repo: user.Did, 428 451 Rkey: comment.Rkey, 429 452 Record: &lexutil.LexiconTypeDecoder{ 430 453 Val: &record, ··· 442 465 } 443 466 }() 444 467 445 - commentId, err := 
db.AddIssueComment(rp.db, comment) 468 + tx, err := rp.db.Begin() 469 + if err != nil { 470 + l.Error("failed to start transaction", "err", err) 471 + rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.") 472 + return 473 + } 474 + defer tx.Rollback() 475 + 476 + err = db.PutComment(tx, &comment) 446 477 if err != nil { 447 478 l.Error("failed to create comment", "err", err) 448 479 rp.pages.Notice(w, "issue-comment", "Failed to create comment.") 449 480 return 450 481 } 482 + err = tx.Commit() 483 + if err != nil { 484 + l.Error("failed to commit transaction", "err", err) 485 + rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.") 486 + return 487 + } 451 488 452 489 // reset atUri to make rollback a no-op 453 490 atUri = "" 454 491 455 - // notify about the new comment 456 - comment.Id = commentId 492 + rp.notifier.NewComment(r.Context(), &comment) 457 493 458 - rawMentions := markup.FindUserMentions(comment.Body) 459 - idents := rp.idResolver.ResolveIdents(r.Context(), rawMentions) 460 - l.Debug("parsed mentions", "raw", rawMentions, "idents", idents) 461 - var mentions []syntax.DID 462 - for _, ident := range idents { 463 - if ident != nil && !ident.Handle.IsInvalidHandle() { 464 - mentions = append(mentions, ident.DID) 465 - } 466 - } 467 - rp.notifier.NewIssueComment(r.Context(), &comment, mentions) 468 - 469 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", f.OwnerSlashRepo(), issue.IssueId, commentId)) 494 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 495 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", ownerSlashRepo, issue.IssueId, comment.Id)) 470 496 } 471 497 472 498 func (rp *Issues) IssueComment(w http.ResponseWriter, r *http.Request) { 473 499 l := rp.logger.With("handler", "IssueComment") 474 500 user := rp.oauth.GetUser(r) 475 - f, err := rp.repoResolver.Resolve(r) 476 - if err != nil { 477 - l.Error("failed to get repo and knot", "err", err) 478 - return 479 - } 480 501 481 502 issue, ok := r.Context().Value("issue").(*models.Issue) 482 503 if !ok { ··· 486 507 } 487 508 488 509 commentId := chi.URLParam(r, "commentId") 489 - comments, err := db.GetIssueComments( 510 + comments, err := db.GetComments( 490 511 rp.db, 491 - db.FilterEq("id", commentId), 512 + orm.FilterEq("id", commentId), 492 513 ) 493 514 if err != nil { 494 515 l.Error("failed to fetch comment", "id", commentId) ··· 504 525 505 526 rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{ 506 527 LoggedInUser: user, 507 - RepoInfo: f.RepoInfo(user), 528 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 508 529 Issue: issue, 509 530 Comment: &comment, 510 531 }) ··· 513 534 func (rp *Issues) EditIssueComment(w http.ResponseWriter, r *http.Request) { 514 535 l := rp.logger.With("handler", "EditIssueComment") 515 536 user := rp.oauth.GetUser(r) 516 - f, err := rp.repoResolver.Resolve(r) 517 - if err != nil { 518 - l.Error("failed to get repo and knot", "err", err) 519 - return 520 - } 521 537 522 538 issue, ok := r.Context().Value("issue").(*models.Issue) 523 539 if !ok { ··· 527 543 } 528 544 529 545 commentId := chi.URLParam(r, "commentId") 530 - comments, err := db.GetIssueComments( 546 + comments, err := db.GetComments( 531 547 rp.db, 532 - db.FilterEq("id", commentId), 548 + orm.FilterEq("id", commentId), 533 549 ) 534 550 if err != nil { 535 551 l.Error("failed to fetch comment", "id", commentId) ··· 543 559 } 544 560 comment := comments[0] 545 561 546 - if comment.Did != user.Did { 562 + if 
comment.Did.String() != user.Did { 547 563 l.Error("unauthorized comment edit", "expectedDid", comment.Did, "gotDid", user.Did) 548 564 http.Error(w, "you are not the author of this comment", http.StatusUnauthorized) 549 565 return ··· 553 569 case http.MethodGet: 554 570 rp.pages.EditIssueCommentFragment(w, pages.EditIssueCommentParams{ 555 571 LoggedInUser: user, 556 - RepoInfo: f.RepoInfo(user), 572 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 557 573 Issue: issue, 558 574 Comment: &comment, 559 575 }) ··· 571 587 newComment := comment 572 588 newComment.Body = newBody 573 589 newComment.Edited = &now 590 + newComment.Mentions, newComment.References = rp.mentionsResolver.Resolve(r.Context(), newBody) 591 + 574 592 record := newComment.AsRecord() 575 593 576 - _, err = db.AddIssueComment(rp.db, newComment) 594 + tx, err := rp.db.Begin() 595 + if err != nil { 596 + l.Error("failed to start transaction", "err", err) 597 + rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.") 598 + return 599 + } 600 + defer tx.Rollback() 601 + 602 + err = db.PutComment(tx, &newComment) 577 603 if err != nil { 578 604 l.Error("failed to perferom update-description query", "err", err) 579 605 rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.") 580 606 return 581 607 } 608 + tx.Commit() 582 609 583 610 // rkey is optional, it was introduced later 584 611 if newComment.Rkey != "" { 585 612 // update the record on pds 586 - ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoIssueCommentNSID, user.Did, comment.Rkey) 613 + ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.CommentNSID, user.Did, comment.Rkey) 587 614 if err != nil { 588 615 l.Error("failed to get record", "err", err, "did", newComment.Did, "rkey", newComment.Rkey) 589 616 rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "Failed to update description, no record found on PDS.") ··· 591 618 } 592 619 593 620 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 594 - Collection: tangled.RepoIssueCommentNSID, 621 + Collection: tangled.CommentNSID, 595 622 Repo: user.Did, 596 623 Rkey: newComment.Rkey, 597 624 SwapRecord: ex.Cid, ··· 607 634 // return new comment body with htmx 608 635 rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{ 609 636 LoggedInUser: user, 610 - RepoInfo: f.RepoInfo(user), 637 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 611 638 Issue: issue, 612 639 Comment: &newComment, 613 640 }) ··· 617 644 func (rp *Issues) ReplyIssueCommentPlaceholder(w http.ResponseWriter, r *http.Request) { 618 645 l := rp.logger.With("handler", "ReplyIssueCommentPlaceholder") 619 646 user := rp.oauth.GetUser(r) 620 - f, err := rp.repoResolver.Resolve(r) 621 - if err != nil { 622 - l.Error("failed to get repo and knot", "err", err) 623 - return 624 - } 625 647 626 648 issue, ok := r.Context().Value("issue").(*models.Issue) 627 649 if !ok { ··· 631 653 } 632 654 633 655 commentId := chi.URLParam(r, "commentId") 634 - comments, err := db.GetIssueComments( 656 + comments, err := db.GetComments( 635 657 rp.db, 636 - db.FilterEq("id", commentId), 658 + orm.FilterEq("id", commentId), 637 659 ) 638 660 if err != nil { 639 661 l.Error("failed to fetch comment", "id", commentId) ··· 649 671 650 672 rp.pages.ReplyIssueCommentPlaceholderFragment(w, pages.ReplyIssueCommentPlaceholderParams{ 651 673 LoggedInUser: user, 652 - RepoInfo: f.RepoInfo(user), 674 + RepoInfo: 
rp.repoResolver.GetRepoInfo(r, user), 653 675 Issue: issue, 654 676 Comment: &comment, 655 677 }) ··· 658 680 func (rp *Issues) ReplyIssueComment(w http.ResponseWriter, r *http.Request) { 659 681 l := rp.logger.With("handler", "ReplyIssueComment") 660 682 user := rp.oauth.GetUser(r) 661 - f, err := rp.repoResolver.Resolve(r) 662 - if err != nil { 663 - l.Error("failed to get repo and knot", "err", err) 664 - return 665 - } 666 683 667 684 issue, ok := r.Context().Value("issue").(*models.Issue) 668 685 if !ok { ··· 672 689 } 673 690 674 691 commentId := chi.URLParam(r, "commentId") 675 - comments, err := db.GetIssueComments( 692 + comments, err := db.GetComments( 676 693 rp.db, 677 - db.FilterEq("id", commentId), 694 + orm.FilterEq("id", commentId), 678 695 ) 679 696 if err != nil { 680 697 l.Error("failed to fetch comment", "id", commentId) ··· 690 707 691 708 rp.pages.ReplyIssueCommentFragment(w, pages.ReplyIssueCommentParams{ 692 709 LoggedInUser: user, 693 - RepoInfo: f.RepoInfo(user), 710 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 694 711 Issue: issue, 695 712 Comment: &comment, 696 713 }) ··· 699 716 func (rp *Issues) DeleteIssueComment(w http.ResponseWriter, r *http.Request) { 700 717 l := rp.logger.With("handler", "DeleteIssueComment") 701 718 user := rp.oauth.GetUser(r) 702 - f, err := rp.repoResolver.Resolve(r) 703 - if err != nil { 704 - l.Error("failed to get repo and knot", "err", err) 705 - return 706 - } 707 719 708 720 issue, ok := r.Context().Value("issue").(*models.Issue) 709 721 if !ok { ··· 713 725 } 714 726 715 727 commentId := chi.URLParam(r, "commentId") 716 - comments, err := db.GetIssueComments( 728 + comments, err := db.GetComments( 717 729 rp.db, 718 - db.FilterEq("id", commentId), 730 + orm.FilterEq("id", commentId), 719 731 ) 720 732 if err != nil { 721 733 l.Error("failed to fetch comment", "id", commentId) ··· 729 741 } 730 742 comment := comments[0] 731 743 732 - if comment.Did != user.Did { 744 + if comment.Did.String() != user.Did { 733 745 l.Error("unauthorized action", "expectedDid", comment.Did, "gotDid", user.Did) 734 746 http.Error(w, "you are not the author of this comment", http.StatusUnauthorized) 735 747 return ··· 742 754 743 755 // optimistic deletion 744 756 deleted := time.Now() 745 - err = db.DeleteIssueComments(rp.db, db.FilterEq("id", comment.Id)) 757 + err = db.DeleteComments(rp.db, orm.FilterEq("id", comment.Id)) 746 758 if err != nil { 747 759 l.Error("failed to delete comment", "err", err) 748 760 rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment") ··· 758 770 return 759 771 } 760 772 _, err = comatproto.RepoDeleteRecord(r.Context(), client, &comatproto.RepoDeleteRecord_Input{ 761 - Collection: tangled.RepoIssueCommentNSID, 773 + Collection: tangled.CommentNSID, 762 774 Repo: user.Did, 763 775 Rkey: comment.Rkey, 764 776 }) ··· 774 786 // htmx fragment of comment after deletion 775 787 rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{ 776 788 LoggedInUser: user, 777 - RepoInfo: f.RepoInfo(user), 789 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 778 790 Issue: issue, 779 791 Comment: &comment, 780 792 }) ··· 831 843 832 844 issues, err = db.GetIssues( 833 845 rp.db, 834 - db.FilterIn("id", res.Hits), 846 + orm.FilterIn("id", res.Hits), 835 847 ) 836 848 if err != nil { 837 849 l.Error("failed to get issues", "err", err) ··· 847 859 issues, err = db.GetIssuesPaginated( 848 860 rp.db, 849 861 page, 850 - db.FilterEq("repo_at", f.RepoAt()), 851 - db.FilterEq("open", 
openInt), 862 + orm.FilterEq("repo_at", f.RepoAt()), 863 + orm.FilterEq("open", openInt), 852 864 ) 853 865 if err != nil { 854 866 l.Error("failed to get issues", "err", err) ··· 859 871 860 872 labelDefs, err := db.GetLabelDefinitions( 861 873 rp.db, 862 - db.FilterIn("at_uri", f.Repo.Labels), 863 - db.FilterContains("scope", tangled.RepoIssueNSID), 874 + orm.FilterIn("at_uri", f.Labels), 875 + orm.FilterContains("scope", tangled.RepoIssueNSID), 864 876 ) 865 877 if err != nil { 866 878 l.Error("failed to fetch labels", "err", err) ··· 875 887 876 888 rp.pages.RepoIssues(w, pages.RepoIssuesParams{ 877 889 LoggedInUser: rp.oauth.GetUser(r), 878 - RepoInfo: f.RepoInfo(user), 890 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 879 891 Issues: issues, 880 892 IssueCount: totalIssues, 881 893 LabelDefs: defs, ··· 899 911 case http.MethodGet: 900 912 rp.pages.RepoNewIssue(w, pages.RepoNewIssueParams{ 901 913 LoggedInUser: user, 902 - RepoInfo: f.RepoInfo(user), 914 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 903 915 }) 904 916 case http.MethodPost: 917 + body := r.FormValue("body") 918 + mentions, references := rp.mentionsResolver.Resolve(r.Context(), body) 919 + 905 920 issue := &models.Issue{ 906 - RepoAt: f.RepoAt(), 907 - Rkey: tid.TID(), 908 - Title: r.FormValue("title"), 909 - Body: r.FormValue("body"), 910 - Open: true, 911 - Did: user.Did, 912 - Created: time.Now(), 913 - Repo: &f.Repo, 921 + RepoAt: f.RepoAt(), 922 + Rkey: tid.TID(), 923 + Title: r.FormValue("title"), 924 + Body: body, 925 + Open: true, 926 + Did: user.Did, 927 + Created: time.Now(), 928 + Mentions: mentions, 929 + References: references, 930 + Repo: f, 914 931 } 915 932 916 933 if err := rp.validator.ValidateIssue(issue); err != nil { ··· 978 995 // everything is successful, do not rollback the atproto record 979 996 atUri = "" 980 997 981 - rawMentions := markup.FindUserMentions(issue.Body) 982 - idents := rp.idResolver.ResolveIdents(r.Context(), rawMentions) 983 - l.Debug("parsed mentions", "raw", rawMentions, "idents", idents) 984 - var mentions []syntax.DID 985 - for _, ident := range idents { 986 - if ident != nil && !ident.Handle.IsInvalidHandle() { 987 - mentions = append(mentions, ident.DID) 988 - } 989 - } 990 998 rp.notifier.NewIssue(r.Context(), issue, mentions) 991 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId)) 999 + 1000 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 1001 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId)) 992 1002 return 993 1003 } 994 1004 }
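The handlers in this file now assemble a generic models.Comment, resolving mentions and references from the body before the record is written to the PDS and the row is stored via PutComment inside a transaction. A condensed sketch of the construction step, assuming the resolver and issue come from the surrounding handler; the helper itself is illustrative:

package example

import (
	"context"
	"time"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/appview/mentions"
	"tangled.org/core/appview/models"
	"tangled.org/core/tid"
)

// buildIssueComment creates a comment whose subject is the issue's at-uri,
// with mentions and references resolved from the body, validated before use.
func buildIssueComment(
	ctx context.Context,
	resolver *mentions.Resolver,
	issue *models.Issue,
	authorDid string,
	body string,
) (*models.Comment, error) {
	mentioned, references := resolver.Resolve(ctx, body)

	comment := models.Comment{
		Did:        syntax.DID(authorDid),
		Rkey:       tid.TID(),
		Subject:    issue.AtUri(),
		Body:       body,
		Created:    time.Now(),
		Mentions:   mentioned,
		References: references,
	}
	if err := comment.Validate(); err != nil {
		return nil, err
	}
	return &comment, nil
}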
+3 -3
appview/issues/opengraph.go
··· 232 232 233 233 // Get owner handle for avatar 234 234 var ownerHandle string 235 - owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Repo.Did) 235 + owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Did) 236 236 if err != nil { 237 - ownerHandle = f.Repo.Did 237 + ownerHandle = f.Did 238 238 } else { 239 239 ownerHandle = "@" + owner.Handle.String() 240 240 } 241 241 242 - card, err := rp.drawIssueSummaryCard(issue, &f.Repo, commentCount, ownerHandle) 242 + card, err := rp.drawIssueSummaryCard(issue, f, commentCount, ownerHandle) 243 243 if err != nil { 244 244 log.Println("failed to draw issue summary card", err) 245 245 http.Error(w, "failed to draw issue summary card", http.StatusInternalServerError)
+19 -23
appview/knots/knots.go
··· 21 21 "tangled.org/core/appview/xrpcclient" 22 22 "tangled.org/core/eventconsumer" 23 23 "tangled.org/core/idresolver" 24 + "tangled.org/core/orm" 24 25 "tangled.org/core/rbac" 25 26 "tangled.org/core/tid" 26 27 ··· 72 73 user := k.OAuth.GetUser(r) 73 74 registrations, err := db.GetRegistrations( 74 75 k.Db, 75 - db.FilterEq("did", user.Did), 76 + orm.FilterEq("did", user.Did), 76 77 ) 77 78 if err != nil { 78 79 k.Logger.Error("failed to fetch knot registrations", "err", err) ··· 102 103 103 104 registrations, err := db.GetRegistrations( 104 105 k.Db, 105 - db.FilterEq("did", user.Did), 106 - db.FilterEq("domain", domain), 106 + orm.FilterEq("did", user.Did), 107 + orm.FilterEq("domain", domain), 107 108 ) 108 109 if err != nil { 109 110 l.Error("failed to get registrations", "err", err) ··· 127 128 repos, err := db.GetRepos( 128 129 k.Db, 129 130 0, 130 - db.FilterEq("knot", domain), 131 + orm.FilterEq("knot", domain), 131 132 ) 132 133 if err != nil { 133 134 l.Error("failed to get knot repos", "err", err) ··· 293 294 // get record from db first 294 295 registrations, err := db.GetRegistrations( 295 296 k.Db, 296 - db.FilterEq("did", user.Did), 297 - db.FilterEq("domain", domain), 297 + orm.FilterEq("did", user.Did), 298 + orm.FilterEq("domain", domain), 298 299 ) 299 300 if err != nil { 300 301 l.Error("failed to get registration", "err", err) ··· 321 322 322 323 err = db.DeleteKnot( 323 324 tx, 324 - db.FilterEq("did", user.Did), 325 - db.FilterEq("domain", domain), 325 + orm.FilterEq("did", user.Did), 326 + orm.FilterEq("domain", domain), 326 327 ) 327 328 if err != nil { 328 329 l.Error("failed to delete registration", "err", err) ··· 402 403 // get record from db first 403 404 registrations, err := db.GetRegistrations( 404 405 k.Db, 405 - db.FilterEq("did", user.Did), 406 - db.FilterEq("domain", domain), 406 + orm.FilterEq("did", user.Did), 407 + orm.FilterEq("domain", domain), 407 408 ) 408 409 if err != nil { 409 410 l.Error("failed to get registration", "err", err) ··· 493 494 // Get updated registration to show 494 495 registrations, err = db.GetRegistrations( 495 496 k.Db, 496 - db.FilterEq("did", user.Did), 497 - db.FilterEq("domain", domain), 497 + orm.FilterEq("did", user.Did), 498 + orm.FilterEq("domain", domain), 498 499 ) 499 500 if err != nil { 500 501 l.Error("failed to get registration", "err", err) ··· 529 530 530 531 registrations, err := db.GetRegistrations( 531 532 k.Db, 532 - db.FilterEq("did", user.Did), 533 - db.FilterEq("domain", domain), 534 - db.FilterIsNot("registered", "null"), 533 + orm.FilterEq("did", user.Did), 534 + orm.FilterEq("domain", domain), 535 + orm.FilterIsNot("registered", "null"), 535 536 ) 536 537 if err != nil { 537 538 l.Error("failed to get registration", "err", err) ··· 637 638 638 639 registrations, err := db.GetRegistrations( 639 640 k.Db, 640 - db.FilterEq("did", user.Did), 641 - db.FilterEq("domain", domain), 642 - db.FilterIsNot("registered", "null"), 641 + orm.FilterEq("did", user.Did), 642 + orm.FilterEq("domain", domain), 643 + orm.FilterIsNot("registered", "null"), 643 644 ) 644 645 if err != nil { 645 646 l.Error("failed to get registration", "err", err) ··· 662 663 memberId, err := k.IdResolver.ResolveIdent(r.Context(), member) 663 664 if err != nil { 664 665 l.Error("failed to resolve member identity to handle", "err", err) 665 - k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 666 - return 667 - } 668 - if memberId.Handle.IsInvalidHandle() { 669 - l.Error("failed to resolve member 
identity to handle") 670 666 k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 671 667 return 672 668 }
+5 -4
appview/labels/labels.go
··· 16 16 "tangled.org/core/appview/oauth" 17 17 "tangled.org/core/appview/pages" 18 18 "tangled.org/core/appview/validator" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 "tangled.org/core/tid" 21 22 ··· 88 89 repoAt := r.Form.Get("repo") 89 90 subjectUri := r.Form.Get("subject") 90 91 91 - repo, err := db.GetRepo(l.db, db.FilterEq("at_uri", repoAt)) 92 + repo, err := db.GetRepo(l.db, orm.FilterEq("at_uri", repoAt)) 92 93 if err != nil { 93 94 fail("Failed to get repository.", err) 94 95 return 95 96 } 96 97 97 98 // find all the labels that this repo subscribes to 98 - repoLabels, err := db.GetRepoLabels(l.db, db.FilterEq("repo_at", repoAt)) 99 + repoLabels, err := db.GetRepoLabels(l.db, orm.FilterEq("repo_at", repoAt)) 99 100 if err != nil { 100 101 fail("Failed to get labels for this repository.", err) 101 102 return ··· 106 107 labelAts = append(labelAts, rl.LabelAt.String()) 107 108 } 108 109 109 - actx, err := db.NewLabelApplicationCtx(l.db, db.FilterIn("at_uri", labelAts)) 110 + actx, err := db.NewLabelApplicationCtx(l.db, orm.FilterIn("at_uri", labelAts)) 110 111 if err != nil { 111 112 fail("Invalid form data.", err) 112 113 return 113 114 } 114 115 115 116 // calculate the start state by applying already known labels 116 - existingOps, err := db.GetLabelOps(l.db, db.FilterEq("subject", subjectUri)) 117 + existingOps, err := db.GetLabelOps(l.db, orm.FilterEq("subject", subjectUri)) 117 118 if err != nil { 118 119 fail("Invalid form data.", err) 119 120 return
+67
appview/mentions/resolver.go
··· 1 + package mentions 2 + 3 + import ( 4 + "context" 5 + "log/slog" 6 + 7 + "github.com/bluesky-social/indigo/atproto/syntax" 8 + "tangled.org/core/appview/config" 9 + "tangled.org/core/appview/db" 10 + "tangled.org/core/appview/models" 11 + "tangled.org/core/appview/pages/markup" 12 + "tangled.org/core/idresolver" 13 + ) 14 + 15 + type Resolver struct { 16 + config *config.Config 17 + idResolver *idresolver.Resolver 18 + execer db.Execer 19 + logger *slog.Logger 20 + } 21 + 22 + func New( 23 + config *config.Config, 24 + idResolver *idresolver.Resolver, 25 + execer db.Execer, 26 + logger *slog.Logger, 27 + ) *Resolver { 28 + return &Resolver{ 29 + config, 30 + idResolver, 31 + execer, 32 + logger, 33 + } 34 + } 35 + 36 + func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) { 37 + l := r.logger.With("method", "Resolve") 38 + 39 + rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source) 40 + l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs) 41 + 42 + idents := r.idResolver.ResolveIdents(ctx, rawMentions) 43 + var mentions []syntax.DID 44 + for _, ident := range idents { 45 + if ident != nil && !ident.Handle.IsInvalidHandle() { 46 + mentions = append(mentions, ident.DID) 47 + } 48 + } 49 + l.Debug("found mentions", "mentions", mentions) 50 + 51 + var resolvedRefs []models.ReferenceLink 52 + for _, rawRef := range rawRefs { 53 + ident, err := r.idResolver.ResolveIdent(ctx, rawRef.Handle) 54 + if err != nil || ident == nil || ident.Handle.IsInvalidHandle() { 55 + continue 56 + } 57 + rawRef.Handle = string(ident.DID) 58 + resolvedRefs = append(resolvedRefs, rawRef) 59 + } 60 + aturiRefs, err := db.ValidateReferenceLinks(r.execer, resolvedRefs) 61 + if err != nil { 62 + l.Error("failed running query", "err", err) 63 + } 64 + l.Debug("found references", "refs", aturiRefs) 65 + 66 + return mentions, aturiRefs 67 + }
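A short usage sketch for the resolver: it assumes a *mentions.Resolver constructed once via mentions.New with the appview config, identity resolver, database execer, and logger shown in this file.

package example

import (
	"context"
	"fmt"

	"tangled.org/core/appview/mentions"
)

// resolveBody returns the DIDs of valid @-mentions and the at-uris of
// issue/pull references in a comment body that exist in the database.
func resolveBody(ctx context.Context, r *mentions.Resolver, body string) {
	dids, refs := r.Resolve(ctx, body)
	fmt.Println("mentions:", dids)
	fmt.Println("references:", refs)
}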
+9 -4
appview/middleware/middleware.go
··· 18 18 "tangled.org/core/appview/pagination" 19 19 "tangled.org/core/appview/reporesolver" 20 20 "tangled.org/core/idresolver" 21 + "tangled.org/core/orm" 21 22 "tangled.org/core/rbac" 22 23 ) 23 24 ··· 164 165 ok, err := mw.enforcer.E.Enforce(actor.Did, f.Knot, f.DidSlashRepo(), requiredPerm) 165 166 if err != nil || !ok { 166 167 // we need a logged in user 167 - log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.OwnerSlashRepo()) 168 + log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.DidSlashRepo()) 168 169 http.Error(w, "Forbiden", http.StatusUnauthorized) 169 170 return 170 171 } ··· 217 218 218 219 repo, err := db.GetRepo( 219 220 mw.db, 220 - db.FilterEq("did", id.DID.String()), 221 - db.FilterEq("name", repoName), 221 + orm.FilterEq("did", id.DID.String()), 222 + orm.FilterEq("name", repoName), 222 223 ) 223 224 if err != nil { 224 225 log.Println("failed to resolve repo", "err", err) 226 + w.WriteHeader(http.StatusNotFound) 225 227 mw.pages.ErrorKnot404(w) 226 228 return 227 229 } ··· 239 241 f, err := mw.repoResolver.Resolve(r) 240 242 if err != nil { 241 243 log.Println("failed to fully resolve repo", err) 244 + w.WriteHeader(http.StatusNotFound) 242 245 mw.pages.ErrorKnot404(w) 243 246 return 244 247 } ··· 287 290 f, err := mw.repoResolver.Resolve(r) 288 291 if err != nil { 289 292 log.Println("failed to fully resolve repo", err) 293 + w.WriteHeader(http.StatusNotFound) 290 294 mw.pages.ErrorKnot404(w) 291 295 return 292 296 } ··· 323 327 f, err := mw.repoResolver.Resolve(r) 324 328 if err != nil { 325 329 log.Println("failed to fully resolve repo", err) 330 + w.WriteHeader(http.StatusNotFound) 326 331 mw.pages.ErrorKnot404(w) 327 332 return 328 333 } 329 334 330 - fullName := f.OwnerHandle() + "/" + f.Name 335 + fullName := reporesolver.GetBaseRepoPath(r, f) 331 336 332 337 if r.Header.Get("User-Agent") == "Go-http-client/1.1" { 333 338 if r.URL.Query().Get("go-get") == "1" {
+117
appview/models/comment.go
··· 1 + package models 2 + 3 + import ( 4 + "fmt" 5 + "strings" 6 + "time" 7 + 8 + "github.com/bluesky-social/indigo/atproto/syntax" 9 + "tangled.org/core/api/tangled" 10 + ) 11 + 12 + type Comment struct { 13 + Id int64 14 + Did syntax.DID 15 + Rkey string 16 + Subject syntax.ATURI 17 + ReplyTo *syntax.ATURI 18 + Body string 19 + Created time.Time 20 + Edited *time.Time 21 + Deleted *time.Time 22 + Mentions []syntax.DID 23 + References []syntax.ATURI 24 + PullSubmissionId *int 25 + } 26 + 27 + func (c *Comment) AtUri() syntax.ATURI { 28 + return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", c.Did, tangled.CommentNSID, c.Rkey)) 29 + } 30 + 31 + func (c *Comment) AsRecord() tangled.Comment { 32 + mentions := make([]string, len(c.Mentions)) 33 + for i, did := range c.Mentions { 34 + mentions[i] = string(did) 35 + } 36 + references := make([]string, len(c.References)) 37 + for i, uri := range c.References { 38 + references[i] = string(uri) 39 + } 40 + var replyTo *string 41 + if c.ReplyTo != nil { 42 + replyToStr := c.ReplyTo.String() 43 + replyTo = &replyToStr 44 + } 45 + return tangled.Comment{ 46 + Subject: c.Subject.String(), 47 + Body: c.Body, 48 + CreatedAt: c.Created.Format(time.RFC3339), 49 + ReplyTo: replyTo, 50 + Mentions: mentions, 51 + References: references, 52 + } 53 + } 54 + 55 + func (c *Comment) IsTopLevel() bool { 56 + return c.ReplyTo == nil 57 + } 58 + 59 + func (c *Comment) IsReply() bool { 60 + return c.ReplyTo != nil 61 + } 62 + 63 + func (c *Comment) Validate() error { 64 + // TODO: sanitize the body and then trim space 65 + if sb := strings.TrimSpace(c.Body); sb == "" { 66 + return fmt.Errorf("body is empty after HTML sanitization") 67 + } 68 + 69 + // if it's for PR, PullSubmissionId should not be nil 70 + if c.Subject.Collection().String() == tangled.RepoPullNSID { 71 + if c.PullSubmissionId == nil { 72 + return fmt.Errorf("PullSubmissionId should not be nil") 73 + } 74 + } 75 + return nil 76 + } 77 + 78 + func CommentFromRecord(did, rkey string, record tangled.Comment) (*Comment, error) { 79 + created, err := time.Parse(time.RFC3339, record.CreatedAt) 80 + if err != nil { 81 + created = time.Now() 82 + } 83 + 84 + ownerDid := did 85 + 86 + if _, err = syntax.ParseATURI(record.Subject); err != nil { 87 + return nil, err 88 + } 89 + 90 + i := record 91 + mentions := make([]syntax.DID, len(record.Mentions)) 92 + for i, did := range record.Mentions { 93 + mentions[i] = syntax.DID(did) 94 + } 95 + references := make([]syntax.ATURI, len(record.References)) 96 + for i, uri := range i.References { 97 + references[i] = syntax.ATURI(uri) 98 + } 99 + var replyTo *syntax.ATURI 100 + if record.ReplyTo != nil { 101 + replyToAtUri := syntax.ATURI(*record.ReplyTo) 102 + replyTo = &replyToAtUri 103 + } 104 + 105 + comment := Comment{ 106 + Did: syntax.DID(ownerDid), 107 + Rkey: rkey, 108 + Body: record.Body, 109 + Subject: syntax.ATURI(record.Subject), 110 + ReplyTo: replyTo, 111 + Created: created, 112 + Mentions: mentions, 113 + References: references, 114 + } 115 + 116 + return &comment, nil 117 + }
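A sketch of working with the unified comment model: building a reply to an existing comment and round-tripping it through the lexicon record via AsRecord and CommentFromRecord. The rkey literal is a placeholder (real callers use tid.TID()), and the parent is assumed to be an issue comment, since pull comments additionally require PullSubmissionId to validate:

package example

import (
	"time"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/appview/models"
)

// replyTo threads a new comment under parent on the same subject, then
// converts it to a sh.tangled.comment record and parses it back.
func replyTo(parent *models.Comment, author syntax.DID, body string) (*models.Comment, error) {
	parentUri := parent.AtUri()
	reply := models.Comment{
		Did:     author,
		Rkey:    "replyrkey", // placeholder rkey
		Subject: parent.Subject,
		ReplyTo: &parentUri,
		Body:    body,
		Created: time.Now(),
	}
	if err := reply.Validate(); err != nil {
		return nil, err
	}

	record := reply.AsRecord() // tangled.Comment lexicon record
	return models.CommentFromRecord(string(reply.Did), reply.Rkey, record)
}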
+35 -80
appview/models/issue.go
··· 10 10 ) 11 11 12 12 type Issue struct { 13 - Id int64 14 - Did string 15 - Rkey string 16 - RepoAt syntax.ATURI 17 - IssueId int 18 - Created time.Time 19 - Edited *time.Time 20 - Deleted *time.Time 21 - Title string 22 - Body string 23 - Open bool 13 + Id int64 14 + Did string 15 + Rkey string 16 + RepoAt syntax.ATURI 17 + IssueId int 18 + Created time.Time 19 + Edited *time.Time 20 + Deleted *time.Time 21 + Title string 22 + Body string 23 + Open bool 24 + Mentions []syntax.DID 25 + References []syntax.ATURI 24 26 25 27 // optionally, populate this when querying for reverse mappings 26 28 // like comment counts, parent repo etc. 27 - Comments []IssueComment 29 + Comments []Comment 28 30 Labels LabelState 29 31 Repo *Repo 30 32 } ··· 34 36 } 35 37 36 38 func (i *Issue) AsRecord() tangled.RepoIssue { 39 + mentions := make([]string, len(i.Mentions)) 40 + for i, did := range i.Mentions { 41 + mentions[i] = string(did) 42 + } 43 + references := make([]string, len(i.References)) 44 + for i, uri := range i.References { 45 + references[i] = string(uri) 46 + } 37 47 return tangled.RepoIssue{ 38 - Repo: i.RepoAt.String(), 39 - Title: i.Title, 40 - Body: &i.Body, 41 - CreatedAt: i.Created.Format(time.RFC3339), 48 + Repo: i.RepoAt.String(), 49 + Title: i.Title, 50 + Body: &i.Body, 51 + Mentions: mentions, 52 + References: references, 53 + CreatedAt: i.Created.Format(time.RFC3339), 42 54 } 43 55 } 44 56 ··· 50 62 } 51 63 52 64 type CommentListItem struct { 53 - Self *IssueComment 54 - Replies []*IssueComment 65 + Self *Comment 66 + Replies []*Comment 55 67 } 56 68 57 69 func (it *CommentListItem) Participants() []syntax.DID { ··· 76 88 77 89 func (i *Issue) CommentList() []CommentListItem { 78 90 // Create a map to quickly find comments by their aturi 79 - toplevel := make(map[string]*CommentListItem) 80 - var replies []*IssueComment 91 + toplevel := make(map[syntax.ATURI]*CommentListItem) 92 + var replies []*Comment 81 93 82 94 // collect top level comments into the map 83 95 for _, comment := range i.Comments { 84 96 if comment.IsTopLevel() { 85 - toplevel[comment.AtUri().String()] = &CommentListItem{ 97 + toplevel[comment.AtUri()] = &CommentListItem{ 86 98 Self: &comment, 87 99 } 88 100 } else { ··· 103 115 } 104 116 105 117 // sort everything 106 - sortFunc := func(a, b *IssueComment) bool { 118 + sortFunc := func(a, b *Comment) bool { 107 119 return a.Created.Before(b.Created) 108 120 } 109 121 sort.Slice(listing, func(i, j int) bool { ··· 132 144 addParticipant(i.Did) 133 145 134 146 for _, c := range i.Comments { 135 - addParticipant(c.Did) 147 + addParticipant(c.Did.String()) 136 148 } 137 149 138 150 return participants ··· 159 171 Open: true, // new issues are open by default 160 172 } 161 173 } 162 - 163 - type IssueComment struct { 164 - Id int64 165 - Did string 166 - Rkey string 167 - IssueAt string 168 - ReplyTo *string 169 - Body string 170 - Created time.Time 171 - Edited *time.Time 172 - Deleted *time.Time 173 - } 174 - 175 - func (i *IssueComment) AtUri() syntax.ATURI { 176 - return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.Did, tangled.RepoIssueCommentNSID, i.Rkey)) 177 - } 178 - 179 - func (i *IssueComment) AsRecord() tangled.RepoIssueComment { 180 - return tangled.RepoIssueComment{ 181 - Body: i.Body, 182 - Issue: i.IssueAt, 183 - CreatedAt: i.Created.Format(time.RFC3339), 184 - ReplyTo: i.ReplyTo, 185 - } 186 - } 187 - 188 - func (i *IssueComment) IsTopLevel() bool { 189 - return i.ReplyTo == nil 190 - } 191 - 192 - func (i *IssueComment) IsReply() bool { 193 - return 
i.ReplyTo != nil 194 - } 195 - 196 - func IssueCommentFromRecord(did, rkey string, record tangled.RepoIssueComment) (*IssueComment, error) { 197 - created, err := time.Parse(time.RFC3339, record.CreatedAt) 198 - if err != nil { 199 - created = time.Now() 200 - } 201 - 202 - ownerDid := did 203 - 204 - if _, err = syntax.ParseATURI(record.Issue); err != nil { 205 - return nil, err 206 - } 207 - 208 - comment := IssueComment{ 209 - Did: ownerDid, 210 - Rkey: rkey, 211 - Body: record.Body, 212 - IssueAt: record.Issue, 213 - ReplyTo: record.ReplyTo, 214 - Created: created, 215 - } 216 - 217 - return &comment, nil 218 - }
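Issue comments are now plain models.Comment values, and CommentList groups replies under their top-level parents by at-uri. An illustrative walk over that threaded structure:

package example

import (
	"fmt"

	"tangled.org/core/appview/models"
)

// printThread prints one line per top-level comment followed by its replies,
// in the creation order produced by Issue.CommentList.
func printThread(issue *models.Issue) {
	for _, item := range issue.CommentList() {
		fmt.Printf("%s: %s (%d replies)\n", item.Self.Did, item.Self.Body, len(item.Replies))
		for _, reply := range item.Replies {
			fmt.Printf("  reply from %s: %s\n", reply.Did, reply.Body)
		}
	}
}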
+3 -1
appview/models/profile.go
··· 111 111 } 112 112 113 113 type ByMonth struct { 114 + Commits int 114 115 RepoEvents []RepoEvent 115 116 IssueEvents IssueEvents 116 117 PullEvents PullEvents ··· 119 120 func (b ByMonth) IsEmpty() bool { 120 121 return len(b.RepoEvents) == 0 && 121 122 len(b.IssueEvents.Items) == 0 && 122 - len(b.PullEvents.Items) == 0 123 + len(b.PullEvents.Items) == 0 && 124 + b.Commits == 0 123 125 } 124 126 125 127 type IssueEvents struct {
+18 -24
appview/models/pull.go
··· 66 66 TargetBranch string 67 67 State PullState 68 68 Submissions []*PullSubmission 69 + Mentions []syntax.DID 70 + References []syntax.ATURI 69 71 70 72 // stacking 71 73 StackId string // nullable string ··· 81 83 Repo *Repo 82 84 } 83 85 86 + // NOTE: This method does not include patch blob in returned atproto record 84 87 func (p Pull) AsRecord() tangled.RepoPull { 85 88 var source *tangled.RepoPull_Source 86 89 if p.PullSource != nil { ··· 92 95 source.Repo = &s 93 96 } 94 97 } 98 + mentions := make([]string, len(p.Mentions)) 99 + for i, did := range p.Mentions { 100 + mentions[i] = string(did) 101 + } 102 + references := make([]string, len(p.References)) 103 + for i, uri := range p.References { 104 + references[i] = string(uri) 105 + } 95 106 96 107 record := tangled.RepoPull{ 97 - Title: p.Title, 98 - Body: &p.Body, 99 - CreatedAt: p.Created.Format(time.RFC3339), 108 + Title: p.Title, 109 + Body: &p.Body, 110 + Mentions: mentions, 111 + References: references, 112 + CreatedAt: p.Created.Format(time.RFC3339), 100 113 Target: &tangled.RepoPull_Target{ 101 114 Repo: p.RepoAt.String(), 102 115 Branch: p.TargetBranch, 103 116 }, 104 - Patch: p.LatestPatch(), 105 117 Source: source, 106 118 } 107 119 return record ··· 126 138 RoundNumber int 127 139 Patch string 128 140 Combined string 129 - Comments []PullComment 141 + Comments []Comment 130 142 SourceRev string // include the rev that was used to create this submission: only for branch/fork PRs 131 - 132 - // meta 133 - Created time.Time 134 - } 135 - 136 - type PullComment struct { 137 - // ids 138 - ID int 139 - PullId int 140 - SubmissionId int 141 - 142 - // at ids 143 - RepoAt string 144 - OwnerDid string 145 - CommentAt string 146 - 147 - // content 148 - Body string 149 143 150 144 // meta 151 145 Created time.Time ··· 251 245 addParticipant(s.PullAt.Authority().String()) 252 246 253 247 for _, c := range s.Comments { 254 - addParticipant(c.OwnerDid) 248 + addParticipant(c.Did.String()) 255 249 } 256 250 257 251 return participants
+49
appview/models/reference.go
··· 1 + package models 2 + 3 + import "fmt" 4 + 5 + type RefKind int 6 + 7 + const ( 8 + RefKindIssue RefKind = iota 9 + RefKindPull 10 + ) 11 + 12 + func (k RefKind) String() string { 13 + if k == RefKindIssue { 14 + return "issues" 15 + } else { 16 + return "pulls" 17 + } 18 + } 19 + 20 + // /@alice.com/cool-proj/issues/123 21 + // /@alice.com/cool-proj/issues/123#comment-321 22 + type ReferenceLink struct { 23 + Handle string 24 + Repo string 25 + Kind RefKind 26 + SubjectId int 27 + CommentId *int 28 + } 29 + 30 + func (l ReferenceLink) String() string { 31 + comment := "" 32 + if l.CommentId != nil { 33 + comment = fmt.Sprintf("#comment-%d", *l.CommentId) 34 + } 35 + return fmt.Sprintf("/%s/%s/%s/%d%s", 36 + l.Handle, 37 + l.Repo, 38 + l.Kind.String(), 39 + l.SubjectId, 40 + comment, 41 + ) 42 + } 43 + 44 + type RichReferenceLink struct { 45 + ReferenceLink 46 + Title string 47 + // reusing PullState for both issue & PR 48 + State PullState 49 + }
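ReferenceLink.String renders the path layout sketched in the comment above: /handle/repo/{issues|pulls}/id with an optional #comment-N fragment. A small usage sketch, assuming the handle is stored without the leading "@", which is how the parser added in appview/pages/markup/reference_link.go below constructs it:

```go
package main

import (
	"fmt"

	"tangled.org/core/appview/models"
)

func main() {
	commentId := 321
	link := models.ReferenceLink{
		Handle:    "alice.com",
		Repo:      "cool-proj",
		Kind:      models.RefKindIssue,
		SubjectId: 123,
		CommentId: &commentId,
	}
	// Prints: /alice.com/cool-proj/issues/123#comment-321
	fmt.Println(link.String())
}
```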
+5 -4
appview/notifications/notifications.go
··· 11 11 "tangled.org/core/appview/oauth" 12 12 "tangled.org/core/appview/pages" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 type Notifications struct { ··· 53 54 54 55 total, err := db.CountNotifications( 55 56 n.db, 56 - db.FilterEq("recipient_did", user.Did), 57 + orm.FilterEq("recipient_did", user.Did), 57 58 ) 58 59 if err != nil { 59 60 l.Error("failed to get total notifications", "err", err) ··· 64 65 notifications, err := db.GetNotificationsWithEntities( 65 66 n.db, 66 67 page, 67 - db.FilterEq("recipient_did", user.Did), 68 + orm.FilterEq("recipient_did", user.Did), 68 69 ) 69 70 if err != nil { 70 71 l.Error("failed to get notifications", "err", err) ··· 96 97 97 98 count, err := db.CountNotifications( 98 99 n.db, 99 - db.FilterEq("recipient_did", user.Did), 100 - db.FilterEq("read", 0), 100 + orm.FilterEq("recipient_did", user.Did), 101 + orm.FilterEq("read", 0), 101 102 ) 102 103 if err != nil { 103 104 http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+157 -148
appview/notify/db/db.go
··· 3 3 import ( 4 4 "context" 5 5 "log" 6 - "maps" 7 6 "slices" 8 7 9 8 "github.com/bluesky-social/indigo/atproto/syntax" ··· 12 11 "tangled.org/core/appview/models" 13 12 "tangled.org/core/appview/notify" 14 13 "tangled.org/core/idresolver" 14 + "tangled.org/core/orm" 15 + "tangled.org/core/sets" 15 16 ) 16 17 17 18 const ( 18 - maxMentions = 5 19 + maxMentions = 8 19 20 ) 20 21 21 22 type databaseNotifier struct { ··· 42 43 return 43 44 } 44 45 var err error 45 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt))) 46 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt))) 46 47 if err != nil { 47 48 log.Printf("NewStar: failed to get repos: %v", err) 48 49 return 49 50 } 50 51 51 52 actorDid := syntax.DID(star.Did) 52 - recipients := []syntax.DID{syntax.DID(repo.Did)} 53 + recipients := sets.Singleton(syntax.DID(repo.Did)) 53 54 eventType := models.NotificationTypeRepoStarred 54 55 entityType := "repo" 55 56 entityId := star.RepoAt.String() ··· 73 74 // no-op 74 75 } 75 76 76 - func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 77 + func (n *databaseNotifier) NewComment(ctx context.Context, comment *models.Comment) { 78 + var ( 79 + // built the recipients list: 80 + // - the owner of the repo 81 + // - | if the comment is a reply -> everybody on that thread 82 + // | if the comment is a top level -> just the issue owner 83 + // - remove mentioned users from the recipients list 84 + recipients = sets.New[syntax.DID]() 85 + entityType string 86 + entityId string 87 + repoId *int64 88 + issueId *int64 89 + pullId *int64 90 + ) 77 91 78 - // build the recipients list 79 - // - owner of the repo 80 - // - collaborators in the repo 81 - var recipients []syntax.DID 82 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 83 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt())) 92 + subjectDid, err := comment.Subject.Authority().AsDID() 84 93 if err != nil { 85 - log.Printf("failed to fetch collaborators: %v", err) 94 + log.Printf("NewComment: expected did based at-uri for comment.subject") 86 95 return 87 96 } 88 - for _, c := range collaborators { 89 - recipients = append(recipients, c.SubjectDid) 97 + switch comment.Subject.Collection() { 98 + case tangled.RepoIssueNSID: 99 + issues, err := db.GetIssues( 100 + n.db, 101 + orm.FilterEq("did", subjectDid), 102 + orm.FilterEq("rkey", comment.Subject.RecordKey()), 103 + ) 104 + if err != nil { 105 + log.Printf("NewComment: failed to get issues: %v", err) 106 + return 107 + } 108 + if len(issues) == 0 { 109 + log.Printf("NewComment: no issue found for %s", comment.Subject) 110 + return 111 + } 112 + issue := issues[0] 113 + 114 + recipients.Insert(syntax.DID(issue.Repo.Did)) 115 + if comment.IsReply() { 116 + // if this comment is a reply, then notify everybody in that thread 117 + parentAtUri := *comment.ReplyTo 118 + 119 + // find the parent thread, and add all DIDs from here to the recipient list 120 + for _, t := range issue.CommentList() { 121 + if t.Self.AtUri() == parentAtUri { 122 + for _, p := range t.Participants() { 123 + recipients.Insert(p) 124 + } 125 + } 126 + } 127 + } else { 128 + // not a reply, notify just the issue author 129 + recipients.Insert(syntax.DID(issue.Did)) 130 + } 131 + 132 + entityType = "issue" 133 + entityId = issue.AtUri().String() 134 + repoId = &issue.Repo.Id 135 + issueId = &issue.Id 136 + case tangled.RepoPullNSID: 137 + pulls, err := db.GetPullsWithLimit( 138 + 
n.db, 139 + 1, 140 + orm.FilterEq("owner_did", subjectDid), 141 + orm.FilterEq("rkey", comment.Subject.RecordKey()), 142 + ) 143 + if err != nil { 144 + log.Printf("NewComment: failed to get pulls: %v", err) 145 + return 146 + } 147 + if len(pulls) == 0 { 148 + log.Printf("NewComment: no pull found for %s", comment.Subject) 149 + return 150 + } 151 + pull := pulls[0] 152 + 153 + pull.Repo, err = db.GetRepo(n.db, orm.FilterEq("at_uri", pull.RepoAt)) 154 + if err != nil { 155 + log.Printf("NewComment: failed to get repos: %v", err) 156 + return 157 + } 158 + 159 + recipients.Insert(syntax.DID(pull.Repo.Did)) 160 + for _, p := range pull.Participants() { 161 + recipients.Insert(syntax.DID(p)) 162 + } 163 + 164 + entityType = "pull" 165 + entityId = pull.AtUri().String() 166 + repoId = &pull.Repo.Id 167 + p := int64(pull.ID) 168 + pullId = &p 169 + default: 170 + return // no-op 90 171 } 91 172 92 - actorDid := syntax.DID(issue.Did) 93 - entityType := "issue" 94 - entityId := issue.AtUri().String() 95 - repoId := &issue.Repo.Id 96 - issueId := &issue.Id 97 - var pullId *int64 173 + for _, m := range comment.Mentions { 174 + recipients.Remove(m) 175 + } 98 176 99 177 n.notifyEvent( 100 - actorDid, 178 + comment.Did, 101 179 recipients, 102 - models.NotificationTypeIssueCreated, 180 + models.NotificationTypeIssueCommented, 103 181 entityType, 104 182 entityId, 105 183 repoId, ··· 107 185 pullId, 108 186 ) 109 187 n.notifyEvent( 110 - actorDid, 111 - mentions, 188 + comment.Did, 189 + sets.Collect(slices.Values(comment.Mentions)), 112 190 models.NotificationTypeUserMentioned, 113 191 entityType, 114 192 entityId, ··· 118 196 ) 119 197 } 120 198 121 - func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 122 - issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt)) 199 + func (n *databaseNotifier) DeleteComment(ctx context.Context, comment *models.Comment) { 200 + // no-op 201 + } 202 + 203 + func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 204 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 123 205 if err != nil { 124 - log.Printf("NewIssueComment: failed to get issues: %v", err) 206 + log.Printf("failed to fetch collaborators: %v", err) 125 207 return 126 208 } 127 - if len(issues) == 0 { 128 - log.Printf("NewIssueComment: no issue found for %s", comment.IssueAt) 129 - return 209 + 210 + // build the recipients list 211 + // - owner of the repo 212 + // - collaborators in the repo 213 + // - remove users already mentioned 214 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 215 + for _, c := range collaborators { 216 + recipients.Insert(c.SubjectDid) 130 217 } 131 - issue := issues[0] 132 - 133 - var recipients []syntax.DID 134 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 135 - 136 - if comment.IsReply() { 137 - // if this comment is a reply, then notify everybody in that thread 138 - parentAtUri := *comment.ReplyTo 139 - allThreads := issue.CommentList() 140 - 141 - // find the parent thread, and add all DIDs from here to the recipient list 142 - for _, t := range allThreads { 143 - if t.Self.AtUri().String() == parentAtUri { 144 - recipients = append(recipients, t.Participants()...) 
145 - } 146 - } 147 - } else { 148 - // not a reply, notify just the issue author 149 - recipients = append(recipients, syntax.DID(issue.Did)) 218 + for _, m := range mentions { 219 + recipients.Remove(m) 150 220 } 151 221 152 - actorDid := syntax.DID(comment.Did) 222 + actorDid := syntax.DID(issue.Did) 153 223 entityType := "issue" 154 224 entityId := issue.AtUri().String() 155 225 repoId := &issue.Repo.Id ··· 159 229 n.notifyEvent( 160 230 actorDid, 161 231 recipients, 162 - models.NotificationTypeIssueCommented, 232 + models.NotificationTypeIssueCreated, 163 233 entityType, 164 234 entityId, 165 235 repoId, ··· 168 238 ) 169 239 n.notifyEvent( 170 240 actorDid, 171 - mentions, 241 + sets.Collect(slices.Values(mentions)), 172 242 models.NotificationTypeUserMentioned, 173 243 entityType, 174 244 entityId, ··· 184 254 185 255 func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) { 186 256 actorDid := syntax.DID(follow.UserDid) 187 - recipients := []syntax.DID{syntax.DID(follow.SubjectDid)} 257 + recipients := sets.Singleton(syntax.DID(follow.SubjectDid)) 188 258 eventType := models.NotificationTypeFollowed 189 259 entityType := "follow" 190 260 entityId := follow.UserDid ··· 207 277 } 208 278 209 279 func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) { 210 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt))) 280 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 211 281 if err != nil { 212 282 log.Printf("NewPull: failed to get repos: %v", err) 213 283 return 214 284 } 285 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 286 + if err != nil { 287 + log.Printf("failed to fetch collaborators: %v", err) 288 + return 289 + } 215 290 216 291 // build the recipients list 217 292 // - owner of the repo 218 293 // - collaborators in the repo 219 - var recipients []syntax.DID 220 - recipients = append(recipients, syntax.DID(repo.Did)) 221 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt())) 222 - if err != nil { 223 - log.Printf("failed to fetch collaborators: %v", err) 224 - return 225 - } 294 + recipients := sets.Singleton(syntax.DID(repo.Did)) 226 295 for _, c := range collaborators { 227 - recipients = append(recipients, c.SubjectDid) 296 + recipients.Insert(c.SubjectDid) 228 297 } 229 298 230 299 actorDid := syntax.DID(pull.OwnerDid) ··· 248 317 ) 249 318 } 250 319 251 - func (n *databaseNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 252 - pull, err := db.GetPull(n.db, 253 - syntax.ATURI(comment.RepoAt), 254 - comment.PullId, 255 - ) 256 - if err != nil { 257 - log.Printf("NewPullComment: failed to get pulls: %v", err) 258 - return 259 - } 260 - 261 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt)) 262 - if err != nil { 263 - log.Printf("NewPullComment: failed to get repos: %v", err) 264 - return 265 - } 266 - 267 - // build up the recipients list: 268 - // - repo owner 269 - // - all pull participants 270 - var recipients []syntax.DID 271 - recipients = append(recipients, syntax.DID(repo.Did)) 272 - for _, p := range pull.Participants() { 273 - recipients = append(recipients, syntax.DID(p)) 274 - } 275 - 276 - actorDid := syntax.DID(comment.OwnerDid) 277 - eventType := models.NotificationTypePullCommented 278 - entityType := "pull" 279 - entityId := pull.AtUri().String() 280 - repoId := &repo.Id 281 - var issueId *int64 282 - p := int64(pull.ID) 
283 - pullId := &p 284 - 285 - n.notifyEvent( 286 - actorDid, 287 - recipients, 288 - eventType, 289 - entityType, 290 - entityId, 291 - repoId, 292 - issueId, 293 - pullId, 294 - ) 295 - n.notifyEvent( 296 - actorDid, 297 - mentions, 298 - models.NotificationTypeUserMentioned, 299 - entityType, 300 - entityId, 301 - repoId, 302 - issueId, 303 - pullId, 304 - ) 305 - } 306 - 307 320 func (n *databaseNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) { 308 321 // no-op 309 322 } ··· 321 334 } 322 335 323 336 func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 324 - // build up the recipients list: 325 - // - repo owner 326 - // - repo collaborators 327 - // - all issue participants 328 - var recipients []syntax.DID 329 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 330 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt())) 337 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 331 338 if err != nil { 332 339 log.Printf("failed to fetch collaborators: %v", err) 333 340 return 334 341 } 342 + 343 + // build up the recipients list: 344 + // - repo owner 345 + // - repo collaborators 346 + // - all issue participants 347 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 335 348 for _, c := range collaborators { 336 - recipients = append(recipients, c.SubjectDid) 349 + recipients.Insert(c.SubjectDid) 337 350 } 338 351 for _, p := range issue.Participants() { 339 - recipients = append(recipients, syntax.DID(p)) 352 + recipients.Insert(syntax.DID(p)) 340 353 } 341 354 342 355 entityType := "pull" ··· 366 379 367 380 func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 368 381 // Get repo details 369 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt))) 382 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 370 383 if err != nil { 371 384 log.Printf("NewPullState: failed to get repos: %v", err) 372 385 return 373 386 } 374 387 375 - // build up the recipients list: 376 - // - repo owner 377 - // - all pull participants 378 - var recipients []syntax.DID 379 - recipients = append(recipients, syntax.DID(repo.Did)) 380 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt())) 388 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 381 389 if err != nil { 382 390 log.Printf("failed to fetch collaborators: %v", err) 383 391 return 384 392 } 393 + 394 + // build up the recipients list: 395 + // - repo owner 396 + // - all pull participants 397 + recipients := sets.Singleton(syntax.DID(repo.Did)) 385 398 for _, c := range collaborators { 386 - recipients = append(recipients, c.SubjectDid) 399 + recipients.Insert(c.SubjectDid) 387 400 } 388 401 for _, p := range pull.Participants() { 389 - recipients = append(recipients, syntax.DID(p)) 402 + recipients.Insert(syntax.DID(p)) 390 403 } 391 404 392 405 entityType := "pull" ··· 422 435 423 436 func (n *databaseNotifier) notifyEvent( 424 437 actorDid syntax.DID, 425 - recipients []syntax.DID, 438 + recipients sets.Set[syntax.DID], 426 439 eventType models.NotificationType, 427 440 entityType string, 428 441 entityId string, ··· 430 443 issueId *int64, 431 444 pullId *int64, 432 445 ) { 433 - if eventType == models.NotificationTypeUserMentioned && len(recipients) > maxMentions { 434 - recipients = recipients[:maxMentions] 446 
+ // if the user is attempting to mention more than maxMentions users, this is probably spam; skip notifying anyone
447 + if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions {
448 + return
435 449 }
436 - recipientSet := make(map[syntax.DID]struct{})
437 - for _, did := range recipients {
438 - // everybody except actor themselves
439 - if did != actorDid {
440 - recipientSet[did] = struct{}{}
441 - }
442 - }
450 +
451 + recipients.Remove(actorDid)
443 452
444 453 prefMap, err := db.GetNotificationPreferences(
445 454 n.db,
446 - db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))),
455 + orm.FilterIn("user_did", slices.Collect(recipients.All())),
447 456 )
448 457 if err != nil {
449 458 // failed to get prefs for users
··· 459 468 defer tx.Rollback()
460 469
461 470 // filter based on preferences
462 471 for recipientDid := range recipients.All() {
463 472 prefs, ok := prefMap[recipientDid]
464 473 if !ok {
465 474 prefs = models.DefaultNotificationPreferences(recipientDid)
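The notifier now builds recipient lists with the new tangled.org/core/sets package instead of hand-rolled slices and maps, which makes the dedup and the "everyone except the actor" logic explicit. The package itself is not part of this hunk; judging from the call sites (New, Singleton, Collect, Insert, Remove, Len, All), it behaves roughly like the generic set sketched below. This is an inferred shape, not the actual implementation:

```go
package sets

import "iter"

// Set is a sketch of the API the notifier above relies on; the real
// package may differ in detail.
type Set[T comparable] map[T]struct{}

func New[T comparable]() Set[T]          { return Set[T]{} }
func Singleton[T comparable](v T) Set[T] { return Set[T]{v: {}} }

// Collect drains an iterator into a set, matching
// sets.Collect(slices.Values(comment.Mentions)) above.
func Collect[T comparable](seq iter.Seq[T]) Set[T] {
	s := New[T]()
	for v := range seq {
		s.Insert(v)
	}
	return s
}

func (s Set[T]) Insert(v T) { s[v] = struct{}{} }
func (s Set[T]) Remove(v T) { delete(s, v) }
func (s Set[T]) Len() int   { return len(s) }

// All yields each element, which is what allows both
// `for did := range recipients.All()` and
// `slices.Collect(recipients.All())` in the notifier.
func (s Set[T]) All() iter.Seq[T] {
	return func(yield func(T) bool) {
		for v := range s {
			if !yield(v) {
				return
			}
		}
	}
}
```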
+8 -9
appview/notify/merged_notifier.go
··· 39 39 v.Call(in) 40 40 }(n) 41 41 } 42 - wg.Wait() 43 42 } 44 43 45 44 func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) { ··· 52 51 53 52 func (m *mergedNotifier) DeleteStar(ctx context.Context, star *models.Star) { 54 53 m.fanout("DeleteStar", ctx, star) 54 + } 55 + 56 + func (m *mergedNotifier) NewComment(ctx context.Context, comment *models.Comment) { 57 + m.fanout("NewComment", ctx, comment) 58 + } 59 + 60 + func (m *mergedNotifier) DeleteComment(ctx context.Context, comment *models.Comment) { 61 + m.fanout("DeleteComment", ctx, comment) 55 62 } 56 63 57 64 func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 58 65 m.fanout("NewIssue", ctx, issue, mentions) 59 66 } 60 67 61 - func (m *mergedNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 62 - m.fanout("NewIssueComment", ctx, comment, mentions) 63 - } 64 - 65 68 func (m *mergedNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 66 69 m.fanout("NewIssueState", ctx, actor, issue) 67 70 } ··· 80 83 81 84 func (m *mergedNotifier) NewPull(ctx context.Context, pull *models.Pull) { 82 85 m.fanout("NewPull", ctx, pull) 83 - } 84 - 85 - func (m *mergedNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 86 - m.fanout("NewPullComment", ctx, comment, mentions) 87 86 } 88 87 89 88 func (m *mergedNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
+7 -7
appview/notify/notifier.go
··· 13 13 NewStar(ctx context.Context, star *models.Star) 14 14 DeleteStar(ctx context.Context, star *models.Star) 15 15 16 + NewComment(ctx context.Context, comment *models.Comment) 17 + DeleteComment(ctx context.Context, comment *models.Comment) 18 + 16 19 NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) 17 - NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) 18 20 NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) 19 21 DeleteIssue(ctx context.Context, issue *models.Issue) 20 22 ··· 22 24 DeleteFollow(ctx context.Context, follow *models.Follow) 23 25 24 26 NewPull(ctx context.Context, pull *models.Pull) 25 - NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) 26 27 NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) 27 28 28 29 UpdateProfile(ctx context.Context, profile *models.Profile) ··· 42 43 func (m *BaseNotifier) NewStar(ctx context.Context, star *models.Star) {} 43 44 func (m *BaseNotifier) DeleteStar(ctx context.Context, star *models.Star) {} 44 45 46 + func (m *BaseNotifier) NewComment(ctx context.Context, comment *models.Comment) {} 47 + func (m *BaseNotifier) DeleteComment(ctx context.Context, comment *models.Comment) {} 48 + 45 49 func (m *BaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {} 46 - func (m *BaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 47 - } 48 50 func (m *BaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {} 49 51 func (m *BaseNotifier) DeleteIssue(ctx context.Context, issue *models.Issue) {} 50 52 51 53 func (m *BaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {} 52 54 func (m *BaseNotifier) DeleteFollow(ctx context.Context, follow *models.Follow) {} 53 55 54 - func (m *BaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {} 55 - func (m *BaseNotifier) NewPullComment(ctx context.Context, models *models.PullComment, mentions []syntax.DID) { 56 - } 56 + func (m *BaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {} 57 57 func (m *BaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {} 58 58 59 59 func (m *BaseNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) {}
+5 -20
appview/notify/posthog/notifier.go
··· 86 86 } 87 87 } 88 88 89 - func (n *posthogNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 90 - err := n.client.Enqueue(posthog.Capture{ 91 - DistinctId: comment.OwnerDid, 92 - Event: "new_pull_comment", 93 - Properties: posthog.Properties{ 94 - "repo_at": comment.RepoAt, 95 - "pull_id": comment.PullId, 96 - "mentions": mentions, 97 - }, 98 - }) 99 - if err != nil { 100 - log.Println("failed to enqueue posthog event:", err) 101 - } 102 - } 103 - 104 89 func (n *posthogNotifier) NewPullClosed(ctx context.Context, pull *models.Pull) { 105 90 err := n.client.Enqueue(posthog.Capture{ 106 91 DistinctId: pull.OwnerDid, ··· 180 165 } 181 166 } 182 167 183 - func (n *posthogNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 168 + func (n *posthogNotifier) NewComment(ctx context.Context, comment *models.Comment) { 184 169 err := n.client.Enqueue(posthog.Capture{ 185 - DistinctId: comment.Did, 186 - Event: "new_issue_comment", 170 + DistinctId: comment.Did.String(), 171 + Event: "new_comment", 187 172 Properties: posthog.Properties{ 188 - "issue_at": comment.IssueAt, 189 - "mentions": mentions, 173 + "subject_at": comment.Subject, 174 + "mentions": comment.Mentions, 190 175 }, 191 176 }) 192 177 if err != nil {
+3 -2
appview/oauth/handler.go
··· 16 16 "tangled.org/core/api/tangled" 17 17 "tangled.org/core/appview/db" 18 18 "tangled.org/core/consts" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/tid" 20 21 ) 21 22 ··· 97 98 // and create an sh.tangled.spindle.member record with that 98 99 spindleMembers, err := db.GetSpindleMembers( 99 100 o.Db, 100 - db.FilterEq("instance", "spindle.tangled.sh"), 101 - db.FilterEq("subject", did), 101 + orm.FilterEq("instance", "spindle.tangled.sh"), 102 + orm.FilterEq("subject", did), 102 103 ) 103 104 if err != nil { 104 105 l.Error("failed to get spindle members", "err", err)
+35 -10
appview/pages/funcmap.go
··· 22 22 chromahtml "github.com/alecthomas/chroma/v2/formatters/html" 23 23 "github.com/alecthomas/chroma/v2/lexers" 24 24 "github.com/alecthomas/chroma/v2/styles" 25 - "github.com/bluesky-social/indigo/atproto/syntax" 26 25 "github.com/dustin/go-humanize" 27 26 "github.com/go-enry/go-enry/v2" 28 27 "github.com/yuin/goldmark" 28 + emoji "github.com/yuin/goldmark-emoji" 29 29 "tangled.org/core/appview/filetree" 30 + "tangled.org/core/appview/models" 30 31 "tangled.org/core/appview/pages/markup" 31 32 "tangled.org/core/crypto" 32 33 ) ··· 72 73 73 74 return identity.Handle.String() 74 75 }, 76 + "ownerSlashRepo": func(repo *models.Repo) string { 77 + ownerId, err := p.resolver.ResolveIdent(context.Background(), repo.Did) 78 + if err != nil { 79 + return repo.DidSlashRepo() 80 + } 81 + handle := ownerId.Handle 82 + if handle != "" && !handle.IsInvalidHandle() { 83 + return string(handle) + "/" + repo.Name 84 + } 85 + return repo.DidSlashRepo() 86 + }, 75 87 "truncateAt30": func(s string) string { 76 88 if len(s) <= 30 { 77 89 return s ··· 141 153 142 154 return b 143 155 }, 144 - "didOrHandle": func(did, handle string) string { 145 - if handle != "" && handle != syntax.HandleInvalid.String() { 146 - return handle 147 - } else { 148 - return did 149 - } 150 - }, 151 156 "assoc": func(values ...string) ([][]string, error) { 152 157 if len(values)%2 != 0 { 153 158 return nil, fmt.Errorf("invalid assoc call, must have an even number of arguments") ··· 158 163 } 159 164 return pairs, nil 160 165 }, 161 - "append": func(s []string, values ...string) []string { 166 + "append": func(s []any, values ...any) []any { 162 167 s = append(s, values...) 163 168 return s 164 169 }, ··· 257 262 }, 258 263 "description": func(text string) template.HTML { 259 264 p.rctx.RendererType = markup.RendererTypeDefault 260 - htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New()) 265 + htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New( 266 + goldmark.WithExtensions( 267 + emoji.Emoji, 268 + ), 269 + )) 261 270 sanitized := p.rctx.SanitizeDescription(htmlString) 262 271 return template.HTML(sanitized) 263 272 }, ··· 379 388 } 380 389 } 381 390 391 + func (p *Pages) resolveDid(did string) string { 392 + identity, err := p.resolver.ResolveIdent(context.Background(), did) 393 + 394 + if err != nil { 395 + return did 396 + } 397 + 398 + if identity.Handle.IsInvalidHandle() { 399 + return "handle.invalid" 400 + } 401 + 402 + return identity.Handle.String() 403 + } 404 + 382 405 func (p *Pages) AvatarUrl(handle, size string) string { 383 406 handle = strings.TrimPrefix(handle, "@") 407 + 408 + handle = p.resolveDid(handle) 384 409 385 410 secret := p.avatar.SharedSecret 386 411 h := hmac.New(sha256.New, []byte(secret))
+13 -3
appview/pages/markup/extension/atlink.go
··· 35 35 return KindAt 36 36 } 37 37 38 - var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`) 38 + var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`) 39 + var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`) 39 40 40 41 type atParser struct{} 41 42 ··· 55 56 if m == nil { 56 57 return nil 57 58 } 59 + 60 + // Check for all links in the markdown to see if the handle found is inside one 61 + linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1) 62 + for _, linkMatch := range linksIndexes { 63 + if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] { 64 + return nil 65 + } 66 + } 67 + 58 68 atSegment := text.NewSegment(segment.Start, segment.Start+m[1]) 59 69 block.Advance(m[1]) 60 70 node := &AtNode{} ··· 87 97 88 98 func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) { 89 99 if entering { 90 - w.WriteString(`<a href="/@`) 100 + w.WriteString(`<a href="/`) 91 101 w.WriteString(n.(*AtNode).Handle) 92 - w.WriteString(`" class="mention font-bold">`) 102 + w.WriteString(`" class="mention">`) 93 103 } else { 94 104 w.WriteString("</a>") 95 105 }
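The tightened handle pattern only fires when the "@" is preceded by start of line, whitespace, or an opening parenthesis, and the handle itself must look like a multi-label domain, so bare emails and single-word handles are left alone; the markdownLinkRegexp check additionally skips handles that sit inside an explicit markdown link. A quick illustration of what the pattern accepts and rejects, reusing the same expression:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as atRegexp in atlink.go above.
var handlePattern = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)

func main() {
	for _, s := range []string{
		"hi @alice.tngl.sh",        // match: space before @, dotted labels
		"(@user-name.example.com)", // match: opening paren is an allowed prefix
		"contact test@example.com", // no match: @ is preceded by a letter
		"just @alice",              // no match: handle must contain a dot
	} {
		fmt.Printf("%-28q -> %v\n", s, handlePattern.MatchString(s))
	}
}
```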
+2 -26
appview/pages/markup/markdown.go
··· 12 12 13 13 chromahtml "github.com/alecthomas/chroma/v2/formatters/html" 14 14 "github.com/alecthomas/chroma/v2/styles" 15 - treeblood "github.com/wyatt915/goldmark-treeblood" 16 15 "github.com/yuin/goldmark" 16 + "github.com/yuin/goldmark-emoji" 17 17 highlighting "github.com/yuin/goldmark-highlighting/v2" 18 18 "github.com/yuin/goldmark/ast" 19 19 "github.com/yuin/goldmark/extension" ··· 65 65 extension.NewFootnote( 66 66 extension.WithFootnoteIDPrefix([]byte("footnote")), 67 67 ), 68 - treeblood.MathML(), 69 68 callout.CalloutExtention, 70 69 textension.AtExt, 70 + emoji.Emoji, 71 71 ), 72 72 goldmark.WithParserOptions( 73 73 parser.WithAutoHeadingID(), ··· 302 302 } 303 303 304 304 return path.Join(rctx.CurrentDir, dst) 305 - } 306 - 307 - // FindUserMentions returns Set of user handles from given markup soruce. 308 - // It doesn't guarntee unique DIDs 309 - func FindUserMentions(source string) []string { 310 - var ( 311 - mentions []string 312 - mentionsSet = make(map[string]struct{}) 313 - md = NewMarkdown() 314 - sourceBytes = []byte(source) 315 - root = md.Parser().Parse(text.NewReader(sourceBytes)) 316 - ) 317 - ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) { 318 - if entering && n.Kind() == textension.KindAt { 319 - handle := n.(*textension.AtNode).Handle 320 - mentionsSet[handle] = struct{}{} 321 - return ast.WalkSkipChildren, nil 322 - } 323 - return ast.WalkContinue, nil 324 - }) 325 - for handle := range mentionsSet { 326 - mentions = append(mentions, handle) 327 - } 328 - return mentions 329 305 } 330 306 331 307 func isAbsoluteUrl(link string) bool {
+121
appview/pages/markup/markdown_test.go
··· 1 + package markup 2 + 3 + import ( 4 + "bytes" 5 + "testing" 6 + ) 7 + 8 + func TestAtExtension_Rendering(t *testing.T) { 9 + tests := []struct { 10 + name string 11 + markdown string 12 + expected string 13 + }{ 14 + { 15 + name: "renders simple at mention", 16 + markdown: "Hello @user.tngl.sh!", 17 + expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`, 18 + }, 19 + { 20 + name: "renders multiple at mentions", 21 + markdown: "Hi @alice.tngl.sh and @bob.example.com", 22 + expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`, 23 + }, 24 + { 25 + name: "renders at mention in parentheses", 26 + markdown: "Check this out (@user.tngl.sh)", 27 + expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`, 28 + }, 29 + { 30 + name: "does not render email", 31 + markdown: "Contact me at test@example.com", 32 + expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`, 33 + }, 34 + { 35 + name: "renders at mention with hyphen", 36 + markdown: "Follow @user-name.tngl.sh", 37 + expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`, 38 + }, 39 + { 40 + name: "renders at mention with numbers", 41 + markdown: "@user123.test456.social", 42 + expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`, 43 + }, 44 + { 45 + name: "at mention at start of line", 46 + markdown: "@user.tngl.sh is cool", 47 + expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`, 48 + }, 49 + } 50 + 51 + for _, tt := range tests { 52 + t.Run(tt.name, func(t *testing.T) { 53 + md := NewMarkdown() 54 + 55 + var buf bytes.Buffer 56 + if err := md.Convert([]byte(tt.markdown), &buf); err != nil { 57 + t.Fatalf("failed to convert markdown: %v", err) 58 + } 59 + 60 + result := buf.String() 61 + if result != tt.expected+"\n" { 62 + t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result) 63 + } 64 + }) 65 + } 66 + } 67 + 68 + func TestAtExtension_WithOtherMarkdown(t *testing.T) { 69 + tests := []struct { 70 + name string 71 + markdown string 72 + contains string 73 + }{ 74 + { 75 + name: "at mention with bold", 76 + markdown: "**Hello @user.tngl.sh**", 77 + contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`, 78 + }, 79 + { 80 + name: "at mention with italic", 81 + markdown: "*Check @user.tngl.sh*", 82 + contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`, 83 + }, 84 + { 85 + name: "at mention in list", 86 + markdown: "- Item 1\n- @user.tngl.sh\n- Item 3", 87 + contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`, 88 + }, 89 + { 90 + name: "at mention in link", 91 + markdown: "[@regnault.dev](https://regnault.dev)", 92 + contains: `<a href="https://regnault.dev">@regnault.dev</a>`, 93 + }, 94 + { 95 + name: "at mention in link again", 96 + markdown: "[check out @regnault.dev](https://regnault.dev)", 97 + contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`, 98 + }, 99 + { 100 + name: "at mention in link again, multiline", 101 + markdown: "[\ncheck out @regnault.dev](https://regnault.dev)", 102 + contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>", 103 + }, 104 + } 105 + 106 + for _, tt := range tests { 107 + t.Run(tt.name, func(t *testing.T) { 108 + md := NewMarkdown() 109 + 110 + var buf bytes.Buffer 111 + 
if err := md.Convert([]byte(tt.markdown), &buf); err != nil { 112 + t.Fatalf("failed to convert markdown: %v", err) 113 + } 114 + 115 + result := buf.String() 116 + if !bytes.Contains([]byte(result), []byte(tt.contains)) { 117 + t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result) 118 + } 119 + }) 120 + } 121 + }
+124
appview/pages/markup/reference_link.go
··· 1 + package markup
2 +
3 + import (
4 + "maps"
5 + "net/url"
6 + "path"
7 + "slices"
8 + "strconv"
9 + "strings"
10 +
11 + "github.com/yuin/goldmark/ast"
12 + "github.com/yuin/goldmark/text"
13 + "tangled.org/core/appview/models"
14 + textension "tangled.org/core/appview/pages/markup/extension"
15 + )
16 +
17 + // FindReferences collects all links referencing tangled-related objects
18 + // like issues, PRs, comments, or @-mentions.
19 + // This function doesn't actually check for the existence of records in the DB
20 + // or the PDS; it merely returns a list of what are presumed to be references.
21 + func FindReferences(baseUrl string, source string) ([]string, []models.ReferenceLink) {
22 + var (
23 + refLinkSet = make(map[models.ReferenceLink]struct{})
24 + mentionsSet = make(map[string]struct{})
25 + md = NewMarkdown()
26 + sourceBytes = []byte(source)
27 + root = md.Parser().Parse(text.NewReader(sourceBytes))
28 + )
29 + // trim the url scheme; http vs https doesn't matter for matching
30 + baseUrl = strings.TrimPrefix(baseUrl, "https://")
31 + baseUrl = strings.TrimPrefix(baseUrl, "http://")
32 +
33 + ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
34 + if !entering {
35 + return ast.WalkContinue, nil
36 + }
37 + switch n.Kind() {
38 + case textension.KindAt:
39 + handle := n.(*textension.AtNode).Handle
40 + mentionsSet[handle] = struct{}{}
41 + return ast.WalkSkipChildren, nil
42 + case ast.KindLink:
43 + dest := string(n.(*ast.Link).Destination)
44 + ref := parseTangledLink(baseUrl, dest)
45 + if ref != nil {
46 + refLinkSet[*ref] = struct{}{}
47 + }
48 + return ast.WalkSkipChildren, nil
49 + case ast.KindAutoLink:
50 + an := n.(*ast.AutoLink)
51 + if an.AutoLinkType == ast.AutoLinkURL {
52 + dest := string(an.URL(sourceBytes))
53 + ref := parseTangledLink(baseUrl, dest)
54 + if ref != nil {
55 + refLinkSet[*ref] = struct{}{}
56 + }
57 + }
58 + return ast.WalkSkipChildren, nil
59 + }
60 + return ast.WalkContinue, nil
61 + })
62 + mentions := slices.Collect(maps.Keys(mentionsSet))
63 + references := slices.Collect(maps.Keys(refLinkSet))
64 + return mentions, references
65 + }
66 +
67 + func parseTangledLink(baseHost string, urlStr string) *models.ReferenceLink {
68 + u, err := url.Parse(urlStr)
69 + if err != nil {
70 + return nil
71 + }
72 +
73 + if u.Host != "" && !strings.EqualFold(u.Host, baseHost) {
74 + return nil
75 + }
76 +
77 + p := path.Clean(u.Path)
78 + parts := strings.FieldsFunc(p, func(r rune) bool { return r == '/' })
79 + if len(parts) < 4 {
80 + // need at least: handle / repo / kind / id
81 + return nil
82 + }
83 +
84 + var (
85 + handle = parts[0]
86 + repo = parts[1]
87 + kindSeg = parts[2]
88 + subjectSeg = parts[3]
89 + )
90 +
91 + handle = strings.TrimPrefix(handle, "@")
92 +
93 + var kind models.RefKind
94 + switch kindSeg {
95 + case "issues":
96 + kind = models.RefKindIssue
97 + case "pulls":
98 + kind = models.RefKindPull
99 + default:
100 + return nil
101 + }
102 +
103 + subjectId, err := strconv.Atoi(subjectSeg)
104 + if err != nil {
105 + return nil
106 + }
107 + var commentId *int
108 + if u.Fragment != "" {
109 + if strings.HasPrefix(u.Fragment, "comment-") {
110 + commentIdStr := u.Fragment[len("comment-"):]
111 + if id, err := strconv.Atoi(commentIdStr); err == nil {
112 + commentId = &id
113 + }
114 + }
115 + }
116 +
117 + return &models.ReferenceLink{
118 + Handle: handle,
119 + Repo: repo,
120 + Kind: kind,
121 + SubjectId: subjectId,
122 + CommentId: commentId,
123 + }
124 + }
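FindReferences ties the pieces together: it walks the rendered markdown AST, collects @-mentions from the AtExt nodes, and turns links or autolinks that point back at the appview's own host into ReferenceLink values, ignoring external hosts. A usage sketch against a hypothetical issue body (the base URL and handles are illustrative):

```go
package main

import (
	"fmt"

	"tangled.org/core/appview/pages/markup"
)

func main() {
	body := "cc @bob.example.com, this duplicates " +
		"[an older report](https://tangled.org/@alice.com/cool-proj/issues/123#comment-321)"

	// Only links whose host matches the appview's base URL become references.
	mentions, refs := markup.FindReferences("https://tangled.org", body)

	fmt.Println(mentions) // [bob.example.com]
	for _, r := range refs {
		// handle "alice.com", repo "cool-proj", issue 123, comment 321
		fmt.Println(r.String()) // /alice.com/cool-proj/issues/123#comment-321
	}
}
```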
+15 -17
appview/pages/pages.go
··· 31 31 "github.com/bluesky-social/indigo/atproto/identity" 32 32 "github.com/bluesky-social/indigo/atproto/syntax" 33 33 "github.com/go-git/go-git/v5/plumbing" 34 - "github.com/go-git/go-git/v5/plumbing/object" 35 34 ) 36 35 37 36 //go:embed templates/* static legal ··· 492 491 493 492 type ProfileCard struct { 494 493 UserDid string 495 - UserHandle string 496 494 FollowStatus models.FollowStatus 497 495 Punchcard *models.Punchcard 498 496 Profile *models.Profile ··· 642 640 } 643 641 644 642 func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error { 645 - return p.executePlain("fragments/starBtn", w, params) 643 + return p.executePlain("fragments/starBtn-oob", w, params) 646 644 } 647 645 648 646 type RepoIndexParams struct { ··· 650 648 RepoInfo repoinfo.RepoInfo 651 649 Active string 652 650 TagMap map[string][]string 653 - CommitsTrunc []*object.Commit 651 + CommitsTrunc []types.Commit 654 652 TagsTrunc []*types.TagReference 655 653 BranchesTrunc []types.Branch 656 654 // ForkInfo *types.ForkInfo ··· 841 839 } 842 840 843 841 type Collaborator struct { 844 - Did string 845 - Handle string 846 - Role string 842 + Did string 843 + Role string 847 844 } 848 845 849 846 type RepoSettingsParams struct { ··· 936 933 Active string 937 934 Issue *models.Issue 938 935 CommentList []models.CommentListItem 936 + Backlinks []models.RichReferenceLink 939 937 LabelDefs map[string]*models.LabelDefinition 940 938 941 939 OrderedReactionKinds []models.ReactionKind ··· 990 988 LoggedInUser *oauth.User 991 989 RepoInfo repoinfo.RepoInfo 992 990 Issue *models.Issue 993 - Comment *models.IssueComment 991 + Comment *models.Comment 994 992 } 995 993 996 994 func (p *Pages) EditIssueCommentFragment(w io.Writer, params EditIssueCommentParams) error { ··· 1001 999 LoggedInUser *oauth.User 1002 1000 RepoInfo repoinfo.RepoInfo 1003 1001 Issue *models.Issue 1004 - Comment *models.IssueComment 1002 + Comment *models.Comment 1005 1003 } 1006 1004 1007 1005 func (p *Pages) ReplyIssueCommentPlaceholderFragment(w io.Writer, params ReplyIssueCommentPlaceholderParams) error { ··· 1012 1010 LoggedInUser *oauth.User 1013 1011 RepoInfo repoinfo.RepoInfo 1014 1012 Issue *models.Issue 1015 - Comment *models.IssueComment 1013 + Comment *models.Comment 1016 1014 } 1017 1015 1018 1016 func (p *Pages) ReplyIssueCommentFragment(w io.Writer, params ReplyIssueCommentParams) error { ··· 1023 1021 LoggedInUser *oauth.User 1024 1022 RepoInfo repoinfo.RepoInfo 1025 1023 Issue *models.Issue 1026 - Comment *models.IssueComment 1024 + Comment *models.Comment 1027 1025 } 1028 1026 1029 1027 func (p *Pages) IssueCommentBodyFragment(w io.Writer, params IssueCommentBodyParams) error { ··· 1089 1087 Pull *models.Pull 1090 1088 Stack models.Stack 1091 1089 AbandonedPulls []*models.Pull 1090 + Backlinks []models.RichReferenceLink 1092 1091 BranchDeleteStatus *models.BranchDeleteStatus 1093 1092 MergeCheck types.MergeCheckResponse 1094 1093 ResubmitCheck ResubmitResult ··· 1260 1259 return p.executePlain("repo/fragments/compareAllowPull", w, params) 1261 1260 } 1262 1261 1263 - type RepoCompareDiffParams struct { 1264 - LoggedInUser *oauth.User 1265 - RepoInfo repoinfo.RepoInfo 1266 - Diff types.NiceDiff 1262 + type RepoCompareDiffFragmentParams struct { 1263 + Diff types.NiceDiff 1264 + DiffOpts types.DiffOpts 1267 1265 } 1268 1266 1269 - func (p *Pages) RepoCompareDiff(w io.Writer, params RepoCompareDiffParams) error { 1270 - return p.executePlain("repo/fragments/diff", w, []any{params.RepoInfo.FullName, 
&params.Diff}) 1267 + func (p *Pages) RepoCompareDiffFragment(w io.Writer, params RepoCompareDiffFragmentParams) error { 1268 + return p.executePlain("repo/fragments/diff", w, []any{&params.Diff, &params.DiffOpts}) 1271 1269 } 1272 1270 1273 1271 type LabelPanelParams struct {
+25 -22
appview/pages/repoinfo/repoinfo.go
··· 1 1 package repoinfo 2 2 3 3 import ( 4 + "fmt" 4 5 "path" 5 6 "slices" 6 7 7 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 + "tangled.org/core/api/tangled" 8 10 "tangled.org/core/appview/models" 9 11 "tangled.org/core/appview/state/userutil" 10 12 ) 11 13 12 - func (r RepoInfo) Owner() string { 14 + func (r RepoInfo) owner() string { 13 15 if r.OwnerHandle != "" { 14 16 return r.OwnerHandle 15 17 } else { ··· 18 20 } 19 21 20 22 func (r RepoInfo) FullName() string { 21 - return path.Join(r.Owner(), r.Name) 23 + return path.Join(r.owner(), r.Name) 22 24 } 23 25 24 - func (r RepoInfo) OwnerWithoutAt() string { 26 + func (r RepoInfo) ownerWithoutAt() string { 25 27 if r.OwnerHandle != "" { 26 28 return r.OwnerHandle 27 29 } else { ··· 30 32 } 31 33 32 34 func (r RepoInfo) FullNameWithoutAt() string { 33 - return path.Join(r.OwnerWithoutAt(), r.Name) 35 + return path.Join(r.ownerWithoutAt(), r.Name) 34 36 } 35 37 36 38 func (r RepoInfo) GetTabs() [][]string { ··· 48 50 return tabs 49 51 } 50 52 53 + func (r RepoInfo) RepoAt() syntax.ATURI { 54 + return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.OwnerDid, tangled.RepoNSID, r.Rkey)) 55 + } 56 + 51 57 type RepoInfo struct { 52 - Name string 53 - Rkey string 54 - OwnerDid string 55 - OwnerHandle string 56 - Description string 57 - Website string 58 - Topics []string 59 - Knot string 60 - Spindle string 61 - RepoAt syntax.ATURI 62 - IsStarred bool 63 - Stats models.RepoStats 64 - Roles RolesInRepo 65 - Source *models.Repo 66 - SourceHandle string 67 - Ref string 68 - DisableFork bool 69 - CurrentDir string 58 + Name string 59 + Rkey string 60 + OwnerDid string 61 + OwnerHandle string 62 + Description string 63 + Website string 64 + Topics []string 65 + Knot string 66 + Spindle string 67 + IsStarred bool 68 + Stats models.RepoStats 69 + Roles RolesInRepo 70 + Source *models.Repo 71 + Ref string 72 + CurrentDir string 70 73 } 71 74 72 75 // each tab on a repo could have some metadata:
+1 -1
appview/pages/templates/banner.html
··· 30 30 <div class="mx-6"> 31 31 These services may not be fully accessible until upgraded. 32 32 <a class="underline text-red-800 dark:text-red-200" 33 - href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md"> 33 + href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles"> 34 34 Click to read the upgrade guide</a>. 35 35 </div> 36 36 </details>
+5
appview/pages/templates/fragments/starBtn-oob.html
··· 1 + {{ define "fragments/starBtn-oob" }} 2 + <div hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'> 3 + {{ template "fragments/starBtn" . }} 4 + </div> 5 + {{ end }}
+1 -3
appview/pages/templates/fragments/starBtn.html
··· 1 1 {{ define "fragments/starBtn" }} 2 + {{/* NOTE: this fragment is always replaced with hx-swap-oob */}} 2 3 <button 3 4 id="starBtn" 4 5 class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group" ··· 10 11 {{ end }} 11 12 12 13 hx-trigger="click" 13 - hx-target="this" 14 - hx-swap="outerHTML" 15 - hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]' 16 14 hx-disabled-elt="#starBtn" 17 15 > 18 16 {{ if .IsStarred }}
+8
appview/pages/templates/fragments/tabSelector.html
··· 2 2 {{ $name := .Name }} 3 3 {{ $all := .Values }} 4 4 {{ $active := .Active }} 5 + {{ $include := .Include }} 5 6 <div class="flex justify-between divide-x divide-gray-200 dark:divide-gray-700 rounded border border-gray-200 dark:border-gray-700 overflow-hidden"> 6 7 {{ $activeTab := "bg-white dark:bg-gray-700 shadow-sm" }} 7 8 {{ $inactiveTab := "bg-gray-100 dark:bg-gray-800 shadow-inner" }} 8 9 {{ range $index, $value := $all }} 9 10 {{ $isActive := eq $value.Key $active }} 10 11 <a href="?{{ $name }}={{ $value.Key }}" 12 + {{ if $include }} 13 + hx-get="?{{ $name }}={{ $value.Key }}" 14 + hx-include="{{ $include }}" 15 + hx-push-url="true" 16 + hx-target="body" 17 + hx-on:htmx:config-request="if(!event.detail.parameters.q) delete event.detail.parameters.q" 18 + {{ end }} 11 19 class="p-2 whitespace-nowrap flex justify-center items-center gap-2 text-sm w-full block hover:no-underline text-center {{ if $isActive }} {{$activeTab }} {{ else }} {{ $inactiveTab }} {{ end }}"> 12 20 {{ if $value.Icon }} 13 21 {{ i $value.Icon "size-4" }}
+22
appview/pages/templates/fragments/tinyAvatarList.html
··· 1 + {{ define "fragments/tinyAvatarList" }} 2 + {{ $all := .all }} 3 + {{ $classes := .classes }} 4 + {{ $ps := take $all 5 }} 5 + <div class="inline-flex items-center -space-x-3"> 6 + {{ $c := "z-50 z-40 z-30 z-20 z-10" }} 7 + {{ range $i, $p := $ps }} 8 + <img 9 + src="{{ tinyAvatar . }}" 10 + alt="" 11 + class="rounded-full size-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0 {{ $classes }}" 12 + /> 13 + {{ end }} 14 + 15 + {{ if gt (len $all) 5 }} 16 + <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm"> 17 + +{{ sub (len $all) 5 }} 18 + </span> 19 + {{ end }} 20 + </div> 21 + {{ end }} 22 +
+1 -1
appview/pages/templates/knots/index.html
··· 105 105 {{ define "docsButton" }} 106 106 <a 107 107 class="btn flex items-center gap-2" 108 - href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 108 + href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide"> 109 109 {{ i "book" "size-4" }} 110 110 docs 111 111 </a>
+2 -2
appview/pages/templates/layouts/fragments/footer.html
··· 26 26 <div class="flex flex-col gap-1"> 27 27 <div class="{{ $headerStyle }}">resources</div> 28 28 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 29 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 29 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 30 30 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 31 31 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 32 32 </div> ··· 73 73 <div class="flex flex-col gap-1"> 74 74 <div class="{{ $headerStyle }}">resources</div> 75 75 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 76 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 76 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 77 77 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 78 78 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 79 79 </div>
+8 -7
appview/pages/templates/layouts/profilebase.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }}{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - {{ $avatarUrl := fullAvatar .Card.UserHandle }} 5 - <meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" /> 4 + {{ $handle := resolve .Card.UserDid }} 5 + {{ $avatarUrl := fullAvatar $handle }} 6 + <meta property="og:title" content="{{ $handle }}" /> 6 7 <meta property="og:type" content="profile" /> 7 - <meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}?tab={{ .Active }}" /> 8 - <meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" /> 8 + <meta property="og:url" content="https://tangled.org/{{ $handle }}?tab={{ .Active }}" /> 9 + <meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" /> 9 10 <meta property="og:image" content="{{ $avatarUrl }}" /> 10 11 <meta property="og:image:width" content="512" /> 11 12 <meta property="og:image:height" content="512" /> 12 13 13 14 <meta name="twitter:card" content="summary" /> 14 - <meta name="twitter:title" content="{{ or .Card.UserHandle .Card.UserDid }}" /> 15 - <meta name="twitter:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" /> 15 + <meta name="twitter:title" content="{{ $handle }}" /> 16 + <meta name="twitter:description" content="{{ or .Card.Profile.Description $handle }}" /> 16 17 <meta name="twitter:image" content="{{ $avatarUrl }}" /> 17 18 {{ end }} 18 19
+35 -10
appview/pages/templates/repo/commit.html
··· 25 25 </div> 26 26 27 27 <div class="flex flex-wrap items-center space-x-2"> 28 - <p class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-300"> 29 - {{ $did := index $.EmailToDid $commit.Author.Email }} 30 - 31 - {{ if $did }} 32 - {{ template "user/fragments/picHandleLink" $did }} 33 - {{ else }} 34 - <a href="mailto:{{ $commit.Author.Email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $commit.Author.Name }}</a> 35 - {{ end }} 28 + <p class="flex flex-wrap items-center gap-1 text-sm text-gray-500 dark:text-gray-300"> 29 + {{ template "attribution" . }} 36 30 37 31 <span class="px-1 select-none before:content-['\00B7']"></span> 38 - {{ template "repo/fragments/time" $commit.Author.When }} 32 + {{ template "repo/fragments/time" $commit.Committer.When }} 39 33 <span class="px-1 select-none before:content-['\00B7']"></span> 40 34 41 35 <a href="/{{ $repo }}/commit/{{ $commit.This }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ slice $commit.This 0 8 }}</a> ··· 79 73 </section> 80 74 {{end}} 81 75 76 + {{ define "attribution" }} 77 + {{ $commit := .Diff.Commit }} 78 + {{ $showCommitter := true }} 79 + {{ if eq $commit.Author.Email $commit.Committer.Email }} 80 + {{ $showCommitter = false }} 81 + {{ end }} 82 + 83 + {{ if $showCommitter }} 84 + authored by {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid) }} 85 + {{ range $commit.CoAuthors }} 86 + {{ template "attributedUser" (list .Email .Name $.EmailToDid) }} 87 + {{ end }} 88 + and committed by {{ template "attributedUser" (list $commit.Committer.Email $commit.Committer.Name $.EmailToDid) }} 89 + {{ else }} 90 + {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid )}} 91 + {{ end }} 92 + {{ end }} 93 + 94 + {{ define "attributedUser" }} 95 + {{ $email := index . 0 }} 96 + {{ $name := index . 1 }} 97 + {{ $map := index . 2 }} 98 + {{ $did := index $map $email }} 99 + 100 + {{ if $did }} 101 + {{ template "user/fragments/picHandleLink" $did }} 102 + {{ else }} 103 + <a href="mailto:{{ $email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $name }}</a> 104 + {{ end }} 105 + {{ end }} 106 + 82 107 {{ define "topbarLayout" }} 83 108 <header class="col-span-full" style="z-index: 20;"> 84 109 {{ template "layouts/fragments/topbar" . }} ··· 111 136 {{ end }} 112 137 113 138 {{ define "contentAfter" }} 114 - {{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }} 139 + {{ template "repo/fragments/diff" (list .Diff .DiffOpts) }} 115 140 {{end}} 116 141 117 142 {{ define "contentAfterLeft" }}
+1 -1
appview/pages/templates/repo/compare/compare.html
··· 42 42 {{ end }} 43 43 44 44 {{ define "contentAfter" }} 45 - {{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }} 45 + {{ template "repo/fragments/diff" (list .Diff .DiffOpts) }} 46 46 {{end}} 47 47 48 48 {{ define "contentAfterLeft" }}
+2 -2
appview/pages/templates/repo/empty.html
··· 26 26 {{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }} 27 27 {{ $knot := .RepoInfo.Knot }} 28 28 {{ if eq $knot "knot1.tangled.sh" }} 29 - {{ $knot = "tangled.sh" }} 29 + {{ $knot = "tangled.org" }} 30 30 {{ end }} 31 31 <div class="w-full flex place-content-center"> 32 32 <div class="py-6 w-fit flex flex-col gap-4"> ··· 35 35 36 36 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p> 37 37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p> 38 - <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p> 38 + <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ resolve .RepoInfo.OwnerDid }}/{{ .RepoInfo.Name }}</code></p> 39 39 <p><span class="{{$bullet}}">4</span>Push!</p> 40 40 </div> 41 41 </div>
+49
appview/pages/templates/repo/fragments/backlinks.html
··· 1 + {{ define "repo/fragments/backlinks" }} 2 + {{ if .Backlinks }} 3 + <div id="at-uri-panel" class="px-2 md:px-0"> 4 + <div> 5 + <span class="text-sm py-1 font-bold text-gray-500 dark:text-gray-400">Referenced by</span> 6 + </div> 7 + <ul> 8 + {{ range .Backlinks }} 9 + <li> 10 + {{ $repoOwner := resolve .Handle }} 11 + {{ $repoName := .Repo }} 12 + {{ $repoUrl := printf "%s/%s" $repoOwner $repoName }} 13 + <div class="flex flex-col"> 14 + <div class="flex gap-2 items-center"> 15 + {{ if .State.IsClosed }} 16 + <span class="text-gray-500 dark:text-gray-400"> 17 + {{ i "ban" "size-3" }} 18 + </span> 19 + {{ else if eq .Kind.String "issues" }} 20 + <span class="text-green-600 dark:text-green-500"> 21 + {{ i "circle-dot" "size-3" }} 22 + </span> 23 + {{ else if .State.IsOpen }} 24 + <span class="text-green-600 dark:text-green-500"> 25 + {{ i "git-pull-request" "size-3" }} 26 + </span> 27 + {{ else if .State.IsMerged }} 28 + <span class="text-purple-600 dark:text-purple-500"> 29 + {{ i "git-merge" "size-3" }} 30 + </span> 31 + {{ else }} 32 + <span class="text-gray-600 dark:text-gray-300"> 33 + {{ i "git-pull-request-closed" "size-3" }} 34 + </span> 35 + {{ end }} 36 + <a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a> 37 + </div> 38 + {{ if not (eq $.RepoInfo.FullName $repoUrl) }} 39 + <div> 40 + <span>on <a href="/{{ $repoUrl }}">{{ $repoUrl }}</a></span> 41 + </div> 42 + {{ end }} 43 + </div> 44 + </li> 45 + {{ end }} 46 + </ul> 47 + </div> 48 + {{ end }} 49 + {{ end }}
+3 -2
appview/pages/templates/repo/fragments/cloneDropdown.html
··· 43 43 44 44 <!-- SSH Clone --> 45 45 <div class="mb-3"> 46 + {{ $repoOwnerHandle := resolve .RepoInfo.OwnerDid }} 46 47 <label class="block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1">SSH</label> 47 48 <div class="flex items-center border border-gray-300 dark:border-gray-600 rounded"> 48 49 <code 49 50 class="flex-1 px-3 py-2 text-sm bg-gray-50 dark:bg-gray-700 text-gray-900 dark:text-gray-100 rounded-l select-all cursor-pointer whitespace-nowrap overflow-x-auto" 50 51 onclick="window.getSelection().selectAllChildren(this)" 51 - data-url="git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}" 52 - >git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code> 52 + data-url="git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}" 53 + >git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}</code> 53 54 <button 54 55 onclick="copyToClipboard(this, this.previousElementSibling.getAttribute('data-url'))" 55 56 class="px-3 py-2 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200 border-l border-gray-300 dark:border-gray-600"
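The SSH clone string in this fragment is built from the knot host piped through `stripPort`, joined with the resolved owner handle and repository name. Below is a minimal sketch of how such a helper could behave, assuming a conventional host:port split — illustrative only, not necessarily how the appview's `stripPort` template function is implemented:

```go
package main

import (
	"fmt"
	"net"
)

// stripPort drops a trailing :port from a host, if one is present.
// Hypothetical helper for illustration; the appview's real funcmap may differ.
func stripPort(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		// no port (or unparsable input): use the value as-is
		return hostport
	}
	return host
}

func main() {
	knot := "knot1.tangled.sh:443"   // example knot host with a port
	owner := "user.example.com"      // resolved handle of the repo owner
	repo := "core"                   // repository name

	// prints: git@knot1.tangled.sh:user.example.com/core
	fmt.Printf("git@%s:%s/%s\n", stripPort(knot), owner, repo)
}
```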
+3 -4
appview/pages/templates/repo/fragments/diff.html
··· 1 1 {{ define "repo/fragments/diff" }} 2 - {{ $repo := index . 0 }} 3 - {{ $diff := index . 1 }} 4 - {{ $opts := index . 2 }} 2 + {{ $diff := index . 0 }} 3 + {{ $opts := index . 1 }} 5 4 6 5 {{ $commit := $diff.Commit }} 7 6 {{ $diff := $diff.Diff }} ··· 18 17 {{ else }} 19 18 {{ range $idx, $hunk := $diff }} 20 19 {{ with $hunk }} 21 - <details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 20 + <details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 22 21 <summary class="list-none cursor-pointer sticky top-0"> 23 22 <div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between"> 24 23 <div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
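With this change the `repo/fragments/diff` fragment receives a two-element positional list (diff, options) and unpacks it with `index`, while the call sites pass `(list .Diff .DiffOpts)`. A self-contained sketch of that list/index calling convention, using a hypothetical `list` funcmap entry and illustrative data rather than the appview's real funcmap and types:

```go
package main

import (
	"html/template"
	"os"
)

func main() {
	funcs := template.FuncMap{
		// hypothetical "list" helper: bundles positional arguments for a sub-template
		"list": func(vs ...any) []any { return vs },
	}

	const src = `{{ define "diff" }}{{ $diff := index . 0 }}{{ $opts := index . 1 }}rendering {{ $diff }} (split: {{ $opts.Split }}){{ end }}{{ template "diff" (list .Diff .DiffOpts) }}`

	t := template.Must(template.New("page").Funcs(funcs).Parse(src))

	data := struct {
		Diff     string
		DiffOpts struct{ Split bool }
	}{
		Diff:     "3 files changed",
		DiffOpts: struct{ Split bool }{Split: true},
	}

	// prints: rendering 3 files changed (split: true)
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```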
+15 -1
appview/pages/templates/repo/fragments/editLabelPanel.html
··· 170 170 {{ $fieldName := $def.AtUri }} 171 171 {{ $valueType := $def.ValueType }} 172 172 {{ $value := .value }} 173 + 173 174 {{ if $valueType.IsDidFormat }} 174 175 {{ $value = trimPrefix (resolve .value) "@" }} 176 + <actor-typeahead> 177 + <input 178 + autocapitalize="none" 179 + autocorrect="off" 180 + autocomplete="off" 181 + placeholder="user.tngl.sh" 182 + value="{{$value}}" 183 + name="{{$fieldName}}" 184 + type="text" 185 + class="p-1 w-full text-sm" 186 + /> 187 + </actor-typeahead> 188 + {{ else }} 189 + <input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}"> 175 190 {{ end }} 176 - <input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}"> 177 191 {{ end }} 178 192 179 193 {{ define "nullTypeInput" }}
+1 -16
appview/pages/templates/repo/fragments/participants.html
··· 6 6 <span class="font-bold text-gray-500 dark:text-gray-400 capitalize">Participants</span> 7 7 <span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 ml-1">{{ len $all }}</span> 8 8 </div> 9 - <div class="flex items-center -space-x-3 mt-2"> 10 - {{ $c := "z-50 z-40 z-30 z-20 z-10" }} 11 - {{ range $i, $p := $ps }} 12 - <img 13 - src="{{ tinyAvatar . }}" 14 - alt="" 15 - class="rounded-full h-8 w-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0" 16 - /> 17 - {{ end }} 18 - 19 - {{ if gt (len $all) 5 }} 20 - <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm"> 21 - +{{ sub (len $all) 5 }} 22 - </span> 23 - {{ end }} 24 - </div> 9 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "w-8 h-8") }} 25 10 </div> 26 11 {{ end }}
+35 -35
appview/pages/templates/repo/fragments/splitDiff.html
··· 3 3 {{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}} 4 4 {{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}} 5 5 {{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}} 6 - {{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 6 + {{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 7 7 {{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}} 8 8 {{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}} 9 9 {{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}} 10 10 {{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}} 11 11 {{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}} 12 12 <div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700"> 13 - <pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 13 + <div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 14 14 {{- range .LeftLines -}} 15 15 {{- if .IsEmpty -}} 16 - <div class="{{ $emptyStyle }} {{ $containerStyle }}"> 17 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div> 18 - <div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div> 19 - <div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div> 20 - </div> 16 + <span class="{{ $emptyStyle }} {{ $containerStyle }}"> 17 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span> 18 + <span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span> 19 + <span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span> 20 + </span> 21 21 {{- else if eq .Op.String "-" -}} 22 - <div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 23 - <div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div> 24 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 25 - <div class="px-2">{{ .Content }}</div> 26 - </div> 22 + <span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 23 + <span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span> 24 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 25 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 26 + </span> 27 27 {{- else if eq .Op.String " " -}} 28 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 29 - <div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div> 30 - <div class="{{ $opStyle 
}}">{{ .Op.String }}</div> 31 - <div class="px-2">{{ .Content }}</div> 32 - </div> 28 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 29 + <span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span> 30 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 31 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 32 + </span> 33 33 {{- end -}} 34 34 {{- end -}} 35 - {{- end -}}</div></div></pre> 35 + {{- end -}}</div></div></div> 36 36 37 - <pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 37 + <div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 38 38 {{- range .RightLines -}} 39 39 {{- if .IsEmpty -}} 40 - <div class="{{ $emptyStyle }} {{ $containerStyle }}"> 41 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div> 42 - <div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div> 43 - <div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div> 44 - </div> 40 + <span class="{{ $emptyStyle }} {{ $containerStyle }}"> 41 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span> 42 + <span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span> 43 + <span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span> 44 + </span> 45 45 {{- else if eq .Op.String "+" -}} 46 - <div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 47 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div> 48 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 49 - <div class="px-2" >{{ .Content }}</div> 50 - </div> 46 + <span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 47 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span> 48 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 49 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 50 + </span> 51 51 {{- else if eq .Op.String " " -}} 52 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 53 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div> 54 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 55 - <div class="px-2">{{ .Content }}</div> 56 - </div> 52 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 53 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span> 54 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 55 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 56 + </span> 57 57 {{- end -}} 58 58 {{- end -}} 59 - {{- end -}}</div></div></pre> 59 + {{- 
end -}}</div></div></div> 60 60 </div> 61 61 {{ end }}
+21 -22
appview/pages/templates/repo/fragments/unifiedDiff.html
··· 1 1 {{ define "repo/fragments/unifiedDiff" }} 2 2 {{ $name := .Id }} 3 - <pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 3 + <div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 4 4 {{- $oldStart := .OldPosition -}} 5 5 {{- $newStart := .NewPosition -}} 6 6 {{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}} 7 7 {{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}} 8 8 {{- $lineNrSepStyle1 := "" -}} 9 9 {{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}} 10 - {{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 10 + {{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 11 11 {{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}} 12 12 {{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}} 13 13 {{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}} 14 14 {{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}} 15 15 {{- range .Lines -}} 16 16 {{- if eq .Op.String "+" -}} 17 - <div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}"> 18 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div> 19 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div> 20 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 21 - <div class="px-2">{{ .Line }}</div> 22 - </div> 17 + <span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}"> 18 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span> 19 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span> 20 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 21 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 22 + </span> 23 23 {{- $newStart = add64 $newStart 1 -}} 24 24 {{- end -}} 25 25 {{- if eq .Op.String "-" -}} 26 - <div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}"> 27 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div> 28 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div> 29 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 30 - <div class="px-2">{{ .Line }}</div> 31 - </div> 26 + <span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}"> 27 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span> 28 + <span 
class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span> 29 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 30 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 31 + </span> 32 32 {{- $oldStart = add64 $oldStart 1 -}} 33 33 {{- end -}} 34 34 {{- if eq .Op.String " " -}} 35 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}"> 36 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div> 37 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div> 38 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 39 - <div class="px-2">{{ .Line }}</div> 40 - </div> 35 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}"> 36 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span> 37 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span> 38 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 39 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 40 + </span> 41 41 {{- $newStart = add64 $newStart 1 -}} 42 42 {{- $oldStart = add64 $oldStart 1 -}} 43 43 {{- end -}} 44 44 {{- end -}} 45 - {{- end -}}</div></div></pre> 45 + {{- end -}}</div></div></div> 46 46 {{ end }} 47 -
+31 -9
appview/pages/templates/repo/index.html
··· 14 14 {{ end }} 15 15 <div class="flex items-center justify-between pb-5"> 16 16 {{ block "branchSelector" . }}{{ end }} 17 - <div class="flex md:hidden items-center gap-2"> 17 + <div class="flex md:hidden items-center gap-3"> 18 18 <a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold"> 19 19 {{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }} 20 20 </a> ··· 47 47 <div class="px-4 py-2 border-b border-gray-200 dark:border-gray-600 flex items-center gap-4 flex-wrap"> 48 48 {{ range $value := .Languages }} 49 49 <div 50 - class="flex flex-grow items-center gap-2 text-xs align-items-center justify-center" 50 + class="flex items-center gap-2 text-xs align-items-center justify-center" 51 51 > 52 52 {{ template "repo/fragments/colorBall" (dict "color" (langColor $value.Name)) }} 53 53 <div>{{ or $value.Name "Other" }} ··· 66 66 67 67 {{ define "branchSelector" }} 68 68 <div class="flex gap-2 items-center justify-between w-full"> 69 - <div class="flex gap-2 items-center"> 69 + <div class="flex gap-2 items-stretch"> 70 70 <select 71 71 onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)" 72 72 class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700" ··· 228 228 <span 229 229 class="mx-1 before:content-['ยท'] before:select-none" 230 230 ></span> 231 - <span> 232 - {{ $did := index $.EmailToDid .Author.Email }} 233 - <a href="{{ if $did }}/{{ resolve $did }}{{ else }}mailto:{{ .Author.Email }}{{ end }}" 234 - class="text-gray-500 dark:text-gray-400 no-underline hover:underline" 235 - >{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ .Author.Name }}{{ end }}</a> 236 - </span> 231 + {{ template "attribution" (list . $.EmailToDid) }} 237 232 <div class="inline-block px-1 select-none after:content-['ยท']"></div> 238 233 {{ template "repo/fragments/time" .Committer.When }} 239 234 ··· 259 254 {{ end }} 260 255 </div> 261 256 </div> 257 + {{ end }} 258 + 259 + {{ define "attribution" }} 260 + {{ $commit := index . 0 }} 261 + {{ $map := index . 1 }} 262 + <span class="flex items-center"> 263 + {{ $author := index $map $commit.Author.Email }} 264 + {{ $coauthors := $commit.CoAuthors }} 265 + {{ $all := list }} 266 + 267 + {{ if $author }} 268 + {{ $all = append $all $author }} 269 + {{ end }} 270 + {{ range $coauthors }} 271 + {{ $co := index $map .Email }} 272 + {{ if $co }} 273 + {{ $all = append $all $co }} 274 + {{ end }} 275 + {{ end }} 276 + 277 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }} 278 + <a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 279 + class="no-underline hover:underline"> 280 + {{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }} 281 + {{ if $coauthors }} +{{ length $coauthors }}{{ end }} 282 + </a> 283 + </span> 262 284 {{ end }} 263 285 264 286 {{ define "branchList" }}
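The new `attribution` block gathers the author and any co-authors whose emails resolve through the `EmailToDid` map, then renders a tiny avatar list plus a `+N` co-author count. A rough equivalent of that lookup in plain Go — the types and names here are illustrative, not the appview's models:

```go
package main

import "fmt"

type person struct{ Name, Email string }

type commit struct {
	Author    person
	CoAuthors []person
}

// attributionDids mirrors the template logic: collect the DIDs of the author
// and any co-authors that have a known email → DID mapping, skipping the rest.
func attributionDids(c commit, emailToDid map[string]string) []string {
	var dids []string
	if did, ok := emailToDid[c.Author.Email]; ok {
		dids = append(dids, did)
	}
	for _, co := range c.CoAuthors {
		if did, ok := emailToDid[co.Email]; ok {
			dids = append(dids, did)
		}
	}
	return dids
}

func main() {
	c := commit{
		Author:    person{"Alice", "alice@example.com"},
		CoAuthors: []person{{"Bob", "bob@example.com"}},
	}
	m := map[string]string{"alice@example.com": "did:plc:alice"}

	// prints: [did:plc:alice] — Bob has no mapping, so he is skipped
	fmt.Println(attributionDids(c, m))
}
```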
+4 -4
appview/pages/templates/repo/issues/fragments/issueCommentHeader.html
··· 1 1 {{ define "repo/issues/fragments/issueCommentHeader" }} 2 2 <div class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-400 "> 3 - {{ template "user/fragments/picHandleLink" .Comment.Did }} 3 + {{ template "user/fragments/picHandleLink" .Comment.Did.String }} 4 4 {{ template "hats" $ }} 5 5 {{ template "timestamp" . }} 6 - {{ $isCommentOwner := and .LoggedInUser (eq .LoggedInUser.Did .Comment.Did) }} 6 + {{ $isCommentOwner := and .LoggedInUser (eq .LoggedInUser.Did .Comment.Did.String) }} 7 7 {{ if and $isCommentOwner (not .Comment.Deleted) }} 8 8 {{ template "editIssueComment" . }} 9 9 {{ template "deleteIssueComment" . }} ··· 19 19 {{ end }} 20 20 21 21 {{ define "timestamp" }} 22 - <a href="#{{ .Comment.Id }}" 22 + <a href="#comment-{{ .Comment.Id }}" 23 23 class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-400 hover:underline no-underline" 24 - id="{{ .Comment.Id }}"> 24 + id="comment-{{ .Comment.Id }}"> 25 25 {{ if .Comment.Deleted }} 26 26 {{ template "repo/fragments/shortTimeAgo" .Comment.Deleted }} 27 27 {{ else if .Comment.Edited }}
+3
appview/pages/templates/repo/issues/issue.html
··· 20 20 "Subject" $.Issue.AtUri 21 21 "State" $.Issue.Labels) }} 22 22 {{ template "repo/fragments/participants" $.Issue.Participants }} 23 + {{ template "repo/fragments/backlinks" 24 + (dict "RepoInfo" $.RepoInfo 25 + "Backlinks" $.Backlinks) }} 23 26 {{ template "repo/fragments/externalLinkPanel" $.Issue.AtUri }} 24 27 </div> 25 28 </div>
+2 -1
appview/pages/templates/repo/issues/issues.html
··· 32 32 <input type="hidden" name="state" value="{{ if .FilteringByOpen }}open{{ else }}closed{{ end }}"> 33 33 <div class="flex-1 flex relative"> 34 34 <input 35 + id="search-q" 35 36 class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer" 36 37 type="text" 37 38 name="q" ··· 53 54 </button> 54 55 </form> 55 56 <div class="sm:row-start-1"> 56 - {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }} 57 + {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }} 57 58 </div> 58 59 <a 59 60 href="/{{ .RepoInfo.FullName }}/issues/new"
+40 -23
appview/pages/templates/repo/log.html
··· 17 17 <div class="hidden md:flex md:flex-col divide-y divide-gray-200 dark:divide-gray-700"> 18 18 {{ $grid := "grid grid-cols-14 gap-4" }} 19 19 <div class="{{ $grid }}"> 20 - <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2">Author</div> 20 + <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Author</div> 21 21 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Commit</div> 22 22 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-6">Message</div> 23 - <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-1"></div> 24 23 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2 justify-self-end">Date</div> 25 24 </div> 26 25 {{ range $index, $commit := .Commits }} 27 26 {{ $messageParts := splitN $commit.Message "\n\n" 2 }} 28 27 <div class="{{ $grid }} py-3"> 29 - <div class="align-top truncate col-span-2"> 30 - {{ $did := index $.EmailToDid $commit.Author.Email }} 31 - {{ if $did }} 32 - {{ template "user/fragments/picHandleLink" $did }} 33 - {{ else }} 34 - <a href="mailto:{{ $commit.Author.Email }}" class="text-gray-700 dark:text-gray-300 no-underline hover:underline">{{ $commit.Author.Name }}</a> 35 - {{ end }} 28 + <div class="align-top col-span-3"> 29 + {{ template "attribution" (list $commit $.EmailToDid) }} 36 30 </div> 37 31 <div class="align-top font-mono flex items-start col-span-3"> 38 32 {{ $verified := $.VerifiedCommits.IsVerified $commit.Hash.String }} ··· 61 55 <div class="align-top col-span-6"> 62 56 <div> 63 57 <a href="/{{ $.RepoInfo.FullName }}/commit/{{ $commit.Hash.String }}" class="dark:text-white no-underline hover:underline">{{ index $messageParts 0 }}</a> 58 + 64 59 {{ if gt (len $messageParts) 1 }} 65 60 <button class="py-1/2 px-1 bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 rounded" hx-on:click="this.parentElement.nextElementSibling.classList.toggle('hidden')">{{ i "ellipsis" "w-3 h-3" }}</button> 66 61 {{ end }} ··· 72 67 </span> 73 68 {{ end }} 74 69 {{ end }} 70 + 71 + <!-- ci status --> 72 + <span class="text-xs"> 73 + {{ $pipeline := index $.Pipelines .Hash.String }} 74 + {{ if and $pipeline (gt (len $pipeline.Statuses) 0) }} 75 + {{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }} 76 + {{ end }} 77 + </span> 75 78 </div> 76 79 77 80 {{ if gt (len $messageParts) 1 }} 78 81 <p class="hidden mt-1 text-sm text-gray-600 dark:text-gray-400">{{ nl2br (index $messageParts 1) }}</p> 79 82 {{ end }} 80 - </div> 81 - <div class="align-top col-span-1"> 82 - <!-- ci status --> 83 - {{ $pipeline := index $.Pipelines .Hash.String }} 84 - {{ if and $pipeline (gt (len $pipeline.Statuses) 0) }} 85 - {{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }} 86 - {{ end }} 87 83 </div> 88 84 <div class="align-top justify-self-end text-gray-500 dark:text-gray-400 col-span-2">{{ template "repo/fragments/shortTimeAgo" $commit.Committer.When }}</div> 89 85 </div> ··· 152 148 </a> 153 149 </span> 154 150 <span class="mx-2 before:content-['ยท'] before:select-none"></span> 155 - <span> 156 - {{ $did := index $.EmailToDid $commit.Author.Email }} 157 - <a href="{{ if $did }}/{{ $did }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 158 - class="text-gray-500 
dark:text-gray-400 no-underline hover:underline"> 159 - {{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ $commit.Author.Name }}{{ end }} 160 - </a> 161 - </span> 151 + {{ template "attribution" (list $commit $.EmailToDid) }} 162 152 <div class="inline-block px-1 select-none after:content-['ยท']"></div> 163 153 <span>{{ template "repo/fragments/shortTime" $commit.Committer.When }}</span> 164 154 ··· 176 166 </div> 177 167 </section> 178 168 169 + {{ end }} 170 + 171 + {{ define "attribution" }} 172 + {{ $commit := index . 0 }} 173 + {{ $map := index . 1 }} 174 + <span class="flex items-center gap-1"> 175 + {{ $author := index $map $commit.Author.Email }} 176 + {{ $coauthors := $commit.CoAuthors }} 177 + {{ $all := list }} 178 + 179 + {{ if $author }} 180 + {{ $all = append $all $author }} 181 + {{ end }} 182 + {{ range $coauthors }} 183 + {{ $co := index $map .Email }} 184 + {{ if $co }} 185 + {{ $all = append $all $co }} 186 + {{ end }} 187 + {{ end }} 188 + 189 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }} 190 + <a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 191 + class="no-underline hover:underline"> 192 + {{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }} 193 + {{ if $coauthors }} +{{ length $coauthors }}{{ end }} 194 + </a> 195 + </span> 179 196 {{ end }} 180 197 181 198 {{ define "repoAfter" }}
+1 -1
appview/pages/templates/repo/pipelines/pipelines.html
··· 23 23 </p> 24 24 <p> 25 25 <span class="{{ $bullet }}">2</span>Configure your CI/CD 26 - <a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>. 26 + <a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>. 27 27 </p> 28 28 <p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p> 29 29 </div>
+1 -1
appview/pages/templates/repo/pulls/patch.html
··· 54 54 {{ end }} 55 55 56 56 {{ define "contentAfter" }} 57 - {{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }} 57 + {{ template "repo/fragments/diff" (list .Diff .DiffOpts) }} 58 58 {{end}} 59 59 60 60 {{ define "contentAfterLeft" }}
+6 -3
appview/pages/templates/repo/pulls/pull.html
··· 21 21 "Subject" $.Pull.AtUri 22 22 "State" $.Pull.Labels) }} 23 23 {{ template "repo/fragments/participants" $.Pull.Participants }} 24 + {{ template "repo/fragments/backlinks" 25 + (dict "RepoInfo" $.RepoInfo 26 + "Backlinks" $.Backlinks) }} 24 27 {{ template "repo/fragments/externalLinkPanel" $.Pull.AtUri }} 25 28 </div> 26 29 </div> ··· 162 165 163 166 <div class="md:pl-[3.5rem] flex flex-col gap-2 mt-2 relative"> 164 167 {{ range $cidx, $c := .Comments }} 165 - <div id="comment-{{$c.ID}}" class="bg-white dark:bg-gray-800 rounded drop-shadow-sm py-2 px-4 relative w-full"> 168 + <div id="comment-{{$c.Id}}" class="bg-white dark:bg-gray-800 rounded drop-shadow-sm py-2 px-4 relative w-full"> 166 169 {{ if gt $cidx 0 }} 167 170 <div class="absolute left-8 -top-2 w-px h-2 bg-gray-300 dark:bg-gray-600"></div> 168 171 {{ end }} 169 172 <div class="text-sm text-gray-500 dark:text-gray-400 flex items-center gap-1"> 170 - {{ template "user/fragments/picHandleLink" $c.OwnerDid }} 173 + {{ template "user/fragments/picHandleLink" $c.Did.String }} 171 174 <span class="before:content-['ยท']"></span> 172 - <a class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" href="#comment-{{.ID}}">{{ template "repo/fragments/time" $c.Created }}</a> 175 + <a class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" href="#comment-{{.Id}}">{{ template "repo/fragments/time" $c.Created }}</a> 173 176 </div> 174 177 <div class="prose dark:prose-invert"> 175 178 {{ $c.Body | markdown }}
+2 -1
appview/pages/templates/repo/pulls/pulls.html
··· 38 38 <input type="hidden" name="state" value="{{ .FilteringBy.String }}"> 39 39 <div class="flex-1 flex relative"> 40 40 <input 41 + id="search-q" 41 42 class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer" 42 43 type="text" 43 44 name="q" ··· 59 60 </button> 60 61 </form> 61 62 <div class="sm:row-start-1"> 62 - {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }} 63 + {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }} 63 64 </div> 64 65 <a 65 66 href="/{{ .RepoInfo.FullName }}/pulls/new"
+5 -4
appview/pages/templates/repo/settings/access.html
··· 29 29 {{ template "addCollaboratorButton" . }} 30 30 {{ end }} 31 31 {{ range .Collaborators }} 32 + {{ $handle := resolve .Did }} 32 33 <div class="border border-gray-200 dark:border-gray-700 rounded p-4"> 33 34 <div class="flex items-center gap-3"> 34 35 <img 35 - src="{{ fullAvatar .Handle }}" 36 - alt="{{ .Handle }}" 36 + src="{{ fullAvatar $handle }}" 37 + alt="{{ $handle }}" 37 38 class="rounded-full h-10 w-10 border border-gray-300 dark:border-gray-600 flex-shrink-0"/> 38 39 39 40 <div class="flex-1 min-w-0"> 40 - <a href="/{{ .Handle }}" class="block truncate"> 41 - {{ didOrHandle .Did .Handle }} 41 + <a href="/{{ $handle }}" class="block truncate"> 42 + {{ $handle }} 42 43 </a> 43 44 <p class="text-sm text-gray-500 dark:text-gray-400">{{ .Role }}</p> 44 45 </div>
+1 -1
appview/pages/templates/repo/settings/pipelines.html
··· 22 22 <p class="text-gray-500 dark:text-gray-400"> 23 23 Choose a spindle to execute your workflows on. Only repository owners 24 24 can configure spindles. Spindles can be selfhosted, 25 - <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 25 + <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 26 26 click to learn more. 27 27 </a> 28 28 </p>
+1 -1
appview/pages/templates/spindles/index.html
··· 102 102 {{ define "docsButton" }} 103 103 <a 104 104 class="btn flex items-center gap-2" 105 - href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 105 + href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 106 106 {{ i "book" "size-4" }} 107 107 docs 108 108 </a>
+6 -5
appview/pages/templates/strings/dashboard.html
··· 1 - {{ define "title" }}strings by {{ or .Card.UserHandle .Card.UserDid }}{{ end }} 1 + {{ define "title" }}strings by {{ resolve .Card.UserDid }}{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - <meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" /> 4 + {{ $handle := resolve .Card.UserDid }} 5 + <meta property="og:title" content="{{ $handle }}" /> 5 6 <meta property="og:type" content="profile" /> 6 - <meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}" /> 7 - <meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" /> 7 + <meta property="og:url" content="https://tangled.org/{{ $handle }}" /> 8 + <meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" /> 8 9 {{ end }} 9 10 10 11 ··· 35 36 {{ $s := index . 1 }} 36 37 <div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800"> 37 38 <div class="font-medium dark:text-white flex gap-2 items-center"> 38 - <a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 39 + <a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 39 40 </div> 40 41 {{ with $s.Description }} 41 42 <div class="text-gray-600 dark:text-gray-300 text-sm">
+4 -4
appview/pages/templates/strings/string.html
··· 1 - {{ define "title" }}{{ .String.Filename }} ยท by {{ didOrHandle .Owner.DID.String .Owner.Handle.String }}{{ end }} 1 + {{ define "title" }}{{ .String.Filename }} ยท by {{ resolve .Owner.DID.String }}{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - {{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }} 4 + {{ $ownerId := resolve .Owner.DID.String }} 5 5 <meta property="og:title" content="{{ .String.Filename }} ยท by {{ $ownerId }}" /> 6 6 <meta property="og:type" content="object" /> 7 7 <meta property="og:url" content="https://tangled.org/strings/{{ $ownerId }}/{{ .String.Rkey }}" /> ··· 9 9 {{ end }} 10 10 11 11 {{ define "content" }} 12 - {{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }} 12 + {{ $ownerId := resolve .Owner.DID.String }} 13 13 <section id="string-header" class="mb-4 py-2 px-6 dark:text-white"> 14 14 <div class="text-lg flex items-center justify-between"> 15 15 <div> ··· 17 17 <span class="select-none">/</span> 18 18 <a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a> 19 19 </div> 20 - <div class="flex gap-2 text-base"> 20 + <div class="flex gap-2 items-stretch text-base"> 21 21 {{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }} 22 22 <a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group" 23 23 hx-boost="true"
+4 -2
appview/pages/templates/user/followers.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท followers {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} ยท followers {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-followers" class="md:col-span-8 order-2 md:order-2"> ··· 19 19 "FollowersCount" .FollowersCount 20 20 "FollowingCount" .FollowingCount) }} 21 21 {{ else }} 22 - <p class="px-6 dark:text-white">This user does not have any followers yet.</p> 22 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 23 + <span>This user does not have any followers yet.</span> 24 + </div> 23 25 {{ end }} 24 26 </div> 25 27 {{ end }}
+4 -2
appview/pages/templates/user/following.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท following {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} ยท following {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-following" class="md:col-span-8 order-2 md:order-2"> ··· 19 19 "FollowersCount" .FollowersCount 20 20 "FollowingCount" .FollowingCount) }} 21 21 {{ else }} 22 - <p class="px-6 dark:text-white">This user does not follow anyone yet.</p> 22 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 23 + <span>This user does not follow anyone yet.</span> 24 + </div> 23 25 {{ end }} 24 26 </div> 25 27 {{ end }}
+2 -2
appview/pages/templates/user/fragments/followCard.html
··· 6 6 <img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" /> 7 7 </div> 8 8 9 - <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full"> 9 + <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0"> 10 10 <div class="flex-1 min-h-0 justify-around flex flex-col"> 11 11 <a href="/{{ $userIdent }}"> 12 12 <span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span> 13 13 </a> 14 14 {{ with .Profile }} 15 - <p class="text-sm pb-2 md:pb-2">{{.Description}}</p> 15 + <p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p> 16 16 {{ end }} 17 17 <div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full"> 18 18 <span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+1 -1
appview/pages/templates/user/fragments/profileCard.html
··· 1 1 {{ define "user/fragments/profileCard" }} 2 - {{ $userIdent := didOrHandle .UserDid .UserHandle }} 2 + {{ $userIdent := resolve .UserDid }} 3 3 <div class="grid grid-cols-3 md:grid-cols-1 gap-1 items-center"> 4 4 <div id="avatar" class="col-span-1 flex justify-center items-center"> 5 5 <div class="w-3/4 aspect-square relative">
+22 -4
appview/pages/templates/user/overview.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }}{{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-4 order-2 md:order-2"> ··· 16 16 <p class="text-sm font-bold px-2 pb-4 dark:text-white">ACTIVITY</p> 17 17 <div class="flex flex-col gap-4 relative"> 18 18 {{ if .ProfileTimeline.IsEmpty }} 19 - <p class="dark:text-white">This user does not have any activity yet.</p> 19 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 20 + <span class="flex items-center gap-2"> 21 + This user does not have any activity yet. 22 + </span> 23 + </div> 20 24 {{ end }} 21 25 22 26 {{ with .ProfileTimeline }} ··· 33 37 </p> 34 38 35 39 <div class="flex flex-col gap-1"> 40 + {{ block "commits" .Commits }} {{ end }} 36 41 {{ block "repoEvents" .RepoEvents }} {{ end }} 37 42 {{ block "issueEvents" .IssueEvents }} {{ end }} 38 43 {{ block "pullEvents" .PullEvents }} {{ end }} ··· 43 48 {{ end }} 44 49 {{ end }} 45 50 </div> 51 + {{ end }} 52 + 53 + {{ define "commits" }} 54 + {{ if . }} 55 + <div class="flex flex-wrap items-center gap-1"> 56 + {{ i "git-commit-horizontal" "size-5" }} 57 + created {{ . }} commits 58 + </div> 59 + {{ end }} 46 60 {{ end }} 47 61 48 62 {{ define "repoEvents" }} ··· 224 238 {{ define "ownRepos" }} 225 239 <div> 226 240 <div class="text-sm font-bold px-2 pb-4 dark:text-white flex items-center gap-2"> 227 - <a href="/@{{ or $.Card.UserHandle $.Card.UserDid }}?tab=repos" 241 + <a href="/{{ resolve $.Card.UserDid }}?tab=repos" 228 242 class="flex text-black dark:text-white items-center gap-2 no-underline hover:no-underline group"> 229 243 <span>PINNED REPOS</span> 230 244 </a> ··· 244 258 {{ template "user/fragments/repoCard" (list $ . false) }} 245 259 </div> 246 260 {{ else }} 247 - <p class="dark:text-white">This user does not have any pinned repos.</p> 261 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 262 + <span class="flex items-center gap-2"> 263 + This user does not have any pinned repos. 264 + </span> 265 + </div> 248 266 {{ end }} 249 267 </div> 250 268 </div>
+4 -2
appview/pages/templates/user/repos.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท repos {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} ยท repos {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-8 order-2 md:order-2"> ··· 13 13 {{ template "user/fragments/repoCard" (list $ . false) }} 14 14 </div> 15 15 {{ else }} 16 - <p class="px-6 dark:text-white">This user does not have any repos yet.</p> 16 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 17 + <span>This user does not have any repos yet.</span> 18 + </div> 17 19 {{ end }} 18 20 </div> 19 21 {{ end }}
+9 -6
appview/pages/templates/user/signup.html
··· 43 43 page to complete your registration. 44 44 </span> 45 45 <div class="w-full mt-4 text-center"> 46 - <div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}"></div> 46 + <div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}" data-size="flexible"></div> 47 47 </div> 48 48 <button class="btn text-base w-full my-2 mt-6" type="submit" id="signup-button" tabindex="7" > 49 49 <span>join now</span> 50 50 </button> 51 + <p class="text-sm text-gray-500"> 52 + Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>. 53 + </p> 54 + 55 + <p id="signup-msg" class="error w-full"></p> 56 + <p class="text-sm text-gray-500 pt-4"> 57 + By signing up, you agree to our <a href="/terms" class="underline">Terms of Service</a> and <a href="/privacy" class="underline">Privacy Policy</a>. 58 + </p> 51 59 </form> 52 - <p class="text-sm text-gray-500"> 53 - Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>. 54 - </p> 55 - 56 - <p id="signup-msg" class="error w-full"></p> 57 60 </main> 58 61 </body> 59 62 </html>
+4 -2
appview/pages/templates/user/starred.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท repos {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} ยท repos {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-8 order-2 md:order-2"> ··· 13 13 {{ template "user/fragments/repoCard" (list $ . true) }} 14 14 </div> 15 15 {{ else }} 16 - <p class="px-6 dark:text-white">This user does not have any starred repos yet.</p> 16 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 17 + <span>This user does not have any starred repos yet.</span> 18 + </div> 17 19 {{ end }} 18 20 </div> 19 21 {{ end }}
+5 -3
appview/pages/templates/user/strings.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท strings {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} ยท strings {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-strings" class="md:col-span-8 order-2 md:order-2"> ··· 13 13 {{ template "singleString" (list $ .) }} 14 14 </div> 15 15 {{ else }} 16 - <p class="px-6 dark:text-white">This user does not have any strings yet.</p> 16 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 17 + <span>This user does not have any strings yet.</span> 18 + </div> 17 19 {{ end }} 18 20 </div> 19 21 {{ end }} ··· 23 25 {{ $s := index . 1 }} 24 26 <div class="py-4 px-6 rounded bg-white dark:bg-gray-800"> 25 27 <div class="font-medium dark:text-white flex gap-2 items-center"> 26 - <a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 28 + <a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 27 29 </div> 28 30 {{ with $s.Description }} 29 31 <div class="text-gray-600 dark:text-gray-300 text-sm">
+16 -22
appview/pipelines/pipelines.go
··· 16 16 "tangled.org/core/appview/reporesolver" 17 17 "tangled.org/core/eventconsumer" 18 18 "tangled.org/core/idresolver" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 spindlemodel "tangled.org/core/spindle/models" 21 22 ··· 78 79 return 79 80 } 80 81 81 - repoInfo := f.RepoInfo(user) 82 - 83 82 ps, err := db.GetPipelineStatuses( 84 83 p.db, 85 84 30, 86 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 87 - db.FilterEq("repo_name", repoInfo.Name), 88 - db.FilterEq("knot", repoInfo.Knot), 85 + orm.FilterEq("repo_owner", f.Did), 86 + orm.FilterEq("repo_name", f.Name), 87 + orm.FilterEq("knot", f.Knot), 89 88 ) 90 89 if err != nil { 91 90 l.Error("failed to query db", "err", err) ··· 94 93 95 94 p.pages.Pipelines(w, pages.PipelinesParams{ 96 95 LoggedInUser: user, 97 - RepoInfo: repoInfo, 96 + RepoInfo: p.repoResolver.GetRepoInfo(r, user), 98 97 Pipelines: ps, 99 98 }) 100 99 } ··· 108 107 l.Error("failed to get repo and knot", "err", err) 109 108 return 110 109 } 111 - 112 - repoInfo := f.RepoInfo(user) 113 110 114 111 pipelineId := chi.URLParam(r, "pipeline") 115 112 if pipelineId == "" { ··· 126 123 ps, err := db.GetPipelineStatuses( 127 124 p.db, 128 125 1, 129 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 130 - db.FilterEq("repo_name", repoInfo.Name), 131 - db.FilterEq("knot", repoInfo.Knot), 132 - db.FilterEq("id", pipelineId), 126 + orm.FilterEq("repo_owner", f.Did), 127 + orm.FilterEq("repo_name", f.Name), 128 + orm.FilterEq("knot", f.Knot), 129 + orm.FilterEq("id", pipelineId), 133 130 ) 134 131 if err != nil { 135 132 l.Error("failed to query db", "err", err) ··· 145 142 146 143 p.pages.Workflow(w, pages.WorkflowParams{ 147 144 LoggedInUser: user, 148 - RepoInfo: repoInfo, 145 + RepoInfo: p.repoResolver.GetRepoInfo(r, user), 149 146 Pipeline: singlePipeline, 150 147 Workflow: workflow, 151 148 }) ··· 176 173 ctx, cancel := context.WithCancel(r.Context()) 177 174 defer cancel() 178 175 179 - user := p.oauth.GetUser(r) 180 176 f, err := p.repoResolver.Resolve(r) 181 177 if err != nil { 182 178 l.Error("failed to get repo and knot", "err", err) ··· 184 180 return 185 181 } 186 182 187 - repoInfo := f.RepoInfo(user) 188 - 189 183 pipelineId := chi.URLParam(r, "pipeline") 190 184 workflow := chi.URLParam(r, "workflow") 191 185 if pipelineId == "" || workflow == "" { ··· 196 190 ps, err := db.GetPipelineStatuses( 197 191 p.db, 198 192 1, 199 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 200 - db.FilterEq("repo_name", repoInfo.Name), 201 - db.FilterEq("knot", repoInfo.Knot), 202 - db.FilterEq("id", pipelineId), 193 + orm.FilterEq("repo_owner", f.Did), 194 + orm.FilterEq("repo_name", f.Name), 195 + orm.FilterEq("knot", f.Knot), 196 + orm.FilterEq("id", pipelineId), 203 197 ) 204 198 if err != nil || len(ps) != 1 { 205 199 l.Error("pipeline query failed", "err", err, "count", len(ps)) ··· 208 202 } 209 203 210 204 singlePipeline := ps[0] 211 - spindle := repoInfo.Spindle 212 - knot := repoInfo.Knot 205 + spindle := f.Spindle 206 + knot := f.Knot 213 207 rkey := singlePipeline.Rkey 214 208 215 209 if spindle == "" || knot == "" || rkey == "" {
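These handlers now pass `orm.FilterEq`/`orm.FilterIn` values straight to `db.GetPipelineStatuses` instead of the older `db.Filter*` helpers. A hypothetical sketch of such a filter-builder pattern, with illustrative names and signatures only (not the actual `tangled.org/core/orm` API):

```go
package main

import (
	"fmt"
	"strings"
)

// filter is a single WHERE-clause fragment plus its bound arguments.
type filter struct {
	clause string
	args   []any
}

// filterEq matches a column against one value, e.g. "repo_owner = ?".
func filterEq(col string, val any) filter {
	return filter{clause: col + " = ?", args: []any{val}}
}

// filterIn matches a column against a set of values, e.g. "sha IN (?, ?)".
func filterIn(col string, vals []string) filter {
	ph := strings.TrimSuffix(strings.Repeat("?, ", len(vals)), ", ")
	args := make([]any, len(vals))
	for i, v := range vals {
		args[i] = v
	}
	return filter{clause: fmt.Sprintf("%s IN (%s)", col, ph), args: args}
}

// where joins filters into a single WHERE clause and argument list.
func where(filters ...filter) (string, []any) {
	var clauses []string
	var args []any
	for _, f := range filters {
		clauses = append(clauses, f.clause)
		args = append(args, f.args...)
	}
	return "WHERE " + strings.Join(clauses, " AND "), args
}

func main() {
	clause, args := where(
		filterEq("repo_owner", "did:plc:example"),
		filterEq("repo_name", "core"),
		filterEq("knot", "knot1.tangled.sh"),
		filterIn("sha", []string{"abc123", "def456"}),
	)
	// WHERE repo_owner = ? AND repo_name = ? AND knot = ? AND sha IN (?, ?)
	fmt.Println(clause)
	fmt.Println(args)
}
```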
+3 -2
appview/pulls/opengraph.go
··· 13 13 "tangled.org/core/appview/db" 14 14 "tangled.org/core/appview/models" 15 15 "tangled.org/core/appview/ogcard" 16 + "tangled.org/core/orm" 16 17 "tangled.org/core/patchutil" 17 18 "tangled.org/core/types" 18 19 ) ··· 276 277 } 277 278 278 279 // Get comment count from database 279 - comments, err := db.GetPullComments(s.db, db.FilterEq("pull_id", pull.ID)) 280 + comments, err := db.GetComments(s.db, orm.FilterEq("subject_at", pull.AtUri())) 280 281 if err != nil { 281 282 log.Printf("failed to get pull comments: %v", err) 282 283 } ··· 293 294 filesChanged = niceDiff.Stat.FilesChanged 294 295 } 295 296 296 - card, err := s.drawPullSummaryCard(pull, &f.Repo, commentCount, diffStats, filesChanged) 297 + card, err := s.drawPullSummaryCard(pull, f, commentCount, diffStats, filesChanged) 297 298 if err != nil { 298 299 log.Println("failed to draw pull summary card", err) 299 300 http.Error(w, "failed to draw pull summary card", http.StatusInternalServerError)
+214 -197
appview/pulls/pulls.go
··· 1 1 package pulls 2 2 3 3 import ( 4 + "context" 4 5 "database/sql" 5 6 "encoding/json" 6 7 "errors" ··· 18 19 "tangled.org/core/appview/config" 19 20 "tangled.org/core/appview/db" 20 21 pulls_indexer "tangled.org/core/appview/indexer/pulls" 22 + "tangled.org/core/appview/mentions" 21 23 "tangled.org/core/appview/models" 22 24 "tangled.org/core/appview/notify" 23 25 "tangled.org/core/appview/oauth" 24 26 "tangled.org/core/appview/pages" 25 27 "tangled.org/core/appview/pages/markup" 28 + "tangled.org/core/appview/pages/repoinfo" 26 29 "tangled.org/core/appview/reporesolver" 27 30 "tangled.org/core/appview/validator" 28 31 "tangled.org/core/appview/xrpcclient" 29 32 "tangled.org/core/idresolver" 33 + "tangled.org/core/orm" 30 34 "tangled.org/core/patchutil" 31 35 "tangled.org/core/rbac" 32 36 "tangled.org/core/tid" ··· 41 45 ) 42 46 43 47 type Pulls struct { 44 - oauth *oauth.OAuth 45 - repoResolver *reporesolver.RepoResolver 46 - pages *pages.Pages 47 - idResolver *idresolver.Resolver 48 - db *db.DB 49 - config *config.Config 50 - notifier notify.Notifier 51 - enforcer *rbac.Enforcer 52 - logger *slog.Logger 53 - validator *validator.Validator 54 - indexer *pulls_indexer.Indexer 48 + oauth *oauth.OAuth 49 + repoResolver *reporesolver.RepoResolver 50 + pages *pages.Pages 51 + idResolver *idresolver.Resolver 52 + mentionsResolver *mentions.Resolver 53 + db *db.DB 54 + config *config.Config 55 + notifier notify.Notifier 56 + enforcer *rbac.Enforcer 57 + logger *slog.Logger 58 + validator *validator.Validator 59 + indexer *pulls_indexer.Indexer 55 60 } 56 61 57 62 func New( ··· 59 64 repoResolver *reporesolver.RepoResolver, 60 65 pages *pages.Pages, 61 66 resolver *idresolver.Resolver, 67 + mentionsResolver *mentions.Resolver, 62 68 db *db.DB, 63 69 config *config.Config, 64 70 notifier notify.Notifier, ··· 68 74 logger *slog.Logger, 69 75 ) *Pulls { 70 76 return &Pulls{ 71 - oauth: oauth, 72 - repoResolver: repoResolver, 73 - pages: pages, 74 - idResolver: resolver, 75 - db: db, 76 - config: config, 77 - notifier: notifier, 78 - enforcer: enforcer, 79 - logger: logger, 80 - validator: validator, 81 - indexer: indexer, 77 + oauth: oauth, 78 + repoResolver: repoResolver, 79 + pages: pages, 80 + idResolver: resolver, 81 + mentionsResolver: mentionsResolver, 82 + db: db, 83 + config: config, 84 + notifier: notifier, 85 + enforcer: enforcer, 86 + logger: logger, 87 + validator: validator, 88 + indexer: indexer, 82 89 } 83 90 } 84 91 ··· 123 130 124 131 s.pages.PullActionsFragment(w, pages.PullActionsParams{ 125 132 LoggedInUser: user, 126 - RepoInfo: f.RepoInfo(user), 133 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 127 134 Pull: pull, 128 135 RoundNumber: roundNumber, 129 136 MergeCheck: mergeCheckResponse, ··· 150 157 return 151 158 } 152 159 160 + backlinks, err := db.GetBacklinks(s.db, pull.AtUri()) 161 + if err != nil { 162 + log.Println("failed to get pull backlinks", err) 163 + s.pages.Notice(w, "pull-error", "Failed to get pull. 
Try again later.") 164 + return 165 + } 166 + 153 167 // can be nil if this pull is not stacked 154 168 stack, _ := r.Context().Value("stack").(models.Stack) 155 169 abandonedPulls, _ := r.Context().Value("abandonedPulls").([]*models.Pull) ··· 160 174 if user != nil && user.Did == pull.OwnerDid { 161 175 resubmitResult = s.resubmitCheck(r, f, pull, stack) 162 176 } 163 - 164 - repoInfo := f.RepoInfo(user) 165 177 166 178 m := make(map[string]models.Pipeline) 167 179 ··· 179 191 ps, err := db.GetPipelineStatuses( 180 192 s.db, 181 193 len(shas), 182 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 183 - db.FilterEq("repo_name", repoInfo.Name), 184 - db.FilterEq("knot", repoInfo.Knot), 185 - db.FilterIn("sha", shas), 194 + orm.FilterEq("repo_owner", f.Did), 195 + orm.FilterEq("repo_name", f.Name), 196 + orm.FilterEq("knot", f.Knot), 197 + orm.FilterIn("sha", shas), 186 198 ) 187 199 if err != nil { 188 200 log.Printf("failed to fetch pipeline statuses: %s", err) ··· 206 218 207 219 labelDefs, err := db.GetLabelDefinitions( 208 220 s.db, 209 - db.FilterIn("at_uri", f.Repo.Labels), 210 - db.FilterContains("scope", tangled.RepoPullNSID), 221 + orm.FilterIn("at_uri", f.Labels), 222 + orm.FilterContains("scope", tangled.RepoPullNSID), 211 223 ) 212 224 if err != nil { 213 225 log.Println("failed to fetch labels", err) ··· 222 234 223 235 s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{ 224 236 LoggedInUser: user, 225 - RepoInfo: repoInfo, 237 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 226 238 Pull: pull, 227 239 Stack: stack, 228 240 AbandonedPulls: abandonedPulls, 241 + Backlinks: backlinks, 229 242 BranchDeleteStatus: branchDeleteStatus, 230 243 MergeCheck: mergeCheckResponse, 231 244 ResubmitCheck: resubmitResult, ··· 239 252 }) 240 253 } 241 254 242 - func (s *Pulls) mergeCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse { 255 + func (s *Pulls) mergeCheck(r *http.Request, f *models.Repo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse { 243 256 if pull.State == models.PullMerged { 244 257 return types.MergeCheckResponse{} 245 258 } ··· 268 281 r.Context(), 269 282 &xrpcc, 270 283 &tangled.RepoMergeCheck_Input{ 271 - Did: f.OwnerDid(), 284 + Did: f.Did, 272 285 Name: f.Name, 273 286 Branch: pull.TargetBranch, 274 287 Patch: patch, ··· 306 319 return result 307 320 } 308 321 309 - func (s *Pulls) branchDeleteStatus(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull) *models.BranchDeleteStatus { 322 + func (s *Pulls) branchDeleteStatus(r *http.Request, repo *models.Repo, pull *models.Pull) *models.BranchDeleteStatus { 310 323 if pull.State != models.PullMerged { 311 324 return nil 312 325 } ··· 317 330 } 318 331 319 332 var branch string 320 - var repo *models.Repo 321 333 // check if the branch exists 322 334 // NOTE: appview could cache branches/tags etc. 
for every repo by listening for gitRefUpdates 323 335 if pull.IsBranchBased() { 324 336 branch = pull.PullSource.Branch 325 - repo = &f.Repo 326 337 } else if pull.IsForkBased() { 327 338 branch = pull.PullSource.Branch 328 339 repo = pull.PullSource.Repo ··· 361 372 } 362 373 } 363 374 364 - func (s *Pulls) resubmitCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) pages.ResubmitResult { 375 + func (s *Pulls) resubmitCheck(r *http.Request, repo *models.Repo, pull *models.Pull, stack models.Stack) pages.ResubmitResult { 365 376 if pull.State == models.PullMerged || pull.State == models.PullDeleted || pull.PullSource == nil { 366 377 return pages.Unknown 367 378 } ··· 381 392 repoName = sourceRepo.Name 382 393 } else { 383 394 // pulls within the same repo 384 - knot = f.Knot 385 - ownerDid = f.OwnerDid() 386 - repoName = f.Name 395 + knot = repo.Knot 396 + ownerDid = repo.Did 397 + repoName = repo.Name 387 398 } 388 399 389 400 scheme := "http" ··· 395 406 Host: host, 396 407 } 397 408 398 - repo := fmt.Sprintf("%s/%s", ownerDid, repoName) 399 - branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, repo) 409 + didSlashName := fmt.Sprintf("%s/%s", ownerDid, repoName) 410 + branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, didSlashName) 400 411 if err != nil { 401 412 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 402 413 log.Println("failed to call XRPC repo.branches", xrpcerr) ··· 424 435 425 436 func (s *Pulls) RepoPullPatch(w http.ResponseWriter, r *http.Request) { 426 437 user := s.oauth.GetUser(r) 427 - f, err := s.repoResolver.Resolve(r) 428 - if err != nil { 429 - log.Println("failed to get repo and knot", err) 430 - return 431 - } 432 438 433 439 var diffOpts types.DiffOpts 434 440 if d := r.URL.Query().Get("diff"); d == "split" { ··· 457 463 458 464 s.pages.RepoPullPatchPage(w, pages.RepoPullPatchParams{ 459 465 LoggedInUser: user, 460 - RepoInfo: f.RepoInfo(user), 466 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 461 467 Pull: pull, 462 468 Stack: stack, 463 469 Round: roundIdInt, ··· 470 476 471 477 func (s *Pulls) RepoPullInterdiff(w http.ResponseWriter, r *http.Request) { 472 478 user := s.oauth.GetUser(r) 473 - 474 - f, err := s.repoResolver.Resolve(r) 475 - if err != nil { 476 - log.Println("failed to get repo and knot", err) 477 - return 478 - } 479 479 480 480 var diffOpts types.DiffOpts 481 481 if d := r.URL.Query().Get("diff"); d == "split" { ··· 521 521 522 522 s.pages.RepoPullInterdiffPage(w, pages.RepoPullInterdiffParams{ 523 523 LoggedInUser: s.oauth.GetUser(r), 524 - RepoInfo: f.RepoInfo(user), 524 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 525 525 Pull: pull, 526 526 Round: roundIdInt, 527 527 Interdiff: interdiff, ··· 598 598 599 599 pulls, err := db.GetPulls( 600 600 s.db, 601 - db.FilterIn("id", ids), 601 + orm.FilterIn("id", ids), 602 602 ) 603 603 if err != nil { 604 604 log.Println("failed to get pulls", err) ··· 646 646 } 647 647 pulls = pulls[:n] 648 648 649 - repoInfo := f.RepoInfo(user) 650 649 ps, err := db.GetPipelineStatuses( 651 650 s.db, 652 651 len(shas), 653 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 654 - db.FilterEq("repo_name", repoInfo.Name), 655 - db.FilterEq("knot", repoInfo.Knot), 656 - db.FilterIn("sha", shas), 652 + orm.FilterEq("repo_owner", f.Did), 653 + orm.FilterEq("repo_name", f.Name), 654 + orm.FilterEq("knot", f.Knot), 655 + orm.FilterIn("sha", shas), 657 656 ) 658 657 if err != nil { 659 658 
log.Printf("failed to fetch pipeline statuses: %s", err) ··· 666 665 667 666 labelDefs, err := db.GetLabelDefinitions( 668 667 s.db, 669 - db.FilterIn("at_uri", f.Repo.Labels), 670 - db.FilterContains("scope", tangled.RepoPullNSID), 668 + orm.FilterIn("at_uri", f.Labels), 669 + orm.FilterContains("scope", tangled.RepoPullNSID), 671 670 ) 672 671 if err != nil { 673 672 log.Println("failed to fetch labels", err) ··· 682 681 683 682 s.pages.RepoPulls(w, pages.RepoPullsParams{ 684 683 LoggedInUser: s.oauth.GetUser(r), 685 - RepoInfo: f.RepoInfo(user), 684 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 686 685 Pulls: pulls, 687 686 LabelDefs: defs, 688 687 FilteringBy: state, ··· 693 692 } 694 693 695 694 func (s *Pulls) PullComment(w http.ResponseWriter, r *http.Request) { 696 - l := s.logger.With("handler", "PullComment") 697 695 user := s.oauth.GetUser(r) 698 696 f, err := s.repoResolver.Resolve(r) 699 697 if err != nil { ··· 720 718 case http.MethodGet: 721 719 s.pages.PullNewCommentFragment(w, pages.PullNewCommentParams{ 722 720 LoggedInUser: user, 723 - RepoInfo: f.RepoInfo(user), 721 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 724 722 Pull: pull, 725 723 RoundNumber: roundNumber, 726 724 }) ··· 732 730 return 733 731 } 734 732 733 + mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 734 + 735 735 // Start a transaction 736 736 tx, err := s.db.BeginTx(r.Context(), nil) 737 737 if err != nil { ··· 741 741 } 742 742 defer tx.Rollback() 743 743 744 - createdAt := time.Now().Format(time.RFC3339) 744 + comment := models.Comment{ 745 + Did: syntax.DID(user.Did), 746 + Rkey: tid.TID(), 747 + Subject: pull.AtUri(), 748 + ReplyTo: nil, 749 + Body: body, 750 + Created: time.Now(), 751 + Mentions: mentions, 752 + References: references, 753 + PullSubmissionId: &pull.Submissions[roundNumber].ID, 754 + } 755 + if err = comment.Validate(); err != nil { 756 + log.Println("failed to validate comment", err) 757 + s.pages.Notice(w, "pull-comment", "Failed to create comment.") 758 + return 759 + } 760 + record := comment.AsRecord() 745 761 746 762 client, err := s.oauth.AuthorizedClient(r) 747 763 if err != nil { ··· 749 765 s.pages.Notice(w, "pull-comment", "Failed to create comment.") 750 766 return 751 767 } 752 - atResp, err := comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 753 - Collection: tangled.RepoPullCommentNSID, 768 + _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 769 + Collection: tangled.CommentNSID, 754 770 Repo: user.Did, 755 - Rkey: tid.TID(), 771 + Rkey: comment.Rkey, 756 772 Record: &lexutil.LexiconTypeDecoder{ 757 - Val: &tangled.RepoPullComment{ 758 - Pull: pull.AtUri().String(), 759 - Body: body, 760 - CreatedAt: createdAt, 761 - }, 773 + Val: &record, 762 774 }, 763 775 }) 764 776 if err != nil { ··· 767 779 return 768 780 } 769 781 770 - comment := &models.PullComment{ 771 - OwnerDid: user.Did, 772 - RepoAt: f.RepoAt().String(), 773 - PullId: pull.PullId, 774 - Body: body, 775 - CommentAt: atResp.Uri, 776 - SubmissionId: pull.Submissions[roundNumber].ID, 777 - } 778 - 779 782 // Create the pull comment in the database with the commentAt field 780 - commentId, err := db.NewPullComment(tx, comment) 783 + err = db.PutComment(tx, &comment) 781 784 if err != nil { 782 785 log.Println("failed to create pull comment", err) 783 786 s.pages.Notice(w, "pull-comment", "Failed to create comment.") ··· 791 794 return 792 795 } 793 796 794 - rawMentions := markup.FindUserMentions(comment.Body) 795 - 
idents := s.idResolver.ResolveIdents(r.Context(), rawMentions) 796 - l.Debug("parsed mentions", "raw", rawMentions, "idents", idents) 797 - var mentions []syntax.DID 798 - for _, ident := range idents { 799 - if ident != nil && !ident.Handle.IsInvalidHandle() { 800 - mentions = append(mentions, ident.DID) 801 - } 802 - } 803 - s.notifier.NewPullComment(r.Context(), comment, mentions) 797 + s.notifier.NewComment(r.Context(), &comment) 804 798 805 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", f.OwnerSlashRepo(), pull.PullId, commentId)) 799 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 800 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", ownerSlashRepo, pull.PullId, comment.Id)) 806 801 return 807 802 } 808 803 } ··· 826 821 Host: host, 827 822 } 828 823 829 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 824 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 830 825 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 831 826 if err != nil { 832 827 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 853 848 854 849 s.pages.RepoNewPull(w, pages.RepoNewPullParams{ 855 850 LoggedInUser: user, 856 - RepoInfo: f.RepoInfo(user), 851 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 857 852 Branches: result.Branches, 858 853 Strategy: strategy, 859 854 SourceBranch: sourceBranch, ··· 876 871 } 877 872 878 873 // Determine PR type based on input parameters 879 - isPushAllowed := f.RepoInfo(user).Roles.IsPushAllowed() 874 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 875 + isPushAllowed := roles.IsPushAllowed() 880 876 isBranchBased := isPushAllowed && sourceBranch != "" && fromFork == "" 881 877 isForkBased := fromFork != "" && sourceBranch != "" 882 878 isPatchBased := patch != "" && !isBranchBased && !isForkBased ··· 974 970 func (s *Pulls) handleBranchBasedPull( 975 971 w http.ResponseWriter, 976 972 r *http.Request, 977 - f *reporesolver.ResolvedRepo, 973 + repo *models.Repo, 978 974 user *oauth.User, 979 975 title, 980 976 body, ··· 986 982 if !s.config.Core.Dev { 987 983 scheme = "https" 988 984 } 989 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 985 + host := fmt.Sprintf("%s://%s", scheme, repo.Knot) 990 986 xrpcc := &indigoxrpc.Client{ 991 987 Host: host, 992 988 } 993 989 994 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 995 - xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, targetBranch, sourceBranch) 990 + didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 991 + xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, didSlashRepo, targetBranch, sourceBranch) 996 992 if err != nil { 997 993 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 998 994 log.Println("failed to call XRPC repo.compare", xrpcerr) ··· 1029 1025 Sha: comparison.Rev2, 1030 1026 } 1031 1027 1032 - s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1028 + s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1033 1029 } 1034 1030 1035 - func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) { 1031 + func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, title, body, targetBranch, patch string, isStacked 
bool) { 1036 1032 if err := s.validator.ValidatePatch(&patch); err != nil { 1037 1033 s.logger.Error("patch validation failed", "err", err) 1038 1034 s.pages.Notice(w, "pull", "Invalid patch format. Please provide a valid diff.") 1039 1035 return 1040 1036 } 1041 1037 1042 - s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked) 1038 + s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked) 1043 1039 } 1044 1040 1045 - func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) { 1041 + func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) { 1046 1042 repoString := strings.SplitN(forkRepo, "/", 2) 1047 1043 forkOwnerDid := repoString[0] 1048 1044 repoName := repoString[1] ··· 1144 1140 Sha: sourceRev, 1145 1141 } 1146 1142 1147 - s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1143 + s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1148 1144 } 1149 1145 1150 1146 func (s *Pulls) createPullRequest( 1151 1147 w http.ResponseWriter, 1152 1148 r *http.Request, 1153 - f *reporesolver.ResolvedRepo, 1149 + repo *models.Repo, 1154 1150 user *oauth.User, 1155 1151 title, body, targetBranch string, 1156 1152 patch string, ··· 1165 1161 s.createStackedPullRequest( 1166 1162 w, 1167 1163 r, 1168 - f, 1164 + repo, 1169 1165 user, 1170 1166 targetBranch, 1171 1167 patch, ··· 1211 1207 } 1212 1208 } 1213 1209 1210 + mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 1211 + 1214 1212 rkey := tid.TID() 1215 1213 initialSubmission := models.PullSubmission{ 1216 1214 Patch: patch, ··· 1222 1220 Body: body, 1223 1221 TargetBranch: targetBranch, 1224 1222 OwnerDid: user.Did, 1225 - RepoAt: f.RepoAt(), 1223 + RepoAt: repo.RepoAt(), 1226 1224 Rkey: rkey, 1225 + Mentions: mentions, 1226 + References: references, 1227 1227 Submissions: []*models.PullSubmission{ 1228 1228 &initialSubmission, 1229 1229 }, ··· 1235 1235 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1236 1236 return 1237 1237 } 1238 - pullId, err := db.NextPullId(tx, f.RepoAt()) 1238 + pullId, err := db.NextPullId(tx, repo.RepoAt()) 1239 1239 if err != nil { 1240 1240 log.Println("failed to get pull id", err) 1241 1241 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1242 1242 return 1243 1243 } 1244 1244 1245 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 1246 + if err != nil { 1247 + log.Println("failed to upload patch", err) 1248 + s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1249 + return 1250 + } 1251 + 1245 1252 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1246 1253 Collection: tangled.RepoPullNSID, 1247 1254 Repo: user.Did, ··· 1250 1257 Val: &tangled.RepoPull{ 1251 1258 Title: title, 1252 1259 Target: &tangled.RepoPull_Target{ 1253 - Repo: string(f.RepoAt()), 1260 + Repo: string(repo.RepoAt()), 1254 1261 Branch: targetBranch, 1255 1262 }, 1256 - Patch: patch, 1263 + PatchBlob: blob.Blob, 1257 1264 Source: recordPullSource, 1258 1265 CreatedAt: time.Now().Format(time.RFC3339), 1259 1266 }, ··· 1273 1280 1274 1281 s.notifier.NewPull(r.Context(), pull) 1275 1282 1276 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pullId)) 1283 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 1284 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pullId)) 1277 1285 } 1278 1286 1279 1287 func (s *Pulls) createStackedPullRequest( 1280 1288 w http.ResponseWriter, 1281 1289 r *http.Request, 1282 - f *reporesolver.ResolvedRepo, 1290 + repo *models.Repo, 1283 1291 user *oauth.User, 1284 1292 targetBranch string, 1285 1293 patch string, ··· 1311 1319 1312 1320 // build a stack out of this patch 1313 1321 stackId := uuid.New() 1314 - stack, err := newStack(f, user, targetBranch, patch, pullSource, stackId.String()) 1322 + stack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pullSource, stackId.String()) 1315 1323 if err != nil { 1316 1324 log.Println("failed to create stack", err) 1317 1325 s.pages.Notice(w, "pull", fmt.Sprintf("Failed to create stack: %v", err)) ··· 1328 1336 // apply all record creations at once 1329 1337 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1330 1338 for _, p := range stack { 1339 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch())) 1340 + if err != nil { 1341 + log.Println("failed to upload patch blob", err) 1342 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1343 + return 1344 + } 1345 + 1331 1346 record := p.AsRecord() 1332 - write := comatproto.RepoApplyWrites_Input_Writes_Elem{ 1347 + record.PatchBlob = blob.Blob 1348 + writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 1333 1349 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 1334 1350 Collection: tangled.RepoPullNSID, 1335 1351 Rkey: &p.Rkey, ··· 1337 1353 Val: &record, 1338 1354 }, 1339 1355 }, 1340 - } 1341 - writes = append(writes, &write) 1356 + }) 1342 1357 } 1343 1358 _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{ 1344 1359 Repo: user.Did, ··· 1366 1381 s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1367 1382 return 1368 1383 } 1384 + 1369 1385 } 1370 1386 1371 1387 if err = tx.Commit(); err != nil { ··· 1374 1390 return 1375 1391 } 1376 1392 1377 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", f.OwnerSlashRepo())) 1393 + // notify about each pull 1394 + // 1395 + // this is performed after tx.Commit, because it could result in a locked DB otherwise 1396 + for _, p := range stack { 1397 + s.notifier.NewPull(r.Context(), p) 1398 + } 1399 + 1400 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 1401 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", ownerSlashRepo)) 1378 1402 } 1379 1403 1380 1404 func (s *Pulls) ValidatePatch(w http.ResponseWriter, r *http.Request) { ··· 1405 1429 1406 1430 func (s *Pulls) PatchUploadFragment(w http.ResponseWriter, r *http.Request) { 1407 1431 user := s.oauth.GetUser(r) 1408 - f, err := s.repoResolver.Resolve(r) 1409 - if err != nil { 1410 - log.Println("failed to get repo and knot", err) 1411 - return 1412 - } 1413 1432 1414 1433 s.pages.PullPatchUploadFragment(w, pages.PullPatchUploadParams{ 1415 - RepoInfo: f.RepoInfo(user), 1434 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1416 1435 }) 1417 1436 } 1418 1437 ··· 1433 1452 Host: host, 1434 1453 } 1435 1454 1436 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 1455 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1437 1456 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 1438 1457 if err != nil { 1439 1458 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 1466 1485 } 1467 1486 1468 1487 s.pages.PullCompareBranchesFragment(w, pages.PullCompareBranchesParams{ 1469 - RepoInfo: f.RepoInfo(user), 1488 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1470 1489 Branches: withoutDefault, 1471 1490 }) 1472 1491 } 1473 1492 1474 1493 func (s *Pulls) CompareForksFragment(w http.ResponseWriter, r *http.Request) { 1475 1494 user := s.oauth.GetUser(r) 1476 - f, err := s.repoResolver.Resolve(r) 1477 - if err != nil { 1478 - log.Println("failed to get repo and knot", err) 1479 - return 1480 - } 1481 1495 1482 1496 forks, err := db.GetForksByDid(s.db, user.Did) 1483 1497 if err != nil { ··· 1486 1500 } 1487 1501 1488 1502 s.pages.PullCompareForkFragment(w, pages.PullCompareForkParams{ 1489 - RepoInfo: f.RepoInfo(user), 1503 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1490 1504 Forks: forks, 1491 1505 Selected: r.URL.Query().Get("fork"), 1492 1506 }) ··· 1508 1522 // fork repo 1509 1523 repo, err := db.GetRepo( 1510 1524 s.db, 1511 - db.FilterEq("did", forkOwnerDid), 1512 - db.FilterEq("name", forkName), 1525 + orm.FilterEq("did", forkOwnerDid), 1526 + orm.FilterEq("name", forkName), 1513 1527 ) 1514 1528 if err != nil { 1515 1529 log.Println("failed to get repo", "did", forkOwnerDid, "name", forkName, "err", err) ··· 1554 1568 Host: targetHost, 1555 1569 } 1556 1570 1557 - targetRepo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 1571 + targetRepo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1558 1572 targetXrpcBytes, err := tangled.RepoBranches(r.Context(), targetXrpcc, "", 0, targetRepo) 1559 1573 if err != nil { 1560 1574 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 1579 1593 }) 1580 1594 1581 1595 s.pages.PullCompareForkBranchesFragment(w, pages.PullCompareForkBranchesParams{ 1582 - RepoInfo: f.RepoInfo(user), 1596 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1583 1597 SourceBranches: sourceBranches.Branches, 1584 1598 TargetBranches: targetBranches.Branches, 1585 1599 }) ··· 1587 1601 1588 1602 func (s *Pulls) 
ResubmitPull(w http.ResponseWriter, r *http.Request) { 1589 1603 user := s.oauth.GetUser(r) 1590 - f, err := s.repoResolver.Resolve(r) 1591 - if err != nil { 1592 - log.Println("failed to get repo and knot", err) 1593 - return 1594 - } 1595 1604 1596 1605 pull, ok := r.Context().Value("pull").(*models.Pull) 1597 1606 if !ok { ··· 1603 1612 switch r.Method { 1604 1613 case http.MethodGet: 1605 1614 s.pages.PullResubmitFragment(w, pages.PullResubmitParams{ 1606 - RepoInfo: f.RepoInfo(user), 1615 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1607 1616 Pull: pull, 1608 1617 }) 1609 1618 return ··· 1670 1679 return 1671 1680 } 1672 1681 1673 - if !f.RepoInfo(user).Roles.IsPushAllowed() { 1682 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 1683 + if !roles.IsPushAllowed() { 1674 1684 log.Println("unauthorized user") 1675 1685 w.WriteHeader(http.StatusUnauthorized) 1676 1686 return ··· 1685 1695 Host: host, 1686 1696 } 1687 1697 1688 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 1698 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1689 1699 xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, pull.TargetBranch, pull.PullSource.Branch) 1690 1700 if err != nil { 1691 1701 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 1812 1822 func (s *Pulls) resubmitPullHelper( 1813 1823 w http.ResponseWriter, 1814 1824 r *http.Request, 1815 - f *reporesolver.ResolvedRepo, 1825 + repo *models.Repo, 1816 1826 user *oauth.User, 1817 1827 pull *models.Pull, 1818 1828 patch string, ··· 1821 1831 ) { 1822 1832 if pull.IsStacked() { 1823 1833 log.Println("resubmitting stacked PR") 1824 - s.resubmitStackedPullHelper(w, r, f, user, pull, patch, pull.StackId) 1834 + s.resubmitStackedPullHelper(w, r, repo, user, pull, patch, pull.StackId) 1825 1835 return 1826 1836 } 1827 1837 ··· 1876 1886 return 1877 1887 } 1878 1888 1879 - var recordPullSource *tangled.RepoPull_Source 1880 - if pull.IsBranchBased() { 1881 - recordPullSource = &tangled.RepoPull_Source{ 1882 - Branch: pull.PullSource.Branch, 1883 - Sha: sourceRev, 1884 - } 1889 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 1890 + if err != nil { 1891 + log.Println("failed to upload patch blob", err) 1892 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 1893 + return 1885 1894 } 1886 - if pull.IsForkBased() { 1887 - repoAt := pull.PullSource.RepoAt.String() 1888 - recordPullSource = &tangled.RepoPull_Source{ 1889 - Branch: pull.PullSource.Branch, 1890 - Repo: &repoAt, 1891 - Sha: sourceRev, 1892 - } 1893 - } 1895 + record := pull.AsRecord() 1896 + record.PatchBlob = blob.Blob 1897 + record.CreatedAt = time.Now().Format(time.RFC3339) 1894 1898 1895 1899 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1896 1900 Collection: tangled.RepoPullNSID, ··· 1898 1902 Rkey: pull.Rkey, 1899 1903 SwapRecord: ex.Cid, 1900 1904 Record: &lexutil.LexiconTypeDecoder{ 1901 - Val: &tangled.RepoPull{ 1902 - Title: pull.Title, 1903 - Target: &tangled.RepoPull_Target{ 1904 - Repo: string(f.RepoAt()), 1905 - Branch: pull.TargetBranch, 1906 - }, 1907 - Patch: patch, // new patch 1908 - Source: recordPullSource, 1909 - CreatedAt: time.Now().Format(time.RFC3339), 1910 - }, 1905 + Val: &record, 1911 1906 }, 1912 1907 }) 1913 1908 if err != nil { ··· 1922 1917 return 1923 1918 } 1924 1919 1925 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 1920 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 1921 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 1926 1922 } 1927 1923 1928 1924 func (s *Pulls) resubmitStackedPullHelper( 1929 1925 w http.ResponseWriter, 1930 1926 r *http.Request, 1931 - f *reporesolver.ResolvedRepo, 1927 + repo *models.Repo, 1932 1928 user *oauth.User, 1933 1929 pull *models.Pull, 1934 1930 patch string, ··· 1937 1933 targetBranch := pull.TargetBranch 1938 1934 1939 1935 origStack, _ := r.Context().Value("stack").(models.Stack) 1940 - newStack, err := newStack(f, user, targetBranch, patch, pull.PullSource, stackId) 1936 + newStack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pull.PullSource, stackId) 1941 1937 if err != nil { 1942 1938 log.Println("failed to create resubmitted stack", err) 1943 1939 s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.") ··· 1992 1988 } 1993 1989 defer tx.Rollback() 1994 1990 1991 + client, err := s.oauth.AuthorizedClient(r) 1992 + if err != nil { 1993 + log.Println("failed to authorize client") 1994 + s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 1995 + return 1996 + } 1997 + 1995 1998 // pds updates to make 1996 1999 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1997 2000 ··· 2025 2028 return 2026 2029 } 2027 2030 2031 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 2032 + if err != nil { 2033 + log.Println("failed to upload patch blob", err) 2034 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.") 2035 + return 2036 + } 2028 2037 record := p.AsRecord() 2038 + record.PatchBlob = blob.Blob 2029 2039 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2030 2040 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 2031 2041 Collection: tangled.RepoPullNSID, ··· 2060 2070 return 2061 2071 } 2062 2072 2073 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 2074 + if err != nil { 2075 + log.Println("failed to upload patch blob", err) 2076 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 2077 + return 2078 + } 2063 2079 record := np.AsRecord() 2064 - 2080 + record.PatchBlob = blob.Blob 2065 2081 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2066 2082 RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{ 2067 2083 Collection: tangled.RepoPullNSID, ··· 2079 2095 tx, 2080 2096 p.ParentChangeId, 2081 2097 // these should be enough filters to be unique per-stack 2082 - db.FilterEq("repo_at", p.RepoAt.String()), 2083 - db.FilterEq("owner_did", p.OwnerDid), 2084 - db.FilterEq("change_id", p.ChangeId), 2098 + orm.FilterEq("repo_at", p.RepoAt.String()), 2099 + orm.FilterEq("owner_did", p.OwnerDid), 2100 + orm.FilterEq("change_id", p.ChangeId), 2085 2101 ) 2086 2102 2087 2103 if err != nil { ··· 2098 2114 return 2099 2115 } 2100 2116 2101 - client, err := s.oauth.AuthorizedClient(r) 2102 - if err != nil { 2103 - log.Println("failed to authorize client") 2104 - s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 2105 - return 2106 - } 2107 - 2108 2117 _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{ 2109 2118 Repo: user.Did, 2110 2119 Writes: writes, ··· 2115 2124 return 2116 2125 } 2117 2126 2118 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 2127 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 2128 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2119 2129 } 2120 2130 2121 2131 func (s *Pulls) MergePull(w http.ResponseWriter, r *http.Request) { ··· 2168 2178 2169 2179 authorName := ident.Handle.String() 2170 2180 mergeInput := &tangled.RepoMerge_Input{ 2171 - Did: f.OwnerDid(), 2181 + Did: f.Did, 2172 2182 Name: f.Name, 2173 2183 Branch: pull.TargetBranch, 2174 2184 Patch: patch, ··· 2233 2243 s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p) 2234 2244 } 2235 2245 2236 - s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId)) 2246 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 2247 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2237 2248 } 2238 2249 2239 2250 func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) { ··· 2253 2264 } 2254 2265 2255 2266 // auth filter: only owner or collaborators can close 2256 - roles := f.RolesInRepo(user) 2267 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 2257 2268 isOwner := roles.IsOwner() 2258 2269 isCollaborator := roles.IsCollaborator() 2259 2270 isPullAuthor := user.Did == pull.OwnerDid ··· 2305 2316 s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p) 2306 2317 } 2307 2318 2308 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 2319 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 2320 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2309 2321 } 2310 2322 2311 2323 func (s *Pulls) ReopenPull(w http.ResponseWriter, r *http.Request) { ··· 2326 2338 } 2327 2339 2328 2340 // auth filter: only owner or collaborators can close 2329 - roles := f.RolesInRepo(user) 2341 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 2330 2342 isOwner := roles.IsOwner() 2331 2343 isCollaborator := roles.IsCollaborator() 2332 2344 isPullAuthor := user.Did == pull.OwnerDid ··· 2378 2390 s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p) 2379 2391 } 
2380 2392 2381 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 2393 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 2394 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2382 2395 } 2383 2396 2384 - func newStack(f *reporesolver.ResolvedRepo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) { 2397 + func (s *Pulls) newStack(ctx context.Context, repo *models.Repo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) { 2385 2398 formatPatches, err := patchutil.ExtractPatches(patch) 2386 2399 if err != nil { 2387 2400 return nil, fmt.Errorf("Failed to extract patches: %v", err) ··· 2406 2419 body := fp.Body 2407 2420 rkey := tid.TID() 2408 2421 2422 + mentions, references := s.mentionsResolver.Resolve(ctx, body) 2423 + 2409 2424 initialSubmission := models.PullSubmission{ 2410 2425 Patch: fp.Raw, 2411 2426 SourceRev: fp.SHA, ··· 2416 2431 Body: body, 2417 2432 TargetBranch: targetBranch, 2418 2433 OwnerDid: user.Did, 2419 - RepoAt: f.RepoAt(), 2434 + RepoAt: repo.RepoAt(), 2420 2435 Rkey: rkey, 2436 + Mentions: mentions, 2437 + References: references, 2421 2438 Submissions: []*models.PullSubmission{ 2422 2439 &initialSubmission, 2423 2440 },
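Across these pull handlers, query predicates move from the db package's filter helpers to tangled.org/core/orm (orm.FilterEq, orm.FilterIn, orm.FilterContains), and the call sites now read fields directly off models.Repo (f.Did, f.Name, f.Knot) instead of going through a resolved RepoInfo. The orm package itself is not part of this diff, so the toy version below only illustrates the call shape seen at these sites; the lowercase names are deliberate and none of this is the real implementation.

    package main

    import (
        "fmt"
        "strings"
    )

    // filter is a toy stand-in for the orm filters used above; it only
    // captures the call shape (column + value -> clause + args).
    type filter struct {
        clause string
        args   []any
    }

    func filterEq(col string, val any) filter {
        return filter{clause: fmt.Sprintf("%s = ?", col), args: []any{val}}
    }

    func filterIn(col string, vals []string) filter {
        ph := strings.TrimSuffix(strings.Repeat("?, ", len(vals)), ", ")
        args := make([]any, len(vals))
        for i, v := range vals {
            args[i] = v
        }
        return filter{clause: fmt.Sprintf("%s IN (%s)", col, ph), args: args}
    }

    // where joins filters into a WHERE clause plus its arguments, roughly
    // what a call like GetPipelineStatuses(db, n, filters...) would do
    // internally under this style of API.
    func where(filters ...filter) (string, []any) {
        var clauses []string
        var args []any
        for _, f := range filters {
            clauses = append(clauses, f.clause)
            args = append(args, f.args...)
        }
        return "WHERE " + strings.Join(clauses, " AND "), args
    }

    func main() {
        clause, args := where(
            filterEq("repo_owner", "did:plc:example"),
            filterEq("repo_name", "core"),
            filterIn("sha", []string{"abc123", "def456"}),
        )
        fmt.Println(clause, args)
    }
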
+3 -2
appview/repo/archive.go
··· 18 18 l := rp.logger.With("handler", "DownloadArchive") 19 19 ref := chi.URLParam(r, "ref") 20 20 ref, _ = url.PathUnescape(ref) 21 + ref = strings.TrimSuffix(ref, ".tar.gz") 21 22 f, err := rp.repoResolver.Resolve(r) 22 23 if err != nil { 23 24 l.Error("failed to get repo and knot", "err", err) ··· 31 32 xrpcc := &indigoxrpc.Client{ 32 33 Host: host, 33 34 } 34 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 35 - archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, repo) 35 + didSlashRepo := f.DidSlashRepo() 36 + archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, didSlashRepo) 36 37 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 37 38 l.Error("failed to call XRPC repo.archive", "err", xrpcerr) 38 39 rp.pages.Error503(w)
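The archive handler now strips a trailing ".tar.gz" from the ref URL parameter after unescaping it, and addresses the repository as did/name via f.DidSlashRepo(). A minimal stdlib sketch of that ref normalization (the helper name is illustrative, not from this repo):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // normalizeArchiveRef mirrors the handler above: unescape the ref path
    // segment, then drop the ".tar.gz" suffix that archive URLs carry.
    func normalizeArchiveRef(raw string) (string, error) {
        ref, err := url.PathUnescape(raw)
        if err != nil {
            return "", err
        }
        return strings.TrimSuffix(ref, ".tar.gz"), nil
    }

    func main() {
        ref, _ := normalizeArchiveRef("v1.2.3.tar.gz")
        fmt.Println(ref) // v1.2.3
    }
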
+21 -14
appview/repo/artifact.go
··· 14 14 "tangled.org/core/appview/db" 15 15 "tangled.org/core/appview/models" 16 16 "tangled.org/core/appview/pages" 17 - "tangled.org/core/appview/reporesolver" 18 17 "tangled.org/core/appview/xrpcclient" 18 + "tangled.org/core/orm" 19 19 "tangled.org/core/tid" 20 20 "tangled.org/core/types" 21 21 ··· 131 131 132 132 rp.pages.RepoArtifactFragment(w, pages.RepoArtifactParams{ 133 133 LoggedInUser: user, 134 - RepoInfo: f.RepoInfo(user), 134 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 135 135 Artifact: artifact, 136 136 }) 137 137 } ··· 156 156 157 157 artifacts, err := db.GetArtifact( 158 158 rp.db, 159 - db.FilterEq("repo_at", f.RepoAt()), 160 - db.FilterEq("tag", tag.Tag.Hash[:]), 161 - db.FilterEq("name", filename), 159 + orm.FilterEq("repo_at", f.RepoAt()), 160 + orm.FilterEq("tag", tag.Tag.Hash[:]), 161 + orm.FilterEq("name", filename), 162 162 ) 163 163 if err != nil { 164 164 log.Println("failed to get artifacts", err) ··· 174 174 175 175 artifact := artifacts[0] 176 176 177 - ownerPds := f.OwnerId.PDSEndpoint() 177 + ownerId, err := rp.idResolver.ResolveIdent(r.Context(), f.Did) 178 + if err != nil { 179 + log.Println("failed to resolve repo owner did", f.Did, err) 180 + http.Error(w, "repository owner not found", http.StatusNotFound) 181 + return 182 + } 183 + 184 + ownerPds := ownerId.PDSEndpoint() 178 185 url, _ := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", ownerPds)) 179 186 q := url.Query() 180 187 q.Set("cid", artifact.BlobCid.String()) ··· 228 235 229 236 artifacts, err := db.GetArtifact( 230 237 rp.db, 231 - db.FilterEq("repo_at", f.RepoAt()), 232 - db.FilterEq("tag", tag[:]), 233 - db.FilterEq("name", filename), 238 + orm.FilterEq("repo_at", f.RepoAt()), 239 + orm.FilterEq("tag", tag[:]), 240 + orm.FilterEq("name", filename), 234 241 ) 235 242 if err != nil { 236 243 log.Println("failed to get artifacts", err) ··· 270 277 defer tx.Rollback() 271 278 272 279 err = db.DeleteArtifact(tx, 273 - db.FilterEq("repo_at", f.RepoAt()), 274 - db.FilterEq("tag", artifact.Tag[:]), 275 - db.FilterEq("name", filename), 280 + orm.FilterEq("repo_at", f.RepoAt()), 281 + orm.FilterEq("tag", artifact.Tag[:]), 282 + orm.FilterEq("name", filename), 276 283 ) 277 284 if err != nil { 278 285 log.Println("failed to remove artifact record from db", err) ··· 290 297 w.Write([]byte{}) 291 298 } 292 299 293 - func (rp *Repo) resolveTag(ctx context.Context, f *reporesolver.ResolvedRepo, tagParam string) (*types.TagReference, error) { 300 + func (rp *Repo) resolveTag(ctx context.Context, f *models.Repo, tagParam string) (*types.TagReference, error) { 294 301 tagParam, err := url.QueryUnescape(tagParam) 295 302 if err != nil { 296 303 return nil, err ··· 305 312 Host: host, 306 313 } 307 314 308 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 315 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 309 316 xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 310 317 if err != nil { 311 318 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
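The artifact download handler now resolves the repository owner's identity at request time (rp.idResolver.ResolveIdent on f.Did) instead of relying on a pre-resolved OwnerId, and then fetches the artifact blob from the owner's PDS. com.atproto.sync.getBlob identifies a blob by the owning DID and the blob CID; a stdlib sketch of assembling that URL (all values are placeholders):

    package main

    import (
        "fmt"
        "net/url"
    )

    // blobURL builds the com.atproto.sync.getBlob URL on the owner's PDS,
    // as in the handler above, with the blob CID and owner DID as query
    // parameters.
    func blobURL(pdsEndpoint, did, cid string) (string, error) {
        u, err := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", pdsEndpoint))
        if err != nil {
            return "", err
        }
        q := u.Query()
        q.Set("cid", cid)
        q.Set("did", did)
        u.RawQuery = q.Encode()
        return u.String(), nil
    }

    func main() {
        u, _ := blobURL("https://pds.example.com", "did:plc:example", "bafyexamplecid")
        fmt.Println(u)
    }
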
+11 -9
appview/repo/blob.go
··· 54 54 xrpcc := &indigoxrpc.Client{ 55 55 Host: host, 56 56 } 57 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name) 57 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 58 58 resp, err := tangled.RepoBlob(r.Context(), xrpcc, filePath, false, ref, repo) 59 59 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 60 60 l.Error("failed to call XRPC repo.blob", "err", xrpcerr) ··· 62 62 return 63 63 } 64 64 65 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 66 + 65 67 // Use XRPC response directly instead of converting to internal types 66 68 var breadcrumbs [][]string 67 - breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))}) 69 + breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))}) 68 70 if filePath != "" { 69 71 for idx, elem := range strings.Split(filePath, "/") { 70 72 breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))}) ··· 78 80 79 81 rp.pages.RepoBlob(w, pages.RepoBlobParams{ 80 82 LoggedInUser: user, 81 - RepoInfo: f.RepoInfo(user), 83 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 82 84 BreadCrumbs: breadcrumbs, 83 85 BlobView: blobView, 84 86 RepoBlob_Output: resp, ··· 105 107 if !rp.config.Core.Dev { 106 108 scheme = "https" 107 109 } 108 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name) 110 + repo := f.DidSlashRepo() 109 111 baseURL := &url.URL{ 110 112 Scheme: scheme, 111 113 Host: f.Knot, ··· 176 178 } 177 179 178 180 // NewBlobView creates a BlobView from the XRPC response 179 - func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, f *reporesolver.ResolvedRepo, ref, filePath string, queryParams url.Values) models.BlobView { 181 + func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, repo *models.Repo, ref, filePath string, queryParams url.Values) models.BlobView { 180 182 view := models.BlobView{ 181 183 Contents: "", 182 184 Lines: 0, ··· 198 200 199 201 // Determine if binary 200 202 if resp.IsBinary != nil && *resp.IsBinary { 201 - view.ContentSrc = generateBlobURL(config, f, ref, filePath) 203 + view.ContentSrc = generateBlobURL(config, repo, ref, filePath) 202 204 ext := strings.ToLower(filepath.Ext(resp.Path)) 203 205 204 206 switch ext { ··· 250 252 return view 251 253 } 252 254 253 - func generateBlobURL(config *config.Config, f *reporesolver.ResolvedRepo, ref, filePath string) string { 255 + func generateBlobURL(config *config.Config, repo *models.Repo, ref, filePath string) string { 254 256 scheme := "http" 255 257 if !config.Core.Dev { 256 258 scheme = "https" 257 259 } 258 260 259 - repoName := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 261 + repoName := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 260 262 baseURL := &url.URL{ 261 263 Scheme: scheme, 262 - Host: f.Knot, 264 + Host: repo.Knot, 263 265 Path: "/xrpc/sh.tangled.repo.blob", 264 266 } 265 267 query := baseURL.Query()
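The blob page now derives its base path from reporesolver.GetBaseRepoPath rather than f.OwnerSlashRepo(): the first breadcrumb links to the tree root for the ref, and each path segment extends the previous crumb's href. A self-contained sketch of that accumulation, using the same [][]string{label, href} shape as above (function and parameter names are illustrative):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // breadcrumbs mirrors the construction above: start at the tree root
    // for the ref, then append one crumb per path segment, each building
    // on the previous crumb's href.
    func breadcrumbs(ownerSlashRepo, repoName, ref, filePath string) [][]string {
        crumbs := [][]string{{repoName, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))}}
        if filePath == "" {
            return crumbs
        }
        for idx, elem := range strings.Split(filePath, "/") {
            crumbs = append(crumbs, []string{elem, fmt.Sprintf("%s/%s", crumbs[idx][1], url.PathEscape(elem))})
        }
        return crumbs
    }

    func main() {
        for _, c := range breadcrumbs("alice.example/core", "core", "main", "appview/repo/blob.go") {
            fmt.Println(c[0], "->", c[1])
        }
    }
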
+2 -2
appview/repo/branches.go
··· 29 29 xrpcc := &indigoxrpc.Client{ 30 30 Host: host, 31 31 } 32 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 32 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 33 33 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 34 34 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 35 35 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) ··· 46 46 user := rp.oauth.GetUser(r) 47 47 rp.pages.RepoBranches(w, pages.RepoBranchesParams{ 48 48 LoggedInUser: user, 49 - RepoInfo: f.RepoInfo(user), 49 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 50 50 RepoBranchesResponse: result, 51 51 }) 52 52 }
+4 -8
appview/repo/compare.go
··· 36 36 Host: host, 37 37 } 38 38 39 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 39 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 40 40 branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 41 41 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 42 42 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) ··· 88 88 return 89 89 } 90 90 91 - repoinfo := f.RepoInfo(user) 92 - 93 91 rp.pages.RepoCompareNew(w, pages.RepoCompareNewParams{ 94 92 LoggedInUser: user, 95 - RepoInfo: repoinfo, 93 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 96 94 Branches: branches, 97 95 Tags: tags.Tags, 98 96 Base: base, ··· 151 149 Host: host, 152 150 } 153 151 154 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 152 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 155 153 156 154 branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 157 155 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 202 200 diff = patchutil.AsNiceDiff(formatPatch.FormatPatchRaw, base) 203 201 } 204 202 205 - repoinfo := f.RepoInfo(user) 206 - 207 203 rp.pages.RepoCompare(w, pages.RepoCompareParams{ 208 204 LoggedInUser: user, 209 - RepoInfo: repoinfo, 205 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 210 206 Branches: branches.Branches, 211 207 Tags: tags.Tags, 212 208 Base: base,
+24 -17
appview/repo/feed.go
··· 11 11 "tangled.org/core/appview/db" 12 12 "tangled.org/core/appview/models" 13 13 "tangled.org/core/appview/pagination" 14 - "tangled.org/core/appview/reporesolver" 14 + "tangled.org/core/orm" 15 15 16 + "github.com/bluesky-social/indigo/atproto/identity" 16 17 "github.com/bluesky-social/indigo/atproto/syntax" 17 18 "github.com/gorilla/feeds" 18 19 ) 19 20 20 - func (rp *Repo) getRepoFeed(ctx context.Context, f *reporesolver.ResolvedRepo) (*feeds.Feed, error) { 21 + func (rp *Repo) getRepoFeed(ctx context.Context, repo *models.Repo, ownerSlashRepo string) (*feeds.Feed, error) { 21 22 const feedLimitPerType = 100 22 23 23 - pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt())) 24 + pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, orm.FilterEq("repo_at", repo.RepoAt())) 24 25 if err != nil { 25 26 return nil, err 26 27 } ··· 28 29 issues, err := db.GetIssuesPaginated( 29 30 rp.db, 30 31 pagination.Page{Limit: feedLimitPerType}, 31 - db.FilterEq("repo_at", f.RepoAt()), 32 + orm.FilterEq("repo_at", repo.RepoAt()), 32 33 ) 33 34 if err != nil { 34 35 return nil, err 35 36 } 36 37 37 38 feed := &feeds.Feed{ 38 - Title: fmt.Sprintf("activity feed for %s", f.OwnerSlashRepo()), 39 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, f.OwnerSlashRepo()), Type: "text/html", Rel: "alternate"}, 39 + Title: fmt.Sprintf("activity feed for @%s", ownerSlashRepo), 40 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, ownerSlashRepo), Type: "text/html", Rel: "alternate"}, 40 41 Items: make([]*feeds.Item, 0), 41 42 Updated: time.UnixMilli(0), 42 43 } 43 44 44 45 for _, pull := range pulls { 45 - items, err := rp.createPullItems(ctx, pull, f) 46 + items, err := rp.createPullItems(ctx, pull, repo, ownerSlashRepo) 46 47 if err != nil { 47 48 return nil, err 48 49 } ··· 50 51 } 51 52 52 53 for _, issue := range issues { 53 - item, err := rp.createIssueItem(ctx, issue, f) 54 + item, err := rp.createIssueItem(ctx, issue, repo, ownerSlashRepo) 54 55 if err != nil { 55 56 return nil, err 56 57 } ··· 71 72 return feed, nil 72 73 } 73 74 74 - func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, f *reporesolver.ResolvedRepo) ([]*feeds.Item, error) { 75 + func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, repo *models.Repo, ownerSlashRepo string) ([]*feeds.Item, error) { 75 76 owner, err := rp.idResolver.ResolveIdent(ctx, pull.OwnerDid) 76 77 if err != nil { 77 78 return nil, err ··· 80 81 var items []*feeds.Item 81 82 82 83 state := rp.getPullState(pull) 83 - description := rp.buildPullDescription(owner.Handle, state, pull, f.OwnerSlashRepo()) 84 + description := rp.buildPullDescription(owner.Handle, state, pull, ownerSlashRepo) 84 85 85 86 mainItem := &feeds.Item{ 86 87 Title: fmt.Sprintf("[PR #%d] %s", pull.PullId, pull.Title), 87 88 Description: description, 88 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId)}, 89 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId)}, 89 90 Created: pull.Created, 90 91 Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)}, 91 92 } ··· 98 99 99 100 roundItem := &feeds.Item{ 100 101 Title: fmt.Sprintf("[PR #%d] %s (round #%d)", pull.PullId, pull.Title, round.RoundNumber), 101 - Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in %s", owner.Handle, round.RoundNumber, pull.PullId, 
f.OwnerSlashRepo()), 102 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId, round.RoundNumber)}, 102 + Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in @%s", owner.Handle, round.RoundNumber, pull.PullId, ownerSlashRepo), 103 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId, round.RoundNumber)}, 103 104 Created: round.Created, 104 105 Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)}, 105 106 } ··· 109 110 return items, nil 110 111 } 111 112 112 - func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, f *reporesolver.ResolvedRepo) (*feeds.Item, error) { 113 + func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, repo *models.Repo, ownerSlashRepo string) (*feeds.Item, error) { 113 114 owner, err := rp.idResolver.ResolveIdent(ctx, issue.Did) 114 115 if err != nil { 115 116 return nil, err ··· 122 123 123 124 return &feeds.Item{ 124 125 Title: fmt.Sprintf("[Issue #%d] %s", issue.IssueId, issue.Title), 125 - Description: fmt.Sprintf("@%s %s issue #%d in %s", owner.Handle, state, issue.IssueId, f.OwnerSlashRepo()), 126 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), issue.IssueId)}, 126 + Description: fmt.Sprintf("@%s %s issue #%d in @%s", owner.Handle, state, issue.IssueId, ownerSlashRepo), 127 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, ownerSlashRepo, issue.IssueId)}, 127 128 Created: issue.Created, 128 129 Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)}, 129 130 }, nil ··· 152 153 log.Println("failed to fully resolve repo:", err) 153 154 return 154 155 } 156 + repoOwnerId, ok := r.Context().Value("resolvedId").(identity.Identity) 157 + if !ok || repoOwnerId.Handle.IsInvalidHandle() { 158 + log.Println("failed to get resolved repo owner id") 159 + return 160 + } 161 + ownerSlashRepo := repoOwnerId.Handle.String() + "/" + f.Name 155 162 156 - feed, err := rp.getRepoFeed(r.Context(), f) 163 + feed, err := rp.getRepoFeed(r.Context(), f, ownerSlashRepo) 157 164 if err != nil { 158 165 log.Println("failed to get repo feed:", err) 159 166 rp.pages.Error500(w)
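The feed handler now builds the owner/repo string from the "resolvedId" identity placed in the request context (handle + "/" + f.Name) instead of calling f.OwnerSlashRepo(), and keeps assembling entries with gorilla/feeds. A generic gorilla/feeds sketch in the same shape as the items above (titles, hosts, and handles are placeholders, not this deployment's values):

    package main

    import (
        "fmt"
        "time"

        "github.com/gorilla/feeds"
    )

    func main() {
        ownerSlashRepo := "alice.example/core" // placeholder handle/repo

        feed := &feeds.Feed{
            Title:   fmt.Sprintf("activity feed for @%s", ownerSlashRepo),
            Link:    &feeds.Link{Href: "https://tangled.example/" + ownerSlashRepo, Type: "text/html", Rel: "alternate"},
            Updated: time.Now(),
        }

        feed.Items = append(feed.Items, &feeds.Item{
            Title:       "[PR #1] example pull",
            Description: "@alice.example opened PR #1 in @" + ownerSlashRepo,
            Link:        &feeds.Link{Href: "https://tangled.example/" + ownerSlashRepo + "/pulls/1"},
            Created:     time.Now(),
            Author:      &feeds.Author{Name: "@alice.example"},
        })

        // gorilla/feeds can render the same feed as Atom or RSS.
        atom, err := feed.ToAtom()
        if err != nil {
            panic(err)
        }
        fmt.Println(atom)
    }
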
+18 -19
appview/repo/index.go
··· 22 22 "tangled.org/core/appview/db" 23 23 "tangled.org/core/appview/models" 24 24 "tangled.org/core/appview/pages" 25 - "tangled.org/core/appview/reporesolver" 26 25 "tangled.org/core/appview/xrpcclient" 26 + "tangled.org/core/orm" 27 27 "tangled.org/core/types" 28 28 29 29 "github.com/go-chi/chi/v5" ··· 52 52 } 53 53 54 54 user := rp.oauth.GetUser(r) 55 - repoInfo := f.RepoInfo(user) 56 55 57 56 // Build index response from multiple XRPC calls 58 57 result, err := rp.buildIndexResponse(r.Context(), xrpcc, f, ref) ··· 62 61 rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 63 62 LoggedInUser: user, 64 63 NeedsKnotUpgrade: true, 65 - RepoInfo: repoInfo, 64 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 66 65 }) 67 66 return 68 67 } ··· 124 123 l.Error("failed to get email to did map", "err", err) 125 124 } 126 125 127 - vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, commitsTrunc) 126 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, commitsTrunc) 128 127 if err != nil { 129 128 l.Error("failed to GetVerifiedObjectCommits", "err", err) 130 129 } ··· 140 139 for _, c := range commitsTrunc { 141 140 shas = append(shas, c.Hash.String()) 142 141 } 143 - pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas) 142 + pipelines, err := getPipelineStatuses(rp.db, f, shas) 144 143 if err != nil { 145 144 l.Error("failed to fetch pipeline statuses", "err", err) 146 145 // non-fatal ··· 148 147 149 148 rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 150 149 LoggedInUser: user, 151 - RepoInfo: repoInfo, 150 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 152 151 TagMap: tagMap, 153 152 RepoIndexResponse: *result, 154 153 CommitsTrunc: commitsTrunc, ··· 165 164 func (rp *Repo) getLanguageInfo( 166 165 ctx context.Context, 167 166 l *slog.Logger, 168 - f *reporesolver.ResolvedRepo, 167 + repo *models.Repo, 169 168 xrpcc *indigoxrpc.Client, 170 169 currentRef string, 171 170 isDefaultRef bool, ··· 173 172 // first attempt to fetch from db 174 173 langs, err := db.GetRepoLanguages( 175 174 rp.db, 176 - db.FilterEq("repo_at", f.RepoAt()), 177 - db.FilterEq("ref", currentRef), 175 + orm.FilterEq("repo_at", repo.RepoAt()), 176 + orm.FilterEq("ref", currentRef), 178 177 ) 179 178 180 179 if err != nil || langs == nil { 181 180 // non-fatal, fetch langs from ks via XRPC 182 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 183 - ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, repo) 181 + didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 182 + ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, didSlashRepo) 184 183 if err != nil { 185 184 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 186 185 l.Error("failed to call XRPC repo.languages", "err", xrpcerr) ··· 195 194 196 195 for _, lang := range ls.Languages { 197 196 langs = append(langs, models.RepoLanguage{ 198 - RepoAt: f.RepoAt(), 197 + RepoAt: repo.RepoAt(), 199 198 Ref: currentRef, 200 199 IsDefaultRef: isDefaultRef, 201 200 Language: lang.Name, ··· 210 209 defer tx.Rollback() 211 210 212 211 // update appview's cache 213 - err = db.UpdateRepoLanguages(tx, f.RepoAt(), currentRef, langs) 212 + err = db.UpdateRepoLanguages(tx, repo.RepoAt(), currentRef, langs) 214 213 if err != nil { 215 214 // non-fatal 216 215 l.Error("failed to cache lang results", "err", err) ··· 255 254 } 256 255 257 256 // buildIndexResponse creates a RepoIndexResponse by combining multiple xrpc calls in parallel 258 - func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc 
*indigoxrpc.Client, f *reporesolver.ResolvedRepo, ref string) (*types.RepoIndexResponse, error) { 259 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 257 + func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, repo *models.Repo, ref string) (*types.RepoIndexResponse, error) { 258 + didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 260 259 261 260 // first get branches to determine the ref if not specified 262 - branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, repo) 261 + branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, didSlashRepo) 263 262 if err != nil { 264 263 return nil, fmt.Errorf("failed to call repoBranches: %w", err) 265 264 } ··· 303 302 wg.Add(1) 304 303 go func() { 305 304 defer wg.Done() 306 - tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 305 + tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, didSlashRepo) 307 306 if err != nil { 308 307 errs = errors.Join(errs, fmt.Errorf("failed to call repoTags: %w", err)) 309 308 return ··· 318 317 wg.Add(1) 319 318 go func() { 320 319 defer wg.Done() 321 - resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, repo) 320 + resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, didSlashRepo) 322 321 if err != nil { 323 322 errs = errors.Join(errs, fmt.Errorf("failed to call repoTree: %w", err)) 324 323 return ··· 330 329 wg.Add(1) 331 330 go func() { 332 331 defer wg.Done() 333 - logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, repo) 332 + logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, didSlashRepo) 334 333 if err != nil { 335 334 errs = errors.Join(errs, fmt.Errorf("failed to call repoLog: %w", err)) 336 335 return
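buildIndexResponse now takes a *models.Repo and, after the initial repoBranches call, fans out the repoTags, repoTree, and repoLog fetches concurrently, joining failures with errors.Join. A self-contained sketch of that fan-out shape; the task functions are placeholders, and a mutex is added here to guard the joined error against concurrent writes:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // fanOut runs each task concurrently and joins their errors, the same
    // shape as the tags/tree/log fetches above.
    func fanOut(tasks ...func() error) error {
        var (
            wg   sync.WaitGroup
            mu   sync.Mutex
            errs error
        )
        for _, task := range tasks {
            wg.Add(1)
            go func(task func() error) {
                defer wg.Done()
                if err := task(); err != nil {
                    mu.Lock()
                    errs = errors.Join(errs, err)
                    mu.Unlock()
                }
            }(task)
        }
        wg.Wait()
        return errs
    }

    func main() {
        err := fanOut(
            func() error { return nil },                                // e.g. fetch tags
            func() error { return nil },                                // e.g. fetch tree
            func() error { return errors.New("repoLog: unreachable") }, // e.g. fetch log
        )
        fmt.Println(err)
    }
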
+8 -11
appview/repo/log.go
··· 57 57 cursor = strconv.Itoa(offset) 58 58 } 59 59 60 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 60 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 61 61 xrpcBytes, err := tangled.RepoLog(r.Context(), xrpcc, cursor, limit, "", ref, repo) 62 62 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 63 63 l.Error("failed to call XRPC repo.log", "err", xrpcerr) ··· 116 116 l.Error("failed to fetch email to did mapping", "err", err) 117 117 } 118 118 119 - vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, xrpcResp.Commits) 119 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, xrpcResp.Commits) 120 120 if err != nil { 121 121 l.Error("failed to GetVerifiedObjectCommits", "err", err) 122 122 } 123 - 124 - repoInfo := f.RepoInfo(user) 125 123 126 124 var shas []string 127 125 for _, c := range xrpcResp.Commits { 128 126 shas = append(shas, c.Hash.String()) 129 127 } 130 - pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas) 128 + pipelines, err := getPipelineStatuses(rp.db, f, shas) 131 129 if err != nil { 132 130 l.Error("failed to getPipelineStatuses", "err", err) 133 131 // non-fatal ··· 136 134 rp.pages.RepoLog(w, pages.RepoLogParams{ 137 135 LoggedInUser: user, 138 136 TagMap: tagMap, 139 - RepoInfo: repoInfo, 137 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 140 138 RepoLogResponse: xrpcResp, 141 139 EmailToDid: emailToDidMap, 142 140 VerifiedCommits: vc, ··· 174 172 Host: host, 175 173 } 176 174 177 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 175 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 178 176 xrpcBytes, err := tangled.RepoDiff(r.Context(), xrpcc, ref, repo) 179 177 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 180 178 l.Error("failed to call XRPC repo.diff", "err", xrpcerr) ··· 194 192 l.Error("failed to get email to did mapping", "err", err) 195 193 } 196 194 197 - vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.NiceDiff{*result.Diff}) 195 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.Commit{result.Diff.Commit}) 198 196 if err != nil { 199 197 l.Error("failed to GetVerifiedCommits", "err", err) 200 198 } 201 199 202 200 user := rp.oauth.GetUser(r) 203 - repoInfo := f.RepoInfo(user) 204 - pipelines, err := getPipelineStatuses(rp.db, repoInfo, []string{result.Diff.Commit.This}) 201 + pipelines, err := getPipelineStatuses(rp.db, f, []string{result.Diff.Commit.This}) 205 202 if err != nil { 206 203 l.Error("failed to getPipelineStatuses", "err", err) 207 204 // non-fatal ··· 213 210 214 211 rp.pages.RepoCommit(w, pages.RepoCommitParams{ 215 212 LoggedInUser: user, 216 - RepoInfo: f.RepoInfo(user), 213 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 217 214 RepoCommitResponse: result, 218 215 EmailToDid: emailToDidMap, 219 216 VerifiedCommit: vc,
+4 -3
appview/repo/opengraph.go
··· 16 16 "tangled.org/core/appview/db" 17 17 "tangled.org/core/appview/models" 18 18 "tangled.org/core/appview/ogcard" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/types" 20 21 ) 21 22 ··· 338 339 var languageStats []types.RepoLanguageDetails 339 340 langs, err := db.GetRepoLanguages( 340 341 rp.db, 341 - db.FilterEq("repo_at", f.RepoAt()), 342 - db.FilterEq("is_default_ref", 1), 342 + orm.FilterEq("repo_at", f.RepoAt()), 343 + orm.FilterEq("is_default_ref", 1), 343 344 ) 344 345 if err != nil { 345 346 log.Printf("failed to get language stats from db: %v", err) ··· 374 375 }) 375 376 } 376 377 377 - card, err := rp.drawRepoSummaryCard(&f.Repo, languageStats) 378 + card, err := rp.drawRepoSummaryCard(f, languageStats) 378 379 if err != nil { 379 380 log.Println("failed to draw repo summary card", err) 380 381 http.Error(w, "failed to draw repo summary card", http.StatusInternalServerError)
+35 -35
appview/repo/repo.go
··· 24 24 xrpcclient "tangled.org/core/appview/xrpcclient" 25 25 "tangled.org/core/eventconsumer" 26 26 "tangled.org/core/idresolver" 27 + "tangled.org/core/orm" 27 28 "tangled.org/core/rbac" 28 29 "tangled.org/core/tid" 29 30 "tangled.org/core/xrpc/serviceauth" ··· 118 119 } 119 120 } 120 121 121 - newRepo := f.Repo 122 + newRepo := *f 122 123 newRepo.Spindle = newSpindle 123 124 record := newRepo.AsRecord() 124 125 ··· 257 258 l.Info("wrote label record to PDS") 258 259 259 260 // update the repo to subscribe to this label 260 - newRepo := f.Repo 261 + newRepo := *f 261 262 newRepo.Labels = append(newRepo.Labels, aturi) 262 263 repoRecord := newRepo.AsRecord() 263 264 ··· 345 346 // get form values 346 347 labelId := r.FormValue("label-id") 347 348 348 - label, err := db.GetLabelDefinition(rp.db, db.FilterEq("id", labelId)) 349 + label, err := db.GetLabelDefinition(rp.db, orm.FilterEq("id", labelId)) 349 350 if err != nil { 350 351 fail("Failed to find label definition.", err) 351 352 return ··· 369 370 } 370 371 371 372 // update repo record to remove the label reference 372 - newRepo := f.Repo 373 + newRepo := *f 373 374 var updated []string 374 375 removedAt := label.AtUri().String() 375 376 for _, l := range newRepo.Labels { ··· 409 410 410 411 err = db.UnsubscribeLabel( 411 412 tx, 412 - db.FilterEq("repo_at", f.RepoAt()), 413 - db.FilterEq("label_at", removedAt), 413 + orm.FilterEq("repo_at", f.RepoAt()), 414 + orm.FilterEq("label_at", removedAt), 414 415 ) 415 416 if err != nil { 416 417 fail("Failed to unsubscribe label.", err) 417 418 return 418 419 } 419 420 420 - err = db.DeleteLabelDefinition(tx, db.FilterEq("id", label.Id)) 421 + err = db.DeleteLabelDefinition(tx, orm.FilterEq("id", label.Id)) 421 422 if err != nil { 422 423 fail("Failed to delete label definition.", err) 423 424 return ··· 456 457 } 457 458 458 459 labelAts := r.Form["label"] 459 - _, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts)) 460 + _, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts)) 460 461 if err != nil { 461 462 fail("Failed to subscribe to label.", err) 462 463 return 463 464 } 464 465 465 - newRepo := f.Repo 466 + newRepo := *f 466 467 newRepo.Labels = append(newRepo.Labels, labelAts...) 
467 468 468 469 // dedup ··· 477 478 return 478 479 } 479 480 480 - ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey) 481 + ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey) 481 482 if err != nil { 482 483 fail("Failed to update labels, no record found on PDS.", err) 483 484 return ··· 542 543 } 543 544 544 545 labelAts := r.Form["label"] 545 - _, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts)) 546 + _, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts)) 546 547 if err != nil { 547 548 fail("Failed to unsubscribe to label.", err) 548 549 return 549 550 } 550 551 551 552 // update repo record to remove the label reference 552 - newRepo := f.Repo 553 + newRepo := *f 553 554 var updated []string 554 555 for _, l := range newRepo.Labels { 555 556 if !slices.Contains(labelAts, l) { ··· 565 566 return 566 567 } 567 568 568 - ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey) 569 + ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey) 569 570 if err != nil { 570 571 fail("Failed to update labels, no record found on PDS.", err) 571 572 return ··· 582 583 583 584 err = db.UnsubscribeLabel( 584 585 rp.db, 585 - db.FilterEq("repo_at", f.RepoAt()), 586 - db.FilterIn("label_at", labelAts), 586 + orm.FilterEq("repo_at", f.RepoAt()), 587 + orm.FilterIn("label_at", labelAts), 587 588 ) 588 589 if err != nil { 589 590 fail("Failed to unsubscribe label.", err) ··· 612 613 613 614 labelDefs, err := db.GetLabelDefinitions( 614 615 rp.db, 615 - db.FilterIn("at_uri", f.Repo.Labels), 616 - db.FilterContains("scope", subject.Collection().String()), 616 + orm.FilterIn("at_uri", f.Labels), 617 + orm.FilterContains("scope", subject.Collection().String()), 617 618 ) 618 619 if err != nil { 619 620 l.Error("failed to fetch label defs", "err", err) ··· 625 626 defs[l.AtUri().String()] = &l 626 627 } 627 628 628 - states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject)) 629 + states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject)) 629 630 if err != nil { 630 631 l.Error("failed to build label state", "err", err) 631 632 return ··· 635 636 user := rp.oauth.GetUser(r) 636 637 rp.pages.LabelPanel(w, pages.LabelPanelParams{ 637 638 LoggedInUser: user, 638 - RepoInfo: f.RepoInfo(user), 639 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 639 640 Defs: defs, 640 641 Subject: subject.String(), 641 642 State: state, ··· 660 661 661 662 labelDefs, err := db.GetLabelDefinitions( 662 663 rp.db, 663 - db.FilterIn("at_uri", f.Repo.Labels), 664 - db.FilterContains("scope", subject.Collection().String()), 664 + orm.FilterIn("at_uri", f.Labels), 665 + orm.FilterContains("scope", subject.Collection().String()), 665 666 ) 666 667 if err != nil { 667 668 l.Error("failed to fetch labels", "err", err) ··· 673 674 defs[l.AtUri().String()] = &l 674 675 } 675 676 676 - states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject)) 677 + states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject)) 677 678 if err != nil { 678 679 l.Error("failed to build label state", "err", err) 679 680 return ··· 683 684 user := rp.oauth.GetUser(r) 684 685 rp.pages.EditLabelPanel(w, pages.EditLabelPanelParams{ 685 686 LoggedInUser: user, 686 - RepoInfo: f.RepoInfo(user), 687 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 687 688 Defs: defs, 688 689 Subject: subject.String(), 689 690 State: 
state, ··· 864 865 r.Context(), 865 866 client, 866 867 &tangled.RepoDelete_Input{ 867 - Did: f.OwnerDid(), 868 + Did: f.Did, 868 869 Name: f.Name, 869 870 Rkey: f.Rkey, 870 871 }, ··· 902 903 l.Info("removed collaborators") 903 904 904 905 // remove repo RBAC 905 - err = rp.enforcer.RemoveRepo(f.OwnerDid(), f.Knot, f.DidSlashRepo()) 906 + err = rp.enforcer.RemoveRepo(f.Did, f.Knot, f.DidSlashRepo()) 906 907 if err != nil { 907 908 rp.pages.Notice(w, noticeId, "Failed to update RBAC rules") 908 909 return 909 910 } 910 911 911 912 // remove repo from db 912 - err = db.RemoveRepo(tx, f.OwnerDid(), f.Name) 913 + err = db.RemoveRepo(tx, f.Did, f.Name) 913 914 if err != nil { 914 915 rp.pages.Notice(w, noticeId, "Failed to update appview") 915 916 return ··· 930 931 return 931 932 } 932 933 933 - rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.OwnerDid())) 934 + rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.Did)) 934 935 } 935 936 936 937 func (rp *Repo) SyncRepoFork(w http.ResponseWriter, r *http.Request) { ··· 959 960 return 960 961 } 961 962 962 - repoInfo := f.RepoInfo(user) 963 - if repoInfo.Source == nil { 963 + if f.Source == "" { 964 964 rp.pages.Notice(w, "repo", "This repository is not a fork.") 965 965 return 966 966 } ··· 971 971 &tangled.RepoForkSync_Input{ 972 972 Did: user.Did, 973 973 Name: f.Name, 974 - Source: repoInfo.Source.RepoAt().String(), 974 + Source: f.Source, 975 975 Branch: ref, 976 976 }, 977 977 ) ··· 1007 1007 rp.pages.ForkRepo(w, pages.ForkRepoParams{ 1008 1008 LoggedInUser: user, 1009 1009 Knots: knots, 1010 - RepoInfo: f.RepoInfo(user), 1010 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 1011 1011 }) 1012 1012 1013 1013 case http.MethodPost: ··· 1037 1037 // in the user's account. 1038 1038 existingRepo, err := db.GetRepo( 1039 1039 rp.db, 1040 - db.FilterEq("did", user.Did), 1041 - db.FilterEq("name", forkName), 1040 + orm.FilterEq("did", user.Did), 1041 + orm.FilterEq("name", forkName), 1042 1042 ) 1043 1043 if err != nil { 1044 1044 if !errors.Is(err, sql.ErrNoRows) { ··· 1058 1058 uri = "http" 1059 1059 } 1060 1060 1061 - forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name) 1061 + forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.Did, f.Name) 1062 1062 l = l.With("cloneUrl", forkSourceUrl) 1063 1063 1064 1064 sourceAt := f.RepoAt().String() ··· 1071 1071 Knot: targetKnot, 1072 1072 Rkey: rkey, 1073 1073 Source: sourceAt, 1074 - Description: f.Repo.Description, 1074 + Description: f.Description, 1075 1075 Created: time.Now(), 1076 1076 Labels: rp.config.Label.DefaultLabelDefs, 1077 1077 }
+17 -19
appview/repo/repo_util.go
··· 1 1 package repo 2 2 3 3 import ( 4 + "maps" 4 5 "slices" 5 6 "sort" 6 7 "strings" 7 8 8 9 "tangled.org/core/appview/db" 9 10 "tangled.org/core/appview/models" 10 - "tangled.org/core/appview/pages/repoinfo" 11 + "tangled.org/core/orm" 11 12 "tangled.org/core/types" 12 - 13 - "github.com/go-git/go-git/v5/plumbing/object" 14 13 ) 15 14 16 15 func sortFiles(files []types.NiceTree) { ··· 43 42 }) 44 43 } 45 44 46 - func uniqueEmails(commits []*object.Commit) []string { 45 + func uniqueEmails(commits []types.Commit) []string { 47 46 emails := make(map[string]struct{}) 48 47 for _, commit := range commits { 49 - if commit.Author.Email != "" { 50 - emails[commit.Author.Email] = struct{}{} 51 - } 52 - if commit.Committer.Email != "" { 53 - emails[commit.Committer.Email] = struct{}{} 48 + emails[commit.Author.Email] = struct{}{} 49 + emails[commit.Committer.Email] = struct{}{} 50 + for _, c := range commit.CoAuthors() { 51 + emails[c.Email] = struct{}{} 54 52 } 55 53 } 56 - var uniqueEmails []string 57 - for email := range emails { 58 - uniqueEmails = append(uniqueEmails, email) 59 - } 60 - return uniqueEmails 54 + 55 + // delete empty emails if any, from the set 56 + delete(emails, "") 57 + 58 + return slices.Collect(maps.Keys(emails)) 61 59 } 62 60 63 61 func balanceIndexItems(commitCount, branchCount, tagCount, fileCount int) (commitsTrunc int, branchesTrunc int, tagsTrunc int) { ··· 93 91 // golang is so blessed that it requires 35 lines of imperative code for this 94 92 func getPipelineStatuses( 95 93 d *db.DB, 96 - repoInfo repoinfo.RepoInfo, 94 + repo *models.Repo, 97 95 shas []string, 98 96 ) (map[string]models.Pipeline, error) { 99 97 m := make(map[string]models.Pipeline) ··· 105 103 ps, err := db.GetPipelineStatuses( 106 104 d, 107 105 len(shas), 108 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 109 - db.FilterEq("repo_name", repoInfo.Name), 110 - db.FilterEq("knot", repoInfo.Knot), 111 - db.FilterIn("sha", shas), 106 + orm.FilterEq("repo_owner", repo.Did), 107 + orm.FilterEq("repo_name", repo.Name), 108 + orm.FilterEq("knot", repo.Knot), 109 + orm.FilterIn("sha", shas), 112 110 ) 113 111 if err != nil { 114 112 return nil, err
+40 -11
appview/repo/settings.go
··· 10 10 11 11 "tangled.org/core/api/tangled" 12 12 "tangled.org/core/appview/db" 13 + "tangled.org/core/appview/models" 13 14 "tangled.org/core/appview/oauth" 14 15 "tangled.org/core/appview/pages" 15 16 xrpcclient "tangled.org/core/appview/xrpcclient" 17 + "tangled.org/core/orm" 16 18 "tangled.org/core/types" 17 19 18 20 comatproto "github.com/bluesky-social/indigo/api/atproto" ··· 194 196 Host: host, 195 197 } 196 198 197 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 199 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 198 200 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 199 201 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 200 202 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) ··· 209 211 return 210 212 } 211 213 212 - defaultLabels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs)) 214 + defaultLabels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs)) 213 215 if err != nil { 214 216 l.Error("failed to fetch labels", "err", err) 215 217 rp.pages.Error503(w) 216 218 return 217 219 } 218 220 219 - labels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", f.Repo.Labels)) 221 + labels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", f.Labels)) 220 222 if err != nil { 221 223 l.Error("failed to fetch labels", "err", err) 222 224 rp.pages.Error503(w) ··· 237 239 labels = labels[:n] 238 240 239 241 subscribedLabels := make(map[string]struct{}) 240 - for _, l := range f.Repo.Labels { 242 + for _, l := range f.Labels { 241 243 subscribedLabels[l] = struct{}{} 242 244 } 243 245 ··· 254 256 255 257 rp.pages.RepoGeneralSettings(w, pages.RepoGeneralSettingsParams{ 256 258 LoggedInUser: user, 257 - RepoInfo: f.RepoInfo(user), 259 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 258 260 Branches: result.Branches, 259 261 Labels: labels, 260 262 DefaultLabels: defaultLabels, ··· 271 273 f, err := rp.repoResolver.Resolve(r) 272 274 user := rp.oauth.GetUser(r) 273 275 274 - repoCollaborators, err := f.Collaborators(r.Context()) 276 + collaborators, err := func(repo *models.Repo) ([]pages.Collaborator, error) { 277 + repoCollaborators, err := rp.enforcer.E.GetImplicitUsersForResourceByDomain(repo.DidSlashRepo(), repo.Knot) 278 + if err != nil { 279 + return nil, err 280 + } 281 + var collaborators []pages.Collaborator 282 + for _, item := range repoCollaborators { 283 + // currently only two roles: owner and member 284 + var role string 285 + switch item[3] { 286 + case "repo:owner": 287 + role = "owner" 288 + case "repo:collaborator": 289 + role = "collaborator" 290 + default: 291 + continue 292 + } 293 + 294 + did := item[0] 295 + 296 + c := pages.Collaborator{ 297 + Did: did, 298 + Role: role, 299 + } 300 + collaborators = append(collaborators, c) 301 + } 302 + return collaborators, nil 303 + }(f) 275 304 if err != nil { 276 305 l.Error("failed to get collaborators", "err", err) 277 306 } 278 307 279 308 rp.pages.RepoAccessSettings(w, pages.RepoAccessSettingsParams{ 280 309 LoggedInUser: user, 281 - RepoInfo: f.RepoInfo(user), 310 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 282 311 Tabs: settingsTabs, 283 312 Tab: "access", 284 - Collaborators: repoCollaborators, 313 + Collaborators: collaborators, 285 314 }) 286 315 } 287 316 ··· 292 321 user := rp.oauth.GetUser(r) 293 322 294 323 // all spindles that the repo owner is a member of 295 - spindles, err := rp.enforcer.GetSpindlesForUser(f.OwnerDid()) 324 + spindles, err := 
rp.enforcer.GetSpindlesForUser(f.Did) 296 325 if err != nil { 297 326 l.Error("failed to fetch spindles", "err", err) 298 327 return ··· 339 368 340 369 rp.pages.RepoPipelineSettings(w, pages.RepoPipelineSettingsParams{ 341 370 LoggedInUser: user, 342 - RepoInfo: f.RepoInfo(user), 371 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 343 372 Tabs: settingsTabs, 344 373 Tab: "pipelines", 345 374 Spindles: spindles, ··· 388 417 } 389 418 l.Debug("got", "topicsStr", topicStr, "topics", topics) 390 419 391 - newRepo := f.Repo 420 + newRepo := *f 392 421 newRepo.Description = description 393 422 newRepo.Website = website 394 423 newRepo.Topics = topics
+4 -3
appview/repo/tags.go
··· 10 10 "tangled.org/core/appview/models" 11 11 "tangled.org/core/appview/pages" 12 12 xrpcclient "tangled.org/core/appview/xrpcclient" 13 + "tangled.org/core/orm" 13 14 "tangled.org/core/types" 14 15 15 16 indigoxrpc "github.com/bluesky-social/indigo/xrpc" ··· 31 32 xrpcc := &indigoxrpc.Client{ 32 33 Host: host, 33 34 } 34 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 35 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 35 36 xrpcBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 0, repo) 36 37 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 37 38 l.Error("failed to call XRPC repo.tags", "err", xrpcerr) ··· 44 45 rp.pages.Error503(w) 45 46 return 46 47 } 47 - artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt())) 48 + artifacts, err := db.GetArtifact(rp.db, orm.FilterEq("repo_at", f.RepoAt())) 48 49 if err != nil { 49 50 l.Error("failed grab artifacts", "err", err) 50 51 return ··· 71 72 user := rp.oauth.GetUser(r) 72 73 rp.pages.RepoTags(w, pages.RepoTagsParams{ 73 74 LoggedInUser: user, 74 - RepoInfo: f.RepoInfo(user), 75 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 75 76 RepoTagsResponse: result, 76 77 ArtifactMap: artifactMap, 77 78 DanglingArtifacts: danglingArtifacts,
+6 -4
appview/repo/tree.go
··· 9 9 10 10 "tangled.org/core/api/tangled" 11 11 "tangled.org/core/appview/pages" 12 + "tangled.org/core/appview/reporesolver" 12 13 xrpcclient "tangled.org/core/appview/xrpcclient" 13 14 "tangled.org/core/types" 14 15 ··· 39 40 xrpcc := &indigoxrpc.Client{ 40 41 Host: host, 41 42 } 42 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 43 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 43 44 xrpcResp, err := tangled.RepoTree(r.Context(), xrpcc, treePath, ref, repo) 44 45 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 45 46 l.Error("failed to call XRPC repo.tree", "err", xrpcerr) ··· 79 80 result.ReadmeFileName = xrpcResp.Readme.Filename 80 81 result.Readme = xrpcResp.Readme.Contents 81 82 } 83 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 82 84 // redirects tree paths trying to access a blob; in this case the result.Files is unpopulated, 83 85 // so we can safely redirect to the "parent" (which is the same file). 84 86 if len(result.Files) == 0 && result.Parent == treePath { 85 - redirectTo := fmt.Sprintf("/%s/blob/%s/%s", f.OwnerSlashRepo(), url.PathEscape(ref), result.Parent) 87 + redirectTo := fmt.Sprintf("/%s/blob/%s/%s", ownerSlashRepo, url.PathEscape(ref), result.Parent) 86 88 http.Redirect(w, r, redirectTo, http.StatusFound) 87 89 return 88 90 } 89 91 user := rp.oauth.GetUser(r) 90 92 var breadcrumbs [][]string 91 - breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))}) 93 + breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))}) 92 94 if treePath != "" { 93 95 for idx, elem := range strings.Split(treePath, "/") { 94 96 breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))}) ··· 100 102 LoggedInUser: user, 101 103 BreadCrumbs: breadcrumbs, 102 104 TreePath: treePath, 103 - RepoInfo: f.RepoInfo(user), 105 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 104 106 RepoTreeResponse: result, 105 107 }) 106 108 }
+98 -161
appview/reporesolver/resolver.go
··· 1 1 package reporesolver 2 2 3 3 import ( 4 - "context" 5 - "database/sql" 6 - "errors" 7 4 "fmt" 8 5 "log" 9 6 "net/http" ··· 12 9 "strings" 13 10 14 11 "github.com/bluesky-social/indigo/atproto/identity" 15 - securejoin "github.com/cyphar/filepath-securejoin" 16 12 "github.com/go-chi/chi/v5" 17 13 "tangled.org/core/appview/config" 18 14 "tangled.org/core/appview/db" 19 15 "tangled.org/core/appview/models" 20 16 "tangled.org/core/appview/oauth" 21 - "tangled.org/core/appview/pages" 22 17 "tangled.org/core/appview/pages/repoinfo" 23 - "tangled.org/core/idresolver" 24 18 "tangled.org/core/rbac" 25 19 ) 26 20 27 - type ResolvedRepo struct { 28 - models.Repo 29 - OwnerId identity.Identity 30 - CurrentDir string 31 - Ref string 32 - 33 - rr *RepoResolver 21 + type RepoResolver struct { 22 + config *config.Config 23 + enforcer *rbac.Enforcer 24 + execer db.Execer 34 25 } 35 26 36 - type RepoResolver struct { 37 - config *config.Config 38 - enforcer *rbac.Enforcer 39 - idResolver *idresolver.Resolver 40 - execer db.Execer 27 + func New(config *config.Config, enforcer *rbac.Enforcer, execer db.Execer) *RepoResolver { 28 + return &RepoResolver{config: config, enforcer: enforcer, execer: execer} 41 29 } 42 30 43 - func New(config *config.Config, enforcer *rbac.Enforcer, resolver *idresolver.Resolver, execer db.Execer) *RepoResolver { 44 - return &RepoResolver{config: config, enforcer: enforcer, idResolver: resolver, execer: execer} 31 + // NOTE: this... should not even be here. the entire package will be removed in future refactor 32 + func GetBaseRepoPath(r *http.Request, repo *models.Repo) string { 33 + var ( 34 + user = chi.URLParam(r, "user") 35 + name = chi.URLParam(r, "repo") 36 + ) 37 + if user == "" || name == "" { 38 + return repo.DidSlashRepo() 39 + } 40 + return path.Join(user, name) 45 41 } 46 42 47 - func (rr *RepoResolver) Resolve(r *http.Request) (*ResolvedRepo, error) { 43 + // TODO: move this out of `RepoResolver` struct 44 + func (rr *RepoResolver) Resolve(r *http.Request) (*models.Repo, error) { 48 45 repo, ok := r.Context().Value("repo").(*models.Repo) 49 46 if !ok { 50 47 log.Println("malformed middleware: `repo` not exist in context") 51 48 return nil, fmt.Errorf("malformed middleware") 52 49 } 53 - id, ok := r.Context().Value("resolvedId").(identity.Identity) 54 - if !ok { 55 - log.Println("malformed middleware") 56 - return nil, fmt.Errorf("malformed middleware") 57 - } 58 50 59 - currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath())) 60 - ref := chi.URLParam(r, "ref") 61 - 62 - return &ResolvedRepo{ 63 - Repo: *repo, 64 - OwnerId: id, 65 - CurrentDir: currentDir, 66 - Ref: ref, 67 - 68 - rr: rr, 69 - }, nil 70 - } 71 - 72 - func (f *ResolvedRepo) OwnerDid() string { 73 - return f.OwnerId.DID.String() 74 - } 75 - 76 - func (f *ResolvedRepo) OwnerHandle() string { 77 - return f.OwnerId.Handle.String() 51 + return repo, nil 78 52 } 79 53 80 - func (f *ResolvedRepo) OwnerSlashRepo() string { 81 - handle := f.OwnerId.Handle 82 - 83 - var p string 84 - if handle != "" && !handle.IsInvalidHandle() { 85 - p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.Name) 86 - } else { 87 - p, _ = securejoin.SecureJoin(f.OwnerDid(), f.Name) 54 + // 1. [x] replace `RepoInfo` to `reporesolver.GetRepoInfo(r *http.Request, repo, user)` 55 + // 2. [x] remove `rr`, `CurrentDir`, `Ref` fields from `ResolvedRepo` 56 + // 3. [x] remove `ResolvedRepo` 57 + // 4. 
[ ] replace reporesolver to reposervice 58 + func (rr *RepoResolver) GetRepoInfo(r *http.Request, user *oauth.User) repoinfo.RepoInfo { 59 + ownerId, ook := r.Context().Value("resolvedId").(identity.Identity) 60 + repo, rok := r.Context().Value("repo").(*models.Repo) 61 + if !ook || !rok { 62 + log.Println("malformed request, failed to get repo from context") 88 63 } 89 64 90 - return p 91 - } 65 + // get dir/ref 66 + currentDir := extractCurrentDir(r.URL.EscapedPath()) 67 + ref := chi.URLParam(r, "ref") 92 68 93 - func (f *ResolvedRepo) Collaborators(ctx context.Context) ([]pages.Collaborator, error) { 94 - repoCollaborators, err := f.rr.enforcer.E.GetImplicitUsersForResourceByDomain(f.DidSlashRepo(), f.Knot) 95 - if err != nil { 96 - return nil, err 69 + repoAt := repo.RepoAt() 70 + isStarred := false 71 + roles := repoinfo.RolesInRepo{} 72 + if user != nil { 73 + isStarred = db.GetStarStatus(rr.execer, user.Did, repoAt) 74 + roles.Roles = rr.enforcer.GetPermissionsInRepo(user.Did, repo.Knot, repo.DidSlashRepo()) 97 75 } 98 76 99 - var collaborators []pages.Collaborator 100 - for _, item := range repoCollaborators { 101 - // currently only two roles: owner and member 102 - var role string 103 - switch item[3] { 104 - case "repo:owner": 105 - role = "owner" 106 - case "repo:collaborator": 107 - role = "collaborator" 108 - default: 109 - continue 77 + stats := repo.RepoStats 78 + if stats == nil { 79 + starCount, err := db.GetStarCount(rr.execer, repoAt) 80 + if err != nil { 81 + log.Println("failed to get star count for ", repoAt) 110 82 } 111 - 112 - did := item[0] 113 - 114 - c := pages.Collaborator{ 115 - Did: did, 116 - Handle: "", 117 - Role: role, 83 + issueCount, err := db.GetIssueCount(rr.execer, repoAt) 84 + if err != nil { 85 + log.Println("failed to get issue count for ", repoAt) 118 86 } 119 - collaborators = append(collaborators, c) 120 - } 121 - 122 - // populate all collborators with handles 123 - identsToResolve := make([]string, len(collaborators)) 124 - for i, collab := range collaborators { 125 - identsToResolve[i] = collab.Did 126 - } 127 - 128 - resolvedIdents := f.rr.idResolver.ResolveIdents(ctx, identsToResolve) 129 - for i, resolved := range resolvedIdents { 130 - if resolved != nil { 131 - collaborators[i].Handle = resolved.Handle.String() 87 + pullCount, err := db.GetPullCount(rr.execer, repoAt) 88 + if err != nil { 89 + log.Println("failed to get pull count for ", repoAt) 90 + } 91 + stats = &models.RepoStats{ 92 + StarCount: starCount, 93 + IssueCount: issueCount, 94 + PullCount: pullCount, 132 95 } 133 96 } 134 97 135 - return collaborators, nil 136 - } 137 - 138 - // this function is a bit weird since it now returns RepoInfo from an entirely different 139 - // package. we should refactor this or get rid of RepoInfo entirely. 
140 - func (f *ResolvedRepo) RepoInfo(user *oauth.User) repoinfo.RepoInfo { 141 - repoAt := f.RepoAt() 142 - isStarred := false 143 - if user != nil { 144 - isStarred = db.GetStarStatus(f.rr.execer, user.Did, repoAt) 145 - } 146 - 147 - starCount, err := db.GetStarCount(f.rr.execer, repoAt) 148 - if err != nil { 149 - log.Println("failed to get star count for ", repoAt) 150 - } 151 - issueCount, err := db.GetIssueCount(f.rr.execer, repoAt) 152 - if err != nil { 153 - log.Println("failed to get issue count for ", repoAt) 154 - } 155 - pullCount, err := db.GetPullCount(f.rr.execer, repoAt) 156 - if err != nil { 157 - log.Println("failed to get issue count for ", repoAt) 158 - } 159 - source, err := db.GetRepoSource(f.rr.execer, repoAt) 160 - if errors.Is(err, sql.ErrNoRows) { 161 - source = "" 162 - } else if err != nil { 163 - log.Println("failed to get repo source for ", repoAt, err) 164 - } 165 - 166 98 var sourceRepo *models.Repo 167 - if source != "" { 168 - sourceRepo, err = db.GetRepoByAtUri(f.rr.execer, source) 99 + var err error 100 + if repo.Source != "" { 101 + sourceRepo, err = db.GetRepoByAtUri(rr.execer, repo.Source) 169 102 if err != nil { 170 103 log.Println("failed to get repo by at uri", err) 171 104 } 172 105 } 173 106 174 - var sourceHandle *identity.Identity 175 - if sourceRepo != nil { 176 - sourceHandle, err = f.rr.idResolver.ResolveIdent(context.Background(), sourceRepo.Did) 177 - if err != nil { 178 - log.Println("failed to resolve source repo", err) 179 - } 180 - } 107 + repoInfo := repoinfo.RepoInfo{ 108 + // this is basically a models.Repo 109 + OwnerDid: ownerId.DID.String(), 110 + OwnerHandle: ownerId.Handle.String(), 111 + Name: repo.Name, 112 + Rkey: repo.Rkey, 113 + Description: repo.Description, 114 + Website: repo.Website, 115 + Topics: repo.Topics, 116 + Knot: repo.Knot, 117 + Spindle: repo.Spindle, 118 + Stats: *stats, 181 119 182 - knot := f.Knot 120 + // fork repo upstream 121 + Source: sourceRepo, 183 122 184 - repoInfo := repoinfo.RepoInfo{ 185 - OwnerDid: f.OwnerDid(), 186 - OwnerHandle: f.OwnerHandle(), 187 - Name: f.Name, 188 - Rkey: f.Repo.Rkey, 189 - RepoAt: repoAt, 190 - Description: f.Description, 191 - Website: f.Website, 192 - Topics: f.Topics, 193 - IsStarred: isStarred, 194 - Knot: knot, 195 - Spindle: f.Spindle, 196 - Roles: f.RolesInRepo(user), 197 - Stats: models.RepoStats{ 198 - StarCount: starCount, 199 - IssueCount: issueCount, 200 - PullCount: pullCount, 201 - }, 202 - CurrentDir: f.CurrentDir, 203 - Ref: f.Ref, 204 - } 123 + // page context 124 + CurrentDir: currentDir, 125 + Ref: ref, 205 126 206 - if sourceRepo != nil { 207 - repoInfo.Source = sourceRepo 208 - repoInfo.SourceHandle = sourceHandle.Handle.String() 127 + // info related to the session 128 + IsStarred: isStarred, 129 + Roles: roles, 209 130 } 210 131 211 132 return repoInfo 212 133 } 213 134 214 - func (f *ResolvedRepo) RolesInRepo(u *oauth.User) repoinfo.RolesInRepo { 215 - if u != nil { 216 - r := f.rr.enforcer.GetPermissionsInRepo(u.Did, f.Knot, f.DidSlashRepo()) 217 - return repoinfo.RolesInRepo{Roles: r} 218 - } else { 219 - return repoinfo.RolesInRepo{} 135 + // extractCurrentDir gets the current directory for markdown link resolution. 136 + // for blob paths, returns the parent dir. for tree paths, returns the path itself. 
137 + // 138 + // /@user/repo/blob/main/docs/README.md => docs 139 + // /@user/repo/tree/main/docs => docs 140 + func extractCurrentDir(fullPath string) string { 141 + fullPath = strings.TrimPrefix(fullPath, "/") 142 + 143 + blobPattern := regexp.MustCompile(`blob/[^/]+/(.*)$`) 144 + if matches := blobPattern.FindStringSubmatch(fullPath); len(matches) > 1 { 145 + return path.Dir(matches[1]) 220 146 } 147 + 148 + treePattern := regexp.MustCompile(`tree/[^/]+/(.*)$`) 149 + if matches := treePattern.FindStringSubmatch(fullPath); len(matches) > 1 { 150 + dir := strings.TrimSuffix(matches[1], "/") 151 + if dir == "" { 152 + return "." 153 + } 154 + return dir 155 + } 156 + 157 + return "." 221 158 } 222 159 223 160 // extractPathAfterRef gets the actual repository path
+22
appview/reporesolver/resolver_test.go
··· 1 + package reporesolver 2 + 3 + import "testing" 4 + 5 + func TestExtractCurrentDir(t *testing.T) { 6 + tests := []struct { 7 + path string 8 + want string 9 + }{ 10 + {"/@user/repo/blob/main/docs/README.md", "docs"}, 11 + {"/@user/repo/blob/main/README.md", "."}, 12 + {"/@user/repo/tree/main/docs", "docs"}, 13 + {"/@user/repo/tree/main/docs/", "docs"}, 14 + {"/@user/repo/tree/main", "."}, 15 + } 16 + 17 + for _, tt := range tests { 18 + if got := extractCurrentDir(tt.path); got != tt.want { 19 + t.Errorf("extractCurrentDir(%q) = %q, want %q", tt.path, got, tt.want) 20 + } 21 + } 22 + }
+5 -4
appview/serververify/verify.go
··· 9 9 "tangled.org/core/api/tangled" 10 10 "tangled.org/core/appview/db" 11 11 "tangled.org/core/appview/xrpcclient" 12 + "tangled.org/core/orm" 12 13 "tangled.org/core/rbac" 13 14 ) 14 15 ··· 76 77 // mark this spindle as verified in the db 77 78 rowId, err := db.VerifySpindle( 78 79 tx, 79 - db.FilterEq("owner", owner), 80 - db.FilterEq("instance", instance), 80 + orm.FilterEq("owner", owner), 81 + orm.FilterEq("instance", instance), 81 82 ) 82 83 if err != nil { 83 84 return 0, fmt.Errorf("failed to write to DB: %w", err) ··· 115 116 // mark as registered 116 117 err = db.MarkRegistered( 117 118 tx, 118 - db.FilterEq("did", owner), 119 - db.FilterEq("domain", domain), 119 + orm.FilterEq("did", owner), 120 + orm.FilterEq("domain", domain), 120 121 ) 121 122 if err != nil { 122 123 return fmt.Errorf("failed to register domain: %w", err)
+25 -29
appview/spindles/spindles.go
··· 20 20 "tangled.org/core/appview/serververify" 21 21 "tangled.org/core/appview/xrpcclient" 22 22 "tangled.org/core/idresolver" 23 + "tangled.org/core/orm" 23 24 "tangled.org/core/rbac" 24 25 "tangled.org/core/tid" 25 26 ··· 71 72 user := s.OAuth.GetUser(r) 72 73 all, err := db.GetSpindles( 73 74 s.Db, 74 - db.FilterEq("owner", user.Did), 75 + orm.FilterEq("owner", user.Did), 75 76 ) 76 77 if err != nil { 77 78 s.Logger.Error("failed to fetch spindles", "err", err) ··· 101 102 102 103 spindles, err := db.GetSpindles( 103 104 s.Db, 104 - db.FilterEq("instance", instance), 105 - db.FilterEq("owner", user.Did), 106 - db.FilterIsNot("verified", "null"), 105 + orm.FilterEq("instance", instance), 106 + orm.FilterEq("owner", user.Did), 107 + orm.FilterIsNot("verified", "null"), 107 108 ) 108 109 if err != nil || len(spindles) != 1 { 109 110 l.Error("failed to get spindle", "err", err, "len(spindles)", len(spindles)) ··· 123 124 repos, err := db.GetRepos( 124 125 s.Db, 125 126 0, 126 - db.FilterEq("spindle", instance), 127 + orm.FilterEq("spindle", instance), 127 128 ) 128 129 if err != nil { 129 130 l.Error("failed to get spindle repos", "err", err) ··· 290 291 291 292 spindles, err := db.GetSpindles( 292 293 s.Db, 293 - db.FilterEq("owner", user.Did), 294 - db.FilterEq("instance", instance), 294 + orm.FilterEq("owner", user.Did), 295 + orm.FilterEq("instance", instance), 295 296 ) 296 297 if err != nil || len(spindles) != 1 { 297 298 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 319 320 // remove spindle members first 320 321 err = db.RemoveSpindleMember( 321 322 tx, 322 - db.FilterEq("did", user.Did), 323 - db.FilterEq("instance", instance), 323 + orm.FilterEq("did", user.Did), 324 + orm.FilterEq("instance", instance), 324 325 ) 325 326 if err != nil { 326 327 l.Error("failed to remove spindle members", "err", err) ··· 330 331 331 332 err = db.DeleteSpindle( 332 333 tx, 333 - db.FilterEq("owner", user.Did), 334 - db.FilterEq("instance", instance), 334 + orm.FilterEq("owner", user.Did), 335 + orm.FilterEq("instance", instance), 335 336 ) 336 337 if err != nil { 337 338 l.Error("failed to delete spindle", "err", err) ··· 410 411 411 412 spindles, err := db.GetSpindles( 412 413 s.Db, 413 - db.FilterEq("owner", user.Did), 414 - db.FilterEq("instance", instance), 414 + orm.FilterEq("owner", user.Did), 415 + orm.FilterEq("instance", instance), 415 416 ) 416 417 if err != nil || len(spindles) != 1 { 417 418 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 453 454 454 455 verifiedSpindle, err := db.GetSpindles( 455 456 s.Db, 456 - db.FilterEq("id", rowId), 457 + orm.FilterEq("id", rowId), 457 458 ) 458 459 if err != nil || len(verifiedSpindle) != 1 { 459 460 l.Error("failed get new spindle", "err", err) ··· 486 487 487 488 spindles, err := db.GetSpindles( 488 489 s.Db, 489 - db.FilterEq("owner", user.Did), 490 - db.FilterEq("instance", instance), 490 + orm.FilterEq("owner", user.Did), 491 + orm.FilterEq("instance", instance), 491 492 ) 492 493 if err != nil || len(spindles) != 1 { 493 494 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 622 623 623 624 spindles, err := db.GetSpindles( 624 625 s.Db, 625 - db.FilterEq("owner", user.Did), 626 - db.FilterEq("instance", instance), 626 + orm.FilterEq("owner", user.Did), 627 + orm.FilterEq("instance", instance), 627 628 ) 628 629 if err != nil || len(spindles) != 1 { 629 630 l.Error("failed to retrieve instance", "err", err, "len(spindles)", 
len(spindles)) ··· 652 653 s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 653 654 return 654 655 } 655 - if memberId.Handle.IsInvalidHandle() { 656 - l.Error("failed to resolve member identity to handle") 657 - s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 658 - return 659 - } 660 656 661 657 tx, err := s.Db.Begin() 662 658 if err != nil { ··· 672 668 // get the record from the DB first: 673 669 members, err := db.GetSpindleMembers( 674 670 s.Db, 675 - db.FilterEq("did", user.Did), 676 - db.FilterEq("instance", instance), 677 - db.FilterEq("subject", memberId.DID), 671 + orm.FilterEq("did", user.Did), 672 + orm.FilterEq("instance", instance), 673 + orm.FilterEq("subject", memberId.DID), 678 674 ) 679 675 if err != nil || len(members) != 1 { 680 676 l.Error("failed to get member", "err", err) ··· 685 681 // remove from db 686 682 if err = db.RemoveSpindleMember( 687 683 tx, 688 - db.FilterEq("did", user.Did), 689 - db.FilterEq("instance", instance), 690 - db.FilterEq("subject", memberId.DID), 684 + orm.FilterEq("did", user.Did), 685 + orm.FilterEq("instance", instance), 686 + orm.FilterEq("subject", memberId.DID), 691 687 ); err != nil { 692 688 l.Error("failed to remove spindle member", "err", err) 693 689 fail()
+6 -5
appview/state/gfi.go
··· 11 11 "tangled.org/core/appview/pages" 12 12 "tangled.org/core/appview/pagination" 13 13 "tangled.org/core/consts" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) { ··· 20 21 21 22 goodFirstIssueLabel := s.config.Label.GoodFirstIssue 22 23 23 - gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel)) 24 + gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel)) 24 25 if err != nil { 25 26 log.Println("failed to get gfi label def", err) 26 27 s.pages.Error500(w) 27 28 return 28 29 } 29 30 30 - repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel)) 31 + repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel)) 31 32 if err != nil { 32 33 log.Println("failed to get repo labels", err) 33 34 s.pages.Error503(w) ··· 55 56 pagination.Page{ 56 57 Limit: 500, 57 58 }, 58 - db.FilterIn("repo_at", repoUris), 59 - db.FilterEq("open", 1), 59 + orm.FilterIn("repo_at", repoUris), 60 + orm.FilterEq("open", 1), 60 61 ) 61 62 if err != nil { 62 63 log.Println("failed to get issues", err) ··· 132 133 } 133 134 134 135 if len(uriList) > 0 { 135 - allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList)) 136 + allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList)) 136 137 if err != nil { 137 138 log.Println("failed to fetch labels", err) 138 139 }
+17
appview/state/git_http.go
··· 25 25 26 26 } 27 27 28 + func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) { 29 + user, ok := r.Context().Value("resolvedId").(identity.Identity) 30 + if !ok { 31 + http.Error(w, "failed to resolve user", http.StatusInternalServerError) 32 + return 33 + } 34 + repo := r.Context().Value("repo").(*models.Repo) 35 + 36 + scheme := "https" 37 + if s.config.Core.Dev { 38 + scheme = "http" 39 + } 40 + 41 + targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery) 42 + s.proxyRequest(w, r, targetURL) 43 + } 44 + 28 45 func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) { 29 46 user, ok := r.Context().Value("resolvedId").(identity.Identity) 30 47 if !ok {
+6 -5
appview/state/knotstream.go
··· 16 16 ec "tangled.org/core/eventconsumer" 17 17 "tangled.org/core/eventconsumer/cursor" 18 18 "tangled.org/core/log" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 "tangled.org/core/workflow" 21 22 ··· 30 31 31 32 knots, err := db.GetRegistrations( 32 33 d, 33 - db.FilterIsNot("registered", "null"), 34 + orm.FilterIsNot("registered", "null"), 34 35 ) 35 36 if err != nil { 36 37 return nil, err ··· 143 144 repos, err := db.GetRepos( 144 145 d, 145 146 0, 146 - db.FilterEq("did", record.RepoDid), 147 - db.FilterEq("name", record.RepoName), 147 + orm.FilterEq("did", record.RepoDid), 148 + orm.FilterEq("name", record.RepoName), 148 149 ) 149 150 if err != nil { 150 151 return fmt.Errorf("failed to look for repo in DB (%s/%s): %w", record.RepoDid, record.RepoName, err) ··· 209 210 repos, err := db.GetRepos( 210 211 d, 211 212 0, 212 - db.FilterEq("did", record.TriggerMetadata.Repo.Did), 213 - db.FilterEq("name", record.TriggerMetadata.Repo.Repo), 213 + orm.FilterEq("did", record.TriggerMetadata.Repo.Did), 214 + orm.FilterEq("name", record.TriggerMetadata.Repo.Repo), 214 215 ) 215 216 if err != nil { 216 217 return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
+29 -18
appview/state/profile.go
··· 19 19 "tangled.org/core/appview/db" 20 20 "tangled.org/core/appview/models" 21 21 "tangled.org/core/appview/pages" 22 + "tangled.org/core/orm" 22 23 ) 23 24 24 25 func (s *State) Profile(w http.ResponseWriter, r *http.Request) { ··· 56 57 return nil, fmt.Errorf("failed to get profile: %w", err) 57 58 } 58 59 59 - repoCount, err := db.CountRepos(s.db, db.FilterEq("did", did)) 60 + repoCount, err := db.CountRepos(s.db, orm.FilterEq("did", did)) 60 61 if err != nil { 61 62 return nil, fmt.Errorf("failed to get repo count: %w", err) 62 63 } 63 64 64 - stringCount, err := db.CountStrings(s.db, db.FilterEq("did", did)) 65 + stringCount, err := db.CountStrings(s.db, orm.FilterEq("did", did)) 65 66 if err != nil { 66 67 return nil, fmt.Errorf("failed to get string count: %w", err) 67 68 } 68 69 69 - starredCount, err := db.CountStars(s.db, db.FilterEq("did", did)) 70 + starredCount, err := db.CountStars(s.db, orm.FilterEq("did", did)) 70 71 if err != nil { 71 72 return nil, fmt.Errorf("failed to get starred repo count: %w", err) 72 73 } ··· 86 87 startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC) 87 88 punchcard, err := db.MakePunchcard( 88 89 s.db, 89 - db.FilterEq("did", did), 90 - db.FilterGte("date", startOfYear.Format(time.DateOnly)), 91 - db.FilterLte("date", now.Format(time.DateOnly)), 90 + orm.FilterEq("did", did), 91 + orm.FilterGte("date", startOfYear.Format(time.DateOnly)), 92 + orm.FilterLte("date", now.Format(time.DateOnly)), 92 93 ) 93 94 if err != nil { 94 95 return nil, fmt.Errorf("failed to get punchcard for %s: %w", did, err) ··· 96 97 97 98 return &pages.ProfileCard{ 98 99 UserDid: did, 99 - UserHandle: ident.Handle.String(), 100 100 Profile: profile, 101 101 FollowStatus: followStatus, 102 102 Stats: pages.ProfileStats{ ··· 119 119 s.pages.Error500(w) 120 120 return 121 121 } 122 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 122 + l = l.With("profileDid", profile.UserDid) 123 123 124 124 repos, err := db.GetRepos( 125 125 s.db, 126 126 0, 127 - db.FilterEq("did", profile.UserDid), 127 + orm.FilterEq("did", profile.UserDid), 128 128 ) 129 129 if err != nil { 130 130 l.Error("failed to fetch repos", "err", err) ··· 162 162 l.Error("failed to create timeline", "err", err) 163 163 } 164 164 165 + // populate commit counts in the timeline, using the punchcard 166 + now := time.Now() 167 + for _, p := range profile.Punchcard.Punches { 168 + years := now.Year() - p.Date.Year() 169 + months := int(now.Month() - p.Date.Month()) 170 + monthsAgo := years*12 + months 171 + if monthsAgo >= 0 && monthsAgo < len(timeline.ByMonth) { 172 + timeline.ByMonth[monthsAgo].Commits += p.Count 173 + } 174 + } 175 + 165 176 s.pages.ProfileOverview(w, pages.ProfileOverviewParams{ 166 177 LoggedInUser: s.oauth.GetUser(r), 167 178 Card: profile, ··· 180 191 s.pages.Error500(w) 181 192 return 182 193 } 183 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 194 + l = l.With("profileDid", profile.UserDid) 184 195 185 196 repos, err := db.GetRepos( 186 197 s.db, 187 198 0, 188 - db.FilterEq("did", profile.UserDid), 199 + orm.FilterEq("did", profile.UserDid), 189 200 ) 190 201 if err != nil { 191 202 l.Error("failed to get repos", "err", err) ··· 209 220 s.pages.Error500(w) 210 221 return 211 222 } 212 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 223 + l = l.With("profileDid", profile.UserDid) 213 224 214 - stars, err := db.GetRepoStars(s.db, 0, db.FilterEq("did", profile.UserDid)) 225 + stars, 
err := db.GetRepoStars(s.db, 0, orm.FilterEq("did", profile.UserDid)) 215 226 if err != nil { 216 227 l.Error("failed to get stars", "err", err) 217 228 s.pages.Error500(w) ··· 238 249 s.pages.Error500(w) 239 250 return 240 251 } 241 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 252 + l = l.With("profileDid", profile.UserDid) 242 253 243 - strings, err := db.GetStrings(s.db, 0, db.FilterEq("did", profile.UserDid)) 254 + strings, err := db.GetStrings(s.db, 0, orm.FilterEq("did", profile.UserDid)) 244 255 if err != nil { 245 256 l.Error("failed to get strings", "err", err) 246 257 s.pages.Error500(w) ··· 270 281 if err != nil { 271 282 return nil, err 272 283 } 273 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 284 + l = l.With("profileDid", profile.UserDid) 274 285 275 286 loggedInUser := s.oauth.GetUser(r) 276 287 params := FollowsPageParams{ ··· 292 303 followDids = append(followDids, extractDid(follow)) 293 304 } 294 305 295 - profiles, err := db.GetProfiles(s.db, db.FilterIn("did", followDids)) 306 + profiles, err := db.GetProfiles(s.db, orm.FilterIn("did", followDids)) 296 307 if err != nil { 297 308 l.Error("failed to get profiles", "followDids", followDids, "err", err) 298 309 return &params, err ··· 695 706 log.Printf("getting profile data for %s: %s", user.Did, err) 696 707 } 697 708 698 - repos, err := db.GetRepos(s.db, 0, db.FilterEq("did", user.Did)) 709 + repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Did)) 699 710 if err != nil { 700 711 log.Printf("getting repos for %s: %s", user.Did, err) 701 712 }
+6
appview/state/router.go
··· 101 101 102 102 // These routes get proxied to the knot 103 103 r.Get("/info/refs", s.InfoRefs) 104 + r.Post("/git-upload-archive", s.UploadArchive) 104 105 r.Post("/git-upload-pack", s.UploadPack) 105 106 r.Post("/git-receive-pack", s.ReceivePack) 106 107 ··· 108 109 }) 109 110 110 111 r.NotFound(func(w http.ResponseWriter, r *http.Request) { 112 + w.WriteHeader(http.StatusNotFound) 111 113 s.pages.Error404(w) 112 114 }) 113 115 ··· 181 183 r.Get("/brand", s.Brand) 182 184 183 185 r.NotFound(func(w http.ResponseWriter, r *http.Request) { 186 + w.WriteHeader(http.StatusNotFound) 184 187 s.pages.Error404(w) 185 188 }) 186 189 return r ··· 263 266 issues := issues.New( 264 267 s.oauth, 265 268 s.repoResolver, 269 + s.enforcer, 266 270 s.pages, 267 271 s.idResolver, 272 + s.mentionsResolver, 268 273 s.db, 269 274 s.config, 270 275 s.notifier, ··· 281 286 s.repoResolver, 282 287 s.pages, 283 288 s.idResolver, 289 + s.mentionsResolver, 284 290 s.db, 285 291 s.config, 286 292 s.notifier,
+2 -1
appview/state/spindlestream.go
··· 17 17 ec "tangled.org/core/eventconsumer" 18 18 "tangled.org/core/eventconsumer/cursor" 19 19 "tangled.org/core/log" 20 + "tangled.org/core/orm" 20 21 "tangled.org/core/rbac" 21 22 spindle "tangled.org/core/spindle/models" 22 23 ) ··· 27 28 28 29 spindles, err := db.GetSpindles( 29 30 d, 30 - db.FilterIsNot("verified", "null"), 31 + orm.FilterIsNot("verified", "null"), 31 32 ) 32 33 if err != nil { 33 34 return nil, err
+31 -25
appview/state/state.go
··· 15 15 "tangled.org/core/appview/config" 16 16 "tangled.org/core/appview/db" 17 17 "tangled.org/core/appview/indexer" 18 + "tangled.org/core/appview/mentions" 18 19 "tangled.org/core/appview/models" 19 20 "tangled.org/core/appview/notify" 20 21 dbnotify "tangled.org/core/appview/notify/db" ··· 29 30 "tangled.org/core/jetstream" 30 31 "tangled.org/core/log" 31 32 tlog "tangled.org/core/log" 33 + "tangled.org/core/orm" 32 34 "tangled.org/core/rbac" 33 35 "tangled.org/core/tid" 34 36 ··· 42 44 ) 43 45 44 46 type State struct { 45 - db *db.DB 46 - notifier notify.Notifier 47 - indexer *indexer.Indexer 48 - oauth *oauth.OAuth 49 - enforcer *rbac.Enforcer 50 - pages *pages.Pages 51 - idResolver *idresolver.Resolver 52 - posthog posthog.Client 53 - jc *jetstream.JetstreamClient 54 - config *config.Config 55 - repoResolver *reporesolver.RepoResolver 56 - knotstream *eventconsumer.Consumer 57 - spindlestream *eventconsumer.Consumer 58 - logger *slog.Logger 59 - validator *validator.Validator 47 + db *db.DB 48 + notifier notify.Notifier 49 + indexer *indexer.Indexer 50 + oauth *oauth.OAuth 51 + enforcer *rbac.Enforcer 52 + pages *pages.Pages 53 + idResolver *idresolver.Resolver 54 + mentionsResolver *mentions.Resolver 55 + posthog posthog.Client 56 + jc *jetstream.JetstreamClient 57 + config *config.Config 58 + repoResolver *reporesolver.RepoResolver 59 + knotstream *eventconsumer.Consumer 60 + spindlestream *eventconsumer.Consumer 61 + logger *slog.Logger 62 + validator *validator.Validator 60 63 } 61 64 62 65 func Make(ctx context.Context, config *config.Config) (*State, error) { ··· 96 99 } 97 100 validator := validator.New(d, res, enforcer) 98 101 99 - repoResolver := reporesolver.New(config, enforcer, res, d) 102 + repoResolver := reporesolver.New(config, enforcer, d) 103 + 104 + mentionsResolver := mentions.New(config, res, d, log.SubLogger(logger, "mentionsResolver")) 100 105 101 106 wrapper := db.DbWrapper{Execer: d} 102 107 jc, err := jetstream.NewJetstreamClient( ··· 112 117 tangled.SpindleNSID, 113 118 tangled.StringNSID, 114 119 tangled.RepoIssueNSID, 115 - tangled.RepoIssueCommentNSID, 120 + tangled.CommentNSID, 116 121 tangled.LabelDefinitionNSID, 117 122 tangled.LabelOpNSID, 118 123 }, ··· 178 183 enforcer, 179 184 pages, 180 185 res, 186 + mentionsResolver, 181 187 posthog, 182 188 jc, 183 189 config, ··· 294 300 return 295 301 } 296 302 297 - gfiLabel, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", s.config.Label.GoodFirstIssue)) 303 + gfiLabel, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", s.config.Label.GoodFirstIssue)) 298 304 if err != nil { 299 305 // non-fatal 300 306 } ··· 318 324 319 325 regs, err := db.GetRegistrations( 320 326 s.db, 321 - db.FilterEq("did", user.Did), 322 - db.FilterEq("needs_upgrade", 1), 327 + orm.FilterEq("did", user.Did), 328 + orm.FilterEq("needs_upgrade", 1), 323 329 ) 324 330 if err != nil { 325 331 l.Error("non-fatal: failed to get registrations", "err", err) ··· 327 333 328 334 spindles, err := db.GetSpindles( 329 335 s.db, 330 - db.FilterEq("owner", user.Did), 331 - db.FilterEq("needs_upgrade", 1), 336 + orm.FilterEq("owner", user.Did), 337 + orm.FilterEq("needs_upgrade", 1), 332 338 ) 333 339 if err != nil { 334 340 l.Error("non-fatal: failed to get spindles", "err", err) ··· 499 505 // Check for existing repos 500 506 existingRepo, err := db.GetRepo( 501 507 s.db, 502 - db.FilterEq("did", user.Did), 503 - db.FilterEq("name", repoName), 508 + orm.FilterEq("did", user.Did), 509 + orm.FilterEq("name", repoName), 504 510 ) 
505 511 if err == nil && existingRepo != nil { 506 512 l.Info("repo exists") ··· 660 666 } 661 667 662 668 func BackfillDefaultDefs(e db.Execer, r *idresolver.Resolver, defaults []string) error { 663 - defaultLabels, err := db.GetLabelDefinitions(e, db.FilterIn("at_uri", defaults)) 669 + defaultLabels, err := db.GetLabelDefinitions(e, orm.FilterIn("at_uri", defaults)) 664 670 if err != nil { 665 671 return err 666 672 }
+7 -6
appview/strings/strings.go
··· 17 17 "tangled.org/core/appview/pages" 18 18 "tangled.org/core/appview/pages/markup" 19 19 "tangled.org/core/idresolver" 20 + "tangled.org/core/orm" 20 21 "tangled.org/core/tid" 21 22 22 23 "github.com/bluesky-social/indigo/api/atproto" ··· 108 109 strings, err := db.GetStrings( 109 110 s.Db, 110 111 0, 111 - db.FilterEq("did", id.DID), 112 - db.FilterEq("rkey", rkey), 112 + orm.FilterEq("did", id.DID), 113 + orm.FilterEq("rkey", rkey), 113 114 ) 114 115 if err != nil { 115 116 l.Error("failed to fetch string", "err", err) ··· 199 200 all, err := db.GetStrings( 200 201 s.Db, 201 202 0, 202 - db.FilterEq("did", id.DID), 203 - db.FilterEq("rkey", rkey), 203 + orm.FilterEq("did", id.DID), 204 + orm.FilterEq("rkey", rkey), 204 205 ) 205 206 if err != nil { 206 207 l.Error("failed to fetch string", "err", err) ··· 408 409 409 410 if err := db.DeleteString( 410 411 s.Db, 411 - db.FilterEq("did", user.Did), 412 - db.FilterEq("rkey", rkey), 412 + orm.FilterEq("did", user.Did), 413 + orm.FilterEq("rkey", rkey), 413 414 ); err != nil { 414 415 fail("Failed to delete string.", err) 415 416 return
-26
appview/validator/issue.go
··· 4 4 "fmt" 5 5 "strings" 6 6 7 - "tangled.org/core/appview/db" 8 7 "tangled.org/core/appview/models" 9 8 ) 10 - 11 - func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error { 12 - // if comments have parents, only ingest ones that are 1 level deep 13 - if comment.ReplyTo != nil { 14 - parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo)) 15 - if err != nil { 16 - return fmt.Errorf("failed to fetch parent comment: %w", err) 17 - } 18 - if len(parents) != 1 { 19 - return fmt.Errorf("incorrect number of parent comments returned: %d", len(parents)) 20 - } 21 - 22 - // depth check 23 - parent := parents[0] 24 - if parent.ReplyTo != nil { 25 - return fmt.Errorf("incorrect depth, this comment is replying at depth >1") 26 - } 27 - } 28 - 29 - if sb := strings.TrimSpace(v.sanitizer.SanitizeDefault(comment.Body)); sb == "" { 30 - return fmt.Errorf("body is empty after HTML sanitization") 31 - } 32 - 33 - return nil 34 - } 35 9 36 10 func (v *Validator) ValidateIssue(issue *models.Issue) error { 37 11 if issue.Title == "" {
+1
cmd/cborgen/cborgen.go
··· 15 15 "api/tangled/cbor_gen.go", 16 16 "tangled", 17 17 tangled.ActorProfile{}, 18 + tangled.Comment{}, 18 19 tangled.FeedReaction{}, 19 20 tangled.FeedStar{}, 20 21 tangled.GitRefUpdate{},
+1 -34
crypto/verify.go
··· 5 5 "crypto/sha256" 6 6 "encoding/base64" 7 7 "fmt" 8 - "strings" 9 8 10 9 "github.com/hiddeco/sshsig" 11 10 "golang.org/x/crypto/ssh" 12 - "tangled.org/core/types" 13 11 ) 14 12 15 13 func VerifySignature(pubKey, signature, payload []byte) (error, bool) { ··· 28 26 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults 29 27 // to sha-512 for all key types anyway. 30 28 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git") 31 - return err, err == nil 32 - } 33 29 34 - // VerifyCommitSignature reconstructs the payload used to sign a commit. This is 35 - // essentially the git cat-file output but without the gpgsig header. 36 - // 37 - // Caveats: signature verification will fail on commits with more than one parent, 38 - // i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field 39 - // and we are unable to reconstruct the payload correctly. 40 - // 41 - // Ideally this should directly operate on an *object.Commit. 42 - func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) { 43 - signature := commit.Commit.PGPSignature 44 - 45 - author := bytes.NewBuffer([]byte{}) 46 - committer := bytes.NewBuffer([]byte{}) 47 - commit.Commit.Author.Encode(author) 48 - commit.Commit.Committer.Encode(committer) 49 - 50 - payload := strings.Builder{} 51 - 52 - fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree) 53 - if commit.Commit.Parent != "" { 54 - fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent) 55 - } 56 - fmt.Fprintf(&payload, "author %s\n", author.String()) 57 - fmt.Fprintf(&payload, "committer %s\n", committer.String()) 58 - if commit.Commit.ChangedId != "" { 59 - fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId) 60 - } 61 - fmt.Fprintf(&payload, "\n%s", commit.Commit.Message) 62 - 63 - return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String())) 30 + return err, err == nil 64 31 } 65 32 66 33 // SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+1527
docs/DOCS.md
··· 1 + ---
2 + title: Tangled docs
3 + author: The Tangled Contributors
4 + date: Sun, 21 Dec 2025
5 + abstract: |
6 + Tangled is a decentralized code hosting and collaboration
7 + platform. Every component of Tangled is open-source and
8 + self-hostable. [tangled.org](https://tangled.org) also
9 + provides hosting and CI services that are free to use.
10 +
11 + There are several models for decentralized code
12 + collaboration platforms, ranging from ActivityPub's
13 + (Forgejo) federated model, to Radicle's entirely P2P model.
14 + Our approach attempts to be the best of both worlds by
15 + adopting the AT Protocol, a protocol for building decentralized
16 + social applications with a central identity.
17 +
18 + Our approach to this is the idea of "knots". Knots are
19 + lightweight, headless servers that enable users to host Git
20 + repositories with ease. Knots are designed for either single
21 + or multi-tenant use, which is perfect for self-hosting on a
22 + Raspberry Pi at home, or larger "community" servers. By
23 + default, Tangled provides managed knots where you can host
24 + your repositories for free.
25 +
26 + The appview at tangled.org acts as a consolidated "view"
27 + into the whole network, allowing users to access, clone and
28 + contribute to repositories hosted across different knots
29 + seamlessly.
30 + ---
31 +
32 + # Quick start guide
33 +
34 + ## Login or sign up
35 +
36 + You can [log in](https://tangled.org) by using your AT Protocol
37 + account. If you are unclear on what that means, simply head
38 + to the [signup](https://tangled.org/signup) page and create
39 + an account. By doing so, you will be choosing Tangled as
40 + your account provider (you will be granted a handle of the
41 + form `user.tngl.sh`).
42 +
43 + In the AT Protocol network, users are free to choose their account
44 + provider (known as a "Personal Data Service", or PDS), and
45 + log in to applications that support AT accounts.
46 +
47 + You can think of it as "one account for all of the atmosphere"!
48 +
49 + If you already have an AT account (you may have one if you
50 + signed up to Bluesky, for example), you can log in with the
51 + same handle on Tangled (so just use `user.bsky.social` on
52 + the login page).
53 +
54 + ## Add an SSH key
55 +
56 + Once you are logged in, you can start creating repositories
57 + and pushing code. Tangled supports pushing git repositories
58 + over SSH.
59 +
60 + First, you'll need to generate an SSH key if you don't
61 + already have one:
62 +
63 + ```bash
64 + ssh-keygen -t ed25519 -C "foo@bar.com"
65 + ```
66 +
67 + When prompted, save the key to the default location
68 + (`~/.ssh/id_ed25519`) and optionally set a passphrase.
69 +
70 + Copy your public key to your clipboard:
71 +
72 + ```bash
73 + # on X11
74 + cat ~/.ssh/id_ed25519.pub | xclip -sel c
75 +
76 + # on wayland
77 + cat ~/.ssh/id_ed25519.pub | wl-copy
78 +
79 + # on macos
80 + cat ~/.ssh/id_ed25519.pub | pbcopy
81 + ```
82 +
83 + Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
84 + paste your public key, give it a descriptive name, and hit
85 + save.
86 +
87 + ## Create a repository
88 +
89 + Once your SSH key is added, create your first repository:
90 +
91 + 1. Hit the green `+` icon on the topbar, and select
92 + repository
93 + 2. Enter a repository name
94 + 3. Add a description
95 + 4. Choose a knotserver to host this repository on
96 + 5. Hit create
97 +
98 + Knots are self-hostable, lightweight Git servers that can
99 + host your repository.
Unlike traditional code forges, your 100 + code can live on any server. Read the [Knots](TODO) section 101 + for more. 102 + 103 + ## Configure SSH 104 + 105 + To ensure Git uses the correct SSH key and connects smoothly 106 + to Tangled, add this configuration to your `~/.ssh/config` 107 + file: 108 + 109 + ``` 110 + Host tangled.org 111 + Hostname tangled.org 112 + User git 113 + IdentityFile ~/.ssh/id_ed25519 114 + AddressFamily inet 115 + ``` 116 + 117 + This tells SSH to use your specific key when connecting to 118 + Tangled and prevents authentication issues if you have 119 + multiple SSH keys. 120 + 121 + Note that this configuration only works for knotservers that 122 + are hosted by tangled.org. If you use a custom knot, refer 123 + to the [Knots](TODO) section. 124 + 125 + ## Push your first repository 126 + 127 + Initialize a new Git repository: 128 + 129 + ```bash 130 + mkdir my-project 131 + cd my-project 132 + 133 + git init 134 + echo "# My Project" > README.md 135 + ``` 136 + 137 + Add some content and push! 138 + 139 + ```bash 140 + git add README.md 141 + git commit -m "Initial commit" 142 + git remote add origin git@tangled.org:user.tngl.sh/my-project 143 + git push -u origin main 144 + ``` 145 + 146 + That's it! Your code is now hosted on Tangled. 147 + 148 + ## Migrating an existing repository 149 + 150 + Moving your repositories from GitHub, GitLab, Bitbucket, or 151 + any other Git forge to Tangled is straightforward. You'll 152 + simply change your repository's remote URL. At the moment, 153 + Tangled does not have any tooling to migrate data such as 154 + GitHub issues or pull requests. 155 + 156 + First, create a new repository on tangled.org as described 157 + in the [Quick Start Guide](#create-a-repository). 158 + 159 + Navigate to your existing local repository: 160 + 161 + ```bash 162 + cd /path/to/your/existing/repo 163 + ``` 164 + 165 + You can inspect your existing Git remote like so: 166 + 167 + ```bash 168 + git remote -v 169 + ``` 170 + 171 + You'll see something like: 172 + 173 + ``` 174 + origin git@github.com:username/my-project (fetch) 175 + origin git@github.com:username/my-project (push) 176 + ``` 177 + 178 + Update the remote URL to point to tangled: 179 + 180 + ```bash 181 + git remote set-url origin git@tangled.org:user.tngl.sh/my-project 182 + ``` 183 + 184 + Verify the change: 185 + 186 + ```bash 187 + git remote -v 188 + ``` 189 + 190 + You should now see: 191 + 192 + ``` 193 + origin git@tangled.org:user.tngl.sh/my-project (fetch) 194 + origin git@tangled.org:user.tngl.sh/my-project (push) 195 + ``` 196 + 197 + Push all your branches and tags to Tangled: 198 + 199 + ```bash 200 + git push -u origin --all 201 + git push -u origin --tags 202 + ``` 203 + 204 + Your repository is now migrated to Tangled! All commit 205 + history, branches, and tags have been preserved. 206 + 207 + ## Mirroring a repository to Tangled 208 + 209 + If you want to maintain your repository on multiple forges 210 + simultaneously, for example, keeping your primary repository 211 + on GitHub while mirroring to Tangled for backup or 212 + redundancy, you can do so by adding multiple remotes. 213 + 214 + You can configure your local repository to push to both 215 + Tangled and, say, GitHub. 
You may already have the following
216 + setup:
217 +
218 + ```
219 + $ git remote -v
220 + origin git@github.com:username/my-project (fetch)
221 + origin git@github.com:username/my-project (push)
222 + ```
223 +
224 + Now add Tangled as an additional push URL to the same
225 + remote:
226 +
227 + ```bash
228 + git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
229 + ```
230 +
231 + You also need to re-add the original URL as a push
232 + destination, since Git stops pushing to the fetch URL once
233 + an explicit push URL is set:
234 +
235 + ```bash
236 + git remote set-url --add --push origin git@github.com:username/my-project
237 + ```
238 +
239 + Verify your configuration:
240 +
241 + ```
242 + $ git remote -v
243 + origin git@github.com:username/my-project (fetch)
244 + origin git@tangled.org:user.tngl.sh/my-project (push)
245 + origin git@github.com:username/my-project (push)
246 + ```
247 +
248 + Notice that there's one fetch URL (the primary remote) and
249 + two push URLs. Now, whenever you push, Git will
250 + automatically push to both remotes:
251 +
252 + ```bash
253 + git push origin main
254 + ```
255 +
256 + This single command pushes your `main` branch to both GitHub
257 + and Tangled simultaneously.
258 +
259 + To push all branches and tags:
260 +
261 + ```bash
262 + git push origin --all
263 + git push origin --tags
264 + ```
265 +
266 + If you prefer more control over which remote you push to,
267 + you can maintain separate remotes:
268 +
269 + ```bash
270 + git remote add github git@github.com:username/my-project
271 + git remote add tangled git@tangled.org:user.tngl.sh/my-project
272 + ```
273 +
274 + Then push to each explicitly:
275 +
276 + ```bash
277 + git push github main
278 + git push tangled main
279 + ```
280 +
281 + # Knot self-hosting guide
282 +
283 + So you want to run your own knot server? Great! Here are a few prerequisites:
284 +
285 + 1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
286 + 2. A (sub)domain name. People generally use `knot.example.com`.
287 + 3. A valid SSL certificate for your domain.
288 +
289 + ## NixOS
290 +
291 + Refer to the [knot
292 + module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
293 + for a full list of options. Sample configurations:
294 +
295 + - [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
296 + - [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
297 +
298 + ## Docker
299 +
300 + Refer to
301 + [@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
302 + Note that this is community-maintained.
303 +
304 + ## Manual setup
305 +
306 + First, clone this repository:
307 +
308 + ```
309 + git clone https://tangled.org/@tangled.org/core
310 + ```
311 +
312 + Then, build the `knot` CLI. This is the knot administration
313 + and operation tool.
For the purpose of this guide, we're 314 + only concerned with these subcommands: 315 + 316 + * `knot server`: the main knot server process, typically 317 + run as a supervised service 318 + * `knot guard`: handles role-based access control for git 319 + over SSH (you'll never have to run this yourself) 320 + * `knot keys`: fetches SSH keys associated with your knot; 321 + we'll use this to generate the SSH 322 + `AuthorizedKeysCommand` 323 + 324 + ``` 325 + cd core 326 + export CGO_ENABLED=1 327 + go build -o knot ./cmd/knot 328 + ``` 329 + 330 + Next, move the `knot` binary to a location owned by `root` -- 331 + `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 332 + 333 + ``` 334 + sudo mv knot /usr/local/bin/knot 335 + sudo chown root:root /usr/local/bin/knot 336 + ``` 337 + 338 + This is necessary because SSH `AuthorizedKeysCommand` requires [really 339 + specific permissions](https://stackoverflow.com/a/27638306). The 340 + `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 341 + retrieve a user's public SSH keys dynamically for authentication. Let's 342 + set that up. 343 + 344 + ``` 345 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 346 + Match User git 347 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 348 + AuthorizedKeysCommandUser nobody 349 + EOF 350 + ``` 351 + 352 + Then, reload `sshd`: 353 + 354 + ``` 355 + sudo systemctl reload ssh 356 + ``` 357 + 358 + Next, create the `git` user. We'll use the `git` user's home directory 359 + to store repositories: 360 + 361 + ``` 362 + sudo adduser git 363 + ``` 364 + 365 + Create `/home/git/.knot.env` with the following, updating the values as 366 + necessary. The `KNOT_SERVER_OWNER` should be set to your 367 + DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 368 + 369 + ``` 370 + KNOT_REPO_SCAN_PATH=/home/git 371 + KNOT_SERVER_HOSTNAME=knot.example.com 372 + APPVIEW_ENDPOINT=https://tangled.org 373 + KNOT_SERVER_OWNER=did:plc:foobar 374 + KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 375 + KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 376 + ``` 377 + 378 + If you run a Linux distribution that uses systemd, you can use the provided 379 + service file to run the server. Copy 380 + [`knotserver.service`](/systemd/knotserver.service) 381 + to `/etc/systemd/system/`. Then, run: 382 + 383 + ``` 384 + systemctl enable knotserver 385 + systemctl start knotserver 386 + ``` 387 + 388 + The last step is to configure a reverse proxy like Nginx or Caddy to front your 389 + knot. Here's an example configuration for Nginx: 390 + 391 + ``` 392 + server { 393 + listen 80; 394 + listen [::]:80; 395 + server_name knot.example.com; 396 + 397 + location / { 398 + proxy_pass http://localhost:5555; 399 + proxy_set_header Host $host; 400 + proxy_set_header X-Real-IP $remote_addr; 401 + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 402 + proxy_set_header X-Forwarded-Proto $scheme; 403 + } 404 + 405 + # wss endpoint for git events 406 + location /events { 407 + proxy_set_header X-Forwarded-For $remote_addr; 408 + proxy_set_header Host $http_host; 409 + proxy_set_header Upgrade websocket; 410 + proxy_set_header Connection Upgrade; 411 + proxy_pass http://localhost:5555; 412 + } 413 + # additional config for SSL/TLS go here. 414 + } 415 + 416 + ``` 417 + 418 + Remember to use Let's Encrypt or similar to procure a certificate for your 419 + knot domain. 420 + 421 + You should now have a running knot server! 
You can finalize 422 + your registration by hitting the `verify` button on the 423 + [/settings/knots](https://tangled.org/settings/knots) page. This simply creates 424 + a record on your PDS to announce the existence of the knot. 425 + 426 + ### Custom paths 427 + 428 + (This section applies to manual setup only. Docker users should edit the mounts 429 + in `docker-compose.yml` instead.) 430 + 431 + Right now, the database and repositories of your knot live in `/home/git`. You 432 + can move these paths if you'd like to store them in another folder. Be careful 433 + when adjusting these paths: 434 + 435 + * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent 436 + any possible side effects. Remember to restart it once you're done. 437 + * Make backups before moving in case something goes wrong. 438 + * Make sure the `git` user can read from and write to the new paths. 439 + 440 + #### Database 441 + 442 + As an example, let's say the current database is at `/home/git/knotserver.db`, 443 + and we want to move it to `/home/git/database/knotserver.db`. 444 + 445 + Copy the current database to the new location. Make sure to copy the `.db-shm` 446 + and `.db-wal` files if they exist. 447 + 448 + ``` 449 + mkdir /home/git/database 450 + cp /home/git/knotserver.db* /home/git/database 451 + ``` 452 + 453 + In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to 454 + the new file path (_not_ the directory): 455 + 456 + ``` 457 + KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db 458 + ``` 459 + 460 + #### Repositories 461 + 462 + As an example, let's say the repositories are currently in `/home/git`, and we 463 + want to move them into `/home/git/repositories`. 464 + 465 + Create the new folder, then move the existing repositories (if there are any): 466 + 467 + ``` 468 + mkdir /home/git/repositories 469 + # move all DIDs into the new folder; these will vary for you! 470 + mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories 471 + ``` 472 + 473 + In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 474 + to the new directory: 475 + 476 + ``` 477 + KNOT_REPO_SCAN_PATH=/home/git/repositories 478 + ``` 479 + 480 + Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 481 + repository path: 482 + 483 + ``` 484 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 485 + Match User git 486 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 487 + AuthorizedKeysCommandUser nobody 488 + EOF 489 + ``` 490 + 491 + Make sure to restart your SSH server! 492 + 493 + #### MOTD (message of the day) 494 + 495 + To configure the MOTD used ("Welcome to this knot!" by default), edit the 496 + `/home/git/motd` file: 497 + 498 + ``` 499 + printf "Hi from this knot!\n" > /home/git/motd 500 + ``` 501 + 502 + Note that you should add a newline at the end if setting a non-empty message 503 + since the knot won't do this for you. 504 + 505 + # Spindles 506 + 507 + ## Pipelines 508 + 509 + Spindle workflows allow you to write CI/CD pipelines in a 510 + simple format. They're located in the `.tangled/workflows` 511 + directory at the root of your repository, and are defined 512 + using YAML. 513 + 514 + The fields are: 515 + 516 + - [Trigger](#trigger): A **required** field that defines 517 + when a workflow should be triggered. 518 + - [Engine](#engine): A **required** field that defines which 519 + engine a workflow should run on.
520 + - [Clone options](#clone-options): An **optional** field 521 + that defines how the repository should be cloned. 522 + - [Dependencies](#dependencies): An **optional** field that 523 + allows you to list dependencies you may need. 524 + - [Environment](#environment): An **optional** field that 525 + allows you to define environment variables. 526 + - [Steps](#steps): An **optional** field that allows you to 527 + define what steps should run in the workflow. 528 + 529 + ### Trigger 530 + 531 + The first thing to add to a workflow is the trigger, which 532 + defines when a workflow runs. This is defined using a `when` 533 + field, which takes in a list of conditions. Each condition 534 + has the following fields: 535 + 536 + - `event`: This is a **required** field that defines when 537 + your workflow should run. It's a list that can take one or 538 + more of the following values: 539 + - `push`: The workflow should run every time a commit is 540 + pushed to the repository. 541 + - `pull_request`: The workflow should run every time a 542 + pull request is made or updated. 543 + - `manual`: The workflow can be triggered manually. 544 + - `branch`: Defines which branches the workflow should run 545 + for. If used with the `push` event, commits to the 546 + branch(es) listed here will trigger the workflow. If used 547 + with the `pull_request` event, updates to pull requests 548 + targeting the branch(es) listed here will trigger the 549 + workflow. This field has no effect with the `manual` 550 + event. Supports glob patterns using `*` and `**` (e.g., 551 + `main`, `develop`, `release-*`). Either `branch` or `tag` 552 + (or both) must be specified for `push` events. 553 + - `tag`: Defines which tags the workflow should run for. 554 + Only used with the `push` event - when tags matching the 555 + pattern(s) listed here are pushed, the workflow will 556 + trigger. This field has no effect with `pull_request` or 557 + `manual` events. Supports glob patterns using `*` and `**` 558 + (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or 559 + `tag` (or both) must be specified for `push` events. 560 + 561 + For example, if you'd like to define a workflow that runs 562 + when commits are pushed to the `main` and `develop` 563 + branches, or when pull requests that target the `main` 564 + branch are updated, or manually, you can do so with: 565 + 566 + ```yaml 567 + when: 568 + - event: ["push", "manual"] 569 + branch: ["main", "develop"] 570 + - event: ["pull_request"] 571 + branch: ["main"] 572 + ``` 573 + 574 + You can also trigger workflows on tag pushes. For instance, 575 + to run a deployment workflow when tags matching `v*` are 576 + pushed: 577 + 578 + ```yaml 579 + when: 580 + - event: ["push"] 581 + tag: ["v*"] 582 + ``` 583 + 584 + You can even combine branch and tag patterns in a single 585 + constraint (the workflow triggers if either matches): 586 + 587 + ```yaml 588 + when: 589 + - event: ["push"] 590 + branch: ["main", "release-*"] 591 + tag: ["v*", "stable"] 592 + ``` 593 + 594 + ### Engine 595 + 596 + Next is the engine on which the workflow should run, defined 597 + using the **required** `engine` field. The currently 598 + supported engines are: 599 + 600 + - `nixery`: This uses an instance of 601 + [Nixery](https://nixery.dev) to run steps, which allows 602 + you to add [dependencies](#dependencies) from 603 + Nixpkgs (https://github.com/NixOS/nixpkgs). 
You can 604 + search for packages on https://search.nixos.org, and 605 + there's a pretty good chance the package(s) you're looking 606 + for will be there. 607 + 608 + Example: 609 + 610 + ```yaml 611 + engine: "nixery" 612 + ``` 613 + 614 + ### Clone options 615 + 616 + When a workflow starts, the first step is to clone the 617 + repository. You can customize this behavior using the 618 + **optional** `clone` field. It has the following fields: 619 + 620 + - `skip`: Setting this to `true` will skip cloning the 621 + repository. This can be useful if your workflow is doing 622 + something that doesn't require anything from the 623 + repository itself. This is `false` by default. 624 + - `depth`: This sets the number of commits, or the "clone 625 + depth", to fetch from the repository. For example, if you 626 + set this to 2, the last 2 commits will be fetched. By 627 + default, the depth is set to 1, meaning only the most 628 + recent commit will be fetched, which is the commit that 629 + triggered the workflow. 630 + - `submodules`: If you use Git submodules 631 + (https://git-scm.com/book/en/v2/Git-Tools-Submodules) 632 + in your repository, setting this field to `true` will 633 + recursively fetch all submodules. This is `false` by 634 + default. 635 + 636 + The default settings are: 637 + 638 + ```yaml 639 + clone: 640 + skip: false 641 + depth: 1 642 + submodules: false 643 + ``` 644 + 645 + ### Dependencies 646 + 647 + Usually when you're running a workflow, you'll need 648 + additional dependencies. The `dependencies` field lets you 649 + define which dependencies to get, and from where. It's a 650 + key-value map, with the key being the registry to fetch 651 + dependencies from, and the value being the list of 652 + dependencies to fetch. 653 + 654 + Say you want to fetch Node.js and Go from `nixpkgs`, and a 655 + package called `my_pkg` from your own registry, the 656 + repository at 657 + `https://tangled.org/@example.com/my_pkg`. You can define 658 + those dependencies like so: 659 + 660 + ```yaml 661 + dependencies: 662 + # nixpkgs 663 + nixpkgs: 664 + - nodejs 665 + - go 666 + # custom registry 667 + git+https://tangled.org/@example.com/my_pkg: 668 + - my_pkg 669 + ``` 670 + 671 + Now these dependencies are available to use in your 672 + workflow! 673 + 674 + ### Environment 675 + 676 + The `environment` field allows you to define environment 677 + variables that will be available throughout the entire 678 + workflow. **Do not put secrets here, these environment 679 + variables are visible to anyone viewing the repository. You 680 + can add secrets for pipelines in your repository's 681 + settings.** 682 + 683 + Example: 684 + 685 + ```yaml 686 + environment: 687 + GOOS: "linux" 688 + GOARCH: "arm64" 689 + NODE_ENV: "production" 690 + MY_ENV_VAR: "MY_ENV_VALUE" 691 + ``` 692 + 693 + ### Steps 694 + 695 + The `steps` field allows you to define what steps should run 696 + in the workflow. It's a list of step objects, each with the 697 + following fields: 698 + 699 + - `name`: This field allows you to give your step a name. 700 + This name is visible in your workflow runs, and is used to 701 + describe what the step is doing. 702 + - `command`: This field allows you to define a command to 703 + run in that step. The step is run in a Bash shell, and the 704 + logs from the command will be visible in the pipelines 705 + page on the Tangled website. The 706 + [dependencies](#dependencies) you added will be available 707 + to use here.
708 + - `environment`: Similar to the global 709 + [environment](#environment) config, this **optional** 710 + field is a key-value map that allows you to set 711 + environment variables for the step. **Do not put secrets 712 + here, these environment variables are visible to anyone 713 + viewing the repository. You can add secrets for pipelines 714 + in your repository's settings.** 715 + 716 + Example: 717 + 718 + ```yaml 719 + steps: 720 + - name: "Build backend" 721 + command: "go build" 722 + environment: 723 + GOOS: "darwin" 724 + GOARCH: "arm64" 725 + - name: "Build frontend" 726 + command: "npm run build" 727 + environment: 728 + NODE_ENV: "production" 729 + ``` 730 + 731 + ### Complete workflow 732 + 733 + ```yaml 734 + # .tangled/workflows/build.yml 735 + 736 + when: 737 + - event: ["push", "manual"] 738 + branch: ["main", "develop"] 739 + - event: ["pull_request"] 740 + branch: ["main"] 741 + 742 + engine: "nixery" 743 + 744 + # using the default values 745 + clone: 746 + skip: false 747 + depth: 1 748 + submodules: false 749 + 750 + dependencies: 751 + # nixpkgs 752 + nixpkgs: 753 + - nodejs 754 + - go 755 + # custom registry 756 + git+https://tangled.org/@example.com/my_pkg: 757 + - my_pkg 758 + 759 + environment: 760 + GOOS: "linux" 761 + GOARCH: "arm64" 762 + NODE_ENV: "production" 763 + MY_ENV_VAR: "MY_ENV_VALUE" 764 + 765 + steps: 766 + - name: "Build backend" 767 + command: "go build" 768 + environment: 769 + GOOS: "darwin" 770 + GOARCH: "arm64" 771 + - name: "Build frontend" 772 + command: "npm run build" 773 + environment: 774 + NODE_ENV: "production" 775 + ``` 776 + 777 + If you want another example of a workflow, you can look at 778 + the one [Tangled uses to build the 779 + project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml). 780 + 781 + ## Self-hosting guide 782 + 783 + ### Prerequisites 784 + 785 + * Go 786 + * Docker (the only supported backend currently) 787 + 788 + ### Configuration 789 + 790 + Spindle is configured using environment variables. The following environment variables are available: 791 + 792 + * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 793 + * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 794 + * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 795 + * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 796 + * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 797 + * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 798 + * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 799 + * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 800 + * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 801 + 802 + ### Running spindle 803 + 804 + 1. **Set the environment variables.** For example: 805 + 806 + ```shell 807 + export SPINDLE_SERVER_HOSTNAME="your-hostname" 808 + export SPINDLE_SERVER_OWNER="your-did" 809 + ``` 810 + 811 + 2. **Build the Spindle binary.** 812 + 813 + ```shell 814 + cd core 815 + go mod download 816 + go build -o cmd/spindle/spindle cmd/spindle/main.go 817 + ``` 818 + 819 + 3. 
**Create the log directory.** 820 + 821 + ```shell 822 + sudo mkdir -p /var/log/spindle 823 + sudo chown $USER:$USER -R /var/log/spindle 824 + ``` 825 + 826 + 4. **Run the Spindle binary.** 827 + 828 + ```shell 829 + ./cmd/spindle/spindle 830 + ``` 831 + 832 + Spindle will now start, connect to the Jetstream server, and begin processing pipelines. 833 + 834 + ## Architecture 835 + 836 + Spindle is a small CI runner service. Here's a high-level overview of how it operates: 837 + 838 + * Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and 839 + [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream. 840 + * When a new repo record comes through (typically when you add a spindle to a 841 + repo from the settings), spindle then resolves the underlying knot and 842 + subscribes to repo events (see: 843 + [`sh.tangled.pipeline`](/lexicons/pipeline.json)). 844 + * The spindle engine then handles execution of the pipeline, with results and 845 + logs beamed on the spindle event stream over WebSocket. 846 + 847 + ### The engine 848 + 849 + At present, the only supported backend is Docker (and Podman, if Docker 850 + compatibility is enabled, so that `/run/docker.sock` is created). Spindle 851 + executes each step in the pipeline in a fresh container, with state persisted 852 + across steps within the `/tangled/workspace` directory. 853 + 854 + The base image for the container is constructed on the fly using 855 + [Nixery](https://nixery.dev), which is handy for caching layers for frequently 856 + used packages. 857 + 858 + The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines). 859 + 860 + ## Secrets with openbao 861 + 862 + This document covers setting up spindle to use OpenBao for secrets 863 + management via OpenBao Proxy instead of the default SQLite backend. 864 + 865 + ### Overview 866 + 867 + Spindle now uses OpenBao Proxy for secrets management. The proxy handles 868 + authentication automatically using AppRole credentials, while spindle 869 + connects to the local proxy instead of directly to the OpenBao server. 870 + 871 + This approach provides better security, automatic token renewal, and 872 + simplified application code. 873 + 874 + ### Installation 875 + 876 + Install OpenBao from Nixpkgs: 877 + 878 + ```bash 879 + nix shell nixpkgs#openbao # for a local server 880 + ``` 881 + 882 + ### Setup 883 + 884 + The setup process is documented for both local development and production. 885 + 886 + #### Local development 887 + 888 + Start OpenBao in dev mode: 889 + 890 + ```bash 891 + bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200 892 + ``` 893 + 894 + This starts OpenBao on `http://localhost:8200` with a root token. 895 + 896 + Set up the environment for the bao CLI: 897 + 898 + ```bash 899 + export BAO_ADDR=http://localhost:8200 900 + export BAO_TOKEN=root 901 + ``` 902 + 903 + #### Production 904 + 905 + You would typically use a systemd service with a 906 + configuration file. Refer to 907 + [@tangled.org/infra](https://tangled.org/@tangled.org/infra) 908 + for how this can be achieved using Nix. 909 + 910 + Then, initialize the bao server: 911 + 912 + ```bash 913 + bao operator init -key-shares=1 -key-threshold=1 914 + ``` 915 + 916 + This will print out an unseal key and a root token. Save them 917 + somewhere (like a password manager).
Then unseal the vault 918 + to begin setting it up: 919 + 920 + ```bash 921 + bao operator unseal <unseal_key> 922 + ``` 923 + 924 + All steps below remain the same across both dev and 925 + production setups. 926 + 927 + #### Configure openbao server 928 + 929 + Create the spindle KV mount: 930 + 931 + ```bash 932 + bao secrets enable -path=spindle -version=2 kv 933 + ``` 934 + 935 + Set up AppRole authentication and policy: 936 + 937 + Create a policy file `spindle-policy.hcl`: 938 + 939 + ```hcl 940 + # Full access to spindle KV v2 data 941 + path "spindle/data/*" { 942 + capabilities = ["create", "read", "update", "delete"] 943 + } 944 + 945 + # Access to metadata for listing and management 946 + path "spindle/metadata/*" { 947 + capabilities = ["list", "read", "delete", "update"] 948 + } 949 + 950 + # Allow listing at root level 951 + path "spindle/" { 952 + capabilities = ["list"] 953 + } 954 + 955 + # Required for connection testing and health checks 956 + path "auth/token/lookup-self" { 957 + capabilities = ["read"] 958 + } 959 + ``` 960 + 961 + Apply the policy and create an AppRole: 962 + 963 + ```bash 964 + bao policy write spindle-policy spindle-policy.hcl 965 + bao auth enable approle 966 + bao write auth/approle/role/spindle \ 967 + token_policies="spindle-policy" \ 968 + token_ttl=1h \ 969 + token_max_ttl=4h \ 970 + bind_secret_id=true \ 971 + secret_id_ttl=0 \ 972 + secret_id_num_uses=0 973 + ``` 974 + 975 + Get the credentials: 976 + 977 + ```bash 978 + # Get role ID (static) 979 + ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 980 + 981 + # Generate secret ID 982 + SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 983 + 984 + echo "Role ID: $ROLE_ID" 985 + echo "Secret ID: $SECRET_ID" 986 + ``` 987 + 988 + #### Create proxy configuration 989 + 990 + Create the credential files: 991 + 992 + ```bash 993 + # Create directory for OpenBao files 994 + mkdir -p /tmp/openbao 995 + 996 + # Save credentials 997 + echo "$ROLE_ID" > /tmp/openbao/role-id 998 + echo "$SECRET_ID" > /tmp/openbao/secret-id 999 + chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 1000 + ``` 1001 + 1002 + Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 1003 + 1004 + ```hcl 1005 + # OpenBao server connection 1006 + vault { 1007 + address = "http://localhost:8200" 1008 + } 1009 + 1010 + # Auto-Auth using AppRole 1011 + auto_auth { 1012 + method "approle" { 1013 + mount_path = "auth/approle" 1014 + config = { 1015 + role_id_file_path = "/tmp/openbao/role-id" 1016 + secret_id_file_path = "/tmp/openbao/secret-id" 1017 + } 1018 + } 1019 + 1020 + # Optional: write token to file for debugging 1021 + sink "file" { 1022 + config = { 1023 + path = "/tmp/openbao/token" 1024 + mode = 0640 1025 + } 1026 + } 1027 + } 1028 + 1029 + # Proxy listener for spindle 1030 + listener "tcp" { 1031 + address = "127.0.0.1:8201" 1032 + tls_disable = true 1033 + } 1034 + 1035 + # Enable API proxy with auto-auth token 1036 + api_proxy { 1037 + use_auto_auth_token = true 1038 + } 1039 + 1040 + # Enable response caching 1041 + cache { 1042 + use_auto_auth_token = true 1043 + } 1044 + 1045 + # Logging 1046 + log_level = "info" 1047 + ``` 1048 + 1049 + #### Start the proxy 1050 + 1051 + Start OpenBao Proxy: 1052 + 1053 + ```bash 1054 + bao proxy -config=/tmp/openbao/proxy.hcl 1055 + ``` 1056 + 1057 + The proxy will authenticate with OpenBao and start listening on 1058 + `127.0.0.1:8201`. 
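In production you will likely want the proxy supervised rather than left running in a foreground shell (see the production notes below). A minimal systemd unit sketch, assuming a `bao` binary at `/usr/local/bin/bao`, the configuration at `/etc/openbao/proxy.hcl`, and a dedicated `openbao` user -- all of which you should adjust for your own setup:

```
# /etc/systemd/system/openbao-proxy.service -- illustrative sketch only
[Unit]
Description=OpenBao Proxy for spindle secrets
After=network-online.target
Wants=network-online.target

[Service]
# binary and config paths are assumptions; adjust as needed
ExecStart=/usr/local/bin/bao proxy -config=/etc/openbao/proxy.hcl
Restart=on-failure
User=openbao
Group=openbao

[Install]
WantedBy=multi-user.target
```

The `openbao-proxy` unit name matches the `journalctl -u openbao-proxy` command used in the troubleshooting section below.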
1059 + 1060 + #### Configure spindle 1061 + 1062 + Set these environment variables for spindle: 1063 + 1064 + ```bash 1065 + export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 1066 + export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 1067 + export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 1068 + ``` 1069 + 1070 + On startup, spindle will now connect to the local proxy, 1071 + which handles all authentication automatically. 1072 + 1073 + ### Production setup for proxy 1074 + 1075 + For production, you'll want to run the proxy as a service. 1076 + 1077 + Place your production configuration in 1078 + `/etc/openbao/proxy.hcl` with proper TLS settings for the 1079 + vault connection. 1080 + 1081 + ### Verifying setup 1082 + 1083 + Test the proxy directly: 1084 + 1085 + ```bash 1086 + # Check proxy health 1087 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 1088 + 1089 + # Test token lookup through proxy 1090 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 1091 + ``` 1092 + 1093 + Test OpenBao operations through the server: 1094 + 1095 + ```bash 1096 + # List all secrets 1097 + bao kv list spindle/ 1098 + 1099 + # Add a test secret via the spindle API, then check it exists 1100 + bao kv list spindle/repos/ 1101 + 1102 + # Get a specific secret 1103 + bao kv get spindle/repos/your_repo_path/SECRET_NAME 1104 + ``` 1105 + 1106 + ### How it works 1107 + 1108 + - Spindle connects to OpenBao Proxy on localhost (typically 1109 + port 8200 or 8201) 1110 + - The proxy authenticates with OpenBao using AppRole 1111 + credentials 1112 + - All spindle requests go through the proxy, which injects 1113 + authentication tokens 1114 + - Secrets are stored at 1115 + `spindle/repos/{sanitized_repo_path}/{secret_key}` 1116 + - Repository paths like `did:plc:alice/myrepo` become 1117 + `did_plc_alice_myrepo` 1118 + - The proxy handles all token renewal automatically 1119 + - Spindle no longer manages tokens or authentication 1120 + directly 1121 + 1122 + ### Troubleshooting 1123 + 1124 + **Connection refused**: Check that the OpenBao Proxy is 1125 + running and listening on the configured address. 1126 + 1127 + **403 errors**: Verify the AppRole credentials are correct 1128 + and the policy has the necessary permissions. 1129 + 1130 + **404 route errors**: The spindle KV mount probably doesn't 1131 + exist -- run the mount creation step again. 1132 + 1133 + **Proxy authentication failures**: Check the proxy logs and 1134 + verify the role-id and secret-id files are readable and 1135 + contain valid credentials. 1136 + 1137 + **Secret not found after writing**: This can indicate policy 1138 + permission issues. Verify the policy includes both 1139 + `spindle/data/*` and `spindle/metadata/*` paths with 1140 + appropriate capabilities. 1141 + 1142 + Check proxy logs: 1143 + 1144 + ```bash 1145 + # If running as systemd service 1146 + journalctl -u openbao-proxy -f 1147 + 1148 + # If running directly, check the console output 1149 + ``` 1150 + 1151 + Test AppRole authentication manually: 1152 + 1153 + ```bash 1154 + bao write auth/approle/login \ 1155 + role_id="$(cat /tmp/openbao/role-id)" \ 1156 + secret_id="$(cat /tmp/openbao/secret-id)" 1157 + ``` 1158 + 1159 + # Migrating knots and spindles 1160 + 1161 + Sometimes, non-backwards compatible changes are made to the 1162 + knot/spindle XRPC APIs. If you host a knot or a spindle, you 1163 + will need to follow this guide to upgrade.
Typically, this 1164 + only requires you to deploy the newest version. 1165 + 1166 + This document is laid out in reverse-chronological order. 1167 + Newer migration guides are listed first, and older guides 1168 + are further down the page. 1169 + 1170 + ## Upgrading from v1.8.x 1171 + 1172 + After v1.8.2, the HTTP API for knots and spindles has been 1173 + deprecated and replaced with XRPC. Repositories on outdated 1174 + knots will not be viewable from the appview. Upgrading is 1175 + straightforward however. 1176 + 1177 + For knots: 1178 + 1179 + - Upgrade to the latest tag (v1.9.0 or above) 1180 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1181 + hit the "retry" button to verify your knot 1182 + 1183 + For spindles: 1184 + 1185 + - Upgrade to the latest tag (v1.9.0 or above) 1186 + - Head to the [spindle 1187 + dashboard](https://tangled.org/settings/spindles) and hit the 1188 + "retry" button to verify your spindle 1189 + 1190 + ## Upgrading from v1.7.x 1191 + 1192 + After v1.7.0, knot secrets have been deprecated. You no 1193 + longer need a secret from the appview to run a knot. All 1194 + authorized commands to knots are managed via [Inter-Service 1195 + Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 1196 + Knots will be read-only until upgraded. 1197 + 1198 + Upgrading is quite easy, in essence: 1199 + 1200 + - `KNOT_SERVER_SECRET` is no more, you can remove this 1201 + environment variable entirely 1202 + - `KNOT_SERVER_OWNER` is now required on boot, set this to 1203 + your DID. You can find your DID in the 1204 + [settings](https://tangled.org/settings) page. 1205 + - Restart your knot once you have replaced the environment 1206 + variable 1207 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1208 + hit the "retry" button to verify your knot. This simply 1209 + writes a `sh.tangled.knot` record to your PDS. 1210 + 1211 + If you use the nix module, simply bump the flake to the 1212 + latest revision, and change your config block like so: 1213 + 1214 + ```diff 1215 + services.tangled.knot = { 1216 + enable = true; 1217 + server = { 1218 + - secretFile = /path/to/secret; 1219 + + owner = "did:plc:foo"; 1220 + }; 1221 + }; 1222 + ``` 1223 + 1224 + # Hacking on Tangled 1225 + 1226 + We highly recommend [installing 1227 + Nix](https://nixos.org/download/) (the package manager) 1228 + before working on the codebase. The Nix flake provides a lot 1229 + of helpers to get started and most importantly, builds and 1230 + dev shells are entirely deterministic. 1231 + 1232 + To set up your dev environment: 1233 + 1234 + ```bash 1235 + nix develop 1236 + ``` 1237 + 1238 + Non-Nix users can look at the `devShell` attribute in the 1239 + `flake.nix` file to determine necessary dependencies. 
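If you'd rather not work from an interactive dev shell, `nix develop` can also wrap one-off commands so they run with the project's toolchain. The exact commands below are just illustrations:

```bash
# run a single command inside the dev shell without staying in it
nix develop -c go build ./...

# or load the dev environment into your usual shell
nix develop -c $SHELL
```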
1240 + 1241 + ## Running the appview 1242 + 1243 + The Nix flake also exposes a few `app` attributes (run `nix 1244 + flake show` to see a full list of what the flake provides); 1245 + one of the apps runs the appview with the `air` 1246 + live-reloader: 1247 + 1248 + ```bash 1249 + TANGLED_DEV=true nix run .#watch-appview 1250 + 1251 + # TANGLED_DB_PATH might be of interest to point to 1252 + # different sqlite DBs 1253 + 1254 + # in a separate shell, you can live-reload tailwind 1255 + nix run .#watch-tailwind 1256 + ``` 1257 + 1258 + To authenticate with the appview, you will need Redis and 1259 + OAuth JWKs to be set up: 1260 + 1261 + ``` 1262 + # OAuth JWKs should already be set up by the Nix devshell: 1263 + echo $TANGLED_OAUTH_CLIENT_SECRET 1264 + z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc 1265 + 1266 + echo $TANGLED_OAUTH_CLIENT_KID 1267 + 1761667908 1268 + 1269 + # if not, you can set it up yourself: 1270 + goat key generate -t P-256 1271 + Key Type: P-256 / secp256r1 / ES256 private key 1272 + Secret Key (Multibase Syntax): save this securely (eg, add to password manager) 1273 + z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL 1274 + Public Key (DID Key Syntax): share or publish this (eg, in DID document) 1275 + did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR 1276 + 1277 + # the secret key from above 1278 + export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..." 1279 + 1280 + # Run Redis in a new shell to store OAuth sessions 1281 + redis-server 1282 + ``` 1283 + 1284 + ## Running knots and spindles 1285 + 1286 + An end-to-end knot setup requires setting up a machine with 1287 + `sshd`, `AuthorizedKeysCommand`, and a Git user, which is 1288 + quite cumbersome. So the Nix flake provides a 1289 + `nixosConfiguration` to do so. 1290 + 1291 + <details> 1292 + <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary> 1293 + 1294 + In order to build Tangled's dev VM on macOS, you will 1295 + first need to set up a Linux Nix builder. The recommended 1296 + way to do so is to run a [`darwin.linux-builder` 1297 + VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) 1298 + and to register it in `nix.conf` as a builder for Linux 1299 + with the same architecture as your Mac (`aarch64-linux` if 1300 + you are using Apple Silicon). 1301 + 1302 + > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside 1303 + > the Tangled repo so that it doesn't conflict with the other VM. For example, 1304 + > you can do 1305 + > 1306 + > ```shell 1307 + > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder 1308 + > ``` 1309 + > 1310 + > to store the builder VM in a temporary dir. 1311 + > 1312 + > You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to 1313 + > avoid subtle problems. 1314 + 1315 + Alternatively, you can use any other method to set up a 1316 + Linux machine with Nix installed that you can `sudo ssh` 1317 + into (in other words, the root user on your Mac has to be able 1318 + to ssh into the Linux machine without entering a password) 1319 + and that has the same architecture as your Mac. See 1320 + [remote builder 1321 + instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements) 1322 + for how to register such a builder in `nix.conf`.
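For orientation, a registered builder in `nix.conf` ends up looking roughly like the line below. Treat it as a sketch: the user, host alias, key path, and job count are placeholders, and the linked instructions above give the exact values for your builder.

```
# ~/.config/nix/nix.conf or /etc/nix/nix.conf -- placeholder values
# fields: URI  system  ssh-identity-file  max-jobs
builders = ssh-ng://builder@linux-builder aarch64-linux /etc/nix/builder_ed25519 4
```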
1323 + 1324 + > WARNING: If you'd like to use 1325 + > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 1326 + > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 1327 + > ssh` works can be tricky. It seems to be [possible with 1328 + > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 1329 + 1330 + </details> 1331 + 1332 + To begin, grab your DID from http://localhost:3000/settings. 1333 + Then, set `TANGLED_VM_KNOT_OWNER` and 1334 + `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 1335 + lightweight NixOS VM like so: 1336 + 1337 + ```bash 1338 + nix run --impure .#vm 1339 + 1340 + # type `poweroff` at the shell to exit the VM 1341 + ``` 1342 + 1343 + This starts a knot on port 6444, a spindle on port 6555 1344 + with `ssh` exposed on port 2222. 1345 + 1346 + Once the services are running, head to 1347 + http://localhost:3000/settings/knots and hit "Verify". It should 1348 + verify the ownership of the services instantly if everything 1349 + went smoothly. 1350 + 1351 + You can push repositories to this VM with this ssh config 1352 + block on your main machine: 1353 + 1354 + ```bash 1355 + Host nixos-shell 1356 + Hostname localhost 1357 + Port 2222 1358 + User git 1359 + IdentityFile ~/.ssh/my_tangled_key 1360 + ``` 1361 + 1362 + Set up a remote called `local-dev` on a git repo: 1363 + 1364 + ```bash 1365 + git remote add local-dev git@nixos-shell:user/repo 1366 + git push local-dev main 1367 + ``` 1368 + 1369 + The above VM should already be running a spindle on 1370 + `localhost:6555`. Head to http://localhost:3000/settings/spindles and 1371 + hit "Verify". You can then configure each repository to use 1372 + this spindle and run CI jobs. 1373 + 1374 + Of interest when debugging spindles: 1375 + 1376 + ``` 1377 + # Service logs from journald: 1378 + journalctl -xeu spindle 1379 + 1380 + # CI job logs from disk: 1381 + ls /var/log/spindle 1382 + 1383 + # Debugging spindle database: 1384 + sqlite3 /var/lib/spindle/spindle.db 1385 + 1386 + # litecli has a nicer REPL interface: 1387 + litecli /var/lib/spindle/spindle.db 1388 + ``` 1389 + 1390 + If for any reason you wish to disable either one of the 1391 + services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 1392 + `services.tangled.spindle.enable` (or 1393 + `services.tangled.knot.enable`) to `false`. 1394 + 1395 + # Contribution guide 1396 + 1397 + ## Commit guidelines 1398 + 1399 + We follow a commit style similar to the Go project. Please keep commits: 1400 + 1401 + * **atomic**: each commit should represent one logical change 1402 + * **descriptive**: the commit message should clearly describe what the 1403 + change does and why it's needed 1404 + 1405 + ### Message format 1406 + 1407 + ``` 1408 + <service/top-level directory>/<affected package/directory>: <short summary of change> 1409 + 1410 + Optional longer description can go here, if necessary. Explain what the 1411 + change does and why, especially if not obvious. Reference relevant 1412 + issues or PRs when applicable. These can be links for now since we don't 1413 + auto-link issues/PRs yet. 1414 + ``` 1415 + 1416 + Here are some examples: 1417 + 1418 + ``` 1419 + appview/state: fix token expiry check in middleware 1420 + 1421 + The previous check did not account for clock drift, leading to premature 1422 + token invalidation. 
1423 + ``` 1424 + 1425 + ``` 1426 + knotserver/git/service: improve error checking in upload-pack 1427 + ``` 1428 + 1429 + 1430 + ### General notes 1431 + 1432 + - PRs get merged "as-is" (fast-forward) -- like applying a patch-series 1433 + using `git am`. At present, there is no squashing -- so please author 1434 + your commits as they would appear on `master`, following the above 1435 + guidelines. 1436 + - If there is a lot of nesting, for example "appview: 1437 + pages/templates/repo/fragments: ...", these can be truncated down to 1438 + just "appview: repo/fragments: ...". If the change affects a lot of 1439 + subdirectories, you may abbreviate to just the top-level names, e.g. 1440 + "appview: ..." or "knotserver: ...". 1441 + - Keep commits lowercased with no trailing period. 1442 + - Use the imperative mood in the summary line (e.g., "fix bug" not 1443 + "fixed bug" or "fixes bug"). 1444 + - Try to keep the summary line under 72 characters, but we aren't too 1445 + fussed about this. 1446 + - Follow the same formatting for PR titles if filled manually. 1447 + - Don't include unrelated changes in the same commit. 1448 + - Avoid noisy commit messages like "wip" or "final fix" -- rewrite history 1449 + before submitting if necessary. 1450 + 1451 + ## Code formatting 1452 + 1453 + We use a variety of tools to format our code, and multiplex them with 1454 + [`treefmt`](https://treefmt.com). All you need to do to format your changes 1455 + is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 1456 + 1457 + ## Proposals for bigger changes 1458 + 1459 + Small fixes like typos, minor bugs, or trivial refactors can be 1460 + submitted directly as PRs. 1461 + 1462 + For larger changes -- especially those introducing new features, significant 1463 + refactoring, or altering system behavior -- please open a proposal first. This 1464 + helps us evaluate the scope, design, and potential impact before implementation. 1465 + 1466 + Create a new issue titled: 1467 + 1468 + ``` 1469 + proposal: <affected scope>: <summary of change> 1470 + ``` 1471 + 1472 + In the description, explain: 1473 + 1474 + - What the change is 1475 + - Why it's needed 1476 + - How you plan to implement it (roughly) 1477 + - Any open questions or tradeoffs 1478 + 1479 + We'll use the issue thread to discuss and refine the idea before moving 1480 + forward. 1481 + 1482 + ## Developer Certificate of Origin (DCO) 1483 + 1484 + We require all contributors to certify that they have the right to 1485 + submit the code they're contributing. To do this, we follow the 1486 + [Developer Certificate of Origin 1487 + (DCO)](https://developercertificate.org/). 1488 + 1489 + By signing your commits, you're stating that the contribution is your 1490 + own work, or that you have the right to submit it under the project's 1491 + license. This helps us keep things clean and legally sound. 1492 + 1493 + To sign your commit, just add the `-s` flag when committing: 1494 + 1495 + ```sh 1496 + git commit -s -m "your commit message" 1497 + ``` 1498 + 1499 + This appends a line like: 1500 + 1501 + ``` 1502 + Signed-off-by: Your Name <your.email@example.com> 1503 + ``` 1504 + 1505 + We won't merge commits if they aren't signed off. If you forget, you can 1506 + amend the last commit like this: 1507 + 1508 + ```sh 1509 + git commit --amend -s 1510 + ``` 1511 + 1512 + If you're submitting a PR with multiple commits, make sure each one is 1513 + signed.
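If you only notice missing sign-offs after you've made several commits, you don't have to amend them one at a time; `git rebase --signoff` adds the trailer to every commit it re-applies (here assuming your branch is based on `master`):

```sh
# re-applies each commit on top of master with a Signed-off-by trailer added
git rebase --signoff master
```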
1514 + 1515 + For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 1516 + to make it sign off commits in the tangled repo: 1517 + 1518 + ```shell 1519 + # Safety check, should say "No matching config key..." 1520 + jj config list templates.commit_trailers 1521 + # The command below may need to be adjusted if the command above returned something. 1522 + jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 1523 + ``` 1524 + 1525 + Refer to the [jujutsu 1526 + documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 1527 + for more information.
-136
docs/contributing.md
··· 1 - # tangled contributing guide 2 - 3 - ## commit guidelines 4 - 5 - We follow a commit style similar to the Go project. Please keep commits: 6 - 7 - * **atomic**: each commit should represent one logical change 8 - * **descriptive**: the commit message should clearly describe what the 9 - change does and why it's needed 10 - 11 - ### message format 12 - 13 - ``` 14 - <service/top-level directory>/<affected package/directory>: <short summary of change> 15 - 16 - 17 - Optional longer description can go here, if necessary. Explain what the 18 - change does and why, especially if not obvious. Reference relevant 19 - issues or PRs when applicable. These can be links for now since we don't 20 - auto-link issues/PRs yet. 21 - ``` 22 - 23 - Here are some examples: 24 - 25 - ``` 26 - appview/state: fix token expiry check in middleware 27 - 28 - The previous check did not account for clock drift, leading to premature 29 - token invalidation. 30 - ``` 31 - 32 - ``` 33 - knotserver/git/service: improve error checking in upload-pack 34 - ``` 35 - 36 - 37 - ### general notes 38 - 39 - - PRs get merged "as-is" (fast-forward) -- like applying a patch-series 40 - using `git am`. At present, there is no squashing -- so please author 41 - your commits as they would appear on `master`, following the above 42 - guidelines. 43 - - If there is a lot of nesting, for example "appview: 44 - pages/templates/repo/fragments: ...", these can be truncated down to 45 - just "appview: repo/fragments: ...". If the change affects a lot of 46 - subdirectories, you may abbreviate to just the top-level names, e.g. 47 - "appview: ..." or "knotserver: ...". 48 - - Keep commits lowercased with no trailing period. 49 - - Use the imperative mood in the summary line (e.g., "fix bug" not 50 - "fixed bug" or "fixes bug"). 51 - - Try to keep the summary line under 72 characters, but we aren't too 52 - fussed about this. 53 - - Follow the same formatting for PR titles if filled manually. 54 - - Don't include unrelated changes in the same commit. 55 - - Avoid noisy commit messages like "wip" or "final fix"โ€”rewrite history 56 - before submitting if necessary. 57 - 58 - ## code formatting 59 - 60 - We use a variety of tools to format our code, and multiplex them with 61 - [`treefmt`](https://treefmt.com): all you need to do to format your changes 62 - is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 63 - 64 - ## proposals for bigger changes 65 - 66 - Small fixes like typos, minor bugs, or trivial refactors can be 67 - submitted directly as PRs. 68 - 69 - For larger changesโ€”especially those introducing new features, significant 70 - refactoring, or altering system behaviorโ€”please open a proposal first. This 71 - helps us evaluate the scope, design, and potential impact before implementation. 72 - 73 - ### proposal format 74 - 75 - Create a new issue titled: 76 - 77 - ``` 78 - proposal: <affected scope>: <summary of change> 79 - ``` 80 - 81 - In the description, explain: 82 - 83 - - What the change is 84 - - Why it's needed 85 - - How you plan to implement it (roughly) 86 - - Any open questions or tradeoffs 87 - 88 - We'll use the issue thread to discuss and refine the idea before moving 89 - forward. 90 - 91 - ## developer certificate of origin (DCO) 92 - 93 - We require all contributors to certify that they have the right to 94 - submit the code they're contributing. To do this, we follow the 95 - [Developer Certificate of Origin 96 - (DCO)](https://developercertificate.org/). 
97 - 98 - By signing your commits, you're stating that the contribution is your 99 - own work, or that you have the right to submit it under the project's 100 - license. This helps us keep things clean and legally sound. 101 - 102 - To sign your commit, just add the `-s` flag when committing: 103 - 104 - ```sh 105 - git commit -s -m "your commit message" 106 - ``` 107 - 108 - This appends a line like: 109 - 110 - ``` 111 - Signed-off-by: Your Name <your.email@example.com> 112 - ``` 113 - 114 - We won't merge commits if they aren't signed off. If you forget, you can 115 - amend the last commit like this: 116 - 117 - ```sh 118 - git commit --amend -s 119 - ``` 120 - 121 - If you're submitting a PR with multiple commits, make sure each one is 122 - signed. 123 - 124 - For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 125 - to make it sign off commits in the tangled repo: 126 - 127 - ```shell 128 - # Safety check, should say "No matching config key..." 129 - jj config list templates.commit_trailers 130 - # The command below may need to be adjusted if the command above returned something. 131 - jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 132 - ``` 133 - 134 - Refer to the [jj 135 - documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 136 - for more information.
-172
docs/hacking.md
··· 1 - # hacking on tangled 2 - 3 - We highly recommend [installing 4 - nix](https://nixos.org/download/) (the package manager) 5 - before working on the codebase. The nix flake provides a lot 6 - of helpers to get started and most importantly, builds and 7 - dev shells are entirely deterministic. 8 - 9 - To set up your dev environment: 10 - 11 - ```bash 12 - nix develop 13 - ``` 14 - 15 - Non-nix users can look at the `devShell` attribute in the 16 - `flake.nix` file to determine necessary dependencies. 17 - 18 - ## running the appview 19 - 20 - The nix flake also exposes a few `app` attributes (run `nix 21 - flake show` to see a full list of what the flake provides), 22 - one of the apps runs the appview with the `air` 23 - live-reloader: 24 - 25 - ```bash 26 - TANGLED_DEV=true nix run .#watch-appview 27 - 28 - # TANGLED_DB_PATH might be of interest to point to 29 - # different sqlite DBs 30 - 31 - # in a separate shell, you can live-reload tailwind 32 - nix run .#watch-tailwind 33 - ``` 34 - 35 - To authenticate with the appview, you will need redis and 36 - OAUTH JWKs to be setup: 37 - 38 - ``` 39 - # oauth jwks should already be setup by the nix devshell: 40 - echo $TANGLED_OAUTH_CLIENT_SECRET 41 - z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc 42 - 43 - echo $TANGLED_OAUTH_CLIENT_KID 44 - 1761667908 45 - 46 - # if not, you can set it up yourself: 47 - goat key generate -t P-256 48 - Key Type: P-256 / secp256r1 / ES256 private key 49 - Secret Key (Multibase Syntax): save this securely (eg, add to password manager) 50 - z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL 51 - Public Key (DID Key Syntax): share or publish this (eg, in DID document) 52 - did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR 53 - 54 - # the secret key from above 55 - export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..." 56 - 57 - # run redis in at a new shell to store oauth sessions 58 - redis-server 59 - ``` 60 - 61 - ## running knots and spindles 62 - 63 - An end-to-end knot setup requires setting up a machine with 64 - `sshd`, `AuthorizedKeysCommand`, and git user, which is 65 - quite cumbersome. So the nix flake provides a 66 - `nixosConfiguration` to do so. 67 - 68 - <details> 69 - <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary> 70 - 71 - In order to build Tangled's dev VM on macOS, you will 72 - first need to set up a Linux Nix builder. The recommended 73 - way to do so is to run a [`darwin.linux-builder` 74 - VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) 75 - and to register it in `nix.conf` as a builder for Linux 76 - with the same architecture as your Mac (`linux-aarch64` if 77 - you are using Apple Silicon). 78 - 79 - > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside 80 - > the tangled repo so that it doesn't conflict with the other VM. For example, 81 - > you can do 82 - > 83 - > ```shell 84 - > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder 85 - > ``` 86 - > 87 - > to store the builder VM in a temporary dir. 88 - > 89 - > You should read and follow [all the other intructions][darwin builder vm] to 90 - > avoid subtle problems. 91 - 92 - Alternatively, you can use any other method to set up a 93 - Linux machine with `nix` installed that you can `sudo ssh` 94 - into (in other words, root user on your Mac has to be able 95 - to ssh into the Linux machine without entering a password) 96 - and that has the same architecture as your Mac. 
See 97 - [remote builder 98 - instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements) 99 - for how to register such a builder in `nix.conf`. 100 - 101 - > WARNING: If you'd like to use 102 - > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 103 - > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 104 - > ssh` works can be tricky. It seems to be [possible with 105 - > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 106 - 107 - </details> 108 - 109 - To begin, grab your DID from http://localhost:3000/settings. 110 - Then, set `TANGLED_VM_KNOT_OWNER` and 111 - `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 112 - lightweight NixOS VM like so: 113 - 114 - ```bash 115 - nix run --impure .#vm 116 - 117 - # type `poweroff` at the shell to exit the VM 118 - ``` 119 - 120 - This starts a knot on port 6444, a spindle on port 6555 121 - with `ssh` exposed on port 2222. 122 - 123 - Once the services are running, head to 124 - http://localhost:3000/settings/knots and hit verify. It should 125 - verify the ownership of the services instantly if everything 126 - went smoothly. 127 - 128 - You can push repositories to this VM with this ssh config 129 - block on your main machine: 130 - 131 - ```bash 132 - Host nixos-shell 133 - Hostname localhost 134 - Port 2222 135 - User git 136 - IdentityFile ~/.ssh/my_tangled_key 137 - ``` 138 - 139 - Set up a remote called `local-dev` on a git repo: 140 - 141 - ```bash 142 - git remote add local-dev git@nixos-shell:user/repo 143 - git push local-dev main 144 - ``` 145 - 146 - ### running a spindle 147 - 148 - The above VM should already be running a spindle on 149 - `localhost:6555`. Head to http://localhost:3000/settings/spindles and 150 - hit verify. You can then configure each repository to use 151 - this spindle and run CI jobs. 152 - 153 - Of interest when debugging spindles: 154 - 155 - ``` 156 - # service logs from journald: 157 - journalctl -xeu spindle 158 - 159 - # CI job logs from disk: 160 - ls /var/log/spindle 161 - 162 - # debugging spindle db: 163 - sqlite3 /var/lib/spindle/spindle.db 164 - 165 - # litecli has a nicer REPL interface: 166 - litecli /var/lib/spindle/spindle.db 167 - ``` 168 - 169 - If for any reason you wish to disable either one of the 170 - services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 171 - `services.tangled.spindle.enable` (or 172 - `services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
··· 1 + { 2 + "text-color": null, 3 + "background-color": null, 4 + "line-number-color": null, 5 + "line-number-background-color": null, 6 + "text-styles": { 7 + "Annotation": { 8 + "text-color": null, 9 + "background-color": null, 10 + "bold": false, 11 + "italic": true, 12 + "underline": false 13 + }, 14 + "ControlFlow": { 15 + "text-color": null, 16 + "background-color": null, 17 + "bold": true, 18 + "italic": false, 19 + "underline": false 20 + }, 21 + "Error": { 22 + "text-color": null, 23 + "background-color": null, 24 + "bold": true, 25 + "italic": false, 26 + "underline": false 27 + }, 28 + "Alert": { 29 + "text-color": null, 30 + "background-color": null, 31 + "bold": true, 32 + "italic": false, 33 + "underline": false 34 + }, 35 + "Preprocessor": { 36 + "text-color": null, 37 + "background-color": null, 38 + "bold": true, 39 + "italic": false, 40 + "underline": false 41 + }, 42 + "Information": { 43 + "text-color": null, 44 + "background-color": null, 45 + "bold": false, 46 + "italic": true, 47 + "underline": false 48 + }, 49 + "Warning": { 50 + "text-color": null, 51 + "background-color": null, 52 + "bold": false, 53 + "italic": true, 54 + "underline": false 55 + }, 56 + "Documentation": { 57 + "text-color": null, 58 + "background-color": null, 59 + "bold": false, 60 + "italic": true, 61 + "underline": false 62 + }, 63 + "DataType": { 64 + "text-color": "#8f4e8b", 65 + "background-color": null, 66 + "bold": false, 67 + "italic": false, 68 + "underline": false 69 + }, 70 + "Comment": { 71 + "text-color": null, 72 + "background-color": null, 73 + "bold": false, 74 + "italic": true, 75 + "underline": false 76 + }, 77 + "CommentVar": { 78 + "text-color": null, 79 + "background-color": null, 80 + "bold": false, 81 + "italic": true, 82 + "underline": false 83 + }, 84 + "Keyword": { 85 + "text-color": null, 86 + "background-color": null, 87 + "bold": true, 88 + "italic": false, 89 + "underline": false 90 + } 91 + } 92 + } 93 +
-214
docs/knot-hosting.md
··· 1 - # knot self-hosting guide 2 - 3 - So you want to run your own knot server? Great! Here are a few prerequisites: 4 - 5 - 1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind. 6 - 2. A (sub)domain name. People generally use `knot.example.com`. 7 - 3. A valid SSL certificate for your domain. 8 - 9 - There's a couple of ways to get started: 10 - * NixOS: refer to 11 - [flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix) 12 - * Docker: Documented at 13 - [@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker) 14 - (community maintained: support is not guaranteed!) 15 - * Manual: Documented below. 16 - 17 - ## manual setup 18 - 19 - First, clone this repository: 20 - 21 - ``` 22 - git clone https://tangled.org/@tangled.org/core 23 - ``` 24 - 25 - Then, build the `knot` CLI. This is the knot administration and operation tool. 26 - For the purpose of this guide, we're only concerned with these subcommands: 27 - 28 - * `knot server`: the main knot server process, typically run as a 29 - supervised service 30 - * `knot guard`: handles role-based access control for git over SSH 31 - (you'll never have to run this yourself) 32 - * `knot keys`: fetches SSH keys associated with your knot; we'll use 33 - this to generate the SSH `AuthorizedKeysCommand` 34 - 35 - ``` 36 - cd core 37 - export CGO_ENABLED=1 38 - go build -o knot ./cmd/knot 39 - ``` 40 - 41 - Next, move the `knot` binary to a location owned by `root` -- 42 - `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 43 - 44 - ``` 45 - sudo mv knot /usr/local/bin/knot 46 - sudo chown root:root /usr/local/bin/knot 47 - ``` 48 - 49 - This is necessary because SSH `AuthorizedKeysCommand` requires [really 50 - specific permissions](https://stackoverflow.com/a/27638306). The 51 - `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 52 - retrieve a user's public SSH keys dynamically for authentication. Let's 53 - set that up. 54 - 55 - ``` 56 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 57 - Match User git 58 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 59 - AuthorizedKeysCommandUser nobody 60 - EOF 61 - ``` 62 - 63 - Then, reload `sshd`: 64 - 65 - ``` 66 - sudo systemctl reload ssh 67 - ``` 68 - 69 - Next, create the `git` user. We'll use the `git` user's home directory 70 - to store repositories: 71 - 72 - ``` 73 - sudo adduser git 74 - ``` 75 - 76 - Create `/home/git/.knot.env` with the following, updating the values as 77 - necessary. The `KNOT_SERVER_OWNER` should be set to your 78 - DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 79 - 80 - ``` 81 - KNOT_REPO_SCAN_PATH=/home/git 82 - KNOT_SERVER_HOSTNAME=knot.example.com 83 - APPVIEW_ENDPOINT=https://tangled.sh 84 - KNOT_SERVER_OWNER=did:plc:foobar 85 - KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 86 - KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 87 - ``` 88 - 89 - If you run a Linux distribution that uses systemd, you can use the provided 90 - service file to run the server. Copy 91 - [`knotserver.service`](/systemd/knotserver.service) 92 - to `/etc/systemd/system/`. Then, run: 93 - 94 - ``` 95 - systemctl enable knotserver 96 - systemctl start knotserver 97 - ``` 98 - 99 - The last step is to configure a reverse proxy like Nginx or Caddy to front your 100 - knot. 
Here's an example configuration for Nginx:
101 -
102 - ```
103 - server {
104 -     listen 80;
105 -     listen [::]:80;
106 -     server_name knot.example.com;
107 -
108 -     location / {
109 -         proxy_pass http://localhost:5555;
110 -         proxy_set_header Host $host;
111 -         proxy_set_header X-Real-IP $remote_addr;
112 -         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113 -         proxy_set_header X-Forwarded-Proto $scheme;
114 -     }
115 -
116 -     # wss endpoint for git events
117 -     location /events {
118 -         proxy_set_header X-Forwarded-For $remote_addr;
119 -         proxy_set_header Host $http_host;
120 -         proxy_set_header Upgrade websocket;
121 -         proxy_set_header Connection Upgrade;
122 -         proxy_pass http://localhost:5555;
123 -     }
124 -     # additional config for SSL/TLS goes here.
125 - }
126 -
127 - ```
128 -
129 - Remember to use Let's Encrypt or similar to procure a certificate for your
130 - knot domain.
131 -
132 - You should now have a running knot server! You can finalize
133 - your registration by hitting the `verify` button on the
134 - [/settings/knots](https://tangled.org/settings/knots) page. This simply creates
135 - a record on your PDS to announce the existence of the knot.
136 -
137 - ### custom paths
138 -
139 - (This section applies to manual setup only. Docker users should edit the mounts
140 - in `docker-compose.yml` instead.)
141 -
142 - Right now, the database and repositories of your knot live in `/home/git`. You
143 - can move these paths if you'd like to store them in another folder. Be careful
144 - when adjusting these paths:
145 -
146 - * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147 -   any possible side effects. Remember to restart it once you're done.
148 - * Make backups before moving in case something goes wrong.
149 - * Make sure the `git` user can read and write from the new paths.
150 -
151 - #### database
152 -
153 - As an example, let's say the current database is at `/home/git/knotserver.db`,
154 - and we want to move it to `/home/git/database/knotserver.db`.
155 -
156 - Copy the current database to the new location. Make sure to copy the `.db-shm`
157 - and `.db-wal` files if they exist.
158 -
159 - ```
160 - mkdir /home/git/database
161 - cp /home/git/knotserver.db* /home/git/database
162 - ```
163 -
164 - In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165 - the new file path (_not_ the directory):
166 -
167 - ```
168 - KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169 - ```
170 -
171 - #### repositories
172 -
173 - As an example, let's say the repositories are currently in `/home/git`, and we
174 - want to move them into `/home/git/repositories`.
175 -
176 - Create the new folder, then move the existing repositories (if there are any):
177 -
178 - ```
179 - mkdir /home/git/repositories
180 - # move all DIDs into the new folder; these will vary for you!
181 - mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182 - ```
183 -
184 - In the environment (e.g.
`/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 185 - to the new directory: 186 - 187 - ``` 188 - KNOT_REPO_SCAN_PATH=/home/git/repositories 189 - ``` 190 - 191 - Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 192 - repository path: 193 - 194 - ``` 195 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 196 - Match User git 197 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 198 - AuthorizedKeysCommandUser nobody 199 - EOF 200 - ``` 201 - 202 - Make sure to restart your SSH server! 203 - 204 - #### MOTD (message of the day) 205 - 206 - To configure the MOTD used ("Welcome to this knot!" by default), edit the 207 - `/home/git/motd` file: 208 - 209 - ``` 210 - printf "Hi from this knot!\n" > /home/git/motd 211 - ``` 212 - 213 - Note that you should add a newline at the end if setting a non-empty message 214 - since the knot won't do this for you.
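The guide above only shows an Nginx configuration. As a rough sketch of the Caddy equivalent (Caddy being the other reverse proxy it mentions), the following Caddyfile is an assumption based on the hostname and port used in the examples, not a config shipped with the repo; Caddy provisions TLS automatically and proxies the websocket `/events` endpoint without extra configuration:

```
knot.example.com {
    reverse_proxy localhost:5555
}
```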
-59
docs/migrations.md
··· 1 - # Migrations
2 -
3 - This document is laid out in reverse-chronological order.
4 - Newer migration guides are listed first, and older guides
5 - are further down the page.
6 -
7 - ## Upgrading from v1.8.x
8 -
9 - After v1.8.2, the HTTP APIs for knots and spindles have been
10 - deprecated and replaced with XRPC. Repositories on outdated
11 - knots will not be viewable from the appview. Upgrading is
12 - straightforward, however.
13 -
14 - For knots:
15 -
16 - - Upgrade to latest tag (v1.9.0 or above)
17 - - Head to the [knot dashboard](https://tangled.org/settings/knots) and
18 -   hit the "retry" button to verify your knot
19 -
20 - For spindles:
21 -
22 - - Upgrade to latest tag (v1.9.0 or above)
23 - - Head to the [spindle
24 -   dashboard](https://tangled.org/settings/spindles) and hit the
25 -   "retry" button to verify your spindle
26 -
27 - ## Upgrading from v1.7.x
28 -
29 - After v1.7.0, knot secrets have been deprecated. You no
30 - longer need a secret from the appview to run a knot. All
31 - authorized commands to knots are managed via [Inter-Service
32 - Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33 - Knots will be read-only until upgraded.
34 -
35 - Upgrading is quite easy, in essence:
36 -
37 - - `KNOT_SERVER_SECRET` is no more; you can remove this
38 -   environment variable entirely
39 - - `KNOT_SERVER_OWNER` is now required on boot; set this to
40 -   your DID. You can find your DID in the
41 -   [settings](https://tangled.org/settings) page.
42 - - Restart your knot once you have updated the environment
43 -   variables
44 - - Head to the [knot dashboard](https://tangled.org/settings/knots) and
45 -   hit the "retry" button to verify your knot. This simply
46 -   writes a `sh.tangled.knot` record to your PDS.
47 -
48 - If you use the nix module, simply bump the flake to the
49 - latest revision, and change your config block like so:
50 -
51 - ```diff
52 - services.tangled.knot = {
53 -   enable = true;
54 -   server = {
55 - -   secretFile = /path/to/secret;
56 - +   owner = "did:plc:foo";
57 -   };
58 - };
59 - ```
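For knots configured with a plain environment file rather than the nix module, the v1.7.x change amounts to swapping one variable for the other in `/home/git/.knot.env` (the DID and secret below are placeholders):

```diff
- KNOT_SERVER_SECRET=your-old-secret
+ KNOT_SERVER_OWNER=did:plc:foo
```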
+3
docs/mode.html
··· 1 + <a class="px-4 py-2 mt-8 block text-center w-full rounded-sm shadow-sm border border-gray-200 dark:border-gray-700 no-underline hover:no-underline" href="$if(single-page)$/$else$/single-page.html$endif$"> 2 + $if(single-page)$View as multi-page$else$View as single-page$endif$ 3 + </a>
+7
docs/search.html
··· 1 + <form action="https://google.com/search" role="search" aria-label="Sitewide" class="w-full"> 2 + <input type="hidden" name="q" value="+[inurl:https://docs.tangled.org]"> 3 + <label> 4 + <span style="display:none;">Search</span> 5 + <input type="text" name="q" placeholder="Search docs ..." class="w-full font-normal"> 6 + </label> 7 + </form>
-25
docs/spindle/architecture.md
··· 1 - # spindle architecture 2 - 3 - Spindle is a small CI runner service. Here's a high level overview of how it operates: 4 - 5 - * listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and 6 - [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream. 7 - * when a new repo record comes through (typically when you add a spindle to a 8 - repo from the settings), spindle then resolves the underlying knot and 9 - subscribes to repo events (see: 10 - [`sh.tangled.pipeline`](/lexicons/pipeline.json)). 11 - * the spindle engine then handles execution of the pipeline, with results and 12 - logs beamed on the spindle event stream over wss 13 - 14 - ### the engine 15 - 16 - At present, the only supported backend is Docker (and Podman, if Docker 17 - compatibility is enabled, so that `/run/docker.sock` is created). Spindle 18 - executes each step in the pipeline in a fresh container, with state persisted 19 - across steps within the `/tangled/workspace` directory. 20 - 21 - The base image for the container is constructed on the fly using 22 - [Nixery](https://nixery.dev), which is handy for caching layers for frequently 23 - used packages. 24 - 25 - The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
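To make the Nixery mechanism concrete: Nixery serves images whose layers are derived from the package names in the image path, so a base image bundling a shell plus a few nixpkgs packages can be pulled by name alone. This example uses the public nixery.dev instance rather than whichever instance a given spindle is configured with:

```
docker pull nixery.dev/shell/git/go
```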
-52
docs/spindle/hosting.md
··· 1 - # spindle self-hosting guide 2 - 3 - ## prerequisites 4 - 5 - * Go 6 - * Docker (the only supported backend currently) 7 - 8 - ## configuration 9 - 10 - Spindle is configured using environment variables. The following environment variables are available: 11 - 12 - * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 13 - * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 14 - * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 15 - * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 16 - * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 17 - * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 18 - * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 19 - * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 20 - * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 21 - 22 - ## running spindle 23 - 24 - 1. **Set the environment variables.** For example: 25 - 26 - ```shell 27 - export SPINDLE_SERVER_HOSTNAME="your-hostname" 28 - export SPINDLE_SERVER_OWNER="your-did" 29 - ``` 30 - 31 - 2. **Build the Spindle binary.** 32 - 33 - ```shell 34 - cd core 35 - go mod download 36 - go build -o cmd/spindle/spindle cmd/spindle/main.go 37 - ``` 38 - 39 - 3. **Create the log directory.** 40 - 41 - ```shell 42 - sudo mkdir -p /var/log/spindle 43 - sudo chown $USER:$USER -R /var/log/spindle 44 - ``` 45 - 46 - 4. **Run the Spindle binary.** 47 - 48 - ```shell 49 - ./cmd/spindle/spindle 50 - ``` 51 - 52 - Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
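If you prefer to run spindle under a supervisor, the way the knot guide uses systemd for `knotserver`, a minimal unit sketch might look like the following. The binary path, service user, and environment file are assumptions rather than anything shipped with the repo, and the service user needs access to the Docker socket:

```
[Unit]
Description=spindle CI runner
After=network.target docker.service

[Service]
User=spindle
EnvironmentFile=/etc/spindle.env
ExecStart=/usr/local/bin/spindle
Restart=on-failure

[Install]
WantedBy=multi-user.target
```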
-285
docs/spindle/openbao.md
··· 1 - # spindle secrets with openbao
2 -
3 - This document covers setting up Spindle to use OpenBao for secrets
4 - management via OpenBao Proxy instead of the default SQLite backend.
5 -
6 - ## overview
7 -
8 - Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9 - authentication automatically using AppRole credentials, while Spindle
10 - connects to the local proxy instead of directly to the OpenBao server.
11 -
12 - This approach provides better security, automatic token renewal, and
13 - simplified application code.
14 -
15 - ## installation
16 -
17 - Install OpenBao from nixpkgs:
18 -
19 - ```bash
20 - nix shell nixpkgs#openbao # for a local server
21 - ```
22 -
23 - ## setup
24 -
25 - The setup process is documented for both local development and production.
26 -
27 - ### local development
28 -
29 - Start OpenBao in dev mode:
30 -
31 - ```bash
32 - bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8200
33 - ```
34 -
35 - This starts OpenBao on `http://localhost:8200` with a root token.
36 -
37 - Set up environment for bao CLI:
38 -
39 - ```bash
40 - export BAO_ADDR=http://localhost:8200
41 - export BAO_TOKEN=root
42 - ```
43 -
44 - ### production
45 -
46 - You would typically use a systemd service with a configuration file. Refer to
47 - [@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48 - achieved using Nix.
49 -
50 - Then, initialize the bao server:
51 - ```bash
52 - bao operator init -key-shares=1 -key-threshold=1
53 - ```
54 -
55 - This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56 - ```bash
57 - bao operator unseal <unseal_key>
58 - ```
59 -
60 - All steps below remain the same across both dev and production setups.
61 - 62 - ### configure openbao server 63 - 64 - Create the spindle KV mount: 65 - 66 - ```bash 67 - bao secrets enable -path=spindle -version=2 kv 68 - ``` 69 - 70 - Set up AppRole authentication and policy: 71 - 72 - Create a policy file `spindle-policy.hcl`: 73 - 74 - ```hcl 75 - # Full access to spindle KV v2 data 76 - path "spindle/data/*" { 77 - capabilities = ["create", "read", "update", "delete"] 78 - } 79 - 80 - # Access to metadata for listing and management 81 - path "spindle/metadata/*" { 82 - capabilities = ["list", "read", "delete", "update"] 83 - } 84 - 85 - # Allow listing at root level 86 - path "spindle/" { 87 - capabilities = ["list"] 88 - } 89 - 90 - # Required for connection testing and health checks 91 - path "auth/token/lookup-self" { 92 - capabilities = ["read"] 93 - } 94 - ``` 95 - 96 - Apply the policy and create an AppRole: 97 - 98 - ```bash 99 - bao policy write spindle-policy spindle-policy.hcl 100 - bao auth enable approle 101 - bao write auth/approle/role/spindle \ 102 - token_policies="spindle-policy" \ 103 - token_ttl=1h \ 104 - token_max_ttl=4h \ 105 - bind_secret_id=true \ 106 - secret_id_ttl=0 \ 107 - secret_id_num_uses=0 108 - ``` 109 - 110 - Get the credentials: 111 - 112 - ```bash 113 - # Get role ID (static) 114 - ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 115 - 116 - # Generate secret ID 117 - SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 118 - 119 - echo "Role ID: $ROLE_ID" 120 - echo "Secret ID: $SECRET_ID" 121 - ``` 122 - 123 - ### create proxy configuration 124 - 125 - Create the credential files: 126 - 127 - ```bash 128 - # Create directory for OpenBao files 129 - mkdir -p /tmp/openbao 130 - 131 - # Save credentials 132 - echo "$ROLE_ID" > /tmp/openbao/role-id 133 - echo "$SECRET_ID" > /tmp/openbao/secret-id 134 - chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 135 - ``` 136 - 137 - Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 138 - 139 - ```hcl 140 - # OpenBao server connection 141 - vault { 142 - address = "http://localhost:8200" 143 - } 144 - 145 - # Auto-Auth using AppRole 146 - auto_auth { 147 - method "approle" { 148 - mount_path = "auth/approle" 149 - config = { 150 - role_id_file_path = "/tmp/openbao/role-id" 151 - secret_id_file_path = "/tmp/openbao/secret-id" 152 - } 153 - } 154 - 155 - # Optional: write token to file for debugging 156 - sink "file" { 157 - config = { 158 - path = "/tmp/openbao/token" 159 - mode = 0640 160 - } 161 - } 162 - } 163 - 164 - # Proxy listener for Spindle 165 - listener "tcp" { 166 - address = "127.0.0.1:8201" 167 - tls_disable = true 168 - } 169 - 170 - # Enable API proxy with auto-auth token 171 - api_proxy { 172 - use_auto_auth_token = true 173 - } 174 - 175 - # Enable response caching 176 - cache { 177 - use_auto_auth_token = true 178 - } 179 - 180 - # Logging 181 - log_level = "info" 182 - ``` 183 - 184 - ### start the proxy 185 - 186 - Start OpenBao Proxy: 187 - 188 - ```bash 189 - bao proxy -config=/tmp/openbao/proxy.hcl 190 - ``` 191 - 192 - The proxy will authenticate with OpenBao and start listening on 193 - `127.0.0.1:8201`. 
194 - 195 - ### configure spindle 196 - 197 - Set these environment variables for Spindle: 198 - 199 - ```bash 200 - export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 201 - export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 202 - export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 203 - ``` 204 - 205 - Start Spindle: 206 - 207 - Spindle will now connect to the local proxy, which handles all 208 - authentication automatically. 209 - 210 - ## production setup for proxy 211 - 212 - For production, you'll want to run the proxy as a service: 213 - 214 - Place your production configuration in `/etc/openbao/proxy.hcl` with 215 - proper TLS settings for the vault connection. 216 - 217 - ## verifying setup 218 - 219 - Test the proxy directly: 220 - 221 - ```bash 222 - # Check proxy health 223 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 224 - 225 - # Test token lookup through proxy 226 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 227 - ``` 228 - 229 - Test OpenBao operations through the server: 230 - 231 - ```bash 232 - # List all secrets 233 - bao kv list spindle/ 234 - 235 - # Add a test secret via Spindle API, then check it exists 236 - bao kv list spindle/repos/ 237 - 238 - # Get a specific secret 239 - bao kv get spindle/repos/your_repo_path/SECRET_NAME 240 - ``` 241 - 242 - ## how it works 243 - 244 - - Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201) 245 - - The proxy authenticates with OpenBao using AppRole credentials 246 - - All Spindle requests go through the proxy, which injects authentication tokens 247 - - Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}` 248 - - Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo` 249 - - The proxy handles all token renewal automatically 250 - - Spindle no longer manages tokens or authentication directly 251 - 252 - ## troubleshooting 253 - 254 - **Connection refused**: Check that the OpenBao Proxy is running and 255 - listening on the configured address. 256 - 257 - **403 errors**: Verify the AppRole credentials are correct and the policy 258 - has the necessary permissions. 259 - 260 - **404 route errors**: The spindle KV mount probably doesn't exist - run 261 - the mount creation step again. 262 - 263 - **Proxy authentication failures**: Check the proxy logs and verify the 264 - role-id and secret-id files are readable and contain valid credentials. 265 - 266 - **Secret not found after writing**: This can indicate policy permission 267 - issues. Verify the policy includes both `spindle/data/*` and 268 - `spindle/metadata/*` paths with appropriate capabilities. 269 - 270 - Check proxy logs: 271 - 272 - ```bash 273 - # If running as systemd service 274 - journalctl -u openbao-proxy -f 275 - 276 - # If running directly, check the console output 277 - ``` 278 - 279 - Test AppRole authentication manually: 280 - 281 - ```bash 282 - bao write auth/approle/login \ 283 - role_id="$(cat /tmp/openbao/role-id)" \ 284 - secret_id="$(cat /tmp/openbao/secret-id)" 285 - ```
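The "configure spindle" section above stops at "Start Spindle:" without giving a command. Assuming the binary built in the spindle self-hosting guide (`./cmd/spindle/spindle`), the full sequence is roughly:

```bash
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
./cmd/spindle/spindle
```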
-183
docs/spindle/pipeline.md
··· 1 - # spindle pipelines 2 - 3 - Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML. 4 - 5 - The fields are: 6 - 7 - - [Trigger](#trigger): A **required** field that defines when a workflow should be triggered. 8 - - [Engine](#engine): A **required** field that defines which engine a workflow should run on. 9 - - [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned. 10 - - [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need. 11 - - [Environment](#environment): An **optional** field that allows you to define environment variables. 12 - - [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow. 13 - 14 - ## Trigger 15 - 16 - The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields: 17 - 18 - - `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values: 19 - - `push`: The workflow should run every time a commit is pushed to the repository. 20 - - `pull_request`: The workflow should run every time a pull request is made or updated. 21 - - `manual`: The workflow can be triggered manually. 22 - - `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events. 23 - - `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events. 24 - 25 - For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with: 26 - 27 - ```yaml 28 - when: 29 - - event: ["push", "manual"] 30 - branch: ["main", "develop"] 31 - - event: ["pull_request"] 32 - branch: ["main"] 33 - ``` 34 - 35 - You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed: 36 - 37 - ```yaml 38 - when: 39 - - event: ["push"] 40 - tag: ["v*"] 41 - ``` 42 - 43 - You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches): 44 - 45 - ```yaml 46 - when: 47 - - event: ["push"] 48 - branch: ["main", "release-*"] 49 - tag: ["v*", "stable"] 50 - ``` 51 - 52 - ## Engine 53 - 54 - Next is the engine on which the workflow should run, defined using the **required** `engine` field. 
The currently supported engines are:
55 -
56 - - `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57 -
58 - Example:
59 -
60 - ```yaml
61 - engine: "nixery"
62 - ```
63 -
64 - ## Clone options
65 -
66 - When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67 -
68 - - `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69 - - `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70 - - `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71 -
72 - The default settings are:
73 -
74 - ```yaml
75 - clone:
76 -   skip: false
77 -   depth: 1
78 -   submodules: false
79 - ```
80 -
81 - ## Dependencies
82 -
83 - Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84 -
85 - Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` that you've made available from your own registry at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86 -
87 - ```yaml
88 - dependencies:
89 -   # nixpkgs
90 -   nixpkgs:
91 -     - nodejs
92 -     - go
93 -   # custom registry
94 -   git+https://tangled.org/@example.com/my_pkg:
95 -     - my_pkg
96 - ```
97 -
98 - Now these dependencies are available to use in your workflow!
99 -
100 - ## Environment
101 -
102 - The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103 -
104 - Example:
105 -
106 - ```yaml
107 - environment:
108 -   GOOS: "linux"
109 -   GOARCH: "arm64"
110 -   NODE_ENV: "production"
111 -   MY_ENV_VAR: "MY_ENV_VALUE"
112 - ```
113 -
114 - ## Steps
115 -
116 - The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117 -
118 - - `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119 - - `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120 - - `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 121 - 122 - Example: 123 - 124 - ```yaml 125 - steps: 126 - - name: "Build backend" 127 - command: "go build" 128 - environment: 129 - GOOS: "darwin" 130 - GOARCH: "arm64" 131 - - name: "Build frontend" 132 - command: "npm run build" 133 - environment: 134 - NODE_ENV: "production" 135 - ``` 136 - 137 - ## Complete workflow 138 - 139 - ```yaml 140 - # .tangled/workflows/build.yml 141 - 142 - when: 143 - - event: ["push", "manual"] 144 - branch: ["main", "develop"] 145 - - event: ["pull_request"] 146 - branch: ["main"] 147 - 148 - engine: "nixery" 149 - 150 - # using the default values 151 - clone: 152 - skip: false 153 - depth: 1 154 - submodules: false 155 - 156 - dependencies: 157 - # nixpkgs 158 - nixpkgs: 159 - - nodejs 160 - - go 161 - # custom registry 162 - git+https://tangled.org/@example.com/my_pkg: 163 - - my_pkg 164 - 165 - environment: 166 - GOOS: "linux" 167 - GOARCH: "arm64" 168 - NODE_ENV: "production" 169 - MY_ENV_VAR: "MY_ENV_VALUE" 170 - 171 - steps: 172 - - name: "Build backend" 173 - command: "go build" 174 - environment: 175 - GOOS: "darwin" 176 - GOARCH: "arm64" 177 - - name: "Build frontend" 178 - command: "npm run build" 179 - environment: 180 - NODE_ENV: "production" 181 - ``` 182 - 183 - If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
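The complete example above only exercises branch triggers. A tag-driven release workflow built from the same fields documented here (the package and commands are placeholders) would look roughly like:

```yaml
# .tangled/workflows/release.yml

when:
  - event: ["push"]
    tag: ["v*"]

engine: "nixery"

dependencies:
  nixpkgs:
    - go

steps:
  - name: "Build release binary"
    command: "go build -o dist/app"
```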
+101
docs/styles.css
··· 1 + svg { 2 + width: 16px; 3 + height: 16px; 4 + } 5 + 6 + :root { 7 + --syntax-alert: #d20f39; 8 + --syntax-annotation: #fe640b; 9 + --syntax-attribute: #df8e1d; 10 + --syntax-basen: #40a02b; 11 + --syntax-builtin: #1e66f5; 12 + --syntax-controlflow: #8839ef; 13 + --syntax-char: #04a5e5; 14 + --syntax-constant: #fe640b; 15 + --syntax-comment: #9ca0b0; 16 + --syntax-commentvar: #7c7f93; 17 + --syntax-documentation: #9ca0b0; 18 + --syntax-datatype: #df8e1d; 19 + --syntax-decval: #40a02b; 20 + --syntax-error: #d20f39; 21 + --syntax-extension: #4c4f69; 22 + --syntax-float: #40a02b; 23 + --syntax-function: #1e66f5; 24 + --syntax-import: #40a02b; 25 + --syntax-information: #04a5e5; 26 + --syntax-keyword: #8839ef; 27 + --syntax-operator: #179299; 28 + --syntax-other: #8839ef; 29 + --syntax-preprocessor: #ea76cb; 30 + --syntax-specialchar: #04a5e5; 31 + --syntax-specialstring: #ea76cb; 32 + --syntax-string: #40a02b; 33 + --syntax-variable: #8839ef; 34 + --syntax-verbatimstring: #40a02b; 35 + --syntax-warning: #df8e1d; 36 + } 37 + 38 + @media (prefers-color-scheme: dark) { 39 + :root { 40 + --syntax-alert: #f38ba8; 41 + --syntax-annotation: #fab387; 42 + --syntax-attribute: #f9e2af; 43 + --syntax-basen: #a6e3a1; 44 + --syntax-builtin: #89b4fa; 45 + --syntax-controlflow: #cba6f7; 46 + --syntax-char: #89dceb; 47 + --syntax-constant: #fab387; 48 + --syntax-comment: #6c7086; 49 + --syntax-commentvar: #585b70; 50 + --syntax-documentation: #6c7086; 51 + --syntax-datatype: #f9e2af; 52 + --syntax-decval: #a6e3a1; 53 + --syntax-error: #f38ba8; 54 + --syntax-extension: #cdd6f4; 55 + --syntax-float: #a6e3a1; 56 + --syntax-function: #89b4fa; 57 + --syntax-import: #a6e3a1; 58 + --syntax-information: #89dceb; 59 + --syntax-keyword: #cba6f7; 60 + --syntax-operator: #94e2d5; 61 + --syntax-other: #cba6f7; 62 + --syntax-preprocessor: #f5c2e7; 63 + --syntax-specialchar: #89dceb; 64 + --syntax-specialstring: #f5c2e7; 65 + --syntax-string: #a6e3a1; 66 + --syntax-variable: #cba6f7; 67 + --syntax-verbatimstring: #a6e3a1; 68 + --syntax-warning: #f9e2af; 69 + } 70 + } 71 + 72 + /* pandoc syntax highlighting classes */ 73 + code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */ 74 + code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */ 75 + code span.at { color: var(--syntax-attribute); } /* attribute */ 76 + code span.bn { color: var(--syntax-basen); } /* basen */ 77 + code span.bu { color: var(--syntax-builtin); } /* builtin */ 78 + code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */ 79 + code span.ch { color: var(--syntax-char); } /* char */ 80 + code span.cn { color: var(--syntax-constant); } /* constant */ 81 + code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */ 82 + code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */ 83 + code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */ 84 + code span.dt { color: var(--syntax-datatype); } /* datatype */ 85 + code span.dv { color: var(--syntax-decval); } /* decval */ 86 + code span.er { color: var(--syntax-error); font-weight: bold; } /* error */ 87 + code span.ex { color: var(--syntax-extension); } /* extension */ 88 + code span.fl { color: var(--syntax-float); } /* float */ 89 + code span.fu { color: var(--syntax-function); } /* function */ 90 + code span.im { color: var(--syntax-import); font-weight: bold; } /* import */ 91 + code span.in { 
color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */ 92 + code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */ 93 + code span.op { color: var(--syntax-operator); } /* operator */ 94 + code span.ot { color: var(--syntax-other); } /* other */ 95 + code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */ 96 + code span.sc { color: var(--syntax-specialchar); } /* specialchar */ 97 + code span.ss { color: var(--syntax-specialstring); } /* specialstring */ 98 + code span.st { color: var(--syntax-string); } /* string */ 99 + code span.va { color: var(--syntax-variable); } /* variable */ 100 + code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */ 101 + code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+156
docs/template.html
··· 1 + <!DOCTYPE html> 2 + <html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$> 3 + <head> 4 + <meta charset="utf-8" /> 5 + <meta name="generator" content="pandoc" /> 6 + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> 7 + $for(author-meta)$ 8 + <meta name="author" content="$author-meta$" /> 9 + $endfor$ 10 + 11 + $if(date-meta)$ 12 + <meta name="dcterms.date" content="$date-meta$" /> 13 + $endif$ 14 + 15 + $if(keywords)$ 16 + <meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" /> 17 + $endif$ 18 + 19 + $if(description-meta)$ 20 + <meta name="description" content="$description-meta$" /> 21 + $endif$ 22 + 23 + <title>$pagetitle$</title> 24 + 25 + <style> 26 + $styles.css()$ 27 + </style> 28 + 29 + $for(css)$ 30 + <link rel="stylesheet" href="$css$" /> 31 + $endfor$ 32 + 33 + $for(header-includes)$ 34 + $header-includes$ 35 + $endfor$ 36 + 37 + <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin /> 38 + 39 + </head> 40 + <body class="bg-white dark:bg-gray-900 flex flex-col min-h-svh"> 41 + $for(include-before)$ 42 + $include-before$ 43 + $endfor$ 44 + 45 + $if(toc)$ 46 + <!-- mobile TOC trigger --> 47 + <div class="md:hidden px-6 py-4 border-b border-gray-200 dark:border-gray-700"> 48 + <button 49 + type="button" 50 + popovertarget="mobile-toc-popover" 51 + popovertargetaction="toggle" 52 + class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white" 53 + > 54 + ${ menu.svg() } 55 + $if(toc-title)$$toc-title$$else$Table of Contents$endif$ 56 + </button> 57 + </div> 58 + 59 + <div 60 + id="mobile-toc-popover" 61 + popover 62 + class="mobile-toc-popover 63 + bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 64 + h-full overflow-y-auto shadow-sm 65 + px-6 py-4 fixed inset-x-0 top-0 w-fit max-w-4/5 m-0" 66 + > 67 + <div class="flex flex-col min-h-full"> 68 + <div class="flex-1 space-y-4"> 69 + <button 70 + type="button" 71 + popovertarget="mobile-toc-popover" 72 + popovertargetaction="toggle" 73 + class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white mb-4"> 74 + ${ x.svg() } 75 + $if(toc-title)$$toc-title$$else$Table of Contents$endif$ 76 + </button> 77 + ${ search.html() } 78 + ${ table-of-contents:toc.html() } 79 + </div> 80 + ${ single-page:mode.html() } 81 + </div> 82 + </div> 83 + 84 + <!-- desktop sidebar toc --> 85 + <nav 86 + id="$idprefix$TOC" 87 + role="doc-toc" 88 + class="hidden md:flex md:flex-col gap-4 fixed left-0 top-0 w-80 h-screen 89 + bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 90 + p-4 z-50 overflow-y-auto"> 91 + ${ search.html() } 92 + <div class="flex-1"> 93 + $if(toc-title)$ 94 + <h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2> 95 + $endif$ 96 + ${ table-of-contents:toc.html() } 97 + </div> 98 + ${ single-page:mode.html() } 99 + </nav> 100 + $endif$ 101 + 102 + <div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col"> 103 + <main class="max-w-4xl w-full mx-auto p-6 flex-1"> 104 + $if(top)$ 105 + $-- only print title block if this is NOT the top page 106 + $else$ 107 + $if(title)$ 108 + <header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700"> 109 + <h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1> 110 + $if(subtitle)$ 111 + <p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p> 112 + $endif$ 
113 + $for(author)$ 114 + <p class="text-sm text-gray-500 dark:text-gray-400">$author$</p> 115 + $endfor$ 116 + $if(date)$ 117 + <p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p> 118 + $endif$ 119 + $endif$ 120 + </header> 121 + $endif$ 122 + 123 + $if(abstract)$ 124 + <article class="prose dark:prose-invert max-w-none"> 125 + $abstract$ 126 + </article> 127 + $endif$ 128 + 129 + <article class="prose dark:prose-invert max-w-none"> 130 + $body$ 131 + </article> 132 + </main> 133 + <nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800"> 134 + <div class="max-w-4xl mx-auto px-8 py-4"> 135 + <div class="flex justify-between gap-4"> 136 + <span class="flex-1"> 137 + $if(previous.url)$ 138 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span> 139 + <a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a> 140 + $endif$ 141 + </span> 142 + <span class="flex-1 text-right"> 143 + $if(next.url)$ 144 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span> 145 + <a href="$next.url$" accesskey="n" rel="next">$next.title$</a> 146 + $endif$ 147 + </span> 148 + </div> 149 + </div> 150 + </nav> 151 + </div> 152 + $for(include-after)$ 153 + $include-after$ 154 + $endfor$ 155 + </body> 156 + </html>
+4
docs/toc.html
··· 1 + <div class="[&_ul]:space-y-6 [&_ul]:pl-0 [&_ul]:font-bold [&_ul_ul]:pl-4 [&_ul_ul]:font-normal [&_ul_ul]:space-y-2 [&_li]:space-y-2"> 2 + $table-of-contents$ 3 + </div> 4 +
+9 -9
flake.lock
··· 35 35 "systems": "systems" 36 36 }, 37 37 "locked": { 38 - "lastModified": 1694529238, 39 - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", 38 + "lastModified": 1731533236, 39 + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 40 40 "owner": "numtide", 41 41 "repo": "flake-utils", 42 - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", 42 + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 43 43 "type": "github" 44 44 }, 45 45 "original": { ··· 56 56 ] 57 57 }, 58 58 "locked": { 59 - "lastModified": 1754078208, 60 - "narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=", 59 + "lastModified": 1763982521, 60 + "narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=", 61 61 "owner": "nix-community", 62 62 "repo": "gomod2nix", 63 - "rev": "7f963246a71626c7fc70b431a315c4388a0c95cf", 63 + "rev": "02e63a239d6eabd595db56852535992c898eba72", 64 64 "type": "github" 65 65 }, 66 66 "original": { ··· 150 150 }, 151 151 "nixpkgs": { 152 152 "locked": { 153 - "lastModified": 1751984180, 154 - "narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=", 153 + "lastModified": 1766070988, 154 + "narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=", 155 155 "owner": "nixos", 156 156 "repo": "nixpkgs", 157 - "rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0", 157 + "rev": "c6245e83d836d0433170a16eb185cefe0572f8b8", 158 158 "type": "github" 159 159 }, 160 160 "original": {
+6 -5
flake.nix
··· 76 76 }; 77 77 buildGoApplication = 78 78 (self.callPackage "${gomod2nix}/builder" { 79 - gomod2nix = gomod2nix.legacyPackages.${pkgs.system}.gomod2nix; 79 + gomod2nix = gomod2nix.legacyPackages.${pkgs.stdenv.hostPlatform.system}.gomod2nix; 80 80 }).buildGoApplication; 81 81 modules = ./nix/gomod2nix.toml; 82 82 sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix { 83 - inherit (pkgs) gcc; 84 83 inherit sqlite-lib-src; 85 84 }; 86 85 lexgen = self.callPackage ./nix/pkgs/lexgen.nix {inherit indigo;}; ··· 89 88 inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src; 90 89 }; 91 90 appview = self.callPackage ./nix/pkgs/appview.nix {}; 91 + docs = self.callPackage ./nix/pkgs/docs.nix { 92 + inherit inter-fonts-src ibm-plex-mono-src lucide-src; 93 + }; 92 94 spindle = self.callPackage ./nix/pkgs/spindle.nix {}; 93 95 knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {}; 94 96 knot = self.callPackage ./nix/pkgs/knot.nix {}; 95 97 }); 96 98 in { 97 99 overlays.default = final: prev: { 98 - inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview; 100 + inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs; 99 101 }; 100 102 101 103 packages = forAllSystems (system: let ··· 104 106 staticPackages = mkPackageSet pkgs.pkgsStatic; 105 107 crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic; 106 108 in { 107 - inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib; 109 + inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs; 108 110 109 111 pkgsStatic-appview = staticPackages.appview; 110 112 pkgsStatic-knot = staticPackages.knot; ··· 156 158 nativeBuildInputs = [ 157 159 pkgs.go 158 160 pkgs.air 159 - pkgs.tilt 160 161 pkgs.gopls 161 162 pkgs.httpie 162 163 pkgs.litecli
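Since this change adds a `docs` output to the package set, the overlay, and `packages`, building the documentation site locally should be a matter of the usual flake invocation (assuming you run it from the repo root):

```
nix build .#docs
```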
+3 -4
go.mod
··· 1 1 module tangled.org/core 2 2 3 - go 1.24.4 3 + go 1.25.0 4 4 5 5 require ( 6 6 github.com/Blank-Xu/sql-adapter v1.1.1 ··· 44 44 github.com/stretchr/testify v1.10.0 45 45 github.com/urfave/cli/v3 v3.3.3 46 46 github.com/whyrusleeping/cbor-gen v0.3.1 47 - github.com/wyatt915/goldmark-treeblood v0.0.1 48 47 github.com/yuin/goldmark v1.7.13 48 + github.com/yuin/goldmark-emoji v1.0.6 49 49 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 50 50 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 51 51 golang.org/x/crypto v0.40.0 52 52 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b 53 53 golang.org/x/image v0.31.0 54 54 golang.org/x/net v0.42.0 55 - golang.org/x/sync v0.17.0 56 55 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 57 56 gopkg.in/yaml.v3 v3.0.1 58 57 ) ··· 190 189 github.com/vmihailenco/go-tinylfu v0.2.2 // indirect 191 190 github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect 192 191 github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 193 - github.com/wyatt915/treeblood v0.1.16 // indirect 194 192 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 195 193 gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect 196 194 gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect ··· 205 203 go.uber.org/atomic v1.11.0 // indirect 206 204 go.uber.org/multierr v1.11.0 // indirect 207 205 go.uber.org/zap v1.27.0 // indirect 206 + golang.org/x/sync v0.17.0 // indirect 208 207 golang.org/x/sys v0.34.0 // indirect 209 208 golang.org/x/text v0.29.0 // indirect 210 209 golang.org/x/time v0.12.0 // indirect
+2 -4
go.sum
··· 495 495 github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= 496 496 github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= 497 497 github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= 498 - github.com/wyatt915/goldmark-treeblood v0.0.1 h1:6vLJcjFrHgE4ASu2ga4hqIQmbvQLU37v53jlHZ3pqDs= 499 - github.com/wyatt915/goldmark-treeblood v0.0.1/go.mod h1:SmcJp5EBaV17rroNlgNQFydYwy0+fv85CUr/ZaCz208= 500 - github.com/wyatt915/treeblood v0.1.16 h1:byxNbWZhnPDxdTp7W5kQhCeaY8RBVmojTFz1tEHgg8Y= 501 - github.com/wyatt915/treeblood v0.1.16/go.mod h1:i7+yhhmzdDP17/97pIsOSffw74EK/xk+qJ0029cSXUY= 502 498 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= 503 499 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= 504 500 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= ··· 509 505 github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 510 506 github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= 511 507 github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= 508 + github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= 509 + github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= 512 510 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= 513 511 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= 514 512 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+4 -4
hook/hook.go
··· 48 48 }, 49 49 Commands: []*cli.Command{ 50 50 { 51 - Name: "post-recieve", 52 - Usage: "sends a post-recieve hook to the knot (waits for stdin)", 53 - Action: postRecieve, 51 + Name: "post-receive", 52 + Usage: "sends a post-receive hook to the knot (waits for stdin)", 53 + Action: postReceive, 54 54 }, 55 55 }, 56 56 } 57 57 } 58 58 59 - func postRecieve(ctx context.Context, cmd *cli.Command) error { 59 + func postReceive(ctx context.Context, cmd *cli.Command) error { 60 60 gitDir := cmd.String("git-dir") 61 61 userDid := cmd.String("user-did") 62 62 userHandle := cmd.String("user-handle")
+1 -1
hook/setup.go
··· 138 138 option_var="GIT_PUSH_OPTION_$i" 139 139 push_options+=(-push-option "${!option_var}") 140 140 done 141 - %s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve 141 + %s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive 142 142 `, executablePath, config.internalApi) 143 143 144 144 return os.WriteFile(hookPath, []byte(hookContent), 0755)
+2 -1
input.css
··· 162 162 } 163 163 164 164 .prose a.mention { 165 - @apply no-underline hover:underline; 165 + @apply no-underline hover:underline font-bold; 166 166 } 167 167 168 168 .prose li { ··· 255 255 @apply py-1 text-gray-900 dark:text-gray-100; 256 256 } 257 257 } 258 + 258 259 } 259 260 260 261 /* Background */
+15 -4
jetstream/jetstream.go
··· 72 72 // existing instances of the closure when j.WantedDids is mutated 73 73 return func(ctx context.Context, evt *models.Event) error { 74 74 75 + j.mu.RLock() 75 76 // empty filter => all dids allowed 76 - if len(j.wantedDids) == 0 { 77 - return processFunc(ctx, evt) 77 + matches := len(j.wantedDids) == 0 78 + if !matches { 79 + if _, ok := j.wantedDids[evt.Did]; ok { 80 + matches = true 81 + } 78 82 } 83 + j.mu.RUnlock() 79 84 80 - if _, ok := j.wantedDids[evt.Did]; ok { 85 + if matches { 81 86 return processFunc(ctx, evt) 82 87 } else { 83 88 return nil ··· 122 127 123 128 go func() { 124 129 if j.waitForDid { 125 - for len(j.wantedDids) == 0 { 130 + for { 131 + j.mu.RLock() 132 + hasDid := len(j.wantedDids) != 0 133 + j.mu.RUnlock() 134 + if hasDid { 135 + break 136 + } 126 137 time.Sleep(time.Second) 127 138 } 128 139 }
+81
knotserver/db/db.go
··· 1 + package db 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "log/slog" 7 + "strings" 8 + 9 + _ "github.com/mattn/go-sqlite3" 10 + "tangled.org/core/log" 11 + ) 12 + 13 + type DB struct { 14 + db *sql.DB 15 + logger *slog.Logger 16 + } 17 + 18 + func Setup(ctx context.Context, dbPath string) (*DB, error) { 19 + // https://github.com/mattn/go-sqlite3#connection-string 20 + opts := []string{ 21 + "_foreign_keys=1", 22 + "_journal_mode=WAL", 23 + "_synchronous=NORMAL", 24 + "_auto_vacuum=incremental", 25 + } 26 + 27 + logger := log.FromContext(ctx) 28 + logger = log.SubLogger(logger, "db") 29 + 30 + db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 31 + if err != nil { 32 + return nil, err 33 + } 34 + 35 + conn, err := db.Conn(ctx) 36 + if err != nil { 37 + return nil, err 38 + } 39 + defer conn.Close() 40 + 41 + _, err = conn.ExecContext(ctx, ` 42 + create table if not exists known_dids ( 43 + did text primary key 44 + ); 45 + 46 + create table if not exists public_keys ( 47 + id integer primary key autoincrement, 48 + did text not null, 49 + key text not null, 50 + created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 51 + unique(did, key), 52 + foreign key (did) references known_dids(did) on delete cascade 53 + ); 54 + 55 + create table if not exists _jetstream ( 56 + id integer primary key autoincrement, 57 + last_time_us integer not null 58 + ); 59 + 60 + create table if not exists events ( 61 + rkey text not null, 62 + nsid text not null, 63 + event text not null, -- json 64 + created integer not null default (strftime('%s', 'now')), 65 + primary key (rkey, nsid) 66 + ); 67 + 68 + create table if not exists migrations ( 69 + id integer primary key autoincrement, 70 + name text unique 71 + ); 72 + `) 73 + if err != nil { 74 + return nil, err 75 + } 76 + 77 + return &DB{ 78 + db: db, 79 + logger: logger, 80 + }, nil 81 + }
-64
knotserver/db/init.go
··· 1 - package db 2 - 3 - import ( 4 - "database/sql" 5 - "strings" 6 - 7 - _ "github.com/mattn/go-sqlite3" 8 - ) 9 - 10 - type DB struct { 11 - db *sql.DB 12 - } 13 - 14 - func Setup(dbPath string) (*DB, error) { 15 - // https://github.com/mattn/go-sqlite3#connection-string 16 - opts := []string{ 17 - "_foreign_keys=1", 18 - "_journal_mode=WAL", 19 - "_synchronous=NORMAL", 20 - "_auto_vacuum=incremental", 21 - } 22 - 23 - db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 24 - if err != nil { 25 - return nil, err 26 - } 27 - 28 - // NOTE: If any other migration is added here, you MUST 29 - // copy the pattern in appview: use a single sql.Conn 30 - // for every migration. 31 - 32 - _, err = db.Exec(` 33 - create table if not exists known_dids ( 34 - did text primary key 35 - ); 36 - 37 - create table if not exists public_keys ( 38 - id integer primary key autoincrement, 39 - did text not null, 40 - key text not null, 41 - created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 42 - unique(did, key), 43 - foreign key (did) references known_dids(did) on delete cascade 44 - ); 45 - 46 - create table if not exists _jetstream ( 47 - id integer primary key autoincrement, 48 - last_time_us integer not null 49 - ); 50 - 51 - create table if not exists events ( 52 - rkey text not null, 53 - nsid text not null, 54 - event text not null, -- json 55 - created integer not null default (strftime('%s', 'now')), 56 - primary key (rkey, nsid) 57 - ); 58 - `) 59 - if err != nil { 60 - return nil, err 61 - } 62 - 63 - return &DB{db: db}, nil 64 - }
+1 -17
knotserver/git/diff.go
··· 77 77 nd.Diff = append(nd.Diff, ndiff) 78 78 } 79 79 80 - nd.Stat.FilesChanged = len(diffs) 81 - nd.Commit.This = c.Hash.String() 82 - nd.Commit.PGPSignature = c.PGPSignature 83 - nd.Commit.Committer = c.Committer 84 - nd.Commit.Tree = c.TreeHash.String() 85 - 86 - if parent.Hash.IsZero() { 87 - nd.Commit.Parent = "" 88 - } else { 89 - nd.Commit.Parent = parent.Hash.String() 90 - } 91 - nd.Commit.Author = c.Author 92 - nd.Commit.Message = c.Message 93 - 94 - if v, ok := c.ExtraHeaders["change-id"]; ok { 95 - nd.Commit.ChangedId = string(v) 96 - } 80 + nd.Commit.FromGoGitCommit(c) 97 81 98 82 return &nd, nil 99 83 }
+38 -2
knotserver/git/fork.go
··· 3 3 import ( 4 4 "errors" 5 5 "fmt" 6 + "log/slog" 7 + "net/url" 6 8 "os/exec" 9 + "path/filepath" 7 10 8 11 "github.com/go-git/go-git/v5" 9 12 "github.com/go-git/go-git/v5/config" 13 + knotconfig "tangled.org/core/knotserver/config" 10 14 ) 11 15 12 - func Fork(repoPath, source string) error { 13 - cloneCmd := exec.Command("git", "clone", "--bare", source, repoPath) 16 + func Fork(repoPath, source string, cfg *knotconfig.Config) error { 17 + u, err := url.Parse(source) 18 + if err != nil { 19 + return fmt.Errorf("failed to parse source URL: %w", err) 20 + } 21 + 22 + if o := optimizeClone(u, cfg); o != nil { 23 + u = o 24 + } 25 + 26 + cloneCmd := exec.Command("git", "clone", "--bare", u.String(), repoPath) 14 27 if err := cloneCmd.Run(); err != nil { 15 28 return fmt.Errorf("failed to bare clone repository: %w", err) 16 29 } ··· 21 34 } 22 35 23 36 return nil 37 + } 38 + 39 + func optimizeClone(u *url.URL, cfg *knotconfig.Config) *url.URL { 40 + // only optimize if it's the same host 41 + if u.Host != cfg.Server.Hostname { 42 + return nil 43 + } 44 + 45 + local := filepath.Join(cfg.Repo.ScanPath, u.Path) 46 + 47 + // sanity check: is there a git repo there? 48 + if _, err := PlainOpen(local); err != nil { 49 + return nil 50 + } 51 + 52 + // create optimized file:// URL 53 + optimized := &url.URL{ 54 + Scheme: "file", 55 + Path: local, 56 + } 57 + 58 + slog.Debug("performing local clone", "url", optimized.String()) 59 + return optimized 24 60 } 25 61 26 62 func (g *GitRepo) Sync() error {
+13 -1
knotserver/git/service/service.go
··· 95 95 return c.RunService(cmd) 96 96 } 97 97 98 + func (c *ServiceCommand) UploadArchive() error { 99 + cmd := exec.Command("git", []string{ 100 + "upload-archive", 101 + ".", 102 + }...) 103 + 104 + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 105 + cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol)) 106 + cmd.Dir = c.Dir 107 + 108 + return c.RunService(cmd) 109 + } 110 + 98 111 func (c *ServiceCommand) UploadPack() error { 99 112 cmd := exec.Command("git", []string{ 100 - "-c", "uploadpack.allowFilter=true", 101 113 "upload-pack", 102 114 "--stateless-rpc", 103 115 ".",
+47
knotserver/git.go
··· 56 56 }
57 57 }
58 58
59 + func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) {
60 + 	did := chi.URLParam(r, "did")
61 + 	name := chi.URLParam(r, "name")
62 + 	repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name))
63 + 	if err != nil {
64 + 		gitError(w, err.Error(), http.StatusInternalServerError)
65 + 		h.l.Error("git: failed to secure join repo path", "handler", "UploadArchive", "error", err)
66 + 		return
67 + 	}
68 +
69 + 	const expectedContentType = "application/x-git-upload-archive-request"
70 + 	contentType := r.Header.Get("Content-Type")
71 + 	if contentType != expectedContentType {
72 + 		gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType)
73 + 		return
74 + 	}
75 + 	var bodyReader io.ReadCloser = r.Body
76 + 	if r.Header.Get("Content-Encoding") == "gzip" {
77 + 		gzipReader, err := gzip.NewReader(r.Body)
78 + 		if err != nil {
79 + 			gitError(w, err.Error(), http.StatusInternalServerError)
80 + 			h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err)
81 + 			return
82 + 		}
83 + 		defer gzipReader.Close()
84 + 		bodyReader = gzipReader
85 + 	}
86 +
87 + 	w.Header().Set("Content-Type", "application/x-git-upload-archive-result")
88 +
89 + 	h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo)
90 +
91 + 	cmd := service.ServiceCommand{
92 + 		GitProtocol: r.Header.Get("Git-Protocol"),
93 + 		Dir:         repo,
94 + 		Stdout:      w,
95 + 		Stdin:       bodyReader,
96 + 	}
97 +
98 + 	w.WriteHeader(http.StatusOK)
99 +
100 + 	if err := cmd.UploadArchive(); err != nil {
101 + 		h.l.Error("git: failed to execute git-upload-archive", "handler", "UploadArchive", "error", err)
102 + 		return
103 + 	}
104 + }
105 +
59 106 func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) {
60 107 	did := chi.URLParam(r, "did")
61 108 	name := chi.URLParam(r, "name")
+1
knotserver/router.go
··· 82 82 r.Route("/{name}", func(r chi.Router) { 83 83 // routes for git operations 84 84 r.Get("/info/refs", h.InfoRefs) 85 + r.Post("/git-upload-archive", h.UploadArchive) 85 86 r.Post("/git-upload-pack", h.UploadPack) 86 87 r.Post("/git-receive-pack", h.ReceivePack) 87 88 })
+1 -1
knotserver/server.go
··· 64 64 logger.Info("running in dev mode, signature verification is disabled") 65 65 } 66 66 67 - db, err := db.Setup(c.Server.DBPath) 67 + db, err := db.Setup(ctx, c.Server.DBPath) 68 68 if err != nil { 69 69 return fmt.Errorf("failed to load db: %w", err) 70 70 }
+1 -1
knotserver/xrpc/create_repo.go
··· 84 84 repoPath, _ := securejoin.SecureJoin(h.Config.Repo.ScanPath, relativeRepoPath) 85 85 86 86 if data.Source != nil && *data.Source != "" { 87 - err = git.Fork(repoPath, *data.Source) 87 + err = git.Fork(repoPath, *data.Source, h.Config) 88 88 if err != nil { 89 89 l.Error("forking repo", "error", err.Error()) 90 90 writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
+6 -1
knotserver/xrpc/repo_log.go
··· 62 62 return 63 63 } 64 64 65 + tcommits := make([]types.Commit, len(commits)) 66 + for i, c := range commits { 67 + tcommits[i].FromGoGitCommit(c) 68 + } 69 + 65 70 // Create response using existing types.RepoLogResponse 66 71 response := types.RepoLogResponse{ 67 - Commits: commits, 72 + Commits: tcommits, 68 73 Ref: ref, 69 74 Page: (offset / limit) + 1, 70 75 PerPage: limit,
+51
lexicons/comment/comment.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "sh.tangled.comment", 4 + "needsCbor": true, 5 + "needsType": true, 6 + "defs": { 7 + "main": { 8 + "type": "record", 9 + "key": "tid", 10 + "record": { 11 + "type": "object", 12 + "required": [ 13 + "subject", 14 + "body", 15 + "createdAt" 16 + ], 17 + "properties": { 18 + "subject": { 19 + "type": "string", 20 + "format": "at-uri" 21 + }, 22 + "body": { 23 + "type": "string" 24 + }, 25 + "createdAt": { 26 + "type": "string", 27 + "format": "datetime" 28 + }, 29 + "replyTo": { 30 + "type": "string", 31 + "format": "at-uri" 32 + }, 33 + "mentions": { 34 + "type": "array", 35 + "items": { 36 + "type": "string", 37 + "format": "did" 38 + } 39 + }, 40 + "references": { 41 + "type": "array", 42 + "items": { 43 + "type": "string", 44 + "format": "at-uri" 45 + } 46 + } 47 + } 48 + } 49 + } 50 + } 51 + }
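For reference, a record conforming to the new `sh.tangled.comment` lexicon above would look roughly like this; every value below is a made-up placeholder, including the collection names inside the AT-URIs:

```json
{
  "$type": "sh.tangled.comment",
  "subject": "at://did:plc:alice/sh.tangled.issue/3kexample",
  "body": "Looks good to me!",
  "createdAt": "2025-01-01T00:00:00Z",
  "replyTo": "at://did:plc:bob/sh.tangled.comment/3kparent",
  "mentions": ["did:plc:bob"],
  "references": ["at://did:plc:alice/sh.tangled.repo/3krepo"]
}
```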
+14
lexicons/issue/comment.json
··· 29 29 "replyTo": { 30 30 "type": "string", 31 31 "format": "at-uri" 32 + }, 33 + "mentions": { 34 + "type": "array", 35 + "items": { 36 + "type": "string", 37 + "format": "did" 38 + } 39 + }, 40 + "references": { 41 + "type": "array", 42 + "items": { 43 + "type": "string", 44 + "format": "at-uri" 45 + } 32 46 } 33 47 } 34 48 }
+14
lexicons/issue/issue.json
··· 24 24 "createdAt": { 25 25 "type": "string", 26 26 "format": "datetime" 27 + }, 28 + "mentions": { 29 + "type": "array", 30 + "items": { 31 + "type": "string", 32 + "format": "did" 33 + } 34 + }, 35 + "references": { 36 + "type": "array", 37 + "items": { 38 + "type": "string", 39 + "format": "at-uri" 40 + } 27 41 } 28 42 } 29 43 }
+14
lexicons/pulls/comment.json
··· 25 25 "createdAt": { 26 26 "type": "string", 27 27 "format": "datetime" 28 + }, 29 + "mentions": { 30 + "type": "array", 31 + "items": { 32 + "type": "string", 33 + "format": "did" 34 + } 35 + }, 36 + "references": { 37 + "type": "array", 38 + "items": { 39 + "type": "string", 40 + "format": "at-uri" 41 + } 28 42 } 29 43 } 30 44 }
+24 -2
lexicons/pulls/pull.json
··· 12 12 "required": [ 13 13 "target", 14 14 "title", 15 - "patch", 15 + "patchBlob", 16 16 "createdAt" 17 17 ], 18 18 "properties": { ··· 27 27 "type": "string" 28 28 }, 29 29 "patch": { 30 - "type": "string" 30 + "type": "string", 31 + "description": "(deprecated) use patchBlob instead" 32 + }, 33 + "patchBlob": { 34 + "type": "blob", 35 + "accept": [ 36 + "text/x-patch" 37 + ], 38 + "description": "patch content" 31 39 }, 32 40 "source": { 33 41 "type": "ref", ··· 36 44 "createdAt": { 37 45 "type": "string", 38 46 "format": "datetime" 47 + }, 48 + "mentions": { 49 + "type": "array", 50 + "items": { 51 + "type": "string", 52 + "format": "did" 53 + } 54 + }, 55 + "references": { 56 + "type": "array", 57 + "items": { 58 + "type": "string", 59 + "format": "at-uri" 60 + } 39 61 } 40 62 } 41 63 }
+3 -30
nix/gomod2nix.toml
··· 165 165 [mod."github.com/davecgh/go-spew"] 166 166 version = "v1.1.2-0.20180830191138-d8f796af33cc" 167 167 hash = "sha256-fV9oI51xjHdOmEx6+dlq7Ku2Ag+m/bmbzPo6A4Y74qc=" 168 - [mod."github.com/decred/dcrd/dcrec/secp256k1/v4"] 169 - version = "v4.4.0" 170 - hash = "sha256-qrhEIwhDll3cxoVpMbm1NQ9/HTI42S7ms8Buzlo5HCg=" 171 168 [mod."github.com/dgraph-io/ristretto"] 172 169 version = "v0.2.0" 173 170 hash = "sha256-bnpxX+oO/Qf7IJevA0gsbloVoqRx+5bh7RQ9d9eLNYw=" ··· 373 370 [mod."github.com/klauspost/cpuid/v2"] 374 371 version = "v2.3.0" 375 372 hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc=" 376 - [mod."github.com/lestrrat-go/blackmagic"] 377 - version = "v1.0.4" 378 - hash = "sha256-HmWOpwoPDNMwLdOi7onNn3Sb+ZsAa3Ai3gVBbXmQ0e8=" 379 - [mod."github.com/lestrrat-go/httpcc"] 380 - version = "v1.0.1" 381 - hash = "sha256-SMRSwJpqDIs/xL0l2e8vP0W65qtCHX2wigcOeqPJmos=" 382 - [mod."github.com/lestrrat-go/httprc"] 383 - version = "v1.0.6" 384 - hash = "sha256-mfZzePEhrmyyu/avEBd2MsDXyto8dq5+fyu5lA8GUWM=" 385 - [mod."github.com/lestrrat-go/iter"] 386 - version = "v1.0.2" 387 - hash = "sha256-30tErRf7Qu/NOAt1YURXY/XJSA6sCr6hYQfO8QqHrtw=" 388 - [mod."github.com/lestrrat-go/jwx/v2"] 389 - version = "v2.1.6" 390 - hash = "sha256-0LszXRZIba+X8AOrs3T4uanAUafBdlVB8/MpUNEFpbc=" 391 - [mod."github.com/lestrrat-go/option"] 392 - version = "v1.0.1" 393 - hash = "sha256-jVcIYYVsxElIS/l2akEw32vdEPR8+anR6oeT1FoYULI=" 394 373 [mod."github.com/lucasb-eyer/go-colorful"] 395 374 version = "v1.2.0" 396 375 hash = "sha256-Gg9dDJFCTaHrKHRR1SrJgZ8fWieJkybljybkI9x0gyE=" ··· 511 490 [mod."github.com/ryanuber/go-glob"] 512 491 version = "v1.0.0" 513 492 hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY=" 514 - [mod."github.com/segmentio/asm"] 515 - version = "v1.2.0" 516 - hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs=" 517 493 [mod."github.com/sergi/go-diff"] 518 494 version = "v1.1.0" 519 495 hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY=" ··· 548 524 [mod."github.com/whyrusleeping/cbor-gen"] 549 525 version = "v0.3.1" 550 526 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc=" 551 - [mod."github.com/wyatt915/goldmark-treeblood"] 552 - version = "v0.0.1" 553 - hash = "sha256-hAVFaktO02MiiqZFffr8ZlvFEfwxw4Y84OZ2t7e5G7g=" 554 - [mod."github.com/wyatt915/treeblood"] 555 - version = "v0.1.16" 556 - hash = "sha256-T68sa+iVx0qY7dDjXEAJvRWQEGXYIpUsf9tcWwO1tIw=" 557 527 [mod."github.com/xo/terminfo"] 558 528 version = "v0.0.0-20220910002029-abceb7e1c41e" 559 529 hash = "sha256-GyCDxxMQhXA3Pi/TsWXpA8cX5akEoZV7CFx4RO3rARU=" 560 530 [mod."github.com/yuin/goldmark"] 561 531 version = "v1.7.13" 562 532 hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE=" 533 + [mod."github.com/yuin/goldmark-emoji"] 534 + version = "v1.0.6" 535 + hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY=" 563 536 [mod."github.com/yuin/goldmark-highlighting/v2"] 564 537 version = "v2.0.0-20230729083705-37449abec8cc" 565 538 hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+2
nix/modules/knot.nix
··· 195 195 Match User ${cfg.gitUser} 196 196 AuthorizedKeysCommand /etc/ssh/keyfetch_wrapper 197 197 AuthorizedKeysCommandUser nobody 198 + ChallengeResponseAuthentication no 199 + PasswordAuthentication no 198 200 ''; 199 201 }; 200 202
+53
nix/pkgs/docs.nix
··· 1 + { 2 + pandoc, 3 + tailwindcss, 4 + runCommandLocal, 5 + inter-fonts-src, 6 + ibm-plex-mono-src, 7 + lucide-src, 8 + src, 9 + }: 10 + runCommandLocal "docs" {} '' 11 + mkdir -p working 12 + 13 + # copy templates, themes, styles, filters to working directory 14 + cp ${src}/docs/*.html working/ 15 + cp ${src}/docs/*.theme working/ 16 + cp ${src}/docs/*.css working/ 17 + 18 + # icons 19 + cp -rf ${lucide-src}/*.svg working/ 20 + 21 + # content - chunked 22 + ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \ 23 + -o $out/ \ 24 + -t chunkedhtml \ 25 + --variable toc \ 26 + --variable-json single-page=false \ 27 + --toc-depth=2 \ 28 + --css=stylesheet.css \ 29 + --chunk-template="%i.html" \ 30 + --highlight-style=working/highlight.theme \ 31 + --template=working/template.html 32 + 33 + # content - single page 34 + ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \ 35 + -o $out/single-page.html \ 36 + --toc \ 37 + --variable toc \ 38 + --variable single-page \ 39 + --toc-depth=2 \ 40 + --css=stylesheet.css \ 41 + --highlight-style=working/highlight.theme \ 42 + --template=working/template.html 43 + 44 + # fonts 45 + mkdir -p $out/static/fonts 46 + cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/ 47 + cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/ 48 + cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/ 49 + cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/ 50 + 51 + # styles 52 + cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css 53 + ''
+7 -5
nix/pkgs/sqlite-lib.nix
··· 1 1 { 2 - gcc, 3 2 stdenv, 4 3 sqlite-lib-src, 5 4 }: 6 5 stdenv.mkDerivation { 7 6 name = "sqlite-lib"; 8 7 src = sqlite-lib-src; 9 - nativeBuildInputs = [gcc]; 8 + 10 9 buildPhase = '' 11 - gcc -c sqlite3.c 12 - ar rcs libsqlite3.a sqlite3.o 13 - ranlib libsqlite3.a 10 + $CC -c sqlite3.c 11 + $AR rcs libsqlite3.a sqlite3.o 12 + $RANLIB libsqlite3.a 13 + ''; 14 + 15 + installPhase = '' 14 16 mkdir -p $out/include $out/lib 15 17 cp *.h $out/include 16 18 cp libsqlite3.a $out/lib
+1 -1
nix/vm.nix
··· 8 8 var = builtins.getEnv name; 9 9 in 10 10 if var == "" 11 - then throw "\$${name} must be defined, see docs/hacking.md for more details" 11 + then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details" 12 12 else var; 13 13 envVarOr = name: default: let 14 14 var = builtins.getEnv name;
+122
orm/orm.go
··· 1 + package orm 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "fmt" 7 + "log/slog" 8 + "reflect" 9 + "strings" 10 + ) 11 + 12 + type migrationFn = func(*sql.Tx) error 13 + 14 + func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error { 15 + logger = logger.With("migration", name) 16 + 17 + tx, err := c.BeginTx(context.Background(), nil) 18 + if err != nil { 19 + return err 20 + } 21 + defer tx.Rollback() 22 + 23 + var exists bool 24 + err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists) 25 + if err != nil { 26 + return err 27 + } 28 + 29 + if !exists { 30 + // run migration 31 + err = migrationFn(tx) 32 + if err != nil { 33 + logger.Error("failed to run migration", "err", err) 34 + return err 35 + } 36 + 37 + // mark migration as complete 38 + _, err = tx.Exec("insert into migrations (name) values (?)", name) 39 + if err != nil { 40 + logger.Error("failed to mark migration as complete", "err", err) 41 + return err 42 + } 43 + 44 + // commit the transaction 45 + if err := tx.Commit(); err != nil { 46 + return err 47 + } 48 + 49 + logger.Info("migration applied successfully") 50 + } else { 51 + logger.Warn("skipped migration, already applied") 52 + } 53 + 54 + return nil 55 + } 56 + 57 + type Filter struct { 58 + Key string 59 + arg any 60 + Cmp string 61 + } 62 + 63 + func newFilter(key, cmp string, arg any) Filter { 64 + return Filter{ 65 + Key: key, 66 + arg: arg, 67 + Cmp: cmp, 68 + } 69 + } 70 + 71 + func FilterEq(key string, arg any) Filter { return newFilter(key, "=", arg) } 72 + func FilterNotEq(key string, arg any) Filter { return newFilter(key, "<>", arg) } 73 + func FilterGte(key string, arg any) Filter { return newFilter(key, ">=", arg) } 74 + func FilterLte(key string, arg any) Filter { return newFilter(key, "<=", arg) } 75 + func FilterIs(key string, arg any) Filter { return newFilter(key, "is", arg) } 76 + func FilterIsNot(key string, arg any) Filter { return newFilter(key, "is not", arg) } 77 + func FilterIn(key string, arg any) Filter { return newFilter(key, "in", arg) } 78 + func FilterLike(key string, arg any) Filter { return newFilter(key, "like", arg) } 79 + func FilterNotLike(key string, arg any) Filter { return newFilter(key, "not like", arg) } 80 + func FilterContains(key string, arg any) Filter { 81 + return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg)) 82 + } 83 + 84 + func (f Filter) Condition() string { 85 + rv := reflect.ValueOf(f.arg) 86 + kind := rv.Kind() 87 + 88 + // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)` 89 + if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 90 + if rv.Len() == 0 { 91 + // always false 92 + return "1 = 0" 93 + } 94 + 95 + placeholders := make([]string, rv.Len()) 96 + for i := range placeholders { 97 + placeholders[i] = "?" 98 + } 99 + 100 + return fmt.Sprintf("%s %s (%s)", f.Key, f.Cmp, strings.Join(placeholders, ", ")) 101 + } 102 + 103 + return fmt.Sprintf("%s %s ?", f.Key, f.Cmp) 104 + } 105 + 106 + func (f Filter) Arg() []any { 107 + rv := reflect.ValueOf(f.arg) 108 + kind := rv.Kind() 109 + if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 110 + if rv.Len() == 0 { 111 + return nil 112 + } 113 + 114 + out := make([]any, rv.Len()) 115 + for i := range rv.Len() { 116 + out[i] = rv.Index(i).Interface() 117 + } 118 + return out 119 + } 120 + 121 + return []any{f.arg} 122 + }
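A sketch of composing these filters into a where clause and argument list; the issues table and its columns are made up for illustration, and the import path assumes the orm package lives under the tangled.org/core module.

package main

import (
	"fmt"
	"strings"

	"tangled.org/core/orm"
)

func main() {
	// hypothetical table and columns, purely for illustration
	filters := []orm.Filter{
		orm.FilterEq("repo_at", "at://did:plc:example/repo/3kplaceholder"),
		orm.FilterIn("state", []string{"open", "merged"}),
		orm.FilterContains("title", "rfc"),
	}

	var conds []string
	var args []any
	for _, f := range filters {
		conds = append(conds, f.Condition())
		args = append(args, f.Arg()...)
	}

	query := "select id from issues where " + strings.Join(conds, " and ")
	fmt.Println(query) // select id from issues where repo_at = ? and state in (?, ?) and title like ?
	fmt.Println(args)  // [at://did:plc:example/repo/3kplaceholder open merged %rfc%]
}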
-1
patchutil/patchutil.go
··· 296 296 } 297 297 298 298 nd := types.NiceDiff{} 299 - nd.Commit.Parent = targetBranch 300 299 301 300 for _, d := range diffs { 302 301 ndiff := types.Diff{}
+8
rbac/rbac.go
··· 285 285 return e.E.Enforce(user, domain, repo, "repo:delete") 286 286 } 287 287 288 + func (e *Enforcer) IsRepoOwner(user, domain, repo string) (bool, error) { 289 + return e.E.Enforce(user, domain, repo, "repo:owner") 290 + } 291 + 292 + func (e *Enforcer) IsRepoCollaborator(user, domain, repo string) (bool, error) { 293 + return e.E.Enforce(user, domain, repo, "repo:collaborator") 294 + } 295 + 288 296 func (e *Enforcer) IsPushAllowed(user, domain, repo string) (bool, error) { 289 297 return e.E.Enforce(user, domain, repo, "repo:push") 290 298 }
+3 -3
readme.md
··· 10 10 11 11 ## docs 12 12 13 - * [knot hosting guide](/docs/knot-hosting.md) 14 - * [contributing guide](/docs/contributing.md) **please read before opening a PR!** 15 - * [hacking on tangled](/docs/hacking.md) 13 + - [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide) 14 + - [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!** 15 + - [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled) 16 16 17 17 ## security 18 18
+31
sets/gen.go
··· 1 + package sets 2 + 3 + import ( 4 + "math/rand" 5 + "reflect" 6 + "testing/quick" 7 + ) 8 + 9 + func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value { 10 + s := New[T]() 11 + 12 + var zero T 13 + itemType := reflect.TypeOf(zero) 14 + 15 + for { 16 + if s.Len() >= size { 17 + break 18 + } 19 + 20 + item, ok := quick.Value(itemType, rand) 21 + if !ok { 22 + continue 23 + } 24 + 25 + if val, ok := item.Interface().(T); ok { 26 + s.Insert(val) 27 + } 28 + } 29 + 30 + return reflect.ValueOf(s) 31 + }
+35
sets/readme.txt
··· 1 + sets
2 + ----
3 + set data structure for go with generics and iterators. the
4 + api is supposed to mimic rust's std::collections::HashSet api.
5 +
6 + s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4}))
7 + s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6}))
8 +
9 + union := sets.Collect(s1.Union(s2))
10 + intersect := sets.Collect(s1.Intersection(s2))
11 + diff := sets.Collect(s1.Difference(s2))
12 + symdiff := sets.Collect(s1.SymmetricDifference(s2))
13 +
14 + s1.Len() // 4
15 + s1.Contains(1) // true
16 + s1.IsEmpty() // false
17 + s1.IsSubset(s2) // true
18 + s1.IsSuperset(s2) // false
19 + s1.IsDisjoint(s2) // false
20 +
21 + if inserted := s1.Insert(1); inserted {
22 + // newly added to set
23 + }
24 +
25 + if existed := s1.Remove(1); existed {
26 + // existed in set, now removed
27 + }
28 +
29 +
30 + testing
31 + -------
32 + includes property-based tests using the wonderful
33 + testing/quick package!
34 +
35 + go test -v
+174
sets/set.go
··· 1 + package sets 2 + 3 + import ( 4 + "iter" 5 + "maps" 6 + ) 7 + 8 + type Set[T comparable] struct { 9 + data map[T]struct{} 10 + } 11 + 12 + func New[T comparable]() Set[T] { 13 + return Set[T]{ 14 + data: make(map[T]struct{}), 15 + } 16 + } 17 + 18 + func (s *Set[T]) Insert(item T) bool { 19 + _, exists := s.data[item] 20 + s.data[item] = struct{}{} 21 + return !exists 22 + } 23 + 24 + func Singleton[T comparable](item T) Set[T] { 25 + n := New[T]() 26 + _ = n.Insert(item) 27 + return n 28 + } 29 + 30 + func (s *Set[T]) Remove(item T) bool { 31 + _, exists := s.data[item] 32 + if exists { 33 + delete(s.data, item) 34 + } 35 + return exists 36 + } 37 + 38 + func (s Set[T]) Contains(item T) bool { 39 + _, exists := s.data[item] 40 + return exists 41 + } 42 + 43 + func (s Set[T]) Len() int { 44 + return len(s.data) 45 + } 46 + 47 + func (s Set[T]) IsEmpty() bool { 48 + return len(s.data) == 0 49 + } 50 + 51 + func (s *Set[T]) Clear() { 52 + s.data = make(map[T]struct{}) 53 + } 54 + 55 + func (s Set[T]) All() iter.Seq[T] { 56 + return func(yield func(T) bool) { 57 + for item := range s.data { 58 + if !yield(item) { 59 + return 60 + } 61 + } 62 + } 63 + } 64 + 65 + func (s Set[T]) Clone() Set[T] { 66 + return Set[T]{ 67 + data: maps.Clone(s.data), 68 + } 69 + } 70 + 71 + func (s Set[T]) Union(other Set[T]) iter.Seq[T] { 72 + if s.Len() >= other.Len() { 73 + return chain(s.All(), other.Difference(s)) 74 + } else { 75 + return chain(other.All(), s.Difference(other)) 76 + } 77 + } 78 + 79 + func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] { 80 + return func(yield func(T) bool) { 81 + for _, seq := range seqs { 82 + for item := range seq { 83 + if !yield(item) { 84 + return 85 + } 86 + } 87 + } 88 + } 89 + } 90 + 91 + func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] { 92 + return func(yield func(T) bool) { 93 + for item := range s.data { 94 + if other.Contains(item) { 95 + if !yield(item) { 96 + return 97 + } 98 + } 99 + } 100 + } 101 + } 102 + 103 + func (s Set[T]) Difference(other Set[T]) iter.Seq[T] { 104 + return func(yield func(T) bool) { 105 + for item := range s.data { 106 + if !other.Contains(item) { 107 + if !yield(item) { 108 + return 109 + } 110 + } 111 + } 112 + } 113 + } 114 + 115 + func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] { 116 + return func(yield func(T) bool) { 117 + for item := range s.data { 118 + if !other.Contains(item) { 119 + if !yield(item) { 120 + return 121 + } 122 + } 123 + } 124 + for item := range other.data { 125 + if !s.Contains(item) { 126 + if !yield(item) { 127 + return 128 + } 129 + } 130 + } 131 + } 132 + } 133 + 134 + func (s Set[T]) IsSubset(other Set[T]) bool { 135 + for item := range s.data { 136 + if !other.Contains(item) { 137 + return false 138 + } 139 + } 140 + return true 141 + } 142 + 143 + func (s Set[T]) IsSuperset(other Set[T]) bool { 144 + return other.IsSubset(s) 145 + } 146 + 147 + func (s Set[T]) IsDisjoint(other Set[T]) bool { 148 + for item := range s.data { 149 + if other.Contains(item) { 150 + return false 151 + } 152 + } 153 + return true 154 + } 155 + 156 + func (s Set[T]) Equal(other Set[T]) bool { 157 + if s.Len() != other.Len() { 158 + return false 159 + } 160 + for item := range s.data { 161 + if !other.Contains(item) { 162 + return false 163 + } 164 + } 165 + return true 166 + } 167 + 168 + func Collect[T comparable](seq iter.Seq[T]) Set[T] { 169 + result := New[T]() 170 + for item := range seq { 171 + result.Insert(item) 172 + } 173 + return result 174 + }
+411
sets/set_test.go
··· 1 + package sets 2 + 3 + import ( 4 + "slices" 5 + "testing" 6 + "testing/quick" 7 + ) 8 + 9 + func TestNew(t *testing.T) { 10 + s := New[int]() 11 + if s.Len() != 0 { 12 + t.Errorf("New set should be empty, got length %d", s.Len()) 13 + } 14 + if !s.IsEmpty() { 15 + t.Error("New set should be empty") 16 + } 17 + } 18 + 19 + func TestFromSlice(t *testing.T) { 20 + s := Collect(slices.Values([]int{1, 2, 3, 2, 1})) 21 + if s.Len() != 3 { 22 + t.Errorf("Expected length 3, got %d", s.Len()) 23 + } 24 + if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) { 25 + t.Error("Set should contain all unique elements from slice") 26 + } 27 + } 28 + 29 + func TestInsert(t *testing.T) { 30 + s := New[string]() 31 + 32 + if !s.Insert("hello") { 33 + t.Error("First insert should return true") 34 + } 35 + if s.Insert("hello") { 36 + t.Error("Duplicate insert should return false") 37 + } 38 + if s.Len() != 1 { 39 + t.Errorf("Expected length 1, got %d", s.Len()) 40 + } 41 + } 42 + 43 + func TestRemove(t *testing.T) { 44 + s := Collect(slices.Values([]int{1, 2, 3})) 45 + 46 + if !s.Remove(2) { 47 + t.Error("Remove existing element should return true") 48 + } 49 + if s.Remove(2) { 50 + t.Error("Remove non-existing element should return false") 51 + } 52 + if s.Contains(2) { 53 + t.Error("Element should be removed") 54 + } 55 + if s.Len() != 2 { 56 + t.Errorf("Expected length 2, got %d", s.Len()) 57 + } 58 + } 59 + 60 + func TestContains(t *testing.T) { 61 + s := Collect(slices.Values([]int{1, 2, 3})) 62 + 63 + if !s.Contains(1) { 64 + t.Error("Should contain 1") 65 + } 66 + if s.Contains(4) { 67 + t.Error("Should not contain 4") 68 + } 69 + } 70 + 71 + func TestClear(t *testing.T) { 72 + s := Collect(slices.Values([]int{1, 2, 3})) 73 + s.Clear() 74 + 75 + if !s.IsEmpty() { 76 + t.Error("Set should be empty after clear") 77 + } 78 + if s.Len() != 0 { 79 + t.Errorf("Expected length 0, got %d", s.Len()) 80 + } 81 + } 82 + 83 + func TestIterator(t *testing.T) { 84 + s := Collect(slices.Values([]int{1, 2, 3})) 85 + var items []int 86 + 87 + for item := range s.All() { 88 + items = append(items, item) 89 + } 90 + 91 + slices.Sort(items) 92 + expected := []int{1, 2, 3} 93 + if !slices.Equal(items, expected) { 94 + t.Errorf("Expected %v, got %v", expected, items) 95 + } 96 + } 97 + 98 + func TestClone(t *testing.T) { 99 + s1 := Collect(slices.Values([]int{1, 2, 3})) 100 + s2 := s1.Clone() 101 + 102 + if !s1.Equal(s2) { 103 + t.Error("Cloned set should be equal to original") 104 + } 105 + 106 + s2.Insert(4) 107 + if s1.Contains(4) { 108 + t.Error("Modifying clone should not affect original") 109 + } 110 + } 111 + 112 + func TestUnion(t *testing.T) { 113 + s1 := Collect(slices.Values([]int{1, 2})) 114 + s2 := Collect(slices.Values([]int{2, 3})) 115 + 116 + result := Collect(s1.Union(s2)) 117 + expected := Collect(slices.Values([]int{1, 2, 3})) 118 + 119 + if !result.Equal(expected) { 120 + t.Errorf("Expected %v, got %v", expected, result) 121 + } 122 + } 123 + 124 + func TestIntersection(t *testing.T) { 125 + s1 := Collect(slices.Values([]int{1, 2, 3})) 126 + s2 := Collect(slices.Values([]int{2, 3, 4})) 127 + 128 + expected := Collect(slices.Values([]int{2, 3})) 129 + result := Collect(s1.Intersection(s2)) 130 + 131 + if !result.Equal(expected) { 132 + t.Errorf("Expected %v, got %v", expected, result) 133 + } 134 + } 135 + 136 + func TestDifference(t *testing.T) { 137 + s1 := Collect(slices.Values([]int{1, 2, 3})) 138 + s2 := Collect(slices.Values([]int{2, 3, 4})) 139 + 140 + expected := 
Collect(slices.Values([]int{1})) 141 + result := Collect(s1.Difference(s2)) 142 + 143 + if !result.Equal(expected) { 144 + t.Errorf("Expected %v, got %v", expected, result) 145 + } 146 + } 147 + 148 + func TestSymmetricDifference(t *testing.T) { 149 + s1 := Collect(slices.Values([]int{1, 2, 3})) 150 + s2 := Collect(slices.Values([]int{2, 3, 4})) 151 + 152 + expected := Collect(slices.Values([]int{1, 4})) 153 + result := Collect(s1.SymmetricDifference(s2)) 154 + 155 + if !result.Equal(expected) { 156 + t.Errorf("Expected %v, got %v", expected, result) 157 + } 158 + } 159 + 160 + func TestSymmetricDifferenceCommutativeProperty(t *testing.T) { 161 + s1 := Collect(slices.Values([]int{1, 2, 3})) 162 + s2 := Collect(slices.Values([]int{2, 3, 4})) 163 + 164 + result1 := Collect(s1.SymmetricDifference(s2)) 165 + result2 := Collect(s2.SymmetricDifference(s1)) 166 + 167 + if !result1.Equal(result2) { 168 + t.Errorf("Expected %v, got %v", result1, result2) 169 + } 170 + } 171 + 172 + func TestIsSubset(t *testing.T) { 173 + s1 := Collect(slices.Values([]int{1, 2})) 174 + s2 := Collect(slices.Values([]int{1, 2, 3})) 175 + 176 + if !s1.IsSubset(s2) { 177 + t.Error("s1 should be subset of s2") 178 + } 179 + if s2.IsSubset(s1) { 180 + t.Error("s2 should not be subset of s1") 181 + } 182 + } 183 + 184 + func TestIsSuperset(t *testing.T) { 185 + s1 := Collect(slices.Values([]int{1, 2, 3})) 186 + s2 := Collect(slices.Values([]int{1, 2})) 187 + 188 + if !s1.IsSuperset(s2) { 189 + t.Error("s1 should be superset of s2") 190 + } 191 + if s2.IsSuperset(s1) { 192 + t.Error("s2 should not be superset of s1") 193 + } 194 + } 195 + 196 + func TestIsDisjoint(t *testing.T) { 197 + s1 := Collect(slices.Values([]int{1, 2})) 198 + s2 := Collect(slices.Values([]int{3, 4})) 199 + s3 := Collect(slices.Values([]int{2, 3})) 200 + 201 + if !s1.IsDisjoint(s2) { 202 + t.Error("s1 and s2 should be disjoint") 203 + } 204 + if s1.IsDisjoint(s3) { 205 + t.Error("s1 and s3 should not be disjoint") 206 + } 207 + } 208 + 209 + func TestEqual(t *testing.T) { 210 + s1 := Collect(slices.Values([]int{1, 2, 3})) 211 + s2 := Collect(slices.Values([]int{3, 2, 1})) 212 + s3 := Collect(slices.Values([]int{1, 2})) 213 + 214 + if !s1.Equal(s2) { 215 + t.Error("s1 and s2 should be equal") 216 + } 217 + if s1.Equal(s3) { 218 + t.Error("s1 and s3 should not be equal") 219 + } 220 + } 221 + 222 + func TestCollect(t *testing.T) { 223 + s1 := Collect(slices.Values([]int{1, 2})) 224 + s2 := Collect(slices.Values([]int{2, 3})) 225 + 226 + unionSet := Collect(s1.Union(s2)) 227 + if unionSet.Len() != 3 { 228 + t.Errorf("Expected union set length 3, got %d", unionSet.Len()) 229 + } 230 + if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) { 231 + t.Error("Union set should contain 1, 2, and 3") 232 + } 233 + 234 + diffSet := Collect(s1.Difference(s2)) 235 + if diffSet.Len() != 1 { 236 + t.Errorf("Expected difference set length 1, got %d", diffSet.Len()) 237 + } 238 + if !diffSet.Contains(1) { 239 + t.Error("Difference set should contain 1") 240 + } 241 + } 242 + 243 + func TestPropertySingleonLen(t *testing.T) { 244 + f := func(item int) bool { 245 + single := Singleton(item) 246 + return single.Len() == 1 247 + } 248 + 249 + if err := quick.Check(f, nil); err != nil { 250 + t.Error(err) 251 + } 252 + } 253 + 254 + func TestPropertyInsertIdempotent(t *testing.T) { 255 + f := func(s Set[int], item int) bool { 256 + clone := s.Clone() 257 + 258 + clone.Insert(item) 259 + firstLen := clone.Len() 260 + 261 + clone.Insert(item) 262 + 
secondLen := clone.Len() 263 + 264 + return firstLen == secondLen 265 + } 266 + 267 + if err := quick.Check(f, nil); err != nil { 268 + t.Error(err) 269 + } 270 + } 271 + 272 + func TestPropertyUnionCommutative(t *testing.T) { 273 + f := func(s1 Set[int], s2 Set[int]) bool { 274 + union1 := Collect(s1.Union(s2)) 275 + union2 := Collect(s2.Union(s1)) 276 + return union1.Equal(union2) 277 + } 278 + 279 + if err := quick.Check(f, nil); err != nil { 280 + t.Error(err) 281 + } 282 + } 283 + 284 + func TestPropertyIntersectionCommutative(t *testing.T) { 285 + f := func(s1 Set[int], s2 Set[int]) bool { 286 + inter1 := Collect(s1.Intersection(s2)) 287 + inter2 := Collect(s2.Intersection(s1)) 288 + return inter1.Equal(inter2) 289 + } 290 + 291 + if err := quick.Check(f, nil); err != nil { 292 + t.Error(err) 293 + } 294 + } 295 + 296 + func TestPropertyCloneEquals(t *testing.T) { 297 + f := func(s Set[int]) bool { 298 + clone := s.Clone() 299 + return s.Equal(clone) 300 + } 301 + 302 + if err := quick.Check(f, nil); err != nil { 303 + t.Error(err) 304 + } 305 + } 306 + 307 + func TestPropertyIntersectionIsSubset(t *testing.T) { 308 + f := func(s1 Set[int], s2 Set[int]) bool { 309 + inter := Collect(s1.Intersection(s2)) 310 + return inter.IsSubset(s1) && inter.IsSubset(s2) 311 + } 312 + 313 + if err := quick.Check(f, nil); err != nil { 314 + t.Error(err) 315 + } 316 + } 317 + 318 + func TestPropertyUnionIsSuperset(t *testing.T) { 319 + f := func(s1 Set[int], s2 Set[int]) bool { 320 + union := Collect(s1.Union(s2)) 321 + return union.IsSuperset(s1) && union.IsSuperset(s2) 322 + } 323 + 324 + if err := quick.Check(f, nil); err != nil { 325 + t.Error(err) 326 + } 327 + } 328 + 329 + func TestPropertyDifferenceDisjoint(t *testing.T) { 330 + f := func(s1 Set[int], s2 Set[int]) bool { 331 + diff := Collect(s1.Difference(s2)) 332 + return diff.IsDisjoint(s2) 333 + } 334 + 335 + if err := quick.Check(f, nil); err != nil { 336 + t.Error(err) 337 + } 338 + } 339 + 340 + func TestPropertySymmetricDifferenceCommutative(t *testing.T) { 341 + f := func(s1 Set[int], s2 Set[int]) bool { 342 + symDiff1 := Collect(s1.SymmetricDifference(s2)) 343 + symDiff2 := Collect(s2.SymmetricDifference(s1)) 344 + return symDiff1.Equal(symDiff2) 345 + } 346 + 347 + if err := quick.Check(f, nil); err != nil { 348 + t.Error(err) 349 + } 350 + } 351 + 352 + func TestPropertyRemoveWorks(t *testing.T) { 353 + f := func(s Set[int], item int) bool { 354 + clone := s.Clone() 355 + clone.Insert(item) 356 + clone.Remove(item) 357 + return !clone.Contains(item) 358 + } 359 + 360 + if err := quick.Check(f, nil); err != nil { 361 + t.Error(err) 362 + } 363 + } 364 + 365 + func TestPropertyClearEmpty(t *testing.T) { 366 + f := func(s Set[int]) bool { 367 + s.Clear() 368 + return s.IsEmpty() && s.Len() == 0 369 + } 370 + 371 + if err := quick.Check(f, nil); err != nil { 372 + t.Error(err) 373 + } 374 + } 375 + 376 + func TestPropertyIsSubsetReflexive(t *testing.T) { 377 + f := func(s Set[int]) bool { 378 + return s.IsSubset(s) 379 + } 380 + 381 + if err := quick.Check(f, nil); err != nil { 382 + t.Error(err) 383 + } 384 + } 385 + 386 + func TestPropertyDeMorganUnion(t *testing.T) { 387 + f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool { 388 + // create a universe that contains both sets 389 + u := universe.Clone() 390 + for item := range s1.All() { 391 + u.Insert(item) 392 + } 393 + for item := range s2.All() { 394 + u.Insert(item) 395 + } 396 + 397 + // (A u B)' = A' n B' 398 + union := Collect(s1.Union(s2)) 399 + complementUnion := 
Collect(u.Difference(union)) 400 + 401 + complementS1 := Collect(u.Difference(s1)) 402 + complementS2 := Collect(u.Difference(s2)) 403 + intersectionComplements := Collect(complementS1.Intersection(complementS2)) 404 + 405 + return complementUnion.Equal(intersectionComplements) 406 + } 407 + 408 + if err := quick.Check(f, nil); err != nil { 409 + t.Error(err) 410 + } 411 + }
+1
spindle/db/repos.go
··· 16 16 if err != nil { 17 17 return nil, err 18 18 } 19 + defer rows.Close() 19 20 20 21 var knots []string 21 22 for rows.Next() {
+22 -21
spindle/engine/engine.go
··· 3 3 import ( 4 4 "context" 5 5 "errors" 6 - "fmt" 7 6 "log/slog" 7 + "sync" 8 8 9 9 securejoin "github.com/cyphar/filepath-securejoin" 10 - "golang.org/x/sync/errgroup" 11 10 "tangled.org/core/notifier" 12 11 "tangled.org/core/spindle/config" 13 12 "tangled.org/core/spindle/db" ··· 31 30 } 32 31 } 33 32 34 - eg, ctx := errgroup.WithContext(ctx) 33 + var wg sync.WaitGroup 35 34 for eng, wfs := range pipeline.Workflows { 36 35 workflowTimeout := eng.WorkflowTimeout() 37 36 l.Info("using workflow timeout", "timeout", workflowTimeout) 38 37 39 38 for _, w := range wfs { 40 - eg.Go(func() error { 39 + wg.Add(1) 40 + go func() { 41 + defer wg.Done() 42 + 41 43 wid := models.WorkflowId{ 42 44 PipelineId: pipelineId, 43 45 Name: w.Name, ··· 45 47 46 48 err := db.StatusRunning(wid, n) 47 49 if err != nil { 48 - return err 50 + l.Error("failed to set workflow status to running", "wid", wid, "err", err) 51 + return 49 52 } 50 53 51 54 err = eng.SetupWorkflow(ctx, wid, &w) ··· 61 64 62 65 dbErr := db.StatusFailed(wid, err.Error(), -1, n) 63 66 if dbErr != nil { 64 - return dbErr 67 + l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr) 65 68 } 66 - return err 69 + return 67 70 } 68 71 defer eng.DestroyWorkflow(ctx, wid) 69 72 70 - wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid) 73 + secretValues := make([]string, len(allSecrets)) 74 + for i, s := range allSecrets { 75 + secretValues[i] = s.Value 76 + } 77 + wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues) 71 78 if err != nil { 72 79 l.Warn("failed to setup step logger; logs will not be persisted", "error", err) 73 80 wfLogger = nil ··· 99 106 if errors.Is(err, ErrTimedOut) { 100 107 dbErr := db.StatusTimeout(wid, n) 101 108 if dbErr != nil { 102 - return dbErr 109 + l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr) 103 110 } 104 111 } else { 105 112 dbErr := db.StatusFailed(wid, err.Error(), -1, n) 106 113 if dbErr != nil { 107 - return dbErr 114 + l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr) 108 115 } 109 116 } 110 - 111 - return fmt.Errorf("starting steps image: %w", err) 117 + return 112 118 } 113 119 } 114 120 115 121 err = db.StatusSuccess(wid, n) 116 122 if err != nil { 117 - return err 123 + l.Error("failed to set workflow status to success", "wid", wid, "err", err) 118 124 } 119 - 120 - return nil 121 - }) 125 + }() 122 126 } 123 127 } 124 128 125 - if err := eg.Wait(); err != nil { 126 - l.Error("failed to run one or more workflows", "err", err) 127 - } else { 128 - l.Info("successfully ran full pipeline") 129 - } 129 + wg.Wait() 130 + l.Info("all workflows completed") 130 131 }
+5 -3
spindle/engines/nixery/engine.go
··· 294 294 workflowEnvs.AddEnv(s.Key, s.Value) 295 295 } 296 296 297 - step := w.Steps[idx].(Step) 297 + step := w.Steps[idx] 298 298 299 299 select { 300 300 case <-ctx.Done(): ··· 303 303 } 304 304 305 305 envs := append(EnvVars(nil), workflowEnvs...) 306 - for k, v := range step.environment { 307 - envs.AddEnv(k, v) 306 + if nixStep, ok := step.(Step); ok { 307 + for k, v := range nixStep.environment { 308 + envs.AddEnv(k, v) 309 + } 308 310 } 309 311 envs.AddEnv("HOME", homeDir) 310 312
+6 -1
spindle/models/logger.go
··· 12 12 type WorkflowLogger struct { 13 13 file *os.File 14 14 encoder *json.Encoder 15 + mask *SecretMask 15 16 } 16 17 17 - func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) { 18 + func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) { 18 19 path := LogFilePath(baseDir, wid) 19 20 20 21 file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) ··· 25 26 return &WorkflowLogger{ 26 27 file: file, 27 28 encoder: json.NewEncoder(file), 29 + mask: NewSecretMask(secretValues), 28 30 }, nil 29 31 } 30 32 ··· 62 64 63 65 func (w *dataWriter) Write(p []byte) (int, error) { 64 66 line := strings.TrimRight(string(p), "\r\n") 67 + if w.logger.mask != nil { 68 + line = w.logger.mask.Mask(line) 69 + } 65 70 entry := NewDataLogLine(w.idx, line, w.stream) 66 71 if err := w.logger.encoder.Encode(entry); err != nil { 67 72 return 0, err
+51
spindle/models/secret_mask.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "strings" 6 + ) 7 + 8 + // SecretMask replaces secret values in strings with "***". 9 + type SecretMask struct { 10 + replacer *strings.Replacer 11 + } 12 + 13 + // NewSecretMask creates a mask for the given secret values. 14 + // Also registers base64-encoded variants of each secret. 15 + func NewSecretMask(values []string) *SecretMask { 16 + var pairs []string 17 + 18 + for _, value := range values { 19 + if value == "" { 20 + continue 21 + } 22 + 23 + pairs = append(pairs, value, "***") 24 + 25 + b64 := base64.StdEncoding.EncodeToString([]byte(value)) 26 + if b64 != value { 27 + pairs = append(pairs, b64, "***") 28 + } 29 + 30 + b64NoPad := strings.TrimRight(b64, "=") 31 + if b64NoPad != b64 && b64NoPad != value { 32 + pairs = append(pairs, b64NoPad, "***") 33 + } 34 + } 35 + 36 + if len(pairs) == 0 { 37 + return nil 38 + } 39 + 40 + return &SecretMask{ 41 + replacer: strings.NewReplacer(pairs...), 42 + } 43 + } 44 + 45 + // Mask replaces all registered secret values with "***". 46 + func (m *SecretMask) Mask(input string) string { 47 + if m == nil || m.replacer == nil { 48 + return input 49 + } 50 + return m.replacer.Replace(input) 51 + }
+135
spindle/models/secret_mask_test.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "testing" 6 + ) 7 + 8 + func TestSecretMask_BasicMasking(t *testing.T) { 9 + mask := NewSecretMask([]string{"mysecret123"}) 10 + 11 + input := "The password is mysecret123 in this log" 12 + expected := "The password is *** in this log" 13 + 14 + result := mask.Mask(input) 15 + if result != expected { 16 + t.Errorf("expected %q, got %q", expected, result) 17 + } 18 + } 19 + 20 + func TestSecretMask_Base64Encoded(t *testing.T) { 21 + secret := "mysecret123" 22 + mask := NewSecretMask([]string{secret}) 23 + 24 + b64 := base64.StdEncoding.EncodeToString([]byte(secret)) 25 + input := "Encoded: " + b64 26 + expected := "Encoded: ***" 27 + 28 + result := mask.Mask(input) 29 + if result != expected { 30 + t.Errorf("expected %q, got %q", expected, result) 31 + } 32 + } 33 + 34 + func TestSecretMask_Base64NoPadding(t *testing.T) { 35 + // "test" encodes to "dGVzdA==" with padding 36 + secret := "test" 37 + mask := NewSecretMask([]string{secret}) 38 + 39 + b64NoPad := "dGVzdA" // base64 without padding 40 + input := "Token: " + b64NoPad 41 + expected := "Token: ***" 42 + 43 + result := mask.Mask(input) 44 + if result != expected { 45 + t.Errorf("expected %q, got %q", expected, result) 46 + } 47 + } 48 + 49 + func TestSecretMask_MultipleSecrets(t *testing.T) { 50 + mask := NewSecretMask([]string{"password1", "apikey123"}) 51 + 52 + input := "Using password1 and apikey123 for auth" 53 + expected := "Using *** and *** for auth" 54 + 55 + result := mask.Mask(input) 56 + if result != expected { 57 + t.Errorf("expected %q, got %q", expected, result) 58 + } 59 + } 60 + 61 + func TestSecretMask_MultipleOccurrences(t *testing.T) { 62 + mask := NewSecretMask([]string{"secret"}) 63 + 64 + input := "secret appears twice: secret" 65 + expected := "*** appears twice: ***" 66 + 67 + result := mask.Mask(input) 68 + if result != expected { 69 + t.Errorf("expected %q, got %q", expected, result) 70 + } 71 + } 72 + 73 + func TestSecretMask_ShortValues(t *testing.T) { 74 + mask := NewSecretMask([]string{"abc", "xy", ""}) 75 + 76 + if mask == nil { 77 + t.Fatal("expected non-nil mask") 78 + } 79 + 80 + input := "abc xy test" 81 + expected := "*** *** test" 82 + result := mask.Mask(input) 83 + if result != expected { 84 + t.Errorf("expected %q, got %q", expected, result) 85 + } 86 + } 87 + 88 + func TestSecretMask_NilMask(t *testing.T) { 89 + var mask *SecretMask 90 + 91 + input := "some input text" 92 + result := mask.Mask(input) 93 + if result != input { 94 + t.Errorf("expected %q, got %q", input, result) 95 + } 96 + } 97 + 98 + func TestSecretMask_EmptyInput(t *testing.T) { 99 + mask := NewSecretMask([]string{"secret"}) 100 + 101 + result := mask.Mask("") 102 + if result != "" { 103 + t.Errorf("expected empty string, got %q", result) 104 + } 105 + } 106 + 107 + func TestSecretMask_NoMatch(t *testing.T) { 108 + mask := NewSecretMask([]string{"secretvalue"}) 109 + 110 + input := "nothing to mask here" 111 + result := mask.Mask(input) 112 + if result != input { 113 + t.Errorf("expected %q, got %q", input, result) 114 + } 115 + } 116 + 117 + func TestSecretMask_EmptySecretsList(t *testing.T) { 118 + mask := NewSecretMask([]string{}) 119 + 120 + if mask != nil { 121 + t.Error("expected nil mask for empty secrets list") 122 + } 123 + } 124 + 125 + func TestSecretMask_EmptySecretsFiltered(t *testing.T) { 126 + mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"}) 127 + 128 + input := "Using validpassword here" 129 + expected := "Using *** here" 130 + 
131 + result := mask.Mask(input) 132 + if result != expected { 133 + t.Errorf("expected %q, got %q", expected, result) 134 + } 135 + }
+1 -1
spindle/motd
··· 20 20 ** 21 21 ******** 22 22 23 - This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle 23 + This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles 24 24 25 25 Most API routes are under /xrpc/
+31 -13
spindle/server.go
··· 8 8 "log/slog" 9 9 "maps" 10 10 "net/http" 11 + "sync" 11 12 12 13 "github.com/go-chi/chi/v5" 13 14 "tangled.org/core/api/tangled" ··· 30 31 ) 31 32 32 33 //go:embed motd 33 - var motd []byte 34 + var defaultMotd []byte 34 35 35 36 const ( 36 37 rbacDomain = "thisserver" 37 38 ) 38 39 39 40 type Spindle struct { 40 - jc *jetstream.JetstreamClient 41 - db *db.DB 42 - e *rbac.Enforcer 43 - l *slog.Logger 44 - n *notifier.Notifier 45 - engs map[string]models.Engine 46 - jq *queue.Queue 47 - cfg *config.Config 48 - ks *eventconsumer.Consumer 49 - res *idresolver.Resolver 50 - vault secrets.Manager 41 + jc *jetstream.JetstreamClient 42 + db *db.DB 43 + e *rbac.Enforcer 44 + l *slog.Logger 45 + n *notifier.Notifier 46 + engs map[string]models.Engine 47 + jq *queue.Queue 48 + cfg *config.Config 49 + ks *eventconsumer.Consumer 50 + res *idresolver.Resolver 51 + vault secrets.Manager 52 + motd []byte 53 + motdMu sync.RWMutex 51 54 } 52 55 53 56 // New creates a new Spindle server with the provided configuration and engines. ··· 128 131 cfg: cfg, 129 132 res: resolver, 130 133 vault: vault, 134 + motd: defaultMotd, 131 135 } 132 136 133 137 err = e.AddSpindle(rbacDomain) ··· 201 205 return s.e 202 206 } 203 207 208 + // SetMotdContent sets custom MOTD content, replacing the embedded default. 209 + func (s *Spindle) SetMotdContent(content []byte) { 210 + s.motdMu.Lock() 211 + defer s.motdMu.Unlock() 212 + s.motd = content 213 + } 214 + 215 + // GetMotdContent returns the current MOTD content. 216 + func (s *Spindle) GetMotdContent() []byte { 217 + s.motdMu.RLock() 218 + defer s.motdMu.RUnlock() 219 + return s.motd 220 + } 221 + 204 222 // Start starts the Spindle server (blocking). 205 223 func (s *Spindle) Start(ctx context.Context) error { 206 224 // starts a job queue runner in the background ··· 246 264 mux := chi.NewRouter() 247 265 248 266 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 249 - w.Write(motd) 267 + w.Write(s.GetMotdContent()) 250 268 }) 251 269 mux.HandleFunc("/events", s.Events) 252 270 mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
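A short sketch of the new setter, assuming the caller already holds a constructed *spindle.Spindle; the helper name and file path are hypothetical, and spindle.New with its configuration is elided.

package motd

import (
	"os"

	"tangled.org/core/spindle"
)

// ApplyCustomMotd replaces the embedded default MOTD with the contents of a
// file supplied by the operator.
func ApplyCustomMotd(s *spindle.Spindle, path string) error {
	content, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	s.SetMotdContent(content)
	return nil
}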
+1 -1
tailwind.config.js
··· 2 2 const colors = require("tailwindcss/colors"); 3 3 4 4 module.exports = { 5 - content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"], 5 + content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"], 6 6 darkMode: "media", 7 7 theme: { 8 8 container: {
+199
types/commit.go
··· 1 + package types
2 +
3 + import (
4 + "bytes"
5 + "encoding/json"
6 + "fmt"
7 + "maps"
8 + "regexp"
9 + "strings"
10 +
11 + "github.com/go-git/go-git/v5/plumbing"
12 + "github.com/go-git/go-git/v5/plumbing/object"
13 + )
14 +
15 + type Commit struct {
16 + // hash of the commit object.
17 + Hash plumbing.Hash `json:"hash,omitempty"`
18 +
19 + // author is the original author of the commit.
20 + Author object.Signature `json:"author"`
21 +
22 + // committer is the one performing the commit, might be different from author.
23 + Committer object.Signature `json:"committer"`
24 +
25 + // message is the commit message, contains arbitrary text.
26 + Message string `json:"message"`
27 +
28 + // treehash is the hash of the root tree of the commit.
29 + Tree string `json:"tree"`
30 +
31 + // parents are the hashes of the parent commits of the commit.
32 + ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"`
33 +
34 + // pgpsignature is the pgp signature of the commit.
35 + PGPSignature string `json:"pgp_signature,omitempty"`
36 +
37 + // mergetag is the embedded tag object when a merge commit is created by
38 + // merging a signed tag.
39 + MergeTag string `json:"merge_tag,omitempty"`
40 +
41 + // changeid is a unique identifier for the change (e.g., gerrit change-id).
42 + ChangeId string `json:"change_id,omitempty"`
43 +
44 + // extraheaders contains additional headers not captured by other fields.
45 + ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"`
46 +
47 + // deprecated: kept for backwards compatibility with old json format.
48 + This string `json:"this,omitempty"`
49 +
50 + // deprecated: kept for backwards compatibility with old json format.
51 + Parent string `json:"parent,omitempty"`
52 + }
53 +
54 + // types.Commit unifies two commit structs:
55 + // - object.Commit from go-git
56 + // - the commit struct embedded in types.NiceDiff
57 + //
58 + // to do this in a backwards-compatible fashion, the base struct keeps
59 + // the same fields as NiceDiff.Commit, and the custom unmarshal below also
60 + // reads the go-git struct fields, making sense of both representations
61 + // and keeping as much data as possible from either
62 + // form.
63 + func (c *Commit) UnmarshalJSON(data []byte) error { 64 + type Alias Commit 65 + 66 + aux := &struct { 67 + *object.Commit 68 + *Alias 69 + }{ 70 + Alias: (*Alias)(c), 71 + } 72 + 73 + if err := json.Unmarshal(data, aux); err != nil { 74 + return err 75 + } 76 + 77 + c.FromGoGitCommit(aux.Commit) 78 + 79 + return nil 80 + } 81 + 82 + // fill in as much of Commit as possible from the given go-git commit 83 + func (c *Commit) FromGoGitCommit(gc *object.Commit) { 84 + if gc == nil { 85 + return 86 + } 87 + 88 + if c.Hash.IsZero() { 89 + c.Hash = gc.Hash 90 + } 91 + if c.This == "" { 92 + c.This = gc.Hash.String() 93 + } 94 + if isEmptySignature(c.Author) { 95 + c.Author = gc.Author 96 + } 97 + if isEmptySignature(c.Committer) { 98 + c.Committer = gc.Committer 99 + } 100 + if c.Message == "" { 101 + c.Message = gc.Message 102 + } 103 + if c.Tree == "" { 104 + c.Tree = gc.TreeHash.String() 105 + } 106 + if c.PGPSignature == "" { 107 + c.PGPSignature = gc.PGPSignature 108 + } 109 + if c.MergeTag == "" { 110 + c.MergeTag = gc.MergeTag 111 + } 112 + 113 + if len(c.ParentHashes) == 0 { 114 + c.ParentHashes = gc.ParentHashes 115 + } 116 + if c.Parent == "" && len(gc.ParentHashes) > 0 { 117 + c.Parent = gc.ParentHashes[0].String() 118 + } 119 + 120 + if len(c.ExtraHeaders) == 0 { 121 + c.ExtraHeaders = make(map[string][]byte) 122 + maps.Copy(c.ExtraHeaders, gc.ExtraHeaders) 123 + } 124 + 125 + if c.ChangeId == "" { 126 + if v, ok := gc.ExtraHeaders["change-id"]; ok { 127 + c.ChangeId = string(v) 128 + } 129 + } 130 + } 131 + 132 + func isEmptySignature(s object.Signature) bool { 133 + return s.Email == "" && s.Name == "" && s.When.IsZero() 134 + } 135 + 136 + // produce a verifiable payload from this commit's metadata 137 + func (c *Commit) Payload() string { 138 + author := bytes.NewBuffer([]byte{}) 139 + c.Author.Encode(author) 140 + 141 + committer := bytes.NewBuffer([]byte{}) 142 + c.Committer.Encode(committer) 143 + 144 + payload := strings.Builder{} 145 + 146 + fmt.Fprintf(&payload, "tree %s\n", c.Tree) 147 + 148 + if len(c.ParentHashes) > 0 { 149 + for _, p := range c.ParentHashes { 150 + fmt.Fprintf(&payload, "parent %s\n", p.String()) 151 + } 152 + } else { 153 + // present for backwards compatibility 154 + fmt.Fprintf(&payload, "parent %s\n", c.Parent) 155 + } 156 + 157 + fmt.Fprintf(&payload, "author %s\n", author.String()) 158 + fmt.Fprintf(&payload, "committer %s\n", committer.String()) 159 + 160 + if c.ChangeId != "" { 161 + fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId) 162 + } else if v, ok := c.ExtraHeaders["change-id"]; ok { 163 + fmt.Fprintf(&payload, "change-id %s\n", string(v)) 164 + } 165 + 166 + fmt.Fprintf(&payload, "\n%s", c.Message) 167 + 168 + return payload.String() 169 + } 170 + 171 + var ( 172 + coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`) 173 + ) 174 + 175 + func (commit Commit) CoAuthors() []object.Signature { 176 + var coAuthors []object.Signature 177 + seen := make(map[string]bool) 178 + matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1) 179 + 180 + for _, match := range matches { 181 + if len(match) >= 3 { 182 + name := strings.TrimSpace(match[1]) 183 + email := strings.TrimSpace(match[2]) 184 + 185 + if seen[email] { 186 + continue 187 + } 188 + seen[email] = true 189 + 190 + coAuthors = append(coAuthors, object.Signature{ 191 + Name: name, 192 + Email: email, 193 + When: commit.Committer.When, 194 + }) 195 + } 196 + } 197 + 198 + return coAuthors 199 + }
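A small sketch of the co-author parsing on a hand-written commit message; the names and addresses are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/go-git/go-git/v5/plumbing/object"
	"tangled.org/core/types"
)

func main() {
	c := types.Commit{
		Message: "fix pagination\n\nCo-authored-by: Alice Example <alice@example.com>\nCo-authored-by: Bob Example <bob@example.com>",
		Committer: object.Signature{
			Name:  "Carol Example",
			Email: "carol@example.com",
			When:  time.Now(),
		},
	}

	// each co-author inherits the committer's timestamp, as in CoAuthors above
	for _, ca := range c.CoAuthors() {
		fmt.Printf("%s <%s>\n", ca.Name, ca.Email)
	}
	// Alice Example <alice@example.com>
	// Bob Example <bob@example.com>
}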
+5 -12
types/diff.go
··· 2 2 3 3 import ( 4 4 "github.com/bluekeyes/go-gitdiff/gitdiff" 5 - "github.com/go-git/go-git/v5/plumbing/object" 6 5 ) 7 6 8 7 type DiffOpts struct { ··· 43 42 44 43 // A nicer git diff representation. 45 44 type NiceDiff struct { 46 - Commit struct { 47 - Message string `json:"message"` 48 - Author object.Signature `json:"author"` 49 - This string `json:"this"` 50 - Parent string `json:"parent"` 51 - PGPSignature string `json:"pgp_signature"` 52 - Committer object.Signature `json:"committer"` 53 - Tree string `json:"tree"` 54 - ChangedId string `json:"change_id"` 55 - } `json:"commit"` 56 - Stat struct { 45 + Commit Commit `json:"commit"` 46 + Stat struct { 57 47 FilesChanged int `json:"files_changed"` 58 48 Insertions int `json:"insertions"` 59 49 Deletions int `json:"deletions"` ··· 84 74 85 75 // used by html elements as a unique ID for hrefs 86 76 func (d *Diff) Id() string { 77 + if d.IsDelete { 78 + return d.Name.Old 79 + } 87 80 return d.Name.New 88 81 } 89 82
+112
types/diff_test.go
··· 1 + package types 2 + 3 + import "testing" 4 + 5 + func TestDiffId(t *testing.T) { 6 + tests := []struct { 7 + name string 8 + diff Diff 9 + expected string 10 + }{ 11 + { 12 + name: "regular file uses new name", 13 + diff: Diff{ 14 + Name: struct { 15 + Old string `json:"old"` 16 + New string `json:"new"` 17 + }{Old: "", New: "src/main.go"}, 18 + }, 19 + expected: "src/main.go", 20 + }, 21 + { 22 + name: "new file uses new name", 23 + diff: Diff{ 24 + Name: struct { 25 + Old string `json:"old"` 26 + New string `json:"new"` 27 + }{Old: "", New: "src/new.go"}, 28 + IsNew: true, 29 + }, 30 + expected: "src/new.go", 31 + }, 32 + { 33 + name: "deleted file uses old name", 34 + diff: Diff{ 35 + Name: struct { 36 + Old string `json:"old"` 37 + New string `json:"new"` 38 + }{Old: "src/deleted.go", New: ""}, 39 + IsDelete: true, 40 + }, 41 + expected: "src/deleted.go", 42 + }, 43 + { 44 + name: "renamed file uses new name", 45 + diff: Diff{ 46 + Name: struct { 47 + Old string `json:"old"` 48 + New string `json:"new"` 49 + }{Old: "src/old.go", New: "src/renamed.go"}, 50 + IsRename: true, 51 + }, 52 + expected: "src/renamed.go", 53 + }, 54 + } 55 + 56 + for _, tt := range tests { 57 + t.Run(tt.name, func(t *testing.T) { 58 + if got := tt.diff.Id(); got != tt.expected { 59 + t.Errorf("Diff.Id() = %q, want %q", got, tt.expected) 60 + } 61 + }) 62 + } 63 + } 64 + 65 + func TestChangedFilesMatchesDiffId(t *testing.T) { 66 + // ChangedFiles() must return values matching each Diff's Id() 67 + // so that sidebar links point to the correct anchors. 68 + // Tests existing, deleted, new, and renamed files. 69 + nd := NiceDiff{ 70 + Diff: []Diff{ 71 + { 72 + Name: struct { 73 + Old string `json:"old"` 74 + New string `json:"new"` 75 + }{Old: "", New: "src/modified.go"}, 76 + }, 77 + { 78 + Name: struct { 79 + Old string `json:"old"` 80 + New string `json:"new"` 81 + }{Old: "src/deleted.go", New: ""}, 82 + IsDelete: true, 83 + }, 84 + { 85 + Name: struct { 86 + Old string `json:"old"` 87 + New string `json:"new"` 88 + }{Old: "", New: "src/new.go"}, 89 + IsNew: true, 90 + }, 91 + { 92 + Name: struct { 93 + Old string `json:"old"` 94 + New string `json:"new"` 95 + }{Old: "src/old.go", New: "src/renamed.go"}, 96 + IsRename: true, 97 + }, 98 + }, 99 + } 100 + 101 + changedFiles := nd.ChangedFiles() 102 + 103 + if len(changedFiles) != len(nd.Diff) { 104 + t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff)) 105 + } 106 + 107 + for i, diff := range nd.Diff { 108 + if changedFiles[i] != diff.Id() { 109 + t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id()) 110 + } 111 + } 112 + }
+17 -17
types/repo.go
··· 8 8 ) 9 9 10 10 type RepoIndexResponse struct { 11 - IsEmpty bool `json:"is_empty"` 12 - Ref string `json:"ref,omitempty"` 13 - Readme string `json:"readme,omitempty"` 14 - ReadmeFileName string `json:"readme_file_name,omitempty"` 15 - Commits []*object.Commit `json:"commits,omitempty"` 16 - Description string `json:"description,omitempty"` 17 - Files []NiceTree `json:"files,omitempty"` 18 - Branches []Branch `json:"branches,omitempty"` 19 - Tags []*TagReference `json:"tags,omitempty"` 20 - TotalCommits int `json:"total_commits,omitempty"` 11 + IsEmpty bool `json:"is_empty"` 12 + Ref string `json:"ref,omitempty"` 13 + Readme string `json:"readme,omitempty"` 14 + ReadmeFileName string `json:"readme_file_name,omitempty"` 15 + Commits []Commit `json:"commits,omitempty"` 16 + Description string `json:"description,omitempty"` 17 + Files []NiceTree `json:"files,omitempty"` 18 + Branches []Branch `json:"branches,omitempty"` 19 + Tags []*TagReference `json:"tags,omitempty"` 20 + TotalCommits int `json:"total_commits,omitempty"` 21 21 } 22 22 23 23 type RepoLogResponse struct { 24 - Commits []*object.Commit `json:"commits,omitempty"` 25 - Ref string `json:"ref,omitempty"` 26 - Description string `json:"description,omitempty"` 27 - Log bool `json:"log,omitempty"` 28 - Total int `json:"total,omitempty"` 29 - Page int `json:"page,omitempty"` 30 - PerPage int `json:"per_page,omitempty"` 24 + Commits []Commit `json:"commits,omitempty"` 25 + Ref string `json:"ref,omitempty"` 26 + Description string `json:"description,omitempty"` 27 + Log bool `json:"log,omitempty"` 28 + Total int `json:"total,omitempty"` 29 + Page int `json:"page,omitempty"` 30 + PerPage int `json:"per_page,omitempty"` 31 31 } 32 32 33 33 type RepoCommitResponse struct {