forked from tangled.org/core
Monorepo for Tangled — https://tangled.org

Compare changes

Choose any two refs to compare.

Changed files
+10174 -4192
.air
api
appview
commitverify
db
email
indexer
issues
knots
labels
mentions
middleware
models
notifications
notify
oauth
pages
pipelines
pulls
repo
reporesolver
serververify
settings
spindles
state
strings
validator
crypto
docs
hook
jetstream
knotserver
lexicons
local-infra
nix
orm
patchutil
rbac
sets
spindle
types
+8 -6
.air/appview.toml
···
- [build]
- cmd = "tailwindcss -i input.css -o ./appview/pages/static/tw.css && go build -o .bin/app ./cmd/appview/main.go"
- bin = ";set -o allexport && source .env && set +o allexport; .bin/app"
  root = "."
+ tmp_dir = "out"

- exclude_regex = [".*_templ.go"]
- include_ext = ["go", "templ", "html", "css"]
- exclude_dir = ["target", "atrium", "nix"]
+ [build]
+ cmd = "go build -o out/appview.out cmd/appview/main.go"
+ bin = "out/appview.out"
+
+ include_ext = ["go"]
+ exclude_dir = ["avatar", "camo", "indexes", "nix", "tmp"]
+ stop_on_error = true
+11
.air/knot.toml
···
+ root = "."
+ tmp_dir = "out"
+
+ [build]
+ cmd = 'go build -ldflags "-X tangled.org/core/knotserver.version=$(git describe --tags --long)" -o out/knot.out cmd/knot/main.go'
+ bin = "out/knot.out"
+ args_bin = ["server"]
+
+ include_ext = ["go"]
+ exclude_dir = ["avatar", "camo", "indexes", "nix", "tmp"]
+ stop_on_error = true
-7
.air/knotserver.toml
···
- [build]
- cmd = 'go build -ldflags "-X tangled.org/core/knotserver.version=$(git describe --tags --long)" -o .bin/knot ./cmd/knot/'
- bin = ".bin/knot server"
- root = "."
-
- exclude_regex = [""]
- include_ext = ["go", "templ"]
+10
.air/spindle.toml
···
+ root = "."
+ tmp_dir = "out"
+
+ [build]
+ cmd = "go build -o out/spindle.out cmd/spindle/main.go"
+ bin = "out/spindle.out"
+
+ include_ext = ["go"]
+ exclude_dir = ["avatar", "camo", "indexes", "nix", "tmp"]
+ stop_on_error = true
+13
.editorconfig
···
+ root = true
+
+ [*.html]
+ indent_size = 2
+
+ [*.json]
+ indent_size = 2
+
+ [*.nix]
+ indent_size = 2
+
+ [*.yml]
+ indent_size = 2
+727 -27
api/tangled/cbor_gen.go
··· 6938 6938 } 6939 6939 6940 6940 cw := cbg.NewCborWriter(w) 6941 - fieldCount := 5 6941 + fieldCount := 7 6942 6942 6943 6943 if t.Body == nil { 6944 + fieldCount-- 6945 + } 6946 + 6947 + if t.Mentions == nil { 6948 + fieldCount-- 6949 + } 6950 + 6951 + if t.References == nil { 6944 6952 fieldCount-- 6945 6953 } 6946 6954 ··· 7045 7053 return err 7046 7054 } 7047 7055 7056 + // t.Mentions ([]string) (slice) 7057 + if t.Mentions != nil { 7058 + 7059 + if len("mentions") > 1000000 { 7060 + return xerrors.Errorf("Value in field \"mentions\" was too long") 7061 + } 7062 + 7063 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil { 7064 + return err 7065 + } 7066 + if _, err := cw.WriteString(string("mentions")); err != nil { 7067 + return err 7068 + } 7069 + 7070 + if len(t.Mentions) > 8192 { 7071 + return xerrors.Errorf("Slice value in field t.Mentions was too long") 7072 + } 7073 + 7074 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil { 7075 + return err 7076 + } 7077 + for _, v := range t.Mentions { 7078 + if len(v) > 1000000 { 7079 + return xerrors.Errorf("Value in field v was too long") 7080 + } 7081 + 7082 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 7083 + return err 7084 + } 7085 + if _, err := cw.WriteString(string(v)); err != nil { 7086 + return err 7087 + } 7088 + 7089 + } 7090 + } 7091 + 7048 7092 // t.CreatedAt (string) (string) 7049 7093 if len("createdAt") > 1000000 { 7050 7094 return xerrors.Errorf("Value in field \"createdAt\" was too long") ··· 7067 7111 if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 7068 7112 return err 7069 7113 } 7114 + 7115 + // t.References ([]string) (slice) 7116 + if t.References != nil { 7117 + 7118 + if len("references") > 1000000 { 7119 + return xerrors.Errorf("Value in field \"references\" was too long") 7120 + } 7121 + 7122 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil { 7123 + return err 7124 + } 7125 + if _, err := cw.WriteString(string("references")); err != nil { 7126 + return err 7127 + } 7128 + 7129 + if len(t.References) > 8192 { 7130 + return xerrors.Errorf("Slice value in field t.References was too long") 7131 + } 7132 + 7133 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil { 7134 + return err 7135 + } 7136 + for _, v := range t.References { 7137 + if len(v) > 1000000 { 7138 + return xerrors.Errorf("Value in field v was too long") 7139 + } 7140 + 7141 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 7142 + return err 7143 + } 7144 + if _, err := cw.WriteString(string(v)); err != nil { 7145 + return err 7146 + } 7147 + 7148 + } 7149 + } 7070 7150 return nil 7071 7151 } 7072 7152 ··· 7095 7175 7096 7176 n := extra 7097 7177 7098 - nameBuf := make([]byte, 9) 7178 + nameBuf := make([]byte, 10) 7099 7179 for i := uint64(0); i < n; i++ { 7100 7180 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 7101 7181 if err != nil { ··· 7165 7245 7166 7246 t.Title = string(sval) 7167 7247 } 7248 + // t.Mentions ([]string) (slice) 7249 + case "mentions": 7250 + 7251 + maj, extra, err = cr.ReadHeader() 7252 + if err != nil { 7253 + return err 7254 + } 7255 + 7256 + if extra > 8192 { 7257 + return fmt.Errorf("t.Mentions: array too large (%d)", extra) 7258 + } 7259 + 7260 + if maj != cbg.MajArray { 7261 + return fmt.Errorf("expected cbor array") 7262 + } 7263 + 7264 + if extra > 0 { 
7265 + t.Mentions = make([]string, extra) 7266 + } 7267 + 7268 + for i := 0; i < int(extra); i++ { 7269 + { 7270 + var maj byte 7271 + var extra uint64 7272 + var err error 7273 + _ = maj 7274 + _ = extra 7275 + _ = err 7276 + 7277 + { 7278 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 7279 + if err != nil { 7280 + return err 7281 + } 7282 + 7283 + t.Mentions[i] = string(sval) 7284 + } 7285 + 7286 + } 7287 + } 7168 7288 // t.CreatedAt (string) (string) 7169 7289 case "createdAt": 7170 7290 ··· 7175 7295 } 7176 7296 7177 7297 t.CreatedAt = string(sval) 7298 + } 7299 + // t.References ([]string) (slice) 7300 + case "references": 7301 + 7302 + maj, extra, err = cr.ReadHeader() 7303 + if err != nil { 7304 + return err 7305 + } 7306 + 7307 + if extra > 8192 { 7308 + return fmt.Errorf("t.References: array too large (%d)", extra) 7309 + } 7310 + 7311 + if maj != cbg.MajArray { 7312 + return fmt.Errorf("expected cbor array") 7313 + } 7314 + 7315 + if extra > 0 { 7316 + t.References = make([]string, extra) 7317 + } 7318 + 7319 + for i := 0; i < int(extra); i++ { 7320 + { 7321 + var maj byte 7322 + var extra uint64 7323 + var err error 7324 + _ = maj 7325 + _ = extra 7326 + _ = err 7327 + 7328 + { 7329 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 7330 + if err != nil { 7331 + return err 7332 + } 7333 + 7334 + t.References[i] = string(sval) 7335 + } 7336 + 7337 + } 7178 7338 } 7179 7339 7180 7340 default: ··· 7194 7354 } 7195 7355 7196 7356 cw := cbg.NewCborWriter(w) 7197 - fieldCount := 5 7357 + fieldCount := 7 7358 + 7359 + if t.Mentions == nil { 7360 + fieldCount-- 7361 + } 7362 + 7363 + if t.References == nil { 7364 + fieldCount-- 7365 + } 7198 7366 7199 7367 if t.ReplyTo == nil { 7200 7368 fieldCount-- ··· 7301 7469 } 7302 7470 } 7303 7471 7472 + // t.Mentions ([]string) (slice) 7473 + if t.Mentions != nil { 7474 + 7475 + if len("mentions") > 1000000 { 7476 + return xerrors.Errorf("Value in field \"mentions\" was too long") 7477 + } 7478 + 7479 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil { 7480 + return err 7481 + } 7482 + if _, err := cw.WriteString(string("mentions")); err != nil { 7483 + return err 7484 + } 7485 + 7486 + if len(t.Mentions) > 8192 { 7487 + return xerrors.Errorf("Slice value in field t.Mentions was too long") 7488 + } 7489 + 7490 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil { 7491 + return err 7492 + } 7493 + for _, v := range t.Mentions { 7494 + if len(v) > 1000000 { 7495 + return xerrors.Errorf("Value in field v was too long") 7496 + } 7497 + 7498 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 7499 + return err 7500 + } 7501 + if _, err := cw.WriteString(string(v)); err != nil { 7502 + return err 7503 + } 7504 + 7505 + } 7506 + } 7507 + 7304 7508 // t.CreatedAt (string) (string) 7305 7509 if len("createdAt") > 1000000 { 7306 7510 return xerrors.Errorf("Value in field \"createdAt\" was too long") ··· 7323 7527 if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 7324 7528 return err 7325 7529 } 7530 + 7531 + // t.References ([]string) (slice) 7532 + if t.References != nil { 7533 + 7534 + if len("references") > 1000000 { 7535 + return xerrors.Errorf("Value in field \"references\" was too long") 7536 + } 7537 + 7538 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil { 7539 + return err 7540 + } 7541 + if _, err := cw.WriteString(string("references")); err != nil { 7542 + return err 7543 + } 
7544 + 7545 + if len(t.References) > 8192 { 7546 + return xerrors.Errorf("Slice value in field t.References was too long") 7547 + } 7548 + 7549 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil { 7550 + return err 7551 + } 7552 + for _, v := range t.References { 7553 + if len(v) > 1000000 { 7554 + return xerrors.Errorf("Value in field v was too long") 7555 + } 7556 + 7557 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 7558 + return err 7559 + } 7560 + if _, err := cw.WriteString(string(v)); err != nil { 7561 + return err 7562 + } 7563 + 7564 + } 7565 + } 7326 7566 return nil 7327 7567 } 7328 7568 ··· 7351 7591 7352 7592 n := extra 7353 7593 7354 - nameBuf := make([]byte, 9) 7594 + nameBuf := make([]byte, 10) 7355 7595 for i := uint64(0); i < n; i++ { 7356 7596 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 7357 7597 if err != nil { ··· 7421 7661 t.ReplyTo = (*string)(&sval) 7422 7662 } 7423 7663 } 7664 + // t.Mentions ([]string) (slice) 7665 + case "mentions": 7666 + 7667 + maj, extra, err = cr.ReadHeader() 7668 + if err != nil { 7669 + return err 7670 + } 7671 + 7672 + if extra > 8192 { 7673 + return fmt.Errorf("t.Mentions: array too large (%d)", extra) 7674 + } 7675 + 7676 + if maj != cbg.MajArray { 7677 + return fmt.Errorf("expected cbor array") 7678 + } 7679 + 7680 + if extra > 0 { 7681 + t.Mentions = make([]string, extra) 7682 + } 7683 + 7684 + for i := 0; i < int(extra); i++ { 7685 + { 7686 + var maj byte 7687 + var extra uint64 7688 + var err error 7689 + _ = maj 7690 + _ = extra 7691 + _ = err 7692 + 7693 + { 7694 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 7695 + if err != nil { 7696 + return err 7697 + } 7698 + 7699 + t.Mentions[i] = string(sval) 7700 + } 7701 + 7702 + } 7703 + } 7424 7704 // t.CreatedAt (string) (string) 7425 7705 case "createdAt": 7426 7706 ··· 7431 7711 } 7432 7712 7433 7713 t.CreatedAt = string(sval) 7714 + } 7715 + // t.References ([]string) (slice) 7716 + case "references": 7717 + 7718 + maj, extra, err = cr.ReadHeader() 7719 + if err != nil { 7720 + return err 7721 + } 7722 + 7723 + if extra > 8192 { 7724 + return fmt.Errorf("t.References: array too large (%d)", extra) 7725 + } 7726 + 7727 + if maj != cbg.MajArray { 7728 + return fmt.Errorf("expected cbor array") 7729 + } 7730 + 7731 + if extra > 0 { 7732 + t.References = make([]string, extra) 7733 + } 7734 + 7735 + for i := 0; i < int(extra); i++ { 7736 + { 7737 + var maj byte 7738 + var extra uint64 7739 + var err error 7740 + _ = maj 7741 + _ = extra 7742 + _ = err 7743 + 7744 + { 7745 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 7746 + if err != nil { 7747 + return err 7748 + } 7749 + 7750 + t.References[i] = string(sval) 7751 + } 7752 + 7753 + } 7434 7754 } 7435 7755 7436 7756 default: ··· 7614 7934 } 7615 7935 7616 7936 cw := cbg.NewCborWriter(w) 7617 - fieldCount := 7 7937 + fieldCount := 10 7618 7938 7619 7939 if t.Body == nil { 7940 + fieldCount-- 7941 + } 7942 + 7943 + if t.Mentions == nil { 7944 + fieldCount-- 7945 + } 7946 + 7947 + if t.Patch == nil { 7948 + fieldCount-- 7949 + } 7950 + 7951 + if t.References == nil { 7620 7952 fieldCount-- 7621 7953 } 7622 7954 ··· 7680 8012 } 7681 8013 7682 8014 // t.Patch (string) (string) 7683 - if len("patch") > 1000000 { 7684 - return xerrors.Errorf("Value in field \"patch\" was too long") 7685 - } 8015 + if t.Patch != nil { 7686 8016 7687 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil { 7688 - return err 
7689 - } 7690 - if _, err := cw.WriteString(string("patch")); err != nil { 7691 - return err 7692 - } 8017 + if len("patch") > 1000000 { 8018 + return xerrors.Errorf("Value in field \"patch\" was too long") 8019 + } 7693 8020 7694 - if len(t.Patch) > 1000000 { 7695 - return xerrors.Errorf("Value in field t.Patch was too long") 7696 - } 8021 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil { 8022 + return err 8023 + } 8024 + if _, err := cw.WriteString(string("patch")); err != nil { 8025 + return err 8026 + } 7697 8027 7698 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil { 7699 - return err 7700 - } 7701 - if _, err := cw.WriteString(string(t.Patch)); err != nil { 7702 - return err 8028 + if t.Patch == nil { 8029 + if _, err := cw.Write(cbg.CborNull); err != nil { 8030 + return err 8031 + } 8032 + } else { 8033 + if len(*t.Patch) > 1000000 { 8034 + return xerrors.Errorf("Value in field t.Patch was too long") 8035 + } 8036 + 8037 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil { 8038 + return err 8039 + } 8040 + if _, err := cw.WriteString(string(*t.Patch)); err != nil { 8041 + return err 8042 + } 8043 + } 7703 8044 } 7704 8045 7705 8046 // t.Title (string) (string) ··· 7760 8101 return err 7761 8102 } 7762 8103 8104 + // t.Mentions ([]string) (slice) 8105 + if t.Mentions != nil { 8106 + 8107 + if len("mentions") > 1000000 { 8108 + return xerrors.Errorf("Value in field \"mentions\" was too long") 8109 + } 8110 + 8111 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil { 8112 + return err 8113 + } 8114 + if _, err := cw.WriteString(string("mentions")); err != nil { 8115 + return err 8116 + } 8117 + 8118 + if len(t.Mentions) > 8192 { 8119 + return xerrors.Errorf("Slice value in field t.Mentions was too long") 8120 + } 8121 + 8122 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil { 8123 + return err 8124 + } 8125 + for _, v := range t.Mentions { 8126 + if len(v) > 1000000 { 8127 + return xerrors.Errorf("Value in field v was too long") 8128 + } 8129 + 8130 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 8131 + return err 8132 + } 8133 + if _, err := cw.WriteString(string(v)); err != nil { 8134 + return err 8135 + } 8136 + 8137 + } 8138 + } 8139 + 7763 8140 // t.CreatedAt (string) (string) 7764 8141 if len("createdAt") > 1000000 { 7765 8142 return xerrors.Errorf("Value in field \"createdAt\" was too long") ··· 7782 8159 if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 7783 8160 return err 7784 8161 } 8162 + 8163 + // t.PatchBlob (util.LexBlob) (struct) 8164 + if len("patchBlob") > 1000000 { 8165 + return xerrors.Errorf("Value in field \"patchBlob\" was too long") 8166 + } 8167 + 8168 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil { 8169 + return err 8170 + } 8171 + if _, err := cw.WriteString(string("patchBlob")); err != nil { 8172 + return err 8173 + } 8174 + 8175 + if err := t.PatchBlob.MarshalCBOR(cw); err != nil { 8176 + return err 8177 + } 8178 + 8179 + // t.References ([]string) (slice) 8180 + if t.References != nil { 8181 + 8182 + if len("references") > 1000000 { 8183 + return xerrors.Errorf("Value in field \"references\" was too long") 8184 + } 8185 + 8186 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil { 8187 + return err 8188 + } 8189 + if _, err := 
cw.WriteString(string("references")); err != nil { 8190 + return err 8191 + } 8192 + 8193 + if len(t.References) > 8192 { 8194 + return xerrors.Errorf("Slice value in field t.References was too long") 8195 + } 8196 + 8197 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil { 8198 + return err 8199 + } 8200 + for _, v := range t.References { 8201 + if len(v) > 1000000 { 8202 + return xerrors.Errorf("Value in field v was too long") 8203 + } 8204 + 8205 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 8206 + return err 8207 + } 8208 + if _, err := cw.WriteString(string(v)); err != nil { 8209 + return err 8210 + } 8211 + 8212 + } 8213 + } 7785 8214 return nil 7786 8215 } 7787 8216 ··· 7810 8239 7811 8240 n := extra 7812 8241 7813 - nameBuf := make([]byte, 9) 8242 + nameBuf := make([]byte, 10) 7814 8243 for i := uint64(0); i < n; i++ { 7815 8244 nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 7816 8245 if err != nil { ··· 7862 8291 case "patch": 7863 8292 7864 8293 { 7865 - sval, err := cbg.ReadStringWithMax(cr, 1000000) 8294 + b, err := cr.ReadByte() 7866 8295 if err != nil { 7867 8296 return err 7868 8297 } 8298 + if b != cbg.CborNull[0] { 8299 + if err := cr.UnreadByte(); err != nil { 8300 + return err 8301 + } 7869 8302 7870 - t.Patch = string(sval) 8303 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8304 + if err != nil { 8305 + return err 8306 + } 8307 + 8308 + t.Patch = (*string)(&sval) 8309 + } 7871 8310 } 7872 8311 // t.Title (string) (string) 7873 8312 case "title": ··· 7920 8359 } 7921 8360 7922 8361 } 8362 + // t.Mentions ([]string) (slice) 8363 + case "mentions": 8364 + 8365 + maj, extra, err = cr.ReadHeader() 8366 + if err != nil { 8367 + return err 8368 + } 8369 + 8370 + if extra > 8192 { 8371 + return fmt.Errorf("t.Mentions: array too large (%d)", extra) 8372 + } 8373 + 8374 + if maj != cbg.MajArray { 8375 + return fmt.Errorf("expected cbor array") 8376 + } 8377 + 8378 + if extra > 0 { 8379 + t.Mentions = make([]string, extra) 8380 + } 8381 + 8382 + for i := 0; i < int(extra); i++ { 8383 + { 8384 + var maj byte 8385 + var extra uint64 8386 + var err error 8387 + _ = maj 8388 + _ = extra 8389 + _ = err 8390 + 8391 + { 8392 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8393 + if err != nil { 8394 + return err 8395 + } 8396 + 8397 + t.Mentions[i] = string(sval) 8398 + } 8399 + 8400 + } 8401 + } 7923 8402 // t.CreatedAt (string) (string) 7924 8403 case "createdAt": 7925 8404 ··· 7931 8410 7932 8411 t.CreatedAt = string(sval) 7933 8412 } 8413 + // t.PatchBlob (util.LexBlob) (struct) 8414 + case "patchBlob": 8415 + 8416 + { 8417 + 8418 + b, err := cr.ReadByte() 8419 + if err != nil { 8420 + return err 8421 + } 8422 + if b != cbg.CborNull[0] { 8423 + if err := cr.UnreadByte(); err != nil { 8424 + return err 8425 + } 8426 + t.PatchBlob = new(util.LexBlob) 8427 + if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil { 8428 + return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err) 8429 + } 8430 + } 8431 + 8432 + } 8433 + // t.References ([]string) (slice) 8434 + case "references": 8435 + 8436 + maj, extra, err = cr.ReadHeader() 8437 + if err != nil { 8438 + return err 8439 + } 8440 + 8441 + if extra > 8192 { 8442 + return fmt.Errorf("t.References: array too large (%d)", extra) 8443 + } 8444 + 8445 + if maj != cbg.MajArray { 8446 + return fmt.Errorf("expected cbor array") 8447 + } 8448 + 8449 + if extra > 0 { 8450 + t.References = make([]string, extra) 8451 + } 8452 + 8453 + for i 
:= 0; i < int(extra); i++ { 8454 + { 8455 + var maj byte 8456 + var extra uint64 8457 + var err error 8458 + _ = maj 8459 + _ = extra 8460 + _ = err 8461 + 8462 + { 8463 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8464 + if err != nil { 8465 + return err 8466 + } 8467 + 8468 + t.References[i] = string(sval) 8469 + } 8470 + 8471 + } 8472 + } 7934 8473 7935 8474 default: 7936 8475 // Field doesn't exist on this type, so ignore it ··· 7949 8488 } 7950 8489 7951 8490 cw := cbg.NewCborWriter(w) 8491 + fieldCount := 6 7952 8492 7953 - if _, err := cw.Write([]byte{164}); err != nil { 8493 + if t.Mentions == nil { 8494 + fieldCount-- 8495 + } 8496 + 8497 + if t.References == nil { 8498 + fieldCount-- 8499 + } 8500 + 8501 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 7954 8502 return err 7955 8503 } 7956 8504 ··· 8019 8567 return err 8020 8568 } 8021 8569 8570 + // t.Mentions ([]string) (slice) 8571 + if t.Mentions != nil { 8572 + 8573 + if len("mentions") > 1000000 { 8574 + return xerrors.Errorf("Value in field \"mentions\" was too long") 8575 + } 8576 + 8577 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil { 8578 + return err 8579 + } 8580 + if _, err := cw.WriteString(string("mentions")); err != nil { 8581 + return err 8582 + } 8583 + 8584 + if len(t.Mentions) > 8192 { 8585 + return xerrors.Errorf("Slice value in field t.Mentions was too long") 8586 + } 8587 + 8588 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil { 8589 + return err 8590 + } 8591 + for _, v := range t.Mentions { 8592 + if len(v) > 1000000 { 8593 + return xerrors.Errorf("Value in field v was too long") 8594 + } 8595 + 8596 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 8597 + return err 8598 + } 8599 + if _, err := cw.WriteString(string(v)); err != nil { 8600 + return err 8601 + } 8602 + 8603 + } 8604 + } 8605 + 8022 8606 // t.CreatedAt (string) (string) 8023 8607 if len("createdAt") > 1000000 { 8024 8608 return xerrors.Errorf("Value in field \"createdAt\" was too long") ··· 8041 8625 if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 8042 8626 return err 8043 8627 } 8628 + 8629 + // t.References ([]string) (slice) 8630 + if t.References != nil { 8631 + 8632 + if len("references") > 1000000 { 8633 + return xerrors.Errorf("Value in field \"references\" was too long") 8634 + } 8635 + 8636 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil { 8637 + return err 8638 + } 8639 + if _, err := cw.WriteString(string("references")); err != nil { 8640 + return err 8641 + } 8642 + 8643 + if len(t.References) > 8192 { 8644 + return xerrors.Errorf("Slice value in field t.References was too long") 8645 + } 8646 + 8647 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil { 8648 + return err 8649 + } 8650 + for _, v := range t.References { 8651 + if len(v) > 1000000 { 8652 + return xerrors.Errorf("Value in field v was too long") 8653 + } 8654 + 8655 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 8656 + return err 8657 + } 8658 + if _, err := cw.WriteString(string(v)); err != nil { 8659 + return err 8660 + } 8661 + 8662 + } 8663 + } 8044 8664 return nil 8045 8665 } 8046 8666 ··· 8069 8689 8070 8690 n := extra 8071 8691 8072 - nameBuf := make([]byte, 9) 8692 + nameBuf := make([]byte, 10) 8073 8693 for i := uint64(0); i < n; i++ { 8074 8694 nameLen, ok, 
err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000) 8075 8695 if err != nil { ··· 8118 8738 8119 8739 t.LexiconTypeID = string(sval) 8120 8740 } 8741 + // t.Mentions ([]string) (slice) 8742 + case "mentions": 8743 + 8744 + maj, extra, err = cr.ReadHeader() 8745 + if err != nil { 8746 + return err 8747 + } 8748 + 8749 + if extra > 8192 { 8750 + return fmt.Errorf("t.Mentions: array too large (%d)", extra) 8751 + } 8752 + 8753 + if maj != cbg.MajArray { 8754 + return fmt.Errorf("expected cbor array") 8755 + } 8756 + 8757 + if extra > 0 { 8758 + t.Mentions = make([]string, extra) 8759 + } 8760 + 8761 + for i := 0; i < int(extra); i++ { 8762 + { 8763 + var maj byte 8764 + var extra uint64 8765 + var err error 8766 + _ = maj 8767 + _ = extra 8768 + _ = err 8769 + 8770 + { 8771 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8772 + if err != nil { 8773 + return err 8774 + } 8775 + 8776 + t.Mentions[i] = string(sval) 8777 + } 8778 + 8779 + } 8780 + } 8121 8781 // t.CreatedAt (string) (string) 8122 8782 case "createdAt": 8123 8783 ··· 8128 8788 } 8129 8789 8130 8790 t.CreatedAt = string(sval) 8791 + } 8792 + // t.References ([]string) (slice) 8793 + case "references": 8794 + 8795 + maj, extra, err = cr.ReadHeader() 8796 + if err != nil { 8797 + return err 8798 + } 8799 + 8800 + if extra > 8192 { 8801 + return fmt.Errorf("t.References: array too large (%d)", extra) 8802 + } 8803 + 8804 + if maj != cbg.MajArray { 8805 + return fmt.Errorf("expected cbor array") 8806 + } 8807 + 8808 + if extra > 0 { 8809 + t.References = make([]string, extra) 8810 + } 8811 + 8812 + for i := 0; i < int(extra); i++ { 8813 + { 8814 + var maj byte 8815 + var extra uint64 8816 + var err error 8817 + _ = maj 8818 + _ = extra 8819 + _ = err 8820 + 8821 + { 8822 + sval, err := cbg.ReadStringWithMax(cr, 1000000) 8823 + if err != nil { 8824 + return err 8825 + } 8826 + 8827 + t.References[i] = string(sval) 8828 + } 8829 + 8830 + } 8131 8831 } 8132 8832 8133 8833 default:
+7 -5
api/tangled/issuecomment.go
···
  } //
  // RECORDTYPE: RepoIssueComment
  type RepoIssueComment struct {
-     LexiconTypeID string  `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
-     Body          string  `json:"body" cborgen:"body"`
-     CreatedAt     string  `json:"createdAt" cborgen:"createdAt"`
-     Issue         string  `json:"issue" cborgen:"issue"`
-     ReplyTo       *string `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
+     LexiconTypeID string   `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
+     Body          string   `json:"body" cborgen:"body"`
+     CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+     Issue         string   `json:"issue" cborgen:"issue"`
+     Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+     References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+     ReplyTo       *string  `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
  }
+6 -4
api/tangled/pullcomment.go
···
  } //
  // RECORDTYPE: RepoPullComment
  type RepoPullComment struct {
-     LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
-     Body          string `json:"body" cborgen:"body"`
-     CreatedAt     string `json:"createdAt" cborgen:"createdAt"`
-     Pull          string `json:"pull" cborgen:"pull"`
+     LexiconTypeID string   `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
+     Body          string   `json:"body" cborgen:"body"`
+     CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+     Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+     Pull          string   `json:"pull" cborgen:"pull"`
+     References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
  }
+13 -1
api/tangled/repoblob.go
···
  // RepoBlob_Output is the output of a sh.tangled.repo.blob call.
  type RepoBlob_Output struct {
      // content: File content (base64 encoded for binary files)
-     Content string `json:"content" cborgen:"content"`
+     Content *string `json:"content,omitempty" cborgen:"content,omitempty"`
      // encoding: Content encoding
      Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
      // isBinary: Whether the file is binary
···
      Ref string `json:"ref" cborgen:"ref"`
      // size: File size in bytes
      Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
+     // submodule: Submodule information if path is a submodule
+     Submodule *RepoBlob_Submodule `json:"submodule,omitempty" cborgen:"submodule,omitempty"`
  }

  // RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···
      Name string `json:"name" cborgen:"name"`
      // when: Author timestamp
      When string `json:"when" cborgen:"when"`
+ }
+
+ // RepoBlob_Submodule is a "submodule" in the sh.tangled.repo.blob schema.
+ type RepoBlob_Submodule struct {
+     // branch: Branch to track in the submodule
+     Branch *string `json:"branch,omitempty" cborgen:"branch,omitempty"`
+     // name: Submodule name
+     Name string `json:"name" cborgen:"name"`
+     // url: Submodule repository URL
+     Url string `json:"url" cborgen:"url"`
  }

  // RepoBlob calls the XRPC method "sh.tangled.repo.blob".
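Two caller-visible changes in this output type: `content` becomes optional and a `submodule` object may come back when the requested path is a submodule entry. A minimal sketch of how a client might branch on the new shape (the function name and fallback strings are illustrative, not part of the API):

```go
package example

import "tangled.org/core/api/tangled"

// describeBlob shows the nil-checks a sh.tangled.repo.blob client now needs:
// Submodule is set when the path resolves to a submodule, and Content is a
// pointer that may be absent.
func describeBlob(out *tangled.RepoBlob_Output) string {
	if out.Submodule != nil {
		// the path points at a submodule rather than a regular blob
		return "submodule " + out.Submodule.Name + " -> " + out.Submodule.Url
	}
	if out.Content == nil {
		return "no content returned"
	}
	return *out.Content
}
```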
+7 -5
api/tangled/repoissue.go
···
  } //
  // RECORDTYPE: RepoIssue
  type RepoIssue struct {
-     LexiconTypeID string  `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
-     Body          *string `json:"body,omitempty" cborgen:"body,omitempty"`
-     CreatedAt     string  `json:"createdAt" cborgen:"createdAt"`
-     Repo          string  `json:"repo" cborgen:"repo"`
-     Title         string  `json:"title" cborgen:"title"`
+     LexiconTypeID string   `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
+     Body          *string  `json:"body,omitempty" cborgen:"body,omitempty"`
+     CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+     Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+     References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+     Repo          string   `json:"repo" cborgen:"repo"`
+     Title         string   `json:"title" cborgen:"title"`
  }
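For reference, a sketch of an issue record carrying the two new optional arrays. Judging by the reference_links table and notification changes elsewhere in this diff, mentions hold DIDs and references hold record AT-URIs; the concrete values below are placeholders, and both slices can be left nil since they are tagged omitempty:

```go
package example

import (
	"time"

	"tangled.org/core/api/tangled"
)

// newIssue sketches how the optional arrays ride along on the record.
func newIssue(repoAt, title, body string) tangled.RepoIssue {
	return tangled.RepoIssue{
		Repo:       repoAt, // AT-URI of the repo record
		Title:      title,
		Body:       &body,
		CreatedAt:  time.Now().UTC().Format(time.RFC3339),
		Mentions:   []string{"did:plc:alice"},                                    // placeholder DID
		References: []string{"at://did:plc:bob/sh.tangled.repo.issue/3kexample"}, // placeholder AT-URI
	}
}
```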
+12 -7
api/tangled/repopull.go
···
  } //
  // RECORDTYPE: RepoPull
  type RepoPull struct {
-     LexiconTypeID string           `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
-     Body          *string          `json:"body,omitempty" cborgen:"body,omitempty"`
-     CreatedAt     string           `json:"createdAt" cborgen:"createdAt"`
-     Patch         string           `json:"patch" cborgen:"patch"`
-     Source        *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
-     Target        *RepoPull_Target `json:"target" cborgen:"target"`
-     Title         string           `json:"title" cborgen:"title"`
+     LexiconTypeID string   `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
+     Body          *string  `json:"body,omitempty" cborgen:"body,omitempty"`
+     CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+     Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+     // patch: (deprecated) use patchBlob instead
+     Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
+     // patchBlob: patch content
+     PatchBlob  *util.LexBlob    `json:"patchBlob" cborgen:"patchBlob"`
+     References []string         `json:"references,omitempty" cborgen:"references,omitempty"`
+     Source     *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
+     Target     *RepoPull_Target `json:"target" cborgen:"target"`
+     Title      string           `json:"title" cborgen:"title"`
  }

  // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
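The pull record now points at its patch through `patchBlob`, with the inline `patch` string kept only as a deprecated, optional fallback. A rough sketch of building the record once the patch has been uploaded as a blob (`blob` stands for the `*util.LexBlob` returned by the upload step, which is outside this diff; the target is passed through unchanged since its fields are untouched here):

```go
package example

import (
	"time"

	"github.com/bluesky-social/indigo/lex/util"
	"tangled.org/core/api/tangled"
)

// newPull sketches the new shape: the patch travels as a blob reference and
// the deprecated inline field stays unset.
func newPull(title string, blob *util.LexBlob, target *tangled.RepoPull_Target) tangled.RepoPull {
	return tangled.RepoPull{
		Title:     title,
		CreatedAt: time.Now().UTC().Format(time.RFC3339),
		PatchBlob: blob, // blob reference returned by the upload
		Patch:     nil,  // deprecated; older records may still carry it inline
		Target:    target,
	}
}
```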
-4
api/tangled/repotree.go
···

  // RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
  type RepoTree_TreeEntry struct {
-     // is_file: Whether this entry is a file
-     Is_file bool `json:"is_file" cborgen:"is_file"`
-     // is_subtree: Whether this entry is a directory/subtree
-     Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
      Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
      // mode: File mode
      Mode string `json:"mode" cborgen:"mode"`
+6 -45
appview/commitverify/verify.go
···
  import (
      "log"

-     "github.com/go-git/go-git/v5/plumbing/object"
      "tangled.org/core/appview/db"
      "tangled.org/core/appview/models"
      "tangled.org/core/crypto"
···
      return ""
  }

- func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
-     ndCommits := []types.NiceDiff{}
-     for _, commit := range commits {
-         ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
-     }
-     return GetVerifiedCommits(e, emailToDid, ndCommits)
- }
-
- func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
+ func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
      vcs := VerifiedCommits{}

      didPubkeyCache := make(map[string][]models.PublicKey)

      for _, commit := range ndCommits {
-         c := commit.Commit
-
-         committerEmail := c.Committer.Email
+         committerEmail := commit.Committer.Email
          if did, exists := emailToDid[committerEmail]; exists {
              // check if we've already fetched public keys for this did
              pubKeys, ok := didPubkeyCache[did]
···
          }

          // try to verify with any associated pubkeys
+         payload := commit.Payload()
+         signature := commit.PGPSignature
          for _, pk := range pubKeys {
-             if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
+             if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {

                  fp, err := crypto.SSHFingerprint(pk.Key)
                  if err != nil {
                      log.Println("error computing ssh fingerprint:", err)
                  }

-                 vc := verifiedCommit{fingerprint: fp, hash: c.This}
+                 vc := verifiedCommit{fingerprint: fp, hash: commit.This}
                  vcs[vc] = struct{}{}
                  break
              }
···

      return vcs, nil
  }
-
- // ObjectCommitToNiceDiff is a compatibility function to convert a
- // commit object into a NiceDiff structure.
- func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
-     var niceDiff types.NiceDiff
-
-     // set commit information
-     niceDiff.Commit.Message = c.Message
-     niceDiff.Commit.Author = c.Author
-     niceDiff.Commit.This = c.Hash.String()
-     niceDiff.Commit.Committer = c.Committer
-     niceDiff.Commit.Tree = c.TreeHash.String()
-     niceDiff.Commit.PGPSignature = c.PGPSignature
-
-     changeId, ok := c.ExtraHeaders["change-id"]
-     if ok {
-         niceDiff.Commit.ChangedId = string(changeId)
-     }
-
-     // set parent hash if available
-     if len(c.ParentHashes) > 0 {
-         niceDiff.Commit.Parent = c.ParentHashes[0].String()
-     }
-
-     // XXX: Stats and Diff fields are typically populated
-     // after fetching the actual diff information, which isn't
-     // directly available in the commit object itself.
-
-     return niceDiff
- }
+3 -2
appview/db/artifact.go
···
      "github.com/go-git/go-git/v5/plumbing"
      "github.com/ipfs/go-cid"
      "tangled.org/core/appview/models"
+     "tangled.org/core/orm"
  )

  func AddArtifact(e Execer, artifact models.Artifact) error {
···
      return err
  }

- func GetArtifact(e Execer, filters ...filter) ([]models.Artifact, error) {
+ func GetArtifact(e Execer, filters ...orm.Filter) ([]models.Artifact, error) {
      var artifacts []models.Artifact

      var conditions []string
···
      return artifacts, nil
  }

- func DeleteArtifact(e Execer, filters ...filter) error {
+ func DeleteArtifact(e Execer, filters ...orm.Filter) error {
      var conditions []string
      var args []any
      for _, filter := range filters {
+4 -3
appview/db/collaborators.go
···
      "time"

      "tangled.org/core/appview/models"
+     "tangled.org/core/orm"
  )

  func AddCollaborator(e Execer, c models.Collaborator) error {
···
      return err
  }

- func DeleteCollaborator(e Execer, filters ...filter) error {
+ func DeleteCollaborator(e Execer, filters ...orm.Filter) error {
      var conditions []string
      var args []any
      for _, filter := range filters {
···
          return nil, nil
      }

-     return GetRepos(e, 0, FilterIn("at_uri", repoAts))
+     return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
  }

- func GetCollaborators(e Execer, filters ...filter) ([]models.Collaborator, error) {
+ func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) {
      var collaborators []models.Collaborator
      var conditions []string
      var args []any
+74 -134
appview/db/db.go
···
  import (
      "context"
      "database/sql"
-     "fmt"
      "log/slog"
-     "reflect"
      "strings"

      _ "github.com/mattn/go-sqlite3"
      "tangled.org/core/log"
+     "tangled.org/core/orm"
  )

  type DB struct {
···
      email_notifications integer not null default 0
  );

+ create table if not exists reference_links (
+     id integer primary key autoincrement,
+     from_at text not null,
+     to_at text not null,
+     unique (from_at, to_at)
+ );
+
  create table if not exists migrations (
      id integer primary key autoincrement,
      name text unique
···
  -- indexes for better performance
  create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
  create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
- create index if not exists idx_stars_created on stars(created);
- create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
+ create index if not exists idx_references_from_at on reference_links(from_at);
+ create index if not exists idx_references_to_at on reference_links(to_at);
  `)
  if err != nil {
      return nil, err
  }

  // run migrations
- runMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
      tx.Exec(`
          alter table repos add column description text check (length(description) <= 200);
      `)
      return nil
  })

- runMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
      // add unconstrained column
      _, err := tx.Exec(`
          alter table public_keys
···
      return nil
  })

- runMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table comments drop column comment_at;
          alter table comments add column rkey text;
      return err
  })

- runMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table comments add column deleted text; -- timestamp
          alter table comments add column edited text; -- timestamp
      return err
  })

- runMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table pulls add column source_branch text;
          alter table pulls add column source_repo_at text;
      return err
  })

- runMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table repos add column source text;
      `)
···
  //
  // [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
  conn.ExecContext(ctx, "pragma foreign_keys = off;")
- runMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          create table pulls_new (
              -- identifiers
···
  })
  conn.ExecContext(ctx, "pragma foreign_keys = on;")

- runMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
      tx.Exec(`
          alter table repos add column spindle text;
      `)
···
  // drop all knot secrets, add unique constraint to knots
  //
  // knots will henceforth use service auth for signed requests
- runMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          create table registrations_new (
              id integer primary key autoincrement,
···
  })

  // recreate and add rkey + created columns with default constraint
- runMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
      // create new table
      // - repo_at instead of repo integer
      // - rkey field
···
      return err
  })

- runMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table issues add column rkey text not null default '';

···
  })

  // repurpose the read-only column to "needs-upgrade"
- runMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table registrations rename column read_only to needs_upgrade;
      `)
  })

  // require all knots to upgrade after the release of total xrpc
- runMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          update registrations set needs_upgrade = 1;
      `)
  })

  // require all knots to upgrade after the release of total xrpc
- runMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table spindles add column needs_upgrade integer not null default 0;
      `)
···
  //
  // disable foreign-keys for the next migration
  conn.ExecContext(ctx, "pragma foreign_keys = off;")
- runMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          create table if not exists issues_new (
              -- identifiers
···
  // - new columns
  //   * column "reply_to" which can be any other comment
  //   * column "at-uri" which is a generated column
- runMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          create table if not exists issue_comments (
              -- identifiers
···
  //
  // disable foreign-keys for the next migration
  conn.ExecContext(ctx, "pragma foreign_keys = off;")
- runMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          create table if not exists pulls_new (
              -- identifiers
···
  //
  // disable foreign-keys for the next migration
  conn.ExecContext(ctx, "pragma foreign_keys = off;")
- runMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          create table if not exists pull_submissions_new (
              -- identifiers
···

  // knots may report the combined patch for a comparison, we can store that on the appview side
  // (but not on the pds record), because calculating the combined patch requires a git index
- runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table pull_submissions add column combined text;
      `)
      return err
  })

- runMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table profile add column pronouns text;
      `)
      return err
  })

- runMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
+ orm.RunMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
      _, err := tx.Exec(`
          alter table repos add column website text;
          alter table repos add column topics text;
···
      return err
  })

- return &DB{
-     db,
-     logger,
- }, nil
- }
-
- type migrationFn = func(*sql.Tx) error
-
- func runMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
-     logger = logger.With("migration", name)
-
-     tx, err := c.BeginTx(context.Background(), nil)
-     if err != nil {
+ orm.RunMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
+     _, err := tx.Exec(`
+         alter table notification_preferences add column user_mentioned integer not null default 1;
+     `)
      return err
-     }
-     defer tx.Rollback()
+ })

-     var exists bool
-     err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
-     if err != nil {
-         return err
-     }
+ // remove the foreign key constraints from stars.
+ orm.RunMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
+     _, err := tx.Exec(`
+         create table stars_new (
+             id integer primary key autoincrement,
+             did text not null,
+             rkey text not null,

-     if !exists {
-         // run migration
-         err = migrationFn(tx)
-         if err != nil {
-             logger.Error("failed to run migration", "err", err)
-             return err
-         }
+             subject_at text not null,

-         // mark migration as complete
-         _, err = tx.Exec("insert into migrations (name) values (?)", name)
-         if err != nil {
-             logger.Error("failed to mark migration as complete", "err", err)
-             return err
-         }
+             created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
+             unique(did, rkey),
+             unique(did, subject_at)
+         );

-         // commit the transaction
-         if err := tx.Commit(); err != nil {
-             return err
-         }
+         insert into stars_new (
+             id,
+             did,
+             rkey,
+             subject_at,
+             created
+         )
+         select
+             id,
+             starred_by_did,
+             rkey,
+             repo_at,
+             created
+         from stars;

-         logger.Info("migration applied successfully")
-     } else {
-         logger.Warn("skipped migration, already applied")
-     }
+         drop table stars;
+         alter table stars_new rename to stars;

-     return nil
+         create index if not exists idx_stars_created on stars(created);
+         create index if not exists idx_stars_subject_at_created on stars(subject_at, created);
+     `)
+     return err
+ })
+
+ return &DB{
+     db,
+     logger,
+ }, nil
  }

  func (d *DB) Close() error {
      return d.DB.Close()
  }
-
- type filter struct {
-     key string
-     arg any
-     cmp string
- }
-
- func newFilter(key, cmp string, arg any) filter {
-     return filter{
-         key: key,
-         arg: arg,
-         cmp: cmp,
-     }
- }
-
- func FilterEq(key string, arg any) filter      { return newFilter(key, "=", arg) }
- func FilterNotEq(key string, arg any) filter   { return newFilter(key, "<>", arg) }
- func FilterGte(key string, arg any) filter     { return newFilter(key, ">=", arg) }
- func FilterLte(key string, arg any) filter     { return newFilter(key, "<=", arg) }
- func FilterIs(key string, arg any) filter      { return newFilter(key, "is", arg) }
- func FilterIsNot(key string, arg any) filter   { return newFilter(key, "is not", arg) }
- func FilterIn(key string, arg any) filter      { return newFilter(key, "in", arg) }
- func FilterLike(key string, arg any) filter    { return newFilter(key, "like", arg) }
- func FilterNotLike(key string, arg any) filter { return newFilter(key, "not like", arg) }
- func FilterContains(key string, arg any) filter {
-     return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
- }
-
- func (f filter) Condition() string {
-     rv := reflect.ValueOf(f.arg)
-     kind := rv.Kind()
-
-     // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
-     if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
-         if rv.Len() == 0 {
-             // always false
-             return "1 = 0"
-         }
-
-         placeholders := make([]string, rv.Len())
-         for i := range placeholders {
-             placeholders[i] = "?"
-         }
-
-         return fmt.Sprintf("%s %s (%s)", f.key, f.cmp, strings.Join(placeholders, ", "))
-     }
-
-     return fmt.Sprintf("%s %s ?", f.key, f.cmp)
- }
-
- func (f filter) Arg() []any {
-     rv := reflect.ValueOf(f.arg)
-     kind := rv.Kind()
-     if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
-         if rv.Len() == 0 {
-             return nil
-         }
-
-         out := make([]any, rv.Len())
-         for i := range rv.Len() {
-             out[i] = rv.Index(i).Interface()
-         }
-         return out
-     }
-
-     return []any{f.arg}
- }
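The filter type and the migration runner move out of appview/db into a shared orm package, so every query helper now takes `...orm.Filter`. A condensed sketch of the calling pattern those helpers keep using, assuming orm.Filter exposes the same Condition/Arg pair the old in-package filter did (the table and column names below are illustrative):

```go
package example

import (
	"fmt"
	"strings"

	"tangled.org/core/orm"
)

// buildQuery mirrors the pattern used throughout appview/db: each filter
// contributes one SQL condition plus its bound arguments.
func buildQuery(filters ...orm.Filter) (string, []any) {
	var conditions []string
	var args []any
	for _, f := range filters {
		conditions = append(conditions, f.Condition())
		args = append(args, f.Arg()...)
	}

	where := ""
	if len(conditions) > 0 {
		where = " where " + strings.Join(conditions, " and ")
	}
	// e.g. orm.FilterEq("did", did) expands to "did = ?" and
	// orm.FilterIn("at_uri", uris) to "at_uri in (?, ?, ...)".
	return fmt.Sprintf("select * from repos%s", where), args
}
```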
+6 -3
appview/db/follow.go
···
      "time"

      "tangled.org/core/appview/models"
+     "tangled.org/core/orm"
  )

  func AddFollow(e Execer, follow *models.Follow) error {
···
      return result, nil
  }

- func GetFollows(e Execer, limit int, filters ...filter) ([]models.Follow, error) {
+ func GetFollows(e Execer, limit int, filters ...orm.Filter) ([]models.Follow, error) {
      var follows []models.Follow

      var conditions []string
···
      if err != nil {
          return nil, err
      }
+     defer rows.Close()
+
      for rows.Next() {
          var follow models.Follow
          var followedAt string
···
  }

  func GetFollowers(e Execer, did string) ([]models.Follow, error) {
-     return GetFollows(e, 0, FilterEq("subject_did", did))
+     return GetFollows(e, 0, orm.FilterEq("subject_did", did))
  }

  func GetFollowing(e Execer, did string) ([]models.Follow, error) {
-     return GetFollows(e, 0, FilterEq("user_did", did))
+     return GetFollows(e, 0, orm.FilterEq("user_did", did))
  }

  func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
+93 -36
appview/db/issues.go
··· 10 10 "time" 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 + "tangled.org/core/api/tangled" 13 14 "tangled.org/core/appview/models" 14 15 "tangled.org/core/appview/pagination" 16 + "tangled.org/core/orm" 15 17 ) 16 18 17 19 func PutIssue(tx *sql.Tx, issue *models.Issue) error { ··· 26 28 27 29 issues, err := GetIssues( 28 30 tx, 29 - FilterEq("did", issue.Did), 30 - FilterEq("rkey", issue.Rkey), 31 + orm.FilterEq("did", issue.Did), 32 + orm.FilterEq("rkey", issue.Rkey), 31 33 ) 32 34 switch { 33 35 case err != nil: ··· 69 71 returning rowid, issue_id 70 72 `, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body) 71 73 72 - return row.Scan(&issue.Id, &issue.IssueId) 74 + err = row.Scan(&issue.Id, &issue.IssueId) 75 + if err != nil { 76 + return fmt.Errorf("scan row: %w", err) 77 + } 78 + 79 + if err := putReferences(tx, issue.AtUri(), issue.References); err != nil { 80 + return fmt.Errorf("put reference_links: %w", err) 81 + } 82 + return nil 73 83 } 74 84 75 85 func updateIssue(tx *sql.Tx, issue *models.Issue) error { ··· 79 89 set title = ?, body = ?, edited = ? 80 90 where did = ? and rkey = ? 81 91 `, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey) 82 - return err 92 + if err != nil { 93 + return err 94 + } 95 + 96 + if err := putReferences(tx, issue.AtUri(), issue.References); err != nil { 97 + return fmt.Errorf("put reference_links: %w", err) 98 + } 99 + return nil 83 100 } 84 101 85 - func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]models.Issue, error) { 102 + func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) { 86 103 issueMap := make(map[string]*models.Issue) // at-uri -> issue 87 104 88 105 var conditions []string ··· 98 115 whereClause = " where " + strings.Join(conditions, " and ") 99 116 } 100 117 101 - pLower := FilterGte("row_num", page.Offset+1) 102 - pUpper := FilterLte("row_num", page.Offset+page.Limit) 118 + pLower := orm.FilterGte("row_num", page.Offset+1) 119 + pUpper := orm.FilterLte("row_num", page.Offset+page.Limit) 103 120 104 121 pageClause := "" 105 122 if page.Limit > 0 { ··· 189 206 repoAts = append(repoAts, string(issue.RepoAt)) 190 207 } 191 208 192 - repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts)) 209 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts)) 193 210 if err != nil { 194 211 return nil, fmt.Errorf("failed to build repo mappings: %w", err) 195 212 } ··· 212 229 // collect comments 213 230 issueAts := slices.Collect(maps.Keys(issueMap)) 214 231 215 - comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts)) 232 + comments, err := GetIssueComments(e, orm.FilterIn("issue_at", issueAts)) 216 233 if err != nil { 217 234 return nil, fmt.Errorf("failed to query comments: %w", err) 218 235 } ··· 224 241 } 225 242 226 243 // collect allLabels for each issue 227 - allLabels, err := GetLabels(e, FilterIn("subject", issueAts)) 244 + allLabels, err := GetLabels(e, orm.FilterIn("subject", issueAts)) 228 245 if err != nil { 229 246 return nil, fmt.Errorf("failed to query labels: %w", err) 230 247 } ··· 234 251 } 235 252 } 236 253 254 + // collect references for each issue 255 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", issueAts)) 256 + if err != nil { 257 + return nil, fmt.Errorf("failed to query reference_links: %w", err) 258 + } 259 + for issueAt, references := range allReferencs { 260 + if issue, ok := issueMap[issueAt.String()]; ok { 261 + 
issue.References = references 262 + } 263 + } 264 + 237 265 var issues []models.Issue 238 266 for _, i := range issueMap { 239 267 issues = append(issues, *i) ··· 250 278 issues, err := GetIssuesPaginated( 251 279 e, 252 280 pagination.Page{}, 253 - FilterEq("repo_at", repoAt), 254 - FilterEq("issue_id", issueId), 281 + orm.FilterEq("repo_at", repoAt), 282 + orm.FilterEq("issue_id", issueId), 255 283 ) 256 284 if err != nil { 257 285 return nil, err ··· 263 291 return &issues[0], nil 264 292 } 265 293 266 - func GetIssues(e Execer, filters ...filter) ([]models.Issue, error) { 294 + func GetIssues(e Execer, filters ...orm.Filter) ([]models.Issue, error) { 267 295 return GetIssuesPaginated(e, pagination.Page{}, filters...) 268 296 } 269 297 ··· 271 299 func GetIssueIDs(e Execer, opts models.IssueSearchOptions) ([]int64, error) { 272 300 var ids []int64 273 301 274 - var filters []filter 302 + var filters []orm.Filter 275 303 openValue := 0 276 304 if opts.IsOpen { 277 305 openValue = 1 278 306 } 279 - filters = append(filters, FilterEq("open", openValue)) 307 + filters = append(filters, orm.FilterEq("open", openValue)) 280 308 if opts.RepoAt != "" { 281 - filters = append(filters, FilterEq("repo_at", opts.RepoAt)) 309 + filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt)) 282 310 } 283 311 284 312 var conditions []string ··· 323 351 return ids, nil 324 352 } 325 353 326 - func AddIssueComment(e Execer, c models.IssueComment) (int64, error) { 327 - result, err := e.Exec( 354 + func AddIssueComment(tx *sql.Tx, c models.IssueComment) (int64, error) { 355 + result, err := tx.Exec( 328 356 `insert into issue_comments ( 329 357 did, 330 358 rkey, ··· 363 391 return 0, err 364 392 } 365 393 394 + if err := putReferences(tx, c.AtUri(), c.References); err != nil { 395 + return 0, fmt.Errorf("put reference_links: %w", err) 396 + } 397 + 366 398 return id, nil 367 399 } 368 400 369 - func DeleteIssueComments(e Execer, filters ...filter) error { 401 + func DeleteIssueComments(e Execer, filters ...orm.Filter) error { 370 402 var conditions []string 371 403 var args []any 372 404 for _, filter := range filters { ··· 385 417 return err 386 418 } 387 419 388 - func GetIssueComments(e Execer, filters ...filter) ([]models.IssueComment, error) { 389 - var comments []models.IssueComment 420 + func GetIssueComments(e Execer, filters ...orm.Filter) ([]models.IssueComment, error) { 421 + commentMap := make(map[string]*models.IssueComment) 390 422 391 423 var conditions []string 392 424 var args []any ··· 420 452 if err != nil { 421 453 return nil, err 422 454 } 455 + defer rows.Close() 423 456 424 457 for rows.Next() { 425 458 var comment models.IssueComment ··· 465 498 comment.ReplyTo = &replyTo.V 466 499 } 467 500 468 - comments = append(comments, comment) 501 + atUri := comment.AtUri().String() 502 + commentMap[atUri] = &comment 469 503 } 470 504 471 505 if err = rows.Err(); err != nil { 472 506 return nil, err 473 507 } 474 508 509 + // collect references for each comments 510 + commentAts := slices.Collect(maps.Keys(commentMap)) 511 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts)) 512 + if err != nil { 513 + return nil, fmt.Errorf("failed to query reference_links: %w", err) 514 + } 515 + for commentAt, references := range allReferencs { 516 + if comment, ok := commentMap[commentAt.String()]; ok { 517 + comment.References = references 518 + } 519 + } 520 + 521 + var comments []models.IssueComment 522 + for _, c := range commentMap { 523 + comments = append(comments, 
*c) 524 + } 525 + 526 + sort.Slice(comments, func(i, j int) bool { 527 + return comments[i].Created.After(comments[j].Created) 528 + }) 529 + 475 530 return comments, nil 476 531 } 477 532 478 - func DeleteIssues(e Execer, filters ...filter) error { 479 - var conditions []string 480 - var args []any 481 - for _, filter := range filters { 482 - conditions = append(conditions, filter.Condition()) 483 - args = append(args, filter.Arg()...) 533 + func DeleteIssues(tx *sql.Tx, did, rkey string) error { 534 + _, err := tx.Exec( 535 + `delete from issues 536 + where did = ? and rkey = ?`, 537 + did, 538 + rkey, 539 + ) 540 + if err != nil { 541 + return fmt.Errorf("delete issue: %w", err) 484 542 } 485 543 486 - whereClause := "" 487 - if conditions != nil { 488 - whereClause = " where " + strings.Join(conditions, " and ") 544 + uri := syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", did, tangled.RepoIssueNSID, rkey)) 545 + err = deleteReferences(tx, uri) 546 + if err != nil { 547 + return fmt.Errorf("delete reference_links: %w", err) 489 548 } 490 549 491 - query := fmt.Sprintf(`delete from issues %s`, whereClause) 492 - _, err := e.Exec(query, args...) 493 - return err 550 + return nil 494 551 } 495 552 496 - func CloseIssues(e Execer, filters ...filter) error { 553 + func CloseIssues(e Execer, filters ...orm.Filter) error { 497 554 var conditions []string 498 555 var args []any 499 556 for _, filter := range filters { ··· 511 568 return err 512 569 } 513 570 514 - func ReopenIssues(e Execer, filters ...filter) error { 571 + func ReopenIssues(e Execer, filters ...orm.Filter) error { 515 572 var conditions []string 516 573 var args []any 517 574 for _, filter := range filters {
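Throughout appview/db the package-private filter type is replaced by the exported orm.Filter, so callers now build predicates from the orm package. A minimal caller-side sketch, not part of this diff; the Execer value, the placeholder arguments, and the assumption that open issues are stored as open = 1 are illustrative:

    package example

    import (
        "fmt"

        "github.com/bluesky-social/indigo/atproto/syntax"
        "tangled.org/core/appview/db"
        "tangled.org/core/orm"
    )

    // listOpenIssues assumes e is any db.Execer (for example *sql.DB or *sql.Tx).
    func listOpenIssues(e db.Execer, repoAt syntax.ATURI) error {
        issues, err := db.GetIssues(e,
            orm.FilterEq("repo_at", repoAt),
            orm.FilterEq("open", 1),
        )
        if err != nil {
            return fmt.Errorf("get issues: %w", err)
        }
        for _, issue := range issues {
            // References is now populated from the reference_links table.
            fmt.Println(issue.IssueId, issue.Title, len(issue.References))
        }
        return nil
    }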
+8 -7
appview/db/label.go
··· 10 10 11 11 "github.com/bluesky-social/indigo/atproto/syntax" 12 12 "tangled.org/core/appview/models" 13 + "tangled.org/core/orm" 13 14 ) 14 15 15 16 // no updating type for now ··· 59 60 return id, nil 60 61 } 61 62 62 - func DeleteLabelDefinition(e Execer, filters ...filter) error { 63 + func DeleteLabelDefinition(e Execer, filters ...orm.Filter) error { 63 64 var conditions []string 64 65 var args []any 65 66 for _, filter := range filters { ··· 75 76 return err 76 77 } 77 78 78 - func GetLabelDefinitions(e Execer, filters ...filter) ([]models.LabelDefinition, error) { 79 + func GetLabelDefinitions(e Execer, filters ...orm.Filter) ([]models.LabelDefinition, error) { 79 80 var labelDefinitions []models.LabelDefinition 80 81 var conditions []string 81 82 var args []any ··· 167 168 } 168 169 169 170 // helper to get exactly one label def 170 - func GetLabelDefinition(e Execer, filters ...filter) (*models.LabelDefinition, error) { 171 + func GetLabelDefinition(e Execer, filters ...orm.Filter) (*models.LabelDefinition, error) { 171 172 labels, err := GetLabelDefinitions(e, filters...) 172 173 if err != nil { 173 174 return nil, err ··· 227 228 return id, nil 228 229 } 229 230 230 - func GetLabelOps(e Execer, filters ...filter) ([]models.LabelOp, error) { 231 + func GetLabelOps(e Execer, filters ...orm.Filter) ([]models.LabelOp, error) { 231 232 var labelOps []models.LabelOp 232 233 var conditions []string 233 234 var args []any ··· 302 303 } 303 304 304 305 // get labels for a given list of subject URIs 305 - func GetLabels(e Execer, filters ...filter) (map[syntax.ATURI]models.LabelState, error) { 306 + func GetLabels(e Execer, filters ...orm.Filter) (map[syntax.ATURI]models.LabelState, error) { 306 307 ops, err := GetLabelOps(e, filters...) 307 308 if err != nil { 308 309 return nil, err ··· 322 323 } 323 324 labelAts := slices.Collect(maps.Keys(labelAtSet)) 324 325 325 - actx, err := NewLabelApplicationCtx(e, FilterIn("at_uri", labelAts)) 326 + actx, err := NewLabelApplicationCtx(e, orm.FilterIn("at_uri", labelAts)) 326 327 if err != nil { 327 328 return nil, err 328 329 } ··· 338 339 return results, nil 339 340 } 340 341 341 - func NewLabelApplicationCtx(e Execer, filters ...filter) (*models.LabelApplicationCtx, error) { 342 + func NewLabelApplicationCtx(e Execer, filters ...orm.Filter) (*models.LabelApplicationCtx, error) { 342 343 labels, err := GetLabelDefinitions(e, filters...) 343 344 if err != nil { 344 345 return nil, err
+6 -5
appview/db/language.go
··· 7 7 8 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetRepoLanguages(e Execer, filters ...filter) ([]models.RepoLanguage, error) { 13 + func GetRepoLanguages(e Execer, filters ...orm.Filter) ([]models.RepoLanguage, error) { 13 14 var conditions []string 14 15 var args []any 15 16 for _, filter := range filters { ··· 27 28 whereClause, 28 29 ) 29 30 rows, err := e.Query(query, args...) 30 - 31 31 if err != nil { 32 32 return nil, fmt.Errorf("failed to execute query: %w ", err) 33 33 } 34 + defer rows.Close() 34 35 35 36 var langs []models.RepoLanguage 36 37 for rows.Next() { ··· 85 86 return nil 86 87 } 87 88 88 - func DeleteRepoLanguages(e Execer, filters ...filter) error { 89 + func DeleteRepoLanguages(e Execer, filters ...orm.Filter) error { 89 90 var conditions []string 90 91 var args []any 91 92 for _, filter := range filters { ··· 107 108 func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error { 108 109 err := DeleteRepoLanguages( 109 110 tx, 110 - FilterEq("repo_at", repoAt), 111 - FilterEq("ref", ref), 111 + orm.FilterEq("repo_at", repoAt), 112 + orm.FilterEq("ref", ref), 112 113 ) 113 114 if err != nil { 114 115 return fmt.Errorf("failed to delete existing languages: %w", err)
+20 -15
appview/db/notifications.go
··· 11 11 "github.com/bluesky-social/indigo/atproto/syntax" 12 12 "tangled.org/core/appview/models" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func CreateNotification(e Execer, notification *models.Notification) error { ··· 44 45 } 45 46 46 47 // GetNotificationsPaginated retrieves notifications with filters and pagination 47 - func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...filter) ([]*models.Notification, error) { 48 + func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.Notification, error) { 48 49 var conditions []string 49 50 var args []any 50 51 ··· 113 114 } 114 115 115 116 // GetNotificationsWithEntities retrieves notifications with their related entities 116 - func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...filter) ([]*models.NotificationWithEntity, error) { 117 + func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.NotificationWithEntity, error) { 117 118 var conditions []string 118 119 var args []any 119 120 ··· 256 257 } 257 258 258 259 // GetNotifications retrieves notifications with filters 259 - func GetNotifications(e Execer, filters ...filter) ([]*models.Notification, error) { 260 + func GetNotifications(e Execer, filters ...orm.Filter) ([]*models.Notification, error) { 260 261 return GetNotificationsPaginated(e, pagination.FirstPage(), filters...) 261 262 } 262 263 263 - func CountNotifications(e Execer, filters ...filter) (int64, error) { 264 + func CountNotifications(e Execer, filters ...orm.Filter) (int64, error) { 264 265 var conditions []string 265 266 var args []any 266 267 for _, filter := range filters { ··· 285 286 } 286 287 287 288 func MarkNotificationRead(e Execer, notificationID int64, userDID string) error { 288 - idFilter := FilterEq("id", notificationID) 289 - recipientFilter := FilterEq("recipient_did", userDID) 289 + idFilter := orm.FilterEq("id", notificationID) 290 + recipientFilter := orm.FilterEq("recipient_did", userDID) 290 291 291 292 query := fmt.Sprintf(` 292 293 UPDATE notifications ··· 314 315 } 315 316 316 317 func MarkAllNotificationsRead(e Execer, userDID string) error { 317 - recipientFilter := FilterEq("recipient_did", userDID) 318 - readFilter := FilterEq("read", 0) 318 + recipientFilter := orm.FilterEq("recipient_did", userDID) 319 + readFilter := orm.FilterEq("read", 0) 319 320 320 321 query := fmt.Sprintf(` 321 322 UPDATE notifications ··· 334 335 } 335 336 336 337 func DeleteNotification(e Execer, notificationID int64, userDID string) error { 337 - idFilter := FilterEq("id", notificationID) 338 - recipientFilter := FilterEq("recipient_did", userDID) 338 + idFilter := orm.FilterEq("id", notificationID) 339 + recipientFilter := orm.FilterEq("recipient_did", userDID) 339 340 340 341 query := fmt.Sprintf(` 341 342 DELETE FROM notifications ··· 362 363 } 363 364 364 365 func GetNotificationPreference(e Execer, userDid string) (*models.NotificationPreferences, error) { 365 - prefs, err := GetNotificationPreferences(e, FilterEq("user_did", userDid)) 366 + prefs, err := GetNotificationPreferences(e, orm.FilterEq("user_did", userDid)) 366 367 if err != nil { 367 368 return nil, err 368 369 } ··· 375 376 return p, nil 376 377 } 377 378 378 - func GetNotificationPreferences(e Execer, filters ...filter) (map[syntax.DID]*models.NotificationPreferences, error) { 379 + func GetNotificationPreferences(e Execer, filters ...orm.Filter) 
(map[syntax.DID]*models.NotificationPreferences, error) { 379 380 prefsMap := make(map[syntax.DID]*models.NotificationPreferences) 380 381 381 382 var conditions []string ··· 400 401 pull_created, 401 402 pull_commented, 402 403 followed, 404 + user_mentioned, 403 405 pull_merged, 404 406 issue_closed, 405 407 email_notifications ··· 425 427 &prefs.PullCreated, 426 428 &prefs.PullCommented, 427 429 &prefs.Followed, 430 + &prefs.UserMentioned, 428 431 &prefs.PullMerged, 429 432 &prefs.IssueClosed, 430 433 &prefs.EmailNotifications, ··· 446 449 query := ` 447 450 INSERT OR REPLACE INTO notification_preferences 448 451 (user_did, repo_starred, issue_created, issue_commented, pull_created, 449 - pull_commented, followed, pull_merged, issue_closed, email_notifications) 450 - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 452 + pull_commented, followed, user_mentioned, pull_merged, issue_closed, 453 + email_notifications) 454 + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 451 455 ` 452 456 453 457 result, err := d.DB.ExecContext(ctx, query, ··· 458 462 prefs.PullCreated, 459 463 prefs.PullCommented, 460 464 prefs.Followed, 465 + prefs.UserMentioned, 461 466 prefs.PullMerged, 462 467 prefs.IssueClosed, 463 468 prefs.EmailNotifications, ··· 479 484 480 485 func (d *DB) ClearOldNotifications(ctx context.Context, olderThan time.Duration) error { 481 486 cutoff := time.Now().Add(-olderThan) 482 - createdFilter := FilterLte("created", cutoff) 487 + createdFilter := orm.FilterLte("created", cutoff) 483 488 484 489 query := fmt.Sprintf(` 485 490 DELETE FROM notifications
+9 -6
appview/db/pipeline.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) { 13 + func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) { 13 14 var pipelines []models.Pipeline 14 15 15 16 var conditions []string ··· 168 169 169 170 // this is a mega query, but the most useful one: 170 171 // get N pipelines, for each one get the latest status of its N workflows 171 - func GetPipelineStatuses(e Execer, filters ...filter) ([]models.Pipeline, error) { 172 + func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) { 172 173 var conditions []string 173 174 var args []any 174 175 for _, filter := range filters { 175 - filter.key = "p." + filter.key // the table is aliased in the query to `p` 176 + filter.Key = "p." + filter.Key // the table is aliased in the query to `p` 176 177 conditions = append(conditions, filter.Condition()) 177 178 args = append(args, filter.Arg()...) 178 179 } ··· 205 206 join 206 207 triggers t ON p.trigger_id = t.id 207 208 %s 208 - `, whereClause) 209 + order by p.created desc 210 + limit %d 211 + `, whereClause, limit) 209 212 210 213 rows, err := e.Query(query, args...) 211 214 if err != nil { ··· 262 265 conditions = nil 263 266 args = nil 264 267 for _, p := range pipelines { 265 - knotFilter := FilterEq("pipeline_knot", p.Knot) 266 - rkeyFilter := FilterEq("pipeline_rkey", p.Rkey) 268 + knotFilter := orm.FilterEq("pipeline_knot", p.Knot) 269 + rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey) 267 270 conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition())) 268 271 args = append(args, p.Knot) 269 272 args = append(args, p.Rkey)
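GetPipelineStatuses now takes an explicit limit and returns the newest pipelines first; filter keys are rewritten onto the p. table alias internally, so callers keep passing bare column names. A hedged caller sketch, not from this diff; the knot column name is assumed for illustration:

    package example

    import (
        "tangled.org/core/appview/db"
        "tangled.org/core/appview/models"
        "tangled.org/core/orm"
    )

    // latestPipelines returns the 20 newest pipelines for a knot, each carrying
    // the latest status of its workflows.
    func latestPipelines(e db.Execer, knot string) ([]models.Pipeline, error) {
        return db.GetPipelineStatuses(e, 20, orm.FilterEq("knot", knot))
    }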
+29 -16
appview/db/profile.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 const TimeframeMonths = 7 ··· 19 20 timeline := models.ProfileTimeline{ 20 21 ByMonth: make([]models.ByMonth, TimeframeMonths), 21 22 } 22 - currentMonth := time.Now().Month() 23 + now := time.Now() 23 24 timeframe := fmt.Sprintf("-%d months", TimeframeMonths) 24 25 25 26 pulls, err := GetPullsByOwnerDid(e, forDid, timeframe) ··· 29 30 30 31 // group pulls by month 31 32 for _, pull := range pulls { 32 - pullMonth := pull.Created.Month() 33 + monthsAgo := monthsBetween(pull.Created, now) 33 34 34 - if currentMonth-pullMonth >= TimeframeMonths { 35 + if monthsAgo >= TimeframeMonths { 35 36 // shouldn't happen; but times are weird 36 37 continue 37 38 } 38 39 39 - idx := currentMonth - pullMonth 40 + idx := monthsAgo 40 41 items := &timeline.ByMonth[idx].PullEvents.Items 41 42 42 43 *items = append(*items, &pull) ··· 44 45 45 46 issues, err := GetIssues( 46 47 e, 47 - FilterEq("did", forDid), 48 - FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)), 48 + orm.FilterEq("did", forDid), 49 + orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)), 49 50 ) 50 51 if err != nil { 51 52 return nil, fmt.Errorf("error getting issues by owner did: %w", err) 52 53 } 53 54 54 55 for _, issue := range issues { 55 - issueMonth := issue.Created.Month() 56 + monthsAgo := monthsBetween(issue.Created, now) 56 57 57 - if currentMonth-issueMonth >= TimeframeMonths { 58 + if monthsAgo >= TimeframeMonths { 58 59 // shouldn't happen; but times are weird 59 60 continue 60 61 } 61 62 62 - idx := currentMonth - issueMonth 63 + idx := monthsAgo 63 64 items := &timeline.ByMonth[idx].IssueEvents.Items 64 65 65 66 *items = append(*items, &issue) 66 67 } 67 68 68 - repos, err := GetRepos(e, 0, FilterEq("did", forDid)) 69 + repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid)) 69 70 if err != nil { 70 71 return nil, fmt.Errorf("error getting all repos by did: %w", err) 71 72 } ··· 76 77 if repo.Source != "" { 77 78 sourceRepo, err = GetRepoByAtUri(e, repo.Source) 78 79 if err != nil { 79 - return nil, err 80 + // the source repo was not found, skip this bit 81 + log.Println("profile", "err", err) 80 82 } 81 83 } 82 84 83 - repoMonth := repo.Created.Month() 85 + monthsAgo := monthsBetween(repo.Created, now) 84 86 85 - if currentMonth-repoMonth >= TimeframeMonths { 87 + if monthsAgo >= TimeframeMonths { 86 88 // shouldn't happen; but times are weird 87 89 continue 88 90 } 89 91 90 - idx := currentMonth - repoMonth 92 + idx := monthsAgo 91 93 92 94 items := &timeline.ByMonth[idx].RepoEvents 93 95 *items = append(*items, models.RepoEvent{ ··· 99 101 return &timeline, nil 100 102 } 101 103 104 + func monthsBetween(from, to time.Time) int { 105 + years := to.Year() - from.Year() 106 + months := int(to.Month() - from.Month()) 107 + return years*12 + months 108 + } 109 + 102 110 func UpsertProfile(tx *sql.Tx, profile *models.Profile) error { 103 111 defer tx.Rollback() 104 112 ··· 199 207 return tx.Commit() 200 208 } 201 209 202 - func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) { 210 + func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) { 203 211 var conditions []string 204 212 var args []any 205 213 for _, filter := range filters { ··· 229 237 if err != nil { 230 238 return nil, err 231 239 } 240 + defer rows.Close() 232 241 233 242 profileMap := make(map[string]*models.Profile) 234 
243 for rows.Next() { ··· 269 278 if err != nil { 270 279 return nil, err 271 280 } 281 + defer rows.Close() 282 + 272 283 idxs := make(map[string]int) 273 284 for did := range profileMap { 274 285 idxs[did] = 0 ··· 289 300 if err != nil { 290 301 return nil, err 291 302 } 303 + defer rows.Close() 304 + 292 305 idxs = make(map[string]int) 293 306 for did := range profileMap { 294 307 idxs[did] = 0 ··· 441 454 } 442 455 443 456 // ensure all pinned repos are either own repos or collaborating repos 444 - repos, err := GetRepos(e, 0, FilterEq("did", profile.Did)) 457 + repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did)) 445 458 if err != nil { 446 459 log.Printf("getting repos for %s: %s", profile.Did, err) 447 460 }
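The timeline bucketing previously subtracted time.Month values directly, which turns negative whenever an event and the current date straddle a year boundary (January minus December is -11) and yields an out-of-range bucket index. monthsBetween counts whole calendar months instead. A small standalone illustration; monthsBetween is copied from the hunk above:

    package main

    import (
        "fmt"
        "time"
    )

    // monthsBetween reports whole calendar months from `from` to `to`.
    func monthsBetween(from, to time.Time) int {
        years := to.Year() - from.Year()
        months := int(to.Month() - from.Month())
        return years*12 + months
    }

    func main() {
        now := time.Date(2025, time.January, 15, 0, 0, 0, 0, time.UTC)
        created := time.Date(2024, time.December, 20, 0, 0, 0, 0, time.UTC)

        fmt.Println(int(now.Month() - created.Month())) // -11: the old, broken index
        fmt.Println(monthsBetween(created, now))        // 1: one month ago, bucket 1
    }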
+69 -24
appview/db/pulls.go
··· 13 13 14 14 "github.com/bluesky-social/indigo/atproto/syntax" 15 15 "tangled.org/core/appview/models" 16 + "tangled.org/core/orm" 16 17 ) 17 18 18 19 func NewPull(tx *sql.Tx, pull *models.Pull) error { ··· 93 94 insert into pull_submissions (pull_at, round_number, patch, combined, source_rev) 94 95 values (?, ?, ?, ?, ?) 95 96 `, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev) 96 - return err 97 + if err != nil { 98 + return err 99 + } 100 + 101 + if err := putReferences(tx, pull.AtUri(), pull.References); err != nil { 102 + return fmt.Errorf("put reference_links: %w", err) 103 + } 104 + 105 + return nil 97 106 } 98 107 99 108 func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) { ··· 110 119 return pullId - 1, err 111 120 } 112 121 113 - func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) { 122 + func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) { 114 123 pulls := make(map[syntax.ATURI]*models.Pull) 115 124 116 125 var conditions []string ··· 221 230 for _, p := range pulls { 222 231 pullAts = append(pullAts, p.AtUri()) 223 232 } 224 - submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts)) 233 + submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts)) 225 234 if err != nil { 226 235 return nil, fmt.Errorf("failed to get submissions: %w", err) 227 236 } ··· 233 242 } 234 243 235 244 // collect allLabels for each issue 236 - allLabels, err := GetLabels(e, FilterIn("subject", pullAts)) 245 + allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts)) 237 246 if err != nil { 238 247 return nil, fmt.Errorf("failed to query labels: %w", err) 239 248 } ··· 250 259 sourceAts = append(sourceAts, *p.PullSource.RepoAt) 251 260 } 252 261 } 253 - sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts)) 262 + sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts)) 254 263 if err != nil && !errors.Is(err, sql.ErrNoRows) { 255 264 return nil, fmt.Errorf("failed to get source repos: %w", err) 256 265 } ··· 266 275 } 267 276 } 268 277 278 + allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts)) 279 + if err != nil { 280 + return nil, fmt.Errorf("failed to query reference_links: %w", err) 281 + } 282 + for pullAt, references := range allReferences { 283 + if pull, ok := pulls[pullAt]; ok { 284 + pull.References = references 285 + } 286 + } 287 + 269 288 orderedByPullId := []*models.Pull{} 270 289 for _, p := range pulls { 271 290 orderedByPullId = append(orderedByPullId, p) ··· 277 296 return orderedByPullId, nil 278 297 } 279 298 280 - func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) { 299 + func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) { 281 300 return GetPullsWithLimit(e, 0, filters...) 
282 301 } 283 302 284 303 func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) { 285 304 var ids []int64 286 305 287 - var filters []filter 288 - filters = append(filters, FilterEq("state", opts.State)) 306 + var filters []orm.Filter 307 + filters = append(filters, orm.FilterEq("state", opts.State)) 289 308 if opts.RepoAt != "" { 290 - filters = append(filters, FilterEq("repo_at", opts.RepoAt)) 309 + filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt)) 291 310 } 292 311 293 312 var conditions []string ··· 343 362 } 344 363 345 364 func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) { 346 - pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId)) 365 + pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId)) 347 366 if err != nil { 348 367 return nil, err 349 368 } ··· 355 374 } 356 375 357 376 // mapping from pull -> pull submissions 358 - func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) { 377 + func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) { 359 378 var conditions []string 360 379 var args []any 361 380 for _, filter := range filters { ··· 430 449 431 450 // Get comments for all submissions using GetPullComments 432 451 submissionIds := slices.Collect(maps.Keys(submissionMap)) 433 - comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds)) 452 + comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds)) 434 453 if err != nil { 435 - return nil, err 454 + return nil, fmt.Errorf("failed to get pull comments: %w", err) 436 455 } 437 456 for _, comment := range comments { 438 457 if submission, ok := submissionMap[comment.SubmissionId]; ok { ··· 456 475 return m, nil 457 476 } 458 477 459 - func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) { 478 + func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) { 460 479 var conditions []string 461 480 var args []any 462 481 for _, filter := range filters { ··· 492 511 } 493 512 defer rows.Close() 494 513 495 - var comments []models.PullComment 514 + commentMap := make(map[string]*models.PullComment) 496 515 for rows.Next() { 497 516 var comment models.PullComment 498 517 var createdAt string ··· 514 533 comment.Created = t 515 534 } 516 535 517 - comments = append(comments, comment) 536 + atUri := comment.AtUri().String() 537 + commentMap[atUri] = &comment 518 538 } 519 539 520 540 if err := rows.Err(); err != nil { 521 541 return nil, err 522 542 } 523 543 544 + // collect references for each comments 545 + commentAts := slices.Collect(maps.Keys(commentMap)) 546 + allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts)) 547 + if err != nil { 548 + return nil, fmt.Errorf("failed to query reference_links: %w", err) 549 + } 550 + for commentAt, references := range allReferencs { 551 + if comment, ok := commentMap[commentAt.String()]; ok { 552 + comment.References = references 553 + } 554 + } 555 + 556 + var comments []models.PullComment 557 + for _, c := range commentMap { 558 + comments = append(comments, *c) 559 + } 560 + 561 + sort.Slice(comments, func(i, j int) bool { 562 + return comments[i].Created.Before(comments[j].Created) 563 + }) 564 + 524 565 return comments, nil 525 566 } 526 567 ··· 600 641 return pulls, nil 601 642 } 602 643 603 - func NewPullComment(e Execer, 
comment *models.PullComment) (int64, error) { 644 + func NewPullComment(tx *sql.Tx, comment *models.PullComment) (int64, error) { 604 645 query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)` 605 - res, err := e.Exec( 646 + res, err := tx.Exec( 606 647 query, 607 648 comment.OwnerDid, 608 649 comment.RepoAt, ··· 618 659 i, err := res.LastInsertId() 619 660 if err != nil { 620 661 return 0, err 662 + } 663 + 664 + if err := putReferences(tx, comment.AtUri(), comment.References); err != nil { 665 + return 0, fmt.Errorf("put reference_links: %w", err) 621 666 } 622 667 623 668 return i, nil ··· 664 709 return err 665 710 } 666 711 667 - func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error { 712 + func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error { 668 713 var conditions []string 669 714 var args []any 670 715 ··· 688 733 689 734 // Only used when stacking to update contents in the event of a rebase (the interdiff should be empty). 690 735 // otherwise submissions are immutable 691 - func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error { 736 + func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error { 692 737 var conditions []string 693 738 var args []any 694 739 ··· 746 791 func GetStack(e Execer, stackId string) (models.Stack, error) { 747 792 unorderedPulls, err := GetPulls( 748 793 e, 749 - FilterEq("stack_id", stackId), 750 - FilterNotEq("state", models.PullDeleted), 794 + orm.FilterEq("stack_id", stackId), 795 + orm.FilterNotEq("state", models.PullDeleted), 751 796 ) 752 797 if err != nil { 753 798 return nil, err ··· 791 836 func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) { 792 837 pulls, err := GetPulls( 793 838 e, 794 - FilterEq("stack_id", stackId), 795 - FilterEq("state", models.PullDeleted), 839 + orm.FilterEq("stack_id", stackId), 840 + orm.FilterEq("state", models.PullDeleted), 796 841 ) 797 842 if err != nil { 798 843 return nil, err
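NewPullComment, like NewPull and AddIssueComment, now takes a *sql.Tx so the comment row and its reference_links rows are written atomically. A caller-side sketch, assumed rather than taken from this diff:

    package example

    import (
        "database/sql"
        "fmt"

        "tangled.org/core/appview/db"
        "tangled.org/core/appview/models"
    )

    func addPullComment(sqldb *sql.DB, comment *models.PullComment) error {
        tx, err := sqldb.Begin()
        if err != nil {
            return err
        }
        defer tx.Rollback()

        // Inserts the comment and rewrites its reference_links in one transaction.
        if _, err := db.NewPullComment(tx, comment); err != nil {
            return fmt.Errorf("new pull comment: %w", err)
        }
        return tx.Commit()
    }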
+3 -2
appview/db/punchcard.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 13 // this adds to the existing count ··· 20 21 return err 21 22 } 22 23 23 - func MakePunchcard(e Execer, filters ...filter) (*models.Punchcard, error) { 24 + func MakePunchcard(e Execer, filters ...orm.Filter) (*models.Punchcard, error) { 24 25 punchcard := &models.Punchcard{} 25 26 now := time.Now() 26 27 startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC) ··· 77 78 punch.Count = int(count.Int64) 78 79 } 79 80 80 - punchcard.Punches[punch.Date.YearDay()] = punch 81 + punchcard.Punches[punch.Date.YearDay()-1] = punch 81 82 punchcard.Total += punch.Count 82 83 } 83 84
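The punchcard indexing change accounts for time.Time.YearDay being 1-based (January 1 is day 1) while Punches is a 0-indexed slice; without the -1 the first slot was never filled and the last day of the year could fall outside the slice, depending on how Punches is sized (not visible in this hunk). A quick standalone check:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        jan1 := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)
        dec31 := time.Date(2025, time.December, 31, 0, 0, 0, 0, time.UTC)

        fmt.Println(jan1.YearDay(), jan1.YearDay()-1)   // 1 0
        fmt.Println(dec31.YearDay(), dec31.YearDay()-1) // 365 364
    }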
+463
appview/db/reference.go
··· 1 + package db 2 + 3 + import ( 4 + "database/sql" 5 + "fmt" 6 + "strings" 7 + 8 + "github.com/bluesky-social/indigo/atproto/syntax" 9 + "tangled.org/core/api/tangled" 10 + "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 12 + ) 13 + 14 + // ValidateReferenceLinks resolves refLinks to Issue/PR/IssueComment/PullComment ATURIs. 15 + // It will ignore missing refLinks. 16 + func ValidateReferenceLinks(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) { 17 + var ( 18 + issueRefs []models.ReferenceLink 19 + pullRefs []models.ReferenceLink 20 + ) 21 + for _, ref := range refLinks { 22 + switch ref.Kind { 23 + case models.RefKindIssue: 24 + issueRefs = append(issueRefs, ref) 25 + case models.RefKindPull: 26 + pullRefs = append(pullRefs, ref) 27 + } 28 + } 29 + issueUris, err := findIssueReferences(e, issueRefs) 30 + if err != nil { 31 + return nil, fmt.Errorf("find issue references: %w", err) 32 + } 33 + pullUris, err := findPullReferences(e, pullRefs) 34 + if err != nil { 35 + return nil, fmt.Errorf("find pull references: %w", err) 36 + } 37 + 38 + return append(issueUris, pullUris...), nil 39 + } 40 + 41 + func findIssueReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) { 42 + if len(refLinks) == 0 { 43 + return nil, nil 44 + } 45 + vals := make([]string, len(refLinks)) 46 + args := make([]any, 0, len(refLinks)*4) 47 + for i, ref := range refLinks { 48 + vals[i] = "(?, ?, ?, ?)" 49 + args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId) 50 + } 51 + query := fmt.Sprintf( 52 + `with input(owner_did, name, issue_id, comment_id) as ( 53 + values %s 54 + ) 55 + select 56 + i.did, i.rkey, 57 + c.did, c.rkey 58 + from input inp 59 + join repos r 60 + on r.did = inp.owner_did 61 + and r.name = inp.name 62 + join issues i 63 + on i.repo_at = r.at_uri 64 + and i.issue_id = inp.issue_id 65 + left join issue_comments c 66 + on inp.comment_id is not null 67 + and c.issue_at = i.at_uri 68 + and c.id = inp.comment_id 69 + `, 70 + strings.Join(vals, ","), 71 + ) 72 + rows, err := e.Query(query, args...) 
73 + if err != nil { 74 + return nil, err 75 + } 76 + defer rows.Close() 77 + 78 + var uris []syntax.ATURI 79 + 80 + for rows.Next() { 81 + // Scan rows 82 + var issueOwner, issueRkey string 83 + var commentOwner, commentRkey sql.NullString 84 + var uri syntax.ATURI 85 + if err := rows.Scan(&issueOwner, &issueRkey, &commentOwner, &commentRkey); err != nil { 86 + return nil, err 87 + } 88 + if commentOwner.Valid && commentRkey.Valid { 89 + uri = syntax.ATURI(fmt.Sprintf( 90 + "at://%s/%s/%s", 91 + commentOwner.String, 92 + tangled.RepoIssueCommentNSID, 93 + commentRkey.String, 94 + )) 95 + } else { 96 + uri = syntax.ATURI(fmt.Sprintf( 97 + "at://%s/%s/%s", 98 + issueOwner, 99 + tangled.RepoIssueNSID, 100 + issueRkey, 101 + )) 102 + } 103 + uris = append(uris, uri) 104 + } 105 + if err := rows.Err(); err != nil { 106 + return nil, fmt.Errorf("iterate rows: %w", err) 107 + } 108 + 109 + return uris, nil 110 + } 111 + 112 + func findPullReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) { 113 + if len(refLinks) == 0 { 114 + return nil, nil 115 + } 116 + vals := make([]string, len(refLinks)) 117 + args := make([]any, 0, len(refLinks)*4) 118 + for i, ref := range refLinks { 119 + vals[i] = "(?, ?, ?, ?)" 120 + args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId) 121 + } 122 + query := fmt.Sprintf( 123 + `with input(owner_did, name, pull_id, comment_id) as ( 124 + values %s 125 + ) 126 + select 127 + p.owner_did, p.rkey, 128 + c.comment_at 129 + from input inp 130 + join repos r 131 + on r.did = inp.owner_did 132 + and r.name = inp.name 133 + join pulls p 134 + on p.repo_at = r.at_uri 135 + and p.pull_id = inp.pull_id 136 + left join pull_comments c 137 + on inp.comment_id is not null 138 + and c.repo_at = r.at_uri and c.pull_id = p.pull_id 139 + and c.id = inp.comment_id 140 + `, 141 + strings.Join(vals, ","), 142 + ) 143 + rows, err := e.Query(query, args...) 
144 + if err != nil { 145 + return nil, err 146 + } 147 + defer rows.Close() 148 + 149 + var uris []syntax.ATURI 150 + 151 + for rows.Next() { 152 + // Scan rows 153 + var pullOwner, pullRkey string 154 + var commentUri sql.NullString 155 + var uri syntax.ATURI 156 + if err := rows.Scan(&pullOwner, &pullRkey, &commentUri); err != nil { 157 + return nil, err 158 + } 159 + if commentUri.Valid { 160 + // no-op 161 + uri = syntax.ATURI(commentUri.String) 162 + } else { 163 + uri = syntax.ATURI(fmt.Sprintf( 164 + "at://%s/%s/%s", 165 + pullOwner, 166 + tangled.RepoPullNSID, 167 + pullRkey, 168 + )) 169 + } 170 + uris = append(uris, uri) 171 + } 172 + return uris, nil 173 + } 174 + 175 + func putReferences(tx *sql.Tx, fromAt syntax.ATURI, references []syntax.ATURI) error { 176 + err := deleteReferences(tx, fromAt) 177 + if err != nil { 178 + return fmt.Errorf("delete old reference_links: %w", err) 179 + } 180 + if len(references) == 0 { 181 + return nil 182 + } 183 + 184 + values := make([]string, 0, len(references)) 185 + args := make([]any, 0, len(references)*2) 186 + for _, ref := range references { 187 + values = append(values, "(?, ?)") 188 + args = append(args, fromAt, ref) 189 + } 190 + _, err = tx.Exec( 191 + fmt.Sprintf( 192 + `insert into reference_links (from_at, to_at) 193 + values %s`, 194 + strings.Join(values, ","), 195 + ), 196 + args..., 197 + ) 198 + if err != nil { 199 + return fmt.Errorf("insert new reference_links: %w", err) 200 + } 201 + return nil 202 + } 203 + 204 + func deleteReferences(tx *sql.Tx, fromAt syntax.ATURI) error { 205 + _, err := tx.Exec(`delete from reference_links where from_at = ?`, fromAt) 206 + return err 207 + } 208 + 209 + func GetReferencesAll(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]syntax.ATURI, error) { 210 + var ( 211 + conditions []string 212 + args []any 213 + ) 214 + for _, filter := range filters { 215 + conditions = append(conditions, filter.Condition()) 216 + args = append(args, filter.Arg()...) 
217 + } 218 + 219 + whereClause := "" 220 + if conditions != nil { 221 + whereClause = " where " + strings.Join(conditions, " and ") 222 + } 223 + 224 + rows, err := e.Query( 225 + fmt.Sprintf( 226 + `select from_at, to_at from reference_links %s`, 227 + whereClause, 228 + ), 229 + args..., 230 + ) 231 + if err != nil { 232 + return nil, fmt.Errorf("query reference_links: %w", err) 233 + } 234 + defer rows.Close() 235 + 236 + result := make(map[syntax.ATURI][]syntax.ATURI) 237 + 238 + for rows.Next() { 239 + var from, to syntax.ATURI 240 + if err := rows.Scan(&from, &to); err != nil { 241 + return nil, fmt.Errorf("scan row: %w", err) 242 + } 243 + 244 + result[from] = append(result[from], to) 245 + } 246 + if err := rows.Err(); err != nil { 247 + return nil, fmt.Errorf("iterate rows: %w", err) 248 + } 249 + 250 + return result, nil 251 + } 252 + 253 + func GetBacklinks(e Execer, target syntax.ATURI) ([]models.RichReferenceLink, error) { 254 + rows, err := e.Query( 255 + `select from_at from reference_links 256 + where to_at = ?`, 257 + target, 258 + ) 259 + if err != nil { 260 + return nil, fmt.Errorf("query backlinks: %w", err) 261 + } 262 + defer rows.Close() 263 + 264 + var ( 265 + backlinks []models.RichReferenceLink 266 + backlinksMap = make(map[string][]syntax.ATURI) 267 + ) 268 + for rows.Next() { 269 + var from syntax.ATURI 270 + if err := rows.Scan(&from); err != nil { 271 + return nil, fmt.Errorf("scan row: %w", err) 272 + } 273 + nsid := from.Collection().String() 274 + backlinksMap[nsid] = append(backlinksMap[nsid], from) 275 + } 276 + if err := rows.Err(); err != nil { 277 + return nil, fmt.Errorf("iterate rows: %w", err) 278 + } 279 + 280 + var ls []models.RichReferenceLink 281 + ls, err = getIssueBacklinks(e, backlinksMap[tangled.RepoIssueNSID]) 282 + if err != nil { 283 + return nil, fmt.Errorf("get issue backlinks: %w", err) 284 + } 285 + backlinks = append(backlinks, ls...) 286 + ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.RepoIssueCommentNSID]) 287 + if err != nil { 288 + return nil, fmt.Errorf("get issue_comment backlinks: %w", err) 289 + } 290 + backlinks = append(backlinks, ls...) 291 + ls, err = getPullBacklinks(e, backlinksMap[tangled.RepoPullNSID]) 292 + if err != nil { 293 + return nil, fmt.Errorf("get pull backlinks: %w", err) 294 + } 295 + backlinks = append(backlinks, ls...) 296 + ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.RepoPullCommentNSID]) 297 + if err != nil { 298 + return nil, fmt.Errorf("get pull_comment backlinks: %w", err) 299 + } 300 + backlinks = append(backlinks, ls...) 
301 + 302 + return backlinks, nil 303 + } 304 + 305 + func getIssueBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 306 + if len(aturis) == 0 { 307 + return nil, nil 308 + } 309 + vals := make([]string, len(aturis)) 310 + args := make([]any, 0, len(aturis)*2) 311 + for i, aturi := range aturis { 312 + vals[i] = "(?, ?)" 313 + did := aturi.Authority().String() 314 + rkey := aturi.RecordKey().String() 315 + args = append(args, did, rkey) 316 + } 317 + rows, err := e.Query( 318 + fmt.Sprintf( 319 + `select r.did, r.name, i.issue_id, i.title, i.open 320 + from issues i 321 + join repos r 322 + on r.at_uri = i.repo_at 323 + where (i.did, i.rkey) in (%s)`, 324 + strings.Join(vals, ","), 325 + ), 326 + args..., 327 + ) 328 + if err != nil { 329 + return nil, err 330 + } 331 + defer rows.Close() 332 + var refLinks []models.RichReferenceLink 333 + for rows.Next() { 334 + var l models.RichReferenceLink 335 + l.Kind = models.RefKindIssue 336 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil { 337 + return nil, err 338 + } 339 + refLinks = append(refLinks, l) 340 + } 341 + if err := rows.Err(); err != nil { 342 + return nil, fmt.Errorf("iterate rows: %w", err) 343 + } 344 + return refLinks, nil 345 + } 346 + 347 + func getIssueCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 348 + if len(aturis) == 0 { 349 + return nil, nil 350 + } 351 + filter := orm.FilterIn("c.at_uri", aturis) 352 + rows, err := e.Query( 353 + fmt.Sprintf( 354 + `select r.did, r.name, i.issue_id, c.id, i.title, i.open 355 + from issue_comments c 356 + join issues i 357 + on i.at_uri = c.issue_at 358 + join repos r 359 + on r.at_uri = i.repo_at 360 + where %s`, 361 + filter.Condition(), 362 + ), 363 + filter.Arg()..., 364 + ) 365 + if err != nil { 366 + return nil, err 367 + } 368 + defer rows.Close() 369 + var refLinks []models.RichReferenceLink 370 + for rows.Next() { 371 + var l models.RichReferenceLink 372 + l.Kind = models.RefKindIssue 373 + l.CommentId = new(int) 374 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil { 375 + return nil, err 376 + } 377 + refLinks = append(refLinks, l) 378 + } 379 + if err := rows.Err(); err != nil { 380 + return nil, fmt.Errorf("iterate rows: %w", err) 381 + } 382 + return refLinks, nil 383 + } 384 + 385 + func getPullBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 386 + if len(aturis) == 0 { 387 + return nil, nil 388 + } 389 + vals := make([]string, len(aturis)) 390 + args := make([]any, 0, len(aturis)*2) 391 + for i, aturi := range aturis { 392 + vals[i] = "(?, ?)" 393 + did := aturi.Authority().String() 394 + rkey := aturi.RecordKey().String() 395 + args = append(args, did, rkey) 396 + } 397 + rows, err := e.Query( 398 + fmt.Sprintf( 399 + `select r.did, r.name, p.pull_id, p.title, p.state 400 + from pulls p 401 + join repos r 402 + on r.at_uri = p.repo_at 403 + where (p.owner_did, p.rkey) in (%s)`, 404 + strings.Join(vals, ","), 405 + ), 406 + args..., 407 + ) 408 + if err != nil { 409 + return nil, err 410 + } 411 + defer rows.Close() 412 + var refLinks []models.RichReferenceLink 413 + for rows.Next() { 414 + var l models.RichReferenceLink 415 + l.Kind = models.RefKindPull 416 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil { 417 + return nil, err 418 + } 419 + refLinks = append(refLinks, l) 420 + } 421 + if err := rows.Err(); err != nil { 422 + return nil, 
fmt.Errorf("iterate rows: %w", err) 423 + } 424 + return refLinks, nil 425 + } 426 + 427 + func getPullCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) { 428 + if len(aturis) == 0 { 429 + return nil, nil 430 + } 431 + filter := orm.FilterIn("c.comment_at", aturis) 432 + rows, err := e.Query( 433 + fmt.Sprintf( 434 + `select r.did, r.name, p.pull_id, c.id, p.title, p.state 435 + from repos r 436 + join pulls p 437 + on r.at_uri = p.repo_at 438 + join pull_comments c 439 + on r.at_uri = c.repo_at and p.pull_id = c.pull_id 440 + where %s`, 441 + filter.Condition(), 442 + ), 443 + filter.Arg()..., 444 + ) 445 + if err != nil { 446 + return nil, err 447 + } 448 + defer rows.Close() 449 + var refLinks []models.RichReferenceLink 450 + for rows.Next() { 451 + var l models.RichReferenceLink 452 + l.Kind = models.RefKindPull 453 + l.CommentId = new(int) 454 + if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil { 455 + return nil, err 456 + } 457 + refLinks = append(refLinks, l) 458 + } 459 + if err := rows.Err(); err != nil { 460 + return nil, fmt.Errorf("iterate rows: %w", err) 461 + } 462 + return refLinks, nil 463 + }
+5 -3
appview/db/registration.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetRegistrations(e Execer, filters ...filter) ([]models.Registration, error) { 13 + func GetRegistrations(e Execer, filters ...orm.Filter) ([]models.Registration, error) { 13 14 var registrations []models.Registration 14 15 15 16 var conditions []string ··· 37 38 if err != nil { 38 39 return nil, err 39 40 } 41 + defer rows.Close() 40 42 41 43 for rows.Next() { 42 44 var createdAt string ··· 69 71 return registrations, nil 70 72 } 71 73 72 - func MarkRegistered(e Execer, filters ...filter) error { 74 + func MarkRegistered(e Execer, filters ...orm.Filter) error { 73 75 var conditions []string 74 76 var args []any 75 77 for _, filter := range filters { ··· 94 96 return err 95 97 } 96 98 97 - func DeleteKnot(e Execer, filters ...filter) error { 99 + func DeleteKnot(e Execer, filters ...orm.Filter) error { 98 100 var conditions []string 99 101 var args []any 100 102 for _, filter := range filters {
+32 -37
appview/db/repos.go
··· 10 10 "time" 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 - securejoin "github.com/cyphar/filepath-securejoin" 14 - "tangled.org/core/api/tangled" 15 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 16 15 ) 17 16 18 - type Repo struct { 19 - Id int64 20 - Did string 21 - Name string 22 - Knot string 23 - Rkey string 24 - Created time.Time 25 - Description string 26 - Spindle string 27 - 28 - // optionally, populate this when querying for reverse mappings 29 - RepoStats *models.RepoStats 30 - 31 - // optional 32 - Source string 33 - } 34 - 35 - func (r Repo) RepoAt() syntax.ATURI { 36 - return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey)) 37 - } 38 - 39 - func (r Repo) DidSlashRepo() string { 40 - p, _ := securejoin.SecureJoin(r.Did, r.Name) 41 - return p 42 - } 43 - 44 - func GetRepos(e Execer, limit int, filters ...filter) ([]models.Repo, error) { 17 + func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) { 45 18 repoMap := make(map[syntax.ATURI]*models.Repo) 46 19 47 20 var conditions []string ··· 83 56 limitClause, 84 57 ) 85 58 rows, err := e.Query(repoQuery, args...) 86 - 87 59 if err != nil { 88 60 return nil, fmt.Errorf("failed to execute repo query: %w ", err) 89 61 } 62 + defer rows.Close() 90 63 91 64 for rows.Next() { 92 65 var repo models.Repo ··· 155 128 if err != nil { 156 129 return nil, fmt.Errorf("failed to execute labels query: %w ", err) 157 130 } 131 + defer rows.Close() 132 + 158 133 for rows.Next() { 159 134 var repoat, labelat string 160 135 if err := rows.Scan(&repoat, &labelat); err != nil { ··· 183 158 from repo_languages 184 159 where repo_at in (%s) 185 160 and is_default_ref = 1 161 + and language <> '' 186 162 ) 187 163 where rn = 1 188 164 `, ··· 192 168 if err != nil { 193 169 return nil, fmt.Errorf("failed to execute lang query: %w ", err) 194 170 } 171 + defer rows.Close() 172 + 195 173 for rows.Next() { 196 174 var repoat, lang string 197 175 if err := rows.Scan(&repoat, &lang); err != nil { ··· 208 186 209 187 starCountQuery := fmt.Sprintf( 210 188 `select 211 - repo_at, count(1) 189 + subject_at, count(1) 212 190 from stars 213 - where repo_at in (%s) 214 - group by repo_at`, 191 + where subject_at in (%s) 192 + group by subject_at`, 215 193 inClause, 216 194 ) 217 195 rows, err = e.Query(starCountQuery, args...) 218 196 if err != nil { 219 197 return nil, fmt.Errorf("failed to execute star-count query: %w ", err) 220 198 } 199 + defer rows.Close() 200 + 221 201 for rows.Next() { 222 202 var repoat string 223 203 var count int ··· 247 227 if err != nil { 248 228 return nil, fmt.Errorf("failed to execute issue-count query: %w ", err) 249 229 } 230 + defer rows.Close() 231 + 250 232 for rows.Next() { 251 233 var repoat string 252 234 var open, closed int ··· 288 270 if err != nil { 289 271 return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err) 290 272 } 273 + defer rows.Close() 274 + 291 275 for rows.Next() { 292 276 var repoat string 293 277 var open, merged, closed, deleted int ··· 322 306 } 323 307 324 308 // helper to get exactly one repo 325 - func GetRepo(e Execer, filters ...filter) (*models.Repo, error) { 309 + func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) { 326 310 repos, err := GetRepos(e, 0, filters...) 
327 311 if err != nil { 328 312 return nil, err ··· 339 323 return &repos[0], nil 340 324 } 341 325 342 - func CountRepos(e Execer, filters ...filter) (int64, error) { 326 + func CountRepos(e Execer, filters ...orm.Filter) (int64, error) { 343 327 var conditions []string 344 328 var args []any 345 329 for _, filter := range filters { ··· 439 423 return nullableSource.String, nil 440 424 } 441 425 426 + func GetRepoSourceRepo(e Execer, repoAt syntax.ATURI) (*models.Repo, error) { 427 + source, err := GetRepoSource(e, repoAt) 428 + if source == "" || errors.Is(err, sql.ErrNoRows) { 429 + return nil, nil 430 + } 431 + if err != nil { 432 + return nil, err 433 + } 434 + return GetRepoByAtUri(e, source) 435 + } 436 + 442 437 func GetForksByDid(e Execer, did string) ([]models.Repo, error) { 443 438 var repos []models.Repo 444 439 ··· 559 554 return err 560 555 } 561 556 562 - func UnsubscribeLabel(e Execer, filters ...filter) error { 557 + func UnsubscribeLabel(e Execer, filters ...orm.Filter) error { 563 558 var conditions []string 564 559 var args []any 565 560 for _, filter := range filters { ··· 577 572 return err 578 573 } 579 574 580 - func GetRepoLabels(e Execer, filters ...filter) ([]models.RepoLabel, error) { 575 + func GetRepoLabels(e Execer, filters ...orm.Filter) ([]models.RepoLabel, error) { 581 576 var conditions []string 582 577 var args []any 583 578 for _, filter := range filters {
+6 -5
appview/db/spindle.go
··· 7 7 "time" 8 8 9 9 "tangled.org/core/appview/models" 10 + "tangled.org/core/orm" 10 11 ) 11 12 12 - func GetSpindles(e Execer, filters ...filter) ([]models.Spindle, error) { 13 + func GetSpindles(e Execer, filters ...orm.Filter) ([]models.Spindle, error) { 13 14 var spindles []models.Spindle 14 15 15 16 var conditions []string ··· 91 92 return err 92 93 } 93 94 94 - func VerifySpindle(e Execer, filters ...filter) (int64, error) { 95 + func VerifySpindle(e Execer, filters ...orm.Filter) (int64, error) { 95 96 var conditions []string 96 97 var args []any 97 98 for _, filter := range filters { ··· 114 115 return res.RowsAffected() 115 116 } 116 117 117 - func DeleteSpindle(e Execer, filters ...filter) error { 118 + func DeleteSpindle(e Execer, filters ...orm.Filter) error { 118 119 var conditions []string 119 120 var args []any 120 121 for _, filter := range filters { ··· 144 145 return err 145 146 } 146 147 147 - func RemoveSpindleMember(e Execer, filters ...filter) error { 148 + func RemoveSpindleMember(e Execer, filters ...orm.Filter) error { 148 149 var conditions []string 149 150 var args []any 150 151 for _, filter := range filters { ··· 163 164 return err 164 165 } 165 166 166 - func GetSpindleMembers(e Execer, filters ...filter) ([]models.SpindleMember, error) { 167 + func GetSpindleMembers(e Execer, filters ...orm.Filter) ([]models.SpindleMember, error) { 167 168 var members []models.SpindleMember 168 169 169 170 var conditions []string
+44 -102
appview/db/star.go
··· 11 11 12 12 "github.com/bluesky-social/indigo/atproto/syntax" 13 13 "tangled.org/core/appview/models" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func AddStar(e Execer, star *models.Star) error { 17 - query := `insert or ignore into stars (starred_by_did, repo_at, rkey) values (?, ?, ?)` 18 + query := `insert or ignore into stars (did, subject_at, rkey) values (?, ?, ?)` 18 19 _, err := e.Exec( 19 20 query, 20 - star.StarredByDid, 21 + star.Did, 21 22 star.RepoAt.String(), 22 23 star.Rkey, 23 24 ) ··· 25 26 } 26 27 27 28 // Get a star record 28 - func GetStar(e Execer, starredByDid string, repoAt syntax.ATURI) (*models.Star, error) { 29 + func GetStar(e Execer, did string, subjectAt syntax.ATURI) (*models.Star, error) { 29 30 query := ` 30 - select starred_by_did, repo_at, created, rkey 31 + select did, subject_at, created, rkey 31 32 from stars 32 - where starred_by_did = ? and repo_at = ?` 33 - row := e.QueryRow(query, starredByDid, repoAt) 33 + where did = ? and subject_at = ?` 34 + row := e.QueryRow(query, did, subjectAt) 34 35 35 36 var star models.Star 36 37 var created string 37 - err := row.Scan(&star.StarredByDid, &star.RepoAt, &created, &star.Rkey) 38 + err := row.Scan(&star.Did, &star.RepoAt, &created, &star.Rkey) 38 39 if err != nil { 39 40 return nil, err 40 41 } ··· 51 52 } 52 53 53 54 // Remove a star 54 - func DeleteStar(e Execer, starredByDid string, repoAt syntax.ATURI) error { 55 - _, err := e.Exec(`delete from stars where starred_by_did = ? and repo_at = ?`, starredByDid, repoAt) 55 + func DeleteStar(e Execer, did string, subjectAt syntax.ATURI) error { 56 + _, err := e.Exec(`delete from stars where did = ? and subject_at = ?`, did, subjectAt) 56 57 return err 57 58 } 58 59 59 60 // Remove a star 60 - func DeleteStarByRkey(e Execer, starredByDid string, rkey string) error { 61 - _, err := e.Exec(`delete from stars where starred_by_did = ? and rkey = ?`, starredByDid, rkey) 61 + func DeleteStarByRkey(e Execer, did string, rkey string) error { 62 + _, err := e.Exec(`delete from stars where did = ? and rkey = ?`, did, rkey) 62 63 return err 63 64 } 64 65 65 - func GetStarCount(e Execer, repoAt syntax.ATURI) (int, error) { 66 + func GetStarCount(e Execer, subjectAt syntax.ATURI) (int, error) { 66 67 stars := 0 67 68 err := e.QueryRow( 68 - `select count(starred_by_did) from stars where repo_at = ?`, repoAt).Scan(&stars) 69 + `select count(did) from stars where subject_at = ?`, subjectAt).Scan(&stars) 69 70 if err != nil { 70 71 return 0, err 71 72 } ··· 89 90 } 90 91 91 92 query := fmt.Sprintf(` 92 - SELECT repo_at 93 + SELECT subject_at 93 94 FROM stars 94 - WHERE starred_by_did = ? AND repo_at IN (%s) 95 + WHERE did = ? AND subject_at IN (%s) 95 96 `, strings.Join(placeholders, ",")) 96 97 97 98 rows, err := e.Query(query, args...) 
··· 118 119 return result, nil 119 120 } 120 121 121 - func GetStarStatus(e Execer, userDid string, repoAt syntax.ATURI) bool { 122 - statuses, err := getStarStatuses(e, userDid, []syntax.ATURI{repoAt}) 122 + func GetStarStatus(e Execer, userDid string, subjectAt syntax.ATURI) bool { 123 + statuses, err := getStarStatuses(e, userDid, []syntax.ATURI{subjectAt}) 123 124 if err != nil { 124 125 return false 125 126 } 126 - return statuses[repoAt.String()] 127 + return statuses[subjectAt.String()] 127 128 } 128 129 129 130 // GetStarStatuses returns a map of repo URIs to star status for a given user 130 - func GetStarStatuses(e Execer, userDid string, repoAts []syntax.ATURI) (map[string]bool, error) { 131 - return getStarStatuses(e, userDid, repoAts) 131 + func GetStarStatuses(e Execer, userDid string, subjectAts []syntax.ATURI) (map[string]bool, error) { 132 + return getStarStatuses(e, userDid, subjectAts) 132 133 } 133 - func GetStars(e Execer, limit int, filters ...filter) ([]models.Star, error) { 134 + 135 + // GetRepoStars return a list of stars each holding target repository. 136 + // If there isn't known repo with starred at-uri, those stars will be ignored. 137 + func GetRepoStars(e Execer, limit int, filters ...orm.Filter) ([]models.RepoStar, error) { 134 138 var conditions []string 135 139 var args []any 136 140 for _, filter := range filters { ··· 149 153 } 150 154 151 155 repoQuery := fmt.Sprintf( 152 - `select starred_by_did, repo_at, created, rkey 156 + `select did, subject_at, created, rkey 153 157 from stars 154 158 %s 155 159 order by created desc ··· 161 165 if err != nil { 162 166 return nil, err 163 167 } 168 + defer rows.Close() 164 169 165 170 starMap := make(map[string][]models.Star) 166 171 for rows.Next() { 167 172 var star models.Star 168 173 var created string 169 - err := rows.Scan(&star.StarredByDid, &star.RepoAt, &created, &star.Rkey) 174 + err := rows.Scan(&star.Did, &star.RepoAt, &created, &star.Rkey) 170 175 if err != nil { 171 176 return nil, err 172 177 } ··· 192 197 return nil, nil 193 198 } 194 199 195 - repos, err := GetRepos(e, 0, FilterIn("at_uri", args)) 200 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args)) 196 201 if err != nil { 197 202 return nil, err 198 203 } 199 204 205 + var repoStars []models.RepoStar 200 206 for _, r := range repos { 201 207 if stars, ok := starMap[string(r.RepoAt())]; ok { 202 - for i := range stars { 203 - stars[i].Repo = &r 208 + for _, star := range stars { 209 + repoStars = append(repoStars, models.RepoStar{ 210 + Star: star, 211 + Repo: &r, 212 + }) 204 213 } 205 214 } 206 215 } 207 216 208 - var stars []models.Star 209 - for _, s := range starMap { 210 - stars = append(stars, s...) 
211 - } 212 - 213 - slices.SortFunc(stars, func(a, b models.Star) int { 217 + slices.SortFunc(repoStars, func(a, b models.RepoStar) int { 214 218 if a.Created.After(b.Created) { 215 219 return -1 216 220 } ··· 220 224 return 0 221 225 }) 222 226 223 - return stars, nil 227 + return repoStars, nil 224 228 } 225 229 226 - func CountStars(e Execer, filters ...filter) (int64, error) { 230 + func CountStars(e Execer, filters ...orm.Filter) (int64, error) { 227 231 var conditions []string 228 232 var args []any 229 233 for _, filter := range filters { ··· 247 251 return count, nil 248 252 } 249 253 250 - func GetAllStars(e Execer, limit int) ([]models.Star, error) { 251 - var stars []models.Star 252 - 253 - rows, err := e.Query(` 254 - select 255 - s.starred_by_did, 256 - s.repo_at, 257 - s.rkey, 258 - s.created, 259 - r.did, 260 - r.name, 261 - r.knot, 262 - r.rkey, 263 - r.created 264 - from stars s 265 - join repos r on s.repo_at = r.at_uri 266 - `) 267 - 268 - if err != nil { 269 - return nil, err 270 - } 271 - defer rows.Close() 272 - 273 - for rows.Next() { 274 - var star models.Star 275 - var repo models.Repo 276 - var starCreatedAt, repoCreatedAt string 277 - 278 - if err := rows.Scan( 279 - &star.StarredByDid, 280 - &star.RepoAt, 281 - &star.Rkey, 282 - &starCreatedAt, 283 - &repo.Did, 284 - &repo.Name, 285 - &repo.Knot, 286 - &repo.Rkey, 287 - &repoCreatedAt, 288 - ); err != nil { 289 - return nil, err 290 - } 291 - 292 - star.Created, err = time.Parse(time.RFC3339, starCreatedAt) 293 - if err != nil { 294 - star.Created = time.Now() 295 - } 296 - repo.Created, err = time.Parse(time.RFC3339, repoCreatedAt) 297 - if err != nil { 298 - repo.Created = time.Now() 299 - } 300 - star.Repo = &repo 301 - 302 - stars = append(stars, star) 303 - } 304 - 305 - if err := rows.Err(); err != nil { 306 - return nil, err 307 - } 308 - 309 - return stars, nil 310 - } 311 - 312 254 // GetTopStarredReposLastWeek returns the top 8 most starred repositories from the last week 313 255 func GetTopStarredReposLastWeek(e Execer) ([]models.Repo, error) { 314 256 // first, get the top repo URIs by star count from the last week 315 257 query := ` 316 258 with recent_starred_repos as ( 317 - select distinct repo_at 259 + select distinct subject_at 318 260 from stars 319 261 where created >= datetime('now', '-7 days') 320 262 ), 321 263 repo_star_counts as ( 322 264 select 323 - s.repo_at, 265 + s.subject_at, 324 266 count(*) as stars_gained_last_week 325 267 from stars s 326 - join recent_starred_repos rsr on s.repo_at = rsr.repo_at 268 + join recent_starred_repos rsr on s.subject_at = rsr.subject_at 327 269 where s.created >= datetime('now', '-7 days') 328 - group by s.repo_at 270 + group by s.subject_at 329 271 ) 330 - select rsc.repo_at 272 + select rsc.subject_at 331 273 from repo_star_counts rsc 332 274 order by rsc.stars_gained_last_week desc 333 275 limit 8 ··· 358 300 } 359 301 360 302 // get full repo data 361 - repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris)) 303 + repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris)) 362 304 if err != nil { 363 305 return nil, err 364 306 }
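Stars now key on (did, subject_at) instead of (starred_by_did, repo_at), and GetStars plus GetAllStars are folded into GetRepoStars, which returns models.RepoStar pairs rather than patching a Repo pointer onto each Star. A caller sketch, assumed and not part of this diff:

    package example

    import (
        "fmt"

        "tangled.org/core/appview/db"
        "tangled.org/core/orm"
    )

    func recentStarsByFollowing(e db.Execer, followingDids []string) error {
        repoStars, err := db.GetRepoStars(e, 50, orm.FilterIn("did", followingDids))
        if err != nil {
            return err
        }
        for _, rs := range repoStars {
            // Stars whose subject repo is unknown are dropped by GetRepoStars.
            fmt.Println(rs.Star.Did, "starred", rs.Repo.Name, "at", rs.Star.Created)
        }
        return nil
    }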
+4 -3
appview/db/strings.go
··· 8 8 "time" 9 9 10 10 "tangled.org/core/appview/models" 11 + "tangled.org/core/orm" 11 12 ) 12 13 13 14 func AddString(e Execer, s models.String) error { ··· 44 45 return err 45 46 } 46 47 47 - func GetStrings(e Execer, limit int, filters ...filter) ([]models.String, error) { 48 + func GetStrings(e Execer, limit int, filters ...orm.Filter) ([]models.String, error) { 48 49 var all []models.String 49 50 50 51 var conditions []string ··· 127 128 return all, nil 128 129 } 129 130 130 - func CountStrings(e Execer, filters ...filter) (int64, error) { 131 + func CountStrings(e Execer, filters ...orm.Filter) (int64, error) { 131 132 var conditions []string 132 133 var args []any 133 134 for _, filter := range filters { ··· 151 152 return count, nil 152 153 } 153 154 154 - func DeleteString(e Execer, filters ...filter) error { 155 + func DeleteString(e Execer, filters ...orm.Filter) error { 155 156 var conditions []string 156 157 var args []any 157 158 for _, filter := range filters {
+11 -20
appview/db/timeline.go
··· 5 5 6 6 "github.com/bluesky-social/indigo/atproto/syntax" 7 7 "tangled.org/core/appview/models" 8 + "tangled.org/core/orm" 8 9 ) 9 10 10 11 // TODO: this gathers heterogenous events from different sources and aggregates ··· 84 85 } 85 86 86 87 func getTimelineRepos(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 87 - filters := make([]filter, 0) 88 + filters := make([]orm.Filter, 0) 88 89 if userIsFollowing != nil { 89 - filters = append(filters, FilterIn("did", userIsFollowing)) 90 + filters = append(filters, orm.FilterIn("did", userIsFollowing)) 90 91 } 91 92 92 93 repos, err := GetRepos(e, limit, filters...) ··· 104 105 105 106 var origRepos []models.Repo 106 107 if args != nil { 107 - origRepos, err = GetRepos(e, 0, FilterIn("at_uri", args)) 108 + origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args)) 108 109 } 109 110 if err != nil { 110 111 return nil, err ··· 144 145 } 145 146 146 147 func getTimelineStars(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 147 - filters := make([]filter, 0) 148 + filters := make([]orm.Filter, 0) 148 149 if userIsFollowing != nil { 149 - filters = append(filters, FilterIn("starred_by_did", userIsFollowing)) 150 + filters = append(filters, orm.FilterIn("did", userIsFollowing)) 150 151 } 151 152 152 - stars, err := GetStars(e, limit, filters...) 153 + stars, err := GetRepoStars(e, limit, filters...) 153 154 if err != nil { 154 155 return nil, err 155 156 } 156 157 157 - // filter star records without a repo 158 - n := 0 159 - for _, s := range stars { 160 - if s.Repo != nil { 161 - stars[n] = s 162 - n++ 163 - } 164 - } 165 - stars = stars[:n] 166 - 167 158 var repos []models.Repo 168 159 for _, s := range stars { 169 160 repos = append(repos, *s.Repo) ··· 179 170 isStarred, starCount := getRepoStarInfo(s.Repo, starStatuses) 180 171 181 172 events = append(events, models.TimelineEvent{ 182 - Star: &s, 173 + RepoStar: &s, 183 174 EventAt: s.Created, 184 175 IsStarred: isStarred, 185 176 StarCount: starCount, ··· 190 181 } 191 182 192 183 func getTimelineFollows(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) { 193 - filters := make([]filter, 0) 184 + filters := make([]orm.Filter, 0) 194 185 if userIsFollowing != nil { 195 - filters = append(filters, FilterIn("user_did", userIsFollowing)) 186 + filters = append(filters, orm.FilterIn("user_did", userIsFollowing)) 196 187 } 197 188 198 189 follows, err := GetFollows(e, limit, filters...) ··· 209 200 return nil, nil 210 201 } 211 202 212 - profiles, err := GetProfiles(e, FilterIn("did", subjects)) 203 + profiles, err := GetProfiles(e, orm.FilterIn("did", subjects)) 213 204 if err != nil { 214 205 return nil, err 215 206 }
+7 -12
appview/email/email.go
··· 3 3 import (
 4 4 "fmt"
 5 5 "net"
 6 - "regexp"
 6 + "net/mail"
 7 7 "strings"
 8 8
 9 9 "github.com/resend/resend-go/v2"
··· 34 34 }
 35 35
 36 36 func IsValidEmail(email string) bool {
 37 - // Basic length check
 38 - if len(email) < 3 || len(email) > 254 {
 37 + // Reject whitespace (ParseAddress normalizes it away)
 38 + if strings.ContainsAny(email, " \t\n\r") {
 39 39 return false
 40 40 }
 41 41
 42 - // Regular expression for email validation (RFC 5322 compliant)
 43 - pattern := `^[a-zA-Z0-9.!#$%&'*+/=?^_\x60{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$`
 44 -
 45 - // Compile regex
 46 - regex := regexp.MustCompile(pattern)
 47 -
 48 - // Check if email matches regex pattern
 49 - if !regex.MatchString(email) {
 42 + // Use stdlib RFC 5322 parser
 43 + addr, err := mail.ParseAddress(email)
 44 + if err != nil {
 50 45 return false
 51 46 }
 52 47
 53 48 // Split email into local and domain parts
 54 - parts := strings.Split(email, "@")
 49 + parts := strings.Split(addr.Address, "@")
 55 50 domain := parts[1]
 56 51
 57 52 mx, err := net.LookupMX(domain)
+53
appview/email/email_test.go
··· 1 + package email
 2 +
 3 + import (
 4 + "testing"
 5 + )
 6 +
 7 + func TestIsValidEmail(t *testing.T) {
 8 + tests := []struct {
 9 + name string
 10 + email string
 11 + want bool
 12 + }{
 13 + // Valid emails using RFC 2606 reserved domains
 14 + {"standard email", "user@example.com", true},
 15 + {"single char local", "a@example.com", true},
 16 + {"dot in middle", "first.last@example.com", true},
 17 + {"multiple dots", "a.b.c@example.com", true},
 18 + {"plus tag", "user+tag@example.com", true},
 19 + {"numbers", "user123@example.com", true},
 20 + {"example.org", "user@example.org", true},
 21 + {"example.net", "user@example.net", true},
 22 +
 23 + // Invalid format - rejected by mail.ParseAddress
 24 + {"empty string", "", false},
 25 + {"no at sign", "userexample.com", false},
 26 + {"no domain", "user@", false},
 27 + {"no local part", "@example.com", false},
 28 + {"double at", "user@@example.com", false},
 29 + {"just at sign", "@", false},
 30 + {"leading dot", ".user@example.com", false},
 31 + {"trailing dot", "user.@example.com", false},
 32 + {"consecutive dots", "user..name@example.com", false},
 33 +
 34 + // Whitespace - rejected before parsing
 35 + {"space in local", "user @example.com", false},
 36 + {"space in domain", "user@ example.com", false},
 37 + {"tab", "user\t@example.com", false},
 38 + {"newline", "user\n@example.com", false},
 39 +
 40 + // MX lookup - using RFC 2606 reserved TLDs (guaranteed no MX)
 41 + {"invalid TLD", "user@example.invalid", false},
 42 + {"test TLD", "user@mail.test", false},
 43 + }
 44 +
 45 + for _, tt := range tests {
 46 + t.Run(tt.name, func(t *testing.T) {
 47 + got := IsValidEmail(tt.email)
 48 + if got != tt.want {
 49 + t.Errorf("IsValidEmail(%q) = %v, want %v", tt.email, got, tt.want)
 50 + }
 51 + })
 52 + }
 53 + }
+3 -1
appview/indexer/issues/indexer.go
··· 56 56 log.Fatalln("failed to populate issue indexer", err)
 57 57 }
 58 58 }
 59 - l.Info("Initialized the issue indexer")
 59 +
 60 + count, _ := ix.indexer.DocCount()
 61 + l.Info("Initialized the issue indexer", "docCount", count)
 60 62 }
 61 63
 62 64 func generateIssueIndexMapping() (mapping.IndexMapping, error) {
+1 -1
appview/indexer/notifier.go
··· 11 11
 12 12 var _ notify.Notifier = &Indexer{}
 13 13
 14 - func (ix *Indexer) NewIssue(ctx context.Context, issue *models.Issue) {
 14 + func (ix *Indexer) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
 15 15 l := log.FromContext(ctx).With("notifier", "indexer", "issue", issue)
 16 16 l.Debug("indexing new issue")
 17 17 err := ix.Issues.Index(ctx, *issue)
+3 -1
appview/indexer/pulls/indexer.go
··· 55 55 log.Fatalln("failed to populate pull indexer", err)
 56 56 }
 57 57 }
 58 - l.Info("Initialized the pull indexer")
 58 +
 59 + count, _ := ix.indexer.DocCount()
 60 + l.Info("Initialized the pull indexer", "docCount", count)
 59 61 }
 60 62
 61 63 func generatePullIndexMapping() (mapping.IndexMapping, error) {
+50 -32
appview/ingester.go
··· 21 21 "tangled.org/core/appview/serververify" 22 22 "tangled.org/core/appview/validator" 23 23 "tangled.org/core/idresolver" 24 + "tangled.org/core/orm" 24 25 "tangled.org/core/rbac" 25 26 ) 26 27 ··· 121 122 return err 122 123 } 123 124 err = db.AddStar(i.Db, &models.Star{ 124 - StarredByDid: did, 125 - RepoAt: subjectUri, 126 - Rkey: e.Commit.RKey, 125 + Did: did, 126 + RepoAt: subjectUri, 127 + Rkey: e.Commit.RKey, 127 128 }) 128 129 case jmodels.CommitOperationDelete: 129 130 err = db.DeleteStarByRkey(i.Db, did, e.Commit.RKey) ··· 253 254 254 255 err = db.AddArtifact(i.Db, artifact) 255 256 case jmodels.CommitOperationDelete: 256 - err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey)) 257 + err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey)) 257 258 } 258 259 259 260 if err != nil { ··· 350 351 351 352 err = db.UpsertProfile(tx, &profile) 352 353 case jmodels.CommitOperationDelete: 353 - err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey)) 354 + err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey)) 354 355 } 355 356 356 357 if err != nil { ··· 424 425 // get record from db first 425 426 members, err := db.GetSpindleMembers( 426 427 ddb, 427 - db.FilterEq("did", did), 428 - db.FilterEq("rkey", rkey), 428 + orm.FilterEq("did", did), 429 + orm.FilterEq("rkey", rkey), 429 430 ) 430 431 if err != nil || len(members) != 1 { 431 432 return fmt.Errorf("failed to get member: %w, len(members) = %d", err, len(members)) ··· 440 441 // remove record by rkey && update enforcer 441 442 if err = db.RemoveSpindleMember( 442 443 tx, 443 - db.FilterEq("did", did), 444 - db.FilterEq("rkey", rkey), 444 + orm.FilterEq("did", did), 445 + orm.FilterEq("rkey", rkey), 445 446 ); err != nil { 446 447 return fmt.Errorf("failed to remove from db: %w", err) 447 448 } ··· 523 524 // get record from db first 524 525 spindles, err := db.GetSpindles( 525 526 ddb, 526 - db.FilterEq("owner", did), 527 - db.FilterEq("instance", instance), 527 + orm.FilterEq("owner", did), 528 + orm.FilterEq("instance", instance), 528 529 ) 529 530 if err != nil || len(spindles) != 1 { 530 531 return fmt.Errorf("failed to get spindles: %w, len(spindles) = %d", err, len(spindles)) ··· 543 544 // remove spindle members first 544 545 err = db.RemoveSpindleMember( 545 546 tx, 546 - db.FilterEq("owner", did), 547 - db.FilterEq("instance", instance), 547 + orm.FilterEq("owner", did), 548 + orm.FilterEq("instance", instance), 548 549 ) 549 550 if err != nil { 550 551 return err ··· 552 553 553 554 err = db.DeleteSpindle( 554 555 tx, 555 - db.FilterEq("owner", did), 556 - db.FilterEq("instance", instance), 556 + orm.FilterEq("owner", did), 557 + orm.FilterEq("instance", instance), 557 558 ) 558 559 if err != nil { 559 560 return err ··· 621 622 case jmodels.CommitOperationDelete: 622 623 if err := db.DeleteString( 623 624 ddb, 624 - db.FilterEq("did", did), 625 - db.FilterEq("rkey", rkey), 625 + orm.FilterEq("did", did), 626 + orm.FilterEq("rkey", rkey), 626 627 ); err != nil { 627 628 l.Error("failed to delete", "err", err) 628 629 return fmt.Errorf("failed to delete string record: %w", err) ··· 740 741 // get record from db first 741 742 registrations, err := db.GetRegistrations( 742 743 ddb, 743 - db.FilterEq("domain", domain), 744 - db.FilterEq("did", did), 744 + orm.FilterEq("domain", domain), 745 + orm.FilterEq("did", did), 745 746 ) 746 747 if err != nil { 747 748 return 
fmt.Errorf("failed to get registration: %w", err) ··· 762 763 763 764 err = db.DeleteKnot( 764 765 tx, 765 - db.FilterEq("did", did), 766 - db.FilterEq("domain", domain), 766 + orm.FilterEq("did", did), 767 + orm.FilterEq("domain", domain), 767 768 ) 768 769 if err != nil { 769 770 return err ··· 841 842 return nil 842 843 843 844 case jmodels.CommitOperationDelete: 845 + tx, err := ddb.BeginTx(ctx, nil) 846 + if err != nil { 847 + l.Error("failed to begin transaction", "err", err) 848 + return err 849 + } 850 + defer tx.Rollback() 851 + 844 852 if err := db.DeleteIssues( 845 - ddb, 846 - db.FilterEq("did", did), 847 - db.FilterEq("rkey", rkey), 853 + tx, 854 + did, 855 + rkey, 848 856 ); err != nil { 849 857 l.Error("failed to delete", "err", err) 850 858 return fmt.Errorf("failed to delete issue record: %w", err) 859 + } 860 + if err := tx.Commit(); err != nil { 861 + l.Error("failed to commit txn", "err", err) 862 + return err 851 863 } 852 864 853 865 return nil ··· 888 900 return fmt.Errorf("failed to validate comment: %w", err) 889 901 } 890 902 891 - _, err = db.AddIssueComment(ddb, *comment) 903 + tx, err := ddb.Begin() 904 + if err != nil { 905 + return fmt.Errorf("failed to start transaction: %w", err) 906 + } 907 + defer tx.Rollback() 908 + 909 + _, err = db.AddIssueComment(tx, *comment) 892 910 if err != nil { 893 911 return fmt.Errorf("failed to create issue comment: %w", err) 894 912 } 895 913 896 - return nil 914 + return tx.Commit() 897 915 898 916 case jmodels.CommitOperationDelete: 899 917 if err := db.DeleteIssueComments( 900 918 ddb, 901 - db.FilterEq("did", did), 902 - db.FilterEq("rkey", rkey), 919 + orm.FilterEq("did", did), 920 + orm.FilterEq("rkey", rkey), 903 921 ); err != nil { 904 922 return fmt.Errorf("failed to delete issue comment record: %w", err) 905 923 } ··· 952 970 case jmodels.CommitOperationDelete: 953 971 if err := db.DeleteLabelDefinition( 954 972 ddb, 955 - db.FilterEq("did", did), 956 - db.FilterEq("rkey", rkey), 973 + orm.FilterEq("did", did), 974 + orm.FilterEq("rkey", rkey), 957 975 ); err != nil { 958 976 return fmt.Errorf("failed to delete labeldef record: %w", err) 959 977 } ··· 993 1011 var repo *models.Repo 994 1012 switch collection { 995 1013 case tangled.RepoIssueNSID: 996 - i, err := db.GetIssues(ddb, db.FilterEq("at_uri", subject)) 1014 + i, err := db.GetIssues(ddb, orm.FilterEq("at_uri", subject)) 997 1015 if err != nil || len(i) != 1 { 998 1016 return fmt.Errorf("failed to find subject: %w || subject count %d", err, len(i)) 999 1017 } ··· 1002 1020 return fmt.Errorf("unsupport label subject: %s", collection) 1003 1021 } 1004 1022 1005 - actx, err := db.NewLabelApplicationCtx(ddb, db.FilterIn("at_uri", repo.Labels)) 1023 + actx, err := db.NewLabelApplicationCtx(ddb, orm.FilterIn("at_uri", repo.Labels)) 1006 1024 if err != nil { 1007 1025 return fmt.Errorf("failed to build label application ctx: %w", err) 1008 1026 }
+178 -131
appview/issues/issues.go
··· 7 7 "fmt" 8 8 "log/slog" 9 9 "net/http" 10 - "slices" 11 10 "time" 12 11 13 12 comatproto "github.com/bluesky-social/indigo/api/atproto" ··· 20 19 "tangled.org/core/appview/config" 21 20 "tangled.org/core/appview/db" 22 21 issues_indexer "tangled.org/core/appview/indexer/issues" 22 + "tangled.org/core/appview/mentions" 23 23 "tangled.org/core/appview/models" 24 24 "tangled.org/core/appview/notify" 25 25 "tangled.org/core/appview/oauth" 26 26 "tangled.org/core/appview/pages" 27 + "tangled.org/core/appview/pages/repoinfo" 27 28 "tangled.org/core/appview/pagination" 28 29 "tangled.org/core/appview/reporesolver" 29 30 "tangled.org/core/appview/validator" 30 31 "tangled.org/core/idresolver" 32 + "tangled.org/core/orm" 33 + "tangled.org/core/rbac" 31 34 "tangled.org/core/tid" 32 35 ) 33 36 34 37 type Issues struct { 35 - oauth *oauth.OAuth 36 - repoResolver *reporesolver.RepoResolver 37 - pages *pages.Pages 38 - idResolver *idresolver.Resolver 39 - db *db.DB 40 - config *config.Config 41 - notifier notify.Notifier 42 - logger *slog.Logger 43 - validator *validator.Validator 44 - indexer *issues_indexer.Indexer 38 + oauth *oauth.OAuth 39 + repoResolver *reporesolver.RepoResolver 40 + enforcer *rbac.Enforcer 41 + pages *pages.Pages 42 + idResolver *idresolver.Resolver 43 + mentionsResolver *mentions.Resolver 44 + db *db.DB 45 + config *config.Config 46 + notifier notify.Notifier 47 + logger *slog.Logger 48 + validator *validator.Validator 49 + indexer *issues_indexer.Indexer 45 50 } 46 51 47 52 func New( 48 53 oauth *oauth.OAuth, 49 54 repoResolver *reporesolver.RepoResolver, 55 + enforcer *rbac.Enforcer, 50 56 pages *pages.Pages, 51 57 idResolver *idresolver.Resolver, 58 + mentionsResolver *mentions.Resolver, 52 59 db *db.DB, 53 60 config *config.Config, 54 61 notifier notify.Notifier, ··· 57 64 logger *slog.Logger, 58 65 ) *Issues { 59 66 return &Issues{ 60 - oauth: oauth, 61 - repoResolver: repoResolver, 62 - pages: pages, 63 - idResolver: idResolver, 64 - db: db, 65 - config: config, 66 - notifier: notifier, 67 - logger: logger, 68 - validator: validator, 69 - indexer: indexer, 67 + oauth: oauth, 68 + repoResolver: repoResolver, 69 + enforcer: enforcer, 70 + pages: pages, 71 + idResolver: idResolver, 72 + mentionsResolver: mentionsResolver, 73 + db: db, 74 + config: config, 75 + notifier: notifier, 76 + logger: logger, 77 + validator: validator, 78 + indexer: indexer, 70 79 } 71 80 } 72 81 ··· 96 105 userReactions = db.GetReactionStatusMap(rp.db, user.Did, issue.AtUri()) 97 106 } 98 107 108 + backlinks, err := db.GetBacklinks(rp.db, issue.AtUri()) 109 + if err != nil { 110 + l.Error("failed to fetch backlinks", "err", err) 111 + rp.pages.Error503(w) 112 + return 113 + } 114 + 99 115 labelDefs, err := db.GetLabelDefinitions( 100 116 rp.db, 101 - db.FilterIn("at_uri", f.Repo.Labels), 102 - db.FilterContains("scope", tangled.RepoIssueNSID), 117 + orm.FilterIn("at_uri", f.Labels), 118 + orm.FilterContains("scope", tangled.RepoIssueNSID), 103 119 ) 104 120 if err != nil { 105 121 l.Error("failed to fetch labels", "err", err) ··· 114 130 115 131 rp.pages.RepoSingleIssue(w, pages.RepoSingleIssueParams{ 116 132 LoggedInUser: user, 117 - RepoInfo: f.RepoInfo(user), 133 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 118 134 Issue: issue, 119 135 CommentList: issue.CommentList(), 136 + Backlinks: backlinks, 120 137 OrderedReactionKinds: models.OrderedReactionKinds, 121 138 Reactions: reactionMap, 122 139 UserReacted: userReactions, ··· 127 144 func (rp *Issues) EditIssue(w http.ResponseWriter, r 
*http.Request) { 128 145 l := rp.logger.With("handler", "EditIssue") 129 146 user := rp.oauth.GetUser(r) 130 - f, err := rp.repoResolver.Resolve(r) 131 - if err != nil { 132 - l.Error("failed to get repo and knot", "err", err) 133 - return 134 - } 135 147 136 148 issue, ok := r.Context().Value("issue").(*models.Issue) 137 149 if !ok { ··· 144 156 case http.MethodGet: 145 157 rp.pages.EditIssueFragment(w, pages.EditIssueParams{ 146 158 LoggedInUser: user, 147 - RepoInfo: f.RepoInfo(user), 159 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 148 160 Issue: issue, 149 161 }) 150 162 case http.MethodPost: ··· 152 164 newIssue := issue 153 165 newIssue.Title = r.FormValue("title") 154 166 newIssue.Body = r.FormValue("body") 167 + newIssue.Mentions, newIssue.References = rp.mentionsResolver.Resolve(r.Context(), newIssue.Body) 155 168 156 169 if err := rp.validator.ValidateIssue(newIssue); err != nil { 157 170 l.Error("validation error", "err", err) ··· 221 234 l := rp.logger.With("handler", "DeleteIssue") 222 235 noticeId := "issue-actions-error" 223 236 224 - user := rp.oauth.GetUser(r) 225 - 226 237 f, err := rp.repoResolver.Resolve(r) 227 238 if err != nil { 228 239 l.Error("failed to get repo and knot", "err", err) ··· 237 248 } 238 249 l = l.With("did", issue.Did, "rkey", issue.Rkey) 239 250 251 + tx, err := rp.db.Begin() 252 + if err != nil { 253 + l.Error("failed to start transaction", "err", err) 254 + rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.") 255 + return 256 + } 257 + defer tx.Rollback() 258 + 240 259 // delete from PDS 241 260 client, err := rp.oauth.AuthorizedClient(r) 242 261 if err != nil { ··· 257 276 } 258 277 259 278 // delete from db 260 - if err := db.DeleteIssues(rp.db, db.FilterEq("id", issue.Id)); err != nil { 279 + if err := db.DeleteIssues(tx, issue.Did, issue.Rkey); err != nil { 261 280 l.Error("failed to delete issue", "err", err) 262 281 rp.pages.Notice(w, noticeId, "Failed to delete issue.") 263 282 return 264 283 } 284 + tx.Commit() 265 285 266 286 rp.notifier.DeleteIssue(r.Context(), issue) 267 287 268 288 // return to all issues page 269 - rp.pages.HxRedirect(w, "/"+f.RepoInfo(user).FullName()+"/issues") 289 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 290 + rp.pages.HxRedirect(w, "/"+ownerSlashRepo+"/issues") 270 291 } 271 292 272 293 func (rp *Issues) CloseIssue(w http.ResponseWriter, r *http.Request) { ··· 285 306 return 286 307 } 287 308 288 - collaborators, err := f.Collaborators(r.Context()) 289 - if err != nil { 290 - l.Error("failed to fetch repo collaborators", "err", err) 291 - } 292 - isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool { 293 - return user.Did == collab.Did 294 - }) 309 + roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 310 + isRepoOwner := roles.IsOwner() 311 + isCollaborator := roles.IsCollaborator() 295 312 isIssueOwner := user.Did == issue.Did 296 313 297 314 // TODO: make this more granular 298 - if isIssueOwner || isCollaborator { 315 + if isIssueOwner || isRepoOwner || isCollaborator { 299 316 err = db.CloseIssues( 300 317 rp.db, 301 - db.FilterEq("id", issue.Id), 318 + orm.FilterEq("id", issue.Id), 302 319 ) 303 320 if err != nil { 304 321 l.Error("failed to close issue", "err", err) ··· 311 328 // notify about the issue closure 312 329 rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue) 313 330 314 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", 
f.OwnerSlashRepo(), issue.IssueId)) 331 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 332 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId)) 315 333 return 316 334 } else { 317 335 l.Error("user is not permitted to close issue") ··· 336 354 return 337 355 } 338 356 339 - collaborators, err := f.Collaborators(r.Context()) 340 - if err != nil { 341 - l.Error("failed to fetch repo collaborators", "err", err) 342 - } 343 - isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool { 344 - return user.Did == collab.Did 345 - }) 357 + roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 358 + isRepoOwner := roles.IsOwner() 359 + isCollaborator := roles.IsCollaborator() 346 360 isIssueOwner := user.Did == issue.Did 347 361 348 - if isCollaborator || isIssueOwner { 362 + if isCollaborator || isRepoOwner || isIssueOwner { 349 363 err := db.ReopenIssues( 350 364 rp.db, 351 - db.FilterEq("id", issue.Id), 365 + orm.FilterEq("id", issue.Id), 352 366 ) 353 367 if err != nil { 354 368 l.Error("failed to reopen issue", "err", err) ··· 361 375 // notify about the issue reopen 362 376 rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue) 363 377 364 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId)) 378 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 379 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId)) 365 380 return 366 381 } else { 367 382 l.Error("user is not the owner of the repo") ··· 398 413 replyTo = &replyToUri 399 414 } 400 415 416 + mentions, references := rp.mentionsResolver.Resolve(r.Context(), body) 417 + 401 418 comment := models.IssueComment{ 402 - Did: user.Did, 403 - Rkey: tid.TID(), 404 - IssueAt: issue.AtUri().String(), 405 - ReplyTo: replyTo, 406 - Body: body, 407 - Created: time.Now(), 419 + Did: user.Did, 420 + Rkey: tid.TID(), 421 + IssueAt: issue.AtUri().String(), 422 + ReplyTo: replyTo, 423 + Body: body, 424 + Created: time.Now(), 425 + Mentions: mentions, 426 + References: references, 408 427 } 409 428 if err = rp.validator.ValidateIssueComment(&comment); err != nil { 410 429 l.Error("failed to validate comment", "err", err) ··· 441 460 } 442 461 }() 443 462 444 - commentId, err := db.AddIssueComment(rp.db, comment) 463 + tx, err := rp.db.Begin() 464 + if err != nil { 465 + l.Error("failed to start transaction", "err", err) 466 + rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.") 467 + return 468 + } 469 + defer tx.Rollback() 470 + 471 + commentId, err := db.AddIssueComment(tx, comment) 445 472 if err != nil { 446 473 l.Error("failed to create comment", "err", err) 447 474 rp.pages.Notice(w, "issue-comment", "Failed to create comment.") 475 + return 476 + } 477 + err = tx.Commit() 478 + if err != nil { 479 + l.Error("failed to commit transaction", "err", err) 480 + rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.") 448 481 return 449 482 } 450 483 ··· 453 486 454 487 // notify about the new comment 455 488 comment.Id = commentId 456 - rp.notifier.NewIssueComment(r.Context(), &comment) 489 + 490 + rp.notifier.NewIssueComment(r.Context(), &comment, mentions) 457 491 458 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", f.OwnerSlashRepo(), issue.IssueId, commentId)) 492 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 493 + rp.pages.HxLocation(w, 
fmt.Sprintf("/%s/issues/%d#comment-%d", ownerSlashRepo, issue.IssueId, commentId)) 459 494 } 460 495 461 496 func (rp *Issues) IssueComment(w http.ResponseWriter, r *http.Request) { 462 497 l := rp.logger.With("handler", "IssueComment") 463 498 user := rp.oauth.GetUser(r) 464 - f, err := rp.repoResolver.Resolve(r) 465 - if err != nil { 466 - l.Error("failed to get repo and knot", "err", err) 467 - return 468 - } 469 499 470 500 issue, ok := r.Context().Value("issue").(*models.Issue) 471 501 if !ok { ··· 477 507 commentId := chi.URLParam(r, "commentId") 478 508 comments, err := db.GetIssueComments( 479 509 rp.db, 480 - db.FilterEq("id", commentId), 510 + orm.FilterEq("id", commentId), 481 511 ) 482 512 if err != nil { 483 513 l.Error("failed to fetch comment", "id", commentId) ··· 493 523 494 524 rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{ 495 525 LoggedInUser: user, 496 - RepoInfo: f.RepoInfo(user), 526 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 497 527 Issue: issue, 498 528 Comment: &comment, 499 529 }) ··· 502 532 func (rp *Issues) EditIssueComment(w http.ResponseWriter, r *http.Request) { 503 533 l := rp.logger.With("handler", "EditIssueComment") 504 534 user := rp.oauth.GetUser(r) 505 - f, err := rp.repoResolver.Resolve(r) 506 - if err != nil { 507 - l.Error("failed to get repo and knot", "err", err) 508 - return 509 - } 510 535 511 536 issue, ok := r.Context().Value("issue").(*models.Issue) 512 537 if !ok { ··· 518 543 commentId := chi.URLParam(r, "commentId") 519 544 comments, err := db.GetIssueComments( 520 545 rp.db, 521 - db.FilterEq("id", commentId), 546 + orm.FilterEq("id", commentId), 522 547 ) 523 548 if err != nil { 524 549 l.Error("failed to fetch comment", "id", commentId) ··· 542 567 case http.MethodGet: 543 568 rp.pages.EditIssueCommentFragment(w, pages.EditIssueCommentParams{ 544 569 LoggedInUser: user, 545 - RepoInfo: f.RepoInfo(user), 570 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 546 571 Issue: issue, 547 572 Comment: &comment, 548 573 }) ··· 560 585 newComment := comment 561 586 newComment.Body = newBody 562 587 newComment.Edited = &now 588 + newComment.Mentions, newComment.References = rp.mentionsResolver.Resolve(r.Context(), newBody) 589 + 563 590 record := newComment.AsRecord() 564 591 565 - _, err = db.AddIssueComment(rp.db, newComment) 592 + tx, err := rp.db.Begin() 593 + if err != nil { 594 + l.Error("failed to start transaction", "err", err) 595 + rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.") 596 + return 597 + } 598 + defer tx.Rollback() 599 + 600 + _, err = db.AddIssueComment(tx, newComment) 566 601 if err != nil { 567 602 l.Error("failed to perferom update-description query", "err", err) 568 603 rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.") 569 604 return 570 605 } 606 + tx.Commit() 571 607 572 608 // rkey is optional, it was introduced later 573 609 if newComment.Rkey != "" { ··· 596 632 // return new comment body with htmx 597 633 rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{ 598 634 LoggedInUser: user, 599 - RepoInfo: f.RepoInfo(user), 635 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 600 636 Issue: issue, 601 637 Comment: &newComment, 602 638 }) ··· 606 642 func (rp *Issues) ReplyIssueCommentPlaceholder(w http.ResponseWriter, r *http.Request) { 607 643 l := rp.logger.With("handler", "ReplyIssueCommentPlaceholder") 608 644 user := rp.oauth.GetUser(r) 609 - f, err := rp.repoResolver.Resolve(r) 610 - if err != nil { 611 
- l.Error("failed to get repo and knot", "err", err) 612 - return 613 - } 614 645 615 646 issue, ok := r.Context().Value("issue").(*models.Issue) 616 647 if !ok { ··· 622 653 commentId := chi.URLParam(r, "commentId") 623 654 comments, err := db.GetIssueComments( 624 655 rp.db, 625 - db.FilterEq("id", commentId), 656 + orm.FilterEq("id", commentId), 626 657 ) 627 658 if err != nil { 628 659 l.Error("failed to fetch comment", "id", commentId) ··· 638 669 639 670 rp.pages.ReplyIssueCommentPlaceholderFragment(w, pages.ReplyIssueCommentPlaceholderParams{ 640 671 LoggedInUser: user, 641 - RepoInfo: f.RepoInfo(user), 672 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 642 673 Issue: issue, 643 674 Comment: &comment, 644 675 }) ··· 647 678 func (rp *Issues) ReplyIssueComment(w http.ResponseWriter, r *http.Request) { 648 679 l := rp.logger.With("handler", "ReplyIssueComment") 649 680 user := rp.oauth.GetUser(r) 650 - f, err := rp.repoResolver.Resolve(r) 651 - if err != nil { 652 - l.Error("failed to get repo and knot", "err", err) 653 - return 654 - } 655 681 656 682 issue, ok := r.Context().Value("issue").(*models.Issue) 657 683 if !ok { ··· 663 689 commentId := chi.URLParam(r, "commentId") 664 690 comments, err := db.GetIssueComments( 665 691 rp.db, 666 - db.FilterEq("id", commentId), 692 + orm.FilterEq("id", commentId), 667 693 ) 668 694 if err != nil { 669 695 l.Error("failed to fetch comment", "id", commentId) ··· 679 705 680 706 rp.pages.ReplyIssueCommentFragment(w, pages.ReplyIssueCommentParams{ 681 707 LoggedInUser: user, 682 - RepoInfo: f.RepoInfo(user), 708 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 683 709 Issue: issue, 684 710 Comment: &comment, 685 711 }) ··· 688 714 func (rp *Issues) DeleteIssueComment(w http.ResponseWriter, r *http.Request) { 689 715 l := rp.logger.With("handler", "DeleteIssueComment") 690 716 user := rp.oauth.GetUser(r) 691 - f, err := rp.repoResolver.Resolve(r) 692 - if err != nil { 693 - l.Error("failed to get repo and knot", "err", err) 694 - return 695 - } 696 717 697 718 issue, ok := r.Context().Value("issue").(*models.Issue) 698 719 if !ok { ··· 704 725 commentId := chi.URLParam(r, "commentId") 705 726 comments, err := db.GetIssueComments( 706 727 rp.db, 707 - db.FilterEq("id", commentId), 728 + orm.FilterEq("id", commentId), 708 729 ) 709 730 if err != nil { 710 731 l.Error("failed to fetch comment", "id", commentId) ··· 731 752 732 753 // optimistic deletion 733 754 deleted := time.Now() 734 - err = db.DeleteIssueComments(rp.db, db.FilterEq("id", comment.Id)) 755 + err = db.DeleteIssueComments(rp.db, orm.FilterEq("id", comment.Id)) 735 756 if err != nil { 736 757 l.Error("failed to delete comment", "err", err) 737 758 rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment") ··· 763 784 // htmx fragment of comment after deletion 764 785 rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{ 765 786 LoggedInUser: user, 766 - RepoInfo: f.RepoInfo(user), 787 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 767 788 Issue: issue, 768 789 Comment: &comment, 769 790 }) ··· 793 814 return 794 815 } 795 816 817 + totalIssues := 0 818 + if isOpen { 819 + totalIssues = f.RepoStats.IssueCount.Open 820 + } else { 821 + totalIssues = f.RepoStats.IssueCount.Closed 822 + } 823 + 796 824 keyword := params.Get("q") 797 825 798 - var ids []int64 826 + var issues []models.Issue 799 827 searchOpts := models.IssueSearchOptions{ 800 828 Keyword: keyword, 801 829 RepoAt: f.RepoAt().String(), ··· 808 836 l.Error("failed to 
search for issues", "err", err) 809 837 return 810 838 } 811 - ids = res.Hits 812 - l.Debug("searched issues with indexer", "count", len(ids)) 839 + l.Debug("searched issues with indexer", "count", len(res.Hits)) 840 + totalIssues = int(res.Total) 841 + 842 + issues, err = db.GetIssues( 843 + rp.db, 844 + orm.FilterIn("id", res.Hits), 845 + ) 846 + if err != nil { 847 + l.Error("failed to get issues", "err", err) 848 + rp.pages.Notice(w, "issues", "Failed to load issues. Try again later.") 849 + return 850 + } 851 + 813 852 } else { 814 - ids, err = db.GetIssueIDs(rp.db, searchOpts) 853 + openInt := 0 854 + if isOpen { 855 + openInt = 1 856 + } 857 + issues, err = db.GetIssuesPaginated( 858 + rp.db, 859 + page, 860 + orm.FilterEq("repo_at", f.RepoAt()), 861 + orm.FilterEq("open", openInt), 862 + ) 815 863 if err != nil { 816 - l.Error("failed to search for issues", "err", err) 864 + l.Error("failed to get issues", "err", err) 865 + rp.pages.Notice(w, "issues", "Failed to load issues. Try again later.") 817 866 return 818 867 } 819 - l.Debug("indexed all issues from the db", "count", len(ids)) 820 - } 821 - 822 - issues, err := db.GetIssues( 823 - rp.db, 824 - db.FilterIn("id", ids), 825 - ) 826 - if err != nil { 827 - l.Error("failed to get issues", "err", err) 828 - rp.pages.Notice(w, "issues", "Failed to load issues. Try again later.") 829 - return 830 868 } 831 869 832 870 labelDefs, err := db.GetLabelDefinitions( 833 871 rp.db, 834 - db.FilterIn("at_uri", f.Repo.Labels), 835 - db.FilterContains("scope", tangled.RepoIssueNSID), 872 + orm.FilterIn("at_uri", f.Labels), 873 + orm.FilterContains("scope", tangled.RepoIssueNSID), 836 874 ) 837 875 if err != nil { 838 876 l.Error("failed to fetch labels", "err", err) ··· 847 885 848 886 rp.pages.RepoIssues(w, pages.RepoIssuesParams{ 849 887 LoggedInUser: rp.oauth.GetUser(r), 850 - RepoInfo: f.RepoInfo(user), 888 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 851 889 Issues: issues, 890 + IssueCount: totalIssues, 852 891 LabelDefs: defs, 853 892 FilteringByOpen: isOpen, 854 893 FilterQuery: keyword, ··· 870 909 case http.MethodGet: 871 910 rp.pages.RepoNewIssue(w, pages.RepoNewIssueParams{ 872 911 LoggedInUser: user, 873 - RepoInfo: f.RepoInfo(user), 912 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 874 913 }) 875 914 case http.MethodPost: 915 + body := r.FormValue("body") 916 + mentions, references := rp.mentionsResolver.Resolve(r.Context(), body) 917 + 876 918 issue := &models.Issue{ 877 - RepoAt: f.RepoAt(), 878 - Rkey: tid.TID(), 879 - Title: r.FormValue("title"), 880 - Body: r.FormValue("body"), 881 - Open: true, 882 - Did: user.Did, 883 - Created: time.Now(), 884 - Repo: &f.Repo, 919 + RepoAt: f.RepoAt(), 920 + Rkey: tid.TID(), 921 + Title: r.FormValue("title"), 922 + Body: body, 923 + Open: true, 924 + Did: user.Did, 925 + Created: time.Now(), 926 + Mentions: mentions, 927 + References: references, 928 + Repo: f, 885 929 } 886 930 887 931 if err := rp.validator.ValidateIssue(issue); err != nil { ··· 948 992 949 993 // everything is successful, do not rollback the atproto record 950 994 atUri = "" 951 - rp.notifier.NewIssue(r.Context(), issue) 952 - rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId)) 995 + 996 + rp.notifier.NewIssue(r.Context(), issue, mentions) 997 + 998 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 999 + rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId)) 953 1000 return 954 1001 } 955 1002 }
+3 -3
appview/issues/opengraph.go
··· 232 232
 233 233 // Get owner handle for avatar
 234 234 var ownerHandle string
 235 - owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Repo.Did)
 235 + owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Did)
 236 236 if err != nil {
 237 - ownerHandle = f.Repo.Did
 237 + ownerHandle = f.Did
 238 238 } else {
 239 239 ownerHandle = "@" + owner.Handle.String()
 240 240 }
 241 241
 242 - card, err := rp.drawIssueSummaryCard(issue, &f.Repo, commentCount, ownerHandle)
 242 + card, err := rp.drawIssueSummaryCard(issue, f, commentCount, ownerHandle)
 243 243 if err != nil {
 244 244 log.Println("failed to draw issue summary card", err)
 245 245 http.Error(w, "failed to draw issue summary card", http.StatusInternalServerError)
+37 -24
appview/knots/knots.go
··· 21 21 "tangled.org/core/appview/xrpcclient" 22 22 "tangled.org/core/eventconsumer" 23 23 "tangled.org/core/idresolver" 24 + "tangled.org/core/orm" 24 25 "tangled.org/core/rbac" 25 26 "tangled.org/core/tid" 26 27 ··· 39 40 Knotstream *eventconsumer.Consumer 40 41 } 41 42 43 + type tab = map[string]any 44 + 45 + var ( 46 + knotsTabs []tab = []tab{ 47 + {"Name": "profile", "Icon": "user"}, 48 + {"Name": "keys", "Icon": "key"}, 49 + {"Name": "emails", "Icon": "mail"}, 50 + {"Name": "notifications", "Icon": "bell"}, 51 + {"Name": "knots", "Icon": "volleyball"}, 52 + {"Name": "spindles", "Icon": "spool"}, 53 + } 54 + ) 55 + 42 56 func (k *Knots) Router() http.Handler { 43 57 r := chi.NewRouter() 44 58 ··· 59 73 user := k.OAuth.GetUser(r) 60 74 registrations, err := db.GetRegistrations( 61 75 k.Db, 62 - db.FilterEq("did", user.Did), 76 + orm.FilterEq("did", user.Did), 63 77 ) 64 78 if err != nil { 65 79 k.Logger.Error("failed to fetch knot registrations", "err", err) ··· 70 84 k.Pages.Knots(w, pages.KnotsParams{ 71 85 LoggedInUser: user, 72 86 Registrations: registrations, 87 + Tabs: knotsTabs, 88 + Tab: "knots", 73 89 }) 74 90 } 75 91 ··· 87 103 88 104 registrations, err := db.GetRegistrations( 89 105 k.Db, 90 - db.FilterEq("did", user.Did), 91 - db.FilterEq("domain", domain), 106 + orm.FilterEq("did", user.Did), 107 + orm.FilterEq("domain", domain), 92 108 ) 93 109 if err != nil { 94 110 l.Error("failed to get registrations", "err", err) ··· 112 128 repos, err := db.GetRepos( 113 129 k.Db, 114 130 0, 115 - db.FilterEq("knot", domain), 131 + orm.FilterEq("knot", domain), 116 132 ) 117 133 if err != nil { 118 134 l.Error("failed to get knot repos", "err", err) ··· 132 148 Members: members, 133 149 Repos: repoMap, 134 150 IsOwner: true, 151 + Tabs: knotsTabs, 152 + Tab: "knots", 135 153 }) 136 154 } 137 155 ··· 276 294 // get record from db first 277 295 registrations, err := db.GetRegistrations( 278 296 k.Db, 279 - db.FilterEq("did", user.Did), 280 - db.FilterEq("domain", domain), 297 + orm.FilterEq("did", user.Did), 298 + orm.FilterEq("domain", domain), 281 299 ) 282 300 if err != nil { 283 301 l.Error("failed to get registration", "err", err) ··· 304 322 305 323 err = db.DeleteKnot( 306 324 tx, 307 - db.FilterEq("did", user.Did), 308 - db.FilterEq("domain", domain), 325 + orm.FilterEq("did", user.Did), 326 + orm.FilterEq("domain", domain), 309 327 ) 310 328 if err != nil { 311 329 l.Error("failed to delete registration", "err", err) ··· 385 403 // get record from db first 386 404 registrations, err := db.GetRegistrations( 387 405 k.Db, 388 - db.FilterEq("did", user.Did), 389 - db.FilterEq("domain", domain), 406 + orm.FilterEq("did", user.Did), 407 + orm.FilterEq("domain", domain), 390 408 ) 391 409 if err != nil { 392 410 l.Error("failed to get registration", "err", err) ··· 476 494 // Get updated registration to show 477 495 registrations, err = db.GetRegistrations( 478 496 k.Db, 479 - db.FilterEq("did", user.Did), 480 - db.FilterEq("domain", domain), 497 + orm.FilterEq("did", user.Did), 498 + orm.FilterEq("domain", domain), 481 499 ) 482 500 if err != nil { 483 501 l.Error("failed to get registration", "err", err) ··· 512 530 513 531 registrations, err := db.GetRegistrations( 514 532 k.Db, 515 - db.FilterEq("did", user.Did), 516 - db.FilterEq("domain", domain), 517 - db.FilterIsNot("registered", "null"), 533 + orm.FilterEq("did", user.Did), 534 + orm.FilterEq("domain", domain), 535 + orm.FilterIsNot("registered", "null"), 518 536 ) 519 537 if err != nil { 520 538 l.Error("failed to get 
registration", "err", err) ··· 596 614 } 597 615 598 616 // success 599 - k.Pages.HxRedirect(w, fmt.Sprintf("/knots/%s", domain)) 617 + k.Pages.HxRedirect(w, fmt.Sprintf("/settings/knots/%s", domain)) 600 618 } 601 619 602 620 func (k *Knots) removeMember(w http.ResponseWriter, r *http.Request) { ··· 620 638 621 639 registrations, err := db.GetRegistrations( 622 640 k.Db, 623 - db.FilterEq("did", user.Did), 624 - db.FilterEq("domain", domain), 625 - db.FilterIsNot("registered", "null"), 641 + orm.FilterEq("did", user.Did), 642 + orm.FilterEq("domain", domain), 643 + orm.FilterIsNot("registered", "null"), 626 644 ) 627 645 if err != nil { 628 646 l.Error("failed to get registration", "err", err) ··· 645 663 memberId, err := k.IdResolver.ResolveIdent(r.Context(), member) 646 664 if err != nil { 647 665 l.Error("failed to resolve member identity to handle", "err", err) 648 - k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 649 - return 650 - } 651 - if memberId.Handle.IsInvalidHandle() { 652 - l.Error("failed to resolve member identity to handle") 653 666 k.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 654 667 return 655 668 }
+5 -4
appview/labels/labels.go
··· 16 16 "tangled.org/core/appview/oauth" 17 17 "tangled.org/core/appview/pages" 18 18 "tangled.org/core/appview/validator" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 "tangled.org/core/tid" 21 22 ··· 88 89 repoAt := r.Form.Get("repo") 89 90 subjectUri := r.Form.Get("subject") 90 91 91 - repo, err := db.GetRepo(l.db, db.FilterEq("at_uri", repoAt)) 92 + repo, err := db.GetRepo(l.db, orm.FilterEq("at_uri", repoAt)) 92 93 if err != nil { 93 94 fail("Failed to get repository.", err) 94 95 return 95 96 } 96 97 97 98 // find all the labels that this repo subscribes to 98 - repoLabels, err := db.GetRepoLabels(l.db, db.FilterEq("repo_at", repoAt)) 99 + repoLabels, err := db.GetRepoLabels(l.db, orm.FilterEq("repo_at", repoAt)) 99 100 if err != nil { 100 101 fail("Failed to get labels for this repository.", err) 101 102 return ··· 106 107 labelAts = append(labelAts, rl.LabelAt.String()) 107 108 } 108 109 109 - actx, err := db.NewLabelApplicationCtx(l.db, db.FilterIn("at_uri", labelAts)) 110 + actx, err := db.NewLabelApplicationCtx(l.db, orm.FilterIn("at_uri", labelAts)) 110 111 if err != nil { 111 112 fail("Invalid form data.", err) 112 113 return 113 114 } 114 115 115 116 // calculate the start state by applying already known labels 116 - existingOps, err := db.GetLabelOps(l.db, db.FilterEq("subject", subjectUri)) 117 + existingOps, err := db.GetLabelOps(l.db, orm.FilterEq("subject", subjectUri)) 117 118 if err != nil { 118 119 fail("Invalid form data.", err) 119 120 return
+67
appview/mentions/resolver.go
··· 1 + package mentions
 2 +
 3 + import (
 4 + "context"
 5 + "log/slog"
 6 +
 7 + "github.com/bluesky-social/indigo/atproto/syntax"
 8 + "tangled.org/core/appview/config"
 9 + "tangled.org/core/appview/db"
 10 + "tangled.org/core/appview/models"
 11 + "tangled.org/core/appview/pages/markup"
 12 + "tangled.org/core/idresolver"
 13 + )
 14 +
 15 + type Resolver struct {
 16 + config *config.Config
 17 + idResolver *idresolver.Resolver
 18 + execer db.Execer
 19 + logger *slog.Logger
 20 + }
 21 +
 22 + func New(
 23 + config *config.Config,
 24 + idResolver *idresolver.Resolver,
 25 + execer db.Execer,
 26 + logger *slog.Logger,
 27 + ) *Resolver {
 28 + return &Resolver{
 29 + config,
 30 + idResolver,
 31 + execer,
 32 + logger,
 33 + }
 34 + }
 35 +
 36 + func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) {
 37 + l := r.logger.With("method", "Resolve")
 38 +
 39 + rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source)
 40 + l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs)
 41 +
 42 + idents := r.idResolver.ResolveIdents(ctx, rawMentions)
 43 + var mentions []syntax.DID
 44 + for _, ident := range idents {
 45 + if ident != nil && !ident.Handle.IsInvalidHandle() {
 46 + mentions = append(mentions, ident.DID)
 47 + }
 48 + }
 49 + l.Debug("found mentions", "mentions", mentions)
 50 +
 51 + var resolvedRefs []models.ReferenceLink
 52 + for _, rawRef := range rawRefs {
 53 + ident, err := r.idResolver.ResolveIdent(ctx, rawRef.Handle)
 54 + if err != nil || ident == nil || ident.Handle.IsInvalidHandle() {
 55 + continue
 56 + }
 57 + rawRef.Handle = string(ident.DID)
 58 + resolvedRefs = append(resolvedRefs, rawRef)
 59 + }
 60 + aturiRefs, err := db.ValidateReferenceLinks(r.execer, resolvedRefs)
 61 + if err != nil {
 62 + l.Error("failed running query", "err", err)
 63 + }
 64 + l.Debug("found references", "refs", aturiRefs)
 65 +
 66 + return mentions, aturiRefs
 67 + }
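A small sketch of how the new resolver is meant to be called from a handler, assuming it is built once with the appview's config, identity resolver, database handle, and logger; the wiring shown here is illustrative, the real setup lives in the appview bootstrap:

```go
package example

import (
	"context"
	"log/slog"

	"tangled.org/core/appview/config"
	"tangled.org/core/appview/db"
	"tangled.org/core/appview/mentions"
	"tangled.org/core/idresolver"
)

// resolveBody turns a free-form issue/comment body into mention DIDs and
// reference AT-URIs, the two slices the handlers store and notify on.
func resolveBody(ctx context.Context, cfg *config.Config, ids *idresolver.Resolver, d db.Execer, l *slog.Logger, body string) {
	r := mentions.New(cfg, ids, d, l)
	dids, refs := r.Resolve(ctx, body)
	_ = dids // e.g. handed to the notifier alongside the new issue or comment
	_ = refs // stored on the model as References
}
```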
+11 -4
appview/middleware/middleware.go
··· 18 18 "tangled.org/core/appview/pagination" 19 19 "tangled.org/core/appview/reporesolver" 20 20 "tangled.org/core/idresolver" 21 + "tangled.org/core/orm" 21 22 "tangled.org/core/rbac" 22 23 ) 23 24 ··· 164 165 ok, err := mw.enforcer.E.Enforce(actor.Did, f.Knot, f.DidSlashRepo(), requiredPerm) 165 166 if err != nil || !ok { 166 167 // we need a logged in user 167 - log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.OwnerSlashRepo()) 168 + log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.DidSlashRepo()) 168 169 http.Error(w, "Forbiden", http.StatusUnauthorized) 169 170 return 170 171 } ··· 206 207 return func(next http.Handler) http.Handler { 207 208 return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { 208 209 repoName := chi.URLParam(req, "repo") 210 + repoName = strings.TrimSuffix(repoName, ".git") 211 + 209 212 id, ok := req.Context().Value("resolvedId").(identity.Identity) 210 213 if !ok { 211 214 log.Println("malformed middleware") ··· 215 218 216 219 repo, err := db.GetRepo( 217 220 mw.db, 218 - db.FilterEq("did", id.DID.String()), 219 - db.FilterEq("name", repoName), 221 + orm.FilterEq("did", id.DID.String()), 222 + orm.FilterEq("name", repoName), 220 223 ) 221 224 if err != nil { 222 225 log.Println("failed to resolve repo", "err", err) 226 + w.WriteHeader(http.StatusNotFound) 223 227 mw.pages.ErrorKnot404(w) 224 228 return 225 229 } ··· 237 241 f, err := mw.repoResolver.Resolve(r) 238 242 if err != nil { 239 243 log.Println("failed to fully resolve repo", err) 244 + w.WriteHeader(http.StatusNotFound) 240 245 mw.pages.ErrorKnot404(w) 241 246 return 242 247 } ··· 285 290 f, err := mw.repoResolver.Resolve(r) 286 291 if err != nil { 287 292 log.Println("failed to fully resolve repo", err) 293 + w.WriteHeader(http.StatusNotFound) 288 294 mw.pages.ErrorKnot404(w) 289 295 return 290 296 } ··· 321 327 f, err := mw.repoResolver.Resolve(r) 322 328 if err != nil { 323 329 log.Println("failed to fully resolve repo", err) 330 + w.WriteHeader(http.StatusNotFound) 324 331 mw.pages.ErrorKnot404(w) 325 332 return 326 333 } 327 334 328 - fullName := f.OwnerHandle() + "/" + f.Name 335 + fullName := reporesolver.GetBaseRepoPath(r, f) 329 336 330 337 if r.Header.Get("User-Agent") == "Go-http-client/1.1" { 331 338 if r.URL.Query().Get("go-get") == "1" {
+70 -34
appview/models/issue.go
··· 10 10 ) 11 11 12 12 type Issue struct { 13 - Id int64 14 - Did string 15 - Rkey string 16 - RepoAt syntax.ATURI 17 - IssueId int 18 - Created time.Time 19 - Edited *time.Time 20 - Deleted *time.Time 21 - Title string 22 - Body string 23 - Open bool 13 + Id int64 14 + Did string 15 + Rkey string 16 + RepoAt syntax.ATURI 17 + IssueId int 18 + Created time.Time 19 + Edited *time.Time 20 + Deleted *time.Time 21 + Title string 22 + Body string 23 + Open bool 24 + Mentions []syntax.DID 25 + References []syntax.ATURI 24 26 25 27 // optionally, populate this when querying for reverse mappings 26 28 // like comment counts, parent repo etc. ··· 34 36 } 35 37 36 38 func (i *Issue) AsRecord() tangled.RepoIssue { 39 + mentions := make([]string, len(i.Mentions)) 40 + for i, did := range i.Mentions { 41 + mentions[i] = string(did) 42 + } 43 + references := make([]string, len(i.References)) 44 + for i, uri := range i.References { 45 + references[i] = string(uri) 46 + } 37 47 return tangled.RepoIssue{ 38 - Repo: i.RepoAt.String(), 39 - Title: i.Title, 40 - Body: &i.Body, 41 - CreatedAt: i.Created.Format(time.RFC3339), 48 + Repo: i.RepoAt.String(), 49 + Title: i.Title, 50 + Body: &i.Body, 51 + Mentions: mentions, 52 + References: references, 53 + CreatedAt: i.Created.Format(time.RFC3339), 42 54 } 43 55 } 44 56 ··· 161 173 } 162 174 163 175 type IssueComment struct { 164 - Id int64 165 - Did string 166 - Rkey string 167 - IssueAt string 168 - ReplyTo *string 169 - Body string 170 - Created time.Time 171 - Edited *time.Time 172 - Deleted *time.Time 176 + Id int64 177 + Did string 178 + Rkey string 179 + IssueAt string 180 + ReplyTo *string 181 + Body string 182 + Created time.Time 183 + Edited *time.Time 184 + Deleted *time.Time 185 + Mentions []syntax.DID 186 + References []syntax.ATURI 173 187 } 174 188 175 189 func (i *IssueComment) AtUri() syntax.ATURI { ··· 177 191 } 178 192 179 193 func (i *IssueComment) AsRecord() tangled.RepoIssueComment { 194 + mentions := make([]string, len(i.Mentions)) 195 + for i, did := range i.Mentions { 196 + mentions[i] = string(did) 197 + } 198 + references := make([]string, len(i.References)) 199 + for i, uri := range i.References { 200 + references[i] = string(uri) 201 + } 180 202 return tangled.RepoIssueComment{ 181 - Body: i.Body, 182 - Issue: i.IssueAt, 183 - CreatedAt: i.Created.Format(time.RFC3339), 184 - ReplyTo: i.ReplyTo, 203 + Body: i.Body, 204 + Issue: i.IssueAt, 205 + CreatedAt: i.Created.Format(time.RFC3339), 206 + ReplyTo: i.ReplyTo, 207 + Mentions: mentions, 208 + References: references, 185 209 } 186 210 } 187 211 ··· 205 229 return nil, err 206 230 } 207 231 232 + i := record 233 + mentions := make([]syntax.DID, len(record.Mentions)) 234 + for i, did := range record.Mentions { 235 + mentions[i] = syntax.DID(did) 236 + } 237 + references := make([]syntax.ATURI, len(record.References)) 238 + for i, uri := range i.References { 239 + references[i] = syntax.ATURI(uri) 240 + } 241 + 208 242 comment := IssueComment{ 209 - Did: ownerDid, 210 - Rkey: rkey, 211 - Body: record.Body, 212 - IssueAt: record.Issue, 213 - ReplyTo: record.ReplyTo, 214 - Created: created, 243 + Did: ownerDid, 244 + Rkey: rkey, 245 + Body: record.Body, 246 + IssueAt: record.Issue, 247 + ReplyTo: record.ReplyTo, 248 + Created: created, 249 + Mentions: mentions, 250 + References: references, 215 251 } 216 252 217 253 return &comment, nil
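The record mapping above carries mentions and references as plain strings on the lexicon record. A short sketch of that conversion; the DIDs and AT-URIs below are placeholders, not values from the change:

```go
package example

import (
	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/appview/models"
)

// issueRecordSketch shows how typed Mentions/References end up as plain
// strings on the record returned by AsRecord.
func issueRecordSketch() {
	issue := models.Issue{
		Title:      "example issue",
		Body:       "cc @bob.example.com",
		Mentions:   []syntax.DID{"did:plc:example-bob"},          // placeholder DID
		References: []syntax.ATURI{"at://did:plc:example/a/b/c"}, // placeholder AT-URI
	}
	rec := issue.AsRecord()
	_ = rec.Mentions   // []string{"did:plc:example-bob"}
	_ = rec.References // []string{"at://did:plc:example/a/b/c"}
}
```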
+7
appview/models/notifications.go
··· 20 20 NotificationTypeIssueReopen NotificationType = "issue_reopen" 21 21 NotificationTypePullClosed NotificationType = "pull_closed" 22 22 NotificationTypePullReopen NotificationType = "pull_reopen" 23 + NotificationTypeUserMentioned NotificationType = "user_mentioned" 23 24 ) 24 25 25 26 type Notification struct { ··· 63 64 return "git-pull-request-create" 64 65 case NotificationTypeFollowed: 65 66 return "user-plus" 67 + case NotificationTypeUserMentioned: 68 + return "at-sign" 66 69 default: 67 70 return "" 68 71 } ··· 84 87 PullCreated bool 85 88 PullCommented bool 86 89 Followed bool 90 + UserMentioned bool 87 91 PullMerged bool 88 92 IssueClosed bool 89 93 EmailNotifications bool ··· 113 117 return prefs.PullCreated // same pref for now 114 118 case NotificationTypeFollowed: 115 119 return prefs.Followed 120 + case NotificationTypeUserMentioned: 121 + return prefs.UserMentioned 116 122 default: 117 123 return false 118 124 } ··· 127 133 PullCreated: true, 128 134 PullCommented: true, 129 135 Followed: true, 136 + UserMentioned: true, 130 137 PullMerged: true, 131 138 IssueClosed: true, 132 139 EmailNotifications: false,
+3 -1
appview/models/profile.go
··· 111 111 }
 112 112
 113 113 type ByMonth struct {
 114 + Commits int
 114 115 RepoEvents []RepoEvent
 115 116 IssueEvents IssueEvents
 116 117 PullEvents PullEvents
··· 119 120 func (b ByMonth) IsEmpty() bool {
 120 121 return len(b.RepoEvents) == 0 &&
 121 122 len(b.IssueEvents.Items) == 0 &&
 122 - len(b.PullEvents.Items) == 0
 123 + len(b.PullEvents.Items) == 0 &&
 124 + b.Commits == 0
 123 125 }
 124 126
 125 127 type IssueEvents struct {
+42 -4
appview/models/pull.go
··· 66 66 TargetBranch string 67 67 State PullState 68 68 Submissions []*PullSubmission 69 + Mentions []syntax.DID 70 + References []syntax.ATURI 69 71 70 72 // stacking 71 73 StackId string // nullable string ··· 81 83 Repo *Repo 82 84 } 83 85 86 + // NOTE: This method does not include patch blob in returned atproto record 84 87 func (p Pull) AsRecord() tangled.RepoPull { 85 88 var source *tangled.RepoPull_Source 86 89 if p.PullSource != nil { ··· 92 95 source.Repo = &s 93 96 } 94 97 } 98 + mentions := make([]string, len(p.Mentions)) 99 + for i, did := range p.Mentions { 100 + mentions[i] = string(did) 101 + } 102 + references := make([]string, len(p.References)) 103 + for i, uri := range p.References { 104 + references[i] = string(uri) 105 + } 95 106 96 107 record := tangled.RepoPull{ 97 - Title: p.Title, 98 - Body: &p.Body, 99 - CreatedAt: p.Created.Format(time.RFC3339), 108 + Title: p.Title, 109 + Body: &p.Body, 110 + Mentions: mentions, 111 + References: references, 112 + CreatedAt: p.Created.Format(time.RFC3339), 100 113 Target: &tangled.RepoPull_Target{ 101 114 Repo: p.RepoAt.String(), 102 115 Branch: p.TargetBranch, 103 116 }, 104 - Patch: p.LatestPatch(), 105 117 Source: source, 106 118 } 107 119 return record ··· 148 160 Body string 149 161 150 162 // meta 163 + Mentions []syntax.DID 164 + References []syntax.ATURI 165 + 166 + // meta 151 167 Created time.Time 152 168 } 169 + 170 + func (p *PullComment) AtUri() syntax.ATURI { 171 + return syntax.ATURI(p.CommentAt) 172 + } 173 + 174 + // func (p *PullComment) AsRecord() tangled.RepoPullComment { 175 + // mentions := make([]string, len(p.Mentions)) 176 + // for i, did := range p.Mentions { 177 + // mentions[i] = string(did) 178 + // } 179 + // references := make([]string, len(p.References)) 180 + // for i, uri := range p.References { 181 + // references[i] = string(uri) 182 + // } 183 + // return tangled.RepoPullComment{ 184 + // Pull: p.PullAt, 185 + // Body: p.Body, 186 + // Mentions: mentions, 187 + // References: references, 188 + // CreatedAt: p.Created.Format(time.RFC3339), 189 + // } 190 + // } 153 191 154 192 func (p *Pull) LastRoundNumber() int { 155 193 return len(p.Submissions) - 1
+49
appview/models/reference.go
··· 1 + package models
 2 +
 3 + import "fmt"
 4 +
 5 + type RefKind int
 6 +
 7 + const (
 8 + RefKindIssue RefKind = iota
 9 + RefKindPull
 10 + )
 11 +
 12 + func (k RefKind) String() string {
 13 + if k == RefKindIssue {
 14 + return "issues"
 15 + } else {
 16 + return "pulls"
 17 + }
 18 + }
 19 +
 20 + // /@alice.com/cool-proj/issues/123
 21 + // /@alice.com/cool-proj/issues/123#comment-321
 22 + type ReferenceLink struct {
 23 + Handle string
 24 + Repo string
 25 + Kind RefKind
 26 + SubjectId int
 27 + CommentId *int
 28 + }
 29 +
 30 + func (l ReferenceLink) String() string {
 31 + comment := ""
 32 + if l.CommentId != nil {
 33 + comment = fmt.Sprintf("#comment-%d", *l.CommentId)
 34 + }
 35 + return fmt.Sprintf("/%s/%s/%s/%d%s",
 36 + l.Handle,
 37 + l.Repo,
 38 + l.Kind.String(),
 39 + l.SubjectId,
 40 + comment,
 41 + )
 42 + }
 43 +
 44 + type RichReferenceLink struct {
 45 + ReferenceLink
 46 + Title string
 47 + // reusing PullState for both issue & PR
 48 + State PullState
 49 + }
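A quick sketch of what the formatter yields for the comment-anchored form shown in the doc comment above:

```go
package example

import (
	"fmt"

	"tangled.org/core/appview/models"
)

func referenceLinkSketch() {
	commentId := 321
	link := models.ReferenceLink{
		Handle:    "@alice.com",
		Repo:      "cool-proj",
		Kind:      models.RefKindIssue,
		SubjectId: 123,
		CommentId: &commentId,
	}
	fmt.Println(link.String()) // /@alice.com/cool-proj/issues/123#comment-321
}
```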
+47
appview/models/repo.go
··· 104 104 Repo *Repo
 105 105 Issues []Issue
 106 106 }
 107 +
 108 + type BlobContentType int
 109 +
 110 + const (
 111 + BlobContentTypeCode BlobContentType = iota
 112 + BlobContentTypeMarkup
 113 + BlobContentTypeImage
 114 + BlobContentTypeSvg
 115 + BlobContentTypeVideo
 116 + BlobContentTypeSubmodule
 117 + )
 118 +
 119 + func (ty BlobContentType) IsCode() bool { return ty == BlobContentTypeCode }
 120 + func (ty BlobContentType) IsMarkup() bool { return ty == BlobContentTypeMarkup }
 121 + func (ty BlobContentType) IsImage() bool { return ty == BlobContentTypeImage }
 122 + func (ty BlobContentType) IsSvg() bool { return ty == BlobContentTypeSvg }
 123 + func (ty BlobContentType) IsVideo() bool { return ty == BlobContentTypeVideo }
 124 + func (ty BlobContentType) IsSubmodule() bool { return ty == BlobContentTypeSubmodule }
 125 +
 126 + type BlobView struct {
 127 + HasTextView bool // can show as code/text
 128 + HasRenderedView bool // can show rendered (markup/image/video/submodule)
 129 + HasRawView bool // can download raw (everything except submodule)
 130 +
 131 + // current display mode
 132 + ShowingRendered bool // currently in rendered mode
 133 + ShowingText bool // currently in text/code mode
 134 +
 135 + // content type flags
 136 + ContentType BlobContentType
 137 +
 138 + // Content data
 139 + Contents string
 140 + ContentSrc string // URL for media files
 141 + Lines int
 142 + SizeHint uint64
 143 + }
 144 +
 145 + // if both views are available, then show a toggle between them
 146 + func (b BlobView) ShowToggle() bool {
 147 + return b.HasTextView && b.HasRenderedView
 148 + }
 149 +
 150 + func (b BlobView) IsUnsupported() bool {
 151 + // no view available, only raw
 152 + return !(b.HasRenderedView || b.HasTextView)
 153 + }
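One plausible way a blob handler could fill in the new view model for a markdown file; the field choices here (rendered by default, toggle available) are assumptions about how the pages use it, not part of the diff:

```go
package example

import (
	"strings"

	"tangled.org/core/appview/models"
)

// markdownBlobView: both a text view and a rendered view exist, so
// ShowToggle() reports true; rendered is chosen as the initial mode.
func markdownBlobView(contents string, size uint64) models.BlobView {
	return models.BlobView{
		HasTextView:     true,
		HasRenderedView: true,
		HasRawView:      true,
		ShowingRendered: true,
		ContentType:     models.BlobContentTypeMarkup,
		Contents:        contents,
		Lines:           strings.Count(contents, "\n") + 1,
		SizeHint:        size,
	}
}
```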
+14 -5
appview/models/star.go
··· 7 7 )
 8 8
 9 9 type Star struct {
 10 - StarredByDid string
 11 - RepoAt syntax.ATURI
 12 - Created time.Time
 13 - Rkey string
 10 + Did string
 11 + RepoAt syntax.ATURI
 12 + Created time.Time
 13 + Rkey string
 14 + }
 14 15
 15 - // optionally, populate this when querying for reverse mappings
 16 + // RepoStar is used for reverse mapping to repos
 17 + type RepoStar struct {
 18 + Star
 16 19 Repo *Repo
 17 20 }
 21 +
 22 + // StringStar is used for reverse mapping to strings
 23 + type StringStar struct {
 24 + Star
 25 + String *String
 26 + }
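Since RepoStar and StringStar embed Star, call sites keep reading the base fields through promotion; a tiny sketch (the DID and record key are placeholders):

```go
package example

import (
	"time"

	"tangled.org/core/appview/models"
)

func repoStarSketch(repo *models.Repo) {
	rs := models.RepoStar{
		Star: models.Star{
			Did:     "did:plc:example-alice", // placeholder
			RepoAt:  repo.RepoAt(),
			Rkey:    "3example", // placeholder record key
			Created: time.Now(),
		},
		Repo: repo,
	}
	_ = rs.Did // promoted from the embedded Star
}
```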
+1 -1
appview/models/string.go
··· 22 22 Edited *time.Time
 23 23 }
 24 24
 25 - func (s *String) StringAt() syntax.ATURI {
 25 + func (s *String) AtUri() syntax.ATURI {
 26 26 return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", s.Did, tangled.StringNSID, s.Rkey))
 27 27 }
 28 28
+1 -1
appview/models/timeline.go
··· 5 5 type TimelineEvent struct {
 6 6 *Repo
 7 7 *Follow
 8 - *Star
 8 + *RepoStar
 9 9
 10 10 EventAt time.Time
 11 11
+5 -4
appview/notifications/notifications.go
··· 11 11 "tangled.org/core/appview/oauth" 12 12 "tangled.org/core/appview/pages" 13 13 "tangled.org/core/appview/pagination" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 type Notifications struct { ··· 53 54 54 55 total, err := db.CountNotifications( 55 56 n.db, 56 - db.FilterEq("recipient_did", user.Did), 57 + orm.FilterEq("recipient_did", user.Did), 57 58 ) 58 59 if err != nil { 59 60 l.Error("failed to get total notifications", "err", err) ··· 64 65 notifications, err := db.GetNotificationsWithEntities( 65 66 n.db, 66 67 page, 67 - db.FilterEq("recipient_did", user.Did), 68 + orm.FilterEq("recipient_did", user.Did), 68 69 ) 69 70 if err != nil { 70 71 l.Error("failed to get notifications", "err", err) ··· 96 97 97 98 count, err := db.CountNotifications( 98 99 n.db, 99 - db.FilterEq("recipient_did", user.Did), 100 - db.FilterEq("read", 0), 100 + orm.FilterEq("recipient_did", user.Did), 101 + orm.FilterEq("read", 0), 101 102 ) 102 103 if err != nil { 103 104 http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+118 -67
appview/notify/db/db.go
··· 3 3 import ( 4 4 "context" 5 5 "log" 6 - "maps" 7 6 "slices" 8 7 9 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 + "tangled.org/core/api/tangled" 10 10 "tangled.org/core/appview/db" 11 11 "tangled.org/core/appview/models" 12 12 "tangled.org/core/appview/notify" 13 13 "tangled.org/core/idresolver" 14 + "tangled.org/core/orm" 15 + "tangled.org/core/sets" 16 + ) 17 + 18 + const ( 19 + maxMentions = 8 14 20 ) 15 21 16 22 type databaseNotifier struct { ··· 32 38 } 33 39 34 40 func (n *databaseNotifier) NewStar(ctx context.Context, star *models.Star) { 41 + if star.RepoAt.Collection().String() != tangled.RepoNSID { 42 + // skip string stars for now 43 + return 44 + } 35 45 var err error 36 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt))) 46 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt))) 37 47 if err != nil { 38 48 log.Printf("NewStar: failed to get repos: %v", err) 39 49 return 40 50 } 41 51 42 - actorDid := syntax.DID(star.StarredByDid) 43 - recipients := []syntax.DID{syntax.DID(repo.Did)} 52 + actorDid := syntax.DID(star.Did) 53 + recipients := sets.Singleton(syntax.DID(repo.Did)) 44 54 eventType := models.NotificationTypeRepoStarred 45 55 entityType := "repo" 46 56 entityId := star.RepoAt.String() ··· 64 74 // no-op 65 75 } 66 76 67 - func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue) { 77 + func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 78 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 79 + if err != nil { 80 + log.Printf("failed to fetch collaborators: %v", err) 81 + return 82 + } 68 83 69 84 // build the recipients list 70 85 // - owner of the repo 71 86 // - collaborators in the repo 72 - var recipients []syntax.DID 73 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 74 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt())) 75 - if err != nil { 76 - log.Printf("failed to fetch collaborators: %v", err) 77 - return 87 + // - remove users already mentioned 88 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 89 + for _, c := range collaborators { 90 + recipients.Insert(c.SubjectDid) 78 91 } 79 - for _, c := range collaborators { 80 - recipients = append(recipients, c.SubjectDid) 92 + for _, m := range mentions { 93 + recipients.Remove(m) 81 94 } 82 95 83 96 actorDid := syntax.DID(issue.Did) 84 - eventType := models.NotificationTypeIssueCreated 85 97 entityType := "issue" 86 98 entityId := issue.AtUri().String() 87 99 repoId := &issue.Repo.Id ··· 91 103 n.notifyEvent( 92 104 actorDid, 93 105 recipients, 94 - eventType, 106 + models.NotificationTypeIssueCreated, 107 + entityType, 108 + entityId, 109 + repoId, 110 + issueId, 111 + pullId, 112 + ) 113 + n.notifyEvent( 114 + actorDid, 115 + sets.Collect(slices.Values(mentions)), 116 + models.NotificationTypeUserMentioned, 95 117 entityType, 96 118 entityId, 97 119 repoId, ··· 100 122 ) 101 123 } 102 124 103 - func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment) { 104 - issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt)) 125 + func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 126 + issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt)) 105 127 if err != nil { 106 128 log.Printf("NewIssueComment: failed to get issues: %v", err) 107 129 
return ··· 112 134 } 113 135 issue := issues[0] 114 136 115 - var recipients []syntax.DID 116 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 137 + // built the recipients list: 138 + // - the owner of the repo 139 + // - | if the comment is a reply -> everybody on that thread 140 + // | if the comment is a top level -> just the issue owner 141 + // - remove mentioned users from the recipients list 142 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 117 143 118 144 if comment.IsReply() { 119 145 // if this comment is a reply, then notify everybody in that thread 120 146 parentAtUri := *comment.ReplyTo 121 - allThreads := issue.CommentList() 122 147 123 148 // find the parent thread, and add all DIDs from here to the recipient list 124 - for _, t := range allThreads { 149 + for _, t := range issue.CommentList() { 125 150 if t.Self.AtUri().String() == parentAtUri { 126 - recipients = append(recipients, t.Participants()...) 151 + for _, p := range t.Participants() { 152 + recipients.Insert(p) 153 + } 127 154 } 128 155 } 129 156 } else { 130 157 // not a reply, notify just the issue author 131 - recipients = append(recipients, syntax.DID(issue.Did)) 158 + recipients.Insert(syntax.DID(issue.Did)) 159 + } 160 + 161 + for _, m := range mentions { 162 + recipients.Remove(m) 132 163 } 133 164 134 165 actorDid := syntax.DID(comment.Did) 135 - eventType := models.NotificationTypeIssueCommented 136 166 entityType := "issue" 137 167 entityId := issue.AtUri().String() 138 168 repoId := &issue.Repo.Id ··· 142 172 n.notifyEvent( 143 173 actorDid, 144 174 recipients, 145 - eventType, 175 + models.NotificationTypeIssueCommented, 176 + entityType, 177 + entityId, 178 + repoId, 179 + issueId, 180 + pullId, 181 + ) 182 + n.notifyEvent( 183 + actorDid, 184 + sets.Collect(slices.Values(mentions)), 185 + models.NotificationTypeUserMentioned, 146 186 entityType, 147 187 entityId, 148 188 repoId, ··· 157 197 158 198 func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) { 159 199 actorDid := syntax.DID(follow.UserDid) 160 - recipients := []syntax.DID{syntax.DID(follow.SubjectDid)} 200 + recipients := sets.Singleton(syntax.DID(follow.SubjectDid)) 161 201 eventType := models.NotificationTypeFollowed 162 202 entityType := "follow" 163 203 entityId := follow.UserDid ··· 180 220 } 181 221 182 222 func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) { 183 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt))) 223 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 184 224 if err != nil { 185 225 log.Printf("NewPull: failed to get repos: %v", err) 226 + return 227 + } 228 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 229 + if err != nil { 230 + log.Printf("failed to fetch collaborators: %v", err) 186 231 return 187 232 } 188 233 189 234 // build the recipients list 190 235 // - owner of the repo 191 236 // - collaborators in the repo 192 - var recipients []syntax.DID 193 - recipients = append(recipients, syntax.DID(repo.Did)) 194 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt())) 195 - if err != nil { 196 - log.Printf("failed to fetch collaborators: %v", err) 197 - return 198 - } 237 + recipients := sets.Singleton(syntax.DID(repo.Did)) 199 238 for _, c := range collaborators { 200 - recipients = append(recipients, c.SubjectDid) 239 + recipients.Insert(c.SubjectDid) 201 240 } 202 241 203 242 actorDid := 
syntax.DID(pull.OwnerDid) ··· 221 260 ) 222 261 } 223 262 224 - func (n *databaseNotifier) NewPullComment(ctx context.Context, comment *models.PullComment) { 263 + func (n *databaseNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 225 264 pull, err := db.GetPull(n.db, 226 265 syntax.ATURI(comment.RepoAt), 227 266 comment.PullId, ··· 231 270 return 232 271 } 233 272 234 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt)) 273 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt)) 235 274 if err != nil { 236 275 log.Printf("NewPullComment: failed to get repos: %v", err) 237 276 return ··· 240 279 // build up the recipients list: 241 280 // - repo owner 242 281 // - all pull participants 243 - var recipients []syntax.DID 244 - recipients = append(recipients, syntax.DID(repo.Did)) 282 + // - remove those already mentioned 283 + recipients := sets.Singleton(syntax.DID(repo.Did)) 245 284 for _, p := range pull.Participants() { 246 - recipients = append(recipients, syntax.DID(p)) 285 + recipients.Insert(syntax.DID(p)) 286 + } 287 + for _, m := range mentions { 288 + recipients.Remove(m) 247 289 } 248 290 249 291 actorDid := syntax.DID(comment.OwnerDid) ··· 265 307 issueId, 266 308 pullId, 267 309 ) 310 + n.notifyEvent( 311 + actorDid, 312 + sets.Collect(slices.Values(mentions)), 313 + models.NotificationTypeUserMentioned, 314 + entityType, 315 + entityId, 316 + repoId, 317 + issueId, 318 + pullId, 319 + ) 268 320 } 269 321 270 322 func (n *databaseNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) { ··· 284 336 } 285 337 286 338 func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { 287 - // build up the recipients list: 288 - // - repo owner 289 - // - repo collaborators 290 - // - all issue participants 291 - var recipients []syntax.DID 292 - recipients = append(recipients, syntax.DID(issue.Repo.Did)) 293 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt())) 339 + collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt())) 294 340 if err != nil { 295 341 log.Printf("failed to fetch collaborators: %v", err) 296 342 return 297 343 } 344 + 345 + // build up the recipients list: 346 + // - repo owner 347 + // - repo collaborators 348 + // - all issue participants 349 + recipients := sets.Singleton(syntax.DID(issue.Repo.Did)) 298 350 for _, c := range collaborators { 299 - recipients = append(recipients, c.SubjectDid) 351 + recipients.Insert(c.SubjectDid) 300 352 } 301 353 for _, p := range issue.Participants() { 302 - recipients = append(recipients, syntax.DID(p)) 354 + recipients.Insert(syntax.DID(p)) 303 355 } 304 356 305 357 entityType := "pull" ··· 329 381 330 382 func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) { 331 383 // Get repo details 332 - repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt))) 384 + repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt))) 333 385 if err != nil { 334 386 log.Printf("NewPullState: failed to get repos: %v", err) 335 387 return 336 388 } 337 389 338 - // build up the recipients list: 339 - // - repo owner 340 - // - all pull participants 341 - var recipients []syntax.DID 342 - recipients = append(recipients, syntax.DID(repo.Did)) 343 - collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt())) 390 + collaborators, err := 
db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt())) 344 391 if err != nil { 345 392 log.Printf("failed to fetch collaborators: %v", err) 346 393 return 347 394 } 395 + 396 + // build up the recipients list: 397 + // - repo owner 398 + // - all pull participants 399 + recipients := sets.Singleton(syntax.DID(repo.Did)) 348 400 for _, c := range collaborators { 349 - recipients = append(recipients, c.SubjectDid) 401 + recipients.Insert(c.SubjectDid) 350 402 } 351 403 for _, p := range pull.Participants() { 352 - recipients = append(recipients, syntax.DID(p)) 404 + recipients.Insert(syntax.DID(p)) 353 405 } 354 406 355 407 entityType := "pull" ··· 385 437 386 438 func (n *databaseNotifier) notifyEvent( 387 439 actorDid syntax.DID, 388 - recipients []syntax.DID, 440 + recipients sets.Set[syntax.DID], 389 441 eventType models.NotificationType, 390 442 entityType string, 391 443 entityId string, ··· 393 445 issueId *int64, 394 446 pullId *int64, 395 447 ) { 396 - recipientSet := make(map[syntax.DID]struct{}) 397 - for _, did := range recipients { 398 - // everybody except actor themselves 399 - if did != actorDid { 400 - recipientSet[did] = struct{}{} 401 - } 448 + // if the user is attempting to mention >maxMentions users, this is probably spam, do not mention anybody 449 + if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions { 450 + return 402 451 } 403 452 453 + recipients.Remove(actorDid) 454 + 404 455 prefMap, err := db.GetNotificationPreferences( 405 456 n.db, 406 - db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))), 457 + orm.FilterIn("user_did", slices.Collect(recipients.All())), 407 458 ) 408 459 if err != nil { 409 460 // failed to get prefs for users ··· 419 470 defer tx.Rollback() 420 471 421 472 // filter based on preferences 422 - for recipientDid := range recipientSet { 473 + for recipientDid := range recipients.All() { 423 474 prefs, ok := prefMap[recipientDid] 424 475 if !ok { 425 476 prefs = models.DefaultNotificationPreferences(recipientDid)
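The notifier now assembles recipients with the generic `sets` package instead of the previous slice-plus-map dedup in `notifyEvent`. A minimal sketch of that pattern, with the `sets.Set` API (`Singleton`, `Insert`, `Remove`, `All`) inferred from the calls in this changeset rather than from package documentation:

```go
// Sketch only: mirrors how databaseNotifier builds a recipient set.
// The sets API shape is an assumption based on the calls above.
package main

import (
	"fmt"
	"slices"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/sets"
)

func recipients(owner syntax.DID, collaborators, mentions []syntax.DID, actor syntax.DID) sets.Set[syntax.DID] {
	rs := sets.Singleton(owner)
	for _, c := range collaborators {
		rs.Insert(c)
	}
	// mentioned users get a dedicated NotificationTypeUserMentioned event instead
	for _, m := range mentions {
		rs.Remove(m)
	}
	// never notify the actor about their own event
	rs.Remove(actor)
	return rs
}

func main() {
	rs := recipients(
		"did:plc:owner",
		[]syntax.DID{"did:plc:collab1", "did:plc:collab2"},
		[]syntax.DID{"did:plc:collab2"}, // mentioned in the body
		"did:plc:owner",                 // the actor
	)
	fmt.Println(slices.Collect(rs.All())) // only did:plc:collab1 remains
}
```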
+6 -7
appview/notify/merged_notifier.go
··· 39 39 v.Call(in) 40 40 }(n) 41 41 } 42 - wg.Wait() 43 42 } 44 43 45 44 func (m *mergedNotifier) NewRepo(ctx context.Context, repo *models.Repo) { ··· 54 53 m.fanout("DeleteStar", ctx, star) 55 54 } 56 55 57 - func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue) { 58 - m.fanout("NewIssue", ctx, issue) 56 + func (m *mergedNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 57 + m.fanout("NewIssue", ctx, issue, mentions) 59 58 } 60 59 61 - func (m *mergedNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment) { 62 - m.fanout("NewIssueComment", ctx, comment) 60 + func (m *mergedNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 61 + m.fanout("NewIssueComment", ctx, comment, mentions) 63 62 } 64 63 65 64 func (m *mergedNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) { ··· 82 81 m.fanout("NewPull", ctx, pull) 83 82 } 84 83 85 - func (m *mergedNotifier) NewPullComment(ctx context.Context, comment *models.PullComment) { 86 - m.fanout("NewPullComment", ctx, comment) 84 + func (m *mergedNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 85 + m.fanout("NewPullComment", ctx, comment, mentions) 87 86 } 88 87 89 88 func (m *mergedNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
+9 -7
appview/notify/notifier.go
··· 13 13 NewStar(ctx context.Context, star *models.Star) 14 14 DeleteStar(ctx context.Context, star *models.Star) 15 15 16 - NewIssue(ctx context.Context, issue *models.Issue) 17 - NewIssueComment(ctx context.Context, comment *models.IssueComment) 16 + NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) 17 + NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) 18 18 NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) 19 19 DeleteIssue(ctx context.Context, issue *models.Issue) 20 20 ··· 22 22 DeleteFollow(ctx context.Context, follow *models.Follow) 23 23 24 24 NewPull(ctx context.Context, pull *models.Pull) 25 - NewPullComment(ctx context.Context, comment *models.PullComment) 25 + NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) 26 26 NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) 27 27 28 28 UpdateProfile(ctx context.Context, profile *models.Profile) ··· 42 42 func (m *BaseNotifier) NewStar(ctx context.Context, star *models.Star) {} 43 43 func (m *BaseNotifier) DeleteStar(ctx context.Context, star *models.Star) {} 44 44 45 - func (m *BaseNotifier) NewIssue(ctx context.Context, issue *models.Issue) {} 46 - func (m *BaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment) {} 45 + func (m *BaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {} 46 + func (m *BaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 47 + } 47 48 func (m *BaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {} 48 49 func (m *BaseNotifier) DeleteIssue(ctx context.Context, issue *models.Issue) {} 49 50 50 51 func (m *BaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {} 51 52 func (m *BaseNotifier) DeleteFollow(ctx context.Context, follow *models.Follow) {} 52 53 53 - func (m *BaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {} 54 - func (m *BaseNotifier) NewPullComment(ctx context.Context, models *models.PullComment) {} 54 + func (m *BaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {} 55 + func (m *BaseNotifier) NewPullComment(ctx context.Context, models *models.PullComment, mentions []syntax.DID) { 56 + } 55 57 func (m *BaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {} 56 58 57 59 func (m *BaseNotifier) UpdateProfile(ctx context.Context, profile *models.Profile) {}
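Because the comment hooks now take a `mentions` parameter, custom notifiers can keep compiling by embedding `BaseNotifier` and overriding only the methods they care about. A hedged sketch, assuming `BaseNotifier` covers the full interface (which is its stated purpose); the package name is illustrative:

```go
// Sketch: a notifier that only reacts to @-mentions. Everything not
// overridden here falls through to BaseNotifier's no-ops.
package mentionlog

import (
	"context"
	"log"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/appview/models"
	"tangled.org/core/appview/notify"
)

type mentionLogger struct {
	notify.BaseNotifier
}

var _ notify.Notifier = &mentionLogger{}

func (m *mentionLogger) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
	for _, did := range mentions {
		log.Printf("comment %s mentions %s", comment.IssueAt, did)
	}
}
```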
+10 -7
appview/notify/posthog/notifier.go
··· 37 37 38 38 func (n *posthogNotifier) NewStar(ctx context.Context, star *models.Star) { 39 39 err := n.client.Enqueue(posthog.Capture{ 40 - DistinctId: star.StarredByDid, 40 + DistinctId: star.Did, 41 41 Event: "star", 42 42 Properties: posthog.Properties{"repo_at": star.RepoAt.String()}, 43 43 }) ··· 48 48 49 49 func (n *posthogNotifier) DeleteStar(ctx context.Context, star *models.Star) { 50 50 err := n.client.Enqueue(posthog.Capture{ 51 - DistinctId: star.StarredByDid, 51 + DistinctId: star.Did, 52 52 Event: "unstar", 53 53 Properties: posthog.Properties{"repo_at": star.RepoAt.String()}, 54 54 }) ··· 57 57 } 58 58 } 59 59 60 - func (n *posthogNotifier) NewIssue(ctx context.Context, issue *models.Issue) { 60 + func (n *posthogNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) { 61 61 err := n.client.Enqueue(posthog.Capture{ 62 62 DistinctId: issue.Did, 63 63 Event: "new_issue", 64 64 Properties: posthog.Properties{ 65 65 "repo_at": issue.RepoAt.String(), 66 66 "issue_id": issue.IssueId, 67 + "mentions": mentions, 67 68 }, 68 69 }) 69 70 if err != nil { ··· 85 86 } 86 87 } 87 88 88 - func (n *posthogNotifier) NewPullComment(ctx context.Context, comment *models.PullComment) { 89 + func (n *posthogNotifier) NewPullComment(ctx context.Context, comment *models.PullComment, mentions []syntax.DID) { 89 90 err := n.client.Enqueue(posthog.Capture{ 90 91 DistinctId: comment.OwnerDid, 91 92 Event: "new_pull_comment", 92 93 Properties: posthog.Properties{ 93 - "repo_at": comment.RepoAt, 94 - "pull_id": comment.PullId, 94 + "repo_at": comment.RepoAt, 95 + "pull_id": comment.PullId, 96 + "mentions": mentions, 95 97 }, 96 98 }) 97 99 if err != nil { ··· 178 180 } 179 181 } 180 182 181 - func (n *posthogNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment) { 183 + func (n *posthogNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) { 182 184 err := n.client.Enqueue(posthog.Capture{ 183 185 DistinctId: comment.Did, 184 186 Event: "new_issue_comment", 185 187 Properties: posthog.Properties{ 186 188 "issue_at": comment.IssueAt, 189 + "mentions": mentions, 187 190 }, 188 191 }) 189 192 if err != nil {
+3 -2
appview/oauth/handler.go
··· 16 16 "tangled.org/core/api/tangled" 17 17 "tangled.org/core/appview/db" 18 18 "tangled.org/core/consts" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/tid" 20 21 ) 21 22 ··· 97 98 // and create an sh.tangled.spindle.member record with that 98 99 spindleMembers, err := db.GetSpindleMembers( 99 100 o.Db, 100 - db.FilterEq("instance", "spindle.tangled.sh"), 101 - db.FilterEq("subject", did), 101 + orm.FilterEq("instance", "spindle.tangled.sh"), 102 + orm.FilterEq("subject", did), 102 103 ) 103 104 if err != nil { 104 105 l.Error("failed to get spindle members", "err", err)
+15 -2
appview/oauth/oauth.go
··· 202 202 exp int64 203 203 lxm string 204 204 dev bool 205 + timeout time.Duration 205 206 } 206 207 207 208 type ServiceClientOpt func(*ServiceClientOpts) 209 + 210 + func DefaultServiceClientOpts() ServiceClientOpts { 211 + return ServiceClientOpts{ 212 + timeout: time.Second * 5, 213 + } 214 + } 208 215 209 216 func WithService(service string) ServiceClientOpt { 210 217 return func(s *ServiceClientOpts) { ··· 233 240 } 234 241 } 235 242 243 + func WithTimeout(timeout time.Duration) ServiceClientOpt { 244 + return func(s *ServiceClientOpts) { 245 + s.timeout = timeout 246 + } 247 + } 248 + 236 249 func (s *ServiceClientOpts) Audience() string { 237 250 return fmt.Sprintf("did:web:%s", s.service) 238 251 } ··· 247 260 } 248 261 249 262 func (o *OAuth) ServiceClient(r *http.Request, os ...ServiceClientOpt) (*xrpc.Client, error) { 250 - opts := ServiceClientOpts{} 263 + opts := DefaultServiceClientOpts() 251 264 for _, o := range os { 252 265 o(&opts) 253 266 } ··· 274 287 }, 275 288 Host: opts.Host(), 276 289 Client: &http.Client{ 277 - Timeout: time.Second * 5, 290 + Timeout: opts.timeout, 278 291 }, 279 292 }, nil 280 293 }
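With the timeout folded into `ServiceClientOpts`, callers talking to slower services can override the 5-second default instead of inheriting a hard-coded client timeout. A sketch of such a caller; the hostname is a placeholder and the `xrpc` import path is an assumption:

```go
// Sketch: build a service client with a longer timeout than the default.
// WithService and WithTimeout come from this changeset; the hostname and
// the xrpc client import path are assumptions.
package handlers

import (
	"net/http"
	"time"

	"github.com/bluesky-social/indigo/xrpc"
	"tangled.org/core/appview/oauth"
)

func spindleClient(o *oauth.OAuth, r *http.Request) (*xrpc.Client, error) {
	return o.ServiceClient(
		r,
		oauth.WithService("spindle.example.com"), // audience becomes did:web:spindle.example.com
		oauth.WithTimeout(30*time.Second),        // default is 5s via DefaultServiceClientOpts
	)
}
```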
+86 -10
appview/pages/funcmap.go
··· 1 1 package pages 2 2 3 3 import ( 4 + "bytes" 4 5 "context" 5 6 "crypto/hmac" 6 7 "crypto/sha256" ··· 17 18 "strings" 18 19 "time" 19 20 20 - "github.com/bluesky-social/indigo/atproto/syntax" 21 + "github.com/alecthomas/chroma/v2" 22 + chromahtml "github.com/alecthomas/chroma/v2/formatters/html" 23 + "github.com/alecthomas/chroma/v2/lexers" 24 + "github.com/alecthomas/chroma/v2/styles" 21 25 "github.com/dustin/go-humanize" 22 26 "github.com/go-enry/go-enry/v2" 27 + "github.com/yuin/goldmark" 28 + emoji "github.com/yuin/goldmark-emoji" 23 29 "tangled.org/core/appview/filetree" 30 + "tangled.org/core/appview/models" 24 31 "tangled.org/core/appview/pages/markup" 25 32 "tangled.org/core/crypto" 26 33 ) ··· 66 73 67 74 return identity.Handle.String() 68 75 }, 76 + "ownerSlashRepo": func(repo *models.Repo) string { 77 + ownerId, err := p.resolver.ResolveIdent(context.Background(), repo.Did) 78 + if err != nil { 79 + return repo.DidSlashRepo() 80 + } 81 + handle := ownerId.Handle 82 + if handle != "" && !handle.IsInvalidHandle() { 83 + return string(handle) + "/" + repo.Name 84 + } 85 + return repo.DidSlashRepo() 86 + }, 69 87 "truncateAt30": func(s string) string { 70 88 if len(s) <= 30 { 71 89 return s ··· 94 112 "sub": func(a, b int) int { 95 113 return a - b 96 114 }, 115 + "mul": func(a, b int) int { 116 + return a * b 117 + }, 118 + "div": func(a, b int) int { 119 + return a / b 120 + }, 121 + "mod": func(a, b int) int { 122 + return a % b 123 + }, 97 124 "f64": func(a int) float64 { 98 125 return float64(a) 99 126 }, ··· 126 153 127 154 return b 128 155 }, 129 - "didOrHandle": func(did, handle string) string { 130 - if handle != "" && handle != syntax.HandleInvalid.String() { 131 - return handle 132 - } else { 133 - return did 134 - } 135 - }, 136 156 "assoc": func(values ...string) ([][]string, error) { 137 157 if len(values)%2 != 0 { 138 158 return nil, fmt.Errorf("invalid assoc call, must have an even number of arguments") ··· 143 163 } 144 164 return pairs, nil 145 165 }, 146 - "append": func(s []string, values ...string) []string { 166 + "append": func(s []any, values ...any) []any { 147 167 s = append(s, values...) 
148 168 return s 149 169 }, ··· 242 262 }, 243 263 "description": func(text string) template.HTML { 244 264 p.rctx.RendererType = markup.RendererTypeDefault 245 - htmlString := p.rctx.RenderMarkdown(text) 265 + htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New( 266 + goldmark.WithExtensions( 267 + emoji.Emoji, 268 + ), 269 + )) 246 270 sanitized := p.rctx.SanitizeDescription(htmlString) 247 271 return template.HTML(sanitized) 248 272 }, 273 + "readme": func(text string) template.HTML { 274 + p.rctx.RendererType = markup.RendererTypeRepoMarkdown 275 + htmlString := p.rctx.RenderMarkdown(text) 276 + sanitized := p.rctx.SanitizeDefault(htmlString) 277 + return template.HTML(sanitized) 278 + }, 279 + "code": func(content, path string) string { 280 + var style *chroma.Style = styles.Get("catpuccin-latte") 281 + formatter := chromahtml.New( 282 + chromahtml.InlineCode(false), 283 + chromahtml.WithLineNumbers(true), 284 + chromahtml.WithLinkableLineNumbers(true, "L"), 285 + chromahtml.Standalone(false), 286 + chromahtml.WithClasses(true), 287 + ) 288 + 289 + lexer := lexers.Get(filepath.Base(path)) 290 + if lexer == nil { 291 + lexer = lexers.Fallback 292 + } 293 + 294 + iterator, err := lexer.Tokenise(nil, content) 295 + if err != nil { 296 + p.logger.Error("chroma tokenize", "err", "err") 297 + return "" 298 + } 299 + 300 + var code bytes.Buffer 301 + err = formatter.Format(&code, style, iterator) 302 + if err != nil { 303 + p.logger.Error("chroma format", "err", "err") 304 + return "" 305 + } 306 + 307 + return code.String() 308 + }, 249 309 "trimUriScheme": func(text string) string { 250 310 text = strings.TrimPrefix(text, "https://") 251 311 text = strings.TrimPrefix(text, "http://") ··· 328 388 } 329 389 } 330 390 391 + func (p *Pages) resolveDid(did string) string { 392 + identity, err := p.resolver.ResolveIdent(context.Background(), did) 393 + 394 + if err != nil { 395 + return did 396 + } 397 + 398 + if identity.Handle.IsInvalidHandle() { 399 + return "handle.invalid" 400 + } 401 + 402 + return identity.Handle.String() 403 + } 404 + 331 405 func (p *Pages) AvatarUrl(handle, size string) string { 332 406 handle = strings.TrimPrefix(handle, "@") 407 + 408 + handle = p.resolveDid(handle) 333 409 334 410 secret := p.avatar.SharedSecret 335 411 h := hmac.New(sha256.New, []byte(secret))
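The chroma pipeline that previously lived in `RepoBlob` and `SingleString` is now exposed as the `code` template helper. A standalone sketch of the same pipeline for reference; note that `styles.Get` silently falls back to a default style when the requested name is not registered (chroma ships its Catppuccin styles under names like "catppuccin-latte"):

```go
// Sketch: highlight a file the way the new `code` helper does: pick a lexer
// from the filename, tokenise, and render classes-only HTML.
package highlight

import (
	"bytes"
	"path/filepath"

	chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
	"github.com/alecthomas/chroma/v2/lexers"
	"github.com/alecthomas/chroma/v2/styles"
)

func Highlight(content, path string) (string, error) {
	formatter := chromahtml.New(
		chromahtml.WithLineNumbers(true),
		chromahtml.WithLinkableLineNumbers(true, "L"),
		chromahtml.WithClasses(true), // emit CSS classes, not inline styles
	)

	lexer := lexers.Get(filepath.Base(path))
	if lexer == nil {
		lexer = lexers.Fallback
	}

	iterator, err := lexer.Tokenise(nil, content)
	if err != nil {
		return "", err
	}

	var out bytes.Buffer
	// styles.Get returns a fallback style for unknown names
	if err := formatter.Format(&out, styles.Get("catppuccin-latte"), iterator); err != nil {
		return "", err
	}
	return out.String(), nil
}
```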
+121
appview/pages/markup/extension/atlink.go
··· 1 + // heavily inspired by: https://github.com/kaleocheng/goldmark-extensions 2 + 3 + package extension 4 + 5 + import ( 6 + "regexp" 7 + 8 + "github.com/yuin/goldmark" 9 + "github.com/yuin/goldmark/ast" 10 + "github.com/yuin/goldmark/parser" 11 + "github.com/yuin/goldmark/renderer" 12 + "github.com/yuin/goldmark/renderer/html" 13 + "github.com/yuin/goldmark/text" 14 + "github.com/yuin/goldmark/util" 15 + ) 16 + 17 + // An AtNode struct represents an AtNode 18 + type AtNode struct { 19 + Handle string 20 + ast.BaseInline 21 + } 22 + 23 + var _ ast.Node = &AtNode{} 24 + 25 + // Dump implements Node.Dump. 26 + func (n *AtNode) Dump(source []byte, level int) { 27 + ast.DumpHelper(n, source, level, nil, nil) 28 + } 29 + 30 + // KindAt is a NodeKind of the At node. 31 + var KindAt = ast.NewNodeKind("At") 32 + 33 + // Kind implements Node.Kind. 34 + func (n *AtNode) Kind() ast.NodeKind { 35 + return KindAt 36 + } 37 + 38 + var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`) 39 + var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`) 40 + 41 + type atParser struct{} 42 + 43 + // NewAtParser return a new InlineParser that parses 44 + // at expressions. 45 + func NewAtParser() parser.InlineParser { 46 + return &atParser{} 47 + } 48 + 49 + func (s *atParser) Trigger() []byte { 50 + return []byte{'@'} 51 + } 52 + 53 + func (s *atParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node { 54 + line, segment := block.PeekLine() 55 + m := atRegexp.FindSubmatchIndex(line) 56 + if m == nil { 57 + return nil 58 + } 59 + 60 + // Check for all links in the markdown to see if the handle found is inside one 61 + linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1) 62 + for _, linkMatch := range linksIndexes { 63 + if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] { 64 + return nil 65 + } 66 + } 67 + 68 + atSegment := text.NewSegment(segment.Start, segment.Start+m[1]) 69 + block.Advance(m[1]) 70 + node := &AtNode{} 71 + node.AppendChild(node, ast.NewTextSegment(atSegment)) 72 + node.Handle = string(atSegment.Value(block.Source())[1:]) 73 + return node 74 + } 75 + 76 + // atHtmlRenderer is a renderer.NodeRenderer implementation that 77 + // renders At nodes. 78 + type atHtmlRenderer struct { 79 + html.Config 80 + } 81 + 82 + // NewAtHTMLRenderer returns a new AtHTMLRenderer. 83 + func NewAtHTMLRenderer(opts ...html.Option) renderer.NodeRenderer { 84 + r := &atHtmlRenderer{ 85 + Config: html.NewConfig(), 86 + } 87 + for _, opt := range opts { 88 + opt.SetHTMLOption(&r.Config) 89 + } 90 + return r 91 + } 92 + 93 + // RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs. 94 + func (r *atHtmlRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) { 95 + reg.Register(KindAt, r.renderAt) 96 + } 97 + 98 + func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) { 99 + if entering { 100 + w.WriteString(`<a href="/`) 101 + w.WriteString(n.(*AtNode).Handle) 102 + w.WriteString(`" class="mention">`) 103 + } else { 104 + w.WriteString("</a>") 105 + } 106 + return ast.WalkContinue, nil 107 + } 108 + 109 + type atExt struct{} 110 + 111 + // At is an extension that allow you to use at expression like '@user.bsky.social' . 
112 + var AtExt = &atExt{} 113 + 114 + func (e *atExt) Extend(m goldmark.Markdown) { 115 + m.Parser().AddOptions(parser.WithInlineParsers( 116 + util.Prioritized(NewAtParser(), 500), 117 + )) 118 + m.Renderer().AddOptions(renderer.WithNodeRenderers( 119 + util.Prioritized(NewAtHTMLRenderer(), 500), 120 + )) 121 + }
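The extension can be exercised on its own, outside the appview's render context. A small sketch wiring `AtExt` into a bare goldmark instance; the expected output follows from the renderer above and the tests below:

```go
// Sketch: render an @-mention with only the At extension enabled.
package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	textension "tangled.org/core/appview/pages/markup/extension"
)

func main() {
	md := goldmark.New(goldmark.WithExtensions(textension.AtExt))

	var buf bytes.Buffer
	if err := md.Convert([]byte("hello @alice.tngl.sh"), &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// <p>hello <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a></p>
}
```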
+13 -4
appview/pages/markup/markdown.go
··· 12 12 13 13 chromahtml "github.com/alecthomas/chroma/v2/formatters/html" 14 14 "github.com/alecthomas/chroma/v2/styles" 15 - treeblood "github.com/wyatt915/goldmark-treeblood" 16 15 "github.com/yuin/goldmark" 16 + "github.com/yuin/goldmark-emoji" 17 17 highlighting "github.com/yuin/goldmark-highlighting/v2" 18 18 "github.com/yuin/goldmark/ast" 19 19 "github.com/yuin/goldmark/extension" ··· 25 25 htmlparse "golang.org/x/net/html" 26 26 27 27 "tangled.org/core/api/tangled" 28 + textension "tangled.org/core/appview/pages/markup/extension" 28 29 "tangled.org/core/appview/pages/repoinfo" 29 30 ) 30 31 ··· 50 51 Files fs.FS 51 52 } 52 53 53 - func (rctx *RenderContext) RenderMarkdown(source string) string { 54 + func NewMarkdown() goldmark.Markdown { 54 55 md := goldmark.New( 55 56 goldmark.WithExtensions( 56 57 extension.GFM, ··· 64 65 extension.NewFootnote( 65 66 extension.WithFootnoteIDPrefix([]byte("footnote")), 66 67 ), 67 - treeblood.MathML(), 68 68 callout.CalloutExtention, 69 + textension.AtExt, 70 + emoji.Emoji, 69 71 ), 70 72 goldmark.WithParserOptions( 71 73 parser.WithAutoHeadingID(), 72 74 ), 73 75 goldmark.WithRendererOptions(html.WithUnsafe()), 74 76 ) 77 + return md 78 + } 75 79 80 + func (rctx *RenderContext) RenderMarkdown(source string) string { 81 + return rctx.RenderMarkdownWith(source, NewMarkdown()) 82 + } 83 + 84 + func (rctx *RenderContext) RenderMarkdownWith(source string, md goldmark.Markdown) string { 76 85 if rctx != nil { 77 86 var transformers []util.PrioritizedValue 78 87 ··· 240 249 repoName := fmt.Sprintf("%s/%s", rctx.RepoInfo.OwnerDid, rctx.RepoInfo.Name) 241 250 242 251 query := fmt.Sprintf("repo=%s&ref=%s&path=%s&raw=true", 243 - url.PathEscape(repoName), url.PathEscape(rctx.RepoInfo.Ref), actualPath) 252 + url.QueryEscape(repoName), url.QueryEscape(rctx.RepoInfo.Ref), actualPath) 244 253 245 254 parsedURL := &url.URL{ 246 255 Scheme: scheme,
+121
appview/pages/markup/markdown_test.go
··· 1 + package markup 2 + 3 + import ( 4 + "bytes" 5 + "testing" 6 + ) 7 + 8 + func TestAtExtension_Rendering(t *testing.T) { 9 + tests := []struct { 10 + name string 11 + markdown string 12 + expected string 13 + }{ 14 + { 15 + name: "renders simple at mention", 16 + markdown: "Hello @user.tngl.sh!", 17 + expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`, 18 + }, 19 + { 20 + name: "renders multiple at mentions", 21 + markdown: "Hi @alice.tngl.sh and @bob.example.com", 22 + expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`, 23 + }, 24 + { 25 + name: "renders at mention in parentheses", 26 + markdown: "Check this out (@user.tngl.sh)", 27 + expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`, 28 + }, 29 + { 30 + name: "does not render email", 31 + markdown: "Contact me at test@example.com", 32 + expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`, 33 + }, 34 + { 35 + name: "renders at mention with hyphen", 36 + markdown: "Follow @user-name.tngl.sh", 37 + expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`, 38 + }, 39 + { 40 + name: "renders at mention with numbers", 41 + markdown: "@user123.test456.social", 42 + expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`, 43 + }, 44 + { 45 + name: "at mention at start of line", 46 + markdown: "@user.tngl.sh is cool", 47 + expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`, 48 + }, 49 + } 50 + 51 + for _, tt := range tests { 52 + t.Run(tt.name, func(t *testing.T) { 53 + md := NewMarkdown() 54 + 55 + var buf bytes.Buffer 56 + if err := md.Convert([]byte(tt.markdown), &buf); err != nil { 57 + t.Fatalf("failed to convert markdown: %v", err) 58 + } 59 + 60 + result := buf.String() 61 + if result != tt.expected+"\n" { 62 + t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result) 63 + } 64 + }) 65 + } 66 + } 67 + 68 + func TestAtExtension_WithOtherMarkdown(t *testing.T) { 69 + tests := []struct { 70 + name string 71 + markdown string 72 + contains string 73 + }{ 74 + { 75 + name: "at mention with bold", 76 + markdown: "**Hello @user.tngl.sh**", 77 + contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`, 78 + }, 79 + { 80 + name: "at mention with italic", 81 + markdown: "*Check @user.tngl.sh*", 82 + contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`, 83 + }, 84 + { 85 + name: "at mention in list", 86 + markdown: "- Item 1\n- @user.tngl.sh\n- Item 3", 87 + contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`, 88 + }, 89 + { 90 + name: "at mention in link", 91 + markdown: "[@regnault.dev](https://regnault.dev)", 92 + contains: `<a href="https://regnault.dev">@regnault.dev</a>`, 93 + }, 94 + { 95 + name: "at mention in link again", 96 + markdown: "[check out @regnault.dev](https://regnault.dev)", 97 + contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`, 98 + }, 99 + { 100 + name: "at mention in link again, multiline", 101 + markdown: "[\ncheck out @regnault.dev](https://regnault.dev)", 102 + contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>", 103 + }, 104 + } 105 + 106 + for _, tt := range tests { 107 + t.Run(tt.name, func(t *testing.T) { 108 + md := NewMarkdown() 109 + 110 + var buf bytes.Buffer 111 + 
if err := md.Convert([]byte(tt.markdown), &buf); err != nil { 112 + t.Fatalf("failed to convert markdown: %v", err) 113 + } 114 + 115 + result := buf.String() 116 + if !bytes.Contains([]byte(result), []byte(tt.contains)) { 117 + t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result) 118 + } 119 + }) 120 + } 121 + }
+124
appview/pages/markup/reference_link.go
··· 1 + package markup 2 + 3 + import ( 4 + "maps" 5 + "net/url" 6 + "path" 7 + "slices" 8 + "strconv" 9 + "strings" 10 + 11 + "github.com/yuin/goldmark/ast" 12 + "github.com/yuin/goldmark/text" 13 + "tangled.org/core/appview/models" 14 + textension "tangled.org/core/appview/pages/markup/extension" 15 + ) 16 + 17 + // FindReferences collects all links referencing tangled-related objects 18 + // like issues, PRs, comments or even @-mentions 19 + // This funciton doesn't actually check for the existence of records in the DB 20 + // or the PDS; it merely returns a list of what are presumed to be references. 21 + func FindReferences(baseUrl string, source string) ([]string, []models.ReferenceLink) { 22 + var ( 23 + refLinkSet = make(map[models.ReferenceLink]struct{}) 24 + mentionsSet = make(map[string]struct{}) 25 + md = NewMarkdown() 26 + sourceBytes = []byte(source) 27 + root = md.Parser().Parse(text.NewReader(sourceBytes)) 28 + ) 29 + // trim url scheme. the SSL shouldn't matter 30 + baseUrl = strings.TrimPrefix(baseUrl, "https://") 31 + baseUrl = strings.TrimPrefix(baseUrl, "http://") 32 + 33 + ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) { 34 + if !entering { 35 + return ast.WalkContinue, nil 36 + } 37 + switch n.Kind() { 38 + case textension.KindAt: 39 + handle := n.(*textension.AtNode).Handle 40 + mentionsSet[handle] = struct{}{} 41 + return ast.WalkSkipChildren, nil 42 + case ast.KindLink: 43 + dest := string(n.(*ast.Link).Destination) 44 + ref := parseTangledLink(baseUrl, dest) 45 + if ref != nil { 46 + refLinkSet[*ref] = struct{}{} 47 + } 48 + return ast.WalkSkipChildren, nil 49 + case ast.KindAutoLink: 50 + an := n.(*ast.AutoLink) 51 + if an.AutoLinkType == ast.AutoLinkURL { 52 + dest := string(an.URL(sourceBytes)) 53 + ref := parseTangledLink(baseUrl, dest) 54 + if ref != nil { 55 + refLinkSet[*ref] = struct{}{} 56 + } 57 + } 58 + return ast.WalkSkipChildren, nil 59 + } 60 + return ast.WalkContinue, nil 61 + }) 62 + mentions := slices.Collect(maps.Keys(mentionsSet)) 63 + references := slices.Collect(maps.Keys(refLinkSet)) 64 + return mentions, references 65 + } 66 + 67 + func parseTangledLink(baseHost string, urlStr string) *models.ReferenceLink { 68 + u, err := url.Parse(urlStr) 69 + if err != nil { 70 + return nil 71 + } 72 + 73 + if u.Host != "" && !strings.EqualFold(u.Host, baseHost) { 74 + return nil 75 + } 76 + 77 + p := path.Clean(u.Path) 78 + parts := strings.FieldsFunc(p, func(r rune) bool { return r == '/' }) 79 + if len(parts) < 4 { 80 + // need at least: handle / repo / kind / id 81 + return nil 82 + } 83 + 84 + var ( 85 + handle = parts[0] 86 + repo = parts[1] 87 + kindSeg = parts[2] 88 + subjectSeg = parts[3] 89 + ) 90 + 91 + handle = strings.TrimPrefix(handle, "@") 92 + 93 + var kind models.RefKind 94 + switch kindSeg { 95 + case "issues": 96 + kind = models.RefKindIssue 97 + case "pulls": 98 + kind = models.RefKindPull 99 + default: 100 + return nil 101 + } 102 + 103 + subjectId, err := strconv.Atoi(subjectSeg) 104 + if err != nil { 105 + return nil 106 + } 107 + var commentId *int 108 + if u.Fragment != "" { 109 + if strings.HasPrefix(u.Fragment, "comment-") { 110 + commentIdStr := u.Fragment[len("comment-"):] 111 + if id, err := strconv.Atoi(commentIdStr); err == nil { 112 + commentId = &id 113 + } 114 + } 115 + } 116 + 117 + return &models.ReferenceLink{ 118 + Handle: handle, 119 + Repo: repo, 120 + Kind: kind, 121 + SubjectId: subjectId, 122 + CommentId: commentId, 123 + } 124 + }
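As the comment notes, `FindReferences` only walks the markdown AST; it does not verify that the referenced records exist. A quick sketch of calling it on a comment body, with the handle, repo, and numbers as placeholders and the expected values following from `parseTangledLink` above:

```go
// Sketch: extract mentions and issue/PR references from a comment body.
package main

import (
	"fmt"

	"tangled.org/core/appview/pages/markup"
)

func main() {
	body := "cc @alice.tngl.sh, see https://tangled.org/@bob.tngl.sh/core/issues/42#comment-3"

	mentions, refs := markup.FindReferences("https://tangled.org", body)

	fmt.Println(mentions) // [alice.tngl.sh]
	for _, r := range refs {
		fmt.Printf("%s/%s #%d (has comment fragment: %v)\n", r.Handle, r.Repo, r.SubjectId, r.CommentId != nil)
		// bob.tngl.sh/core #42 (has comment fragment: true)
	}
}
```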
+3
appview/pages/markup/sanitizer.go
··· 77 77 policy.AllowAttrs("class").Matching(regexp.MustCompile(`heading`)).OnElements("h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8") 78 78 policy.AllowAttrs("class").Matching(regexp.MustCompile(strings.Join(slices.Collect(maps.Values(chroma.StandardTypes)), "|"))).OnElements("span") 79 79 80 + // at-mentions 81 + policy.AllowAttrs("class").Matching(regexp.MustCompile(`mention`)).OnElements("a") 82 + 80 83 // centering content 81 84 policy.AllowElements("center") 82 85
+39 -116
appview/pages/pages.go
··· 1 1 package pages 2 2 3 3 import ( 4 - "bytes" 5 4 "crypto/sha256" 6 5 "embed" 7 6 "encoding/hex" ··· 29 28 "tangled.org/core/patchutil" 30 29 "tangled.org/core/types" 31 30 32 - "github.com/alecthomas/chroma/v2" 33 - chromahtml "github.com/alecthomas/chroma/v2/formatters/html" 34 - "github.com/alecthomas/chroma/v2/lexers" 35 - "github.com/alecthomas/chroma/v2/styles" 36 31 "github.com/bluesky-social/indigo/atproto/identity" 37 32 "github.com/bluesky-social/indigo/atproto/syntax" 38 33 "github.com/go-git/go-git/v5/plumbing" 39 - "github.com/go-git/go-git/v5/plumbing/object" 40 34 ) 41 35 42 36 //go:embed templates/* static legal ··· 412 406 type KnotsParams struct { 413 407 LoggedInUser *oauth.User 414 408 Registrations []models.Registration 409 + Tabs []map[string]any 410 + Tab string 415 411 } 416 412 417 413 func (p *Pages) Knots(w io.Writer, params KnotsParams) error { ··· 424 420 Members []string 425 421 Repos map[string][]models.Repo 426 422 IsOwner bool 423 + Tabs []map[string]any 424 + Tab string 427 425 } 428 426 429 427 func (p *Pages) Knot(w io.Writer, params KnotParams) error { ··· 441 439 type SpindlesParams struct { 442 440 LoggedInUser *oauth.User 443 441 Spindles []models.Spindle 442 + Tabs []map[string]any 443 + Tab string 444 444 } 445 445 446 446 func (p *Pages) Spindles(w io.Writer, params SpindlesParams) error { ··· 449 449 450 450 type SpindleListingParams struct { 451 451 models.Spindle 452 + Tabs []map[string]any 453 + Tab string 452 454 } 453 455 454 456 func (p *Pages) SpindleListing(w io.Writer, params SpindleListingParams) error { ··· 460 462 Spindle models.Spindle 461 463 Members []string 462 464 Repos map[string][]models.Repo 465 + Tabs []map[string]any 466 + Tab string 463 467 } 464 468 465 469 func (p *Pages) SpindleDashboard(w io.Writer, params SpindleDashboardParams) error { ··· 487 491 488 492 type ProfileCard struct { 489 493 UserDid string 490 - UserHandle string 491 494 FollowStatus models.FollowStatus 492 495 Punchcard *models.Punchcard 493 496 Profile *models.Profile ··· 630 633 return p.executePlain("user/fragments/editPins", w, params) 631 634 } 632 635 633 - type RepoStarFragmentParams struct { 636 + type StarBtnFragmentParams struct { 634 637 IsStarred bool 635 - RepoAt syntax.ATURI 636 - Stats models.RepoStats 638 + SubjectAt syntax.ATURI 639 + StarCount int 637 640 } 638 641 639 - func (p *Pages) RepoStarFragment(w io.Writer, params RepoStarFragmentParams) error { 640 - return p.executePlain("repo/fragments/repoStar", w, params) 642 + func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error { 643 + return p.executePlain("fragments/starBtn-oob", w, params) 641 644 } 642 645 643 646 type RepoIndexParams struct { ··· 645 648 RepoInfo repoinfo.RepoInfo 646 649 Active string 647 650 TagMap map[string][]string 648 - CommitsTrunc []*object.Commit 651 + CommitsTrunc []types.Commit 649 652 TagsTrunc []*types.TagReference 650 653 BranchesTrunc []types.Branch 651 654 // ForkInfo *types.ForkInfo ··· 744 747 func (r RepoTreeParams) TreeStats() RepoTreeStats { 745 748 numFolders, numFiles := 0, 0 746 749 for _, f := range r.Files { 747 - if !f.IsFile { 750 + if !f.IsFile() { 748 751 numFolders += 1 749 - } else if f.IsFile { 752 + } else if f.IsFile() { 750 753 numFiles += 1 751 754 } 752 755 } ··· 817 820 } 818 821 819 822 type RepoBlobParams struct { 820 - LoggedInUser *oauth.User 821 - RepoInfo repoinfo.RepoInfo 822 - Active string 823 - Unsupported bool 824 - IsImage bool 825 - IsVideo bool 826 - ContentSrc string 827 - 
BreadCrumbs [][]string 828 - ShowRendered bool 829 - RenderToggle bool 830 - RenderedContents template.HTML 823 + LoggedInUser *oauth.User 824 + RepoInfo repoinfo.RepoInfo 825 + Active string 826 + BreadCrumbs [][]string 827 + BlobView models.BlobView 831 828 *tangled.RepoBlob_Output 832 - // Computed fields for template compatibility 833 - Contents string 834 - Lines int 835 - SizeHint uint64 836 - IsBinary bool 837 829 } 838 830 839 831 func (p *Pages) RepoBlob(w io.Writer, params RepoBlobParams) error { 840 - var style *chroma.Style = styles.Get("catpuccin-latte") 841 - 842 - if params.ShowRendered { 843 - switch markup.GetFormat(params.Path) { 844 - case markup.FormatMarkdown: 845 - p.rctx.RepoInfo = params.RepoInfo 846 - p.rctx.RendererType = markup.RendererTypeRepoMarkdown 847 - htmlString := p.rctx.RenderMarkdown(params.Contents) 848 - sanitized := p.rctx.SanitizeDefault(htmlString) 849 - params.RenderedContents = template.HTML(sanitized) 850 - } 832 + switch params.BlobView.ContentType { 833 + case models.BlobContentTypeMarkup: 834 + p.rctx.RepoInfo = params.RepoInfo 851 835 } 852 836 853 - c := params.Contents 854 - formatter := chromahtml.New( 855 - chromahtml.InlineCode(false), 856 - chromahtml.WithLineNumbers(true), 857 - chromahtml.WithLinkableLineNumbers(true, "L"), 858 - chromahtml.Standalone(false), 859 - chromahtml.WithClasses(true), 860 - ) 861 - 862 - lexer := lexers.Get(filepath.Base(params.Path)) 863 - if lexer == nil { 864 - lexer = lexers.Fallback 865 - } 866 - 867 - iterator, err := lexer.Tokenise(nil, c) 868 - if err != nil { 869 - return fmt.Errorf("chroma tokenize: %w", err) 870 - } 871 - 872 - var code bytes.Buffer 873 - err = formatter.Format(&code, style, iterator) 874 - if err != nil { 875 - return fmt.Errorf("chroma format: %w", err) 876 - } 877 - 878 - params.Contents = code.String() 879 837 params.Active = "overview" 880 838 return p.executeRepo("repo/blob", w, params) 881 839 } 882 840 883 841 type Collaborator struct { 884 - Did string 885 - Handle string 886 - Role string 842 + Did string 843 + Role string 887 844 } 888 845 889 846 type RepoSettingsParams struct { ··· 958 915 RepoInfo repoinfo.RepoInfo 959 916 Active string 960 917 Issues []models.Issue 918 + IssueCount int 961 919 LabelDefs map[string]*models.LabelDefinition 962 920 Page pagination.Page 963 921 FilteringByOpen bool ··· 975 933 Active string 976 934 Issue *models.Issue 977 935 CommentList []models.CommentListItem 936 + Backlinks []models.RichReferenceLink 978 937 LabelDefs map[string]*models.LabelDefinition 979 938 980 939 OrderedReactionKinds []models.ReactionKind ··· 1128 1087 Pull *models.Pull 1129 1088 Stack models.Stack 1130 1089 AbandonedPulls []*models.Pull 1090 + Backlinks []models.RichReferenceLink 1131 1091 BranchDeleteStatus *models.BranchDeleteStatus 1132 1092 MergeCheck types.MergeCheckResponse 1133 1093 ResubmitCheck ResubmitResult ··· 1299 1259 return p.executePlain("repo/fragments/compareAllowPull", w, params) 1300 1260 } 1301 1261 1302 - type RepoCompareDiffParams struct { 1303 - LoggedInUser *oauth.User 1304 - RepoInfo repoinfo.RepoInfo 1305 - Diff types.NiceDiff 1262 + type RepoCompareDiffFragmentParams struct { 1263 + Diff types.NiceDiff 1264 + DiffOpts types.DiffOpts 1306 1265 } 1307 1266 1308 - func (p *Pages) RepoCompareDiff(w io.Writer, params RepoCompareDiffParams) error { 1309 - return p.executePlain("repo/fragments/diff", w, []any{params.RepoInfo.FullName, &params.Diff}) 1267 + func (p *Pages) RepoCompareDiffFragment(w io.Writer, params 
RepoCompareDiffFragmentParams) error { 1268 + return p.executePlain("repo/fragments/diff", w, []any{&params.Diff, &params.DiffOpts}) 1310 1269 } 1311 1270 1312 1271 type LabelPanelParams struct { ··· 1426 1385 ShowRendered bool 1427 1386 RenderToggle bool 1428 1387 RenderedContents template.HTML 1429 - String models.String 1388 + String *models.String 1430 1389 Stats models.StringStats 1390 + IsStarred bool 1391 + StarCount int 1431 1392 Owner identity.Identity 1432 1393 } 1433 1394 1434 1395 func (p *Pages) SingleString(w io.Writer, params SingleStringParams) error { 1435 - var style *chroma.Style = styles.Get("catpuccin-latte") 1436 - 1437 - if params.ShowRendered { 1438 - switch markup.GetFormat(params.String.Filename) { 1439 - case markup.FormatMarkdown: 1440 - p.rctx.RendererType = markup.RendererTypeRepoMarkdown 1441 - htmlString := p.rctx.RenderMarkdown(params.String.Contents) 1442 - sanitized := p.rctx.SanitizeDefault(htmlString) 1443 - params.RenderedContents = template.HTML(sanitized) 1444 - } 1445 - } 1446 - 1447 - c := params.String.Contents 1448 - formatter := chromahtml.New( 1449 - chromahtml.InlineCode(false), 1450 - chromahtml.WithLineNumbers(true), 1451 - chromahtml.WithLinkableLineNumbers(true, "L"), 1452 - chromahtml.Standalone(false), 1453 - chromahtml.WithClasses(true), 1454 - ) 1455 - 1456 - lexer := lexers.Get(filepath.Base(params.String.Filename)) 1457 - if lexer == nil { 1458 - lexer = lexers.Fallback 1459 - } 1460 - 1461 - iterator, err := lexer.Tokenise(nil, c) 1462 - if err != nil { 1463 - return fmt.Errorf("chroma tokenize: %w", err) 1464 - } 1465 - 1466 - var code bytes.Buffer 1467 - err = formatter.Format(&code, style, iterator) 1468 - if err != nil { 1469 - return fmt.Errorf("chroma format: %w", err) 1470 - } 1471 - 1472 - params.String.Contents = code.String() 1473 1396 return p.execute("strings/string", w, params) 1474 1397 } 1475 1398
+25 -22
appview/pages/repoinfo/repoinfo.go
··· 1 1 package repoinfo 2 2 3 3 import ( 4 + "fmt" 4 5 "path" 5 6 "slices" 6 7 7 8 "github.com/bluesky-social/indigo/atproto/syntax" 9 + "tangled.org/core/api/tangled" 8 10 "tangled.org/core/appview/models" 9 11 "tangled.org/core/appview/state/userutil" 10 12 ) 11 13 12 - func (r RepoInfo) Owner() string { 14 + func (r RepoInfo) owner() string { 13 15 if r.OwnerHandle != "" { 14 16 return r.OwnerHandle 15 17 } else { ··· 18 20 } 19 21 20 22 func (r RepoInfo) FullName() string { 21 - return path.Join(r.Owner(), r.Name) 23 + return path.Join(r.owner(), r.Name) 22 24 } 23 25 24 - func (r RepoInfo) OwnerWithoutAt() string { 26 + func (r RepoInfo) ownerWithoutAt() string { 25 27 if r.OwnerHandle != "" { 26 28 return r.OwnerHandle 27 29 } else { ··· 30 32 } 31 33 32 34 func (r RepoInfo) FullNameWithoutAt() string { 33 - return path.Join(r.OwnerWithoutAt(), r.Name) 35 + return path.Join(r.ownerWithoutAt(), r.Name) 34 36 } 35 37 36 38 func (r RepoInfo) GetTabs() [][]string { ··· 48 50 return tabs 49 51 } 50 52 53 + func (r RepoInfo) RepoAt() syntax.ATURI { 54 + return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.OwnerDid, tangled.RepoNSID, r.Rkey)) 55 + } 56 + 51 57 type RepoInfo struct { 52 - Name string 53 - Rkey string 54 - OwnerDid string 55 - OwnerHandle string 56 - Description string 57 - Website string 58 - Topics []string 59 - Knot string 60 - Spindle string 61 - RepoAt syntax.ATURI 62 - IsStarred bool 63 - Stats models.RepoStats 64 - Roles RolesInRepo 65 - Source *models.Repo 66 - SourceHandle string 67 - Ref string 68 - DisableFork bool 69 - CurrentDir string 58 + Name string 59 + Rkey string 60 + OwnerDid string 61 + OwnerHandle string 62 + Description string 63 + Website string 64 + Topics []string 65 + Knot string 66 + Spindle string 67 + IsStarred bool 68 + Stats models.RepoStats 69 + Roles RolesInRepo 70 + Source *models.Repo 71 + Ref string 72 + CurrentDir string 70 73 } 71 74 72 75 // each tab on a repo could have some metadata:
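`RepoInfo` no longer carries a stored at-uri; `RepoAt()` derives one from the owner DID, the repo NSID, and the rkey. A tiny sketch, where the DID and rkey are placeholders and the rendered NSID assumes `tangled.RepoNSID` resolves to `sh.tangled.repo`:

```go
// Sketch: the repo at-uri is now computed on demand from RepoInfo fields.
package main

import (
	"fmt"

	"tangled.org/core/appview/pages/repoinfo"
)

func main() {
	info := repoinfo.RepoInfo{
		Name:     "core",
		Rkey:     "3kexamplerkey",   // placeholder record key
		OwnerDid: "did:plc:example", // placeholder DID
	}
	fmt.Println(info.RepoAt())
	// at://did:plc:example/sh.tangled.repo/3kexamplerkey, assuming that NSID value
}
```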
+1 -1
appview/pages/templates/banner.html
··· 30 30 <div class="mx-6"> 31 31 These services may not be fully accessible until upgraded. 32 32 <a class="underline text-red-800 dark:text-red-200" 33 - href="https://tangled.org/@tangled.org/core/tree/master/docs/migrations.md"> 33 + href="https://docs.tangled.org/migrating-knots-spindles.html#migrating-knots-spindles"> 34 34 Click to read the upgrade guide</a>. 35 35 </div> 36 36 </details>
+5
appview/pages/templates/fragments/starBtn-oob.html
··· 1 + {{ define "fragments/starBtn-oob" }} 2 + <div hx-swap-oob='outerHTML:#starBtn[data-star-subject-at="{{ .SubjectAt }}"]'> 3 + {{ template "fragments/starBtn" . }} 4 + </div> 5 + {{ end }}
+26
appview/pages/templates/fragments/starBtn.html
··· 1 + {{ define "fragments/starBtn" }} 2 + {{/* NOTE: this fragment is always replaced with hx-swap-oob */}} 3 + <button 4 + id="starBtn" 5 + class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group" 6 + data-star-subject-at="{{ .SubjectAt }}" 7 + {{ if .IsStarred }} 8 + hx-delete="/star?subject={{ .SubjectAt }}&countHint={{ .StarCount }}" 9 + {{ else }} 10 + hx-post="/star?subject={{ .SubjectAt }}&countHint={{ .StarCount }}" 11 + {{ end }} 12 + 13 + hx-trigger="click" 14 + hx-disabled-elt="#starBtn" 15 + > 16 + {{ if .IsStarred }} 17 + {{ i "star" "w-4 h-4 fill-current" }} 18 + {{ else }} 19 + {{ i "star" "w-4 h-4" }} 20 + {{ end }} 21 + <span class="text-sm"> 22 + {{ .StarCount }} 23 + </span> 24 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 25 + </button> 26 + {{ end }}
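On the Go side this fragment pairs with `StarBtnFragment` and `StarBtnFragmentParams` from this changeset: after a star or unstar, a handler can re-render the button out-of-band so htmx swaps it in place. A hedged sketch; the wrapper function, package, and pages field wiring are assumptions:

```go
// Sketch: respond to POST/DELETE /star by re-rendering the star button
// out-of-band. Only StarBtnFragment and its params come from this changeset;
// the rest is illustrative.
package handlers

import (
	"log"
	"net/http"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"tangled.org/core/appview/pages"
)

func renderStarBtn(p *pages.Pages, w http.ResponseWriter, subject syntax.ATURI, starred bool, count int) {
	err := p.StarBtnFragment(w, pages.StarBtnFragmentParams{
		IsStarred: starred,
		SubjectAt: subject,
		StarCount: count,
	})
	if err != nil {
		log.Printf("failed to render star button: %v", err)
	}
}
```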
+8
appview/pages/templates/fragments/tabSelector.html
··· 2 2 {{ $name := .Name }} 3 3 {{ $all := .Values }} 4 4 {{ $active := .Active }} 5 + {{ $include := .Include }} 5 6 <div class="flex justify-between divide-x divide-gray-200 dark:divide-gray-700 rounded border border-gray-200 dark:border-gray-700 overflow-hidden"> 6 7 {{ $activeTab := "bg-white dark:bg-gray-700 shadow-sm" }} 7 8 {{ $inactiveTab := "bg-gray-100 dark:bg-gray-800 shadow-inner" }} 8 9 {{ range $index, $value := $all }} 9 10 {{ $isActive := eq $value.Key $active }} 10 11 <a href="?{{ $name }}={{ $value.Key }}" 12 + {{ if $include }} 13 + hx-get="?{{ $name }}={{ $value.Key }}" 14 + hx-include="{{ $include }}" 15 + hx-push-url="true" 16 + hx-target="body" 17 + hx-on:htmx:config-request="if(!event.detail.parameters.q) delete event.detail.parameters.q" 18 + {{ end }} 11 19 class="p-2 whitespace-nowrap flex justify-center items-center gap-2 text-sm w-full block hover:no-underline text-center {{ if $isActive }} {{$activeTab }} {{ else }} {{ $inactiveTab }} {{ end }}"> 12 20 {{ if $value.Icon }} 13 21 {{ i $value.Icon "size-4" }}
+22
appview/pages/templates/fragments/tinyAvatarList.html
··· 1 + {{ define "fragments/tinyAvatarList" }} 2 + {{ $all := .all }} 3 + {{ $classes := .classes }} 4 + {{ $ps := take $all 5 }} 5 + <div class="inline-flex items-center -space-x-3"> 6 + {{ $c := "z-50 z-40 z-30 z-20 z-10" }} 7 + {{ range $i, $p := $ps }} 8 + <img 9 + src="{{ tinyAvatar . }}" 10 + alt="" 11 + class="rounded-full size-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0 {{ $classes }}" 12 + /> 13 + {{ end }} 14 + 15 + {{ if gt (len $all) 5 }} 16 + <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm"> 17 + +{{ sub (len $all) 5 }} 18 + </span> 19 + {{ end }} 20 + </div> 21 + {{ end }} 22 +
+23 -7
appview/pages/templates/knots/dashboard.html
··· 1 - {{ define "title" }}{{ .Registration.Domain }} &middot; knots{{ end }} 1 + {{ define "title" }}{{ .Registration.Domain }} &middot; {{ .Tab }} settings{{ end }} 2 2 3 3 {{ define "content" }} 4 - <div class="px-6 py-4"> 4 + <div class="p-6"> 5 + <p class="text-xl font-bold dark:text-white">Settings</p> 6 + </div> 7 + <div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 8 + <section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6"> 9 + <div class="col-span-1"> 10 + {{ template "user/settings/fragments/sidebar" . }} 11 + </div> 12 + <div class="col-span-1 md:col-span-3 flex flex-col gap-6"> 13 + {{ template "knotDash" . }} 14 + </div> 15 + </section> 16 + </div> 17 + {{ end }} 18 + 19 + {{ define "knotDash" }} 20 + <div> 5 21 <div class="flex justify-between items-center"> 6 - <h1 class="text-xl font-bold dark:text-white">{{ .Registration.Domain }}</h1> 22 + <h2 class="text-sm pb-2 uppercase font-bold">{{ .Tab }} &middot; {{ .Registration.Domain }}</h2> 7 23 <div id="right-side" class="flex gap-2"> 8 24 {{ $style := "px-2 py-1 rounded flex items-center flex-shrink-0 gap-2" }} 9 25 {{ $isOwner := and .LoggedInUser (eq .LoggedInUser.Did .Registration.ByDid) }} ··· 35 51 </div> 36 52 37 53 {{ if .Members }} 38 - <section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 54 + <section class="bg-white dark:bg-gray-800 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 39 55 <div class="flex flex-col gap-2"> 40 56 {{ block "member" . }} {{ end }} 41 57 </div> ··· 79 95 <button 80 96 class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 81 97 title="Delete knot" 82 - hx-delete="/knots/{{ .Domain }}" 98 + hx-delete="/settings/knots/{{ .Domain }}" 83 99 hx-swap="outerHTML" 84 100 hx-confirm="Are you sure you want to delete the knot '{{ .Domain }}'?" 85 101 hx-headers='{"shouldRedirect": "true"}' ··· 95 111 <button 96 112 class="btn gap-2 group" 97 113 title="Retry knot verification" 98 - hx-post="/knots/{{ .Domain }}/retry" 114 + hx-post="/settings/knots/{{ .Domain }}/retry" 99 115 hx-swap="none" 100 116 hx-headers='{"shouldRefresh": "true"}' 101 117 > ··· 113 129 <button 114 130 class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 115 131 title="Remove member" 116 - hx-post="/knots/{{ $root.Registration.Domain }}/remove" 132 + hx-post="/settings/knots/{{ $root.Registration.Domain }}/remove" 117 133 hx-swap="none" 118 134 hx-vals='{"member": "{{$member}}" }' 119 135 hx-confirm="Are you sure you want to remove {{ $memberHandle }} from this knot?"
+18 -13
appview/pages/templates/knots/fragments/addMemberModal.html
··· 13 13 <div 14 14 id="add-member-{{ .Id }}" 15 15 popover 16 - class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50"> 16 + class=" 17 + bg-white dark:bg-gray-800 border border-gray-200 dark:border-gray-700 dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50 18 + w-full md:w-96 p-4 rounded drop-shadow overflow-visible"> 17 19 {{ block "addKnotMemberPopover" . }} {{ end }} 18 20 </div> 19 21 {{ end }} 20 22 21 23 {{ define "addKnotMemberPopover" }} 22 24 <form 23 - hx-post="/knots/{{ .Domain }}/add" 25 + hx-post="/settings/knots/{{ .Domain }}/add" 24 26 hx-indicator="#spinner" 25 27 hx-swap="none" 26 28 class="flex flex-col gap-2" ··· 29 31 ADD MEMBER 30 32 </label> 31 33 <p class="text-sm text-gray-500 dark:text-gray-400">Members can create repositories and run workflows on this knot.</p> 32 - <input 33 - autocapitalize="none" 34 - autocorrect="off" 35 - autocomplete="off" 36 - type="text" 37 - id="member-did-{{ .Id }}" 38 - name="member" 39 - required 40 - placeholder="foo.bsky.social" 41 - /> 34 + <actor-typeahead> 35 + <input 36 + autocapitalize="none" 37 + autocorrect="off" 38 + autocomplete="off" 39 + type="text" 40 + id="member-did-{{ .Id }}" 41 + name="member" 42 + required 43 + placeholder="user.tngl.sh" 44 + class="w-full" 45 + /> 46 + </actor-typeahead> 42 47 <div class="flex gap-2 pt-2"> 43 48 <button 44 49 type="button" ··· 57 62 </div> 58 63 <div id="add-member-error-{{ .Id }}" class="text-red-500 dark:text-red-400"></div> 59 64 </form> 60 - {{ end }} 65 + {{ end }}
+3 -3
appview/pages/templates/knots/fragments/knotListing.html
··· 7 7 8 8 {{ define "knotLeftSide" }} 9 9 {{ if .Registered }} 10 - <a href="/knots/{{ .Domain }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]"> 10 + <a href="/settings/knots/{{ .Domain }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]"> 11 11 {{ i "hard-drive" "w-4 h-4" }} 12 12 <span class="hover:underline"> 13 13 {{ .Domain }} ··· 56 56 <button 57 57 class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 58 58 title="Delete knot" 59 - hx-delete="/knots/{{ .Domain }}" 59 + hx-delete="/settings/knots/{{ .Domain }}" 60 60 hx-swap="outerHTML" 61 61 hx-target="#knot-{{.Id}}" 62 62 hx-confirm="Are you sure you want to delete the knot '{{ .Domain }}'?" ··· 72 72 <button 73 73 class="btn gap-2 group" 74 74 title="Retry knot verification" 75 - hx-post="/knots/{{ .Domain }}/retry" 75 + hx-post="/settings/knots/{{ .Domain }}/retry" 76 76 hx-swap="none" 77 77 hx-target="#knot-{{.Id}}" 78 78 >
+42 -11
appview/pages/templates/knots/index.html
··· 1 - {{ define "title" }}knots{{ end }} 1 + {{ define "title" }}{{ .Tab }} settings{{ end }} 2 2 3 3 {{ define "content" }} 4 - <div class="px-6 py-4 flex items-center justify-between gap-4 align-bottom"> 5 - <h1 class="text-xl font-bold dark:text-white">Knots</h1> 6 - <span class="flex items-center gap-1"> 7 - {{ i "book" "w-3 h-3" }} 8 - <a href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">docs</a> 9 - </span> 10 - </div> 4 + <div class="p-6"> 5 + <p class="text-xl font-bold dark:text-white">Settings</p> 6 + </div> 7 + <div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 8 + <section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6"> 9 + <div class="col-span-1"> 10 + {{ template "user/settings/fragments/sidebar" . }} 11 + </div> 12 + <div class="col-span-1 md:col-span-3 flex flex-col gap-6"> 13 + {{ template "knotsList" . }} 14 + </div> 15 + </section> 16 + </div> 17 + {{ end }} 18 + 19 + {{ define "knotsList" }} 20 + <div class="grid grid-cols-1 md:grid-cols-3 gap-4 items-center"> 21 + <div class="col-span-1 md:col-span-2"> 22 + <h2 class="text-sm pb-2 uppercase font-bold">Knots</h2> 23 + {{ block "about" . }} {{ end }} 24 + </div> 25 + <div class="col-span-1 md:col-span-1 md:justify-self-end"> 26 + {{ template "docsButton" . }} 27 + </div> 28 + </div> 11 29 12 - <section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 30 + <section> 13 31 <div class="flex flex-col gap-6"> 14 - {{ block "about" . }} {{ end }} 15 32 {{ block "list" . }} {{ end }} 16 33 {{ block "register" . }} {{ end }} 17 34 </div> ··· 50 67 <h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a knot</h2> 51 68 <p class="mb-2 dark:text-gray-300">Enter the hostname of your knot to get started.</p> 52 69 <form 53 - hx-post="/knots/register" 70 + hx-post="/settings/knots/register" 54 71 class="max-w-2xl mb-2 space-y-4" 55 72 hx-indicator="#register-button" 56 73 hx-swap="none" ··· 84 101 85 102 </section> 86 103 {{ end }} 104 + 105 + {{ define "docsButton" }} 106 + <a 107 + class="btn flex items-center gap-2" 108 + href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide"> 109 + {{ i "book" "size-4" }} 110 + docs 111 + </a> 112 + <div 113 + id="add-email-modal" 114 + popover 115 + class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50"> 116 + </div> 117 + {{ end }}
+1
appview/pages/templates/layouts/base.html
··· 9 9 10 10 <script defer src="/static/htmx.min.js"></script> 11 11 <script defer src="/static/htmx-ext-ws.min.js"></script> 12 + <script defer src="/static/actor-typeahead.js" type="module"></script> 12 13 13 14 <!-- preconnect to image cdn --> 14 15 <link rel="preconnect" href="https://avatar.tangled.sh" />
+2 -2
appview/pages/templates/layouts/fragments/footer.html
··· 26 26 <div class="flex flex-col gap-1"> 27 27 <div class="{{ $headerStyle }}">resources</div> 28 28 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 29 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 29 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 30 30 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 31 31 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 32 32 </div> ··· 73 73 <div class="flex flex-col gap-1"> 74 74 <div class="{{ $headerStyle }}">resources</div> 75 75 <a href="https://blog.tangled.org" class="{{ $linkStyle }}" target="_blank" rel="noopener noreferrer">{{ i "book-open" $iconStyle }} blog</a> 76 - <a href="https://tangled.org/@tangled.org/core/tree/master/docs" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 76 + <a href="https://docs.tangled.org" class="{{ $linkStyle }}">{{ i "book" $iconStyle }} docs</a> 77 77 <a href="https://tangled.org/@tangled.org/core" class="{{ $linkStyle }}">{{ i "code" $iconStyle }} source</a> 78 78 <a href="https://tangled.org/brand" class="{{ $linkStyle }}">{{ i "paintbrush" $iconStyle }} brand</a> 79 79 </div>
-2
appview/pages/templates/layouts/fragments/topbar.html
··· 61 61 <a href="/{{ $user }}">profile</a> 62 62 <a href="/{{ $user }}?tab=repos">repositories</a> 63 63 <a href="/{{ $user }}?tab=strings">strings</a> 64 - <a href="/knots">knots</a> 65 - <a href="/spindles">spindles</a> 66 64 <a href="/settings">settings</a> 67 65 <a href="#" 68 66 hx-post="/logout"
+8 -7
appview/pages/templates/layouts/profilebase.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }}{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - {{ $avatarUrl := fullAvatar .Card.UserHandle }} 5 - <meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" /> 4 + {{ $handle := resolve .Card.UserDid }} 5 + {{ $avatarUrl := fullAvatar $handle }} 6 + <meta property="og:title" content="{{ $handle }}" /> 6 7 <meta property="og:type" content="profile" /> 7 - <meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}?tab={{ .Active }}" /> 8 - <meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" /> 8 + <meta property="og:url" content="https://tangled.org/{{ $handle }}?tab={{ .Active }}" /> 9 + <meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" /> 9 10 <meta property="og:image" content="{{ $avatarUrl }}" /> 10 11 <meta property="og:image:width" content="512" /> 11 12 <meta property="og:image:height" content="512" /> 12 13 13 14 <meta name="twitter:card" content="summary" /> 14 - <meta name="twitter:title" content="{{ or .Card.UserHandle .Card.UserDid }}" /> 15 - <meta name="twitter:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" /> 15 + <meta name="twitter:title" content="{{ $handle }}" /> 16 + <meta name="twitter:description" content="{{ or .Card.Profile.Description $handle }}" /> 16 17 <meta name="twitter:image" content="{{ $avatarUrl }}" /> 17 18 {{ end }} 18 19
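The profile meta tags now call `resolve .Card.UserDid` instead of falling back with `or .Card.UserHandle .Card.UserDid`. A minimal sketch of what such a `resolve` template helper could look like, assuming it maps a DID to a handle and falls back to the DID when no handle is known; the in-memory map and the example DID are hypothetical stand-ins for the appview's real identity resolver.

```go
// Sketch of a `resolve` helper registered via template.FuncMap, not the real one.
package main

import (
	"fmt"
	"html/template"
	"os"
)

func main() {
	handles := map[string]string{
		"did:plc:example123": "alice.tngl.sh", // hypothetical entry
	}

	funcs := template.FuncMap{
		"resolve": func(did string) string {
			if h, ok := handles[did]; ok {
				return h
			}
			return did // fall back to the DID, mirroring the old `or handle did` behaviour
		},
	}

	tmpl := template.Must(template.New("title").Funcs(funcs).Parse(`{{ resolve .UserDid }}`))
	if err := tmpl.Execute(os.Stdout, struct{ UserDid string }{"did:plc:example123"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println()
}
```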
+4 -1
appview/pages/templates/layouts/repobase.html
··· 49 49 </div> 50 50 51 51 <div class="w-full sm:w-fit grid grid-cols-3 gap-2 z-auto"> 52 - {{ template "repo/fragments/repoStar" .RepoInfo }} 52 + {{ template "fragments/starBtn" 53 + (dict "SubjectAt" .RepoInfo.RepoAt 54 + "IsStarred" .RepoInfo.IsStarred 55 + "StarCount" .RepoInfo.Stats.StarCount) }} 53 56 <a 54 57 class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group" 55 58 hx-boost="true"
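The star button is now a shared fragment invoked with named arguments built by `dict` (and other fragments in this diff take positional arguments via `list`). A generic sketch of how such helpers are commonly registered with `html/template`; this shows the usual pattern, not necessarily this codebase's exact definitions.

```go
// Sketch of dict/list template helpers and how the fragment arguments flow through them.
package main

import (
	"fmt"
	"html/template"
	"os"
)

func templateHelpers() template.FuncMap {
	return template.FuncMap{
		// dict "Key1" v1 "Key2" v2 ... -> map[string]any for named template args
		"dict": func(pairs ...any) (map[string]any, error) {
			if len(pairs)%2 != 0 {
				return nil, fmt.Errorf("dict: odd number of arguments")
			}
			m := make(map[string]any, len(pairs)/2)
			for i := 0; i < len(pairs); i += 2 {
				key, ok := pairs[i].(string)
				if !ok {
					return nil, fmt.Errorf("dict: key %v is not a string", pairs[i])
				}
				m[key] = pairs[i+1]
			}
			return m, nil
		},
		// list a b c -> []any, used e.g. for (list .Diff .DiffOpts)
		"list": func(items ...any) []any { return items },
	}
}

func main() {
	tmpl := template.Must(template.New("demo").Funcs(templateHelpers()).Parse(
		`{{ $args := dict "StarCount" 3 "IsStarred" true }}stars={{ $args.StarCount }} starred={{ $args.IsStarred }}`))
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println()
}
```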
+2
appview/pages/templates/notifications/fragments/item.html
··· 54 54 reopened a pull request 55 55 {{ else if eq .Type "followed" }} 56 56 followed you 57 + {{ else if eq .Type "user_mentioned" }} 58 + mentioned you 57 59 {{ else }} 58 60 {{ end }} 59 61 {{ end }}
+64 -39
appview/pages/templates/repo/blob.html
··· 11 11 {{ end }} 12 12 13 13 {{ define "repoContent" }} 14 - {{ $lines := split .Contents }} 15 - {{ $tot_lines := len $lines }} 16 - {{ $tot_chars := len (printf "%d" $tot_lines) }} 17 - {{ $code_number_style := "text-gray-400 dark:text-gray-500 left-0 bg-white dark:bg-gray-800 text-right mr-6 select-none inline-block w-12" }} 18 14 {{ $linkstyle := "no-underline hover:underline" }} 19 15 <div class="pb-2 mb-3 text-base border-b border-gray-200 dark:border-gray-700"> 20 16 <div class="flex flex-col md:flex-row md:justify-between gap-2"> ··· 36 32 </div> 37 33 <div id="file-info" class="text-gray-500 dark:text-gray-400 text-xs md:text-sm flex flex-wrap items-center gap-1 md:gap-0"> 38 34 <span>at <a href="/{{ .RepoInfo.FullName }}/tree/{{ .Ref }}">{{ .Ref }}</a></span> 39 - <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 40 - <span>{{ .Lines }} lines</span> 41 - <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 42 - <span>{{ byteFmt .SizeHint }}</span> 43 - <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 44 - <a href="/{{ .RepoInfo.FullName }}/raw/{{ .Ref }}/{{ .Path }}">view raw</a> 45 - {{ if .RenderToggle }} 46 - <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 47 - <a 48 - href="/{{ .RepoInfo.FullName }}/blob/{{ .Ref }}/{{ .Path }}?code={{ .ShowRendered }}" 49 - hx-boost="true" 50 - >view {{ if .ShowRendered }}code{{ else }}rendered{{ end }}</a> 35 + 36 + {{ if .BlobView.ShowingText }} 37 + <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 38 + <span>{{ .Lines }} lines</span> 39 + {{ end }} 40 + 41 + {{ if .BlobView.SizeHint }} 42 + <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 43 + <span>{{ byteFmt .BlobView.SizeHint }}</span> 44 + {{ end }} 45 + 46 + {{ if .BlobView.HasRawView }} 47 + <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 48 + <a href="/{{ .RepoInfo.FullName }}/raw/{{ .Ref }}/{{ .Path }}">view raw</a> 49 + {{ end }} 50 + 51 + {{ if .BlobView.ShowToggle }} 52 + <span class="select-none px-1 md:px-2 [&:before]:content-['·']"></span> 53 + <a href="/{{ .RepoInfo.FullName }}/blob/{{ .Ref }}/{{ .Path }}?code={{ .BlobView.ShowingRendered }}" hx-boost="true"> 54 + view {{ if .BlobView.ShowingRendered }}code{{ else }}rendered{{ end }} 55 + </a> 51 56 {{ end }} 52 57 </div> 53 58 </div> 54 59 </div> 55 - {{ if and .IsBinary .Unsupported }} 56 - <p class="text-center text-gray-400 dark:text-gray-500"> 57 - Previews are not supported for this file type. 58 - </p> 59 - {{ else if .IsBinary }} 60 - <div class="text-center"> 61 - {{ if .IsImage }} 62 - <img src="{{ .ContentSrc }}" 63 - alt="{{ .Path }}" 64 - class="max-w-full h-auto mx-auto border border-gray-200 dark:border-gray-700 rounded" /> 65 - {{ else if .IsVideo }} 66 - <video controls class="max-w-full h-auto mx-auto border border-gray-200 dark:border-gray-700 rounded"> 67 - <source src="{{ .ContentSrc }}"> 68 - Your browser does not support the video tag. 69 - </video> 70 - {{ end }} 71 - </div> 72 - {{ else }} 73 - <div class="overflow-auto relative"> 74 - {{ if .ShowRendered }} 75 - <div id="blob-contents" class="prose dark:prose-invert">{{ .RenderedContents }}</div> 60 + {{ if .BlobView.IsUnsupported }} 61 + <p class="text-center text-gray-400 dark:text-gray-500"> 62 + Previews are not supported for this file type. 
63 + </p> 64 + {{ else if .BlobView.ContentType.IsSubmodule }} 65 + <p class="text-center text-gray-400 dark:text-gray-500"> 66 + This directory is a git submodule of <a href="{{ .BlobView.ContentSrc }}">{{ .BlobView.ContentSrc }}</a>. 67 + </p> 68 + {{ else if .BlobView.ContentType.IsImage }} 69 + <div class="text-center"> 70 + <img src="{{ .BlobView.ContentSrc }}" 71 + alt="{{ .Path }}" 72 + class="max-w-full h-auto mx-auto border border-gray-200 dark:border-gray-700 rounded" /> 73 + </div> 74 + {{ else if .BlobView.ContentType.IsVideo }} 75 + <div class="text-center"> 76 + <video controls class="max-w-full h-auto mx-auto border border-gray-200 dark:border-gray-700 rounded"> 77 + <source src="{{ .BlobView.ContentSrc }}"> 78 + Your browser does not support the video tag. 79 + </video> 80 + </div> 81 + {{ else if .BlobView.ContentType.IsSvg }} 82 + <div class="overflow-auto relative"> 83 + {{ if .BlobView.ShowingRendered }} 84 + <div class="text-center"> 85 + <img src="{{ .BlobView.ContentSrc }}" 86 + alt="{{ .Path }}" 87 + class="max-w-full h-auto mx-auto border border-gray-200 dark:border-gray-700 rounded" /> 88 + </div> 89 + {{ else }} 90 + <div id="blob-contents" class="whitespace-pre peer-target:bg-yellow-200 dark:peer-target:bg-yellow-900">{{ code .BlobView.Contents .Path | escapeHtml }}</div> 91 + {{ end }} 92 + </div> 93 + {{ else if .BlobView.ContentType.IsMarkup }} 94 + <div class="overflow-auto relative"> 95 + {{ if .BlobView.ShowingRendered }} 96 + <div id="blob-contents" class="prose dark:prose-invert">{{ .BlobView.Contents | readme }}</div> 76 97 {{ else }} 77 - <div id="blob-contents" class="whitespace-pre peer-target:bg-yellow-200 dark:peer-target:bg-yellow-900">{{ $.Contents | escapeHtml }}</div> 98 + <div id="blob-contents" class="whitespace-pre peer-target:bg-yellow-200 dark:peer-target:bg-yellow-900">{{ code .BlobView.Contents .Path | escapeHtml }}</div> 78 99 {{ end }} 79 - </div> 100 + </div> 101 + {{ else if .BlobView.ContentType.IsCode }} 102 + <div class="overflow-auto relative"> 103 + <div id="blob-contents" class="whitespace-pre peer-target:bg-yellow-200 dark:peer-target:bg-yellow-900">{{ code .BlobView.Contents .Path | escapeHtml }}</div> 104 + </div> 80 105 {{ end }} 81 106 {{ template "fragments/multiline-select" }} 82 107 {{ end }}
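The reworked blob template reads everything it needs from a single `.BlobView` value. The template only tells us which names it dereferences, not whether each is a field or a method, nor its type; the sketch below is one shape that would satisfy the template, inferred from the diff rather than copied from the appview.

```go
// Inferred sketch of the data repo/blob.html expects; names come from the template,
// types and the package name are assumptions.
package pages

// BlobContentType classifies what the blob viewer should do with a path.
type BlobContentType int

const (
	ContentCode BlobContentType = iota
	ContentMarkup
	ContentImage
	ContentVideo
	ContentSvg
	ContentSubmodule
)

func (c BlobContentType) IsCode() bool      { return c == ContentCode }
func (c BlobContentType) IsMarkup() bool    { return c == ContentMarkup }
func (c BlobContentType) IsImage() bool     { return c == ContentImage }
func (c BlobContentType) IsVideo() bool     { return c == ContentVideo }
func (c BlobContentType) IsSvg() bool       { return c == ContentSvg }
func (c BlobContentType) IsSubmodule() bool { return c == ContentSubmodule }

// BlobView carries everything the template needs to render one file.
type BlobView struct {
	ContentType     BlobContentType
	Contents        string // raw text for code/markup views
	ContentSrc      string // URL for images, videos, SVG previews, submodule targets
	SizeHint        uint64 // bytes; zero hides the size separator
	ShowingText     bool   // text view: show the line count
	ShowingRendered bool   // rendered markup/SVG instead of source
	ShowToggle      bool   // offer the "view code" / "view rendered" link
	HasRawView      bool   // offer the "view raw" link
	IsUnsupported   bool   // binary we cannot preview
}
```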
+35 -10
appview/pages/templates/repo/commit.html
··· 25 25 </div> 26 26 27 27 <div class="flex flex-wrap items-center space-x-2"> 28 - <p class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-300"> 29 - {{ $did := index $.EmailToDid $commit.Author.Email }} 30 - 31 - {{ if $did }} 32 - {{ template "user/fragments/picHandleLink" $did }} 33 - {{ else }} 34 - <a href="mailto:{{ $commit.Author.Email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $commit.Author.Name }}</a> 35 - {{ end }} 28 + <p class="flex flex-wrap items-center gap-1 text-sm text-gray-500 dark:text-gray-300"> 29 + {{ template "attribution" . }} 36 30 37 31 <span class="px-1 select-none before:content-['\00B7']"></span> 38 - {{ template "repo/fragments/time" $commit.Author.When }} 32 + {{ template "repo/fragments/time" $commit.Committer.When }} 39 33 <span class="px-1 select-none before:content-['\00B7']"></span> 40 34 41 35 <a href="/{{ $repo }}/commit/{{ $commit.This }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ slice $commit.This 0 8 }}</a> ··· 79 73 </section> 80 74 {{end}} 81 75 76 + {{ define "attribution" }} 77 + {{ $commit := .Diff.Commit }} 78 + {{ $showCommitter := true }} 79 + {{ if eq $commit.Author.Email $commit.Committer.Email }} 80 + {{ $showCommitter = false }} 81 + {{ end }} 82 + 83 + {{ if $showCommitter }} 84 + authored by {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid) }} 85 + {{ range $commit.CoAuthors }} 86 + {{ template "attributedUser" (list .Email .Name $.EmailToDid) }} 87 + {{ end }} 88 + and committed by {{ template "attributedUser" (list $commit.Committer.Email $commit.Committer.Name $.EmailToDid) }} 89 + {{ else }} 90 + {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid )}} 91 + {{ end }} 92 + {{ end }} 93 + 94 + {{ define "attributedUser" }} 95 + {{ $email := index . 0 }} 96 + {{ $name := index . 1 }} 97 + {{ $map := index . 2 }} 98 + {{ $did := index $map $email }} 99 + 100 + {{ if $did }} 101 + {{ template "user/fragments/picHandleLink" $did }} 102 + {{ else }} 103 + <a href="mailto:{{ $email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $name }}</a> 104 + {{ end }} 105 + {{ end }} 106 + 82 107 {{ define "topbarLayout" }} 83 108 <header class="col-span-full" style="z-index: 20;"> 84 109 {{ template "layouts/fragments/topbar" . }} ··· 111 136 {{ end }} 112 137 113 138 {{ define "contentAfter" }} 114 - {{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }} 139 + {{ template "repo/fragments/diff" (list .Diff .DiffOpts) }} 115 140 {{end}} 116 141 117 142 {{ define "contentAfterLeft" }}
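The new `attribution` block distinguishes author from committer and iterates `$commit.CoAuthors`. Co-authors are conventionally declared with `Co-authored-by: Name <email>` trailers in the commit message; the following is a standalone sketch of extracting them under that convention, not necessarily how the knotserver actually populates the field.

```go
// Sketch: parse Co-authored-by trailers from a commit message.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

type CoAuthor struct {
	Name  string
	Email string
}

var coAuthorRe = regexp.MustCompile(`(?i)^co-authored-by:\s*(.+?)\s*<([^>]+)>\s*$`)

// ParseCoAuthors scans each line of a commit message for Co-authored-by trailers.
func ParseCoAuthors(message string) []CoAuthor {
	var out []CoAuthor
	for _, line := range strings.Split(message, "\n") {
		if m := coAuthorRe.FindStringSubmatch(strings.TrimSpace(line)); m != nil {
			out = append(out, CoAuthor{Name: m[1], Email: m[2]})
		}
	}
	return out
}

func main() {
	msg := "repo: rework blob view\n\nCo-authored-by: Alice <alice@example.com>\nCo-authored-by: Bob <bob@example.com>\n"
	for _, ca := range ParseCoAuthors(msg) {
		fmt.Printf("%s <%s>\n", ca.Name, ca.Email)
	}
}
```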
+2 -2
appview/pages/templates/repo/compare/compare.html
··· 17 17 {{ end }} 18 18 19 19 {{ define "mainLayout" }} 20 - <div class="px-1 col-span-full flex flex-col gap-4"> 20 + <div class="px-1 flex-grow col-span-full flex flex-col gap-4"> 21 21 {{ block "contentLayout" . }} 22 22 {{ block "content" . }}{{ end }} 23 23 {{ end }} ··· 42 42 {{ end }} 43 43 44 44 {{ define "contentAfter" }} 45 - {{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }} 45 + {{ template "repo/fragments/diff" (list .Diff .DiffOpts) }} 46 46 {{end}} 47 47 48 48 {{ define "contentAfterLeft" }}
+2 -2
appview/pages/templates/repo/empty.html
··· 26 26 {{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }} 27 27 {{ $knot := .RepoInfo.Knot }} 28 28 {{ if eq $knot "knot1.tangled.sh" }} 29 - {{ $knot = "tangled.sh" }} 29 + {{ $knot = "tangled.org" }} 30 30 {{ end }} 31 31 <div class="w-full flex place-content-center"> 32 32 <div class="py-6 w-fit flex flex-col gap-4"> ··· 35 35 36 36 <p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p> 37 37 <p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p> 38 - <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p> 38 + <p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ resolve .RepoInfo.OwnerDid }}/{{ .RepoInfo.Name }}</code></p> 39 39 <p><span class="{{$bullet}}">4</span>Push!</p> 40 40 </div> 41 41 </div>
+2 -1
appview/pages/templates/repo/fork.html
··· 25 25 value="{{ . }}" 26 26 class="mr-2" 27 27 id="domain-{{ . }}" 28 + {{if eq (len $.Knots) 1}}checked{{end}} 28 29 /> 29 30 <label for="domain-{{ . }}" class="dark:text-white">{{ . }}</label> 30 31 </div> ··· 33 34 {{ end }} 34 35 </div> 35 36 </div> 36 - <p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/knots" class="underline">Learn how to register your own knot.</a></p> 37 + <p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/settings/knots" class="underline">Learn how to register your own knot.</a></p> 37 38 </fieldset> 38 39 39 40 <div class="space-y-2">
+49
appview/pages/templates/repo/fragments/backlinks.html
··· 1 + {{ define "repo/fragments/backlinks" }} 2 + {{ if .Backlinks }} 3 + <div id="at-uri-panel" class="px-2 md:px-0"> 4 + <div> 5 + <span class="text-sm py-1 font-bold text-gray-500 dark:text-gray-400">Referenced by</span> 6 + </div> 7 + <ul> 8 + {{ range .Backlinks }} 9 + <li> 10 + {{ $repoOwner := resolve .Handle }} 11 + {{ $repoName := .Repo }} 12 + {{ $repoUrl := printf "%s/%s" $repoOwner $repoName }} 13 + <div class="flex flex-col"> 14 + <div class="flex gap-2 items-center"> 15 + {{ if .State.IsClosed }} 16 + <span class="text-gray-500 dark:text-gray-400"> 17 + {{ i "ban" "size-3" }} 18 + </span> 19 + {{ else if eq .Kind.String "issues" }} 20 + <span class="text-green-600 dark:text-green-500"> 21 + {{ i "circle-dot" "size-3" }} 22 + </span> 23 + {{ else if .State.IsOpen }} 24 + <span class="text-green-600 dark:text-green-500"> 25 + {{ i "git-pull-request" "size-3" }} 26 + </span> 27 + {{ else if .State.IsMerged }} 28 + <span class="text-purple-600 dark:text-purple-500"> 29 + {{ i "git-merge" "size-3" }} 30 + </span> 31 + {{ else }} 32 + <span class="text-gray-600 dark:text-gray-300"> 33 + {{ i "git-pull-request-closed" "size-3" }} 34 + </span> 35 + {{ end }} 36 + <a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a> 37 + </div> 38 + {{ if not (eq $.RepoInfo.FullName $repoUrl) }} 39 + <div> 40 + <span>on <a href="/{{ $repoUrl }}">{{ $repoUrl }}</a></span> 41 + </div> 42 + {{ end }} 43 + </div> 44 + </li> 45 + {{ end }} 46 + </ul> 47 + </div> 48 + {{ end }} 49 + {{ end }}
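The new backlinks fragment dereferences `.Handle`, `.Repo`, `.Kind.String`, `.State.IsClosed/IsOpen/IsMerged`, `.SubjectId`, and `.Title`, and uses the item itself as the link target (`href="{{ . }}"`), which suggests a `String` method producing an in-app URL. Below is an inferred shape that would satisfy the template; the concrete types, constants, and URL format are guesses, not the appview's real model.

```go
// Inferred sketch of a backlink item for repo/fragments/backlinks.html.
package pages

import "fmt"

type BacklinkKind int

const (
	KindIssue BacklinkKind = iota
	KindPull
)

func (k BacklinkKind) String() string {
	if k == KindIssue {
		return "issues"
	}
	return "pulls"
}

type BacklinkState int

const (
	StateOpen BacklinkState = iota
	StateClosed
	StateMerged
)

func (s BacklinkState) IsOpen() bool   { return s == StateOpen }
func (s BacklinkState) IsClosed() bool { return s == StateClosed }
func (s BacklinkState) IsMerged() bool { return s == StateMerged }

// Backlink is one issue or pull that references the current subject.
type Backlink struct {
	Handle    string // owner handle or DID, fed through `resolve` in the template
	Repo      string // repository name
	Kind      BacklinkKind
	State     BacklinkState
	SubjectId int // issue/pull number, rendered as #N
	Title     string
}

// String builds the link target used by href="{{ . }}" (format is a guess).
func (b Backlink) String() string {
	return fmt.Sprintf("/%s/%s/%s/%d", b.Handle, b.Repo, b.Kind, b.SubjectId)
}
```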
+3 -2
appview/pages/templates/repo/fragments/cloneDropdown.html
··· 43 43 44 44 <!-- SSH Clone --> 45 45 <div class="mb-3"> 46 + {{ $repoOwnerHandle := resolve .RepoInfo.OwnerDid }} 46 47 <label class="block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1">SSH</label> 47 48 <div class="flex items-center border border-gray-300 dark:border-gray-600 rounded"> 48 49 <code 49 50 class="flex-1 px-3 py-2 text-sm bg-gray-50 dark:bg-gray-700 text-gray-900 dark:text-gray-100 rounded-l select-all cursor-pointer whitespace-nowrap overflow-x-auto" 50 51 onclick="window.getSelection().selectAllChildren(this)" 51 - data-url="git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}" 52 - >git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code> 52 + data-url="git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}" 53 + >git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}</code> 53 54 <button 54 55 onclick="copyToClipboard(this, this.previousElementSibling.getAttribute('data-url'))" 55 56 class="px-3 py-2 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200 border-l border-gray-300 dark:border-gray-600"
+3 -4
appview/pages/templates/repo/fragments/diff.html
··· 1 1 {{ define "repo/fragments/diff" }} 2 - {{ $repo := index . 0 }} 3 - {{ $diff := index . 1 }} 4 - {{ $opts := index . 2 }} 2 + {{ $diff := index . 0 }} 3 + {{ $opts := index . 1 }} 5 4 6 5 {{ $commit := $diff.Commit }} 7 6 {{ $diff := $diff.Diff }} ··· 18 17 {{ else }} 19 18 {{ range $idx, $hunk := $diff }} 20 19 {{ with $hunk }} 21 - <details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 20 + <details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}"> 22 21 <summary class="list-none cursor-pointer sticky top-0"> 23 22 <div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between"> 24 23 <div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
+15 -1
appview/pages/templates/repo/fragments/editLabelPanel.html
··· 170 170 {{ $fieldName := $def.AtUri }} 171 171 {{ $valueType := $def.ValueType }} 172 172 {{ $value := .value }} 173 + 173 174 {{ if $valueType.IsDidFormat }} 174 175 {{ $value = trimPrefix (resolve .value) "@" }} 176 + <actor-typeahead> 177 + <input 178 + autocapitalize="none" 179 + autocorrect="off" 180 + autocomplete="off" 181 + placeholder="user.tngl.sh" 182 + value="{{$value}}" 183 + name="{{$fieldName}}" 184 + type="text" 185 + class="p-1 w-full text-sm" 186 + /> 187 + </actor-typeahead> 188 + {{ else }} 189 + <input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}"> 175 190 {{ end }} 176 - <input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}"> 177 191 {{ end }} 178 192 179 193 {{ define "nullTypeInput" }}
+1 -16
appview/pages/templates/repo/fragments/participants.html
··· 6 6 <span class="font-bold text-gray-500 dark:text-gray-400 capitalize">Participants</span> 7 7 <span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 ml-1">{{ len $all }}</span> 8 8 </div> 9 - <div class="flex items-center -space-x-3 mt-2"> 10 - {{ $c := "z-50 z-40 z-30 z-20 z-10" }} 11 - {{ range $i, $p := $ps }} 12 - <img 13 - src="{{ tinyAvatar . }}" 14 - alt="" 15 - class="rounded-full h-8 w-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0" 16 - /> 17 - {{ end }} 18 - 19 - {{ if gt (len $all) 5 }} 20 - <span class="pl-4 text-gray-500 dark:text-gray-400 text-sm"> 21 - +{{ sub (len $all) 5 }} 22 - </span> 23 - {{ end }} 24 - </div> 9 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "w-8 h-8") }} 25 10 </div> 26 11 {{ end }}
-26
appview/pages/templates/repo/fragments/repoStar.html
··· 1 - {{ define "repo/fragments/repoStar" }} 2 - <button 3 - id="starBtn" 4 - class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group" 5 - {{ if .IsStarred }} 6 - hx-delete="/star?subject={{ .RepoAt }}&countHint={{ .Stats.StarCount }}" 7 - {{ else }} 8 - hx-post="/star?subject={{ .RepoAt }}&countHint={{ .Stats.StarCount }}" 9 - {{ end }} 10 - 11 - hx-trigger="click" 12 - hx-target="this" 13 - hx-swap="outerHTML" 14 - hx-disabled-elt="#starBtn" 15 - > 16 - {{ if .IsStarred }} 17 - {{ i "star" "w-4 h-4 fill-current" }} 18 - {{ else }} 19 - {{ i "star" "w-4 h-4" }} 20 - {{ end }} 21 - <span class="text-sm"> 22 - {{ .Stats.StarCount }} 23 - </span> 24 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 25 - </button> 26 - {{ end }}
+35 -35
appview/pages/templates/repo/fragments/splitDiff.html
··· 3 3 {{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}} 4 4 {{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}} 5 5 {{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}} 6 - {{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 6 + {{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 7 7 {{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}} 8 8 {{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}} 9 9 {{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}} 10 10 {{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}} 11 11 {{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}} 12 12 <div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700"> 13 - <pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 13 + <div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 14 14 {{- range .LeftLines -}} 15 15 {{- if .IsEmpty -}} 16 - <div class="{{ $emptyStyle }} {{ $containerStyle }}"> 17 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div> 18 - <div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div> 19 - <div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div> 20 - </div> 16 + <span class="{{ $emptyStyle }} {{ $containerStyle }}"> 17 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span> 18 + <span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span> 19 + <span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span> 20 + </span> 21 21 {{- else if eq .Op.String "-" -}} 22 - <div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 23 - <div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div> 24 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 25 - <div class="px-2">{{ .Content }}</div> 26 - </div> 22 + <span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 23 + <span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span> 24 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 25 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 26 + </span> 27 27 {{- else if eq .Op.String " " -}} 28 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 29 - <div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div> 30 - <div class="{{ $opStyle 
}}">{{ .Op.String }}</div> 31 - <div class="px-2">{{ .Content }}</div> 32 - </div> 28 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}"> 29 + <span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span> 30 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 31 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 32 + </span> 33 33 {{- end -}} 34 34 {{- end -}} 35 - {{- end -}}</div></div></pre> 35 + {{- end -}}</div></div></div> 36 36 37 - <pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 37 + <div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 38 38 {{- range .RightLines -}} 39 39 {{- if .IsEmpty -}} 40 - <div class="{{ $emptyStyle }} {{ $containerStyle }}"> 41 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div> 42 - <div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div> 43 - <div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div> 44 - </div> 40 + <span class="{{ $emptyStyle }} {{ $containerStyle }}"> 41 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span> 42 + <span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span> 43 + <span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span> 44 + </span> 45 45 {{- else if eq .Op.String "+" -}} 46 - <div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 47 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div> 48 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 49 - <div class="px-2" >{{ .Content }}</div> 50 - </div> 46 + <span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 47 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span> 48 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 49 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 50 + </span> 51 51 {{- else if eq .Op.String " " -}} 52 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 53 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div> 54 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 55 - <div class="px-2">{{ .Content }}</div> 56 - </div> 52 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}"> 53 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span> 54 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 55 + <span class="px-2 whitespace-pre">{{ .Content }}</span> 56 + </span> 57 57 {{- end -}} 58 58 {{- end -}} 59 - {{- end -}}</div></div></pre> 59 + {{- 
end -}}</div></div></div> 60 60 </div> 61 61 {{ end }}
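The split view above walks `.LeftLines` and `.RightLines` in lockstep and relies on `IsEmpty` placeholder rows to keep both columns the same height. Here is a simplified sketch of producing such aligned pairs from a run of diff ops; the real alignment logic in the appview may well differ, and all names are illustrative.

```go
// Sketch: pair deletions with additions and pad the shorter side with placeholders.
package main

import "fmt"

type Line struct {
	Op      byte // '-', '+', or ' '
	Number  int
	Content string
	IsEmpty bool // placeholder used to pad the other column
}

func splitRows(lines []Line) (left, right []Line) {
	for i := 0; i < len(lines); {
		if lines[i].Op == ' ' {
			// context shows up on both sides
			left, right = append(left, lines[i]), append(right, lines[i])
			i++
			continue
		}
		// collect a run of '-' then a run of '+', then pad to equal length
		var dels, adds []Line
		for i < len(lines) && lines[i].Op == '-' {
			dels = append(dels, lines[i])
			i++
		}
		for i < len(lines) && lines[i].Op == '+' {
			adds = append(adds, lines[i])
			i++
		}
		for len(dels) < len(adds) {
			dels = append(dels, Line{IsEmpty: true})
		}
		for len(adds) < len(dels) {
			adds = append(adds, Line{IsEmpty: true})
		}
		left, right = append(left, dels...), append(right, adds...)
	}
	return left, right
}

func main() {
	hunk := []Line{
		{Op: ' ', Number: 10, Content: "unchanged"},
		{Op: '-', Number: 11, Content: "old line"},
		{Op: '+', Number: 11, Content: "new line"},
		{Op: '+', Number: 12, Content: "another new line"},
	}
	left, right := splitRows(hunk)
	for i := range left {
		fmt.Printf("%-22q | %q\n", left[i].Content, right[i].Content)
	}
}
```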
+21 -22
appview/pages/templates/repo/fragments/unifiedDiff.html
··· 1 1 {{ define "repo/fragments/unifiedDiff" }} 2 2 {{ $name := .Id }} 3 - <pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</div> 3 + <div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">&middot;&middot;&middot;</span> 4 4 {{- $oldStart := .OldPosition -}} 5 5 {{- $newStart := .NewPosition -}} 6 6 {{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}} 7 7 {{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}} 8 8 {{- $lineNrSepStyle1 := "" -}} 9 9 {{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}} 10 - {{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 10 + {{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}} 11 11 {{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}} 12 12 {{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}} 13 13 {{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}} 14 14 {{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}} 15 15 {{- range .Lines -}} 16 16 {{- if eq .Op.String "+" -}} 17 - <div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}"> 18 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div> 19 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div> 20 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 21 - <div class="px-2">{{ .Line }}</div> 22 - </div> 17 + <span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}"> 18 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span> 19 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span> 20 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 21 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 22 + </span> 23 23 {{- $newStart = add64 $newStart 1 -}} 24 24 {{- end -}} 25 25 {{- if eq .Op.String "-" -}} 26 - <div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}"> 27 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div> 28 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div> 29 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 30 - <div class="px-2">{{ .Line }}</div> 31 - </div> 26 + <span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}"> 27 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span> 28 + <span 
class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span> 29 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 30 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 31 + </span> 32 32 {{- $oldStart = add64 $oldStart 1 -}} 33 33 {{- end -}} 34 34 {{- if eq .Op.String " " -}} 35 - <div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}"> 36 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div> 37 - <div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div> 38 - <div class="{{ $opStyle }}">{{ .Op.String }}</div> 39 - <div class="px-2">{{ .Line }}</div> 40 - </div> 35 + <span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}"> 36 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span> 37 + <span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span> 38 + <span class="{{ $opStyle }}">{{ .Op.String }}</span> 39 + <span class="px-2 whitespace-pre">{{ .Line }}</span> 40 + </span> 41 41 {{- $newStart = add64 $newStart 1 -}} 42 42 {{- $oldStart = add64 $oldStart 1 -}} 43 43 {{- end -}} 44 44 {{- end -}} 45 - {{- end -}}</div></div></pre> 45 + {{- end -}}</div></div></div> 46 46 {{ end }} 47 -
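The unified view seeds `$oldStart`/`$newStart` from the fragment's `OldPosition`/`NewPosition` and advances them per op with `add64`: additions bump only the new counter, deletions only the old one, and context lines bump both. The small sketch below mirrors that bookkeeping with illustrative types.

```go
// Sketch of the old/new line-number tracking done by the unified diff template.
package main

import "fmt"

type UnifiedLine struct {
	Op   byte // '+', '-', or ' '
	Text string
}

// number walks a fragment and prints the old/new line numbers shown per row.
func number(oldPos, newPos int64, lines []UnifiedLine) {
	for _, l := range lines {
		switch l.Op {
		case '+':
			fmt.Printf("      %4d  + %s\n", newPos, l.Text)
			newPos++
		case '-':
			fmt.Printf("%4d        - %s\n", oldPos, l.Text)
			oldPos++
		default: // context advances both counters
			fmt.Printf("%4d  %4d    %s\n", oldPos, newPos, l.Text)
			oldPos++
			newPos++
		}
	}
}

func main() {
	number(17, 17, []UnifiedLine{
		{' ', "unchanged"},
		{'-', "old"},
		{'+', "new"},
		{' ', "also unchanged"},
	})
}
```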
+39 -10
appview/pages/templates/repo/index.html
··· 14 14 {{ end }} 15 15 <div class="flex items-center justify-between pb-5"> 16 16 {{ block "branchSelector" . }}{{ end }} 17 - <div class="flex md:hidden items-center gap-2"> 17 + <div class="flex md:hidden items-center gap-3"> 18 18 <a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold"> 19 19 {{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }} 20 20 </a> ··· 35 35 {{ end }} 36 36 37 37 {{ define "repoLanguages" }} 38 - <details class="group -m-6 mb-4"> 38 + <details class="group -my-4 -m-6 mb-4"> 39 39 <summary class="flex gap-[1px] h-4 scale-y-50 hover:scale-y-100 origin-top group-open:scale-y-100 transition-all hover:cursor-pointer overflow-hidden rounded-t"> 40 40 {{ range $value := .Languages }} 41 41 <div ··· 47 47 <div class="px-4 py-2 border-b border-gray-200 dark:border-gray-600 flex items-center gap-4 flex-wrap"> 48 48 {{ range $value := .Languages }} 49 49 <div 50 - class="flex flex-grow items-center gap-2 text-xs align-items-center justify-center" 50 + class="flex items-center gap-2 text-xs align-items-center justify-center" 51 51 > 52 52 {{ template "repo/fragments/colorBall" (dict "color" (langColor $value.Name)) }} 53 53 <div>{{ or $value.Name "Other" }} ··· 66 66 67 67 {{ define "branchSelector" }} 68 68 <div class="flex gap-2 items-center justify-between w-full"> 69 - <div class="flex gap-2 items-center"> 69 + <div class="flex gap-2 items-stretch"> 70 70 <select 71 71 onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)" 72 72 class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700" ··· 129 129 {{ $icon := "folder" }} 130 130 {{ $iconStyle := "size-4 fill-current" }} 131 131 132 + {{ if .IsSubmodule }} 133 + {{ $link = printf "/%s/%s/%s/%s" $.RepoInfo.FullName "blob" (urlquery $.Ref) .Name }} 134 + {{ $icon = "folder-input" }} 135 + {{ $iconStyle = "size-4" }} 136 + {{ end }} 137 + 132 138 {{ if .IsFile }} 133 139 {{ $link = printf "/%s/%s/%s/%s" $.RepoInfo.FullName "blob" (urlquery $.Ref) .Name }} 134 140 {{ $icon = "file" }} 135 141 {{ $iconStyle = "size-4" }} 136 142 {{ end }} 143 + 137 144 <a href="{{ $link }}" class="{{ $linkstyle }}"> 138 145 <div class="flex items-center gap-2"> 139 146 {{ i $icon $iconStyle "flex-shrink-0" }} ··· 221 228 <span 222 229 class="mx-1 before:content-['·'] before:select-none" 223 230 ></span> 224 - <span> 225 - {{ $did := index $.EmailToDid .Author.Email }} 226 - <a href="{{ if $did }}/{{ resolve $did }}{{ else }}mailto:{{ .Author.Email }}{{ end }}" 227 - class="text-gray-500 dark:text-gray-400 no-underline hover:underline" 228 - >{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ .Author.Name }}{{ end }}</a> 229 - </span> 231 + {{ template "attribution" (list . $.EmailToDid) }} 230 232 <div class="inline-block px-1 select-none after:content-['·']"></div> 231 233 {{ template "repo/fragments/time" .Committer.When }} 232 234 ··· 252 254 {{ end }} 253 255 </div> 254 256 </div> 257 + {{ end }} 258 + 259 + {{ define "attribution" }} 260 + {{ $commit := index . 0 }} 261 + {{ $map := index . 
1 }} 262 + <span class="flex items-center"> 263 + {{ $author := index $map $commit.Author.Email }} 264 + {{ $coauthors := $commit.CoAuthors }} 265 + {{ $all := list }} 266 + 267 + {{ if $author }} 268 + {{ $all = append $all $author }} 269 + {{ end }} 270 + {{ range $coauthors }} 271 + {{ $co := index $map .Email }} 272 + {{ if $co }} 273 + {{ $all = append $all $co }} 274 + {{ end }} 275 + {{ end }} 276 + 277 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }} 278 + <a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 279 + class="no-underline hover:underline"> 280 + {{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }} 281 + {{ if $coauthors }} +{{ length $coauthors }}{{ end }} 282 + </a> 283 + </span> 255 284 {{ end }} 256 285 257 286 {{ define "branchList" }}
+2 -2
appview/pages/templates/repo/issues/fragments/issueCommentHeader.html
··· 19 19 {{ end }} 20 20 21 21 {{ define "timestamp" }} 22 - <a href="#{{ .Comment.Id }}" 22 + <a href="#comment-{{ .Comment.Id }}" 23 23 class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-400 hover:underline no-underline" 24 - id="{{ .Comment.Id }}"> 24 + id="comment-{{ .Comment.Id }}"> 25 25 {{ if .Comment.Deleted }} 26 26 {{ template "repo/fragments/shortTimeAgo" .Comment.Deleted }} 27 27 {{ else if .Comment.Edited }}
+3
appview/pages/templates/repo/issues/issue.html
··· 20 20 "Subject" $.Issue.AtUri 21 21 "State" $.Issue.Labels) }} 22 22 {{ template "repo/fragments/participants" $.Issue.Participants }} 23 + {{ template "repo/fragments/backlinks" 24 + (dict "RepoInfo" $.RepoInfo 25 + "Backlinks" $.Backlinks) }} 23 26 {{ template "repo/fragments/externalLinkPanel" $.Issue.AtUri }} 24 27 </div> 25 28 </div>
+125 -49
appview/pages/templates/repo/issues/issues.html
··· 27 27 "Meta" (string .RepoInfo.Stats.IssueCount.Closed)) }} 28 28 {{ $values := list $open $closed }} 29 29 30 - <div class="flex flex-col gap-2"> 31 - <div class="flex justify-between items-stretch gap-4"> 32 - <form class="flex flex-1 relative" method="GET"> 33 - <input type="hidden" name="state" value="{{ if .FilteringByOpen }}open{{ else }}closed{{ end }}"> 34 - <div class="absolute left-3 top-1/2 -translate-y-1/2 text-gray-400 pointer-events-none"> 35 - {{ i "search" "w-4 h-4" }} 36 - </div> 37 - <input class="flex-1 p-1 pl-10 pr-10 peer" type="text" name="q" value="{{ .FilterQuery }}" placeholder=" "> 38 - <a 30 + <div class="grid gap-2 grid-cols-[auto_1fr_auto] grid-row-2"> 31 + <form class="flex relative col-span-3 sm:col-span-1 sm:col-start-2" method="GET"> 32 + <input type="hidden" name="state" value="{{ if .FilteringByOpen }}open{{ else }}closed{{ end }}"> 33 + <div class="flex-1 flex relative"> 34 + <input 35 + id="search-q" 36 + class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer" 37 + type="text" 38 + name="q" 39 + value="{{ .FilterQuery }}" 40 + placeholder=" " 41 + > 42 + <a 39 43 href="?state={{ if .FilteringByOpen }}open{{ else }}closed{{ end }}" 40 44 class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 hidden peer-[:not(:placeholder-shown)]:block" 41 - > 45 + > 42 46 {{ i "x" "w-4 h-4" }} 43 47 </a> 44 - </form> 45 - <div class="hidden sm:block"> 46 - {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }} 47 48 </div> 48 - <a 49 - href="/{{ .RepoInfo.FullName }}/issues/new" 50 - class="btn-create text-sm flex items-center justify-center gap-2 no-underline hover:no-underline hover:text-white" 51 - > 52 - {{ i "circle-plus" "w-4 h-4" }} 53 - <span>new</span> 54 - </a> 49 + <button 50 + type="submit" 51 + class="p-2 text-gray-400 border rounded-r border-gray-400 dark:border-gray-600" 52 + > 53 + {{ i "search" "w-4 h-4" }} 54 + </button> 55 + </form> 56 + <div class="sm:row-start-1"> 57 + {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }} 55 58 </div> 56 - <div class="sm:hidden"> 57 - {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }} 58 - </div> 59 + <a 60 + href="/{{ .RepoInfo.FullName }}/issues/new" 61 + class="col-start-3 btn-create text-sm flex items-center justify-center gap-2 no-underline hover:no-underline hover:text-white" 62 + > 63 + {{ i "circle-plus" "w-4 h-4" }} 64 + <span>new</span> 65 + </a> 59 66 </div> 60 67 <div class="error" id="issues"></div> 61 68 {{ end }} ··· 64 71 <div class="mt-2"> 65 72 {{ template "repo/issues/fragments/issueListing" (dict "Issues" .Issues "RepoPrefix" .RepoInfo.FullName "LabelDefs" .LabelDefs) }} 66 73 </div> 67 - {{ block "pagination" . }} {{ end }} 74 + {{if gt .IssueCount .Page.Limit }} 75 + {{ block "pagination" . 
}} {{ end }} 76 + {{ end }} 68 77 {{ end }} 69 78 70 79 {{ define "pagination" }} 71 - <div class="flex justify-end mt-4 gap-2"> 72 - {{ $currentState := "closed" }} 73 - {{ if .FilteringByOpen }} 74 - {{ $currentState = "open" }} 75 - {{ end }} 80 + <div class="flex justify-center items-center mt-4 gap-2"> 81 + {{ $currentState := "closed" }} 82 + {{ if .FilteringByOpen }} 83 + {{ $currentState = "open" }} 84 + {{ end }} 85 + 86 + {{ $prev := .Page.Previous.Offset }} 87 + {{ $next := .Page.Next.Offset }} 88 + {{ $lastPage := sub .IssueCount (mod .IssueCount .Page.Limit) }} 76 89 90 + <a 91 + class=" 92 + btn flex items-center gap-2 no-underline hover:no-underline 93 + dark:text-white dark:hover:bg-gray-700 94 + {{ if le .Page.Offset 0 }} 95 + cursor-not-allowed opacity-50 96 + {{ end }} 97 + " 77 98 {{ if gt .Page.Offset 0 }} 78 - {{ $prev := .Page.Previous }} 79 - <a 80 - class="btn flex items-center gap-2 no-underline hover:no-underline dark:text-white dark:hover:bg-gray-700" 81 - hx-boost="true" 82 - href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev.Offset }}&limit={{ $prev.Limit }}" 83 - > 84 - {{ i "chevron-left" "w-4 h-4" }} 85 - previous 86 - </a> 87 - {{ else }} 88 - <div></div> 99 + hx-boost="true" 100 + href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev }}&limit={{ .Page.Limit }}" 89 101 {{ end }} 102 + > 103 + {{ i "chevron-left" "w-4 h-4" }} 104 + previous 105 + </a> 90 106 107 + <!-- dont show first page if current page is first page --> 108 + {{ if gt .Page.Offset 0 }} 109 + <a 110 + hx-boost="true" 111 + href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset=0&limit={{ .Page.Limit }}" 112 + > 113 + 1 114 + </a> 115 + {{ end }} 116 + 117 + <!-- if previous page is not first or second page (prev > limit) --> 118 + {{ if gt $prev .Page.Limit }} 119 + <span>...</span> 120 + {{ end }} 121 + 122 + <!-- if previous page is not the first page --> 123 + {{ if gt $prev 0 }} 124 + <a 125 + hx-boost="true" 126 + href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev }}&limit={{ .Page.Limit }}" 127 + > 128 + {{ add (div $prev .Page.Limit) 1 }} 129 + </a> 130 + {{ end }} 131 + 132 + <!-- current page. 
this is always visible --> 133 + <span class="font-bold"> 134 + {{ add (div .Page.Offset .Page.Limit) 1 }} 135 + </span> 136 + 137 + <!-- if next page is not last page --> 138 + {{ if lt $next $lastPage }} 139 + <a 140 + hx-boost="true" 141 + href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next }}&limit={{ .Page.Limit }}" 142 + > 143 + {{ add (div $next .Page.Limit) 1 }} 144 + </a> 145 + {{ end }} 146 + 147 + <!-- if next page is not second last or last page (next < issues - 2 * limit) --> 148 + {{ if lt ($next) (sub .IssueCount (mul (2) .Page.Limit)) }} 149 + <span>...</span> 150 + {{ end }} 151 + 152 + <!-- if its not the last page --> 153 + {{ if lt .Page.Offset $lastPage }} 154 + <a 155 + hx-boost="true" 156 + href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $lastPage }}&limit={{ .Page.Limit }}" 157 + > 158 + {{ add (div $lastPage .Page.Limit) 1 }} 159 + </a> 160 + {{ end }} 161 + 162 + <a 163 + class=" 164 + btn flex items-center gap-2 no-underline hover:no-underline 165 + dark:text-white dark:hover:bg-gray-700 166 + {{ if ne (len .Issues) .Page.Limit }} 167 + cursor-not-allowed opacity-50 168 + {{ end }} 169 + " 91 170 {{ if eq (len .Issues) .Page.Limit }} 92 - {{ $next := .Page.Next }} 93 - <a 94 - class="btn flex items-center gap-2 no-underline hover:no-underline dark:text-white dark:hover:bg-gray-700" 95 - hx-boost="true" 96 - href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next.Offset }}&limit={{ $next.Limit }}" 97 - > 98 - next 99 - {{ i "chevron-right" "w-4 h-4" }} 100 - </a> 171 + hx-boost="true" 172 + href="/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next }}&limit={{ .Page.Limit }}" 101 173 {{ end }} 174 + > 175 + next 176 + {{ i "chevron-right" "w-4 h-4" }} 177 + </a> 102 178 </div> 103 179 {{ end }}
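The new pagination block derives page numbers from offset and limit: the current page is `offset/limit + 1`, and the "last page" link uses `IssueCount - (IssueCount mod Limit)` as its offset. The helper below mirrors that arithmetic; note that when the total is an exact multiple of the limit, that expression points one page past the final page, so the sketch adds a guard the template may or may not need elsewhere.

```go
// Sketch of the offset/limit arithmetic used by the issues pagination template.
package main

import "fmt"

type Page struct {
	Offset int
	Limit  int
}

// Number mirrors `add (div .Page.Offset .Page.Limit) 1`.
func (p Page) Number() int { return p.Offset/p.Limit + 1 }

// lastPageOffset mirrors `sub .IssueCount (mod .IssueCount .Page.Limit)`,
// with an extra guard for totals that divide evenly by the limit.
func lastPageOffset(total, limit int) int {
	off := total - total%limit
	if off >= total && off > 0 { // exact multiple: step back one page
		off -= limit
	}
	return off
}

func main() {
	p := Page{Offset: 20, Limit: 10}
	fmt.Println("current page:", p.Number())            // 3
	fmt.Println("last offset:", lastPageOffset(45, 10)) // 40
	fmt.Println("last offset:", lastPageOffset(30, 10)) // 20 (guarded)
}
```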
+40 -23
appview/pages/templates/repo/log.html
··· 17 17 <div class="hidden md:flex md:flex-col divide-y divide-gray-200 dark:divide-gray-700"> 18 18 {{ $grid := "grid grid-cols-14 gap-4" }} 19 19 <div class="{{ $grid }}"> 20 - <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2">Author</div> 20 + <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Author</div> 21 21 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Commit</div> 22 22 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-6">Message</div> 23 - <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-1"></div> 24 23 <div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2 justify-self-end">Date</div> 25 24 </div> 26 25 {{ range $index, $commit := .Commits }} 27 26 {{ $messageParts := splitN $commit.Message "\n\n" 2 }} 28 27 <div class="{{ $grid }} py-3"> 29 - <div class="align-top truncate col-span-2"> 30 - {{ $did := index $.EmailToDid $commit.Author.Email }} 31 - {{ if $did }} 32 - {{ template "user/fragments/picHandleLink" $did }} 33 - {{ else }} 34 - <a href="mailto:{{ $commit.Author.Email }}" class="text-gray-700 dark:text-gray-300 no-underline hover:underline">{{ $commit.Author.Name }}</a> 35 - {{ end }} 28 + <div class="align-top col-span-3"> 29 + {{ template "attribution" (list $commit $.EmailToDid) }} 36 30 </div> 37 31 <div class="align-top font-mono flex items-start col-span-3"> 38 32 {{ $verified := $.VerifiedCommits.IsVerified $commit.Hash.String }} ··· 61 55 <div class="align-top col-span-6"> 62 56 <div> 63 57 <a href="/{{ $.RepoInfo.FullName }}/commit/{{ $commit.Hash.String }}" class="dark:text-white no-underline hover:underline">{{ index $messageParts 0 }}</a> 58 + 64 59 {{ if gt (len $messageParts) 1 }} 65 60 <button class="py-1/2 px-1 bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 rounded" hx-on:click="this.parentElement.nextElementSibling.classList.toggle('hidden')">{{ i "ellipsis" "w-3 h-3" }}</button> 66 61 {{ end }} ··· 72 67 </span> 73 68 {{ end }} 74 69 {{ end }} 70 + 71 + <!-- ci status --> 72 + <span class="text-xs"> 73 + {{ $pipeline := index $.Pipelines .Hash.String }} 74 + {{ if and $pipeline (gt (len $pipeline.Statuses) 0) }} 75 + {{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }} 76 + {{ end }} 77 + </span> 75 78 </div> 76 79 77 80 {{ if gt (len $messageParts) 1 }} 78 81 <p class="hidden mt-1 text-sm text-gray-600 dark:text-gray-400">{{ nl2br (index $messageParts 1) }}</p> 79 82 {{ end }} 80 - </div> 81 - <div class="align-top col-span-1"> 82 - <!-- ci status --> 83 - {{ $pipeline := index $.Pipelines .Hash.String }} 84 - {{ if and $pipeline (gt (len $pipeline.Statuses) 0) }} 85 - {{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }} 86 - {{ end }} 87 83 </div> 88 84 <div class="align-top justify-self-end text-gray-500 dark:text-gray-400 col-span-2">{{ template "repo/fragments/shortTimeAgo" $commit.Committer.When }}</div> 89 85 </div> ··· 152 148 </a> 153 149 </span> 154 150 <span class="mx-2 before:content-['·'] before:select-none"></span> 155 - <span> 156 - {{ $did := index $.EmailToDid $commit.Author.Email }} 157 - <a href="{{ if $did }}/{{ $did }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 158 - class="text-gray-500 
dark:text-gray-400 no-underline hover:underline"> 159 - {{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ $commit.Author.Name }}{{ end }} 160 - </a> 161 - </span> 151 + {{ template "attribution" (list $commit $.EmailToDid) }} 162 152 <div class="inline-block px-1 select-none after:content-['·']"></div> 163 153 <span>{{ template "repo/fragments/shortTime" $commit.Committer.When }}</span> 164 154 ··· 176 166 </div> 177 167 </section> 178 168 169 + {{ end }} 170 + 171 + {{ define "attribution" }} 172 + {{ $commit := index . 0 }} 173 + {{ $map := index . 1 }} 174 + <span class="flex items-center gap-1"> 175 + {{ $author := index $map $commit.Author.Email }} 176 + {{ $coauthors := $commit.CoAuthors }} 177 + {{ $all := list }} 178 + 179 + {{ if $author }} 180 + {{ $all = append $all $author }} 181 + {{ end }} 182 + {{ range $coauthors }} 183 + {{ $co := index $map .Email }} 184 + {{ if $co }} 185 + {{ $all = append $all $co }} 186 + {{ end }} 187 + {{ end }} 188 + 189 + {{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }} 190 + <a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}" 191 + class="no-underline hover:underline"> 192 + {{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }} 193 + {{ if $coauthors }} +{{ length $coauthors }}{{ end }} 194 + </a> 195 + </span> 179 196 {{ end }} 180 197 181 198 {{ define "repoAfter" }}
+2 -1
appview/pages/templates/repo/new.html
··· 155 155 class="mr-2" 156 156 id="domain-{{ . }}" 157 157 required 158 + {{if eq (len $.Knots) 1}}checked{{end}} 158 159 /> 159 160 <label for="domain-{{ . }}" class="dark:text-white lowercase">{{ . }}</label> 160 161 </div> ··· 164 165 </div> 165 166 <p class="text-sm text-gray-500 dark:text-gray-400 mt-1"> 166 167 A knot hosts repository data and handles Git operations. 167 - You can also <a href="/knots" class="underline">register your own knot</a>. 168 + You can also <a href="/settings/knots" class="underline">register your own knot</a>. 168 169 </p> 169 170 </div> 170 171 {{ end }}
+3 -3
appview/pages/templates/repo/pipelines/fragments/logBlock.html
··· 2 2 <div id="lines" hx-swap-oob="beforeend"> 3 3 <details id="step-{{ .Id }}" {{if not .Collapsed}}open{{end}} class="group pb-2 rounded-sm border border-gray-200 dark:border-gray-700"> 4 4 <summary class="sticky top-0 pt-2 px-2 group-open:pb-2 group-open:mb-2 list-none cursor-pointer group-open:border-b border-gray-200 dark:border-gray-700 bg-white dark:bg-gray-800 hover:text-gray-500 hover:dark:text-gray-400"> 5 - <div class="group-open:hidden flex items-center gap-1">{{ template "stepHeader" . }}</div> 6 - <div class="hidden group-open:flex items-center gap-1">{{ template "stepHeader" . }}</div> 5 + <div class="group-open:hidden flex items-center gap-1">{{ i "chevron-right" "w-4 h-4" }} {{ template "stepHeader" . }}</div> 6 + <div class="hidden group-open:flex items-center gap-1">{{ i "chevron-down" "w-4 h-4" }} {{ template "stepHeader" . }}</div> 7 7 </summary> 8 8 <div class="font-mono whitespace-pre overflow-x-auto px-2"><div class="text-blue-600 dark:text-blue-300">{{ .Command }}</div><div id="step-body-{{ .Id }}"></div></div> 9 9 </details> ··· 11 11 {{ end }} 12 12 13 13 {{ define "stepHeader" }} 14 - {{ i "chevron-right" "w-4 h-4" }} {{ .Name }} 14 + {{ .Name }} 15 15 <span class="ml-auto text-sm text-gray-500 tabular-nums" data-timer="{{ .Id }}" data-start="{{ .StartTime.Unix }}"></span> 16 16 {{ end }}
+1 -1
appview/pages/templates/repo/pipelines/pipelines.html
··· 23 23 </p> 24 24 <p> 25 25 <span class="{{ $bullet }}">2</span>Configure your CI/CD 26 - <a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>. 26 + <a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>. 27 27 </p> 28 28 <p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p> 29 29 </div>
+81 -83
appview/pages/templates/repo/pulls/fragments/pullActions.html
··· 22 22 {{ $isLastRound := eq $roundNumber $lastIdx }} 23 23 {{ $isSameRepoBranch := .Pull.IsBranchBased }} 24 24 {{ $isUpToDate := .ResubmitCheck.No }} 25 - <div class="relative w-fit"> 26 - <div id="actions-{{$roundNumber}}" class="flex flex-wrap gap-2"> 27 - <button 28 - hx-get="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/round/{{ $roundNumber }}/comment" 29 - hx-target="#actions-{{$roundNumber}}" 30 - hx-swap="outerHtml" 31 - class="btn p-2 flex items-center gap-2 no-underline hover:no-underline group"> 32 - {{ i "message-square-plus" "w-4 h-4" }} 33 - <span>comment</span> 34 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 35 - </button> 36 - {{ if .BranchDeleteStatus }} 37 - <button 38 - hx-delete="/{{ .BranchDeleteStatus.Repo.Did }}/{{ .BranchDeleteStatus.Repo.Name }}/branches" 39 - hx-vals='{"branch": "{{ .BranchDeleteStatus.Branch }}" }' 40 - hx-swap="none" 41 - class="btn p-2 flex items-center gap-2 no-underline hover:no-underline group text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300"> 42 - {{ i "git-branch" "w-4 h-4" }} 43 - <span>delete branch</span> 44 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 45 - </button> 46 - {{ end }} 47 - {{ if and $isPushAllowed $isOpen $isLastRound }} 48 - {{ $disabled := "" }} 49 - {{ if $isConflicted }} 50 - {{ $disabled = "disabled" }} 51 - {{ end }} 52 - <button 53 - hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/merge" 54 - hx-swap="none" 55 - hx-confirm="Are you sure you want to merge pull #{{ .Pull.PullId }} into the `{{ .Pull.TargetBranch }}` branch?" 56 - class="btn p-2 flex items-center gap-2 group" {{ $disabled }}> 57 - {{ i "git-merge" "w-4 h-4" }} 58 - <span>merge{{if $stackCount}} {{$stackCount}}{{end}}</span> 59 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 60 - </button> 61 - {{ end }} 25 + <div id="actions-{{$roundNumber}}" class="flex flex-wrap gap-2 relative"> 26 + <button 27 + hx-get="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/round/{{ $roundNumber }}/comment" 28 + hx-target="#actions-{{$roundNumber}}" 29 + hx-swap="outerHtml" 30 + class="btn p-2 flex items-center gap-2 no-underline hover:no-underline group"> 31 + {{ i "message-square-plus" "w-4 h-4" }} 32 + <span>comment</span> 33 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 34 + </button> 35 + {{ if .BranchDeleteStatus }} 36 + <button 37 + hx-delete="/{{ .BranchDeleteStatus.Repo.Did }}/{{ .BranchDeleteStatus.Repo.Name }}/branches" 38 + hx-vals='{"branch": "{{ .BranchDeleteStatus.Branch }}" }' 39 + hx-swap="none" 40 + class="btn p-2 flex items-center gap-2 no-underline hover:no-underline group text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300"> 41 + {{ i "git-branch" "w-4 h-4" }} 42 + <span>delete branch</span> 43 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 44 + </button> 45 + {{ end }} 46 + {{ if and $isPushAllowed $isOpen $isLastRound }} 47 + {{ $disabled := "" }} 48 + {{ if $isConflicted }} 49 + {{ $disabled = "disabled" }} 50 + {{ end }} 51 + <button 52 + hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/merge" 53 + hx-swap="none" 54 + hx-confirm="Are you sure you want to merge pull #{{ .Pull.PullId }} into the `{{ .Pull.TargetBranch }}` branch?" 
55 + class="btn p-2 flex items-center gap-2 group" {{ $disabled }}> 56 + {{ i "git-merge" "w-4 h-4" }} 57 + <span>merge{{if $stackCount}} {{$stackCount}}{{end}}</span> 58 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 59 + </button> 60 + {{ end }} 62 61 63 - {{ if and $isPullAuthor $isOpen $isLastRound }} 64 - {{ $disabled := "" }} 65 - {{ if $isUpToDate }} 66 - {{ $disabled = "disabled" }} 62 + {{ if and $isPullAuthor $isOpen $isLastRound }} 63 + {{ $disabled := "" }} 64 + {{ if $isUpToDate }} 65 + {{ $disabled = "disabled" }} 66 + {{ end }} 67 + <button id="resubmitBtn" 68 + {{ if not .Pull.IsPatchBased }} 69 + hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/resubmit" 70 + {{ else }} 71 + hx-get="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/resubmit" 72 + hx-target="#actions-{{$roundNumber}}" 73 + hx-swap="outerHtml" 67 74 {{ end }} 68 - <button id="resubmitBtn" 69 - {{ if not .Pull.IsPatchBased }} 70 - hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/resubmit" 71 - {{ else }} 72 - hx-get="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/resubmit" 73 - hx-target="#actions-{{$roundNumber}}" 74 - hx-swap="outerHtml" 75 - {{ end }} 76 75 77 - hx-disabled-elt="#resubmitBtn" 78 - class="btn p-2 flex items-center gap-2 disabled:opacity-50 disabled:cursor-not-allowed group" {{ $disabled }} 76 + hx-disabled-elt="#resubmitBtn" 77 + class="btn p-2 flex items-center gap-2 disabled:opacity-50 disabled:cursor-not-allowed group" {{ $disabled }} 79 78 80 - {{ if $disabled }} 81 - title="Update this branch to resubmit this pull request" 82 - {{ else }} 83 - title="Resubmit this pull request" 84 - {{ end }} 85 - > 86 - {{ i "rotate-ccw" "w-4 h-4" }} 87 - <span>resubmit</span> 88 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 89 - </button> 90 - {{ end }} 79 + {{ if $disabled }} 80 + title="Update this branch to resubmit this pull request" 81 + {{ else }} 82 + title="Resubmit this pull request" 83 + {{ end }} 84 + > 85 + {{ i "rotate-ccw" "w-4 h-4" }} 86 + <span>resubmit</span> 87 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 88 + </button> 89 + {{ end }} 91 90 92 - {{ if and (or $isPullAuthor $isPushAllowed) $isOpen $isLastRound }} 93 - <button 94 - hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/close" 95 - hx-swap="none" 96 - class="btn p-2 flex items-center gap-2 group"> 97 - {{ i "ban" "w-4 h-4" }} 98 - <span>close</span> 99 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 100 - </button> 101 - {{ end }} 91 + {{ if and (or $isPullAuthor $isPushAllowed) $isOpen $isLastRound }} 92 + <button 93 + hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/close" 94 + hx-swap="none" 95 + class="btn p-2 flex items-center gap-2 group"> 96 + {{ i "ban" "w-4 h-4" }} 97 + <span>close</span> 98 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 99 + </button> 100 + {{ end }} 102 101 103 - {{ if and (or $isPullAuthor $isPushAllowed) $isClosed $isLastRound }} 104 - <button 105 - hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/reopen" 106 - hx-swap="none" 107 - class="btn p-2 flex items-center gap-2 group"> 108 - {{ i "refresh-ccw-dot" "w-4 h-4" }} 109 - <span>reopen</span> 110 - {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 111 - </button> 112 - {{ end }} 113 - </div> 102 + {{ if and (or $isPullAuthor $isPushAllowed) $isClosed $isLastRound 
}} 103 + <button 104 + hx-post="/{{ .RepoInfo.FullName }}/pulls/{{ .Pull.PullId }}/reopen" 105 + hx-swap="none" 106 + class="btn p-2 flex items-center gap-2 group"> 107 + {{ i "refresh-ccw-dot" "w-4 h-4" }} 108 + <span>reopen</span> 109 + {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 110 + </button> 111 + {{ end }} 114 112 </div> 115 113 {{ end }} 116 114
+1 -1
appview/pages/templates/repo/pulls/patch.html
··· 54 54 {{ end }} 55 55 56 56 {{ define "contentAfter" }} 57 - {{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }} 57 + {{ template "repo/fragments/diff" (list .Diff .DiffOpts) }} 58 58 {{end}} 59 59 60 60 {{ define "contentAfterLeft" }}
+3
appview/pages/templates/repo/pulls/pull.html
··· 21 21 "Subject" $.Pull.AtUri 22 22 "State" $.Pull.Labels) }} 23 23 {{ template "repo/fragments/participants" $.Pull.Participants }} 24 + {{ template "repo/fragments/backlinks" 25 + (dict "RepoInfo" $.RepoInfo 26 + "Backlinks" $.Backlinks) }} 24 27 {{ template "repo/fragments/externalLinkPanel" $.Pull.AtUri }} 25 28 </div> 26 29 </div>
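For context, the data feeding this new `repo/fragments/backlinks` panel comes from the handler changes to `appview/pulls/pulls.go` further down in this diff. The following is a sketch assembled only from the added lines visible there (not new upstream code); surrounding handler plumbing is omitted:

```go
// Sketch drawn from the added lines in appview/pulls/pulls.go below:
// the single-pull handler now loads backlinks for the pull's AT URI
// and passes them to the template as .Backlinks.
backlinks, err := db.GetBacklinks(s.db, pull.AtUri())
if err != nil {
	log.Println("failed to get pull backlinks", err)
	s.pages.Notice(w, "pull-error", "Failed to get pull. Try again later.")
	return
}

s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{
	LoggedInUser: user,
	RepoInfo:     s.repoResolver.GetRepoInfo(r, user),
	Pull:         pull,
	Backlinks:    backlinks,
	// ...remaining fields unchanged from the handler shown in this diff
})
```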
+30 -22
appview/pages/templates/repo/pulls/pulls.html
··· 31 31 "Key" "closed" 32 32 "Value" "closed" 33 33 "Icon" "ban" 34 - "Meta" (string .RepoInfo.Stats.IssueCount.Closed)) }} 34 + "Meta" (string .RepoInfo.Stats.PullCount.Closed)) }} 35 35 {{ $values := list $open $merged $closed }} 36 - <div class="flex flex-col gap-2"> 37 - <div class="flex justify-between items-stretch gap-2"> 38 - <form class="flex flex-1 relative" method="GET"> 39 - <input type="hidden" name="state" value="{{ .FilteringBy.String }}"> 40 - <div class="absolute left-3 top-1/2 -translate-y-1/2 text-gray-400 pointer-events-none"> 41 - {{ i "search" "w-4 h-4" }} 42 - </div> 43 - <input class="flex-1 p-1 pl-10 pr-10 peer" type="text" name="q" value="{{ .FilterQuery }}" placeholder=" "> 44 - <a 36 + <div class="grid gap-2 grid-cols-[auto_1fr_auto] grid-row-2"> 37 + <form class="flex relative col-span-3 sm:col-span-1 sm:col-start-2" method="GET"> 38 + <input type="hidden" name="state" value="{{ .FilteringBy.String }}"> 39 + <div class="flex-1 flex relative"> 40 + <input 41 + id="search-q" 42 + class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer" 43 + type="text" 44 + name="q" 45 + value="{{ .FilterQuery }}" 46 + placeholder=" " 47 + > 48 + <a 45 49 href="?state={{ .FilteringBy.String }}" 46 50 class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 hidden peer-[:not(:placeholder-shown)]:block" 47 - > 51 + > 48 52 {{ i "x" "w-4 h-4" }} 49 53 </a> 50 - </form> 51 - <div class="hidden sm:block"> 52 - {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }} 53 54 </div> 54 - <a href="/{{ .RepoInfo.FullName }}/pulls/new" 55 - class="btn-create text-sm flex items-center gap-2 no-underline hover:no-underline hover:text-white" 55 + <button 56 + type="submit" 57 + class="p-2 text-gray-400 border rounded-r border-gray-400 dark:border-gray-600" 56 58 > 57 - {{ i "git-pull-request-create" "w-4 h-4" }} 58 - <span>new</span> 59 - </a> 60 - </div> 61 - <div class="sm:hidden"> 62 - {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }} 59 + {{ i "search" "w-4 h-4" }} 60 + </button> 61 + </form> 62 + <div class="sm:row-start-1"> 63 + {{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }} 63 64 </div> 65 + <a 66 + href="/{{ .RepoInfo.FullName }}/pulls/new" 67 + class="col-start-3 btn-create text-sm flex items-center gap-2 no-underline hover:no-underline hover:text-white" 68 + > 69 + {{ i "git-pull-request-create" "w-4 h-4" }} 70 + <span>new</span> 71 + </a> 64 72 </div> 65 73 <div class="error" id="pulls"></div> 66 74 {{ end }}
+22 -14
appview/pages/templates/repo/settings/access.html
··· 29 29 {{ template "addCollaboratorButton" . }} 30 30 {{ end }} 31 31 {{ range .Collaborators }} 32 + {{ $handle := resolve .Did }} 32 33 <div class="border border-gray-200 dark:border-gray-700 rounded p-4"> 33 34 <div class="flex items-center gap-3"> 34 35 <img 35 - src="{{ fullAvatar .Handle }}" 36 - alt="{{ .Handle }}" 36 + src="{{ fullAvatar $handle }}" 37 + alt="{{ $handle }}" 37 38 class="rounded-full h-10 w-10 border border-gray-300 dark:border-gray-600 flex-shrink-0"/> 38 39 39 40 <div class="flex-1 min-w-0"> 40 - <a href="/{{ .Handle }}" class="block truncate"> 41 - {{ didOrHandle .Did .Handle }} 41 + <a href="/{{ $handle }}" class="block truncate"> 42 + {{ $handle }} 42 43 </a> 43 44 <p class="text-sm text-gray-500 dark:text-gray-400">{{ .Role }}</p> 44 45 </div> ··· 66 67 <div 67 68 id="add-collaborator-modal" 68 69 popover 69 - class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50"> 70 + class=" 71 + bg-white dark:bg-gray-800 border border-gray-200 dark:border-gray-700 72 + dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50 73 + w-full md:w-96 p-4 rounded drop-shadow overflow-visible"> 70 74 {{ template "addCollaboratorModal" . }} 71 75 </div> 72 76 {{ end }} ··· 82 86 ADD COLLABORATOR 83 87 </label> 84 88 <p class="text-sm text-gray-500 dark:text-gray-400">Collaborators can push to this repository.</p> 85 - <input 86 - autocapitalize="none" 87 - autocorrect="off" 88 - type="text" 89 - id="add-collaborator" 90 - name="collaborator" 91 - required 92 - placeholder="foo.bsky.social" 93 - /> 89 + <actor-typeahead> 90 + <input 91 + autocapitalize="none" 92 + autocorrect="off" 93 + autocomplete="off" 94 + type="text" 95 + id="add-collaborator" 96 + name="collaborator" 97 + required 98 + placeholder="user.tngl.sh" 99 + class="w-full" 100 + /> 101 + </actor-typeahead> 94 102 <div class="flex gap-2 pt-2"> 95 103 <button 96 104 type="button"
+1 -1
appview/pages/templates/repo/settings/general.html
··· 58 58 {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 59 59 </button> 60 60 </div> 61 - <fieldset> 61 + </fieldset> 62 62 </form> 63 63 {{ end }} 64 64
+1 -1
appview/pages/templates/repo/settings/pipelines.html
··· 22 22 <p class="text-gray-500 dark:text-gray-400"> 23 23 Choose a spindle to execute your workflows on. Only repository owners 24 24 can configure spindles. Spindles can be selfhosted, 25 - <a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md"> 25 + <a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 26 26 click to learn more. 27 27 </a> 28 28 </p>
+8
appview/pages/templates/repo/tree.html
··· 59 59 {{ $icon := "folder" }} 60 60 {{ $iconStyle := "size-4 fill-current" }} 61 61 62 + {{ if .IsSubmodule }} 63 + {{ $link = printf "/%s/%s/%s/%s/%s" $.RepoInfo.FullName "blob" (urlquery $.Ref) $.TreePath .Name }} 64 + {{ $icon = "folder-input" }} 65 + {{ $iconStyle = "size-4" }} 66 + {{ end }} 67 + 62 68 {{ if .IsFile }} 69 + {{ $link = printf "/%s/%s/%s/%s/%s" $.RepoInfo.FullName "blob" (urlquery $.Ref) $.TreePath .Name }} 63 70 {{ $icon = "file" }} 64 71 {{ $iconStyle = "size-4" }} 65 72 {{ end }} 73 + 66 74 <a href="{{ $link }}" class="{{ $linkstyle }}"> 67 75 <div class="flex items-center gap-2"> 68 76 {{ i $icon $iconStyle "flex-shrink-0" }}
+22 -6
appview/pages/templates/spindles/dashboard.html
··· 1 - {{ define "title" }}{{.Spindle.Instance}} &middot; spindles{{ end }} 1 + {{ define "title" }}{{.Spindle.Instance}} &middot; {{ .Tab }} settings{{ end }} 2 2 3 3 {{ define "content" }} 4 - <div class="px-6 py-4"> 4 + <div class="p-6"> 5 + <p class="text-xl font-bold dark:text-white">Settings</p> 6 + </div> 7 + <div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 8 + <section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6"> 9 + <div class="col-span-1"> 10 + {{ template "user/settings/fragments/sidebar" . }} 11 + </div> 12 + <div class="col-span-1 md:col-span-3 flex flex-col gap-6"> 13 + {{ template "spindleDash" . }} 14 + </div> 15 + </section> 16 + </div> 17 + {{ end }} 18 + 19 + {{ define "spindleDash" }} 20 + <div> 5 21 <div class="flex justify-between items-center"> 6 - <h1 class="text-xl font-bold dark:text-white">{{ .Spindle.Instance }}</h1> 22 + <h2 class="text-sm pb-2 uppercase font-bold">{{ .Tab }} &middot; {{ .Spindle.Instance }}</h2> 7 23 <div id="right-side" class="flex gap-2"> 8 24 {{ $style := "px-2 py-1 rounded flex items-center flex-shrink-0 gap-2" }} 9 25 {{ $isOwner := and .LoggedInUser (eq .LoggedInUser.Did .Spindle.Owner) }} ··· 71 87 <button 72 88 class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 73 89 title="Delete spindle" 74 - hx-delete="/spindles/{{ .Instance }}" 90 + hx-delete="/settings/spindles/{{ .Instance }}" 75 91 hx-swap="outerHTML" 76 92 hx-confirm="Are you sure you want to delete the spindle '{{ .Instance }}'?" 77 93 hx-headers='{"shouldRedirect": "true"}' ··· 87 103 <button 88 104 class="btn gap-2 group" 89 105 title="Retry spindle verification" 90 - hx-post="/spindles/{{ .Instance }}/retry" 106 + hx-post="/settings/spindles/{{ .Instance }}/retry" 91 107 hx-swap="none" 92 108 hx-headers='{"shouldRefresh": "true"}' 93 109 > ··· 104 120 <button 105 121 class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 106 122 title="Remove member" 107 - hx-post="/spindles/{{ $root.Spindle.Instance }}/remove" 123 + hx-post="/settings/spindles/{{ $root.Spindle.Instance }}/remove" 108 124 hx-swap="none" 109 125 hx-vals='{"member": "{{$member}}" }' 110 126 hx-confirm="Are you sure you want to remove {{ resolve $member }} from this instance?"
+17 -12
appview/pages/templates/spindles/fragments/addMemberModal.html
··· 13 13 <div 14 14 id="add-member-{{ .Instance }}" 15 15 popover 16 - class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50"> 16 + class=" 17 + bg-white dark:bg-gray-800 border border-gray-200 dark:border-gray-700 dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50 18 + w-full md:w-96 p-4 rounded drop-shadow overflow-visible"> 17 19 {{ block "addSpindleMemberPopover" . }} {{ end }} 18 20 </div> 19 21 {{ end }} 20 22 21 23 {{ define "addSpindleMemberPopover" }} 22 24 <form 23 - hx-post="/spindles/{{ .Instance }}/add" 25 + hx-post="/settings/spindles/{{ .Instance }}/add" 24 26 hx-indicator="#spinner" 25 27 hx-swap="none" 26 28 class="flex flex-col gap-2" ··· 29 31 ADD MEMBER 30 32 </label> 31 33 <p class="text-sm text-gray-500 dark:text-gray-400">Members can register repositories and run workflows on this spindle.</p> 32 - <input 33 - autocapitalize="none" 34 - autocorrect="off" 35 - autocomplete="off" 36 - type="text" 37 - id="member-did-{{ .Id }}" 38 - name="member" 39 - required 40 - placeholder="foo.bsky.social" 41 - /> 34 + <actor-typeahead> 35 + <input 36 + autocapitalize="none" 37 + autocorrect="off" 38 + autocomplete="off" 39 + type="text" 40 + id="member-did-{{ .Id }}" 41 + name="member" 42 + required 43 + placeholder="user.tngl.sh" 44 + class="w-full" 45 + /> 46 + </actor-typeahead> 42 47 <div class="flex gap-2 pt-2"> 43 48 <button 44 49 type="button"
+3 -3
appview/pages/templates/spindles/fragments/spindleListing.html
··· 7 7 8 8 {{ define "spindleLeftSide" }} 9 9 {{ if .Verified }} 10 - <a href="/spindles/{{ .Instance }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]"> 10 + <a href="/settings/spindles/{{ .Instance }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]"> 11 11 {{ i "hard-drive" "w-4 h-4" }} 12 12 <span class="hover:underline"> 13 13 {{ .Instance }} ··· 50 50 <button 51 51 class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group" 52 52 title="Delete spindle" 53 - hx-delete="/spindles/{{ .Instance }}" 53 + hx-delete="/settings/spindles/{{ .Instance }}" 54 54 hx-swap="outerHTML" 55 55 hx-target="#spindle-{{.Id}}" 56 56 hx-confirm="Are you sure you want to delete the spindle '{{ .Instance }}'?" ··· 66 66 <button 67 67 class="btn gap-2 group" 68 68 title="Retry spindle verification" 69 - hx-post="/spindles/{{ .Instance }}/retry" 69 + hx-post="/settings/spindles/{{ .Instance }}/retry" 70 70 hx-swap="none" 71 71 hx-target="#spindle-{{.Id}}" 72 72 >
+90 -59
appview/pages/templates/spindles/index.html
··· 1 - {{ define "title" }}spindles{{ end }} 1 + {{ define "title" }}{{ .Tab }} settings{{ end }} 2 2 3 3 {{ define "content" }} 4 - <div class="px-6 py-4 flex items-center justify-between gap-4 align-bottom"> 5 - <h1 class="text-xl font-bold dark:text-white">Spindles</h1> 6 - <span class="flex items-center gap-1"> 7 - {{ i "book" "w-3 h-3" }} 8 - <a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">docs</a> 9 - </span> 4 + <div class="p-6"> 5 + <p class="text-xl font-bold dark:text-white">Settings</p> 6 + </div> 7 + <div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 8 + <section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6"> 9 + <div class="col-span-1"> 10 + {{ template "user/settings/fragments/sidebar" . }} 11 + </div> 12 + <div class="col-span-1 md:col-span-3 flex flex-col gap-6"> 13 + {{ template "spindleList" . }} 14 + </div> 15 + </section> 16 + </div> 17 + {{ end }} 18 + 19 + {{ define "spindleList" }} 20 + <div class="grid grid-cols-1 md:grid-cols-3 gap-4 items-center"> 21 + <div class="col-span-1 md:col-span-2"> 22 + <h2 class="text-sm pb-2 uppercase font-bold">Spindle</h2> 23 + {{ block "about" . }} {{ end }} 24 + </div> 25 + <div class="col-span-1 md:col-span-1 md:justify-self-end"> 26 + {{ template "docsButton" . }} 27 + </div> 10 28 </div> 11 29 12 - <section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white"> 30 + <section> 13 31 <div class="flex flex-col gap-6"> 14 - {{ block "about" . }} {{ end }} 15 32 {{ block "list" . }} {{ end }} 16 33 {{ block "register" . }} {{ end }} 17 34 </div> ··· 20 37 21 38 {{ define "about" }} 22 39 <section class="rounded flex items-center gap-2"> 23 - <p class="text-gray-500 dark:text-gray-400"> 24 - Spindles are small CI runners. 25 - </p> 40 + <p class="text-gray-500 dark:text-gray-400"> 41 + Spindles are small CI runners. 42 + </p> 26 43 </section> 27 44 {{ end }} 28 45 29 46 {{ define "list" }} 30 - <section class="rounded w-full flex flex-col gap-2"> 31 - <h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">your spindles</h2> 32 - <div class="flex flex-col rounded border border-gray-200 dark:border-gray-700 w-full"> 33 - {{ range $spindle := .Spindles }} 34 - {{ template "spindles/fragments/spindleListing" . }} 35 - {{ else }} 36 - <div class="flex items-center justify-center p-2 border-b border-gray-200 dark:border-gray-700 text-gray-500"> 37 - no spindles registered yet 38 - </div> 39 - {{ end }} 47 + <section class="rounded w-full flex flex-col gap-2"> 48 + <h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">your spindles</h2> 49 + <div class="flex flex-col rounded border border-gray-200 dark:border-gray-700 w-full"> 50 + {{ range $spindle := .Spindles }} 51 + {{ template "spindles/fragments/spindleListing" . 
}} 52 + {{ else }} 53 + <div class="flex items-center justify-center p-2 border-b border-gray-200 dark:border-gray-700 text-gray-500"> 54 + no spindles registered yet 40 55 </div> 41 - <div id="operation-error" class="text-red-500 dark:text-red-400"></div> 42 - </section> 56 + {{ end }} 57 + </div> 58 + <div id="operation-error" class="text-red-500 dark:text-red-400"></div> 59 + </section> 43 60 {{ end }} 44 61 45 62 {{ define "register" }} 46 - <section class="rounded w-full lg:w-fit flex flex-col gap-2"> 47 - <h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a spindle</h2> 48 - <p class="mb-2 dark:text-gray-300">Enter the hostname of your spindle to get started.</p> 49 - <form 50 - hx-post="/spindles/register" 51 - class="max-w-2xl mb-2 space-y-4" 52 - hx-indicator="#register-button" 53 - hx-swap="none" 54 - > 55 - <div class="flex gap-2"> 56 - <input 57 - type="text" 58 - id="instance" 59 - name="instance" 60 - placeholder="spindle.example.com" 61 - required 62 - class="flex-1 w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded" 63 - > 64 - <button 65 - type="submit" 66 - id="register-button" 67 - class="btn rounded flex items-center py-2 dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600 group" 68 - > 69 - <span class="inline-flex items-center gap-2"> 70 - {{ i "plus" "w-4 h-4" }} 71 - register 72 - </span> 73 - <span class="pl-2 hidden group-[.htmx-request]:inline"> 74 - {{ i "loader-circle" "w-4 h-4 animate-spin" }} 75 - </span> 76 - </button> 77 - </div> 63 + <section class="rounded w-full lg:w-fit flex flex-col gap-2"> 64 + <h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a spindle</h2> 65 + <p class="mb-2 dark:text-gray-300">Enter the hostname of your spindle to get started.</p> 66 + <form 67 + hx-post="/settings/spindles/register" 68 + class="max-w-2xl mb-2 space-y-4" 69 + hx-indicator="#register-button" 70 + hx-swap="none" 71 + > 72 + <div class="flex gap-2"> 73 + <input 74 + type="text" 75 + id="instance" 76 + name="instance" 77 + placeholder="spindle.example.com" 78 + required 79 + class="flex-1 w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded" 80 + > 81 + <button 82 + type="submit" 83 + id="register-button" 84 + class="btn rounded flex items-center py-2 dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600 group" 85 + > 86 + <span class="inline-flex items-center gap-2"> 87 + {{ i "plus" "w-4 h-4" }} 88 + register 89 + </span> 90 + <span class="pl-2 hidden group-[.htmx-request]:inline"> 91 + {{ i "loader-circle" "w-4 h-4 animate-spin" }} 92 + </span> 93 + </button> 94 + </div> 78 95 79 - <div id="register-error" class="dark:text-red-400"></div> 80 - </form> 96 + <div id="register-error" class="dark:text-red-400"></div> 97 + </form> 98 + 99 + </section> 100 + {{ end }} 81 101 82 - </section> 102 + {{ define "docsButton" }} 103 + <a 104 + class="btn flex items-center gap-2" 105 + href="https://docs.tangled.org/spindles.html#self-hosting-guide"> 106 + {{ i "book" "size-4" }} 107 + docs 108 + </a> 109 + <div 110 + id="add-email-modal" 111 + popover 112 + class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50"> 113 + </div> 83 114 {{ end }}
+6 -5
appview/pages/templates/strings/dashboard.html
··· 1 - {{ define "title" }}strings by {{ or .Card.UserHandle .Card.UserDid }}{{ end }} 1 + {{ define "title" }}strings by {{ resolve .Card.UserDid }}{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - <meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" /> 4 + {{ $handle := resolve .Card.UserDid }} 5 + <meta property="og:title" content="{{ $handle }}" /> 5 6 <meta property="og:type" content="profile" /> 6 - <meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}" /> 7 - <meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" /> 7 + <meta property="og:url" content="https://tangled.org/{{ $handle }}" /> 8 + <meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" /> 8 9 {{ end }} 9 10 10 11 ··· 35 36 {{ $s := index . 1 }} 36 37 <div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800"> 37 38 <div class="font-medium dark:text-white flex gap-2 items-center"> 38 - <a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 39 + <a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 39 40 </div> 40 41 {{ with $s.Description }} 41 42 <div class="text-gray-600 dark:text-gray-300 text-sm">
+13 -9
appview/pages/templates/strings/string.html
··· 1 - {{ define "title" }}{{ .String.Filename }} · by {{ didOrHandle .Owner.DID.String .Owner.Handle.String }}{{ end }} 1 + {{ define "title" }}{{ .String.Filename }} · by {{ resolve .Owner.DID.String }}{{ end }} 2 2 3 3 {{ define "extrameta" }} 4 - {{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }} 4 + {{ $ownerId := resolve .Owner.DID.String }} 5 5 <meta property="og:title" content="{{ .String.Filename }} · by {{ $ownerId }}" /> 6 6 <meta property="og:type" content="object" /> 7 7 <meta property="og:url" content="https://tangled.org/strings/{{ $ownerId }}/{{ .String.Rkey }}" /> ··· 9 9 {{ end }} 10 10 11 11 {{ define "content" }} 12 - {{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }} 12 + {{ $ownerId := resolve .Owner.DID.String }} 13 13 <section id="string-header" class="mb-4 py-2 px-6 dark:text-white"> 14 14 <div class="text-lg flex items-center justify-between"> 15 15 <div> ··· 17 17 <span class="select-none">/</span> 18 18 <a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a> 19 19 </div> 20 - {{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }} 21 - <div class="flex gap-2 text-base"> 20 + <div class="flex gap-2 items-stretch text-base"> 21 + {{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }} 22 22 <a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group" 23 23 hx-boost="true" 24 24 href="/strings/{{ .String.Did }}/{{ .String.Rkey }}/edit"> ··· 37 37 <span class="hidden md:inline">delete</span> 38 38 {{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }} 39 39 </button> 40 - </div> 41 - {{ end }} 40 + {{ end }} 41 + {{ template "fragments/starBtn" 42 + (dict "SubjectAt" .String.AtUri 43 + "IsStarred" .IsStarred 44 + "StarCount" .StarCount) }} 45 + </div> 42 46 </div> 43 47 <span> 44 48 {{ with .String.Description }} ··· 75 79 </div> 76 80 <div class="overflow-x-auto overflow-y-hidden relative"> 77 81 {{ if .ShowRendered }} 78 - <div id="blob-contents" class="prose dark:prose-invert">{{ .RenderedContents }}</div> 82 + <div id="blob-contents" class="prose dark:prose-invert">{{ .String.Contents | readme }}</div> 79 83 {{ else }} 80 - <div id="blob-contents" class="whitespace-pre peer-target:bg-yellow-200 dark:peer-target:bg-yellow-900">{{ .String.Contents | escapeHtml }}</div> 84 + <div id="blob-contents" class="whitespace-pre peer-target:bg-yellow-200 dark:peer-target:bg-yellow-900">{{ code .String.Contents .String.Filename | escapeHtml }}</div> 81 85 {{ end }} 82 86 </div> 83 87 {{ template "fragments/multiline-select" }}
+1 -2
appview/pages/templates/timeline/fragments/goodfirstissues.html
··· 3 3 <a href="/goodfirstissues" class="no-underline hover:no-underline"> 4 4 <div class="flex items-center justify-between gap-2 bg-purple-200 dark:bg-purple-900 border border-purple-400 dark:border-purple-500 rounded mb-4 py-4 px-6 "> 5 5 <div class="flex-1 flex flex-col gap-2"> 6 - <div class="text-purple-500 dark:text-purple-400">Oct 2025</div> 7 6 <p> 8 - Make your first contribution to an open-source project this October. 7 + Make your first contribution to an open-source project. 9 8 <em>good-first-issue</em> helps new contributors find easy ways to 10 9 start contributing to open-source projects. 11 10 </p>
+5 -5
appview/pages/templates/timeline/fragments/timeline.html
··· 14 14 <div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm"> 15 15 {{ if .Repo }} 16 16 {{ template "timeline/fragments/repoEvent" (list $ .) }} 17 - {{ else if .Star }} 17 + {{ else if .RepoStar }} 18 18 {{ template "timeline/fragments/starEvent" (list $ .) }} 19 19 {{ else if .Follow }} 20 20 {{ template "timeline/fragments/followEvent" (list $ .) }} ··· 52 52 <span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $repo.Created }}</span> 53 53 </div> 54 54 {{ with $repo }} 55 - {{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "RepoAt" .RepoAt "Stats" (dict "StarCount" $event.StarCount))) }} 55 + {{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "SubjectAt" .RepoAt "StarCount" $event.StarCount)) }} 56 56 {{ end }} 57 57 {{ end }} 58 58 59 59 {{ define "timeline/fragments/starEvent" }} 60 60 {{ $root := index . 0 }} 61 61 {{ $event := index . 1 }} 62 - {{ $star := $event.Star }} 62 + {{ $star := $event.RepoStar }} 63 63 {{ with $star }} 64 - {{ $starrerHandle := resolve .StarredByDid }} 64 + {{ $starrerHandle := resolve .Did }} 65 65 {{ $repoOwnerHandle := resolve .Repo.Did }} 66 66 <div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm"> 67 67 {{ template "user/fragments/picHandleLink" $starrerHandle }} ··· 72 72 <span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" .Created }}</span> 73 73 </div> 74 74 {{ with .Repo }} 75 - {{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "RepoAt" .RepoAt "Stats" (dict "StarCount" $event.StarCount))) }} 75 + {{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "SubjectAt" .RepoAt "StarCount" $event.StarCount)) }} 76 76 {{ end }} 77 77 {{ end }} 78 78 {{ end }}
+4 -2
appview/pages/templates/user/followers.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · followers {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} · followers {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-followers" class="md:col-span-8 order-2 md:order-2"> ··· 19 19 "FollowersCount" .FollowersCount 20 20 "FollowingCount" .FollowingCount) }} 21 21 {{ else }} 22 - <p class="px-6 dark:text-white">This user does not have any followers yet.</p> 22 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 23 + <span>This user does not have any followers yet.</span> 24 + </div> 23 25 {{ end }} 24 26 </div> 25 27 {{ end }}
+4 -2
appview/pages/templates/user/following.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · following {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} · following {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-following" class="md:col-span-8 order-2 md:order-2"> ··· 19 19 "FollowersCount" .FollowersCount 20 20 "FollowingCount" .FollowingCount) }} 21 21 {{ else }} 22 - <p class="px-6 dark:text-white">This user does not follow anyone yet.</p> 22 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 23 + <span>This user does not follow anyone yet.</span> 24 + </div> 23 25 {{ end }} 24 26 </div> 25 27 {{ end }}
+7 -1
appview/pages/templates/user/fragments/editBio.html
··· 26 26 {{ if and .Profile .Profile.Pronouns }} 27 27 {{ $pronouns = .Profile.Pronouns }} 28 28 {{ end }} 29 - <input type="text" class="py-1 px-1 w-full" name="pronouns" value="{{ $pronouns }}"> 29 + <input 30 + type="text" 31 + class="py-1 px-1 w-full" 32 + name="pronouns" 33 + placeholder="they/them" 34 + value="{{ $pronouns }}" 35 + > 30 36 </div> 31 37 </div> 32 38
+2 -2
appview/pages/templates/user/fragments/followCard.html
··· 6 6 <img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" /> 7 7 </div> 8 8 9 - <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full"> 9 + <div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0"> 10 10 <div class="flex-1 min-h-0 justify-around flex flex-col"> 11 11 <a href="/{{ $userIdent }}"> 12 12 <span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span> 13 13 </a> 14 14 {{ with .Profile }} 15 - <p class="text-sm pb-2 md:pb-2">{{.Description}}</p> 15 + <p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p> 16 16 {{ end }} 17 17 <div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full"> 18 18 <span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+1 -1
appview/pages/templates/user/fragments/profileCard.html
··· 1 1 {{ define "user/fragments/profileCard" }} 2 - {{ $userIdent := didOrHandle .UserDid .UserHandle }} 2 + {{ $userIdent := resolve .UserDid }} 3 3 <div class="grid grid-cols-3 md:grid-cols-1 gap-1 items-center"> 4 4 <div id="avatar" class="col-span-1 flex justify-center items-center"> 5 5 <div class="w-3/4 aspect-square relative">
+2 -1
appview/pages/templates/user/fragments/repoCard.html
··· 1 1 {{ define "user/fragments/repoCard" }} 2 + {{/* root, repo, fullName [,starButton [,starData]] */}} 2 3 {{ $root := index . 0 }} 3 4 {{ $repo := index . 1 }} 4 5 {{ $fullName := index . 2 }} ··· 29 30 </div> 30 31 {{ if and $starButton $root.LoggedInUser }} 31 32 <div class="shrink-0"> 32 - {{ template "repo/fragments/repoStar" $starData }} 33 + {{ template "fragments/starBtn" $starData }} 33 34 </div> 34 35 {{ end }} 35 36 </div>
+22 -4
appview/pages/templates/user/overview.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }}{{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-4 order-2 md:order-2"> ··· 16 16 <p class="text-sm font-bold px-2 pb-4 dark:text-white">ACTIVITY</p> 17 17 <div class="flex flex-col gap-4 relative"> 18 18 {{ if .ProfileTimeline.IsEmpty }} 19 - <p class="dark:text-white">This user does not have any activity yet.</p> 19 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 20 + <span class="flex items-center gap-2"> 21 + This user does not have any activity yet. 22 + </span> 23 + </div> 20 24 {{ end }} 21 25 22 26 {{ with .ProfileTimeline }} ··· 33 37 </p> 34 38 35 39 <div class="flex flex-col gap-1"> 40 + {{ block "commits" .Commits }} {{ end }} 36 41 {{ block "repoEvents" .RepoEvents }} {{ end }} 37 42 {{ block "issueEvents" .IssueEvents }} {{ end }} 38 43 {{ block "pullEvents" .PullEvents }} {{ end }} ··· 43 48 {{ end }} 44 49 {{ end }} 45 50 </div> 51 + {{ end }} 52 + 53 + {{ define "commits" }} 54 + {{ if . }} 55 + <div class="flex flex-wrap items-center gap-1"> 56 + {{ i "git-commit-horizontal" "size-5" }} 57 + created {{ . }} commits 58 + </div> 59 + {{ end }} 46 60 {{ end }} 47 61 48 62 {{ define "repoEvents" }} ··· 224 238 {{ define "ownRepos" }} 225 239 <div> 226 240 <div class="text-sm font-bold px-2 pb-4 dark:text-white flex items-center gap-2"> 227 - <a href="/@{{ or $.Card.UserHandle $.Card.UserDid }}?tab=repos" 241 + <a href="/{{ resolve $.Card.UserDid }}?tab=repos" 228 242 class="flex text-black dark:text-white items-center gap-2 no-underline hover:no-underline group"> 229 243 <span>PINNED REPOS</span> 230 244 </a> ··· 244 258 {{ template "user/fragments/repoCard" (list $ . false) }} 245 259 </div> 246 260 {{ else }} 247 - <p class="dark:text-white">This user does not have any pinned repos.</p> 261 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 262 + <span class="flex items-center gap-2"> 263 + This user does not have any pinned repos. 264 + </span> 265 + </div> 248 266 {{ end }} 249 267 </div> 250 268 </div>
+4 -2
appview/pages/templates/user/repos.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · repos {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} · repos {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-8 order-2 md:order-2"> ··· 13 13 {{ template "user/fragments/repoCard" (list $ . false) }} 14 14 </div> 15 15 {{ else }} 16 - <p class="px-6 dark:text-white">This user does not have any repos yet.</p> 16 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 17 + <span>This user does not have any repos yet.</span> 18 + </div> 17 19 {{ end }} 18 20 </div> 19 21 {{ end }}
+14
appview/pages/templates/user/settings/notifications.html
··· 144 144 <div class="flex items-center justify-between p-2"> 145 145 <div class="flex items-center gap-2"> 146 146 <div class="flex flex-col gap-1"> 147 + <span class="font-bold">Mentions</span> 148 + <div class="flex text-sm items-center gap-1 text-gray-500 dark:text-gray-400"> 149 + <span>When someone mentions you.</span> 150 + </div> 151 + </div> 152 + </div> 153 + <label class="flex items-center gap-2"> 154 + <input type="checkbox" name="user_mentioned" {{if .Preferences.UserMentioned}}checked{{end}}> 155 + </label> 156 + </div> 157 + 158 + <div class="flex items-center justify-between p-2"> 159 + <div class="flex items-center gap-2"> 160 + <div class="flex flex-col gap-1"> 147 161 <span class="font-bold">Email notifications</span> 148 162 <div class="flex text-sm items-center gap-1 text-gray-500 dark:text-gray-400"> 149 163 <span>Receive notifications via email in addition to in-app notifications.</span>
+9 -6
appview/pages/templates/user/signup.html
··· 43 43 page to complete your registration. 44 44 </span> 45 45 <div class="w-full mt-4 text-center"> 46 - <div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}"></div> 46 + <div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}" data-size="flexible"></div> 47 47 </div> 48 48 <button class="btn text-base w-full my-2 mt-6" type="submit" id="signup-button" tabindex="7" > 49 49 <span>join now</span> 50 50 </button> 51 + <p class="text-sm text-gray-500"> 52 + Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>. 53 + </p> 54 + 55 + <p id="signup-msg" class="error w-full"></p> 56 + <p class="text-sm text-gray-500 pt-4"> 57 + By signing up, you agree to our <a href="/terms" class="underline">Terms of Service</a> and <a href="/privacy" class="underline">Privacy Policy</a>. 58 + </p> 51 59 </form> 52 - <p class="text-sm text-gray-500"> 53 - Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>. 54 - </p> 55 - 56 - <p id="signup-msg" class="error w-full"></p> 57 60 </main> 58 61 </body> 59 62 </html>
+4 -2
appview/pages/templates/user/starred.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · repos {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} · repos {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-repos" class="md:col-span-8 order-2 md:order-2"> ··· 13 13 {{ template "user/fragments/repoCard" (list $ . true) }} 14 14 </div> 15 15 {{ else }} 16 - <p class="px-6 dark:text-white">This user does not have any starred repos yet.</p> 16 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 17 + <span>This user does not have any starred repos yet.</span> 18 + </div> 17 19 {{ end }} 18 20 </div> 19 21 {{ end }}
+5 -3
appview/pages/templates/user/strings.html
··· 1 - {{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} · strings {{ end }} 1 + {{ define "title" }}{{ resolve .Card.UserDid }} · strings {{ end }} 2 2 3 3 {{ define "profileContent" }} 4 4 <div id="all-strings" class="md:col-span-8 order-2 md:order-2"> ··· 13 13 {{ template "singleString" (list $ .) }} 14 14 </div> 15 15 {{ else }} 16 - <p class="px-6 dark:text-white">This user does not have any strings yet.</p> 16 + <div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded"> 17 + <span>This user does not have any strings yet.</span> 18 + </div> 17 19 {{ end }} 18 20 </div> 19 21 {{ end }} ··· 23 25 {{ $s := index . 1 }} 24 26 <div class="py-4 px-6 rounded bg-white dark:bg-gray-800"> 25 27 <div class="font-medium dark:text-white flex gap-2 items-center"> 26 - <a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 28 + <a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a> 27 29 </div> 28 30 {{ with $s.Description }} 29 31 <div class="text-gray-600 dark:text-gray-300 text-sm">
+19 -22
appview/pipelines/pipelines.go
··· 16 16 "tangled.org/core/appview/reporesolver" 17 17 "tangled.org/core/eventconsumer" 18 18 "tangled.org/core/idresolver" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 spindlemodel "tangled.org/core/spindle/models" 21 22 ··· 78 79 return 79 80 } 80 81 81 - repoInfo := f.RepoInfo(user) 82 - 83 82 ps, err := db.GetPipelineStatuses( 84 83 p.db, 85 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 86 - db.FilterEq("repo_name", repoInfo.Name), 87 - db.FilterEq("knot", repoInfo.Knot), 84 + 30, 85 + orm.FilterEq("repo_owner", f.Did), 86 + orm.FilterEq("repo_name", f.Name), 87 + orm.FilterEq("knot", f.Knot), 88 88 ) 89 89 if err != nil { 90 90 l.Error("failed to query db", "err", err) ··· 93 93 94 94 p.pages.Pipelines(w, pages.PipelinesParams{ 95 95 LoggedInUser: user, 96 - RepoInfo: repoInfo, 96 + RepoInfo: p.repoResolver.GetRepoInfo(r, user), 97 97 Pipelines: ps, 98 98 }) 99 99 } ··· 107 107 l.Error("failed to get repo and knot", "err", err) 108 108 return 109 109 } 110 - 111 - repoInfo := f.RepoInfo(user) 112 110 113 111 pipelineId := chi.URLParam(r, "pipeline") 114 112 if pipelineId == "" { ··· 124 122 125 123 ps, err := db.GetPipelineStatuses( 126 124 p.db, 127 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 128 - db.FilterEq("repo_name", repoInfo.Name), 129 - db.FilterEq("knot", repoInfo.Knot), 130 - db.FilterEq("id", pipelineId), 125 + 1, 126 + orm.FilterEq("repo_owner", f.Did), 127 + orm.FilterEq("repo_name", f.Name), 128 + orm.FilterEq("knot", f.Knot), 129 + orm.FilterEq("id", pipelineId), 131 130 ) 132 131 if err != nil { 133 132 l.Error("failed to query db", "err", err) ··· 143 142 144 143 p.pages.Workflow(w, pages.WorkflowParams{ 145 144 LoggedInUser: user, 146 - RepoInfo: repoInfo, 145 + RepoInfo: p.repoResolver.GetRepoInfo(r, user), 147 146 Pipeline: singlePipeline, 148 147 Workflow: workflow, 149 148 }) ··· 174 173 ctx, cancel := context.WithCancel(r.Context()) 175 174 defer cancel() 176 175 177 - user := p.oauth.GetUser(r) 178 176 f, err := p.repoResolver.Resolve(r) 179 177 if err != nil { 180 178 l.Error("failed to get repo and knot", "err", err) ··· 182 180 return 183 181 } 184 182 185 - repoInfo := f.RepoInfo(user) 186 - 187 183 pipelineId := chi.URLParam(r, "pipeline") 188 184 workflow := chi.URLParam(r, "workflow") 189 185 if pipelineId == "" || workflow == "" { ··· 193 189 194 190 ps, err := db.GetPipelineStatuses( 195 191 p.db, 196 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 197 - db.FilterEq("repo_name", repoInfo.Name), 198 - db.FilterEq("knot", repoInfo.Knot), 199 - db.FilterEq("id", pipelineId), 192 + 1, 193 + orm.FilterEq("repo_owner", f.Did), 194 + orm.FilterEq("repo_name", f.Name), 195 + orm.FilterEq("knot", f.Knot), 196 + orm.FilterEq("id", pipelineId), 200 197 ) 201 198 if err != nil || len(ps) != 1 { 202 199 l.Error("pipeline query failed", "err", err, "count", len(ps)) ··· 205 202 } 206 203 207 204 singlePipeline := ps[0] 208 - spindle := repoInfo.Spindle 209 - knot := repoInfo.Knot 205 + spindle := f.Spindle 206 + knot := f.Knot 210 207 rkey := singlePipeline.Rkey 211 208 212 209 if spindle == "" || knot == "" || rkey == "" {
+3 -2
appview/pulls/opengraph.go
··· 13 13 "tangled.org/core/appview/db" 14 14 "tangled.org/core/appview/models" 15 15 "tangled.org/core/appview/ogcard" 16 + "tangled.org/core/orm" 16 17 "tangled.org/core/patchutil" 17 18 "tangled.org/core/types" 18 19 ) ··· 276 277 } 277 278 278 279 // Get comment count from database 279 - comments, err := db.GetPullComments(s.db, db.FilterEq("pull_id", pull.ID)) 280 + comments, err := db.GetPullComments(s.db, orm.FilterEq("pull_id", pull.ID)) 280 281 if err != nil { 281 282 log.Printf("failed to get pull comments: %v", err) 282 283 } ··· 293 294 filesChanged = niceDiff.Stat.FilesChanged 294 295 } 295 296 296 - card, err := s.drawPullSummaryCard(pull, &f.Repo, commentCount, diffStats, filesChanged) 297 + card, err := s.drawPullSummaryCard(pull, f, commentCount, diffStats, filesChanged) 297 298 if err != nil { 298 299 log.Println("failed to draw pull summary card", err) 299 300 http.Error(w, "failed to draw pull summary card", http.StatusInternalServerError)
+196 -168
appview/pulls/pulls.go
··· 1 1 package pulls 2 2 3 3 import ( 4 + "context" 4 5 "database/sql" 5 6 "encoding/json" 6 7 "errors" ··· 18 19 "tangled.org/core/appview/config" 19 20 "tangled.org/core/appview/db" 20 21 pulls_indexer "tangled.org/core/appview/indexer/pulls" 22 + "tangled.org/core/appview/mentions" 21 23 "tangled.org/core/appview/models" 22 24 "tangled.org/core/appview/notify" 23 25 "tangled.org/core/appview/oauth" 24 26 "tangled.org/core/appview/pages" 25 27 "tangled.org/core/appview/pages/markup" 28 + "tangled.org/core/appview/pages/repoinfo" 26 29 "tangled.org/core/appview/reporesolver" 27 30 "tangled.org/core/appview/validator" 28 31 "tangled.org/core/appview/xrpcclient" 29 32 "tangled.org/core/idresolver" 33 + "tangled.org/core/orm" 30 34 "tangled.org/core/patchutil" 31 35 "tangled.org/core/rbac" 32 36 "tangled.org/core/tid" ··· 41 45 ) 42 46 43 47 type Pulls struct { 44 - oauth *oauth.OAuth 45 - repoResolver *reporesolver.RepoResolver 46 - pages *pages.Pages 47 - idResolver *idresolver.Resolver 48 - db *db.DB 49 - config *config.Config 50 - notifier notify.Notifier 51 - enforcer *rbac.Enforcer 52 - logger *slog.Logger 53 - validator *validator.Validator 54 - indexer *pulls_indexer.Indexer 48 + oauth *oauth.OAuth 49 + repoResolver *reporesolver.RepoResolver 50 + pages *pages.Pages 51 + idResolver *idresolver.Resolver 52 + mentionsResolver *mentions.Resolver 53 + db *db.DB 54 + config *config.Config 55 + notifier notify.Notifier 56 + enforcer *rbac.Enforcer 57 + logger *slog.Logger 58 + validator *validator.Validator 59 + indexer *pulls_indexer.Indexer 55 60 } 56 61 57 62 func New( ··· 59 64 repoResolver *reporesolver.RepoResolver, 60 65 pages *pages.Pages, 61 66 resolver *idresolver.Resolver, 67 + mentionsResolver *mentions.Resolver, 62 68 db *db.DB, 63 69 config *config.Config, 64 70 notifier notify.Notifier, ··· 68 74 logger *slog.Logger, 69 75 ) *Pulls { 70 76 return &Pulls{ 71 - oauth: oauth, 72 - repoResolver: repoResolver, 73 - pages: pages, 74 - idResolver: resolver, 75 - db: db, 76 - config: config, 77 - notifier: notifier, 78 - enforcer: enforcer, 79 - logger: logger, 80 - validator: validator, 81 - indexer: indexer, 77 + oauth: oauth, 78 + repoResolver: repoResolver, 79 + pages: pages, 80 + idResolver: resolver, 81 + mentionsResolver: mentionsResolver, 82 + db: db, 83 + config: config, 84 + notifier: notifier, 85 + enforcer: enforcer, 86 + logger: logger, 87 + validator: validator, 88 + indexer: indexer, 82 89 } 83 90 } 84 91 ··· 123 130 124 131 s.pages.PullActionsFragment(w, pages.PullActionsParams{ 125 132 LoggedInUser: user, 126 - RepoInfo: f.RepoInfo(user), 133 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 127 134 Pull: pull, 128 135 RoundNumber: roundNumber, 129 136 MergeCheck: mergeCheckResponse, ··· 150 157 return 151 158 } 152 159 160 + backlinks, err := db.GetBacklinks(s.db, pull.AtUri()) 161 + if err != nil { 162 + log.Println("failed to get pull backlinks", err) 163 + s.pages.Notice(w, "pull-error", "Failed to get pull. 
Try again later.") 164 + return 165 + } 166 + 153 167 // can be nil if this pull is not stacked 154 168 stack, _ := r.Context().Value("stack").(models.Stack) 155 169 abandonedPulls, _ := r.Context().Value("abandonedPulls").([]*models.Pull) ··· 160 174 if user != nil && user.Did == pull.OwnerDid { 161 175 resubmitResult = s.resubmitCheck(r, f, pull, stack) 162 176 } 163 - 164 - repoInfo := f.RepoInfo(user) 165 177 166 178 m := make(map[string]models.Pipeline) 167 179 ··· 178 190 179 191 ps, err := db.GetPipelineStatuses( 180 192 s.db, 181 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 182 - db.FilterEq("repo_name", repoInfo.Name), 183 - db.FilterEq("knot", repoInfo.Knot), 184 - db.FilterIn("sha", shas), 193 + len(shas), 194 + orm.FilterEq("repo_owner", f.Did), 195 + orm.FilterEq("repo_name", f.Name), 196 + orm.FilterEq("knot", f.Knot), 197 + orm.FilterIn("sha", shas), 185 198 ) 186 199 if err != nil { 187 200 log.Printf("failed to fetch pipeline statuses: %s", err) ··· 205 218 206 219 labelDefs, err := db.GetLabelDefinitions( 207 220 s.db, 208 - db.FilterIn("at_uri", f.Repo.Labels), 209 - db.FilterContains("scope", tangled.RepoPullNSID), 221 + orm.FilterIn("at_uri", f.Labels), 222 + orm.FilterContains("scope", tangled.RepoPullNSID), 210 223 ) 211 224 if err != nil { 212 225 log.Println("failed to fetch labels", err) ··· 221 234 222 235 s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{ 223 236 LoggedInUser: user, 224 - RepoInfo: repoInfo, 237 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 225 238 Pull: pull, 226 239 Stack: stack, 227 240 AbandonedPulls: abandonedPulls, 241 + Backlinks: backlinks, 228 242 BranchDeleteStatus: branchDeleteStatus, 229 243 MergeCheck: mergeCheckResponse, 230 244 ResubmitCheck: resubmitResult, ··· 238 252 }) 239 253 } 240 254 241 - func (s *Pulls) mergeCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse { 255 + func (s *Pulls) mergeCheck(r *http.Request, f *models.Repo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse { 242 256 if pull.State == models.PullMerged { 243 257 return types.MergeCheckResponse{} 244 258 } ··· 267 281 r.Context(), 268 282 &xrpcc, 269 283 &tangled.RepoMergeCheck_Input{ 270 - Did: f.OwnerDid(), 284 + Did: f.Did, 271 285 Name: f.Name, 272 286 Branch: pull.TargetBranch, 273 287 Patch: patch, ··· 305 319 return result 306 320 } 307 321 308 - func (s *Pulls) branchDeleteStatus(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull) *models.BranchDeleteStatus { 322 + func (s *Pulls) branchDeleteStatus(r *http.Request, repo *models.Repo, pull *models.Pull) *models.BranchDeleteStatus { 309 323 if pull.State != models.PullMerged { 310 324 return nil 311 325 } ··· 316 330 } 317 331 318 332 var branch string 319 - var repo *models.Repo 320 333 // check if the branch exists 321 334 // NOTE: appview could cache branches/tags etc. 
for every repo by listening for gitRefUpdates 322 335 if pull.IsBranchBased() { 323 336 branch = pull.PullSource.Branch 324 - repo = &f.Repo 325 337 } else if pull.IsForkBased() { 326 338 branch = pull.PullSource.Branch 327 339 repo = pull.PullSource.Repo ··· 360 372 } 361 373 } 362 374 363 - func (s *Pulls) resubmitCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) pages.ResubmitResult { 375 + func (s *Pulls) resubmitCheck(r *http.Request, repo *models.Repo, pull *models.Pull, stack models.Stack) pages.ResubmitResult { 364 376 if pull.State == models.PullMerged || pull.State == models.PullDeleted || pull.PullSource == nil { 365 377 return pages.Unknown 366 378 } ··· 380 392 repoName = sourceRepo.Name 381 393 } else { 382 394 // pulls within the same repo 383 - knot = f.Knot 384 - ownerDid = f.OwnerDid() 385 - repoName = f.Name 395 + knot = repo.Knot 396 + ownerDid = repo.Did 397 + repoName = repo.Name 386 398 } 387 399 388 400 scheme := "http" ··· 394 406 Host: host, 395 407 } 396 408 397 - repo := fmt.Sprintf("%s/%s", ownerDid, repoName) 398 - branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, repo) 409 + didSlashName := fmt.Sprintf("%s/%s", ownerDid, repoName) 410 + branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, didSlashName) 399 411 if err != nil { 400 412 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 401 413 log.Println("failed to call XRPC repo.branches", xrpcerr) ··· 423 435 424 436 func (s *Pulls) RepoPullPatch(w http.ResponseWriter, r *http.Request) { 425 437 user := s.oauth.GetUser(r) 426 - f, err := s.repoResolver.Resolve(r) 427 - if err != nil { 428 - log.Println("failed to get repo and knot", err) 429 - return 430 - } 431 438 432 439 var diffOpts types.DiffOpts 433 440 if d := r.URL.Query().Get("diff"); d == "split" { ··· 456 463 457 464 s.pages.RepoPullPatchPage(w, pages.RepoPullPatchParams{ 458 465 LoggedInUser: user, 459 - RepoInfo: f.RepoInfo(user), 466 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 460 467 Pull: pull, 461 468 Stack: stack, 462 469 Round: roundIdInt, ··· 469 476 470 477 func (s *Pulls) RepoPullInterdiff(w http.ResponseWriter, r *http.Request) { 471 478 user := s.oauth.GetUser(r) 472 - 473 - f, err := s.repoResolver.Resolve(r) 474 - if err != nil { 475 - log.Println("failed to get repo and knot", err) 476 - return 477 - } 478 479 479 480 var diffOpts types.DiffOpts 480 481 if d := r.URL.Query().Get("diff"); d == "split" { ··· 520 521 521 522 s.pages.RepoPullInterdiffPage(w, pages.RepoPullInterdiffParams{ 522 523 LoggedInUser: s.oauth.GetUser(r), 523 - RepoInfo: f.RepoInfo(user), 524 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 524 525 Pull: pull, 525 526 Round: roundIdInt, 526 527 Interdiff: interdiff, ··· 597 598 598 599 pulls, err := db.GetPulls( 599 600 s.db, 600 - db.FilterIn("id", ids), 601 + orm.FilterIn("id", ids), 601 602 ) 602 603 if err != nil { 603 604 log.Println("failed to get pulls", err) ··· 645 646 } 646 647 pulls = pulls[:n] 647 648 648 - repoInfo := f.RepoInfo(user) 649 649 ps, err := db.GetPipelineStatuses( 650 650 s.db, 651 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 652 - db.FilterEq("repo_name", repoInfo.Name), 653 - db.FilterEq("knot", repoInfo.Knot), 654 - db.FilterIn("sha", shas), 651 + len(shas), 652 + orm.FilterEq("repo_owner", f.Did), 653 + orm.FilterEq("repo_name", f.Name), 654 + orm.FilterEq("knot", f.Knot), 655 + orm.FilterIn("sha", shas), 655 656 ) 656 657 if err != nil { 657 658 
log.Printf("failed to fetch pipeline statuses: %s", err) ··· 664 665 665 666 labelDefs, err := db.GetLabelDefinitions( 666 667 s.db, 667 - db.FilterIn("at_uri", f.Repo.Labels), 668 - db.FilterContains("scope", tangled.RepoPullNSID), 668 + orm.FilterIn("at_uri", f.Labels), 669 + orm.FilterContains("scope", tangled.RepoPullNSID), 669 670 ) 670 671 if err != nil { 671 672 log.Println("failed to fetch labels", err) ··· 680 681 681 682 s.pages.RepoPulls(w, pages.RepoPullsParams{ 682 683 LoggedInUser: s.oauth.GetUser(r), 683 - RepoInfo: f.RepoInfo(user), 684 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 684 685 Pulls: pulls, 685 686 LabelDefs: defs, 686 687 FilteringBy: state, ··· 717 718 case http.MethodGet: 718 719 s.pages.PullNewCommentFragment(w, pages.PullNewCommentParams{ 719 720 LoggedInUser: user, 720 - RepoInfo: f.RepoInfo(user), 721 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 721 722 Pull: pull, 722 723 RoundNumber: roundNumber, 723 724 }) ··· 728 729 s.pages.Notice(w, "pull", "Comment body is required") 729 730 return 730 731 } 732 + 733 + mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 731 734 732 735 // Start a transaction 733 736 tx, err := s.db.BeginTx(r.Context(), nil) ··· 771 774 Body: body, 772 775 CommentAt: atResp.Uri, 773 776 SubmissionId: pull.Submissions[roundNumber].ID, 777 + Mentions: mentions, 778 + References: references, 774 779 } 775 780 776 781 // Create the pull comment in the database with the commentAt field ··· 788 793 return 789 794 } 790 795 791 - s.notifier.NewPullComment(r.Context(), comment) 796 + s.notifier.NewPullComment(r.Context(), comment, mentions) 792 797 793 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", f.OwnerSlashRepo(), pull.PullId, commentId)) 798 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 799 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", ownerSlashRepo, pull.PullId, commentId)) 794 800 return 795 801 } 796 802 } ··· 814 820 Host: host, 815 821 } 816 822 817 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 823 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 818 824 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 819 825 if err != nil { 820 826 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 841 847 842 848 s.pages.RepoNewPull(w, pages.RepoNewPullParams{ 843 849 LoggedInUser: user, 844 - RepoInfo: f.RepoInfo(user), 850 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 845 851 Branches: result.Branches, 846 852 Strategy: strategy, 847 853 SourceBranch: sourceBranch, ··· 864 870 } 865 871 866 872 // Determine PR type based on input parameters 867 - isPushAllowed := f.RepoInfo(user).Roles.IsPushAllowed() 873 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 874 + isPushAllowed := roles.IsPushAllowed() 868 875 isBranchBased := isPushAllowed && sourceBranch != "" && fromFork == "" 869 876 isForkBased := fromFork != "" && sourceBranch != "" 870 877 isPatchBased := patch != "" && !isBranchBased && !isForkBased ··· 962 969 func (s *Pulls) handleBranchBasedPull( 963 970 w http.ResponseWriter, 964 971 r *http.Request, 965 - f *reporesolver.ResolvedRepo, 972 + repo *models.Repo, 966 973 user *oauth.User, 967 974 title, 968 975 body, ··· 974 981 if !s.config.Core.Dev { 975 982 scheme = "https" 976 983 } 977 - host := fmt.Sprintf("%s://%s", scheme, f.Knot) 984 + host := fmt.Sprintf("%s://%s", scheme, repo.Knot) 978 985 xrpcc := &indigoxrpc.Client{ 979 986 Host: host, 980 987 } 981 988 
982 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 983 - xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, targetBranch, sourceBranch) 989 + didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 990 + xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, didSlashRepo, targetBranch, sourceBranch) 984 991 if err != nil { 985 992 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 986 993 log.Println("failed to call XRPC repo.compare", xrpcerr) ··· 1017 1024 Sha: comparison.Rev2, 1018 1025 } 1019 1026 1020 - s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1027 + s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1021 1028 } 1022 1029 1023 - func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) { 1030 + func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) { 1024 1031 if err := s.validator.ValidatePatch(&patch); err != nil { 1025 1032 s.logger.Error("patch validation failed", "err", err) 1026 1033 s.pages.Notice(w, "pull", "Invalid patch format. Please provide a valid diff.") 1027 1034 return 1028 1035 } 1029 1036 1030 - s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked) 1037 + s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked) 1031 1038 } 1032 1039 1033 - func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) { 1040 + func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) { 1034 1041 repoString := strings.SplitN(forkRepo, "/", 2) 1035 1042 forkOwnerDid := repoString[0] 1036 1043 repoName := repoString[1] ··· 1132 1139 Sha: sourceRev, 1133 1140 } 1134 1141 1135 - s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1142 + s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked) 1136 1143 } 1137 1144 1138 1145 func (s *Pulls) createPullRequest( 1139 1146 w http.ResponseWriter, 1140 1147 r *http.Request, 1141 - f *reporesolver.ResolvedRepo, 1148 + repo *models.Repo, 1142 1149 user *oauth.User, 1143 1150 title, body, targetBranch string, 1144 1151 patch string, ··· 1153 1160 s.createStackedPullRequest( 1154 1161 w, 1155 1162 r, 1156 - f, 1163 + repo, 1157 1164 user, 1158 1165 targetBranch, 1159 1166 patch, ··· 1199 1206 } 1200 1207 } 1201 1208 1209 + mentions, references := s.mentionsResolver.Resolve(r.Context(), body) 1210 + 1202 1211 rkey := tid.TID() 1203 1212 initialSubmission := models.PullSubmission{ 1204 1213 Patch: patch, ··· 1210 1219 Body: body, 1211 1220 TargetBranch: targetBranch, 1212 1221 OwnerDid: user.Did, 1213 - RepoAt: f.RepoAt(), 1222 + RepoAt: repo.RepoAt(), 1214 1223 Rkey: rkey, 1224 + Mentions: mentions, 1225 + References: references, 1215 1226 Submissions: []*models.PullSubmission{ 1216 1227 
&initialSubmission, 1217 1228 }, ··· 1223 1234 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1224 1235 return 1225 1236 } 1226 - pullId, err := db.NextPullId(tx, f.RepoAt()) 1237 + pullId, err := db.NextPullId(tx, repo.RepoAt()) 1227 1238 if err != nil { 1228 1239 log.Println("failed to get pull id", err) 1229 1240 s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1230 1241 return 1231 1242 } 1232 1243 1244 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 1245 + if err != nil { 1246 + log.Println("failed to upload patch", err) 1247 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1248 + return 1249 + } 1250 + 1233 1251 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1234 1252 Collection: tangled.RepoPullNSID, 1235 1253 Repo: user.Did, ··· 1238 1256 Val: &tangled.RepoPull{ 1239 1257 Title: title, 1240 1258 Target: &tangled.RepoPull_Target{ 1241 - Repo: string(f.RepoAt()), 1259 + Repo: string(repo.RepoAt()), 1242 1260 Branch: targetBranch, 1243 1261 }, 1244 - Patch: patch, 1262 + PatchBlob: blob.Blob, 1245 1263 Source: recordPullSource, 1246 1264 CreatedAt: time.Now().Format(time.RFC3339), 1247 1265 }, ··· 1261 1279 1262 1280 s.notifier.NewPull(r.Context(), pull) 1263 1281 1264 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pullId)) 1282 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 1283 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pullId)) 1265 1284 } 1266 1285 1267 1286 func (s *Pulls) createStackedPullRequest( 1268 1287 w http.ResponseWriter, 1269 1288 r *http.Request, 1270 - f *reporesolver.ResolvedRepo, 1289 + repo *models.Repo, 1271 1290 user *oauth.User, 1272 1291 targetBranch string, 1273 1292 patch string, ··· 1299 1318 1300 1319 // build a stack out of this patch 1301 1320 stackId := uuid.New() 1302 - stack, err := newStack(f, user, targetBranch, patch, pullSource, stackId.String()) 1321 + stack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pullSource, stackId.String()) 1303 1322 if err != nil { 1304 1323 log.Println("failed to create stack", err) 1305 1324 s.pages.Notice(w, "pull", fmt.Sprintf("Failed to create stack: %v", err)) ··· 1316 1335 // apply all record creations at once 1317 1336 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1318 1337 for _, p := range stack { 1338 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch())) 1339 + if err != nil { 1340 + log.Println("failed to upload patch blob", err) 1341 + s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.") 1342 + return 1343 + } 1344 + 1319 1345 record := p.AsRecord() 1320 - write := comatproto.RepoApplyWrites_Input_Writes_Elem{ 1346 + record.PatchBlob = blob.Blob 1347 + writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 1321 1348 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 1322 1349 Collection: tangled.RepoPullNSID, 1323 1350 Rkey: &p.Rkey, ··· 1325 1352 Val: &record, 1326 1353 }, 1327 1354 }, 1328 - } 1329 - writes = append(writes, &write) 1355 + }) 1330 1356 } 1331 1357 _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{ 1332 1358 Repo: user.Did, ··· 1354 1380 s.pages.Notice(w, "pull", "Failed to create pull request. 
Try again later.") 1355 1381 return 1356 1382 } 1383 + 1357 1384 } 1358 1385 1359 1386 if err = tx.Commit(); err != nil { ··· 1362 1389 return 1363 1390 } 1364 1391 1365 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", f.OwnerSlashRepo())) 1392 + // notify about each pull 1393 + // 1394 + // this is performed after tx.Commit, because it could result in a locked DB otherwise 1395 + for _, p := range stack { 1396 + s.notifier.NewPull(r.Context(), p) 1397 + } 1398 + 1399 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 1400 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", ownerSlashRepo)) 1366 1401 } 1367 1402 1368 1403 func (s *Pulls) ValidatePatch(w http.ResponseWriter, r *http.Request) { ··· 1393 1428 1394 1429 func (s *Pulls) PatchUploadFragment(w http.ResponseWriter, r *http.Request) { 1395 1430 user := s.oauth.GetUser(r) 1396 - f, err := s.repoResolver.Resolve(r) 1397 - if err != nil { 1398 - log.Println("failed to get repo and knot", err) 1399 - return 1400 - } 1401 1431 1402 1432 s.pages.PullPatchUploadFragment(w, pages.PullPatchUploadParams{ 1403 - RepoInfo: f.RepoInfo(user), 1433 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1404 1434 }) 1405 1435 } 1406 1436 ··· 1421 1451 Host: host, 1422 1452 } 1423 1453 1424 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 1454 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1425 1455 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 1426 1456 if err != nil { 1427 1457 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 1454 1484 } 1455 1485 1456 1486 s.pages.PullCompareBranchesFragment(w, pages.PullCompareBranchesParams{ 1457 - RepoInfo: f.RepoInfo(user), 1487 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1458 1488 Branches: withoutDefault, 1459 1489 }) 1460 1490 } 1461 1491 1462 1492 func (s *Pulls) CompareForksFragment(w http.ResponseWriter, r *http.Request) { 1463 1493 user := s.oauth.GetUser(r) 1464 - f, err := s.repoResolver.Resolve(r) 1465 - if err != nil { 1466 - log.Println("failed to get repo and knot", err) 1467 - return 1468 - } 1469 1494 1470 1495 forks, err := db.GetForksByDid(s.db, user.Did) 1471 1496 if err != nil { ··· 1474 1499 } 1475 1500 1476 1501 s.pages.PullCompareForkFragment(w, pages.PullCompareForkParams{ 1477 - RepoInfo: f.RepoInfo(user), 1502 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1478 1503 Forks: forks, 1479 1504 Selected: r.URL.Query().Get("fork"), 1480 1505 }) ··· 1496 1521 // fork repo 1497 1522 repo, err := db.GetRepo( 1498 1523 s.db, 1499 - db.FilterEq("did", forkOwnerDid), 1500 - db.FilterEq("name", forkName), 1524 + orm.FilterEq("did", forkOwnerDid), 1525 + orm.FilterEq("name", forkName), 1501 1526 ) 1502 1527 if err != nil { 1503 1528 log.Println("failed to get repo", "did", forkOwnerDid, "name", forkName, "err", err) ··· 1542 1567 Host: targetHost, 1543 1568 } 1544 1569 1545 - targetRepo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 1570 + targetRepo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1546 1571 targetXrpcBytes, err := tangled.RepoBranches(r.Context(), targetXrpcc, "", 0, targetRepo) 1547 1572 if err != nil { 1548 1573 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 1567 1592 }) 1568 1593 1569 1594 s.pages.PullCompareForkBranchesFragment(w, pages.PullCompareForkBranchesParams{ 1570 - RepoInfo: f.RepoInfo(user), 1595 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1571 1596 SourceBranches: sourceBranches.Branches, 1572 1597 TargetBranches: targetBranches.Branches, 1573 1598 }) ··· 1575 1600 1576 1601 func (s *Pulls) 
ResubmitPull(w http.ResponseWriter, r *http.Request) { 1577 1602 user := s.oauth.GetUser(r) 1578 - f, err := s.repoResolver.Resolve(r) 1579 - if err != nil { 1580 - log.Println("failed to get repo and knot", err) 1581 - return 1582 - } 1583 1603 1584 1604 pull, ok := r.Context().Value("pull").(*models.Pull) 1585 1605 if !ok { ··· 1591 1611 switch r.Method { 1592 1612 case http.MethodGet: 1593 1613 s.pages.PullResubmitFragment(w, pages.PullResubmitParams{ 1594 - RepoInfo: f.RepoInfo(user), 1614 + RepoInfo: s.repoResolver.GetRepoInfo(r, user), 1595 1615 Pull: pull, 1596 1616 }) 1597 1617 return ··· 1658 1678 return 1659 1679 } 1660 1680 1661 - if !f.RepoInfo(user).Roles.IsPushAllowed() { 1681 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 1682 + if !roles.IsPushAllowed() { 1662 1683 log.Println("unauthorized user") 1663 1684 w.WriteHeader(http.StatusUnauthorized) 1664 1685 return ··· 1673 1694 Host: host, 1674 1695 } 1675 1696 1676 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 1697 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 1677 1698 xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, pull.TargetBranch, pull.PullSource.Branch) 1678 1699 if err != nil { 1679 1700 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 1800 1821 func (s *Pulls) resubmitPullHelper( 1801 1822 w http.ResponseWriter, 1802 1823 r *http.Request, 1803 - f *reporesolver.ResolvedRepo, 1824 + repo *models.Repo, 1804 1825 user *oauth.User, 1805 1826 pull *models.Pull, 1806 1827 patch string, ··· 1809 1830 ) { 1810 1831 if pull.IsStacked() { 1811 1832 log.Println("resubmitting stacked PR") 1812 - s.resubmitStackedPullHelper(w, r, f, user, pull, patch, pull.StackId) 1833 + s.resubmitStackedPullHelper(w, r, repo, user, pull, patch, pull.StackId) 1813 1834 return 1814 1835 } 1815 1836 ··· 1864 1885 return 1865 1886 } 1866 1887 1867 - var recordPullSource *tangled.RepoPull_Source 1868 - if pull.IsBranchBased() { 1869 - recordPullSource = &tangled.RepoPull_Source{ 1870 - Branch: pull.PullSource.Branch, 1871 - Sha: sourceRev, 1872 - } 1873 - } 1874 - if pull.IsForkBased() { 1875 - repoAt := pull.PullSource.RepoAt.String() 1876 - recordPullSource = &tangled.RepoPull_Source{ 1877 - Branch: pull.PullSource.Branch, 1878 - Repo: &repoAt, 1879 - Sha: sourceRev, 1880 - } 1888 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 1889 + if err != nil { 1890 + log.Println("failed to upload patch blob", err) 1891 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 1892 + return 1881 1893 } 1894 + record := pull.AsRecord() 1895 + record.PatchBlob = blob.Blob 1896 + record.CreatedAt = time.Now().Format(time.RFC3339) 1882 1897 1883 1898 _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{ 1884 1899 Collection: tangled.RepoPullNSID, ··· 1886 1901 Rkey: pull.Rkey, 1887 1902 SwapRecord: ex.Cid, 1888 1903 Record: &lexutil.LexiconTypeDecoder{ 1889 - Val: &tangled.RepoPull{ 1890 - Title: pull.Title, 1891 - Target: &tangled.RepoPull_Target{ 1892 - Repo: string(f.RepoAt()), 1893 - Branch: pull.TargetBranch, 1894 - }, 1895 - Patch: patch, // new patch 1896 - Source: recordPullSource, 1897 - CreatedAt: time.Now().Format(time.RFC3339), 1898 - }, 1904 + Val: &record, 1899 1905 }, 1900 1906 }) 1901 1907 if err != nil { ··· 1910 1916 return 1911 1917 } 1912 1918 1913 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 1919 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 1920 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 1914 1921 } 1915 1922 1916 1923 func (s *Pulls) resubmitStackedPullHelper( 1917 1924 w http.ResponseWriter, 1918 1925 r *http.Request, 1919 - f *reporesolver.ResolvedRepo, 1926 + repo *models.Repo, 1920 1927 user *oauth.User, 1921 1928 pull *models.Pull, 1922 1929 patch string, ··· 1925 1932 targetBranch := pull.TargetBranch 1926 1933 1927 1934 origStack, _ := r.Context().Value("stack").(models.Stack) 1928 - newStack, err := newStack(f, user, targetBranch, patch, pull.PullSource, stackId) 1935 + newStack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pull.PullSource, stackId) 1929 1936 if err != nil { 1930 1937 log.Println("failed to create resubmitted stack", err) 1931 1938 s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.") ··· 1980 1987 } 1981 1988 defer tx.Rollback() 1982 1989 1990 + client, err := s.oauth.AuthorizedClient(r) 1991 + if err != nil { 1992 + log.Println("failed to authorize client") 1993 + s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 1994 + return 1995 + } 1996 + 1983 1997 // pds updates to make 1984 1998 var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem 1985 1999 ··· 2013 2027 return 2014 2028 } 2015 2029 2030 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 2031 + if err != nil { 2032 + log.Println("failed to upload patch blob", err) 2033 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.") 2034 + return 2035 + } 2016 2036 record := p.AsRecord() 2037 + record.PatchBlob = blob.Blob 2017 2038 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2018 2039 RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{ 2019 2040 Collection: tangled.RepoPullNSID, ··· 2048 2069 return 2049 2070 } 2050 2071 2072 + blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch)) 2073 + if err != nil { 2074 + log.Println("failed to upload patch blob", err) 2075 + s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. 
Try again later.") 2076 + return 2077 + } 2051 2078 record := np.AsRecord() 2052 - 2079 + record.PatchBlob = blob.Blob 2053 2080 writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{ 2054 2081 RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{ 2055 2082 Collection: tangled.RepoPullNSID, ··· 2067 2094 tx, 2068 2095 p.ParentChangeId, 2069 2096 // these should be enough filters to be unique per-stack 2070 - db.FilterEq("repo_at", p.RepoAt.String()), 2071 - db.FilterEq("owner_did", p.OwnerDid), 2072 - db.FilterEq("change_id", p.ChangeId), 2097 + orm.FilterEq("repo_at", p.RepoAt.String()), 2098 + orm.FilterEq("owner_did", p.OwnerDid), 2099 + orm.FilterEq("change_id", p.ChangeId), 2073 2100 ) 2074 2101 2075 2102 if err != nil { ··· 2086 2113 return 2087 2114 } 2088 2115 2089 - client, err := s.oauth.AuthorizedClient(r) 2090 - if err != nil { 2091 - log.Println("failed to authorize client") 2092 - s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.") 2093 - return 2094 - } 2095 - 2096 2116 _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{ 2097 2117 Repo: user.Did, 2098 2118 Writes: writes, ··· 2103 2123 return 2104 2124 } 2105 2125 2106 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 2126 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo) 2127 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2107 2128 } 2108 2129 2109 2130 func (s *Pulls) MergePull(w http.ResponseWriter, r *http.Request) { ··· 2156 2177 2157 2178 authorName := ident.Handle.String() 2158 2179 mergeInput := &tangled.RepoMerge_Input{ 2159 - Did: f.OwnerDid(), 2180 + Did: f.Did, 2160 2181 Name: f.Name, 2161 2182 Branch: pull.TargetBranch, 2162 2183 Patch: patch, ··· 2221 2242 s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p) 2222 2243 } 2223 2244 2224 - s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId)) 2245 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 2246 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2225 2247 } 2226 2248 2227 2249 func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) { ··· 2241 2263 } 2242 2264 2243 2265 // auth filter: only owner or collaborators can close 2244 - roles := f.RolesInRepo(user) 2266 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 2245 2267 isOwner := roles.IsOwner() 2246 2268 isCollaborator := roles.IsCollaborator() 2247 2269 isPullAuthor := user.Did == pull.OwnerDid ··· 2293 2315 s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p) 2294 2316 } 2295 2317 2296 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 2318 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 2319 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2297 2320 } 2298 2321 2299 2322 func (s *Pulls) ReopenPull(w http.ResponseWriter, r *http.Request) { ··· 2314 2337 } 2315 2338 2316 2339 // auth filter: only owner or collaborators can close 2317 - roles := f.RolesInRepo(user) 2340 + roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())} 2318 2341 isOwner := roles.IsOwner() 2319 2342 isCollaborator := roles.IsCollaborator() 2320 2343 isPullAuthor := user.Did == pull.OwnerDid ··· 2366 2389 s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p) 2367 2390 } 
2368 2391 2369 - s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId)) 2392 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 2393 + s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId)) 2370 2394 } 2371 2395 2372 - func newStack(f *reporesolver.ResolvedRepo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) { 2396 + func (s *Pulls) newStack(ctx context.Context, repo *models.Repo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) { 2373 2397 formatPatches, err := patchutil.ExtractPatches(patch) 2374 2398 if err != nil { 2375 2399 return nil, fmt.Errorf("Failed to extract patches: %v", err) ··· 2394 2418 body := fp.Body 2395 2419 rkey := tid.TID() 2396 2420 2421 + mentions, references := s.mentionsResolver.Resolve(ctx, body) 2422 + 2397 2423 initialSubmission := models.PullSubmission{ 2398 2424 Patch: fp.Raw, 2399 2425 SourceRev: fp.SHA, ··· 2404 2430 Body: body, 2405 2431 TargetBranch: targetBranch, 2406 2432 OwnerDid: user.Did, 2407 - RepoAt: f.RepoAt(), 2433 + RepoAt: repo.RepoAt(), 2408 2434 Rkey: rkey, 2435 + Mentions: mentions, 2436 + References: references, 2409 2437 Submissions: []*models.PullSubmission{ 2410 2438 &initialSubmission, 2411 2439 },
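A minimal sketch of the new patch-storage flow in the pulls.go hunks above: the raw patch is first uploaded to the author's PDS as a blob, and only the returned blob ref is written into the sh.tangled.repo.pull record (PatchBlob), replacing the old inline Patch string. The helper below is illustrative only — the function name is hypothetical, and it assumes the import aliases used elsewhere in this file (comatproto for the generated com.atproto bindings, lexutil for github.com/bluesky-social/indigo/lex/util, indigoxrpc for github.com/bluesky-social/indigo/xrpc) and an XRPC client equivalent to the one returned by s.oauth.AuthorizedClient.

// putPullWithPatchBlob is a hypothetical helper mirroring createPullRequest above.
func putPullWithPatchBlob(ctx context.Context, client *indigoxrpc.Client, did, rkey string, record *tangled.RepoPull, patch string) error {
	// upload the raw patch as a blob on the author's PDS
	blob, err := comatproto.RepoUploadBlob(ctx, client, strings.NewReader(patch))
	if err != nil {
		return fmt.Errorf("upload patch blob: %w", err)
	}

	// the record now carries only the blob ref, not the inline patch
	record.PatchBlob = blob.Blob

	_, err = comatproto.RepoPutRecord(ctx, client, &comatproto.RepoPutRecord_Input{
		Collection: tangled.RepoPullNSID,
		Repo:       did,
		Rkey:       rkey,
		Record:     &lexutil.LexiconTypeDecoder{Val: record},
	})
	return err
}

In the stacked-PR path the same upload happens once per pull in the stack before the records are batched into a single com.atproto.repo.applyWrites call, as shown in createStackedPullRequest above.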
+3 -2
appview/repo/archive.go
··· 18 18 l := rp.logger.With("handler", "DownloadArchive") 19 19 ref := chi.URLParam(r, "ref") 20 20 ref, _ = url.PathUnescape(ref) 21 + ref = strings.TrimSuffix(ref, ".tar.gz") 21 22 f, err := rp.repoResolver.Resolve(r) 22 23 if err != nil { 23 24 l.Error("failed to get repo and knot", "err", err) ··· 31 32 xrpcc := &indigoxrpc.Client{ 32 33 Host: host, 33 34 } 34 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 35 - archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, repo) 35 + didSlashRepo := f.DidSlashRepo() 36 + archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, didSlashRepo) 36 37 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 37 38 l.Error("failed to call XRPC repo.archive", "err", xrpcerr) 38 39 rp.pages.Error503(w)
+21 -14
appview/repo/artifact.go
··· 14 14 "tangled.org/core/appview/db" 15 15 "tangled.org/core/appview/models" 16 16 "tangled.org/core/appview/pages" 17 - "tangled.org/core/appview/reporesolver" 18 17 "tangled.org/core/appview/xrpcclient" 18 + "tangled.org/core/orm" 19 19 "tangled.org/core/tid" 20 20 "tangled.org/core/types" 21 21 ··· 131 131 132 132 rp.pages.RepoArtifactFragment(w, pages.RepoArtifactParams{ 133 133 LoggedInUser: user, 134 - RepoInfo: f.RepoInfo(user), 134 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 135 135 Artifact: artifact, 136 136 }) 137 137 } ··· 156 156 157 157 artifacts, err := db.GetArtifact( 158 158 rp.db, 159 - db.FilterEq("repo_at", f.RepoAt()), 160 - db.FilterEq("tag", tag.Tag.Hash[:]), 161 - db.FilterEq("name", filename), 159 + orm.FilterEq("repo_at", f.RepoAt()), 160 + orm.FilterEq("tag", tag.Tag.Hash[:]), 161 + orm.FilterEq("name", filename), 162 162 ) 163 163 if err != nil { 164 164 log.Println("failed to get artifacts", err) ··· 174 174 175 175 artifact := artifacts[0] 176 176 177 - ownerPds := f.OwnerId.PDSEndpoint() 177 + ownerId, err := rp.idResolver.ResolveIdent(r.Context(), f.Did) 178 + if err != nil { 179 + log.Println("failed to resolve repo owner did", f.Did, err) 180 + http.Error(w, "repository owner not found", http.StatusNotFound) 181 + return 182 + } 183 + 184 + ownerPds := ownerId.PDSEndpoint() 178 185 url, _ := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", ownerPds)) 179 186 q := url.Query() 180 187 q.Set("cid", artifact.BlobCid.String()) ··· 228 235 229 236 artifacts, err := db.GetArtifact( 230 237 rp.db, 231 - db.FilterEq("repo_at", f.RepoAt()), 232 - db.FilterEq("tag", tag[:]), 233 - db.FilterEq("name", filename), 238 + orm.FilterEq("repo_at", f.RepoAt()), 239 + orm.FilterEq("tag", tag[:]), 240 + orm.FilterEq("name", filename), 234 241 ) 235 242 if err != nil { 236 243 log.Println("failed to get artifacts", err) ··· 270 277 defer tx.Rollback() 271 278 272 279 err = db.DeleteArtifact(tx, 273 - db.FilterEq("repo_at", f.RepoAt()), 274 - db.FilterEq("tag", artifact.Tag[:]), 275 - db.FilterEq("name", filename), 280 + orm.FilterEq("repo_at", f.RepoAt()), 281 + orm.FilterEq("tag", artifact.Tag[:]), 282 + orm.FilterEq("name", filename), 276 283 ) 277 284 if err != nil { 278 285 log.Println("failed to remove artifact record from db", err) ··· 290 297 w.Write([]byte{}) 291 298 } 292 299 293 - func (rp *Repo) resolveTag(ctx context.Context, f *reporesolver.ResolvedRepo, tagParam string) (*types.TagReference, error) { 300 + func (rp *Repo) resolveTag(ctx context.Context, f *models.Repo, tagParam string) (*types.TagReference, error) { 294 301 tagParam, err := url.QueryUnescape(tagParam) 295 302 if err != nil { 296 303 return nil, err ··· 305 312 Host: host, 306 313 } 307 314 308 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 315 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 309 316 xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 310 317 if err != nil { 311 318 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
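The artifact download path above now resolves the repository owner's identity on demand (rp.idResolver.ResolveIdent) instead of reading a pre-resolved OwnerId off the resolved repo, and then targets com.atproto.sync.getBlob on the owner's PDS. A rough sketch of that lookup follows; the helper name and signature are assumptions, the resolver is passed as a plain function to avoid guessing its concrete type, and only the cid query parameter is shown because the rest of the handler is elided in the hunk.

// buildArtifactBlobURL is a hypothetical helper: resolve the owner DID to find
// their PDS, then point at com.atproto.sync.getBlob for the artifact's blob CID.
func buildArtifactBlobURL(
	ctx context.Context,
	resolveIdent func(context.Context, string) (*identity.Identity, error), // e.g. rp.idResolver.ResolveIdent
	ownerDid, blobCid string,
) (string, error) {
	ownerId, err := resolveIdent(ctx, ownerDid)
	if err != nil {
		return "", fmt.Errorf("resolve owner %s: %w", ownerDid, err)
	}

	u, err := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", ownerId.PDSEndpoint()))
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("cid", blobCid)
	// the handler above sets additional query parameters that are elided in the hunk
	u.RawQuery = q.Encode()
	return u.String(), nil
}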
+142 -68
appview/repo/blob.go
··· 1 1 package repo 2 2 3 3 import ( 4 + "encoding/base64" 4 5 "fmt" 5 6 "io" 6 7 "net/http" ··· 10 11 "strings" 11 12 12 13 "tangled.org/core/api/tangled" 14 + "tangled.org/core/appview/config" 15 + "tangled.org/core/appview/models" 13 16 "tangled.org/core/appview/pages" 14 17 "tangled.org/core/appview/pages/markup" 18 + "tangled.org/core/appview/reporesolver" 15 19 xrpcclient "tangled.org/core/appview/xrpcclient" 16 20 17 21 indigoxrpc "github.com/bluesky-social/indigo/xrpc" 18 22 "github.com/go-chi/chi/v5" 19 23 ) 20 24 25 + // the content can be one of the following: 26 + // 27 + // - code : text | | raw 28 + // - markup : text | rendered | raw 29 + // - svg : text | rendered | raw 30 + // - png : | rendered | raw 31 + // - video : | rendered | raw 32 + // - submodule : | rendered | 33 + // - rest : | | 21 34 func (rp *Repo) Blob(w http.ResponseWriter, r *http.Request) { 22 35 l := rp.logger.With("handler", "RepoBlob") 36 + 23 37 f, err := rp.repoResolver.Resolve(r) 24 38 if err != nil { 25 39 l.Error("failed to get repo and knot", "err", err) 26 40 return 27 41 } 42 + 28 43 ref := chi.URLParam(r, "ref") 29 44 ref, _ = url.PathUnescape(ref) 45 + 30 46 filePath := chi.URLParam(r, "*") 31 47 filePath, _ = url.PathUnescape(filePath) 48 + 32 49 scheme := "http" 33 50 if !rp.config.Core.Dev { 34 51 scheme = "https" ··· 37 54 xrpcc := &indigoxrpc.Client{ 38 55 Host: host, 39 56 } 40 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name) 57 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 41 58 resp, err := tangled.RepoBlob(r.Context(), xrpcc, filePath, false, ref, repo) 42 59 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 43 60 l.Error("failed to call XRPC repo.blob", "err", xrpcerr) 44 61 rp.pages.Error503(w) 45 62 return 46 63 } 64 + 65 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 66 + 47 67 // Use XRPC response directly instead of converting to internal types 48 68 var breadcrumbs [][]string 49 - breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))}) 69 + breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))}) 50 70 if filePath != "" { 51 71 for idx, elem := range strings.Split(filePath, "/") { 52 72 breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))}) 53 73 } 54 74 } 55 - showRendered := false 56 - renderToggle := false 57 - if markup.GetFormat(resp.Path) == markup.FormatMarkdown { 58 - renderToggle = true 59 - showRendered = r.URL.Query().Get("code") != "true" 60 - } 61 - var unsupported bool 62 - var isImage bool 63 - var isVideo bool 64 - var contentSrc string 65 - if resp.IsBinary != nil && *resp.IsBinary { 66 - ext := strings.ToLower(filepath.Ext(resp.Path)) 67 - switch ext { 68 - case ".jpg", ".jpeg", ".png", ".gif", ".svg", ".webp": 69 - isImage = true 70 - case ".mp4", ".webm", ".ogg", ".mov", ".avi": 71 - isVideo = true 72 - default: 73 - unsupported = true 74 - } 75 - // fetch the raw binary content using sh.tangled.repo.blob xrpc 76 - repoName := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 77 - baseURL := &url.URL{ 78 - Scheme: scheme, 79 - Host: f.Knot, 80 - Path: "/xrpc/sh.tangled.repo.blob", 81 - } 82 - query := baseURL.Query() 83 - query.Set("repo", repoName) 84 - query.Set("ref", ref) 85 - query.Set("path", filePath) 86 - query.Set("raw", "true") 87 - baseURL.RawQuery = query.Encode() 88 - blobURL := baseURL.String() 89 - contentSrc = blobURL 90 - if 
!rp.config.Core.Dev { 91 - contentSrc = markup.GenerateCamoURL(rp.config.Camo.Host, rp.config.Camo.SharedSecret, blobURL) 92 - } 93 - } 94 - lines := 0 95 - if resp.IsBinary == nil || !*resp.IsBinary { 96 - lines = strings.Count(resp.Content, "\n") + 1 97 - } 98 - var sizeHint uint64 99 - if resp.Size != nil { 100 - sizeHint = uint64(*resp.Size) 101 - } else { 102 - sizeHint = uint64(len(resp.Content)) 103 - } 75 + 76 + // Create the blob view 77 + blobView := NewBlobView(resp, rp.config, f, ref, filePath, r.URL.Query()) 78 + 104 79 user := rp.oauth.GetUser(r) 105 - // Determine if content is binary (dereference pointer) 106 - isBinary := false 107 - if resp.IsBinary != nil { 108 - isBinary = *resp.IsBinary 109 - } 80 + 110 81 rp.pages.RepoBlob(w, pages.RepoBlobParams{ 111 82 LoggedInUser: user, 112 - RepoInfo: f.RepoInfo(user), 83 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 113 84 BreadCrumbs: breadcrumbs, 114 - ShowRendered: showRendered, 115 - RenderToggle: renderToggle, 116 - Unsupported: unsupported, 117 - IsImage: isImage, 118 - IsVideo: isVideo, 119 - ContentSrc: contentSrc, 85 + BlobView: blobView, 120 86 RepoBlob_Output: resp, 121 - Contents: resp.Content, 122 - Lines: lines, 123 - SizeHint: sizeHint, 124 - IsBinary: isBinary, 125 87 }) 126 88 } 127 89 128 90 func (rp *Repo) RepoBlobRaw(w http.ResponseWriter, r *http.Request) { 129 91 l := rp.logger.With("handler", "RepoBlobRaw") 92 + 130 93 f, err := rp.repoResolver.Resolve(r) 131 94 if err != nil { 132 95 l.Error("failed to get repo and knot", "err", err) 133 96 w.WriteHeader(http.StatusBadRequest) 134 97 return 135 98 } 99 + 136 100 ref := chi.URLParam(r, "ref") 137 101 ref, _ = url.PathUnescape(ref) 102 + 138 103 filePath := chi.URLParam(r, "*") 139 104 filePath, _ = url.PathUnescape(filePath) 105 + 140 106 scheme := "http" 141 107 if !rp.config.Core.Dev { 142 108 scheme = "https" 143 109 } 144 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name) 110 + repo := f.DidSlashRepo() 145 111 baseURL := &url.URL{ 146 112 Scheme: scheme, 147 113 Host: f.Knot, ··· 159 125 l.Error("failed to create request", "err", err) 160 126 return 161 127 } 128 + 162 129 // forward the If-None-Match header 163 130 if clientETag := r.Header.Get("If-None-Match"); clientETag != "" { 164 131 req.Header.Set("If-None-Match", clientETag) 165 132 } 166 133 client := &http.Client{} 134 + 167 135 resp, err := client.Do(req) 168 136 if err != nil { 169 137 l.Error("failed to reach knotserver", "err", err) 170 138 rp.pages.Error503(w) 171 139 return 172 140 } 141 + 173 142 defer resp.Body.Close() 143 + 174 144 // forward 304 not modified 175 145 if resp.StatusCode == http.StatusNotModified { 176 146 w.WriteHeader(http.StatusNotModified) 177 147 return 178 148 } 149 + 179 150 if resp.StatusCode != http.StatusOK { 180 151 l.Error("knotserver returned non-OK status for raw blob", "url", blobURL, "statuscode", resp.StatusCode) 181 152 w.WriteHeader(resp.StatusCode) 182 153 _, _ = io.Copy(w, resp.Body) 183 154 return 184 155 } 156 + 185 157 contentType := resp.Header.Get("Content-Type") 186 158 body, err := io.ReadAll(resp.Body) 187 159 if err != nil { ··· 189 161 w.WriteHeader(http.StatusInternalServerError) 190 162 return 191 163 } 164 + 192 165 if strings.HasPrefix(contentType, "text/") || isTextualMimeType(contentType) { 193 166 // serve all textual content as text/plain 194 167 w.Header().Set("Content-Type", "text/plain; charset=utf-8") ··· 202 175 w.Write([]byte("unsupported content type")) 203 176 return 204 177 } 178 + } 179 + 180 + // NewBlobView 
creates a BlobView from the XRPC response 181 + func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, repo *models.Repo, ref, filePath string, queryParams url.Values) models.BlobView { 182 + view := models.BlobView{ 183 + Contents: "", 184 + Lines: 0, 185 + } 186 + 187 + // Set size 188 + if resp.Size != nil { 189 + view.SizeHint = uint64(*resp.Size) 190 + } else if resp.Content != nil { 191 + view.SizeHint = uint64(len(*resp.Content)) 192 + } 193 + 194 + if resp.Submodule != nil { 195 + view.ContentType = models.BlobContentTypeSubmodule 196 + view.HasRenderedView = true 197 + view.ContentSrc = resp.Submodule.Url 198 + return view 199 + } 200 + 201 + // Determine if binary 202 + if resp.IsBinary != nil && *resp.IsBinary { 203 + view.ContentSrc = generateBlobURL(config, repo, ref, filePath) 204 + ext := strings.ToLower(filepath.Ext(resp.Path)) 205 + 206 + switch ext { 207 + case ".jpg", ".jpeg", ".png", ".gif", ".webp": 208 + view.ContentType = models.BlobContentTypeImage 209 + view.HasRawView = true 210 + view.HasRenderedView = true 211 + view.ShowingRendered = true 212 + 213 + case ".svg": 214 + view.ContentType = models.BlobContentTypeSvg 215 + view.HasRawView = true 216 + view.HasTextView = true 217 + view.HasRenderedView = true 218 + view.ShowingRendered = queryParams.Get("code") != "true" 219 + if resp.Content != nil { 220 + bytes, _ := base64.StdEncoding.DecodeString(*resp.Content) 221 + view.Contents = string(bytes) 222 + view.Lines = strings.Count(view.Contents, "\n") + 1 223 + } 224 + 225 + case ".mp4", ".webm", ".ogg", ".mov", ".avi": 226 + view.ContentType = models.BlobContentTypeVideo 227 + view.HasRawView = true 228 + view.HasRenderedView = true 229 + view.ShowingRendered = true 230 + } 231 + 232 + return view 233 + } 234 + 235 + // otherwise, we are dealing with text content 236 + view.HasRawView = true 237 + view.HasTextView = true 238 + 239 + if resp.Content != nil { 240 + view.Contents = *resp.Content 241 + view.Lines = strings.Count(view.Contents, "\n") + 1 242 + } 243 + 244 + // with text, we may be dealing with markdown 245 + format := markup.GetFormat(resp.Path) 246 + if format == markup.FormatMarkdown { 247 + view.ContentType = models.BlobContentTypeMarkup 248 + view.HasRenderedView = true 249 + view.ShowingRendered = queryParams.Get("code") != "true" 250 + } 251 + 252 + return view 253 + } 254 + 255 + func generateBlobURL(config *config.Config, repo *models.Repo, ref, filePath string) string { 256 + scheme := "http" 257 + if !config.Core.Dev { 258 + scheme = "https" 259 + } 260 + 261 + repoName := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 262 + baseURL := &url.URL{ 263 + Scheme: scheme, 264 + Host: repo.Knot, 265 + Path: "/xrpc/sh.tangled.repo.blob", 266 + } 267 + query := baseURL.Query() 268 + query.Set("repo", repoName) 269 + query.Set("ref", ref) 270 + query.Set("path", filePath) 271 + query.Set("raw", "true") 272 + baseURL.RawQuery = query.Encode() 273 + blobURL := baseURL.String() 274 + 275 + if !config.Core.Dev { 276 + return markup.GenerateCamoURL(config.Camo.Host, config.Camo.SharedSecret, blobURL) 277 + } 278 + return blobURL 205 279 } 206 280 207 281 func isTextualMimeType(mimeType string) bool {
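NewBlobView above replaces the ad-hoc flags the handler used to compute (ShowRendered, IsImage, IsVideo, Unsupported, and so on) with a single models.BlobView describing which views exist and which one to show. A small usage sketch, with placeholder ref and path values and a hypothetical wrapper name:

// describeBlob is a hypothetical wrapper showing how a handler consumes the view.
func describeBlob(resp *tangled.RepoBlob_Output, cfg *config.Config, repo *models.Repo, query url.Values) models.BlobView {
	view := NewBlobView(resp, cfg, repo, "main", "README.md", query) // ref and path are placeholder values

	switch view.ContentType {
	case models.BlobContentTypeMarkup:
		// markdown: rendered by default; ?code=true switches ShowingRendered off
	case models.BlobContentTypeSvg:
		// SVG: text, rendered and raw views are all offered
	case models.BlobContentTypeImage, models.BlobContentTypeVideo:
		// binary media: served from view.ContentSrc (camo-proxied outside dev mode)
	case models.BlobContentTypeSubmodule:
		// submodule pointer: ContentSrc carries the submodule URL
	default:
		// plain code/text: view.Contents and view.Lines drive the source listing
	}
	return view
}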
+2 -2
appview/repo/branches.go
··· 29 29 xrpcc := &indigoxrpc.Client{ 30 30 Host: host, 31 31 } 32 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 32 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 33 33 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 34 34 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 35 35 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) ··· 46 46 user := rp.oauth.GetUser(r) 47 47 rp.pages.RepoBranches(w, pages.RepoBranchesParams{ 48 48 LoggedInUser: user, 49 - RepoInfo: f.RepoInfo(user), 49 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 50 50 RepoBranchesResponse: result, 51 51 }) 52 52 }
+18 -18
appview/repo/compare.go
··· 36 36 Host: host, 37 37 } 38 38 39 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 39 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 40 40 branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 41 41 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 42 42 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) ··· 88 88 return 89 89 } 90 90 91 - repoinfo := f.RepoInfo(user) 92 - 93 91 rp.pages.RepoCompareNew(w, pages.RepoCompareNewParams{ 94 92 LoggedInUser: user, 95 - RepoInfo: repoinfo, 93 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 96 94 Branches: branches, 97 95 Tags: tags.Tags, 98 96 Base: base, ··· 116 114 } 117 115 118 116 // if user is navigating to one of 119 - // /compare/{base}/{head} 120 117 // /compare/{base}...{head} 121 - base := chi.URLParam(r, "base") 122 - head := chi.URLParam(r, "head") 123 - if base == "" && head == "" { 124 - rest := chi.URLParam(r, "*") // master...feature/xyz 125 - parts := strings.SplitN(rest, "...", 2) 126 - if len(parts) == 2 { 127 - base = parts[0] 128 - head = parts[1] 129 - } 118 + // /compare/{base}/{head} 119 + var base, head string 120 + rest := chi.URLParam(r, "*") 121 + 122 + var parts []string 123 + if strings.Contains(rest, "...") { 124 + parts = strings.SplitN(rest, "...", 2) 125 + } else if strings.Contains(rest, "/") { 126 + parts = strings.SplitN(rest, "/", 2) 127 + } 128 + 129 + if len(parts) == 2 { 130 + base = parts[0] 131 + head = parts[1] 130 132 } 131 133 132 134 base, _ = url.PathUnescape(base) ··· 147 149 Host: host, 148 150 } 149 151 150 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 152 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 151 153 152 154 branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 153 155 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { ··· 198 200 diff = patchutil.AsNiceDiff(formatPatch.FormatPatchRaw, base) 199 201 } 200 202 201 - repoinfo := f.RepoInfo(user) 202 - 203 203 rp.pages.RepoCompare(w, pages.RepoCompareParams{ 204 204 LoggedInUser: user, 205 - RepoInfo: repoinfo, 205 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 206 206 Branches: branches.Branches, 207 207 Tags: tags.Tags, 208 208 Base: base,
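The rewritten ref parsing above folds both accepted compare URL shapes, /compare/{base}...{head} and /compare/{base}/{head}, into a single wildcard split, checking for "..." before "/" so that branch names containing slashes still work in the first form. A standalone worked example of the split order:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, rest := range []string{"master...feature/xyz", "master/feature"} {
		var base, head string
		var parts []string
		if strings.Contains(rest, "...") {
			parts = strings.SplitN(rest, "...", 2)
		} else if strings.Contains(rest, "/") {
			parts = strings.SplitN(rest, "/", 2)
		}
		if len(parts) == 2 {
			base, head = parts[0], parts[1]
		}
		fmt.Printf("%q -> base=%q head=%q\n", rest, base, head)
	}
	// Output:
	// "master...feature/xyz" -> base="master" head="feature/xyz"
	// "master/feature" -> base="master" head="feature"
}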
+24 -17
appview/repo/feed.go
··· 11 11 "tangled.org/core/appview/db" 12 12 "tangled.org/core/appview/models" 13 13 "tangled.org/core/appview/pagination" 14 - "tangled.org/core/appview/reporesolver" 14 + "tangled.org/core/orm" 15 15 16 + "github.com/bluesky-social/indigo/atproto/identity" 16 17 "github.com/bluesky-social/indigo/atproto/syntax" 17 18 "github.com/gorilla/feeds" 18 19 ) 19 20 20 - func (rp *Repo) getRepoFeed(ctx context.Context, f *reporesolver.ResolvedRepo) (*feeds.Feed, error) { 21 + func (rp *Repo) getRepoFeed(ctx context.Context, repo *models.Repo, ownerSlashRepo string) (*feeds.Feed, error) { 21 22 const feedLimitPerType = 100 22 23 23 - pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt())) 24 + pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, orm.FilterEq("repo_at", repo.RepoAt())) 24 25 if err != nil { 25 26 return nil, err 26 27 } ··· 28 29 issues, err := db.GetIssuesPaginated( 29 30 rp.db, 30 31 pagination.Page{Limit: feedLimitPerType}, 31 - db.FilterEq("repo_at", f.RepoAt()), 32 + orm.FilterEq("repo_at", repo.RepoAt()), 32 33 ) 33 34 if err != nil { 34 35 return nil, err 35 36 } 36 37 37 38 feed := &feeds.Feed{ 38 - Title: fmt.Sprintf("activity feed for %s", f.OwnerSlashRepo()), 39 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, f.OwnerSlashRepo()), Type: "text/html", Rel: "alternate"}, 39 + Title: fmt.Sprintf("activity feed for @%s", ownerSlashRepo), 40 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, ownerSlashRepo), Type: "text/html", Rel: "alternate"}, 40 41 Items: make([]*feeds.Item, 0), 41 42 Updated: time.UnixMilli(0), 42 43 } 43 44 44 45 for _, pull := range pulls { 45 - items, err := rp.createPullItems(ctx, pull, f) 46 + items, err := rp.createPullItems(ctx, pull, repo, ownerSlashRepo) 46 47 if err != nil { 47 48 return nil, err 48 49 } ··· 50 51 } 51 52 52 53 for _, issue := range issues { 53 - item, err := rp.createIssueItem(ctx, issue, f) 54 + item, err := rp.createIssueItem(ctx, issue, repo, ownerSlashRepo) 54 55 if err != nil { 55 56 return nil, err 56 57 } ··· 71 72 return feed, nil 72 73 } 73 74 74 - func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, f *reporesolver.ResolvedRepo) ([]*feeds.Item, error) { 75 + func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, repo *models.Repo, ownerSlashRepo string) ([]*feeds.Item, error) { 75 76 owner, err := rp.idResolver.ResolveIdent(ctx, pull.OwnerDid) 76 77 if err != nil { 77 78 return nil, err ··· 80 81 var items []*feeds.Item 81 82 82 83 state := rp.getPullState(pull) 83 - description := rp.buildPullDescription(owner.Handle, state, pull, f.OwnerSlashRepo()) 84 + description := rp.buildPullDescription(owner.Handle, state, pull, ownerSlashRepo) 84 85 85 86 mainItem := &feeds.Item{ 86 87 Title: fmt.Sprintf("[PR #%d] %s", pull.PullId, pull.Title), 87 88 Description: description, 88 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId)}, 89 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId)}, 89 90 Created: pull.Created, 90 91 Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)}, 91 92 } ··· 98 99 99 100 roundItem := &feeds.Item{ 100 101 Title: fmt.Sprintf("[PR #%d] %s (round #%d)", pull.PullId, pull.Title, round.RoundNumber), 101 - Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in %s", owner.Handle, round.RoundNumber, pull.PullId, 
f.OwnerSlashRepo()), 102 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId, round.RoundNumber)}, 102 + Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in @%s", owner.Handle, round.RoundNumber, pull.PullId, ownerSlashRepo), 103 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId, round.RoundNumber)}, 103 104 Created: round.Created, 104 105 Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)}, 105 106 } ··· 109 110 return items, nil 110 111 } 111 112 112 - func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, f *reporesolver.ResolvedRepo) (*feeds.Item, error) { 113 + func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, repo *models.Repo, ownerSlashRepo string) (*feeds.Item, error) { 113 114 owner, err := rp.idResolver.ResolveIdent(ctx, issue.Did) 114 115 if err != nil { 115 116 return nil, err ··· 122 123 123 124 return &feeds.Item{ 124 125 Title: fmt.Sprintf("[Issue #%d] %s", issue.IssueId, issue.Title), 125 - Description: fmt.Sprintf("@%s %s issue #%d in %s", owner.Handle, state, issue.IssueId, f.OwnerSlashRepo()), 126 - Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), issue.IssueId)}, 126 + Description: fmt.Sprintf("@%s %s issue #%d in @%s", owner.Handle, state, issue.IssueId, ownerSlashRepo), 127 + Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, ownerSlashRepo, issue.IssueId)}, 127 128 Created: issue.Created, 128 129 Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)}, 129 130 }, nil ··· 152 153 log.Println("failed to fully resolve repo:", err) 153 154 return 154 155 } 156 + repoOwnerId, ok := r.Context().Value("resolvedId").(identity.Identity) 157 + if !ok || repoOwnerId.Handle.IsInvalidHandle() { 158 + log.Println("failed to get resolved repo owner id") 159 + return 160 + } 161 + ownerSlashRepo := repoOwnerId.Handle.String() + "/" + f.Name 155 162 156 - feed, err := rp.getRepoFeed(r.Context(), f) 163 + feed, err := rp.getRepoFeed(r.Context(), f, ownerSlashRepo) 157 164 if err != nil { 158 165 log.Println("failed to get repo feed:", err) 159 166 rp.pages.Error500(w)
+22 -24
appview/repo/index.go
··· 22 22 "tangled.org/core/appview/db" 23 23 "tangled.org/core/appview/models" 24 24 "tangled.org/core/appview/pages" 25 - "tangled.org/core/appview/reporesolver" 26 25 "tangled.org/core/appview/xrpcclient" 26 + "tangled.org/core/orm" 27 27 "tangled.org/core/types" 28 28 29 29 "github.com/go-chi/chi/v5" ··· 52 52 } 53 53 54 54 user := rp.oauth.GetUser(r) 55 - repoInfo := f.RepoInfo(user) 56 55 57 56 // Build index response from multiple XRPC calls 58 57 result, err := rp.buildIndexResponse(r.Context(), xrpcc, f, ref) ··· 62 61 rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 63 62 LoggedInUser: user, 64 63 NeedsKnotUpgrade: true, 65 - RepoInfo: repoInfo, 64 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 66 65 }) 67 66 return 68 67 } ··· 124 123 l.Error("failed to get email to did map", "err", err) 125 124 } 126 125 127 - vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, commitsTrunc) 126 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, commitsTrunc) 128 127 if err != nil { 129 128 l.Error("failed to GetVerifiedObjectCommits", "err", err) 130 129 } ··· 140 139 for _, c := range commitsTrunc { 141 140 shas = append(shas, c.Hash.String()) 142 141 } 143 - pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas) 142 + pipelines, err := getPipelineStatuses(rp.db, f, shas) 144 143 if err != nil { 145 144 l.Error("failed to fetch pipeline statuses", "err", err) 146 145 // non-fatal ··· 148 147 149 148 rp.pages.RepoIndexPage(w, pages.RepoIndexParams{ 150 149 LoggedInUser: user, 151 - RepoInfo: repoInfo, 150 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 152 151 TagMap: tagMap, 153 152 RepoIndexResponse: *result, 154 153 CommitsTrunc: commitsTrunc, ··· 165 164 func (rp *Repo) getLanguageInfo( 166 165 ctx context.Context, 167 166 l *slog.Logger, 168 - f *reporesolver.ResolvedRepo, 167 + repo *models.Repo, 169 168 xrpcc *indigoxrpc.Client, 170 169 currentRef string, 171 170 isDefaultRef bool, ··· 173 172 // first attempt to fetch from db 174 173 langs, err := db.GetRepoLanguages( 175 174 rp.db, 176 - db.FilterEq("repo_at", f.RepoAt()), 177 - db.FilterEq("ref", currentRef), 175 + orm.FilterEq("repo_at", repo.RepoAt()), 176 + orm.FilterEq("ref", currentRef), 178 177 ) 179 178 180 179 if err != nil || langs == nil { 181 180 // non-fatal, fetch langs from ks via XRPC 182 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 183 - ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, repo) 181 + didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 182 + ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, didSlashRepo) 184 183 if err != nil { 185 184 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 186 185 l.Error("failed to call XRPC repo.languages", "err", xrpcerr) ··· 195 194 196 195 for _, lang := range ls.Languages { 197 196 langs = append(langs, models.RepoLanguage{ 198 - RepoAt: f.RepoAt(), 197 + RepoAt: repo.RepoAt(), 199 198 Ref: currentRef, 200 199 IsDefaultRef: isDefaultRef, 201 200 Language: lang.Name, ··· 210 209 defer tx.Rollback() 211 210 212 211 // update appview's cache 213 - err = db.UpdateRepoLanguages(tx, f.RepoAt(), currentRef, langs) 212 + err = db.UpdateRepoLanguages(tx, repo.RepoAt(), currentRef, langs) 214 213 if err != nil { 215 214 // non-fatal 216 215 l.Error("failed to cache lang results", "err", err) ··· 255 254 } 256 255 257 256 // buildIndexResponse creates a RepoIndexResponse by combining multiple xrpc calls in parallel 258 - func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc 
*indigoxrpc.Client, f *reporesolver.ResolvedRepo, ref string) (*types.RepoIndexResponse, error) { 259 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 257 + func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, repo *models.Repo, ref string) (*types.RepoIndexResponse, error) { 258 + didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name) 260 259 261 260 // first get branches to determine the ref if not specified 262 - branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, repo) 261 + branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, didSlashRepo) 263 262 if err != nil { 264 263 return nil, fmt.Errorf("failed to call repoBranches: %w", err) 265 264 } ··· 303 302 wg.Add(1) 304 303 go func() { 305 304 defer wg.Done() 306 - tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo) 305 + tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, didSlashRepo) 307 306 if err != nil { 308 307 errs = errors.Join(errs, fmt.Errorf("failed to call repoTags: %w", err)) 309 308 return ··· 318 317 wg.Add(1) 319 318 go func() { 320 319 defer wg.Done() 321 - resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, repo) 320 + resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, didSlashRepo) 322 321 if err != nil { 323 322 errs = errors.Join(errs, fmt.Errorf("failed to call repoTree: %w", err)) 324 323 return ··· 330 329 wg.Add(1) 331 330 go func() { 332 331 defer wg.Done() 333 - logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, repo) 332 + logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, didSlashRepo) 334 333 if err != nil { 335 334 errs = errors.Join(errs, fmt.Errorf("failed to call repoLog: %w", err)) 336 335 return ··· 351 350 if treeResp != nil && treeResp.Files != nil { 352 351 for _, file := range treeResp.Files { 353 352 niceFile := types.NiceTree{ 354 - IsFile: file.Is_file, 355 - IsSubtree: file.Is_subtree, 356 - Name: file.Name, 357 - Mode: file.Mode, 358 - Size: file.Size, 353 + Name: file.Name, 354 + Mode: file.Mode, 355 + Size: file.Size, 359 356 } 357 + 360 358 if file.Last_commit != nil { 361 359 when, _ := time.Parse(time.RFC3339, file.Last_commit.When) 362 360 niceFile.LastCommit = &types.LastCommitInfo{
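buildIndexResponse above fans the tags, tree and log XRPC calls out into goroutines behind a sync.WaitGroup and folds failures together with errors.Join. A generic sketch of that fan-out shape, using only context, errors and sync from the standard library and one error slot per call so the join needs no extra locking (the elided parts of the hunk may coordinate the shared errs variable differently):

// fanOut runs each call concurrently and joins whatever errors come back.
// Each call would wrap one generated XRPC invocation (repoTags, repoTree,
// repoLog) and decode into its own result variable.
func fanOut(ctx context.Context, calls ...func(context.Context) error) error {
	var wg sync.WaitGroup
	errSlots := make([]error, len(calls))

	for i, call := range calls {
		wg.Add(1)
		go func(i int, call func(context.Context) error) {
			defer wg.Done()
			errSlots[i] = call(ctx)
		}(i, call)
	}

	wg.Wait()
	return errors.Join(errSlots...) // nil slots are ignored
}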
+8 -11
appview/repo/log.go
··· 57 57 cursor = strconv.Itoa(offset) 58 58 } 59 59 60 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 60 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 61 61 xrpcBytes, err := tangled.RepoLog(r.Context(), xrpcc, cursor, limit, "", ref, repo) 62 62 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 63 63 l.Error("failed to call XRPC repo.log", "err", xrpcerr) ··· 116 116 l.Error("failed to fetch email to did mapping", "err", err) 117 117 } 118 118 119 - vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, xrpcResp.Commits) 119 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, xrpcResp.Commits) 120 120 if err != nil { 121 121 l.Error("failed to GetVerifiedObjectCommits", "err", err) 122 122 } 123 - 124 - repoInfo := f.RepoInfo(user) 125 123 126 124 var shas []string 127 125 for _, c := range xrpcResp.Commits { 128 126 shas = append(shas, c.Hash.String()) 129 127 } 130 - pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas) 128 + pipelines, err := getPipelineStatuses(rp.db, f, shas) 131 129 if err != nil { 132 130 l.Error("failed to getPipelineStatuses", "err", err) 133 131 // non-fatal ··· 136 134 rp.pages.RepoLog(w, pages.RepoLogParams{ 137 135 LoggedInUser: user, 138 136 TagMap: tagMap, 139 - RepoInfo: repoInfo, 137 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 140 138 RepoLogResponse: xrpcResp, 141 139 EmailToDid: emailToDidMap, 142 140 VerifiedCommits: vc, ··· 174 172 Host: host, 175 173 } 176 174 177 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 175 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 178 176 xrpcBytes, err := tangled.RepoDiff(r.Context(), xrpcc, ref, repo) 179 177 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 180 178 l.Error("failed to call XRPC repo.diff", "err", xrpcerr) ··· 194 192 l.Error("failed to get email to did mapping", "err", err) 195 193 } 196 194 197 - vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.NiceDiff{*result.Diff}) 195 + vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.Commit{result.Diff.Commit}) 198 196 if err != nil { 199 197 l.Error("failed to GetVerifiedCommits", "err", err) 200 198 } 201 199 202 200 user := rp.oauth.GetUser(r) 203 - repoInfo := f.RepoInfo(user) 204 - pipelines, err := getPipelineStatuses(rp.db, repoInfo, []string{result.Diff.Commit.This}) 201 + pipelines, err := getPipelineStatuses(rp.db, f, []string{result.Diff.Commit.This}) 205 202 if err != nil { 206 203 l.Error("failed to getPipelineStatuses", "err", err) 207 204 // non-fatal ··· 213 210 214 211 rp.pages.RepoCommit(w, pages.RepoCommitParams{ 215 212 LoggedInUser: user, 216 - RepoInfo: f.RepoInfo(user), 213 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 217 214 RepoCommitResponse: result, 218 215 EmailToDid: emailToDidMap, 219 216 VerifiedCommit: vc,
+4 -3
appview/repo/opengraph.go
··· 16 16 "tangled.org/core/appview/db" 17 17 "tangled.org/core/appview/models" 18 18 "tangled.org/core/appview/ogcard" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/types" 20 21 ) 21 22 ··· 338 339 var languageStats []types.RepoLanguageDetails 339 340 langs, err := db.GetRepoLanguages( 340 341 rp.db, 341 - db.FilterEq("repo_at", f.RepoAt()), 342 - db.FilterEq("is_default_ref", 1), 342 + orm.FilterEq("repo_at", f.RepoAt()), 343 + orm.FilterEq("is_default_ref", 1), 343 344 ) 344 345 if err != nil { 345 346 log.Printf("failed to get language stats from db: %v", err) ··· 374 375 }) 375 376 } 376 377 377 - card, err := rp.drawRepoSummaryCard(&f.Repo, languageStats) 378 + card, err := rp.drawRepoSummaryCard(f, languageStats) 378 379 if err != nil { 379 380 log.Println("failed to draw repo summary card", err) 380 381 http.Error(w, "failed to draw repo summary card", http.StatusInternalServerError)
+37 -37
appview/repo/repo.go
··· 24 24 xrpcclient "tangled.org/core/appview/xrpcclient" 25 25 "tangled.org/core/eventconsumer" 26 26 "tangled.org/core/idresolver" 27 + "tangled.org/core/orm" 27 28 "tangled.org/core/rbac" 28 29 "tangled.org/core/tid" 29 30 "tangled.org/core/xrpc/serviceauth" ··· 78 79 } 79 80 } 80 81 81 - // isTextualMimeType returns true if the MIME type represents textual content 82 - 83 82 // modify the spindle configured for this repo 84 83 func (rp *Repo) EditSpindle(w http.ResponseWriter, r *http.Request) { 85 84 user := rp.oauth.GetUser(r) ··· 120 119 } 121 120 } 122 121 123 - newRepo := f.Repo 122 + newRepo := *f 124 123 newRepo.Spindle = newSpindle 125 124 record := newRepo.AsRecord() 126 125 ··· 259 258 l.Info("wrote label record to PDS") 260 259 261 260 // update the repo to subscribe to this label 262 - newRepo := f.Repo 261 + newRepo := *f 263 262 newRepo.Labels = append(newRepo.Labels, aturi) 264 263 repoRecord := newRepo.AsRecord() 265 264 ··· 347 346 // get form values 348 347 labelId := r.FormValue("label-id") 349 348 350 - label, err := db.GetLabelDefinition(rp.db, db.FilterEq("id", labelId)) 349 + label, err := db.GetLabelDefinition(rp.db, orm.FilterEq("id", labelId)) 351 350 if err != nil { 352 351 fail("Failed to find label definition.", err) 353 352 return ··· 371 370 } 372 371 373 372 // update repo record to remove the label reference 374 - newRepo := f.Repo 373 + newRepo := *f 375 374 var updated []string 376 375 removedAt := label.AtUri().String() 377 376 for _, l := range newRepo.Labels { ··· 411 410 412 411 err = db.UnsubscribeLabel( 413 412 tx, 414 - db.FilterEq("repo_at", f.RepoAt()), 415 - db.FilterEq("label_at", removedAt), 413 + orm.FilterEq("repo_at", f.RepoAt()), 414 + orm.FilterEq("label_at", removedAt), 416 415 ) 417 416 if err != nil { 418 417 fail("Failed to unsubscribe label.", err) 419 418 return 420 419 } 421 420 422 - err = db.DeleteLabelDefinition(tx, db.FilterEq("id", label.Id)) 421 + err = db.DeleteLabelDefinition(tx, orm.FilterEq("id", label.Id)) 423 422 if err != nil { 424 423 fail("Failed to delete label definition.", err) 425 424 return ··· 458 457 } 459 458 460 459 labelAts := r.Form["label"] 461 - _, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts)) 460 + _, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts)) 462 461 if err != nil { 463 462 fail("Failed to subscribe to label.", err) 464 463 return 465 464 } 466 465 467 - newRepo := f.Repo 466 + newRepo := *f 468 467 newRepo.Labels = append(newRepo.Labels, labelAts...) 
469 468 470 469 // dedup ··· 479 478 return 480 479 } 481 480 482 - ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey) 481 + ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey) 483 482 if err != nil { 484 483 fail("Failed to update labels, no record found on PDS.", err) 485 484 return ··· 544 543 } 545 544 546 545 labelAts := r.Form["label"] 547 - _, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts)) 546 + _, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts)) 548 547 if err != nil { 549 548 fail("Failed to unsubscribe to label.", err) 550 549 return 551 550 } 552 551 553 552 // update repo record to remove the label reference 554 - newRepo := f.Repo 553 + newRepo := *f 555 554 var updated []string 556 555 for _, l := range newRepo.Labels { 557 556 if !slices.Contains(labelAts, l) { ··· 567 566 return 568 567 } 569 568 570 - ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey) 569 + ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey) 571 570 if err != nil { 572 571 fail("Failed to update labels, no record found on PDS.", err) 573 572 return ··· 584 583 585 584 err = db.UnsubscribeLabel( 586 585 rp.db, 587 - db.FilterEq("repo_at", f.RepoAt()), 588 - db.FilterIn("label_at", labelAts), 586 + orm.FilterEq("repo_at", f.RepoAt()), 587 + orm.FilterIn("label_at", labelAts), 589 588 ) 590 589 if err != nil { 591 590 fail("Failed to unsubscribe label.", err) ··· 614 613 615 614 labelDefs, err := db.GetLabelDefinitions( 616 615 rp.db, 617 - db.FilterIn("at_uri", f.Repo.Labels), 618 - db.FilterContains("scope", subject.Collection().String()), 616 + orm.FilterIn("at_uri", f.Labels), 617 + orm.FilterContains("scope", subject.Collection().String()), 619 618 ) 620 619 if err != nil { 621 620 l.Error("failed to fetch label defs", "err", err) ··· 627 626 defs[l.AtUri().String()] = &l 628 627 } 629 628 630 - states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject)) 629 + states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject)) 631 630 if err != nil { 632 631 l.Error("failed to build label state", "err", err) 633 632 return ··· 637 636 user := rp.oauth.GetUser(r) 638 637 rp.pages.LabelPanel(w, pages.LabelPanelParams{ 639 638 LoggedInUser: user, 640 - RepoInfo: f.RepoInfo(user), 639 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 641 640 Defs: defs, 642 641 Subject: subject.String(), 643 642 State: state, ··· 662 661 663 662 labelDefs, err := db.GetLabelDefinitions( 664 663 rp.db, 665 - db.FilterIn("at_uri", f.Repo.Labels), 666 - db.FilterContains("scope", subject.Collection().String()), 664 + orm.FilterIn("at_uri", f.Labels), 665 + orm.FilterContains("scope", subject.Collection().String()), 667 666 ) 668 667 if err != nil { 669 668 l.Error("failed to fetch labels", "err", err) ··· 675 674 defs[l.AtUri().String()] = &l 676 675 } 677 676 678 - states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject)) 677 + states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject)) 679 678 if err != nil { 680 679 l.Error("failed to build label state", "err", err) 681 680 return ··· 685 684 user := rp.oauth.GetUser(r) 686 685 rp.pages.EditLabelPanel(w, pages.EditLabelPanelParams{ 687 686 LoggedInUser: user, 688 - RepoInfo: f.RepoInfo(user), 687 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 689 688 Defs: defs, 690 689 Subject: subject.String(), 691 690 State: 
state, ··· 866 865 r.Context(), 867 866 client, 868 867 &tangled.RepoDelete_Input{ 869 - Did: f.OwnerDid(), 868 + Did: f.Did, 870 869 Name: f.Name, 871 870 Rkey: f.Rkey, 872 871 }, ··· 904 903 l.Info("removed collaborators") 905 904 906 905 // remove repo RBAC 907 - err = rp.enforcer.RemoveRepo(f.OwnerDid(), f.Knot, f.DidSlashRepo()) 906 + err = rp.enforcer.RemoveRepo(f.Did, f.Knot, f.DidSlashRepo()) 908 907 if err != nil { 909 908 rp.pages.Notice(w, noticeId, "Failed to update RBAC rules") 910 909 return 911 910 } 912 911 913 912 // remove repo from db 914 - err = db.RemoveRepo(tx, f.OwnerDid(), f.Name) 913 + err = db.RemoveRepo(tx, f.Did, f.Name) 915 914 if err != nil { 916 915 rp.pages.Notice(w, noticeId, "Failed to update appview") 917 916 return ··· 932 931 return 933 932 } 934 933 935 - rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.OwnerDid())) 934 + rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.Did)) 936 935 } 937 936 938 937 func (rp *Repo) SyncRepoFork(w http.ResponseWriter, r *http.Request) { ··· 961 960 return 962 961 } 963 962 964 - repoInfo := f.RepoInfo(user) 965 - if repoInfo.Source == nil { 963 + if f.Source == "" { 966 964 rp.pages.Notice(w, "repo", "This repository is not a fork.") 967 965 return 968 966 } ··· 973 971 &tangled.RepoForkSync_Input{ 974 972 Did: user.Did, 975 973 Name: f.Name, 976 - Source: repoInfo.Source.RepoAt().String(), 974 + Source: f.Source, 977 975 Branch: ref, 978 976 }, 979 977 ) ··· 1009 1007 rp.pages.ForkRepo(w, pages.ForkRepoParams{ 1010 1008 LoggedInUser: user, 1011 1009 Knots: knots, 1012 - RepoInfo: f.RepoInfo(user), 1010 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 1013 1011 }) 1014 1012 1015 1013 case http.MethodPost: ··· 1039 1037 // in the user's account. 1040 1038 existingRepo, err := db.GetRepo( 1041 1039 rp.db, 1042 - db.FilterEq("did", user.Did), 1043 - db.FilterEq("name", forkName), 1040 + orm.FilterEq("did", user.Did), 1041 + orm.FilterEq("name", forkName), 1044 1042 ) 1045 1043 if err != nil { 1046 1044 if !errors.Is(err, sql.ErrNoRows) { ··· 1060 1058 uri = "http" 1061 1059 } 1062 1060 1063 - forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name) 1061 + forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.Did, f.Name) 1064 1062 l = l.With("cloneUrl", forkSourceUrl) 1065 1063 1066 1064 sourceAt := f.RepoAt().String() ··· 1073 1071 Knot: targetKnot, 1074 1072 Rkey: rkey, 1075 1073 Source: sourceAt, 1076 - Description: f.Repo.Description, 1074 + Description: f.Description, 1077 1075 Created: time.Now(), 1078 1076 Labels: rp.config.Label.DefaultLabelDefs, 1079 1077 } ··· 1132 1130 } 1133 1131 defer rollback() 1134 1132 1133 + // TODO: this could coordinate better with the knot to recieve a clone status 1135 1134 client, err := rp.oauth.ServiceClient( 1136 1135 r, 1137 1136 oauth.WithService(targetKnot), 1138 1137 oauth.WithLxm(tangled.RepoCreateNSID), 1139 1138 oauth.WithDev(rp.config.Core.Dev), 1139 + oauth.WithTimeout(time.Second*20), // big repos take time to clone 1140 1140 ) 1141 1141 if err != nil { 1142 1142 l.Error("could not create service client", "err", err)
+20 -35
appview/repo/repo_util.go
··· 1 1 package repo 2 2 3 3 import ( 4 - "crypto/rand" 5 - "math/big" 4 + "maps" 6 5 "slices" 7 6 "sort" 8 7 "strings" 9 8 10 9 "tangled.org/core/appview/db" 11 10 "tangled.org/core/appview/models" 12 - "tangled.org/core/appview/pages/repoinfo" 11 + "tangled.org/core/orm" 13 12 "tangled.org/core/types" 14 - 15 - "github.com/go-git/go-git/v5/plumbing/object" 16 13 ) 17 14 18 15 func sortFiles(files []types.NiceTree) { 19 16 sort.Slice(files, func(i, j int) bool { 20 - iIsFile := files[i].IsFile 21 - jIsFile := files[j].IsFile 17 + iIsFile := files[i].IsFile() 18 + jIsFile := files[j].IsFile() 22 19 if iIsFile != jIsFile { 23 20 return !iIsFile 24 21 } ··· 45 42 }) 46 43 } 47 44 48 - func uniqueEmails(commits []*object.Commit) []string { 45 + func uniqueEmails(commits []types.Commit) []string { 49 46 emails := make(map[string]struct{}) 50 47 for _, commit := range commits { 51 - if commit.Author.Email != "" { 52 - emails[commit.Author.Email] = struct{}{} 53 - } 54 - if commit.Committer.Email != "" { 55 - emails[commit.Committer.Email] = struct{}{} 48 + emails[commit.Author.Email] = struct{}{} 49 + emails[commit.Committer.Email] = struct{}{} 50 + for _, c := range commit.CoAuthors() { 51 + emails[c.Email] = struct{}{} 56 52 } 57 53 } 58 - var uniqueEmails []string 59 - for email := range emails { 60 - uniqueEmails = append(uniqueEmails, email) 61 - } 62 - return uniqueEmails 54 + 55 + // delete empty emails if any, from the set 56 + delete(emails, "") 57 + 58 + return slices.Collect(maps.Keys(emails)) 63 59 } 64 60 65 61 func balanceIndexItems(commitCount, branchCount, tagCount, fileCount int) (commitsTrunc int, branchesTrunc int, tagsTrunc int) { ··· 90 86 return 91 87 } 92 88 93 - func randomString(n int) string { 94 - const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 95 - result := make([]byte, n) 96 - 97 - for i := 0; i < n; i++ { 98 - n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) 99 - result[i] = letters[n.Int64()] 100 - } 101 - 102 - return string(result) 103 - } 104 - 105 89 // grab pipelines from DB and munge that into a hashmap with commit sha as key 106 90 // 107 91 // golang is so blessed that it requires 35 lines of imperative code for this 108 92 func getPipelineStatuses( 109 93 d *db.DB, 110 - repoInfo repoinfo.RepoInfo, 94 + repo *models.Repo, 111 95 shas []string, 112 96 ) (map[string]models.Pipeline, error) { 113 97 m := make(map[string]models.Pipeline) ··· 118 102 119 103 ps, err := db.GetPipelineStatuses( 120 104 d, 121 - db.FilterEq("repo_owner", repoInfo.OwnerDid), 122 - db.FilterEq("repo_name", repoInfo.Name), 123 - db.FilterEq("knot", repoInfo.Knot), 124 - db.FilterIn("sha", shas), 105 + len(shas), 106 + orm.FilterEq("repo_owner", repo.Did), 107 + orm.FilterEq("repo_name", repo.Name), 108 + orm.FilterEq("knot", repo.Knot), 109 + orm.FilterIn("sha", shas), 125 110 ) 126 111 if err != nil { 127 112 return nil, err
-1
appview/repo/router.go
··· 61 61 // for example: 62 62 // /compare/master...some/feature 63 63 // /compare/master...example.com:another/feature <- this is a fork 64 - r.Get("/{base}/{head}", rp.Compare) 65 64 r.Get("/*", rp.Compare) 66 65 }) 67 66
+41 -12
appview/repo/settings.go
··· 10 10 11 11 "tangled.org/core/api/tangled" 12 12 "tangled.org/core/appview/db" 13 + "tangled.org/core/appview/models" 13 14 "tangled.org/core/appview/oauth" 14 15 "tangled.org/core/appview/pages" 15 16 xrpcclient "tangled.org/core/appview/xrpcclient" 17 + "tangled.org/core/orm" 16 18 "tangled.org/core/types" 17 19 18 20 comatproto "github.com/bluesky-social/indigo/api/atproto" ··· 194 196 Host: host, 195 197 } 196 198 197 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 199 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 198 200 xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo) 199 201 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 200 202 l.Error("failed to call XRPC repo.branches", "err", xrpcerr) ··· 209 211 return 210 212 } 211 213 212 - defaultLabels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs)) 214 + defaultLabels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs)) 213 215 if err != nil { 214 216 l.Error("failed to fetch labels", "err", err) 215 217 rp.pages.Error503(w) 216 218 return 217 219 } 218 220 219 - labels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", f.Repo.Labels)) 221 + labels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", f.Labels)) 220 222 if err != nil { 221 223 l.Error("failed to fetch labels", "err", err) 222 224 rp.pages.Error503(w) ··· 237 239 labels = labels[:n] 238 240 239 241 subscribedLabels := make(map[string]struct{}) 240 - for _, l := range f.Repo.Labels { 242 + for _, l := range f.Labels { 241 243 subscribedLabels[l] = struct{}{} 242 244 } 243 245 ··· 254 256 255 257 rp.pages.RepoGeneralSettings(w, pages.RepoGeneralSettingsParams{ 256 258 LoggedInUser: user, 257 - RepoInfo: f.RepoInfo(user), 259 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 258 260 Branches: result.Branches, 259 261 Labels: labels, 260 262 DefaultLabels: defaultLabels, ··· 271 273 f, err := rp.repoResolver.Resolve(r) 272 274 user := rp.oauth.GetUser(r) 273 275 274 - repoCollaborators, err := f.Collaborators(r.Context()) 276 + collaborators, err := func(repo *models.Repo) ([]pages.Collaborator, error) { 277 + repoCollaborators, err := rp.enforcer.E.GetImplicitUsersForResourceByDomain(repo.DidSlashRepo(), repo.Knot) 278 + if err != nil { 279 + return nil, err 280 + } 281 + var collaborators []pages.Collaborator 282 + for _, item := range repoCollaborators { 283 + // currently only two roles: owner and member 284 + var role string 285 + switch item[3] { 286 + case "repo:owner": 287 + role = "owner" 288 + case "repo:collaborator": 289 + role = "collaborator" 290 + default: 291 + continue 292 + } 293 + 294 + did := item[0] 295 + 296 + c := pages.Collaborator{ 297 + Did: did, 298 + Role: role, 299 + } 300 + collaborators = append(collaborators, c) 301 + } 302 + return collaborators, nil 303 + }(f) 275 304 if err != nil { 276 305 l.Error("failed to get collaborators", "err", err) 277 306 } 278 307 279 308 rp.pages.RepoAccessSettings(w, pages.RepoAccessSettingsParams{ 280 309 LoggedInUser: user, 281 - RepoInfo: f.RepoInfo(user), 310 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 282 311 Tabs: settingsTabs, 283 312 Tab: "access", 284 - Collaborators: repoCollaborators, 313 + Collaborators: collaborators, 285 314 }) 286 315 } 287 316 ··· 292 321 user := rp.oauth.GetUser(r) 293 322 294 323 // all spindles that the repo owner is a member of 295 - spindles, err := rp.enforcer.GetSpindlesForUser(f.OwnerDid()) 324 + spindles, err := 
rp.enforcer.GetSpindlesForUser(f.Did) 296 325 if err != nil { 297 326 l.Error("failed to fetch spindles", "err", err) 298 327 return ··· 339 368 340 369 rp.pages.RepoPipelineSettings(w, pages.RepoPipelineSettingsParams{ 341 370 LoggedInUser: user, 342 - RepoInfo: f.RepoInfo(user), 371 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 343 372 Tabs: settingsTabs, 344 373 Tab: "pipelines", 345 374 Spindles: spindles, ··· 374 403 ) 375 404 376 405 err = rp.validator.ValidateURI(website) 377 - if err != nil { 406 + if website != "" && err != nil { 378 407 l.Error("invalid uri", "err", err) 379 408 rp.pages.Notice(w, noticeId, err.Error()) 380 409 return ··· 388 417 } 389 418 l.Debug("got", "topicsStr", topicStr, "topics", topics) 390 419 391 - newRepo := f.Repo 420 + newRepo := *f 392 421 newRepo.Description = description 393 422 newRepo.Website = website 394 423 newRepo.Topics = topics
+4 -3
appview/repo/tags.go
··· 10 10 "tangled.org/core/appview/models" 11 11 "tangled.org/core/appview/pages" 12 12 xrpcclient "tangled.org/core/appview/xrpcclient" 13 + "tangled.org/core/orm" 13 14 "tangled.org/core/types" 14 15 15 16 indigoxrpc "github.com/bluesky-social/indigo/xrpc" ··· 31 32 xrpcc := &indigoxrpc.Client{ 32 33 Host: host, 33 34 } 34 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 35 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 35 36 xrpcBytes, err := tangled.RepoTags(r.Context(), xrpcc, "", 0, repo) 36 37 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 37 38 l.Error("failed to call XRPC repo.tags", "err", xrpcerr) ··· 44 45 rp.pages.Error503(w) 45 46 return 46 47 } 47 - artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt())) 48 + artifacts, err := db.GetArtifact(rp.db, orm.FilterEq("repo_at", f.RepoAt())) 48 49 if err != nil { 49 50 l.Error("failed grab artifacts", "err", err) 50 51 return ··· 71 72 user := rp.oauth.GetUser(r) 72 73 rp.pages.RepoTags(w, pages.RepoTagsParams{ 73 74 LoggedInUser: user, 74 - RepoInfo: f.RepoInfo(user), 75 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 75 76 RepoTagsResponse: result, 76 77 ArtifactMap: artifactMap, 77 78 DanglingArtifacts: danglingArtifacts,
+10 -9
appview/repo/tree.go
··· 9 9 10 10 "tangled.org/core/api/tangled" 11 11 "tangled.org/core/appview/pages" 12 + "tangled.org/core/appview/reporesolver" 12 13 xrpcclient "tangled.org/core/appview/xrpcclient" 13 14 "tangled.org/core/types" 14 15 ··· 39 40 xrpcc := &indigoxrpc.Client{ 40 41 Host: host, 41 42 } 42 - repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name) 43 + repo := fmt.Sprintf("%s/%s", f.Did, f.Name) 43 44 xrpcResp, err := tangled.RepoTree(r.Context(), xrpcc, treePath, ref, repo) 44 45 if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil { 45 46 l.Error("failed to call XRPC repo.tree", "err", xrpcerr) ··· 50 51 files := make([]types.NiceTree, len(xrpcResp.Files)) 51 52 for i, xrpcFile := range xrpcResp.Files { 52 53 file := types.NiceTree{ 53 - Name: xrpcFile.Name, 54 - Mode: xrpcFile.Mode, 55 - Size: int64(xrpcFile.Size), 56 - IsFile: xrpcFile.Is_file, 57 - IsSubtree: xrpcFile.Is_subtree, 54 + Name: xrpcFile.Name, 55 + Mode: xrpcFile.Mode, 56 + Size: int64(xrpcFile.Size), 58 57 } 59 58 // Convert last commit info if present 60 59 if xrpcFile.Last_commit != nil { ··· 81 80 result.ReadmeFileName = xrpcResp.Readme.Filename 82 81 result.Readme = xrpcResp.Readme.Contents 83 82 } 83 + ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f) 84 84 // redirects tree paths trying to access a blob; in this case the result.Files is unpopulated, 85 85 // so we can safely redirect to the "parent" (which is the same file). 86 86 if len(result.Files) == 0 && result.Parent == treePath { 87 - redirectTo := fmt.Sprintf("/%s/blob/%s/%s", f.OwnerSlashRepo(), url.PathEscape(ref), result.Parent) 87 + redirectTo := fmt.Sprintf("/%s/blob/%s/%s", ownerSlashRepo, url.PathEscape(ref), result.Parent) 88 88 http.Redirect(w, r, redirectTo, http.StatusFound) 89 89 return 90 90 } 91 91 user := rp.oauth.GetUser(r) 92 92 var breadcrumbs [][]string 93 - breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))}) 93 + breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))}) 94 94 if treePath != "" { 95 95 for idx, elem := range strings.Split(treePath, "/") { 96 96 breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))}) 97 97 } 98 98 } 99 99 sortFiles(result.Files) 100 + 100 101 rp.pages.RepoTree(w, pages.RepoTreeParams{ 101 102 LoggedInUser: user, 102 103 BreadCrumbs: breadcrumbs, 103 104 TreePath: treePath, 104 - RepoInfo: f.RepoInfo(user), 105 + RepoInfo: rp.repoResolver.GetRepoInfo(r, user), 105 106 RepoTreeResponse: result, 106 107 }) 107 108 }
+76 -164
appview/reporesolver/resolver.go
··· 1 1 package reporesolver 2 2 3 3 import ( 4 - "context" 5 - "database/sql" 6 - "errors" 7 4 "fmt" 8 5 "log" 9 6 "net/http" ··· 12 9 "strings" 13 10 14 11 "github.com/bluesky-social/indigo/atproto/identity" 15 - securejoin "github.com/cyphar/filepath-securejoin" 16 12 "github.com/go-chi/chi/v5" 17 13 "tangled.org/core/appview/config" 18 14 "tangled.org/core/appview/db" 19 15 "tangled.org/core/appview/models" 20 16 "tangled.org/core/appview/oauth" 21 - "tangled.org/core/appview/pages" 22 17 "tangled.org/core/appview/pages/repoinfo" 23 - "tangled.org/core/idresolver" 24 18 "tangled.org/core/rbac" 25 19 ) 26 20 27 - type ResolvedRepo struct { 28 - models.Repo 29 - OwnerId identity.Identity 30 - CurrentDir string 31 - Ref string 32 - 33 - rr *RepoResolver 21 + type RepoResolver struct { 22 + config *config.Config 23 + enforcer *rbac.Enforcer 24 + execer db.Execer 34 25 } 35 26 36 - type RepoResolver struct { 37 - config *config.Config 38 - enforcer *rbac.Enforcer 39 - idResolver *idresolver.Resolver 40 - execer db.Execer 27 + func New(config *config.Config, enforcer *rbac.Enforcer, execer db.Execer) *RepoResolver { 28 + return &RepoResolver{config: config, enforcer: enforcer, execer: execer} 41 29 } 42 30 43 - func New(config *config.Config, enforcer *rbac.Enforcer, resolver *idresolver.Resolver, execer db.Execer) *RepoResolver { 44 - return &RepoResolver{config: config, enforcer: enforcer, idResolver: resolver, execer: execer} 31 + // NOTE: this... should not even be here. the entire package will be removed in future refactor 32 + func GetBaseRepoPath(r *http.Request, repo *models.Repo) string { 33 + var ( 34 + user = chi.URLParam(r, "user") 35 + name = chi.URLParam(r, "repo") 36 + ) 37 + if user == "" || name == "" { 38 + return repo.DidSlashRepo() 39 + } 40 + return path.Join(user, name) 45 41 } 46 42 47 - func (rr *RepoResolver) Resolve(r *http.Request) (*ResolvedRepo, error) { 43 + // TODO: move this out of `RepoResolver` struct 44 + func (rr *RepoResolver) Resolve(r *http.Request) (*models.Repo, error) { 48 45 repo, ok := r.Context().Value("repo").(*models.Repo) 49 46 if !ok { 50 47 log.Println("malformed middleware: `repo` not exist in context") 51 48 return nil, fmt.Errorf("malformed middleware") 52 49 } 53 - id, ok := r.Context().Value("resolvedId").(identity.Identity) 54 - if !ok { 55 - log.Println("malformed middleware") 56 - return nil, fmt.Errorf("malformed middleware") 57 - } 58 50 59 - currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath())) 60 - ref := chi.URLParam(r, "ref") 61 - 62 - return &ResolvedRepo{ 63 - Repo: *repo, 64 - OwnerId: id, 65 - CurrentDir: currentDir, 66 - Ref: ref, 67 - 68 - rr: rr, 69 - }, nil 70 - } 71 - 72 - func (f *ResolvedRepo) OwnerDid() string { 73 - return f.OwnerId.DID.String() 74 - } 75 - 76 - func (f *ResolvedRepo) OwnerHandle() string { 77 - return f.OwnerId.Handle.String() 51 + return repo, nil 78 52 } 79 53 80 - func (f *ResolvedRepo) OwnerSlashRepo() string { 81 - handle := f.OwnerId.Handle 82 - 83 - var p string 84 - if handle != "" && !handle.IsInvalidHandle() { 85 - p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.Name) 86 - } else { 87 - p, _ = securejoin.SecureJoin(f.OwnerDid(), f.Name) 54 + // 1. [x] replace `RepoInfo` to `reporesolver.GetRepoInfo(r *http.Request, repo, user)` 55 + // 2. [x] remove `rr`, `CurrentDir`, `Ref` fields from `ResolvedRepo` 56 + // 3. [x] remove `ResolvedRepo` 57 + // 4. 
[ ] replace reporesolver to reposervice 58 + func (rr *RepoResolver) GetRepoInfo(r *http.Request, user *oauth.User) repoinfo.RepoInfo { 59 + ownerId, ook := r.Context().Value("resolvedId").(identity.Identity) 60 + repo, rok := r.Context().Value("repo").(*models.Repo) 61 + if !ook || !rok { 62 + log.Println("malformed request, failed to get repo from context") 88 63 } 89 64 90 - return p 91 - } 65 + // get dir/ref 66 + currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath())) 67 + ref := chi.URLParam(r, "ref") 92 68 93 - func (f *ResolvedRepo) Collaborators(ctx context.Context) ([]pages.Collaborator, error) { 94 - repoCollaborators, err := f.rr.enforcer.E.GetImplicitUsersForResourceByDomain(f.DidSlashRepo(), f.Knot) 95 - if err != nil { 96 - return nil, err 69 + repoAt := repo.RepoAt() 70 + isStarred := false 71 + roles := repoinfo.RolesInRepo{} 72 + if user != nil { 73 + isStarred = db.GetStarStatus(rr.execer, user.Did, repoAt) 74 + roles.Roles = rr.enforcer.GetPermissionsInRepo(user.Did, repo.Knot, repo.DidSlashRepo()) 97 75 } 98 76 99 - var collaborators []pages.Collaborator 100 - for _, item := range repoCollaborators { 101 - // currently only two roles: owner and member 102 - var role string 103 - switch item[3] { 104 - case "repo:owner": 105 - role = "owner" 106 - case "repo:collaborator": 107 - role = "collaborator" 108 - default: 109 - continue 77 + stats := repo.RepoStats 78 + if stats == nil { 79 + starCount, err := db.GetStarCount(rr.execer, repoAt) 80 + if err != nil { 81 + log.Println("failed to get star count for ", repoAt) 110 82 } 111 - 112 - did := item[0] 113 - 114 - c := pages.Collaborator{ 115 - Did: did, 116 - Handle: "", 117 - Role: role, 83 + issueCount, err := db.GetIssueCount(rr.execer, repoAt) 84 + if err != nil { 85 + log.Println("failed to get issue count for ", repoAt) 118 86 } 119 - collaborators = append(collaborators, c) 120 - } 121 - 122 - // populate all collborators with handles 123 - identsToResolve := make([]string, len(collaborators)) 124 - for i, collab := range collaborators { 125 - identsToResolve[i] = collab.Did 126 - } 127 - 128 - resolvedIdents := f.rr.idResolver.ResolveIdents(ctx, identsToResolve) 129 - for i, resolved := range resolvedIdents { 130 - if resolved != nil { 131 - collaborators[i].Handle = resolved.Handle.String() 87 + pullCount, err := db.GetPullCount(rr.execer, repoAt) 88 + if err != nil { 89 + log.Println("failed to get pull count for ", repoAt) 132 90 } 133 - } 134 - 135 - return collaborators, nil 136 - } 137 - 138 - // this function is a bit weird since it now returns RepoInfo from an entirely different 139 - // package. we should refactor this or get rid of RepoInfo entirely. 
140 - func (f *ResolvedRepo) RepoInfo(user *oauth.User) repoinfo.RepoInfo { 141 - repoAt := f.RepoAt() 142 - isStarred := false 143 - if user != nil { 144 - isStarred = db.GetStarStatus(f.rr.execer, user.Did, repoAt) 145 - } 146 - 147 - starCount, err := db.GetStarCount(f.rr.execer, repoAt) 148 - if err != nil { 149 - log.Println("failed to get star count for ", repoAt) 150 - } 151 - issueCount, err := db.GetIssueCount(f.rr.execer, repoAt) 152 - if err != nil { 153 - log.Println("failed to get issue count for ", repoAt) 154 - } 155 - pullCount, err := db.GetPullCount(f.rr.execer, repoAt) 156 - if err != nil { 157 - log.Println("failed to get issue count for ", repoAt) 158 - } 159 - source, err := db.GetRepoSource(f.rr.execer, repoAt) 160 - if errors.Is(err, sql.ErrNoRows) { 161 - source = "" 162 - } else if err != nil { 163 - log.Println("failed to get repo source for ", repoAt, err) 91 + stats = &models.RepoStats{ 92 + StarCount: starCount, 93 + IssueCount: issueCount, 94 + PullCount: pullCount, 95 + } 164 96 } 165 97 166 98 var sourceRepo *models.Repo 167 - if source != "" { 168 - sourceRepo, err = db.GetRepoByAtUri(f.rr.execer, source) 99 + var err error 100 + if repo.Source != "" { 101 + sourceRepo, err = db.GetRepoByAtUri(rr.execer, repo.Source) 169 102 if err != nil { 170 103 log.Println("failed to get repo by at uri", err) 171 104 } 172 105 } 173 106 174 - var sourceHandle *identity.Identity 175 - if sourceRepo != nil { 176 - sourceHandle, err = f.rr.idResolver.ResolveIdent(context.Background(), sourceRepo.Did) 177 - if err != nil { 178 - log.Println("failed to resolve source repo", err) 179 - } 180 - } 107 + repoInfo := repoinfo.RepoInfo{ 108 + // this is basically a models.Repo 109 + OwnerDid: ownerId.DID.String(), 110 + OwnerHandle: ownerId.Handle.String(), 111 + Name: repo.Name, 112 + Rkey: repo.Rkey, 113 + Description: repo.Description, 114 + Website: repo.Website, 115 + Topics: repo.Topics, 116 + Knot: repo.Knot, 117 + Spindle: repo.Spindle, 118 + Stats: *stats, 181 119 182 - knot := f.Knot 120 + // fork repo upstream 121 + Source: sourceRepo, 183 122 184 - repoInfo := repoinfo.RepoInfo{ 185 - OwnerDid: f.OwnerDid(), 186 - OwnerHandle: f.OwnerHandle(), 187 - Name: f.Name, 188 - Rkey: f.Repo.Rkey, 189 - RepoAt: repoAt, 190 - Description: f.Description, 191 - Website: f.Website, 192 - Topics: f.Topics, 193 - IsStarred: isStarred, 194 - Knot: knot, 195 - Spindle: f.Spindle, 196 - Roles: f.RolesInRepo(user), 197 - Stats: models.RepoStats{ 198 - StarCount: starCount, 199 - IssueCount: issueCount, 200 - PullCount: pullCount, 201 - }, 202 - CurrentDir: f.CurrentDir, 203 - Ref: f.Ref, 204 - } 123 + // page context 124 + CurrentDir: currentDir, 125 + Ref: ref, 205 126 206 - if sourceRepo != nil { 207 - repoInfo.Source = sourceRepo 208 - repoInfo.SourceHandle = sourceHandle.Handle.String() 127 + // info related to the session 128 + IsStarred: isStarred, 129 + Roles: roles, 209 130 } 210 131 211 132 return repoInfo 212 - } 213 - 214 - func (f *ResolvedRepo) RolesInRepo(u *oauth.User) repoinfo.RolesInRepo { 215 - if u != nil { 216 - r := f.rr.enforcer.GetPermissionsInRepo(u.Did, f.Knot, f.DidSlashRepo()) 217 - return repoinfo.RolesInRepo{Roles: r} 218 - } else { 219 - return repoinfo.RolesInRepo{} 220 - } 221 133 } 222 134 223 135 // extractPathAfterRef gets the actual repository path
+5 -4
appview/serververify/verify.go
··· 9 9 "tangled.org/core/api/tangled" 10 10 "tangled.org/core/appview/db" 11 11 "tangled.org/core/appview/xrpcclient" 12 + "tangled.org/core/orm" 12 13 "tangled.org/core/rbac" 13 14 ) 14 15 ··· 76 77 // mark this spindle as verified in the db 77 78 rowId, err := db.VerifySpindle( 78 79 tx, 79 - db.FilterEq("owner", owner), 80 - db.FilterEq("instance", instance), 80 + orm.FilterEq("owner", owner), 81 + orm.FilterEq("instance", instance), 81 82 ) 82 83 if err != nil { 83 84 return 0, fmt.Errorf("failed to write to DB: %w", err) ··· 115 116 // mark as registered 116 117 err = db.MarkRegistered( 117 118 tx, 118 - db.FilterEq("did", owner), 119 - db.FilterEq("domain", domain), 119 + orm.FilterEq("did", owner), 120 + orm.FilterEq("domain", domain), 120 121 ) 121 122 if err != nil { 122 123 return fmt.Errorf("failed to register domain: %w", err)
+3
appview/settings/settings.go
··· 43 43 {"Name": "keys", "Icon": "key"}, 44 44 {"Name": "emails", "Icon": "mail"}, 45 45 {"Name": "notifications", "Icon": "bell"}, 46 + {"Name": "knots", "Icon": "volleyball"}, 47 + {"Name": "spindles", "Icon": "spool"}, 46 48 } 47 49 ) 48 50 ··· 120 122 PullCommented: r.FormValue("pull_commented") == "on", 121 123 PullMerged: r.FormValue("pull_merged") == "on", 122 124 Followed: r.FormValue("followed") == "on", 125 + UserMentioned: r.FormValue("user_mentioned") == "on", 123 126 EmailNotifications: r.FormValue("email_notifications") == "on", 124 127 } 125 128
+44 -31
appview/spindles/spindles.go
··· 20 20 "tangled.org/core/appview/serververify" 21 21 "tangled.org/core/appview/xrpcclient" 22 22 "tangled.org/core/idresolver" 23 + "tangled.org/core/orm" 23 24 "tangled.org/core/rbac" 24 25 "tangled.org/core/tid" 25 26 ··· 38 39 Logger *slog.Logger 39 40 } 40 41 42 + type tab = map[string]any 43 + 44 + var ( 45 + spindlesTabs []tab = []tab{ 46 + {"Name": "profile", "Icon": "user"}, 47 + {"Name": "keys", "Icon": "key"}, 48 + {"Name": "emails", "Icon": "mail"}, 49 + {"Name": "notifications", "Icon": "bell"}, 50 + {"Name": "knots", "Icon": "volleyball"}, 51 + {"Name": "spindles", "Icon": "spool"}, 52 + } 53 + ) 54 + 41 55 func (s *Spindles) Router() http.Handler { 42 56 r := chi.NewRouter() 43 57 ··· 58 72 user := s.OAuth.GetUser(r) 59 73 all, err := db.GetSpindles( 60 74 s.Db, 61 - db.FilterEq("owner", user.Did), 75 + orm.FilterEq("owner", user.Did), 62 76 ) 63 77 if err != nil { 64 78 s.Logger.Error("failed to fetch spindles", "err", err) ··· 69 83 s.Pages.Spindles(w, pages.SpindlesParams{ 70 84 LoggedInUser: user, 71 85 Spindles: all, 86 + Tabs: spindlesTabs, 87 + Tab: "spindles", 72 88 }) 73 89 } 74 90 ··· 86 102 87 103 spindles, err := db.GetSpindles( 88 104 s.Db, 89 - db.FilterEq("instance", instance), 90 - db.FilterEq("owner", user.Did), 91 - db.FilterIsNot("verified", "null"), 105 + orm.FilterEq("instance", instance), 106 + orm.FilterEq("owner", user.Did), 107 + orm.FilterIsNot("verified", "null"), 92 108 ) 93 109 if err != nil || len(spindles) != 1 { 94 110 l.Error("failed to get spindle", "err", err, "len(spindles)", len(spindles)) ··· 108 124 repos, err := db.GetRepos( 109 125 s.Db, 110 126 0, 111 - db.FilterEq("spindle", instance), 127 + orm.FilterEq("spindle", instance), 112 128 ) 113 129 if err != nil { 114 130 l.Error("failed to get spindle repos", "err", err) ··· 127 143 Spindle: spindle, 128 144 Members: members, 129 145 Repos: repoMap, 146 + Tabs: spindlesTabs, 147 + Tab: "spindles", 130 148 }) 131 149 } 132 150 ··· 273 291 274 292 spindles, err := db.GetSpindles( 275 293 s.Db, 276 - db.FilterEq("owner", user.Did), 277 - db.FilterEq("instance", instance), 294 + orm.FilterEq("owner", user.Did), 295 + orm.FilterEq("instance", instance), 278 296 ) 279 297 if err != nil || len(spindles) != 1 { 280 298 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 302 320 // remove spindle members first 303 321 err = db.RemoveSpindleMember( 304 322 tx, 305 - db.FilterEq("did", user.Did), 306 - db.FilterEq("instance", instance), 323 + orm.FilterEq("did", user.Did), 324 + orm.FilterEq("instance", instance), 307 325 ) 308 326 if err != nil { 309 327 l.Error("failed to remove spindle members", "err", err) ··· 313 331 314 332 err = db.DeleteSpindle( 315 333 tx, 316 - db.FilterEq("owner", user.Did), 317 - db.FilterEq("instance", instance), 334 + orm.FilterEq("owner", user.Did), 335 + orm.FilterEq("instance", instance), 318 336 ) 319 337 if err != nil { 320 338 l.Error("failed to delete spindle", "err", err) ··· 365 383 366 384 shouldRedirect := r.Header.Get("shouldRedirect") 367 385 if shouldRedirect == "true" { 368 - s.Pages.HxRedirect(w, "/spindles") 386 + s.Pages.HxRedirect(w, "/settings/spindles") 369 387 return 370 388 } 371 389 ··· 393 411 394 412 spindles, err := db.GetSpindles( 395 413 s.Db, 396 - db.FilterEq("owner", user.Did), 397 - db.FilterEq("instance", instance), 414 + orm.FilterEq("owner", user.Did), 415 + orm.FilterEq("instance", instance), 398 416 ) 399 417 if err != nil || len(spindles) != 1 { 400 418 l.Error("failed to retrieve instance", 
"err", err, "len(spindles)", len(spindles)) ··· 436 454 437 455 verifiedSpindle, err := db.GetSpindles( 438 456 s.Db, 439 - db.FilterEq("id", rowId), 457 + orm.FilterEq("id", rowId), 440 458 ) 441 459 if err != nil || len(verifiedSpindle) != 1 { 442 460 l.Error("failed get new spindle", "err", err) ··· 469 487 470 488 spindles, err := db.GetSpindles( 471 489 s.Db, 472 - db.FilterEq("owner", user.Did), 473 - db.FilterEq("instance", instance), 490 + orm.FilterEq("owner", user.Did), 491 + orm.FilterEq("instance", instance), 474 492 ) 475 493 if err != nil || len(spindles) != 1 { 476 494 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 581 599 } 582 600 583 601 // success 584 - s.Pages.HxRedirect(w, fmt.Sprintf("/spindles/%s", instance)) 602 + s.Pages.HxRedirect(w, fmt.Sprintf("/settings/spindles/%s", instance)) 585 603 } 586 604 587 605 func (s *Spindles) removeMember(w http.ResponseWriter, r *http.Request) { ··· 605 623 606 624 spindles, err := db.GetSpindles( 607 625 s.Db, 608 - db.FilterEq("owner", user.Did), 609 - db.FilterEq("instance", instance), 626 + orm.FilterEq("owner", user.Did), 627 + orm.FilterEq("instance", instance), 610 628 ) 611 629 if err != nil || len(spindles) != 1 { 612 630 l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles)) ··· 635 653 s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 636 654 return 637 655 } 638 - if memberId.Handle.IsInvalidHandle() { 639 - l.Error("failed to resolve member identity to handle") 640 - s.Pages.Notice(w, noticeId, "Failed to remove member, identity resolution failed.") 641 - return 642 - } 643 656 644 657 tx, err := s.Db.Begin() 645 658 if err != nil { ··· 655 668 // get the record from the DB first: 656 669 members, err := db.GetSpindleMembers( 657 670 s.Db, 658 - db.FilterEq("did", user.Did), 659 - db.FilterEq("instance", instance), 660 - db.FilterEq("subject", memberId.DID), 671 + orm.FilterEq("did", user.Did), 672 + orm.FilterEq("instance", instance), 673 + orm.FilterEq("subject", memberId.DID), 661 674 ) 662 675 if err != nil || len(members) != 1 { 663 676 l.Error("failed to get member", "err", err) ··· 668 681 // remove from db 669 682 if err = db.RemoveSpindleMember( 670 683 tx, 671 - db.FilterEq("did", user.Did), 672 - db.FilterEq("instance", instance), 673 - db.FilterEq("subject", memberId.DID), 684 + orm.FilterEq("did", user.Did), 685 + orm.FilterEq("instance", instance), 686 + orm.FilterEq("subject", memberId.DID), 674 687 ); err != nil { 675 688 l.Error("failed to remove spindle member", "err", err) 676 689 fail()
+6 -5
appview/state/gfi.go
··· 11 11 "tangled.org/core/appview/pages" 12 12 "tangled.org/core/appview/pagination" 13 13 "tangled.org/core/consts" 14 + "tangled.org/core/orm" 14 15 ) 15 16 16 17 func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) { ··· 20 21 21 22 goodFirstIssueLabel := s.config.Label.GoodFirstIssue 22 23 23 - gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel)) 24 + gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel)) 24 25 if err != nil { 25 26 log.Println("failed to get gfi label def", err) 26 27 s.pages.Error500(w) 27 28 return 28 29 } 29 30 30 - repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel)) 31 + repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel)) 31 32 if err != nil { 32 33 log.Println("failed to get repo labels", err) 33 34 s.pages.Error503(w) ··· 55 56 pagination.Page{ 56 57 Limit: 500, 57 58 }, 58 - db.FilterIn("repo_at", repoUris), 59 - db.FilterEq("open", 1), 59 + orm.FilterIn("repo_at", repoUris), 60 + orm.FilterEq("open", 1), 60 61 ) 61 62 if err != nil { 62 63 log.Println("failed to get issues", err) ··· 132 133 } 133 134 134 135 if len(uriList) > 0 { 135 - allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList)) 136 + allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList)) 136 137 if err != nil { 137 138 log.Println("failed to fetch labels", err) 138 139 }
+17
appview/state/git_http.go
··· 25 25 26 26 } 27 27 28 + func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) { 29 + user, ok := r.Context().Value("resolvedId").(identity.Identity) 30 + if !ok { 31 + http.Error(w, "failed to resolve user", http.StatusInternalServerError) 32 + return 33 + } 34 + repo := r.Context().Value("repo").(*models.Repo) 35 + 36 + scheme := "https" 37 + if s.config.Core.Dev { 38 + scheme = "http" 39 + } 40 + 41 + targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery) 42 + s.proxyRequest(w, r, targetURL) 43 + } 44 + 28 45 func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) { 29 46 user, ok := r.Context().Value("resolvedId").(identity.Identity) 30 47 if !ok {
+6 -5
appview/state/knotstream.go
··· 16 16 ec "tangled.org/core/eventconsumer" 17 17 "tangled.org/core/eventconsumer/cursor" 18 18 "tangled.org/core/log" 19 + "tangled.org/core/orm" 19 20 "tangled.org/core/rbac" 20 21 "tangled.org/core/workflow" 21 22 ··· 30 31 31 32 knots, err := db.GetRegistrations( 32 33 d, 33 - db.FilterIsNot("registered", "null"), 34 + orm.FilterIsNot("registered", "null"), 34 35 ) 35 36 if err != nil { 36 37 return nil, err ··· 143 144 repos, err := db.GetRepos( 144 145 d, 145 146 0, 146 - db.FilterEq("did", record.RepoDid), 147 - db.FilterEq("name", record.RepoName), 147 + orm.FilterEq("did", record.RepoDid), 148 + orm.FilterEq("name", record.RepoName), 148 149 ) 149 150 if err != nil { 150 151 return fmt.Errorf("failed to look for repo in DB (%s/%s): %w", record.RepoDid, record.RepoName, err) ··· 209 210 repos, err := db.GetRepos( 210 211 d, 211 212 0, 212 - db.FilterEq("did", record.TriggerMetadata.Repo.Did), 213 - db.FilterEq("name", record.TriggerMetadata.Repo.Repo), 213 + orm.FilterEq("did", record.TriggerMetadata.Repo.Did), 214 + orm.FilterEq("name", record.TriggerMetadata.Repo.Repo), 214 215 ) 215 216 if err != nil { 216 217 return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
+30 -21
appview/state/profile.go
··· 19 19 "tangled.org/core/appview/db" 20 20 "tangled.org/core/appview/models" 21 21 "tangled.org/core/appview/pages" 22 + "tangled.org/core/orm" 22 23 ) 23 24 24 25 func (s *State) Profile(w http.ResponseWriter, r *http.Request) { ··· 56 57 return nil, fmt.Errorf("failed to get profile: %w", err) 57 58 } 58 59 59 - repoCount, err := db.CountRepos(s.db, db.FilterEq("did", did)) 60 + repoCount, err := db.CountRepos(s.db, orm.FilterEq("did", did)) 60 61 if err != nil { 61 62 return nil, fmt.Errorf("failed to get repo count: %w", err) 62 63 } 63 64 64 - stringCount, err := db.CountStrings(s.db, db.FilterEq("did", did)) 65 + stringCount, err := db.CountStrings(s.db, orm.FilterEq("did", did)) 65 66 if err != nil { 66 67 return nil, fmt.Errorf("failed to get string count: %w", err) 67 68 } 68 69 69 - starredCount, err := db.CountStars(s.db, db.FilterEq("starred_by_did", did)) 70 + starredCount, err := db.CountStars(s.db, orm.FilterEq("did", did)) 70 71 if err != nil { 71 72 return nil, fmt.Errorf("failed to get starred repo count: %w", err) 72 73 } ··· 86 87 startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC) 87 88 punchcard, err := db.MakePunchcard( 88 89 s.db, 89 - db.FilterEq("did", did), 90 - db.FilterGte("date", startOfYear.Format(time.DateOnly)), 91 - db.FilterLte("date", now.Format(time.DateOnly)), 90 + orm.FilterEq("did", did), 91 + orm.FilterGte("date", startOfYear.Format(time.DateOnly)), 92 + orm.FilterLte("date", now.Format(time.DateOnly)), 92 93 ) 93 94 if err != nil { 94 95 return nil, fmt.Errorf("failed to get punchcard for %s: %w", did, err) ··· 96 97 97 98 return &pages.ProfileCard{ 98 99 UserDid: did, 99 - UserHandle: ident.Handle.String(), 100 100 Profile: profile, 101 101 FollowStatus: followStatus, 102 102 Stats: pages.ProfileStats{ ··· 119 119 s.pages.Error500(w) 120 120 return 121 121 } 122 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 122 + l = l.With("profileDid", profile.UserDid) 123 123 124 124 repos, err := db.GetRepos( 125 125 s.db, 126 126 0, 127 - db.FilterEq("did", profile.UserDid), 127 + orm.FilterEq("did", profile.UserDid), 128 128 ) 129 129 if err != nil { 130 130 l.Error("failed to fetch repos", "err", err) ··· 162 162 l.Error("failed to create timeline", "err", err) 163 163 } 164 164 165 + // populate commit counts in the timeline, using the punchcard 166 + now := time.Now() 167 + for _, p := range profile.Punchcard.Punches { 168 + years := now.Year() - p.Date.Year() 169 + months := int(now.Month() - p.Date.Month()) 170 + monthsAgo := years*12 + months 171 + if monthsAgo >= 0 && monthsAgo < len(timeline.ByMonth) { 172 + timeline.ByMonth[monthsAgo].Commits += p.Count 173 + } 174 + } 175 + 165 176 s.pages.ProfileOverview(w, pages.ProfileOverviewParams{ 166 177 LoggedInUser: s.oauth.GetUser(r), 167 178 Card: profile, ··· 180 191 s.pages.Error500(w) 181 192 return 182 193 } 183 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 194 + l = l.With("profileDid", profile.UserDid) 184 195 185 196 repos, err := db.GetRepos( 186 197 s.db, 187 198 0, 188 - db.FilterEq("did", profile.UserDid), 199 + orm.FilterEq("did", profile.UserDid), 189 200 ) 190 201 if err != nil { 191 202 l.Error("failed to get repos", "err", err) ··· 209 220 s.pages.Error500(w) 210 221 return 211 222 } 212 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 223 + l = l.With("profileDid", profile.UserDid) 213 224 214 - stars, err := db.GetStars(s.db, 0, db.FilterEq("starred_by_did", 
profile.UserDid)) 225 + stars, err := db.GetRepoStars(s.db, 0, orm.FilterEq("did", profile.UserDid)) 215 226 if err != nil { 216 227 l.Error("failed to get stars", "err", err) 217 228 s.pages.Error500(w) ··· 219 230 } 220 231 var repos []models.Repo 221 232 for _, s := range stars { 222 - if s.Repo != nil { 223 - repos = append(repos, *s.Repo) 224 - } 233 + repos = append(repos, *s.Repo) 225 234 } 226 235 227 236 err = s.pages.ProfileStarred(w, pages.ProfileStarredParams{ ··· 240 249 s.pages.Error500(w) 241 250 return 242 251 } 243 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 252 + l = l.With("profileDid", profile.UserDid) 244 253 245 - strings, err := db.GetStrings(s.db, 0, db.FilterEq("did", profile.UserDid)) 254 + strings, err := db.GetStrings(s.db, 0, orm.FilterEq("did", profile.UserDid)) 246 255 if err != nil { 247 256 l.Error("failed to get strings", "err", err) 248 257 s.pages.Error500(w) ··· 272 281 if err != nil { 273 282 return nil, err 274 283 } 275 - l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle) 284 + l = l.With("profileDid", profile.UserDid) 276 285 277 286 loggedInUser := s.oauth.GetUser(r) 278 287 params := FollowsPageParams{ ··· 294 303 followDids = append(followDids, extractDid(follow)) 295 304 } 296 305 297 - profiles, err := db.GetProfiles(s.db, db.FilterIn("did", followDids)) 306 + profiles, err := db.GetProfiles(s.db, orm.FilterIn("did", followDids)) 298 307 if err != nil { 299 308 l.Error("failed to get profiles", "followDids", followDids, "err", err) 300 309 return &params, err ··· 697 706 log.Printf("getting profile data for %s: %s", user.Did, err) 698 707 } 699 708 700 - repos, err := db.GetRepos(s.db, 0, db.FilterEq("did", user.Did)) 709 + repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Did)) 701 710 if err != nil { 702 711 log.Printf("getting repos for %s: %s", user.Did, err) 703 712 }
+22 -11
appview/state/router.go
··· 57 57 if userutil.IsFlattenedDid(firstPart) { 58 58 unflattenedDid := userutil.UnflattenDid(firstPart) 59 59 redirectPath := strings.Join(append([]string{unflattenedDid}, pathParts[1:]...), "/") 60 - http.Redirect(w, r, "/"+redirectPath, http.StatusFound) 60 + 61 + redirectURL := *r.URL 62 + redirectURL.Path = "/" + redirectPath 63 + 64 + http.Redirect(w, r, redirectURL.String(), http.StatusFound) 61 65 return 62 66 } 63 67 64 68 // if using a handle with @, rewrite to work without @ 65 69 if normalized := strings.TrimPrefix(firstPart, "@"); userutil.IsHandle(normalized) { 66 70 redirectPath := strings.Join(append([]string{normalized}, pathParts[1:]...), "/") 67 - http.Redirect(w, r, "/"+redirectPath, http.StatusFound) 71 + 72 + redirectURL := *r.URL 73 + redirectURL.Path = "/" + redirectPath 74 + 75 + http.Redirect(w, r, redirectURL.String(), http.StatusFound) 68 76 return 69 77 } 78 + 70 79 } 71 80 72 81 standardRouter.ServeHTTP(w, r) ··· 82 91 r.Get("/", s.Profile) 83 92 r.Get("/feed.atom", s.AtomFeedPage) 84 93 85 - // redirect /@handle/repo.git -> /@handle/repo 86 - r.Get("/{repo}.git", func(w http.ResponseWriter, r *http.Request) { 87 - nonDotGitPath := strings.TrimSuffix(r.URL.Path, ".git") 88 - http.Redirect(w, r, nonDotGitPath, http.StatusMovedPermanently) 89 - }) 90 - 91 94 r.With(mw.ResolveRepo()).Route("/{repo}", func(r chi.Router) { 92 95 r.Use(mw.GoImport()) 93 96 r.Mount("/", s.RepoRouter(mw)) ··· 98 101 99 102 // These routes get proxied to the knot 100 103 r.Get("/info/refs", s.InfoRefs) 104 + r.Post("/git-upload-archive", s.UploadArchive) 101 105 r.Post("/git-upload-pack", s.UploadPack) 102 106 r.Post("/git-receive-pack", s.ReceivePack) 103 107 ··· 105 109 }) 106 110 107 111 r.NotFound(func(w http.ResponseWriter, r *http.Request) { 112 + w.WriteHeader(http.StatusNotFound) 108 113 s.pages.Error404(w) 109 114 }) 110 115 ··· 136 141 // r.Post("/import", s.ImportRepo) 137 142 }) 138 143 139 - r.Get("/goodfirstissues", s.GoodFirstIssues) 144 + r.With(middleware.Paginate).Get("/goodfirstissues", s.GoodFirstIssues) 140 145 141 146 r.With(middleware.AuthMiddleware(s.oauth)).Route("/follow", func(r chi.Router) { 142 147 r.Post("/", s.Follow) ··· 163 168 164 169 r.Mount("/settings", s.SettingsRouter()) 165 170 r.Mount("/strings", s.StringsRouter(mw)) 166 - r.Mount("/knots", s.KnotsRouter()) 167 - r.Mount("/spindles", s.SpindlesRouter()) 171 + 172 + r.Mount("/settings/knots", s.KnotsRouter()) 173 + r.Mount("/settings/spindles", s.SpindlesRouter()) 174 + 168 175 r.Mount("/notifications", s.NotificationsRouter(mw)) 169 176 170 177 r.Mount("/signup", s.SignupRouter()) ··· 176 183 r.Get("/brand", s.Brand) 177 184 178 185 r.NotFound(func(w http.ResponseWriter, r *http.Request) { 186 + w.WriteHeader(http.StatusNotFound) 179 187 s.pages.Error404(w) 180 188 }) 181 189 return r ··· 258 266 issues := issues.New( 259 267 s.oauth, 260 268 s.repoResolver, 269 + s.enforcer, 261 270 s.pages, 262 271 s.idResolver, 272 + s.mentionsResolver, 263 273 s.db, 264 274 s.config, 265 275 s.notifier, ··· 276 286 s.repoResolver, 277 287 s.pages, 278 288 s.idResolver, 289 + s.mentionsResolver, 279 290 s.db, 280 291 s.config, 281 292 s.notifier,
+2 -1
appview/state/spindlestream.go
··· 17 17 ec "tangled.org/core/eventconsumer" 18 18 "tangled.org/core/eventconsumer/cursor" 19 19 "tangled.org/core/log" 20 + "tangled.org/core/orm" 20 21 "tangled.org/core/rbac" 21 22 spindle "tangled.org/core/spindle/models" 22 23 ) ··· 27 28 28 29 spindles, err := db.GetSpindles( 29 30 d, 30 - db.FilterIsNot("verified", "null"), 31 + orm.FilterIsNot("verified", "null"), 31 32 ) 32 33 if err != nil { 33 34 return nil, err
+9 -13
appview/state/star.go
··· 57 57 log.Println("created atproto record: ", resp.Uri) 58 58 59 59 star := &models.Star{ 60 - StarredByDid: currentUser.Did, 61 - RepoAt: subjectUri, 62 - Rkey: rkey, 60 + Did: currentUser.Did, 61 + RepoAt: subjectUri, 62 + Rkey: rkey, 63 63 } 64 64 65 65 err = db.AddStar(s.db, star) ··· 75 75 76 76 s.notifier.NewStar(r.Context(), star) 77 77 78 - s.pages.RepoStarFragment(w, pages.RepoStarFragmentParams{ 78 + s.pages.StarBtnFragment(w, pages.StarBtnFragmentParams{ 79 79 IsStarred: true, 80 - RepoAt: subjectUri, 81 - Stats: models.RepoStats{ 82 - StarCount: starCount, 83 - }, 80 + SubjectAt: subjectUri, 81 + StarCount: starCount, 84 82 }) 85 83 86 84 return ··· 117 115 118 116 s.notifier.DeleteStar(r.Context(), star) 119 117 120 - s.pages.RepoStarFragment(w, pages.RepoStarFragmentParams{ 118 + s.pages.StarBtnFragment(w, pages.StarBtnFragmentParams{ 121 119 IsStarred: false, 122 - RepoAt: subjectUri, 123 - Stats: models.RepoStats{ 124 - StarCount: starCount, 125 - }, 120 + SubjectAt: subjectUri, 121 + StarCount: starCount, 126 122 }) 127 123 128 124 return
+30 -24
appview/state/state.go
··· 15 15 "tangled.org/core/appview/config" 16 16 "tangled.org/core/appview/db" 17 17 "tangled.org/core/appview/indexer" 18 + "tangled.org/core/appview/mentions" 18 19 "tangled.org/core/appview/models" 19 20 "tangled.org/core/appview/notify" 20 21 dbnotify "tangled.org/core/appview/notify/db" ··· 29 30 "tangled.org/core/jetstream" 30 31 "tangled.org/core/log" 31 32 tlog "tangled.org/core/log" 33 + "tangled.org/core/orm" 32 34 "tangled.org/core/rbac" 33 35 "tangled.org/core/tid" 34 36 ··· 42 44 ) 43 45 44 46 type State struct { 45 - db *db.DB 46 - notifier notify.Notifier 47 - indexer *indexer.Indexer 48 - oauth *oauth.OAuth 49 - enforcer *rbac.Enforcer 50 - pages *pages.Pages 51 - idResolver *idresolver.Resolver 52 - posthog posthog.Client 53 - jc *jetstream.JetstreamClient 54 - config *config.Config 55 - repoResolver *reporesolver.RepoResolver 56 - knotstream *eventconsumer.Consumer 57 - spindlestream *eventconsumer.Consumer 58 - logger *slog.Logger 59 - validator *validator.Validator 47 + db *db.DB 48 + notifier notify.Notifier 49 + indexer *indexer.Indexer 50 + oauth *oauth.OAuth 51 + enforcer *rbac.Enforcer 52 + pages *pages.Pages 53 + idResolver *idresolver.Resolver 54 + mentionsResolver *mentions.Resolver 55 + posthog posthog.Client 56 + jc *jetstream.JetstreamClient 57 + config *config.Config 58 + repoResolver *reporesolver.RepoResolver 59 + knotstream *eventconsumer.Consumer 60 + spindlestream *eventconsumer.Consumer 61 + logger *slog.Logger 62 + validator *validator.Validator 60 63 } 61 64 62 65 func Make(ctx context.Context, config *config.Config) (*State, error) { ··· 96 99 } 97 100 validator := validator.New(d, res, enforcer) 98 101 99 - repoResolver := reporesolver.New(config, enforcer, res, d) 102 + repoResolver := reporesolver.New(config, enforcer, d) 103 + 104 + mentionsResolver := mentions.New(config, res, d, log.SubLogger(logger, "mentionsResolver")) 100 105 101 106 wrapper := db.DbWrapper{Execer: d} 102 107 jc, err := jetstream.NewJetstreamClient( ··· 178 183 enforcer, 179 184 pages, 180 185 res, 186 + mentionsResolver, 181 187 posthog, 182 188 jc, 183 189 config, ··· 294 300 return 295 301 } 296 302 297 - gfiLabel, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", s.config.Label.GoodFirstIssue)) 303 + gfiLabel, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", s.config.Label.GoodFirstIssue)) 298 304 if err != nil { 299 305 // non-fatal 300 306 } ··· 318 324 319 325 regs, err := db.GetRegistrations( 320 326 s.db, 321 - db.FilterEq("did", user.Did), 322 - db.FilterEq("needs_upgrade", 1), 327 + orm.FilterEq("did", user.Did), 328 + orm.FilterEq("needs_upgrade", 1), 323 329 ) 324 330 if err != nil { 325 331 l.Error("non-fatal: failed to get registrations", "err", err) ··· 327 333 328 334 spindles, err := db.GetSpindles( 329 335 s.db, 330 - db.FilterEq("owner", user.Did), 331 - db.FilterEq("needs_upgrade", 1), 336 + orm.FilterEq("owner", user.Did), 337 + orm.FilterEq("needs_upgrade", 1), 332 338 ) 333 339 if err != nil { 334 340 l.Error("non-fatal: failed to get spindles", "err", err) ··· 499 505 // Check for existing repos 500 506 existingRepo, err := db.GetRepo( 501 507 s.db, 502 - db.FilterEq("did", user.Did), 503 - db.FilterEq("name", repoName), 508 + orm.FilterEq("did", user.Did), 509 + orm.FilterEq("name", repoName), 504 510 ) 505 511 if err == nil && existingRepo != nil { 506 512 l.Info("repo exists") ··· 660 666 } 661 667 662 668 func BackfillDefaultDefs(e db.Execer, r *idresolver.Resolver, defaults []string) error { 663 - defaultLabels, err := 
db.GetLabelDefinitions(e, db.FilterIn("at_uri", defaults)) 669 + defaultLabels, err := db.GetLabelDefinitions(e, orm.FilterIn("at_uri", defaults)) 664 670 if err != nil { 665 671 return err 666 672 }
+21 -8
appview/strings/strings.go
··· 17 17 "tangled.org/core/appview/pages" 18 18 "tangled.org/core/appview/pages/markup" 19 19 "tangled.org/core/idresolver" 20 + "tangled.org/core/orm" 20 21 "tangled.org/core/tid" 21 22 22 23 "github.com/bluesky-social/indigo/api/atproto" ··· 108 109 strings, err := db.GetStrings( 109 110 s.Db, 110 111 0, 111 - db.FilterEq("did", id.DID), 112 - db.FilterEq("rkey", rkey), 112 + orm.FilterEq("did", id.DID), 113 + orm.FilterEq("rkey", rkey), 113 114 ) 114 115 if err != nil { 115 116 l.Error("failed to fetch string", "err", err) ··· 148 149 showRendered = r.URL.Query().Get("code") != "true" 149 150 } 150 151 152 + starCount, err := db.GetStarCount(s.Db, string.AtUri()) 153 + if err != nil { 154 + l.Error("failed to get star count", "err", err) 155 + } 156 + user := s.OAuth.GetUser(r) 157 + isStarred := false 158 + if user != nil { 159 + isStarred = db.GetStarStatus(s.Db, user.Did, string.AtUri()) 160 + } 161 + 151 162 s.Pages.SingleString(w, pages.SingleStringParams{ 152 - LoggedInUser: s.OAuth.GetUser(r), 163 + LoggedInUser: user, 153 164 RenderToggle: renderToggle, 154 165 ShowRendered: showRendered, 155 - String: string, 166 + String: &string, 156 167 Stats: string.Stats(), 168 + IsStarred: isStarred, 169 + StarCount: starCount, 157 170 Owner: id, 158 171 }) 159 172 } ··· 187 200 all, err := db.GetStrings( 188 201 s.Db, 189 202 0, 190 - db.FilterEq("did", id.DID), 191 - db.FilterEq("rkey", rkey), 203 + orm.FilterEq("did", id.DID), 204 + orm.FilterEq("rkey", rkey), 192 205 ) 193 206 if err != nil { 194 207 l.Error("failed to fetch string", "err", err) ··· 396 409 397 410 if err := db.DeleteString( 398 411 s.Db, 399 - db.FilterEq("did", user.Did), 400 - db.FilterEq("rkey", rkey), 412 + orm.FilterEq("did", user.Did), 413 + orm.FilterEq("rkey", rkey), 401 414 ); err != nil { 402 415 fail("Failed to delete string.", err) 403 416 return
+2 -1
appview/validator/issue.go
··· 6 6 7 7 "tangled.org/core/appview/db" 8 8 "tangled.org/core/appview/models" 9 + "tangled.org/core/orm" 9 10 ) 10 11 11 12 func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error { 12 13 // if comments have parents, only ingest ones that are 1 level deep 13 14 if comment.ReplyTo != nil { 14 - parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo)) 15 + parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo)) 15 16 if err != nil { 16 17 return fmt.Errorf("failed to fetch parent comment: %w", err) 17 18 }
+1 -34
crypto/verify.go
··· 5 5 "crypto/sha256" 6 6 "encoding/base64" 7 7 "fmt" 8 - "strings" 9 8 10 9 "github.com/hiddeco/sshsig" 11 10 "golang.org/x/crypto/ssh" 12 - "tangled.org/core/types" 13 11 ) 14 12 15 13 func VerifySignature(pubKey, signature, payload []byte) (error, bool) { ··· 28 26 // multiple algorithms but sha-512 is most secure, and git's ssh signing defaults 29 27 // to sha-512 for all key types anyway. 30 28 err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git") 31 - return err, err == nil 32 - } 33 29 34 - // VerifyCommitSignature reconstructs the payload used to sign a commit. This is 35 - // essentially the git cat-file output but without the gpgsig header. 36 - // 37 - // Caveats: signature verification will fail on commits with more than one parent, 38 - // i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field 39 - // and we are unable to reconstruct the payload correctly. 40 - // 41 - // Ideally this should directly operate on an *object.Commit. 42 - func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) { 43 - signature := commit.Commit.PGPSignature 44 - 45 - author := bytes.NewBuffer([]byte{}) 46 - committer := bytes.NewBuffer([]byte{}) 47 - commit.Commit.Author.Encode(author) 48 - commit.Commit.Committer.Encode(committer) 49 - 50 - payload := strings.Builder{} 51 - 52 - fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree) 53 - if commit.Commit.Parent != "" { 54 - fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent) 55 - } 56 - fmt.Fprintf(&payload, "author %s\n", author.String()) 57 - fmt.Fprintf(&payload, "committer %s\n", committer.String()) 58 - if commit.Commit.ChangedId != "" { 59 - fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId) 60 - } 61 - fmt.Fprintf(&payload, "\n%s", commit.Commit.Message) 62 - 63 - return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String())) 30 + return err, err == nil 64 31 } 65 32 66 33 // SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
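For reference, a minimal sketch of calling the surviving `VerifySignature` helper directly; the file names are placeholders and the import path is assumed from the repository layout, so treat this as illustrative rather than canonical:

```go
package main

import (
	"fmt"
	"os"

	gitcrypto "tangled.org/core/crypto" // assumed import path for this package
)

func main() {
	// Placeholder inputs: an SSH public key in authorized_keys format, an
	// armored SSH signature (e.g. produced with `ssh-keygen -Y sign -n git`),
	// and the exact payload bytes that were signed.
	pubKey, err := os.ReadFile("signer.pub")
	if err != nil {
		panic(err)
	}
	sig, err := os.ReadFile("payload.sig")
	if err != nil {
		panic(err)
	}
	payload, err := os.ReadFile("payload")
	if err != nil {
		panic(err)
	}

	// VerifySignature returns (error, bool); the bool reports whether the
	// signature checked out against the supplied key and payload.
	verr, ok := gitcrypto.VerifySignature(pubKey, sig, payload)
	if !ok {
		fmt.Println("signature rejected:", verr)
		return
	}
	fmt.Println("signature verified")
}
```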
+1529
docs/DOCS.md
···
1 + ---
2 + title: Tangled docs
3 + author: The Tangled Contributors
4 + date: Sun, 21 Dec 2025
5 + ---
6 +
7 + # Introduction
8 +
9 + Tangled is a decentralized code hosting and collaboration
10 + platform. Every component of Tangled is open-source and
11 + self-hostable. [tangled.org](https://tangled.org) also
12 + provides hosting and CI services that are free to use.
13 +
14 + There are several models for decentralized code
15 + collaboration platforms, ranging from ActivityPub’s
16 + (Forgejo) federated model to Radicle’s entirely P2P model.
17 + Our approach attempts to be the best of both worlds by
18 + adopting the AT Protocol, a protocol for building decentralized
19 + social applications with a central identity.
20 +
21 + Central to this approach is the idea of “knots”. Knots are
22 + lightweight, headless servers that enable users to host Git
23 + repositories with ease. Knots are designed for either single-
24 + or multi-tenant use, which makes them perfect for self-hosting on a
25 + Raspberry Pi at home or for larger “community” servers. By
26 + default, Tangled provides managed knots where you can host
27 + your repositories for free.
28 +
29 + The appview at tangled.org acts as a consolidated "view"
30 + into the whole network, allowing users to access, clone, and
31 + contribute to repositories hosted across different knots
32 + seamlessly.
33 +
34 + # Quick start guide
35 +
36 + ## Log in or sign up
37 +
38 + You can [log in](https://tangled.org) using your AT Protocol
39 + account. If you are unclear on what that means, simply head
40 + to the [signup](https://tangled.org/signup) page and create
41 + an account. By doing so, you will be choosing Tangled as
42 + your account provider (you will be granted a handle of the
43 + form `user.tngl.sh`).
44 +
45 + In the AT Protocol network, users are free to choose their account
46 + provider (known as a "Personal Data Service", or PDS) and
47 + log in to applications that support AT accounts.
48 +
49 + You can think of it as "one account for all of the atmosphere"!
50 +
51 + If you already have an AT account (you may have one if you
52 + signed up to Bluesky, for example), you can log in with the
53 + same handle on Tangled (so just use `user.bsky.social` on
54 + the login page).
55 +
56 + ## Add an SSH key
57 +
58 + Once you are logged in, you can start creating repositories
59 + and pushing code. Tangled supports pushing Git repositories
60 + over SSH.
61 +
62 + First, you'll need to generate an SSH key if you don't
63 + already have one:
64 +
65 + ```bash
66 + ssh-keygen -t ed25519 -C "foo@bar.com"
67 + ```
68 +
69 + When prompted, save the key to the default location
70 + (`~/.ssh/id_ed25519`) and optionally set a passphrase.
71 +
72 + Copy your public key to your clipboard:
73 +
74 + ```bash
75 + # on X11
76 + cat ~/.ssh/id_ed25519.pub | xclip -sel c
77 +
78 + # on wayland
79 + cat ~/.ssh/id_ed25519.pub | wl-copy
80 +
81 + # on macos
82 + cat ~/.ssh/id_ed25519.pub | pbcopy
83 + ```
84 +
85 + Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
86 + paste your public key, give it a descriptive name, and hit
87 + save.
88 +
89 + ## Create a repository
90 +
91 + Once your SSH key is added, create your first repository:
92 +
93 + 1. Hit the green `+` icon on the topbar and select
94 + repository
95 + 2. Enter a repository name
96 + 3. Add a description
97 + 4. Choose a knotserver to host this repository on
98 + 5. Hit create
99 +
100 + Knots are self-hostable, lightweight Git servers that can
101 + host your repository.
Unlike traditional code forges, your 102 + code can live on any server. Read the [Knots](TODO) section 103 + for more. 104 + 105 + ## Configure SSH 106 + 107 + To ensure Git uses the correct SSH key and connects smoothly 108 + to Tangled, add this configuration to your `~/.ssh/config` 109 + file: 110 + 111 + ``` 112 + Host tangled.org 113 + Hostname tangled.org 114 + User git 115 + IdentityFile ~/.ssh/id_ed25519 116 + AddressFamily inet 117 + ``` 118 + 119 + This tells SSH to use your specific key when connecting to 120 + Tangled and prevents authentication issues if you have 121 + multiple SSH keys. 122 + 123 + Note that this configuration only works for knotservers that 124 + are hosted by tangled.org. If you use a custom knot, refer 125 + to the [Knots](TODO) section. 126 + 127 + ## Push your first repository 128 + 129 + Initialize a new Git repository: 130 + 131 + ```bash 132 + mkdir my-project 133 + cd my-project 134 + 135 + git init 136 + echo "# My Project" > README.md 137 + ``` 138 + 139 + Add some content and push! 140 + 141 + ```bash 142 + git add README.md 143 + git commit -m "Initial commit" 144 + git remote add origin git@tangled.org:user.tngl.sh/my-project 145 + git push -u origin main 146 + ``` 147 + 148 + That's it! Your code is now hosted on Tangled. 149 + 150 + ## Migrating an existing repository 151 + 152 + Moving your repositories from GitHub, GitLab, Bitbucket, or 153 + any other Git forge to Tangled is straightforward. You'll 154 + simply change your repository's remote URL. At the moment, 155 + Tangled does not have any tooling to migrate data such as 156 + GitHub issues or pull requests. 157 + 158 + First, create a new repository on tangled.org as described 159 + in the [Quick Start Guide](#create-a-repository). 160 + 161 + Navigate to your existing local repository: 162 + 163 + ```bash 164 + cd /path/to/your/existing/repo 165 + ``` 166 + 167 + You can inspect your existing Git remote like so: 168 + 169 + ```bash 170 + git remote -v 171 + ``` 172 + 173 + You'll see something like: 174 + 175 + ``` 176 + origin git@github.com:username/my-project (fetch) 177 + origin git@github.com:username/my-project (push) 178 + ``` 179 + 180 + Update the remote URL to point to tangled: 181 + 182 + ```bash 183 + git remote set-url origin git@tangled.org:user.tngl.sh/my-project 184 + ``` 185 + 186 + Verify the change: 187 + 188 + ```bash 189 + git remote -v 190 + ``` 191 + 192 + You should now see: 193 + 194 + ``` 195 + origin git@tangled.org:user.tngl.sh/my-project (fetch) 196 + origin git@tangled.org:user.tngl.sh/my-project (push) 197 + ``` 198 + 199 + Push all your branches and tags to Tangled: 200 + 201 + ```bash 202 + git push -u origin --all 203 + git push -u origin --tags 204 + ``` 205 + 206 + Your repository is now migrated to Tangled! All commit 207 + history, branches, and tags have been preserved. 208 + 209 + ## Mirroring a repository to Tangled 210 + 211 + If you want to maintain your repository on multiple forges 212 + simultaneously, for example, keeping your primary repository 213 + on GitHub while mirroring to Tangled for backup or 214 + redundancy, you can do so by adding multiple remotes. 215 + 216 + You can configure your local repository to push to both 217 + Tangled and, say, GitHub. 
You may already have the following
218 + setup:
219 +
220 + ```
221 + $ git remote -v
222 + origin git@github.com:username/my-project (fetch)
223 + origin git@github.com:username/my-project (push)
224 + ```
225 +
226 + Now add Tangled as an additional push URL to the same
227 + remote:
228 +
229 + ```bash
230 + git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
231 + ```
232 +
233 + You also need to re-add the original URL as a push
234 + destination (Git replaces the push URL when you use `--add`
235 + the first time):
236 +
237 + ```bash
238 + git remote set-url --add --push origin git@github.com:username/my-project
239 + ```
240 +
241 + Verify your configuration:
242 +
243 + ```
244 + $ git remote -v
245 + origin git@github.com:username/my-project (fetch)
246 + origin git@tangled.org:user.tngl.sh/my-project (push)
247 + origin git@github.com:username/my-project (push)
248 + ```
249 +
250 + Notice that there's one fetch URL (the primary remote) and
251 + two push URLs. Now, whenever you push, Git will
252 + automatically push to both remotes:
253 +
254 + ```bash
255 + git push origin main
256 + ```
257 +
258 + This single command pushes your `main` branch to both GitHub
259 + and Tangled simultaneously.
260 +
261 + To push all branches and tags:
262 +
263 + ```bash
264 + git push origin --all
265 + git push origin --tags
266 + ```
267 +
268 + If you prefer more control over which remote you push to,
269 + you can maintain separate remotes:
270 +
271 + ```bash
272 + git remote add github git@github.com:username/my-project
273 + git remote add tangled git@tangled.org:user.tngl.sh/my-project
274 + ```
275 +
276 + Then push to each explicitly:
277 +
278 + ```bash
279 + git push github main
280 + git push tangled main
281 + ```
282 +
283 + # Knot self-hosting guide
284 +
285 + So you want to run your own knot server? Great! Here are a few prerequisites:
286 +
287 + 1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
288 + 2. A (sub)domain name. People generally use `knot.example.com`.
289 + 3. A valid SSL certificate for your domain.
290 +
291 + ## NixOS
292 +
293 + Refer to the [knot
294 + module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
295 + for a full list of options. Sample configurations:
296 +
297 + - [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
298 + - [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
299 +
300 + ## Docker
301 +
302 + Refer to
303 + [@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
304 + Note that this is community-maintained.
305 +
306 + ## Manual setup
307 +
308 + First, clone this repository:
309 +
310 + ```
311 + git clone https://tangled.org/@tangled.org/core
312 + ```
313 +
314 + Then, build the `knot` CLI. This is the knot administration
315 + and operation tool.
For the purpose of this guide, we're 316 + only concerned with these subcommands: 317 + 318 + * `knot server`: the main knot server process, typically 319 + run as a supervised service 320 + * `knot guard`: handles role-based access control for git 321 + over SSH (you'll never have to run this yourself) 322 + * `knot keys`: fetches SSH keys associated with your knot; 323 + we'll use this to generate the SSH 324 + `AuthorizedKeysCommand` 325 + 326 + ``` 327 + cd core 328 + export CGO_ENABLED=1 329 + go build -o knot ./cmd/knot 330 + ``` 331 + 332 + Next, move the `knot` binary to a location owned by `root` -- 333 + `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 334 + 335 + ``` 336 + sudo mv knot /usr/local/bin/knot 337 + sudo chown root:root /usr/local/bin/knot 338 + ``` 339 + 340 + This is necessary because SSH `AuthorizedKeysCommand` requires [really 341 + specific permissions](https://stackoverflow.com/a/27638306). The 342 + `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 343 + retrieve a user's public SSH keys dynamically for authentication. Let's 344 + set that up. 345 + 346 + ``` 347 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 348 + Match User git 349 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 350 + AuthorizedKeysCommandUser nobody 351 + EOF 352 + ``` 353 + 354 + Then, reload `sshd`: 355 + 356 + ``` 357 + sudo systemctl reload ssh 358 + ``` 359 + 360 + Next, create the `git` user. We'll use the `git` user's home directory 361 + to store repositories: 362 + 363 + ``` 364 + sudo adduser git 365 + ``` 366 + 367 + Create `/home/git/.knot.env` with the following, updating the values as 368 + necessary. The `KNOT_SERVER_OWNER` should be set to your 369 + DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 370 + 371 + ``` 372 + KNOT_REPO_SCAN_PATH=/home/git 373 + KNOT_SERVER_HOSTNAME=knot.example.com 374 + APPVIEW_ENDPOINT=https://tangled.org 375 + KNOT_SERVER_OWNER=did:plc:foobar 376 + KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 377 + KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 378 + ``` 379 + 380 + If you run a Linux distribution that uses systemd, you can use the provided 381 + service file to run the server. Copy 382 + [`knotserver.service`](/systemd/knotserver.service) 383 + to `/etc/systemd/system/`. Then, run: 384 + 385 + ``` 386 + systemctl enable knotserver 387 + systemctl start knotserver 388 + ``` 389 + 390 + The last step is to configure a reverse proxy like Nginx or Caddy to front your 391 + knot. Here's an example configuration for Nginx: 392 + 393 + ``` 394 + server { 395 + listen 80; 396 + listen [::]:80; 397 + server_name knot.example.com; 398 + 399 + location / { 400 + proxy_pass http://localhost:5555; 401 + proxy_set_header Host $host; 402 + proxy_set_header X-Real-IP $remote_addr; 403 + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 404 + proxy_set_header X-Forwarded-Proto $scheme; 405 + } 406 + 407 + # wss endpoint for git events 408 + location /events { 409 + proxy_set_header X-Forwarded-For $remote_addr; 410 + proxy_set_header Host $http_host; 411 + proxy_set_header Upgrade websocket; 412 + proxy_set_header Connection Upgrade; 413 + proxy_pass http://localhost:5555; 414 + } 415 + # additional config for SSL/TLS go here. 416 + } 417 + 418 + ``` 419 + 420 + Remember to use Let's Encrypt or similar to procure a certificate for your 421 + knot domain. 422 + 423 + You should now have a running knot server! 
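Before verifying, it's worth a quick sanity check that everything is actually up. These are generic checks (the exact output will vary with your setup): the systemd unit should be active, and your reverse proxy should answer on the knot's domain.

```
sudo systemctl status knotserver
curl -sI https://knot.example.com | head -n 1
```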
You can finalize
424 + your registration by hitting the `verify` button on the
425 + [/settings/knots](https://tangled.org/settings/knots) page. This simply creates
426 + a record on your PDS to announce the existence of the knot.
427 +
428 + ### Custom paths
429 +
430 + (This section applies to manual setup only. Docker users should edit the mounts
431 + in `docker-compose.yml` instead.)
432 +
433 + Right now, the database and repositories of your knot live in `/home/git`. You
434 + can move these paths if you'd like to store them in another folder. Be careful
435 + when adjusting these paths:
436 +
437 + * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
438 + any possible side effects. Remember to restart it once you're done.
439 + * Make backups before moving in case something goes wrong.
440 + * Make sure the `git` user can read from and write to the new paths.
441 +
442 + #### Database
443 +
444 + As an example, let's say the current database is at `/home/git/knotserver.db`,
445 + and we want to move it to `/home/git/database/knotserver.db`.
446 +
447 + Copy the current database to the new location. Make sure to copy the `.db-shm`
448 + and `.db-wal` files if they exist.
449 +
450 + ```
451 + mkdir /home/git/database
452 + cp /home/git/knotserver.db* /home/git/database
453 + ```
454 +
455 + In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
456 + the new file path (_not_ the directory):
457 +
458 + ```
459 + KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
460 + ```
461 +
462 + #### Repositories
463 +
464 + As an example, let's say the repositories are currently in `/home/git`, and we
465 + want to move them into `/home/git/repositories`.
466 +
467 + Create the new folder, then move the existing repositories (if there are any):
468 +
469 + ```
470 + mkdir /home/git/repositories
471 + # move all DIDs into the new folder; these will vary for you!
472 + mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
473 + ```
474 +
475 + In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
476 + to the new directory:
477 +
478 + ```
479 + KNOT_REPO_SCAN_PATH=/home/git/repositories
480 + ```
481 +
482 + Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
483 + repository path:
484 +
485 + ```
486 + sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
487 + Match User git
488 + AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
489 + AuthorizedKeysCommandUser nobody
490 + EOF
491 + ```
492 +
493 + Make sure to restart your SSH server!
494 +
495 + #### MOTD (message of the day)
496 +
497 + To configure the MOTD used ("Welcome to this knot!" by default), edit the
498 + `/home/git/motd` file:
499 +
500 + ```
501 + printf "Hi from this knot!\n" > /home/git/motd
502 + ```
503 +
504 + Note that you should add a newline at the end if setting a non-empty message
505 + since the knot won't do this for you.
506 +
507 + # Spindles
508 +
509 + ## Pipelines
510 +
511 + Spindle workflows allow you to write CI/CD pipelines in a
512 + simple format. They're located in the `.tangled/workflows`
513 + directory at the root of your repository, and are defined
514 + using YAML.
515 +
516 + The fields are:
517 +
518 + - [Trigger](#trigger): A **required** field that defines
519 + when a workflow should be triggered.
520 + - [Engine](#engine): A **required** field that defines which
521 + engine a workflow should run on.
522 + - [Clone options](#clone-options): An **optional** field 523 + that defines how the repository should be cloned. 524 + - [Dependencies](#dependencies): An **optional** field that 525 + allows you to list dependencies you may need. 526 + - [Environment](#environment): An **optional** field that 527 + allows you to define environment variables. 528 + - [Steps](#steps): An **optional** field that allows you to 529 + define what steps should run in the workflow. 530 + 531 + ### Trigger 532 + 533 + The first thing to add to a workflow is the trigger, which 534 + defines when a workflow runs. This is defined using a `when` 535 + field, which takes in a list of conditions. Each condition 536 + has the following fields: 537 + 538 + - `event`: This is a **required** field that defines when 539 + your workflow should run. It's a list that can take one or 540 + more of the following values: 541 + - `push`: The workflow should run every time a commit is 542 + pushed to the repository. 543 + - `pull_request`: The workflow should run every time a 544 + pull request is made or updated. 545 + - `manual`: The workflow can be triggered manually. 546 + - `branch`: Defines which branches the workflow should run 547 + for. If used with the `push` event, commits to the 548 + branch(es) listed here will trigger the workflow. If used 549 + with the `pull_request` event, updates to pull requests 550 + targeting the branch(es) listed here will trigger the 551 + workflow. This field has no effect with the `manual` 552 + event. Supports glob patterns using `*` and `**` (e.g., 553 + `main`, `develop`, `release-*`). Either `branch` or `tag` 554 + (or both) must be specified for `push` events. 555 + - `tag`: Defines which tags the workflow should run for. 556 + Only used with the `push` event - when tags matching the 557 + pattern(s) listed here are pushed, the workflow will 558 + trigger. This field has no effect with `pull_request` or 559 + `manual` events. Supports glob patterns using `*` and `**` 560 + (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or 561 + `tag` (or both) must be specified for `push` events. 562 + 563 + For example, if you'd like to define a workflow that runs 564 + when commits are pushed to the `main` and `develop` 565 + branches, or when pull requests that target the `main` 566 + branch are updated, or manually, you can do so with: 567 + 568 + ```yaml 569 + when: 570 + - event: ["push", "manual"] 571 + branch: ["main", "develop"] 572 + - event: ["pull_request"] 573 + branch: ["main"] 574 + ``` 575 + 576 + You can also trigger workflows on tag pushes. For instance, 577 + to run a deployment workflow when tags matching `v*` are 578 + pushed: 579 + 580 + ```yaml 581 + when: 582 + - event: ["push"] 583 + tag: ["v*"] 584 + ``` 585 + 586 + You can even combine branch and tag patterns in a single 587 + constraint (the workflow triggers if either matches): 588 + 589 + ```yaml 590 + when: 591 + - event: ["push"] 592 + branch: ["main", "release-*"] 593 + tag: ["v*", "stable"] 594 + ``` 595 + 596 + ### Engine 597 + 598 + Next is the engine on which the workflow should run, defined 599 + using the **required** `engine` field. The currently 600 + supported engines are: 601 + 602 + - `nixery`: This uses an instance of 603 + [Nixery](https://nixery.dev) to run steps, which allows 604 + you to add [dependencies](#dependencies) from 605 + Nixpkgs (https://github.com/NixOS/nixpkgs). 
You can
606 + search for packages on https://search.nixos.org, and
607 + there's a pretty good chance the package(s) you're looking
608 + for will be there.
609 +
610 + Example:
611 +
612 + ```yaml
613 + engine: "nixery"
614 + ```
615 +
616 + ### Clone options
617 +
618 + When a workflow starts, the first step is to clone the
619 + repository. You can customize this behavior using the
620 + **optional** `clone` field. It has the following fields:
621 +
622 + - `skip`: Setting this to `true` will skip cloning the
623 + repository. This can be useful if your workflow is doing
624 + something that doesn't require anything from the
625 + repository itself. This is `false` by default.
626 + - `depth`: This sets the number of commits, or the "clone
627 + depth", to fetch from the repository. For example, if you
628 + set this to 2, the last 2 commits will be fetched. By
629 + default, the depth is set to 1, meaning only the most
630 + recent commit will be fetched, which is the commit that
631 + triggered the workflow.
632 + - `submodules`: If you use Git submodules
633 + (https://git-scm.com/book/en/v2/Git-Tools-Submodules)
634 + in your repository, setting this field to `true` will
635 + recursively fetch all submodules. This is `false` by
636 + default.
637 +
638 + The default settings are:
639 +
640 + ```yaml
641 + clone:
642 + skip: false
643 + depth: 1
644 + submodules: false
645 + ```
646 +
647 + ### Dependencies
648 +
649 + Usually when you're running a workflow, you'll need
650 + additional dependencies. The `dependencies` field lets you
651 + define which dependencies to get, and from where. It's a
652 + key-value map, with the key being the registry to fetch
653 + dependencies from, and the value being the list of
654 + dependencies to fetch.
655 +
656 + Say you want to fetch Node.js and Go from `nixpkgs`, and a
657 + package called `my_pkg` you've made, hosted in your own
658 + registry at
659 + `https://tangled.org/@example.com/my_pkg`. You can define
660 + those dependencies like so:
661 +
662 + ```yaml
663 + dependencies:
664 + # nixpkgs
665 + nixpkgs:
666 + - nodejs
667 + - go
668 + # custom registry
669 + git+https://tangled.org/@example.com/my_pkg:
670 + - my_pkg
671 + ```
672 +
673 + Now these dependencies are available to use in your
674 + workflow!
675 +
676 + ### Environment
677 +
678 + The `environment` field allows you to define environment
679 + variables that will be available throughout the entire
680 + workflow. **Do not put secrets here; these environment
681 + variables are visible to anyone viewing the repository. You
682 + can add secrets for pipelines in your repository's
683 + settings.**
684 +
685 + Example:
686 +
687 + ```yaml
688 + environment:
689 + GOOS: "linux"
690 + GOARCH: "arm64"
691 + NODE_ENV: "production"
692 + MY_ENV_VAR: "MY_ENV_VALUE"
693 + ```
694 +
695 + ### Steps
696 +
697 + The `steps` field allows you to define what steps should run
698 + in the workflow. It's a list of step objects, each with the
699 + following fields:
700 +
701 + - `name`: This field allows you to give your step a name.
702 + This name is visible in your workflow runs, and is used to
703 + describe what the step is doing.
704 + - `command`: This field allows you to define a command to
705 + run in that step. The step is run in a Bash shell, and the
706 + logs from the command will be visible in the pipelines
707 + page on the Tangled website. The
708 + [dependencies](#dependencies) you added will be available
709 + to use here.
710 + - `environment`: Similar to the global 711 + [environment](#environment) config, this **optional** 712 + field is a key-value map that allows you to set 713 + environment variables for the step. **Do not put secrets 714 + here, these environment variables are visible to anyone 715 + viewing the repository. You can add secrets for pipelines 716 + in your repository's settings.** 717 + 718 + Example: 719 + 720 + ```yaml 721 + steps: 722 + - name: "Build backend" 723 + command: "go build" 724 + environment: 725 + GOOS: "darwin" 726 + GOARCH: "arm64" 727 + - name: "Build frontend" 728 + command: "npm run build" 729 + environment: 730 + NODE_ENV: "production" 731 + ``` 732 + 733 + ### Complete workflow 734 + 735 + ```yaml 736 + # .tangled/workflows/build.yml 737 + 738 + when: 739 + - event: ["push", "manual"] 740 + branch: ["main", "develop"] 741 + - event: ["pull_request"] 742 + branch: ["main"] 743 + 744 + engine: "nixery" 745 + 746 + # using the default values 747 + clone: 748 + skip: false 749 + depth: 1 750 + submodules: false 751 + 752 + dependencies: 753 + # nixpkgs 754 + nixpkgs: 755 + - nodejs 756 + - go 757 + # custom registry 758 + git+https://tangled.org/@example.com/my_pkg: 759 + - my_pkg 760 + 761 + environment: 762 + GOOS: "linux" 763 + GOARCH: "arm64" 764 + NODE_ENV: "production" 765 + MY_ENV_VAR: "MY_ENV_VALUE" 766 + 767 + steps: 768 + - name: "Build backend" 769 + command: "go build" 770 + environment: 771 + GOOS: "darwin" 772 + GOARCH: "arm64" 773 + - name: "Build frontend" 774 + command: "npm run build" 775 + environment: 776 + NODE_ENV: "production" 777 + ``` 778 + 779 + If you want another example of a workflow, you can look at 780 + the one [Tangled uses to build the 781 + project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml). 782 + 783 + ## Self-hosting guide 784 + 785 + ### Prerequisites 786 + 787 + * Go 788 + * Docker (the only supported backend currently) 789 + 790 + ### Configuration 791 + 792 + Spindle is configured using environment variables. The following environment variables are available: 793 + 794 + * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 795 + * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 796 + * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 797 + * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 798 + * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 799 + * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 800 + * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 801 + * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 802 + * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 803 + 804 + ### Running spindle 805 + 806 + 1. **Set the environment variables.** For example: 807 + 808 + ```shell 809 + export SPINDLE_SERVER_HOSTNAME="your-hostname" 810 + export SPINDLE_SERVER_OWNER="your-did" 811 + ``` 812 + 813 + 2. **Build the Spindle binary.** 814 + 815 + ```shell 816 + cd core 817 + go mod download 818 + go build -o cmd/spindle/spindle cmd/spindle/main.go 819 + ``` 820 + 821 + 3. 
**Create the log directory.**
822 +
823 + ```shell
824 + sudo mkdir -p /var/log/spindle
825 + sudo chown $USER:$USER -R /var/log/spindle
826 + ```
827 +
828 + 4. **Run the Spindle binary.**
829 +
830 + ```shell
831 + ./cmd/spindle/spindle
832 + ```
833 +
834 + Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
835 +
836 + ## Architecture
837 +
838 + Spindle is a small CI runner service. Here's a high-level overview of how it operates:
839 +
840 + * Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
841 + [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
842 + * When a new repo record comes through (typically when you add a spindle to a
843 + repo from the settings), spindle then resolves the underlying knot and
844 + subscribes to repo events (see:
845 + [`sh.tangled.pipeline`](/lexicons/pipeline.json)).
846 + * The spindle engine then handles execution of the pipeline, with results and
847 + logs beamed on the spindle event stream over WebSocket.
848 +
849 + ### The engine
850 +
851 + At present, the only supported backend is Docker (and Podman, if Docker
852 + compatibility is enabled, so that `/run/docker.sock` is created). Spindle
853 + executes each step in the pipeline in a fresh container, with state persisted
854 + across steps within the `/tangled/workspace` directory.
855 +
856 + The base image for the container is constructed on the fly using
857 + [Nixery](https://nixery.dev), which is handy for caching layers for frequently
858 + used packages.
859 +
860 + The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
861 +
862 + ## Secrets with openbao
863 +
864 + This section covers setting up spindle to use OpenBao for secrets
865 + management via OpenBao Proxy instead of the default SQLite backend.
866 +
867 + ### Overview
868 +
869 + Spindle now uses OpenBao Proxy for secrets management. The proxy handles
870 + authentication automatically using AppRole credentials, while spindle
871 + connects to the local proxy instead of directly to the OpenBao server.
872 +
873 + This approach provides better security, automatic token renewal, and
874 + simplified application code.
875 +
876 + ### Installation
877 +
878 + Install OpenBao from Nixpkgs:
879 +
880 + ```bash
881 + nix shell nixpkgs#openbao # for a local server
882 + ```
883 +
884 + ### Setup
885 +
886 + The setup process is documented for both local development and production.
887 +
888 + #### Local development
889 +
890 + Start OpenBao in dev mode:
891 +
892 + ```bash
893 + bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
894 + ```
895 +
896 + This starts OpenBao on `http://localhost:8201` with a root token.
897 +
898 + Set up environment for the bao CLI:
899 +
900 + ```bash
901 + export BAO_ADDR=http://localhost:8201 # match the dev listen address above
902 + export BAO_TOKEN=root
903 + ```
904 +
905 + #### Production
906 +
907 + You would typically use a systemd service with a
908 + configuration file. Refer to
909 + [@tangled.org/infra](https://tangled.org/@tangled.org/infra)
910 + for how this can be achieved using Nix.
911 +
912 + Then, initialize the bao server:
913 +
914 + ```bash
915 + bao operator init -key-shares=1 -key-threshold=1
916 + ```
917 +
918 + This will print out an unseal key and a root token. Save them
919 + somewhere (like a password manager).
Then unseal the vault 920 + to begin setting it up: 921 + 922 + ```bash 923 + bao operator unseal <unseal_key> 924 + ``` 925 + 926 + All steps below remain the same across both dev and 927 + production setups. 928 + 929 + #### Configure openbao server 930 + 931 + Create the spindle KV mount: 932 + 933 + ```bash 934 + bao secrets enable -path=spindle -version=2 kv 935 + ``` 936 + 937 + Set up AppRole authentication and policy: 938 + 939 + Create a policy file `spindle-policy.hcl`: 940 + 941 + ```hcl 942 + # Full access to spindle KV v2 data 943 + path "spindle/data/*" { 944 + capabilities = ["create", "read", "update", "delete"] 945 + } 946 + 947 + # Access to metadata for listing and management 948 + path "spindle/metadata/*" { 949 + capabilities = ["list", "read", "delete", "update"] 950 + } 951 + 952 + # Allow listing at root level 953 + path "spindle/" { 954 + capabilities = ["list"] 955 + } 956 + 957 + # Required for connection testing and health checks 958 + path "auth/token/lookup-self" { 959 + capabilities = ["read"] 960 + } 961 + ``` 962 + 963 + Apply the policy and create an AppRole: 964 + 965 + ```bash 966 + bao policy write spindle-policy spindle-policy.hcl 967 + bao auth enable approle 968 + bao write auth/approle/role/spindle \ 969 + token_policies="spindle-policy" \ 970 + token_ttl=1h \ 971 + token_max_ttl=4h \ 972 + bind_secret_id=true \ 973 + secret_id_ttl=0 \ 974 + secret_id_num_uses=0 975 + ``` 976 + 977 + Get the credentials: 978 + 979 + ```bash 980 + # Get role ID (static) 981 + ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 982 + 983 + # Generate secret ID 984 + SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 985 + 986 + echo "Role ID: $ROLE_ID" 987 + echo "Secret ID: $SECRET_ID" 988 + ``` 989 + 990 + #### Create proxy configuration 991 + 992 + Create the credential files: 993 + 994 + ```bash 995 + # Create directory for OpenBao files 996 + mkdir -p /tmp/openbao 997 + 998 + # Save credentials 999 + echo "$ROLE_ID" > /tmp/openbao/role-id 1000 + echo "$SECRET_ID" > /tmp/openbao/secret-id 1001 + chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 1002 + ``` 1003 + 1004 + Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 1005 + 1006 + ```hcl 1007 + # OpenBao server connection 1008 + vault { 1009 + address = "http://localhost:8200" 1010 + } 1011 + 1012 + # Auto-Auth using AppRole 1013 + auto_auth { 1014 + method "approle" { 1015 + mount_path = "auth/approle" 1016 + config = { 1017 + role_id_file_path = "/tmp/openbao/role-id" 1018 + secret_id_file_path = "/tmp/openbao/secret-id" 1019 + } 1020 + } 1021 + 1022 + # Optional: write token to file for debugging 1023 + sink "file" { 1024 + config = { 1025 + path = "/tmp/openbao/token" 1026 + mode = 0640 1027 + } 1028 + } 1029 + } 1030 + 1031 + # Proxy listener for spindle 1032 + listener "tcp" { 1033 + address = "127.0.0.1:8201" 1034 + tls_disable = true 1035 + } 1036 + 1037 + # Enable API proxy with auto-auth token 1038 + api_proxy { 1039 + use_auto_auth_token = true 1040 + } 1041 + 1042 + # Enable response caching 1043 + cache { 1044 + use_auto_auth_token = true 1045 + } 1046 + 1047 + # Logging 1048 + log_level = "info" 1049 + ``` 1050 + 1051 + #### Start the proxy 1052 + 1053 + Start OpenBao Proxy: 1054 + 1055 + ```bash 1056 + bao proxy -config=/tmp/openbao/proxy.hcl 1057 + ``` 1058 + 1059 + The proxy will authenticate with OpenBao and start listening on 1060 + `127.0.0.1:8201`. 
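Running `bao proxy` in a terminal is fine while testing. For anything long-lived you'll want it supervised; as a rough sketch (the binary path, user, and hardening options are placeholders to adapt; see the production notes below), a systemd unit could look like this:

```ini
# /etc/systemd/system/openbao-proxy.service -- sketch only; adjust paths and user
[Unit]
Description=OpenBao Proxy for spindle
After=network-online.target

[Service]
User=openbao
ExecStart=/usr/local/bin/bao proxy -config=/etc/openbao/proxy.hcl
Restart=on-failure

[Install]
WantedBy=multi-user.target
```

Enable it with `systemctl enable --now openbao-proxy`; its logs then show up under `journalctl -u openbao-proxy`, as mentioned in the troubleshooting section below.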
1061 + 1062 + #### Configure spindle 1063 + 1064 + Set these environment variables for spindle: 1065 + 1066 + ```bash 1067 + export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 1068 + export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 1069 + export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 1070 + ``` 1071 + 1072 + On startup, spindle will now connect to the local proxy, 1073 + which handles all authentication automatically. 1074 + 1075 + ### Production setup for proxy 1076 + 1077 + For production, you'll want to run the proxy as a service: 1078 + 1079 + Place your production configuration in 1080 + `/etc/openbao/proxy.hcl` with proper TLS settings for the 1081 + vault connection. 1082 + 1083 + ### Verifying setup 1084 + 1085 + Test the proxy directly: 1086 + 1087 + ```bash 1088 + # Check proxy health 1089 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 1090 + 1091 + # Test token lookup through proxy 1092 + curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 1093 + ``` 1094 + 1095 + Test OpenBao operations through the server: 1096 + 1097 + ```bash 1098 + # List all secrets 1099 + bao kv list spindle/ 1100 + 1101 + # Add a test secret via the spindle API, then check it exists 1102 + bao kv list spindle/repos/ 1103 + 1104 + # Get a specific secret 1105 + bao kv get spindle/repos/your_repo_path/SECRET_NAME 1106 + ``` 1107 + 1108 + ### How it works 1109 + 1110 + - Spindle connects to OpenBao Proxy on localhost (typically 1111 + port 8200 or 8201) 1112 + - The proxy authenticates with OpenBao using AppRole 1113 + credentials 1114 + - All spindle requests go through the proxy, which injects 1115 + authentication tokens 1116 + - Secrets are stored at 1117 + `spindle/repos/{sanitized_repo_path}/{secret_key}` 1118 + - Repository paths like `did:plc:alice/myrepo` become 1119 + `did_plc_alice_myrepo` 1120 + - The proxy handles all token renewal automatically 1121 + - Spindle no longer manages tokens or authentication 1122 + directly 1123 + 1124 + ### Troubleshooting 1125 + 1126 + **Connection refused**: Check that the OpenBao Proxy is 1127 + running and listening on the configured address. 1128 + 1129 + **403 errors**: Verify the AppRole credentials are correct 1130 + and the policy has the necessary permissions. 1131 + 1132 + **404 route errors**: The spindle KV mount probably doesn't 1133 + exist—run the mount creation step again. 1134 + 1135 + **Proxy authentication failures**: Check the proxy logs and 1136 + verify the role-id and secret-id files are readable and 1137 + contain valid credentials. 1138 + 1139 + **Secret not found after writing**: This can indicate policy 1140 + permission issues. Verify the policy includes both 1141 + `spindle/data/*` and `spindle/metadata/*` paths with 1142 + appropriate capabilities. 1143 + 1144 + Check proxy logs: 1145 + 1146 + ```bash 1147 + # If running as systemd service 1148 + journalctl -u openbao-proxy -f 1149 + 1150 + # If running directly, check the console output 1151 + ``` 1152 + 1153 + Test AppRole authentication manually: 1154 + 1155 + ```bash 1156 + bao write auth/approle/login \ 1157 + role_id="$(cat /tmp/openbao/role-id)" \ 1158 + secret_id="$(cat /tmp/openbao/secret-id)" 1159 + ``` 1160 + 1161 + # Migrating knots and spindles 1162 + 1163 + Sometimes, non-backwards compatible changes are made to the 1164 + knot/spindle XRPC APIs. If you host a knot or a spindle, you 1165 + will need to follow this guide to upgrade. 
Typically, this 1166 + only requires you to deploy the newest version. 1167 + 1168 + This document is laid out in reverse-chronological order. 1169 + Newer migration guides are listed first, and older guides 1170 + are further down the page. 1171 + 1172 + ## Upgrading from v1.8.x 1173 + 1174 + After v1.8.2, the HTTP API for knots and spindles has been 1175 + deprecated and replaced with XRPC. Repositories on outdated 1176 + knots will not be viewable from the appview. Upgrading is 1177 + straightforward however. 1178 + 1179 + For knots: 1180 + 1181 + - Upgrade to the latest tag (v1.9.0 or above) 1182 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1183 + hit the "retry" button to verify your knot 1184 + 1185 + For spindles: 1186 + 1187 + - Upgrade to the latest tag (v1.9.0 or above) 1188 + - Head to the [spindle 1189 + dashboard](https://tangled.org/settings/spindles) and hit the 1190 + "retry" button to verify your spindle 1191 + 1192 + ## Upgrading from v1.7.x 1193 + 1194 + After v1.7.0, knot secrets have been deprecated. You no 1195 + longer need a secret from the appview to run a knot. All 1196 + authorized commands to knots are managed via [Inter-Service 1197 + Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 1198 + Knots will be read-only until upgraded. 1199 + 1200 + Upgrading is quite easy, in essence: 1201 + 1202 + - `KNOT_SERVER_SECRET` is no more, you can remove this 1203 + environment variable entirely 1204 + - `KNOT_SERVER_OWNER` is now required on boot, set this to 1205 + your DID. You can find your DID in the 1206 + [settings](https://tangled.org/settings) page. 1207 + - Restart your knot once you have replaced the environment 1208 + variable 1209 + - Head to the [knot dashboard](https://tangled.org/settings/knots) and 1210 + hit the "retry" button to verify your knot. This simply 1211 + writes a `sh.tangled.knot` record to your PDS. 1212 + 1213 + If you use the nix module, simply bump the flake to the 1214 + latest revision, and change your config block like so: 1215 + 1216 + ```diff 1217 + services.tangled.knot = { 1218 + enable = true; 1219 + server = { 1220 + - secretFile = /path/to/secret; 1221 + + owner = "did:plc:foo"; 1222 + }; 1223 + }; 1224 + ``` 1225 + 1226 + # Hacking on Tangled 1227 + 1228 + We highly recommend [installing 1229 + Nix](https://nixos.org/download/) (the package manager) 1230 + before working on the codebase. The Nix flake provides a lot 1231 + of helpers to get started and most importantly, builds and 1232 + dev shells are entirely deterministic. 1233 + 1234 + To set up your dev environment: 1235 + 1236 + ```bash 1237 + nix develop 1238 + ``` 1239 + 1240 + Non-Nix users can look at the `devShell` attribute in the 1241 + `flake.nix` file to determine necessary dependencies. 
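If you'd rather not keep an interactive shell open, `nix develop` can also run one-off commands inside the same environment, which is handy for editors and scripts (the Go command here is just an example):

```bash
# run a single command inside the dev shell without entering it
nix develop -c go build ./...
```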
1242 +
1243 + ## Running the appview
1244 +
1245 + The Nix flake also exposes a few `app` attributes (run `nix
1246 + flake show` to see a full list of what the flake provides);
1247 + one of these apps runs the appview with the `air`
1248 + live-reloader:
1249 +
1250 + ```bash
1251 + TANGLED_DEV=true nix run .#watch-appview
1252 +
1253 + # TANGLED_DB_PATH might be of interest to point to
1254 + # different sqlite DBs
1255 +
1256 + # in a separate shell, you can live-reload tailwind
1257 + nix run .#watch-tailwind
1258 + ```
1259 +
1260 + To authenticate with the appview, you will need Redis and
1261 + OAuth JWKs to be set up:
1262 +
1263 + ```
1264 + # OAuth JWKs should already be set up by the Nix devshell:
1265 + echo $TANGLED_OAUTH_CLIENT_SECRET
1266 + z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1267 +
1268 + echo $TANGLED_OAUTH_CLIENT_KID
1269 + 1761667908
1270 +
1271 + # if not, you can set it up yourself:
1272 + goat key generate -t P-256
1273 + Key Type: P-256 / secp256r1 / ES256 private key
1274 + Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1275 + z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1276 + Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1277 + did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1278 +
1279 + # the secret key from above
1280 + export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1281 +
1282 + # Run Redis in a new shell to store OAuth sessions
1283 + redis-server
1284 + ```
1285 +
1286 + ## Running knots and spindles
1287 +
1288 + An end-to-end knot setup requires setting up a machine with
1289 + `sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1290 + quite cumbersome. So the Nix flake provides a
1291 + `nixosConfiguration` to do so.
1292 +
1293 + <details>
1294 + <summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1295 +
1296 + In order to build Tangled's dev VM on macOS, you will
1297 + first need to set up a Linux Nix builder. The recommended
1298 + way to do so is to run a [`darwin.linux-builder`
1299 + VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1300 + and to register it in `nix.conf` as a builder for Linux
1301 + with the same architecture as your Mac (`aarch64-linux` if
1302 + you are using Apple Silicon).
1303 +
1304 + > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1305 + > the Tangled repo so that it doesn't conflict with the other VM. For example,
1306 + > you can do
1307 + >
1308 + > ```shell
1309 + > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1310 + > ```
1311 + >
1312 + > to store the builder VM in a temporary dir.
1313 + >
1314 + > You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
1315 + > avoid subtle problems.
1316 +
1317 + Alternatively, you can use any other method to set up a
1318 + Linux machine with Nix installed that you can `sudo ssh`
1319 + into (in other words, the root user on your Mac has to be able
1320 + to ssh into the Linux machine without entering a password)
1321 + and that has the same architecture as your Mac. See
1322 + [remote builder
1323 + instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1324 + for how to register such a builder in `nix.conf`.
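For reference, a registration along these lines in `nix.conf` might look like the following sketch (the user, host, key path, and system are placeholders for your own builder):

```
# /etc/nix/nix.conf -- substitute your builder's user, host, key, and system
builders = ssh-ng://builder@linux-builder aarch64-linux /etc/nix/builder_ed25519
builders-use-substitutes = true
```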
1325 + 1326 + > WARNING: If you'd like to use 1327 + > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 1328 + > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 1329 + > ssh` works can be tricky. It seems to be [possible with 1330 + > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 1331 + 1332 + </details> 1333 + 1334 + To begin, grab your DID from http://localhost:3000/settings. 1335 + Then, set `TANGLED_VM_KNOT_OWNER` and 1336 + `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 1337 + lightweight NixOS VM like so: 1338 + 1339 + ```bash 1340 + nix run --impure .#vm 1341 + 1342 + # type `poweroff` at the shell to exit the VM 1343 + ``` 1344 + 1345 + This starts a knot on port 6444, a spindle on port 6555 1346 + with `ssh` exposed on port 2222. 1347 + 1348 + Once the services are running, head to 1349 + http://localhost:3000/settings/knots and hit "Verify". It should 1350 + verify the ownership of the services instantly if everything 1351 + went smoothly. 1352 + 1353 + You can push repositories to this VM with this ssh config 1354 + block on your main machine: 1355 + 1356 + ```bash 1357 + Host nixos-shell 1358 + Hostname localhost 1359 + Port 2222 1360 + User git 1361 + IdentityFile ~/.ssh/my_tangled_key 1362 + ``` 1363 + 1364 + Set up a remote called `local-dev` on a git repo: 1365 + 1366 + ```bash 1367 + git remote add local-dev git@nixos-shell:user/repo 1368 + git push local-dev main 1369 + ``` 1370 + 1371 + The above VM should already be running a spindle on 1372 + `localhost:6555`. Head to http://localhost:3000/settings/spindles and 1373 + hit "Verify". You can then configure each repository to use 1374 + this spindle and run CI jobs. 1375 + 1376 + Of interest when debugging spindles: 1377 + 1378 + ``` 1379 + # Service logs from journald: 1380 + journalctl -xeu spindle 1381 + 1382 + # CI job logs from disk: 1383 + ls /var/log/spindle 1384 + 1385 + # Debugging spindle database: 1386 + sqlite3 /var/lib/spindle/spindle.db 1387 + 1388 + # litecli has a nicer REPL interface: 1389 + litecli /var/lib/spindle/spindle.db 1390 + ``` 1391 + 1392 + If for any reason you wish to disable either one of the 1393 + services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 1394 + `services.tangled.spindle.enable` (or 1395 + `services.tangled.knot.enable`) to `false`. 1396 + 1397 + # Contribution guide 1398 + 1399 + ## Commit guidelines 1400 + 1401 + We follow a commit style similar to the Go project. Please keep commits: 1402 + 1403 + * **atomic**: each commit should represent one logical change 1404 + * **descriptive**: the commit message should clearly describe what the 1405 + change does and why it's needed 1406 + 1407 + ### Message format 1408 + 1409 + ``` 1410 + <service/top-level directory>/<affected package/directory>: <short summary of change> 1411 + 1412 + Optional longer description can go here, if necessary. Explain what the 1413 + change does and why, especially if not obvious. Reference relevant 1414 + issues or PRs when applicable. These can be links for now since we don't 1415 + auto-link issues/PRs yet. 1416 + ``` 1417 + 1418 + Here are some examples: 1419 + 1420 + ``` 1421 + appview/state: fix token expiry check in middleware 1422 + 1423 + The previous check did not account for clock drift, leading to premature 1424 + token invalidation. 
1425 + ``` 1426 + 1427 + ``` 1428 + knotserver/git/service: improve error checking in upload-pack 1429 + ``` 1430 + 1431 + 1432 + ### General notes 1433 + 1434 + - PRs get merged "as-is" (fast-forward)—like applying a patch-series 1435 + using `git am`. At present, there is no squashing—so please author 1436 + your commits as they would appear on `master`, following the above 1437 + guidelines. 1438 + - If there is a lot of nesting, for example "appview: 1439 + pages/templates/repo/fragments: ...", these can be truncated down to 1440 + just "appview: repo/fragments: ...". If the change affects a lot of 1441 + subdirectories, you may abbreviate to just the top-level names, e.g. 1442 + "appview: ..." or "knotserver: ...". 1443 + - Keep commits lowercased with no trailing period. 1444 + - Use the imperative mood in the summary line (e.g., "fix bug" not 1445 + "fixed bug" or "fixes bug"). 1446 + - Try to keep the summary line under 72 characters, but we aren't too 1447 + fussed about this. 1448 + - Follow the same formatting for PR titles if filled manually. 1449 + - Don't include unrelated changes in the same commit. 1450 + - Avoid noisy commit messages like "wip" or "final fix"—rewrite history 1451 + before submitting if necessary. 1452 + 1453 + ## Code formatting 1454 + 1455 + We use a variety of tools to format our code, and multiplex them with 1456 + [`treefmt`](https://treefmt.com). All you need to do to format your changes 1457 + is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 1458 + 1459 + ## Proposals for bigger changes 1460 + 1461 + Small fixes like typos, minor bugs, or trivial refactors can be 1462 + submitted directly as PRs. 1463 + 1464 + For larger changes—especially those introducing new features, significant 1465 + refactoring, or altering system behavior—please open a proposal first. This 1466 + helps us evaluate the scope, design, and potential impact before implementation. 1467 + 1468 + Create a new issue titled: 1469 + 1470 + ``` 1471 + proposal: <affected scope>: <summary of change> 1472 + ``` 1473 + 1474 + In the description, explain: 1475 + 1476 + - What the change is 1477 + - Why it's needed 1478 + - How you plan to implement it (roughly) 1479 + - Any open questions or tradeoffs 1480 + 1481 + We'll use the issue thread to discuss and refine the idea before moving 1482 + forward. 1483 + 1484 + ## Developer Certificate of Origin (DCO) 1485 + 1486 + We require all contributors to certify that they have the right to 1487 + submit the code they're contributing. To do this, we follow the 1488 + [Developer Certificate of Origin 1489 + (DCO)](https://developercertificate.org/). 1490 + 1491 + By signing your commits, you're stating that the contribution is your 1492 + own work, or that you have the right to submit it under the project's 1493 + license. This helps us keep things clean and legally sound. 1494 + 1495 + To sign your commit, just add the `-s` flag when committing: 1496 + 1497 + ```sh 1498 + git commit -s -m "your commit message" 1499 + ``` 1500 + 1501 + This appends a line like: 1502 + 1503 + ``` 1504 + Signed-off-by: Your Name <your.email@example.com> 1505 + ``` 1506 + 1507 + We won't merge commits if they aren't signed off. If you forget, you can 1508 + amend the last commit like this: 1509 + 1510 + ```sh 1511 + git commit --amend -s 1512 + ``` 1513 + 1514 + If you're submitting a PR with multiple commits, make sure each one is 1515 + signed. 
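If you notice late that a whole branch is missing sign-offs, one way to fix it in bulk (assuming a reasonably recent Git) is to rewrite the branch with `--signoff`:

```sh
# replay your branch on top of master, adding a Signed-off-by trailer to each commit
git rebase --signoff master
```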
1516 + 1517 + For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 1518 + to make it sign off commits in the tangled repo: 1519 + 1520 + ```shell 1521 + # Safety check, should say "No matching config key..." 1522 + jj config list templates.commit_trailers 1523 + # The command below may need to be adjusted if the command above returned something. 1524 + jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 1525 + ``` 1526 + 1527 + Refer to the [jujutsu 1528 + documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 1529 + for more information.
-136
docs/contributing.md
··· 1 - # tangled contributing guide 2 - 3 - ## commit guidelines 4 - 5 - We follow a commit style similar to the Go project. Please keep commits: 6 - 7 - * **atomic**: each commit should represent one logical change 8 - * **descriptive**: the commit message should clearly describe what the 9 - change does and why it's needed 10 - 11 - ### message format 12 - 13 - ``` 14 - <service/top-level directory>/<affected package/directory>: <short summary of change> 15 - 16 - 17 - Optional longer description can go here, if necessary. Explain what the 18 - change does and why, especially if not obvious. Reference relevant 19 - issues or PRs when applicable. These can be links for now since we don't 20 - auto-link issues/PRs yet. 21 - ``` 22 - 23 - Here are some examples: 24 - 25 - ``` 26 - appview/state: fix token expiry check in middleware 27 - 28 - The previous check did not account for clock drift, leading to premature 29 - token invalidation. 30 - ``` 31 - 32 - ``` 33 - knotserver/git/service: improve error checking in upload-pack 34 - ``` 35 - 36 - 37 - ### general notes 38 - 39 - - PRs get merged "as-is" (fast-forward) -- like applying a patch-series 40 - using `git am`. At present, there is no squashing -- so please author 41 - your commits as they would appear on `master`, following the above 42 - guidelines. 43 - - If there is a lot of nesting, for example "appview: 44 - pages/templates/repo/fragments: ...", these can be truncated down to 45 - just "appview: repo/fragments: ...". If the change affects a lot of 46 - subdirectories, you may abbreviate to just the top-level names, e.g. 47 - "appview: ..." or "knotserver: ...". 48 - - Keep commits lowercased with no trailing period. 49 - - Use the imperative mood in the summary line (e.g., "fix bug" not 50 - "fixed bug" or "fixes bug"). 51 - - Try to keep the summary line under 72 characters, but we aren't too 52 - fussed about this. 53 - - Follow the same formatting for PR titles if filled manually. 54 - - Don't include unrelated changes in the same commit. 55 - - Avoid noisy commit messages like "wip" or "final fix"—rewrite history 56 - before submitting if necessary. 57 - 58 - ## code formatting 59 - 60 - We use a variety of tools to format our code, and multiplex them with 61 - [`treefmt`](https://treefmt.com): all you need to do to format your changes 62 - is run `nix run .#fmt` (or just `treefmt` if you're in the devshell). 63 - 64 - ## proposals for bigger changes 65 - 66 - Small fixes like typos, minor bugs, or trivial refactors can be 67 - submitted directly as PRs. 68 - 69 - For larger changes—especially those introducing new features, significant 70 - refactoring, or altering system behavior—please open a proposal first. This 71 - helps us evaluate the scope, design, and potential impact before implementation. 72 - 73 - ### proposal format 74 - 75 - Create a new issue titled: 76 - 77 - ``` 78 - proposal: <affected scope>: <summary of change> 79 - ``` 80 - 81 - In the description, explain: 82 - 83 - - What the change is 84 - - Why it's needed 85 - - How you plan to implement it (roughly) 86 - - Any open questions or tradeoffs 87 - 88 - We'll use the issue thread to discuss and refine the idea before moving 89 - forward. 90 - 91 - ## developer certificate of origin (DCO) 92 - 93 - We require all contributors to certify that they have the right to 94 - submit the code they're contributing. To do this, we follow the 95 - [Developer Certificate of Origin 96 - (DCO)](https://developercertificate.org/). 
97 - 98 - By signing your commits, you're stating that the contribution is your 99 - own work, or that you have the right to submit it under the project's 100 - license. This helps us keep things clean and legally sound. 101 - 102 - To sign your commit, just add the `-s` flag when committing: 103 - 104 - ```sh 105 - git commit -s -m "your commit message" 106 - ``` 107 - 108 - This appends a line like: 109 - 110 - ``` 111 - Signed-off-by: Your Name <your.email@example.com> 112 - ``` 113 - 114 - We won't merge commits if they aren't signed off. If you forget, you can 115 - amend the last commit like this: 116 - 117 - ```sh 118 - git commit --amend -s 119 - ``` 120 - 121 - If you're submitting a PR with multiple commits, make sure each one is 122 - signed. 123 - 124 - For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command 125 - to make it sign off commits in the tangled repo: 126 - 127 - ```shell 128 - # Safety check, should say "No matching config key..." 129 - jj config list templates.commit_trailers 130 - # The command below may need to be adjusted if the command above returned something. 131 - jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)" 132 - ``` 133 - 134 - Refer to the [jj 135 - documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers) 136 - for more information.
-172
docs/hacking.md
··· 1 - # hacking on tangled 2 - 3 - We highly recommend [installing 4 - nix](https://nixos.org/download/) (the package manager) 5 - before working on the codebase. The nix flake provides a lot 6 - of helpers to get started and most importantly, builds and 7 - dev shells are entirely deterministic. 8 - 9 - To set up your dev environment: 10 - 11 - ```bash 12 - nix develop 13 - ``` 14 - 15 - Non-nix users can look at the `devShell` attribute in the 16 - `flake.nix` file to determine necessary dependencies. 17 - 18 - ## running the appview 19 - 20 - The nix flake also exposes a few `app` attributes (run `nix 21 - flake show` to see a full list of what the flake provides), 22 - one of the apps runs the appview with the `air` 23 - live-reloader: 24 - 25 - ```bash 26 - TANGLED_DEV=true nix run .#watch-appview 27 - 28 - # TANGLED_DB_PATH might be of interest to point to 29 - # different sqlite DBs 30 - 31 - # in a separate shell, you can live-reload tailwind 32 - nix run .#watch-tailwind 33 - ``` 34 - 35 - To authenticate with the appview, you will need redis and 36 - OAUTH JWKs to be setup: 37 - 38 - ``` 39 - # oauth jwks should already be setup by the nix devshell: 40 - echo $TANGLED_OAUTH_CLIENT_SECRET 41 - z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc 42 - 43 - echo $TANGLED_OAUTH_CLIENT_KID 44 - 1761667908 45 - 46 - # if not, you can set it up yourself: 47 - goat key generate -t P-256 48 - Key Type: P-256 / secp256r1 / ES256 private key 49 - Secret Key (Multibase Syntax): save this securely (eg, add to password manager) 50 - z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL 51 - Public Key (DID Key Syntax): share or publish this (eg, in DID document) 52 - did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR 53 - 54 - # the secret key from above 55 - export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..." 56 - 57 - # run redis in at a new shell to store oauth sessions 58 - redis-server 59 - ``` 60 - 61 - ## running knots and spindles 62 - 63 - An end-to-end knot setup requires setting up a machine with 64 - `sshd`, `AuthorizedKeysCommand`, and git user, which is 65 - quite cumbersome. So the nix flake provides a 66 - `nixosConfiguration` to do so. 67 - 68 - <details> 69 - <summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary> 70 - 71 - In order to build Tangled's dev VM on macOS, you will 72 - first need to set up a Linux Nix builder. The recommended 73 - way to do so is to run a [`darwin.linux-builder` 74 - VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) 75 - and to register it in `nix.conf` as a builder for Linux 76 - with the same architecture as your Mac (`linux-aarch64` if 77 - you are using Apple Silicon). 78 - 79 - > IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside 80 - > the tangled repo so that it doesn't conflict with the other VM. For example, 81 - > you can do 82 - > 83 - > ```shell 84 - > cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder 85 - > ``` 86 - > 87 - > to store the builder VM in a temporary dir. 88 - > 89 - > You should read and follow [all the other intructions][darwin builder vm] to 90 - > avoid subtle problems. 91 - 92 - Alternatively, you can use any other method to set up a 93 - Linux machine with `nix` installed that you can `sudo ssh` 94 - into (in other words, root user on your Mac has to be able 95 - to ssh into the Linux machine without entering a password) 96 - and that has the same architecture as your Mac. 
See 97 - [remote builder 98 - instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements) 99 - for how to register such a builder in `nix.conf`. 100 - 101 - > WARNING: If you'd like to use 102 - > [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or 103 - > [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo 104 - > ssh` works can be tricky. It seems to be [possible with 105 - > Orbstack](https://github.com/orgs/orbstack/discussions/1669). 106 - 107 - </details> 108 - 109 - To begin, grab your DID from http://localhost:3000/settings. 110 - Then, set `TANGLED_VM_KNOT_OWNER` and 111 - `TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a 112 - lightweight NixOS VM like so: 113 - 114 - ```bash 115 - nix run --impure .#vm 116 - 117 - # type `poweroff` at the shell to exit the VM 118 - ``` 119 - 120 - This starts a knot on port 6000, a spindle on port 6555 121 - with `ssh` exposed on port 2222. 122 - 123 - Once the services are running, head to 124 - http://localhost:3000/knots and hit verify. It should 125 - verify the ownership of the services instantly if everything 126 - went smoothly. 127 - 128 - You can push repositories to this VM with this ssh config 129 - block on your main machine: 130 - 131 - ```bash 132 - Host nixos-shell 133 - Hostname localhost 134 - Port 2222 135 - User git 136 - IdentityFile ~/.ssh/my_tangled_key 137 - ``` 138 - 139 - Set up a remote called `local-dev` on a git repo: 140 - 141 - ```bash 142 - git remote add local-dev git@nixos-shell:user/repo 143 - git push local-dev main 144 - ``` 145 - 146 - ### running a spindle 147 - 148 - The above VM should already be running a spindle on 149 - `localhost:6555`. Head to http://localhost:3000/spindles and 150 - hit verify. You can then configure each repository to use 151 - this spindle and run CI jobs. 152 - 153 - Of interest when debugging spindles: 154 - 155 - ``` 156 - # service logs from journald: 157 - journalctl -xeu spindle 158 - 159 - # CI job logs from disk: 160 - ls /var/log/spindle 161 - 162 - # debugging spindle db: 163 - sqlite3 /var/lib/spindle/spindle.db 164 - 165 - # litecli has a nicer REPL interface: 166 - litecli /var/lib/spindle/spindle.db 167 - ``` 168 - 169 - If for any reason you wish to disable either one of the 170 - services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set 171 - `services.tangled.spindle.enable` (or 172 - `services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
··· 1 + { 2 + "text-color": null, 3 + "background-color": null, 4 + "line-number-color": null, 5 + "line-number-background-color": null, 6 + "text-styles": { 7 + "Annotation": { 8 + "text-color": null, 9 + "background-color": null, 10 + "bold": false, 11 + "italic": true, 12 + "underline": false 13 + }, 14 + "ControlFlow": { 15 + "text-color": null, 16 + "background-color": null, 17 + "bold": true, 18 + "italic": false, 19 + "underline": false 20 + }, 21 + "Error": { 22 + "text-color": null, 23 + "background-color": null, 24 + "bold": true, 25 + "italic": false, 26 + "underline": false 27 + }, 28 + "Alert": { 29 + "text-color": null, 30 + "background-color": null, 31 + "bold": true, 32 + "italic": false, 33 + "underline": false 34 + }, 35 + "Preprocessor": { 36 + "text-color": null, 37 + "background-color": null, 38 + "bold": true, 39 + "italic": false, 40 + "underline": false 41 + }, 42 + "Information": { 43 + "text-color": null, 44 + "background-color": null, 45 + "bold": false, 46 + "italic": true, 47 + "underline": false 48 + }, 49 + "Warning": { 50 + "text-color": null, 51 + "background-color": null, 52 + "bold": false, 53 + "italic": true, 54 + "underline": false 55 + }, 56 + "Documentation": { 57 + "text-color": null, 58 + "background-color": null, 59 + "bold": false, 60 + "italic": true, 61 + "underline": false 62 + }, 63 + "DataType": { 64 + "text-color": "#8f4e8b", 65 + "background-color": null, 66 + "bold": false, 67 + "italic": false, 68 + "underline": false 69 + }, 70 + "Comment": { 71 + "text-color": null, 72 + "background-color": null, 73 + "bold": false, 74 + "italic": true, 75 + "underline": false 76 + }, 77 + "CommentVar": { 78 + "text-color": null, 79 + "background-color": null, 80 + "bold": false, 81 + "italic": true, 82 + "underline": false 83 + }, 84 + "Keyword": { 85 + "text-color": null, 86 + "background-color": null, 87 + "bold": true, 88 + "italic": false, 89 + "underline": false 90 + } 91 + } 92 + } 93 +
-214
docs/knot-hosting.md
··· 1 - # knot self-hosting guide 2 - 3 - So you want to run your own knot server? Great! Here are a few prerequisites: 4 - 5 - 1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind. 6 - 2. A (sub)domain name. People generally use `knot.example.com`. 7 - 3. A valid SSL certificate for your domain. 8 - 9 - There's a couple of ways to get started: 10 - * NixOS: refer to 11 - [flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix) 12 - * Docker: Documented at 13 - [@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker) 14 - (community maintained: support is not guaranteed!) 15 - * Manual: Documented below. 16 - 17 - ## manual setup 18 - 19 - First, clone this repository: 20 - 21 - ``` 22 - git clone https://tangled.org/@tangled.org/core 23 - ``` 24 - 25 - Then, build the `knot` CLI. This is the knot administration and operation tool. 26 - For the purpose of this guide, we're only concerned with these subcommands: 27 - 28 - * `knot server`: the main knot server process, typically run as a 29 - supervised service 30 - * `knot guard`: handles role-based access control for git over SSH 31 - (you'll never have to run this yourself) 32 - * `knot keys`: fetches SSH keys associated with your knot; we'll use 33 - this to generate the SSH `AuthorizedKeysCommand` 34 - 35 - ``` 36 - cd core 37 - export CGO_ENABLED=1 38 - go build -o knot ./cmd/knot 39 - ``` 40 - 41 - Next, move the `knot` binary to a location owned by `root` -- 42 - `/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`: 43 - 44 - ``` 45 - sudo mv knot /usr/local/bin/knot 46 - sudo chown root:root /usr/local/bin/knot 47 - ``` 48 - 49 - This is necessary because SSH `AuthorizedKeysCommand` requires [really 50 - specific permissions](https://stackoverflow.com/a/27638306). The 51 - `AuthorizedKeysCommand` specifies a command that is run by `sshd` to 52 - retrieve a user's public SSH keys dynamically for authentication. Let's 53 - set that up. 54 - 55 - ``` 56 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 57 - Match User git 58 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys 59 - AuthorizedKeysCommandUser nobody 60 - EOF 61 - ``` 62 - 63 - Then, reload `sshd`: 64 - 65 - ``` 66 - sudo systemctl reload ssh 67 - ``` 68 - 69 - Next, create the `git` user. We'll use the `git` user's home directory 70 - to store repositories: 71 - 72 - ``` 73 - sudo adduser git 74 - ``` 75 - 76 - Create `/home/git/.knot.env` with the following, updating the values as 77 - necessary. The `KNOT_SERVER_OWNER` should be set to your 78 - DID, you can find your DID in the [Settings](https://tangled.sh/settings) page. 79 - 80 - ``` 81 - KNOT_REPO_SCAN_PATH=/home/git 82 - KNOT_SERVER_HOSTNAME=knot.example.com 83 - APPVIEW_ENDPOINT=https://tangled.sh 84 - KNOT_SERVER_OWNER=did:plc:foobar 85 - KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444 86 - KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555 87 - ``` 88 - 89 - If you run a Linux distribution that uses systemd, you can use the provided 90 - service file to run the server. Copy 91 - [`knotserver.service`](/systemd/knotserver.service) 92 - to `/etc/systemd/system/`. Then, run: 93 - 94 - ``` 95 - systemctl enable knotserver 96 - systemctl start knotserver 97 - ``` 98 - 99 - The last step is to configure a reverse proxy like Nginx or Caddy to front your 100 - knot. 
Here's an example configuration for Nginx: 101 - 102 - ``` 103 - server { 104 - listen 80; 105 - listen [::]:80; 106 - server_name knot.example.com; 107 - 108 - location / { 109 - proxy_pass http://localhost:5555; 110 - proxy_set_header Host $host; 111 - proxy_set_header X-Real-IP $remote_addr; 112 - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 113 - proxy_set_header X-Forwarded-Proto $scheme; 114 - } 115 - 116 - # wss endpoint for git events 117 - location /events { 118 - proxy_set_header X-Forwarded-For $remote_addr; 119 - proxy_set_header Host $http_host; 120 - proxy_set_header Upgrade websocket; 121 - proxy_set_header Connection Upgrade; 122 - proxy_pass http://localhost:5555; 123 - } 124 - # additional config for SSL/TLS go here. 125 - } 126 - 127 - ``` 128 - 129 - Remember to use Let's Encrypt or similar to procure a certificate for your 130 - knot domain. 131 - 132 - You should now have a running knot server! You can finalize 133 - your registration by hitting the `verify` button on the 134 - [/knots](https://tangled.org/knots) page. This simply creates 135 - a record on your PDS to announce the existence of the knot. 136 - 137 - ### custom paths 138 - 139 - (This section applies to manual setup only. Docker users should edit the mounts 140 - in `docker-compose.yml` instead.) 141 - 142 - Right now, the database and repositories of your knot lives in `/home/git`. You 143 - can move these paths if you'd like to store them in another folder. Be careful 144 - when adjusting these paths: 145 - 146 - * Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent 147 - any possible side effects. Remember to restart it once you're done. 148 - * Make backups before moving in case something goes wrong. 149 - * Make sure the `git` user can read and write from the new paths. 150 - 151 - #### database 152 - 153 - As an example, let's say the current database is at `/home/git/knotserver.db`, 154 - and we want to move it to `/home/git/database/knotserver.db`. 155 - 156 - Copy the current database to the new location. Make sure to copy the `.db-shm` 157 - and `.db-wal` files if they exist. 158 - 159 - ``` 160 - mkdir /home/git/database 161 - cp /home/git/knotserver.db* /home/git/database 162 - ``` 163 - 164 - In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to 165 - the new file path (_not_ the directory): 166 - 167 - ``` 168 - KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db 169 - ``` 170 - 171 - #### repositories 172 - 173 - As an example, let's say the repositories are currently in `/home/git`, and we 174 - want to move them into `/home/git/repositories`. 175 - 176 - Create the new folder, then move the existing repositories (if there are any): 177 - 178 - ``` 179 - mkdir /home/git/repositories 180 - # move all DIDs into the new folder; these will vary for you! 181 - mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories 182 - ``` 183 - 184 - In the environment (e.g. 
`/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH` 185 - to the new directory: 186 - 187 - ``` 188 - KNOT_REPO_SCAN_PATH=/home/git/repositories 189 - ``` 190 - 191 - Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated 192 - repository path: 193 - 194 - ``` 195 - sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF 196 - Match User git 197 - AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories 198 - AuthorizedKeysCommandUser nobody 199 - EOF 200 - ``` 201 - 202 - Make sure to restart your SSH server! 203 - 204 - #### MOTD (message of the day) 205 - 206 - To configure the MOTD used ("Welcome to this knot!" by default), edit the 207 - `/home/git/motd` file: 208 - 209 - ``` 210 - printf "Hi from this knot!\n" > /home/git/motd 211 - ``` 212 - 213 - Note that you should add a newline at the end if setting a non-empty message 214 - since the knot won't do this for you.
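If you'd rather front the knot with Caddy than Nginx, a minimal sketch (assuming Caddy is installed and managed by systemd, and that the knot listens on port 5555 as configured above) looks like this; Caddy provisions TLS automatically and proxies websockets, so the `/events` endpoint needs no separate block:

```
sudo tee /etc/caddy/Caddyfile <<EOF
knot.example.com {
    reverse_proxy localhost:5555
}
EOF
sudo systemctl reload caddy
```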
-59
docs/migrations.md
··· 1 - # Migrations 2 - 3 - This document is laid out in reverse-chronological order. 4 - Newer migration guides are listed first, and older guides 5 - are further down the page. 6 - 7 - ## Upgrading from v1.8.x 8 - 9 - After v1.8.2, the HTTP APIs for knots and spindles have been 10 - deprecated and replaced with XRPC. Repositories on outdated 11 - knots will not be viewable from the appview. Upgrading is 12 - straightforward, however. 13 - 14 - For knots: 15 - 16 - - Upgrade to the latest tag (v1.9.0 or above) 17 - - Head to the [knot dashboard](https://tangled.org/knots) and 18 - hit the "retry" button to verify your knot 19 - 20 - For spindles: 21 - 22 - - Upgrade to the latest tag (v1.9.0 or above) 23 - - Head to the [spindle 24 - dashboard](https://tangled.org/spindles) and hit the 25 - "retry" button to verify your spindle 26 - 27 - ## Upgrading from v1.7.x 28 - 29 - After v1.7.0, knot secrets have been deprecated. You no 30 - longer need a secret from the appview to run a knot. All 31 - authorized commands to knots are managed via [Inter-Service 32 - Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt). 33 - Knots will be read-only until upgraded. 34 - 35 - Upgrading is quite easy, in essence: 36 - 37 - - `KNOT_SERVER_SECRET` is no more; you can remove this 38 - environment variable entirely 39 - - `KNOT_SERVER_OWNER` is now required on boot; set this to 40 - your DID. You can find your DID in the 41 - [settings](https://tangled.org/settings) page. 42 - - Restart your knot once you have replaced the environment 43 - variable 44 - - Head to the [knot dashboard](https://tangled.org/knots) and 45 - hit the "retry" button to verify your knot. This simply 46 - writes a `sh.tangled.knot` record to your PDS. 47 - 48 - If you use the nix module, simply bump the flake to the 49 - latest revision, and change your config block like so: 50 - 51 - ```diff 52 - services.tangled.knot = { 53 - enable = true; 54 - server = { 55 - - secretFile = /path/to/secret; 56 - + owner = "did:plc:foo"; 57 - }; 58 - }; 59 - ```
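For knots configured through a plain environment file instead of the nix module, the same v1.7.x change boils down to swapping one variable for another (a sketch; the DID is a placeholder):

```diff
 # /home/git/.knot.env
-KNOT_SERVER_SECRET=<appview-issued secret>
+KNOT_SERVER_OWNER=did:plc:foo
```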
-25
docs/spindle/architecture.md
··· 1 - # spindle architecture 2 - 3 - Spindle is a small CI runner service. Here's a high level overview of how it operates: 4 - 5 - * listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and 6 - [`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream. 7 - * when a new repo record comes through (typically when you add a spindle to a 8 - repo from the settings), spindle then resolves the underlying knot and 9 - subscribes to repo events (see: 10 - [`sh.tangled.pipeline`](/lexicons/pipeline.json)). 11 - * the spindle engine then handles execution of the pipeline, with results and 12 - logs beamed on the spindle event stream over wss 13 - 14 - ### the engine 15 - 16 - At present, the only supported backend is Docker (and Podman, if Docker 17 - compatibility is enabled, so that `/run/docker.sock` is created). Spindle 18 - executes each step in the pipeline in a fresh container, with state persisted 19 - across steps within the `/tangled/workspace` directory. 20 - 21 - The base image for the container is constructed on the fly using 22 - [Nixery](https://nixery.dev), which is handy for caching layers for frequently 23 - used packages. 24 - 25 - The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
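To get a feel for how the Nixery-built base image behaves, you can pull an ad-hoc image yourself (a sketch against the public nixery.dev instance; the package names are arbitrary examples, not what spindle itself requests):

```bash
# each path segment is a Nix package layered into the image
docker pull nixery.dev/shell/git/go
docker run --rm nixery.dev/shell/git/go git version
```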
-52
docs/spindle/hosting.md
··· 1 - # spindle self-hosting guide 2 - 3 - ## prerequisites 4 - 5 - * Go 6 - * Docker (the only supported backend currently) 7 - 8 - ## configuration 9 - 10 - Spindle is configured using environment variables. The following environment variables are available: 11 - 12 - * `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`). 13 - * `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`). 14 - * `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required). 15 - * `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`). 16 - * `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`). 17 - * `SPINDLE_SERVER_OWNER`: The DID of the owner (required). 18 - * `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`). 19 - * `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`). 20 - * `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`). 21 - 22 - ## running spindle 23 - 24 - 1. **Set the environment variables.** For example: 25 - 26 - ```shell 27 - export SPINDLE_SERVER_HOSTNAME="your-hostname" 28 - export SPINDLE_SERVER_OWNER="your-did" 29 - ``` 30 - 31 - 2. **Build the Spindle binary.** 32 - 33 - ```shell 34 - cd core 35 - go mod download 36 - go build -o cmd/spindle/spindle cmd/spindle/main.go 37 - ``` 38 - 39 - 3. **Create the log directory.** 40 - 41 - ```shell 42 - sudo mkdir -p /var/log/spindle 43 - sudo chown $USER:$USER -R /var/log/spindle 44 - ``` 45 - 46 - 4. **Run the Spindle binary.** 47 - 48 - ```shell 49 - ./cmd/spindle/spindle 50 - ``` 51 - 52 - Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
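To keep Spindle running across reboots, one option is a small systemd unit (a sketch only; the binary path, user, and environment file are assumptions you should adapt to your setup):

```shell
sudo tee /etc/systemd/system/spindle.service <<'EOF'
[Unit]
Description=spindle CI runner
After=network.target docker.service

[Service]
User=spindle
EnvironmentFile=/etc/spindle.env
ExecStart=/usr/local/bin/spindle
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl enable --now spindle
```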
-285
docs/spindle/openbao.md
··· 1 - # spindle secrets with openbao 2 - 3 - This document covers setting up Spindle to use OpenBao for secrets 4 - management via OpenBao Proxy instead of the default SQLite backend. 5 - 6 - ## overview 7 - 8 - Spindle now uses OpenBao Proxy for secrets management. The proxy handles 9 - authentication automatically using AppRole credentials, while Spindle 10 - connects to the local proxy instead of directly to the OpenBao server. 11 - 12 - This approach provides better security, automatic token renewal, and 13 - simplified application code. 14 - 15 - ## installation 16 - 17 - Install OpenBao from nixpkgs: 18 - 19 - ```bash 20 - nix shell nixpkgs#openbao # for a local server 21 - ``` 22 - 23 - ## setup 24 - 25 - The setup process is documented for both local development and production. 26 - 27 - ### local development 28 - 29 - Start OpenBao in dev mode: 30 - 31 - ```bash 32 - bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201 33 - ``` 34 - 35 - This starts OpenBao on `http://localhost:8201` with a root token. 36 - 37 - Set up the environment for the bao CLI: 38 - 39 - ```bash 40 - export BAO_ADDR=http://localhost:8201 41 - export BAO_TOKEN=root 42 - ``` 43 - 44 - ### production 45 - 46 - You would typically use a systemd service with a configuration file. Refer to 47 - [@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be 48 - achieved using Nix. 49 - 50 - Then, initialize the bao server: 51 - ```bash 52 - bao operator init -key-shares=1 -key-threshold=1 53 - ``` 54 - 55 - This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up: 56 - ```bash 57 - bao operator unseal <unseal_key> 58 - ``` 59 - 60 - All steps below remain the same across both dev and production setups.
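In either case, before moving on it's worth confirming the server is reachable and unsealed (assuming `BAO_ADDR` and `BAO_TOKEN` are exported as above):

```bash
# "Sealed: false" in the output means the server is ready for the steps below
bao status
```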
61 - 62 - ### configure openbao server 63 - 64 - Create the spindle KV mount: 65 - 66 - ```bash 67 - bao secrets enable -path=spindle -version=2 kv 68 - ``` 69 - 70 - Set up AppRole authentication and policy: 71 - 72 - Create a policy file `spindle-policy.hcl`: 73 - 74 - ```hcl 75 - # Full access to spindle KV v2 data 76 - path "spindle/data/*" { 77 - capabilities = ["create", "read", "update", "delete"] 78 - } 79 - 80 - # Access to metadata for listing and management 81 - path "spindle/metadata/*" { 82 - capabilities = ["list", "read", "delete", "update"] 83 - } 84 - 85 - # Allow listing at root level 86 - path "spindle/" { 87 - capabilities = ["list"] 88 - } 89 - 90 - # Required for connection testing and health checks 91 - path "auth/token/lookup-self" { 92 - capabilities = ["read"] 93 - } 94 - ``` 95 - 96 - Apply the policy and create an AppRole: 97 - 98 - ```bash 99 - bao policy write spindle-policy spindle-policy.hcl 100 - bao auth enable approle 101 - bao write auth/approle/role/spindle \ 102 - token_policies="spindle-policy" \ 103 - token_ttl=1h \ 104 - token_max_ttl=4h \ 105 - bind_secret_id=true \ 106 - secret_id_ttl=0 \ 107 - secret_id_num_uses=0 108 - ``` 109 - 110 - Get the credentials: 111 - 112 - ```bash 113 - # Get role ID (static) 114 - ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id) 115 - 116 - # Generate secret ID 117 - SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id) 118 - 119 - echo "Role ID: $ROLE_ID" 120 - echo "Secret ID: $SECRET_ID" 121 - ``` 122 - 123 - ### create proxy configuration 124 - 125 - Create the credential files: 126 - 127 - ```bash 128 - # Create directory for OpenBao files 129 - mkdir -p /tmp/openbao 130 - 131 - # Save credentials 132 - echo "$ROLE_ID" > /tmp/openbao/role-id 133 - echo "$SECRET_ID" > /tmp/openbao/secret-id 134 - chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id 135 - ``` 136 - 137 - Create a proxy configuration file `/tmp/openbao/proxy.hcl`: 138 - 139 - ```hcl 140 - # OpenBao server connection 141 - vault { 142 - address = "http://localhost:8200" 143 - } 144 - 145 - # Auto-Auth using AppRole 146 - auto_auth { 147 - method "approle" { 148 - mount_path = "auth/approle" 149 - config = { 150 - role_id_file_path = "/tmp/openbao/role-id" 151 - secret_id_file_path = "/tmp/openbao/secret-id" 152 - } 153 - } 154 - 155 - # Optional: write token to file for debugging 156 - sink "file" { 157 - config = { 158 - path = "/tmp/openbao/token" 159 - mode = 0640 160 - } 161 - } 162 - } 163 - 164 - # Proxy listener for Spindle 165 - listener "tcp" { 166 - address = "127.0.0.1:8201" 167 - tls_disable = true 168 - } 169 - 170 - # Enable API proxy with auto-auth token 171 - api_proxy { 172 - use_auto_auth_token = true 173 - } 174 - 175 - # Enable response caching 176 - cache { 177 - use_auto_auth_token = true 178 - } 179 - 180 - # Logging 181 - log_level = "info" 182 - ``` 183 - 184 - ### start the proxy 185 - 186 - Start OpenBao Proxy: 187 - 188 - ```bash 189 - bao proxy -config=/tmp/openbao/proxy.hcl 190 - ``` 191 - 192 - The proxy will authenticate with OpenBao and start listening on 193 - `127.0.0.1:8201`. 
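If you kept the optional file sink from the proxy configuration above, a quick way to confirm auto-auth succeeded is to check that the token file was written (path taken from the sink config; adjust if you changed it):

```bash
# a non-empty file here means the proxy obtained a token via AppRole
ls -l /tmp/openbao/token
```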
194 - 195 - ### configure spindle 196 - 197 - Set these environment variables for Spindle: 198 - 199 - ```bash 200 - export SPINDLE_SERVER_SECRETS_PROVIDER=openbao 201 - export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201 202 - export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle 203 - ``` 204 - 205 - Start Spindle: 206 - 207 - Spindle will now connect to the local proxy, which handles all 208 - authentication automatically. 209 - 210 - ## production setup for proxy 211 - 212 - For production, you'll want to run the proxy as a service: 213 - 214 - Place your production configuration in `/etc/openbao/proxy.hcl` with 215 - proper TLS settings for the vault connection. 216 - 217 - ## verifying setup 218 - 219 - Test the proxy directly: 220 - 221 - ```bash 222 - # Check proxy health 223 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health 224 - 225 - # Test token lookup through proxy 226 - curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self 227 - ``` 228 - 229 - Test OpenBao operations through the server: 230 - 231 - ```bash 232 - # List all secrets 233 - bao kv list spindle/ 234 - 235 - # Add a test secret via Spindle API, then check it exists 236 - bao kv list spindle/repos/ 237 - 238 - # Get a specific secret 239 - bao kv get spindle/repos/your_repo_path/SECRET_NAME 240 - ``` 241 - 242 - ## how it works 243 - 244 - - Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201) 245 - - The proxy authenticates with OpenBao using AppRole credentials 246 - - All Spindle requests go through the proxy, which injects authentication tokens 247 - - Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}` 248 - - Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo` 249 - - The proxy handles all token renewal automatically 250 - - Spindle no longer manages tokens or authentication directly 251 - 252 - ## troubleshooting 253 - 254 - **Connection refused**: Check that the OpenBao Proxy is running and 255 - listening on the configured address. 256 - 257 - **403 errors**: Verify the AppRole credentials are correct and the policy 258 - has the necessary permissions. 259 - 260 - **404 route errors**: The spindle KV mount probably doesn't exist - run 261 - the mount creation step again. 262 - 263 - **Proxy authentication failures**: Check the proxy logs and verify the 264 - role-id and secret-id files are readable and contain valid credentials. 265 - 266 - **Secret not found after writing**: This can indicate policy permission 267 - issues. Verify the policy includes both `spindle/data/*` and 268 - `spindle/metadata/*` paths with appropriate capabilities. 269 - 270 - Check proxy logs: 271 - 272 - ```bash 273 - # If running as systemd service 274 - journalctl -u openbao-proxy -f 275 - 276 - # If running directly, check the console output 277 - ``` 278 - 279 - Test AppRole authentication manually: 280 - 281 - ```bash 282 - bao write auth/approle/login \ 283 - role_id="$(cat /tmp/openbao/role-id)" \ 284 - secret_id="$(cat /tmp/openbao/secret-id)" 285 - ```
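As a final end-to-end check (a sketch, assuming the proxy from the earlier sections is listening on `127.0.0.1:8201` and the policy above is in place), you can round-trip a throwaway secret through the proxy with the bao CLI:

```bash
export BAO_ADDR=http://127.0.0.1:8201
# no BAO_TOKEN needed; the proxy injects the auto-auth token
bao kv put spindle/smoke-test hello=world
bao kv get spindle/smoke-test
bao kv metadata delete spindle/smoke-test
```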
-183
docs/spindle/pipeline.md
··· 1 - # spindle pipelines 2 - 3 - Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML. 4 - 5 - The fields are: 6 - 7 - - [Trigger](#trigger): A **required** field that defines when a workflow should be triggered. 8 - - [Engine](#engine): A **required** field that defines which engine a workflow should run on. 9 - - [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned. 10 - - [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need. 11 - - [Environment](#environment): An **optional** field that allows you to define environment variables. 12 - - [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow. 13 - 14 - ## Trigger 15 - 16 - The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields: 17 - 18 - - `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values: 19 - - `push`: The workflow should run every time a commit is pushed to the repository. 20 - - `pull_request`: The workflow should run every time a pull request is made or updated. 21 - - `manual`: The workflow can be triggered manually. 22 - - `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events. 23 - - `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events. 24 - 25 - For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with: 26 - 27 - ```yaml 28 - when: 29 - - event: ["push", "manual"] 30 - branch: ["main", "develop"] 31 - - event: ["pull_request"] 32 - branch: ["main"] 33 - ``` 34 - 35 - You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed: 36 - 37 - ```yaml 38 - when: 39 - - event: ["push"] 40 - tag: ["v*"] 41 - ``` 42 - 43 - You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches): 44 - 45 - ```yaml 46 - when: 47 - - event: ["push"] 48 - branch: ["main", "release-*"] 49 - tag: ["v*", "stable"] 50 - ``` 51 - 52 - ## Engine 53 - 54 - Next is the engine on which the workflow should run, defined using the **required** `engine` field. 
The currently supported engines are: 55 - 56 - - `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there. 57 - 58 - Example: 59 - 60 - ```yaml 61 - engine: "nixery" 62 - ``` 63 - 64 - ## Clone options 65 - 66 - When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields: 67 - 68 - - `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default. 69 - - `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow. 70 - - `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default. 71 - 72 - The default settings are: 73 - 74 - ```yaml 75 - clone: 76 - skip: false 77 - depth: 1 78 - submodules: false 79 - ``` 80 - 81 - ## Dependencies 82 - 83 - Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch. 84 - 85 - Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so: 86 - 87 - ```yaml 88 - dependencies: 89 - # nixpkgs 90 - nixpkgs: 91 - - nodejs 92 - - go 93 - # custom registry 94 - git+https://tangled.org/@example.com/my_pkg: 95 - - my_pkg 96 - ``` 97 - 98 - Now these dependencies are available to use in your workflow! 99 - 100 - ## Environment 101 - 102 - The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 103 - 104 - Example: 105 - 106 - ```yaml 107 - environment: 108 - GOOS: "linux" 109 - GOARCH: "arm64" 110 - NODE_ENV: "production" 111 - MY_ENV_VAR: "MY_ENV_VALUE" 112 - ``` 113 - 114 - ## Steps 115 - 116 - The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields: 117 - 118 - - `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing. 119 - - `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120 - - `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here, these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.** 121 - 122 - Example: 123 - 124 - ```yaml 125 - steps: 126 - - name: "Build backend" 127 - command: "go build" 128 - environment: 129 - GOOS: "darwin" 130 - GOARCH: "arm64" 131 - - name: "Build frontend" 132 - command: "npm run build" 133 - environment: 134 - NODE_ENV: "production" 135 - ``` 136 - 137 - ## Complete workflow 138 - 139 - ```yaml 140 - # .tangled/workflows/build.yml 141 - 142 - when: 143 - - event: ["push", "manual"] 144 - branch: ["main", "develop"] 145 - - event: ["pull_request"] 146 - branch: ["main"] 147 - 148 - engine: "nixery" 149 - 150 - # using the default values 151 - clone: 152 - skip: false 153 - depth: 1 154 - submodules: false 155 - 156 - dependencies: 157 - # nixpkgs 158 - nixpkgs: 159 - - nodejs 160 - - go 161 - # custom registry 162 - git+https://tangled.org/@example.com/my_pkg: 163 - - my_pkg 164 - 165 - environment: 166 - GOOS: "linux" 167 - GOARCH: "arm64" 168 - NODE_ENV: "production" 169 - MY_ENV_VAR: "MY_ENV_VALUE" 170 - 171 - steps: 172 - - name: "Build backend" 173 - command: "go build" 174 - environment: 175 - GOOS: "darwin" 176 - GOARCH: "arm64" 177 - - name: "Build frontend" 178 - command: "npm run build" 179 - environment: 180 - NODE_ENV: "production" 181 - ``` 182 - 183 - If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
··· 1 + svg { 2 + width: 16px; 3 + height: 16px; 4 + } 5 + 6 + :root { 7 + --syntax-alert: #d20f39; 8 + --syntax-annotation: #fe640b; 9 + --syntax-attribute: #df8e1d; 10 + --syntax-basen: #40a02b; 11 + --syntax-builtin: #1e66f5; 12 + --syntax-controlflow: #8839ef; 13 + --syntax-char: #04a5e5; 14 + --syntax-constant: #fe640b; 15 + --syntax-comment: #9ca0b0; 16 + --syntax-commentvar: #7c7f93; 17 + --syntax-documentation: #9ca0b0; 18 + --syntax-datatype: #df8e1d; 19 + --syntax-decval: #40a02b; 20 + --syntax-error: #d20f39; 21 + --syntax-extension: #4c4f69; 22 + --syntax-float: #40a02b; 23 + --syntax-function: #1e66f5; 24 + --syntax-import: #40a02b; 25 + --syntax-information: #04a5e5; 26 + --syntax-keyword: #8839ef; 27 + --syntax-operator: #179299; 28 + --syntax-other: #8839ef; 29 + --syntax-preprocessor: #ea76cb; 30 + --syntax-specialchar: #04a5e5; 31 + --syntax-specialstring: #ea76cb; 32 + --syntax-string: #40a02b; 33 + --syntax-variable: #8839ef; 34 + --syntax-verbatimstring: #40a02b; 35 + --syntax-warning: #df8e1d; 36 + } 37 + 38 + @media (prefers-color-scheme: dark) { 39 + :root { 40 + --syntax-alert: #f38ba8; 41 + --syntax-annotation: #fab387; 42 + --syntax-attribute: #f9e2af; 43 + --syntax-basen: #a6e3a1; 44 + --syntax-builtin: #89b4fa; 45 + --syntax-controlflow: #cba6f7; 46 + --syntax-char: #89dceb; 47 + --syntax-constant: #fab387; 48 + --syntax-comment: #6c7086; 49 + --syntax-commentvar: #585b70; 50 + --syntax-documentation: #6c7086; 51 + --syntax-datatype: #f9e2af; 52 + --syntax-decval: #a6e3a1; 53 + --syntax-error: #f38ba8; 54 + --syntax-extension: #cdd6f4; 55 + --syntax-float: #a6e3a1; 56 + --syntax-function: #89b4fa; 57 + --syntax-import: #a6e3a1; 58 + --syntax-information: #89dceb; 59 + --syntax-keyword: #cba6f7; 60 + --syntax-operator: #94e2d5; 61 + --syntax-other: #cba6f7; 62 + --syntax-preprocessor: #f5c2e7; 63 + --syntax-specialchar: #89dceb; 64 + --syntax-specialstring: #f5c2e7; 65 + --syntax-string: #a6e3a1; 66 + --syntax-variable: #cba6f7; 67 + --syntax-verbatimstring: #a6e3a1; 68 + --syntax-warning: #f9e2af; 69 + } 70 + } 71 + 72 + /* pandoc syntax highlighting classes */ 73 + code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */ 74 + code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */ 75 + code span.at { color: var(--syntax-attribute); } /* attribute */ 76 + code span.bn { color: var(--syntax-basen); } /* basen */ 77 + code span.bu { color: var(--syntax-builtin); } /* builtin */ 78 + code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */ 79 + code span.ch { color: var(--syntax-char); } /* char */ 80 + code span.cn { color: var(--syntax-constant); } /* constant */ 81 + code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */ 82 + code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */ 83 + code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */ 84 + code span.dt { color: var(--syntax-datatype); } /* datatype */ 85 + code span.dv { color: var(--syntax-decval); } /* decval */ 86 + code span.er { color: var(--syntax-error); font-weight: bold; } /* error */ 87 + code span.ex { color: var(--syntax-extension); } /* extension */ 88 + code span.fl { color: var(--syntax-float); } /* float */ 89 + code span.fu { color: var(--syntax-function); } /* function */ 90 + code span.im { color: var(--syntax-import); font-weight: bold; } /* import */ 91 + code span.in { 
color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */ 92 + code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */ 93 + code span.op { color: var(--syntax-operator); } /* operator */ 94 + code span.ot { color: var(--syntax-other); } /* other */ 95 + code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */ 96 + code span.sc { color: var(--syntax-specialchar); } /* specialchar */ 97 + code span.ss { color: var(--syntax-specialstring); } /* specialstring */ 98 + code span.st { color: var(--syntax-string); } /* string */ 99 + code span.va { color: var(--syntax-variable); } /* variable */ 100 + code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */ 101 + code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+142
docs/template.html
··· 1 + <!DOCTYPE html> 2 + <html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$> 3 + <head> 4 + <meta charset="utf-8" /> 5 + <meta name="generator" content="pandoc" /> 6 + <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" /> 7 + $for(author-meta)$ 8 + <meta name="author" content="$author-meta$" /> 9 + $endfor$ 10 + 11 + $if(date-meta)$ 12 + <meta name="dcterms.date" content="$date-meta$" /> 13 + $endif$ 14 + 15 + $if(keywords)$ 16 + <meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" /> 17 + $endif$ 18 + 19 + $if(description-meta)$ 20 + <meta name="description" content="$description-meta$" /> 21 + $endif$ 22 + 23 + <title>$pagetitle$</title> 24 + 25 + <style> 26 + $styles.css()$ 27 + </style> 28 + 29 + $for(css)$ 30 + <link rel="stylesheet" href="$css$" /> 31 + $endfor$ 32 + 33 + $for(header-includes)$ 34 + $header-includes$ 35 + $endfor$ 36 + 37 + <link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin /> 38 + 39 + </head> 40 + <body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen"> 41 + $for(include-before)$ 42 + $include-before$ 43 + $endfor$ 44 + 45 + $if(toc)$ 46 + <!-- mobile TOC trigger --> 47 + <div class="md:hidden px-6 py-4 border-b border-gray-200 dark:border-gray-700"> 48 + <button 49 + type="button" 50 + popovertarget="mobile-toc-popover" 51 + popovertargetaction="toggle" 52 + class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white" 53 + > 54 + ${ menu.svg() } 55 + $if(toc-title)$$toc-title$$else$Table of Contents$endif$ 56 + </button> 57 + </div> 58 + 59 + <div 60 + id="mobile-toc-popover" 61 + popover 62 + class="mobile-toc-popover 63 + bg-white dark:bg-gray-800 64 + border-b border-gray-200 dark:border-gray-700 65 + h-full overflow-y-auto 66 + px-6 py-4 fixed inset-x-0 top-0 w-fit max-w-4/5 m-0" 67 + > 68 + <button 69 + type="button" 70 + popovertarget="mobile-toc-popover" 71 + popovertargetaction="toggle" 72 + class="w-full flex gap-2 items-center text-sm font-semibold dark:text-white mb-4"> 73 + ${ x.svg() } 74 + $if(toc-title)$$toc-title$$else$Table of Contents$endif$ 75 + </button> 76 + ${ table-of-contents:toc.html() } 77 + </div> 78 + 79 + 80 + <!-- desktop sidebar toc --> 81 + <nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50"> 82 + $if(toc-title)$ 83 + <h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2> 84 + $endif$ 85 + ${ table-of-contents:toc.html() } 86 + </nav> 87 + $endif$ 88 + 89 + <div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col"> 90 + <main class="max-w-4xl w-full mx-auto p-6 flex-1"> 91 + $if(top)$ 92 + $-- only print title block if this is NOT the top page 93 + $else$ 94 + $if(title)$ 95 + <header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700"> 96 + <h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1> 97 + $if(subtitle)$ 98 + <p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p> 99 + $endif$ 100 + $for(author)$ 101 + <p class="text-sm text-gray-500 dark:text-gray-400">$author$</p> 102 + $endfor$ 103 + $if(date)$ 104 + <p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p> 105 + $endif$ 106 + $if(abstract)$ 107 + <div class="mt-6 p-4 bg-gray-50 rounded-lg"> 108 + <div 
class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div> 109 + <div class="text-gray-700">$abstract$</div> 110 + </div> 111 + $endif$ 112 + $endif$ 113 + </header> 114 + $endif$ 115 + <article class="prose dark:prose-invert max-w-none"> 116 + $body$ 117 + </article> 118 + </main> 119 + <nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 "> 120 + <div class="max-w-4xl mx-auto px-8 py-4"> 121 + <div class="flex justify-between gap-4"> 122 + <span class="flex-1"> 123 + $if(previous.url)$ 124 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span> 125 + <a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a> 126 + $endif$ 127 + </span> 128 + <span class="flex-1 text-right"> 129 + $if(next.url)$ 130 + <span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span> 131 + <a href="$next.url$" accesskey="n" rel="next">$next.title$</a> 132 + $endif$ 133 + </span> 134 + </div> 135 + </div> 136 + </nav> 137 + </div> 138 + $for(include-after)$ 139 + $include-after$ 140 + $endfor$ 141 + </body> 142 + </html>
+4
docs/toc.html
··· 1 + <div class="[&_ul]:space-y-6 [&_ul]:pl-0 [&_ul]:font-bold [&_ul_ul]:pl-4 [&_ul_ul]:font-normal [&_ul_ul]:space-y-2 [&_li]:space-y-2"> 2 + $table-of-contents$ 3 + </div> 4 +
+26 -9
flake.lock
··· 1 1 { 2 2 "nodes": { 3 + "actor-typeahead-src": { 4 + "flake": false, 5 + "locked": { 6 + "lastModified": 1762835797, 7 + "narHash": "sha256-heizoWUKDdar6ymfZTnj3ytcEv/L4d4fzSmtr0HlXsQ=", 8 + "ref": "refs/heads/main", 9 + "rev": "677fe7f743050a4e7f09d4a6f87bbf1325a06f6b", 10 + "revCount": 6, 11 + "type": "git", 12 + "url": "https://tangled.org/@jakelazaroff.com/actor-typeahead" 13 + }, 14 + "original": { 15 + "type": "git", 16 + "url": "https://tangled.org/@jakelazaroff.com/actor-typeahead" 17 + } 18 + }, 3 19 "flake-compat": { 4 20 "flake": false, 5 21 "locked": { ··· 19 35 "systems": "systems" 20 36 }, 21 37 "locked": { 22 - "lastModified": 1694529238, 23 - "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=", 38 + "lastModified": 1731533236, 39 + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 24 40 "owner": "numtide", 25 41 "repo": "flake-utils", 26 - "rev": "ff7b65b44d01cf9ba6a71320833626af21126384", 42 + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 27 43 "type": "github" 28 44 }, 29 45 "original": { ··· 40 56 ] 41 57 }, 42 58 "locked": { 43 - "lastModified": 1754078208, 44 - "narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=", 59 + "lastModified": 1763982521, 60 + "narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=", 45 61 "owner": "nix-community", 46 62 "repo": "gomod2nix", 47 - "rev": "7f963246a71626c7fc70b431a315c4388a0c95cf", 63 + "rev": "02e63a239d6eabd595db56852535992c898eba72", 48 64 "type": "github" 49 65 }, 50 66 "original": { ··· 134 150 }, 135 151 "nixpkgs": { 136 152 "locked": { 137 - "lastModified": 1751984180, 138 - "narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=", 153 + "lastModified": 1766070988, 154 + "narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=", 139 155 "owner": "nixos", 140 156 "repo": "nixpkgs", 141 - "rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0", 157 + "rev": "c6245e83d836d0433170a16eb185cefe0572f8b8", 142 158 "type": "github" 143 159 }, 144 160 "original": { ··· 150 166 }, 151 167 "root": { 152 168 "inputs": { 169 + "actor-typeahead-src": "actor-typeahead-src", 153 170 "flake-compat": "flake-compat", 154 171 "gomod2nix": "gomod2nix", 155 172 "htmx-src": "htmx-src",
+18 -15
flake.nix
··· 33 33 url = "https://github.com/rsms/inter/releases/download/v4.1/Inter-4.1.zip"; 34 34 flake = false; 35 35 }; 36 + actor-typeahead-src = { 37 + url = "git+https://tangled.org/@jakelazaroff.com/actor-typeahead"; 38 + flake = false; 39 + }; 36 40 ibm-plex-mono-src = { 37 41 url = "https://github.com/IBM/plex/releases/download/%40ibm%2Fplex-mono%401.1.0/ibm-plex-mono.zip"; 38 42 flake = false; ··· 54 58 inter-fonts-src, 55 59 sqlite-lib-src, 56 60 ibm-plex-mono-src, 61 + actor-typeahead-src, 57 62 ... 58 63 }: let 59 64 supportedSystems = ["x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin"]; ··· 71 76 }; 72 77 buildGoApplication = 73 78 (self.callPackage "${gomod2nix}/builder" { 74 - gomod2nix = gomod2nix.legacyPackages.${pkgs.system}.gomod2nix; 79 + gomod2nix = gomod2nix.legacyPackages.${pkgs.stdenv.hostPlatform.system}.gomod2nix; 75 80 }).buildGoApplication; 76 81 modules = ./nix/gomod2nix.toml; 77 82 sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix { 78 - inherit (pkgs) gcc; 79 83 inherit sqlite-lib-src; 80 84 }; 81 85 lexgen = self.callPackage ./nix/pkgs/lexgen.nix {inherit indigo;}; 82 86 goat = self.callPackage ./nix/pkgs/goat.nix {inherit indigo;}; 83 87 appview-static-files = self.callPackage ./nix/pkgs/appview-static-files.nix { 84 - inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src; 88 + inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src; 85 89 }; 86 90 appview = self.callPackage ./nix/pkgs/appview.nix {}; 91 + docs = self.callPackage ./nix/pkgs/docs.nix { 92 + inherit inter-fonts-src ibm-plex-mono-src lucide-src; 93 + }; 87 94 spindle = self.callPackage ./nix/pkgs/spindle.nix {}; 88 95 knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {}; 89 96 knot = self.callPackage ./nix/pkgs/knot.nix {}; 90 97 }); 91 98 in { 92 99 overlays.default = final: prev: { 93 - inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview; 100 + inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs; 94 101 }; 95 102 96 103 packages = forAllSystems (system: let ··· 99 106 staticPackages = mkPackageSet pkgs.pkgsStatic; 100 107 crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic; 101 108 in { 102 - inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib; 109 + inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs; 103 110 104 111 pkgsStatic-appview = staticPackages.appview; 105 112 pkgsStatic-knot = staticPackages.knot; ··· 151 158 nativeBuildInputs = [ 152 159 pkgs.go 153 160 pkgs.air 154 - pkgs.tilt 155 161 pkgs.gopls 156 162 pkgs.httpie 157 163 pkgs.litecli ··· 179 185 air-watcher = name: arg: 180 186 pkgs.writeShellScriptBin "run" 181 187 '' 182 - ${pkgs.air}/bin/air -c /dev/null \ 183 - -build.cmd "${pkgs.go}/bin/go build -o ./out/${name}.out ./cmd/${name}/main.go" \ 184 - -build.bin "./out/${name}.out" \ 185 - -build.args_bin "${arg}" \ 186 - -build.stop_on_error "true" \ 187 - -build.include_ext "go" 188 + export PATH=${pkgs.go}/bin:$PATH 189 + ${pkgs.air}/bin/air -c ./.air/${name}.toml \ 190 + -build.args_bin "${arg}" 188 191 ''; 189 192 tailwind-watcher = 190 193 pkgs.writeShellScriptBin "run" ··· 283 286 }: { 284 287 imports = [./nix/modules/appview.nix]; 285 288 286 - services.tangled.appview.package = lib.mkDefault self.packages.${pkgs.system}.appview; 289 + services.tangled.appview.package = lib.mkDefault 
self.packages.${pkgs.stdenv.hostPlatform.system}.appview; 287 290 }; 288 291 nixosModules.knot = { 289 292 lib, ··· 292 295 }: { 293 296 imports = [./nix/modules/knot.nix]; 294 297 295 - services.tangled.knot.package = lib.mkDefault self.packages.${pkgs.system}.knot; 298 + services.tangled.knot.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.knot; 296 299 }; 297 300 nixosModules.spindle = { 298 301 lib, ··· 301 304 }: { 302 305 imports = [./nix/modules/spindle.nix]; 303 306 304 - services.tangled.spindle.package = lib.mkDefault self.packages.${pkgs.system}.spindle; 307 + services.tangled.spindle.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.spindle; 305 308 }; 306 309 }; 307 310 }
+7 -16
go.mod
··· 1 1 module tangled.org/core 2 2 3 - go 1.24.4 3 + go 1.25.0 4 4 5 5 require ( 6 6 github.com/Blank-Xu/sql-adapter v1.1.1 7 7 github.com/alecthomas/assert/v2 v2.11.0 8 8 github.com/alecthomas/chroma/v2 v2.15.0 9 9 github.com/avast/retry-go/v4 v4.6.1 10 + github.com/blevesearch/bleve/v2 v2.5.3 10 11 github.com/bluekeyes/go-gitdiff v0.8.1 11 12 github.com/bluesky-social/indigo v0.0.0-20251003000214-3259b215110e 12 13 github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1 14 + github.com/bmatcuk/doublestar/v4 v4.9.1 13 15 github.com/carlmjohnson/versioninfo v0.22.5 14 16 github.com/casbin/casbin/v2 v2.103.0 17 + github.com/charmbracelet/log v0.4.2 15 18 github.com/cloudflare/cloudflare-go v0.115.0 16 19 github.com/cyphar/filepath-securejoin v0.4.1 17 20 github.com/dgraph-io/ristretto v0.2.0 ··· 29 32 github.com/hiddeco/sshsig v0.2.0 30 33 github.com/hpcloud/tail v1.0.0 31 34 github.com/ipfs/go-cid v0.5.0 32 - github.com/lestrrat-go/jwx/v2 v2.1.6 33 35 github.com/mattn/go-sqlite3 v1.14.24 34 36 github.com/microcosm-cc/bluemonday v1.0.27 35 37 github.com/openbao/openbao/api/v2 v2.3.0 ··· 42 44 github.com/stretchr/testify v1.10.0 43 45 github.com/urfave/cli/v3 v3.3.3 44 46 github.com/whyrusleeping/cbor-gen v0.3.1 45 - github.com/wyatt915/goldmark-treeblood v0.0.1 46 47 github.com/yuin/goldmark v1.7.13 48 + github.com/yuin/goldmark-emoji v1.0.6 47 49 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc 50 + gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab 48 51 golang.org/x/crypto v0.40.0 49 52 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b 50 53 golang.org/x/image v0.31.0 51 54 golang.org/x/net v0.42.0 52 - golang.org/x/sync v0.17.0 53 55 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da 54 56 gopkg.in/yaml.v3 v3.0.1 55 57 ) ··· 65 67 github.com/aymerick/douceur v0.2.0 // indirect 66 68 github.com/beorn7/perks v1.0.1 // indirect 67 69 github.com/bits-and-blooms/bitset v1.22.0 // indirect 68 - github.com/blevesearch/bleve/v2 v2.5.3 // indirect 69 70 github.com/blevesearch/bleve_index_api v1.2.8 // indirect 70 71 github.com/blevesearch/geo v0.2.4 // indirect 71 72 github.com/blevesearch/go-faiss v1.0.25 // indirect ··· 83 84 github.com/blevesearch/zapx/v14 v14.4.2 // indirect 84 85 github.com/blevesearch/zapx/v15 v15.4.2 // indirect 85 86 github.com/blevesearch/zapx/v16 v16.2.4 // indirect 86 - github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect 87 87 github.com/casbin/govaluate v1.3.0 // indirect 88 88 github.com/cenkalti/backoff/v4 v4.3.0 // indirect 89 89 github.com/cespare/xxhash/v2 v2.3.0 // indirect 90 90 github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect 91 91 github.com/charmbracelet/lipgloss v1.1.0 // indirect 92 - github.com/charmbracelet/log v0.4.2 // indirect 93 92 github.com/charmbracelet/x/ansi v0.8.0 // indirect 94 93 github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect 95 94 github.com/charmbracelet/x/term v0.2.1 // indirect ··· 98 97 github.com/containerd/errdefs/pkg v0.3.0 // indirect 99 98 github.com/containerd/log v0.1.0 // indirect 100 99 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 101 - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect 102 100 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 103 101 github.com/distribution/reference v0.6.0 // indirect 104 102 github.com/dlclark/regexp2 v1.11.5 // indirect ··· 152 150 github.com/kevinburke/ssh_config v1.2.0 // 
indirect 153 151 github.com/klauspost/compress v1.18.0 // indirect 154 152 github.com/klauspost/cpuid/v2 v2.3.0 // indirect 155 - github.com/lestrrat-go/blackmagic v1.0.4 // indirect 156 - github.com/lestrrat-go/httpcc v1.0.1 // indirect 157 - github.com/lestrrat-go/httprc v1.0.6 // indirect 158 - github.com/lestrrat-go/iter v1.0.2 // indirect 159 - github.com/lestrrat-go/option v1.0.1 // indirect 160 153 github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 161 154 github.com/mattn/go-isatty v0.0.20 // indirect 162 155 github.com/mattn/go-runewidth v0.0.16 // indirect ··· 191 184 github.com/prometheus/procfs v0.16.1 // indirect 192 185 github.com/rivo/uniseg v0.4.7 // indirect 193 186 github.com/ryanuber/go-glob v1.0.0 // indirect 194 - github.com/segmentio/asm v1.2.0 // indirect 195 187 github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect 196 188 github.com/spaolacci/murmur3 v1.1.0 // indirect 197 189 github.com/vmihailenco/go-tinylfu v0.2.2 // indirect 198 190 github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect 199 191 github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect 200 - github.com/wyatt915/treeblood v0.1.16 // indirect 201 192 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect 202 - gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab // indirect 203 193 gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect 204 194 gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect 205 195 go.etcd.io/bbolt v1.4.0 // indirect ··· 213 203 go.uber.org/atomic v1.11.0 // indirect 214 204 go.uber.org/multierr v1.11.0 // indirect 215 205 go.uber.org/zap v1.27.0 // indirect 206 + golang.org/x/sync v0.17.0 // indirect 216 207 golang.org/x/sys v0.34.0 // indirect 217 208 golang.org/x/text v0.29.0 // indirect 218 209 golang.org/x/time v0.12.0 // indirect
+2 -21
go.sum
··· 71 71 github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1 h1:CFvRtYNSnWRAi/98M3O466t9dYuwtesNbu6FVPymRrA= 72 72 github.com/bluesky-social/jetstream v0.0.0-20241210005130-ea96859b93d1/go.mod h1:WiYEeyJSdUwqoaZ71KJSpTblemUCpwJfh5oVXplK6T4= 73 73 github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 74 - github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= 75 74 github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 76 75 github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= 77 76 github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= ··· 126 125 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 127 126 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 128 127 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 129 - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= 130 - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= 131 128 github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= 132 129 github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= 133 130 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= ··· 330 327 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 331 328 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 332 329 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 333 - github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= 334 - github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= 335 - github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= 336 - github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= 337 - github.com/lestrrat-go/httprc v1.0.6 h1:qgmgIRhpvBqexMJjA/PmwSvhNk679oqD1RbovdCGW8k= 338 - github.com/lestrrat-go/httprc v1.0.6/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= 339 - github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= 340 - github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= 341 - github.com/lestrrat-go/jwx/v2 v2.1.6 h1:hxM1gfDILk/l5ylers6BX/Eq1m/pnxe9NBwW6lVfecA= 342 - github.com/lestrrat-go/jwx/v2 v2.1.6/go.mod h1:Y722kU5r/8mV7fYDifjug0r8FK8mZdw0K0GpJw/l8pU= 343 - github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= 344 - github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= 345 330 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= 346 331 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= 347 332 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= ··· 466 451 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 467 452 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= 468 453 
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= 469 - github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= 470 - github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= 471 454 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= 472 455 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= 473 456 github.com/sethvargo/go-envconfig v1.1.0 h1:cWZiJxeTm7AlCvzGXrEXaSTCNgip5oJepekh/BOQuog= ··· 512 495 github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= 513 496 github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0= 514 497 github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= 515 - github.com/wyatt915/goldmark-treeblood v0.0.1 h1:6vLJcjFrHgE4ASu2ga4hqIQmbvQLU37v53jlHZ3pqDs= 516 - github.com/wyatt915/goldmark-treeblood v0.0.1/go.mod h1:SmcJp5EBaV17rroNlgNQFydYwy0+fv85CUr/ZaCz208= 517 - github.com/wyatt915/treeblood v0.1.16 h1:byxNbWZhnPDxdTp7W5kQhCeaY8RBVmojTFz1tEHgg8Y= 518 - github.com/wyatt915/treeblood v0.1.16/go.mod h1:i7+yhhmzdDP17/97pIsOSffw74EK/xk+qJ0029cSXUY= 519 498 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= 520 499 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= 521 500 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= ··· 526 505 github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 527 506 github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= 528 507 github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= 508 + github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= 509 + github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= 529 510 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= 530 511 github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= 531 512 gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+4 -4
hook/hook.go
··· 48 48 }, 49 49 Commands: []*cli.Command{ 50 50 { 51 - Name: "post-recieve", 52 - Usage: "sends a post-recieve hook to the knot (waits for stdin)", 53 - Action: postRecieve, 51 + Name: "post-receive", 52 + Usage: "sends a post-receive hook to the knot (waits for stdin)", 53 + Action: postReceive, 54 54 }, 55 55 }, 56 56 } 57 57 } 58 58 59 - func postRecieve(ctx context.Context, cmd *cli.Command) error { 59 + func postReceive(ctx context.Context, cmd *cli.Command) error { 60 60 gitDir := cmd.String("git-dir") 61 61 userDid := cmd.String("user-did") 62 62 userHandle := cmd.String("user-handle")
+1 -1
hook/setup.go
··· 138 138 option_var="GIT_PUSH_OPTION_$i" 139 139 push_options+=(-push-option "${!option_var}") 140 140 done 141 - %s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve 141 + %s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive 142 142 `, executablePath, config.internalApi) 143 143 144 144 return os.WriteFile(hookPath, []byte(hookContent), 0755)
+39
input.css
··· 161 161 @apply no-underline; 162 162 } 163 163 164 + .prose a.mention { 165 + @apply no-underline hover:underline font-bold; 166 + } 167 + 164 168 .prose li { 165 169 @apply my-0 py-0; 166 170 } ··· 241 245 details[data-callout] > summary::-webkit-details-marker { 242 246 display: none; 243 247 } 248 + 244 249 } 245 250 @layer utilities { 246 251 .error { ··· 250 255 @apply py-1 text-gray-900 dark:text-gray-100; 251 256 } 252 257 } 258 + 253 259 } 254 260 255 261 /* Background */ ··· 924 930 text-decoration: underline; 925 931 } 926 932 } 933 + 934 + actor-typeahead { 935 + --color-background: #ffffff; 936 + --color-border: #d1d5db; 937 + --color-shadow: #000000; 938 + --color-hover: #f9fafb; 939 + --color-avatar-fallback: #e5e7eb; 940 + --radius: 0.0; 941 + --padding-menu: 0.0rem; 942 + z-index: 1000; 943 + } 944 + 945 + actor-typeahead::part(handle) { 946 + color: #111827; 947 + } 948 + 949 + actor-typeahead::part(menu) { 950 + box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1); 951 + } 952 + 953 + @media (prefers-color-scheme: dark) { 954 + actor-typeahead { 955 + --color-background: #1f2937; 956 + --color-border: #4b5563; 957 + --color-shadow: #000000; 958 + --color-hover: #374151; 959 + --color-avatar-fallback: #4b5563; 960 + } 961 + 962 + actor-typeahead::part(handle) { 963 + color: #f9fafb; 964 + } 965 + }
+15 -4
jetstream/jetstream.go
··· 72 72 // existing instances of the closure when j.WantedDids is mutated 73 73 return func(ctx context.Context, evt *models.Event) error { 74 74 75 + j.mu.RLock() 75 76 // empty filter => all dids allowed 76 - if len(j.wantedDids) == 0 { 77 - return processFunc(ctx, evt) 77 + matches := len(j.wantedDids) == 0 78 + if !matches { 79 + if _, ok := j.wantedDids[evt.Did]; ok { 80 + matches = true 81 + } 78 82 } 83 + j.mu.RUnlock() 79 84 80 - if _, ok := j.wantedDids[evt.Did]; ok { 85 + if matches { 81 86 return processFunc(ctx, evt) 82 87 } else { 83 88 return nil ··· 122 127 123 128 go func() { 124 129 if j.waitForDid { 125 - for len(j.wantedDids) == 0 { 130 + for { 131 + j.mu.RLock() 132 + hasDid := len(j.wantedDids) != 0 133 + j.mu.RUnlock() 134 + if hasDid { 135 + break 136 + } 126 137 time.Sleep(time.Second) 127 138 } 128 139 }
+81
knotserver/db/db.go
··· 1 + package db 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "log/slog" 7 + "strings" 8 + 9 + _ "github.com/mattn/go-sqlite3" 10 + "tangled.org/core/log" 11 + ) 12 + 13 + type DB struct { 14 + db *sql.DB 15 + logger *slog.Logger 16 + } 17 + 18 + func Setup(ctx context.Context, dbPath string) (*DB, error) { 19 + // https://github.com/mattn/go-sqlite3#connection-string 20 + opts := []string{ 21 + "_foreign_keys=1", 22 + "_journal_mode=WAL", 23 + "_synchronous=NORMAL", 24 + "_auto_vacuum=incremental", 25 + } 26 + 27 + logger := log.FromContext(ctx) 28 + logger = log.SubLogger(logger, "db") 29 + 30 + db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 31 + if err != nil { 32 + return nil, err 33 + } 34 + 35 + conn, err := db.Conn(ctx) 36 + if err != nil { 37 + return nil, err 38 + } 39 + defer conn.Close() 40 + 41 + _, err = conn.ExecContext(ctx, ` 42 + create table if not exists known_dids ( 43 + did text primary key 44 + ); 45 + 46 + create table if not exists public_keys ( 47 + id integer primary key autoincrement, 48 + did text not null, 49 + key text not null, 50 + created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 51 + unique(did, key), 52 + foreign key (did) references known_dids(did) on delete cascade 53 + ); 54 + 55 + create table if not exists _jetstream ( 56 + id integer primary key autoincrement, 57 + last_time_us integer not null 58 + ); 59 + 60 + create table if not exists events ( 61 + rkey text not null, 62 + nsid text not null, 63 + event text not null, -- json 64 + created integer not null default (strftime('%s', 'now')), 65 + primary key (rkey, nsid) 66 + ); 67 + 68 + create table if not exists migrations ( 69 + id integer primary key autoincrement, 70 + name text unique 71 + ); 72 + `) 73 + if err != nil { 74 + return nil, err 75 + } 76 + 77 + return &DB{ 78 + db: db, 79 + logger: logger, 80 + }, nil 81 + }
-64
knotserver/db/init.go
··· 1 - package db 2 - 3 - import ( 4 - "database/sql" 5 - "strings" 6 - 7 - _ "github.com/mattn/go-sqlite3" 8 - ) 9 - 10 - type DB struct { 11 - db *sql.DB 12 - } 13 - 14 - func Setup(dbPath string) (*DB, error) { 15 - // https://github.com/mattn/go-sqlite3#connection-string 16 - opts := []string{ 17 - "_foreign_keys=1", 18 - "_journal_mode=WAL", 19 - "_synchronous=NORMAL", 20 - "_auto_vacuum=incremental", 21 - } 22 - 23 - db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&")) 24 - if err != nil { 25 - return nil, err 26 - } 27 - 28 - // NOTE: If any other migration is added here, you MUST 29 - // copy the pattern in appview: use a single sql.Conn 30 - // for every migration. 31 - 32 - _, err = db.Exec(` 33 - create table if not exists known_dids ( 34 - did text primary key 35 - ); 36 - 37 - create table if not exists public_keys ( 38 - id integer primary key autoincrement, 39 - did text not null, 40 - key text not null, 41 - created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')), 42 - unique(did, key), 43 - foreign key (did) references known_dids(did) on delete cascade 44 - ); 45 - 46 - create table if not exists _jetstream ( 47 - id integer primary key autoincrement, 48 - last_time_us integer not null 49 - ); 50 - 51 - create table if not exists events ( 52 - rkey text not null, 53 - nsid text not null, 54 - event text not null, -- json 55 - created integer not null default (strftime('%s', 'now')), 56 - primary key (rkey, nsid) 57 - ); 58 - `) 59 - if err != nil { 60 - return nil, err 61 - } 62 - 63 - return &DB{db: db}, nil 64 - }
+1 -17
knotserver/git/diff.go
··· 77 77 nd.Diff = append(nd.Diff, ndiff) 78 78 } 79 79 80 - nd.Stat.FilesChanged = len(diffs) 81 - nd.Commit.This = c.Hash.String() 82 - nd.Commit.PGPSignature = c.PGPSignature 83 - nd.Commit.Committer = c.Committer 84 - nd.Commit.Tree = c.TreeHash.String() 85 - 86 - if parent.Hash.IsZero() { 87 - nd.Commit.Parent = "" 88 - } else { 89 - nd.Commit.Parent = parent.Hash.String() 90 - } 91 - nd.Commit.Author = c.Author 92 - nd.Commit.Message = c.Message 93 - 94 - if v, ok := c.ExtraHeaders["change-id"]; ok { 95 - nd.Commit.ChangedId = string(v) 96 - } 80 + nd.Commit.FromGoGitCommit(c) 97 81 98 82 return &nd, nil 99 83 }
+38 -2
knotserver/git/fork.go
··· 3 3 import ( 4 4 "errors" 5 5 "fmt" 6 + "log/slog" 7 + "net/url" 6 8 "os/exec" 9 + "path/filepath" 7 10 8 11 "github.com/go-git/go-git/v5" 9 12 "github.com/go-git/go-git/v5/config" 13 + knotconfig "tangled.org/core/knotserver/config" 10 14 ) 11 15 12 - func Fork(repoPath, source string) error { 13 - cloneCmd := exec.Command("git", "clone", "--bare", source, repoPath) 16 + func Fork(repoPath, source string, cfg *knotconfig.Config) error { 17 + u, err := url.Parse(source) 18 + if err != nil { 19 + return fmt.Errorf("failed to parse source URL: %w", err) 20 + } 21 + 22 + if o := optimizeClone(u, cfg); o != nil { 23 + u = o 24 + } 25 + 26 + cloneCmd := exec.Command("git", "clone", "--bare", u.String(), repoPath) 14 27 if err := cloneCmd.Run(); err != nil { 15 28 return fmt.Errorf("failed to bare clone repository: %w", err) 16 29 } ··· 21 34 } 22 35 23 36 return nil 37 + } 38 + 39 + func optimizeClone(u *url.URL, cfg *knotconfig.Config) *url.URL { 40 + // only optimize if it's the same host 41 + if u.Host != cfg.Server.Hostname { 42 + return nil 43 + } 44 + 45 + local := filepath.Join(cfg.Repo.ScanPath, u.Path) 46 + 47 + // sanity check: is there a git repo there? 48 + if _, err := PlainOpen(local); err != nil { 49 + return nil 50 + } 51 + 52 + // create optimized file:// URL 53 + optimized := &url.URL{ 54 + Scheme: "file", 55 + Path: local, 56 + } 57 + 58 + slog.Debug("performing local clone", "url", optimized.String()) 59 + return optimized 24 60 } 25 61 26 62 func (g *GitRepo) Sync() error {
+60 -2
knotserver/git/git.go
··· 3 3 import ( 4 4 "archive/tar" 5 5 "bytes" 6 + "errors" 6 7 "fmt" 7 8 "io" 8 9 "io/fs" ··· 12 13 "time" 13 14 14 15 "github.com/go-git/go-git/v5" 16 + "github.com/go-git/go-git/v5/config" 15 17 "github.com/go-git/go-git/v5/plumbing" 16 18 "github.com/go-git/go-git/v5/plumbing/object" 17 19 ) 18 20 19 21 var ( 20 - ErrBinaryFile = fmt.Errorf("binary file") 21 - ErrNotBinaryFile = fmt.Errorf("not binary file") 22 + ErrBinaryFile = errors.New("binary file") 23 + ErrNotBinaryFile = errors.New("not binary file") 24 + ErrMissingGitModules = errors.New("no .gitmodules file found") 25 + ErrInvalidGitModules = errors.New("invalid .gitmodules file") 26 + ErrNotSubmodule = errors.New("path is not a submodule") 22 27 ) 23 28 24 29 type GitRepo struct { ··· 188 193 defer reader.Close() 189 194 190 195 return io.ReadAll(reader) 196 + } 197 + 198 + // read and parse .gitmodules 199 + func (g *GitRepo) Submodules() (*config.Modules, error) { 200 + c, err := g.r.CommitObject(g.h) 201 + if err != nil { 202 + return nil, fmt.Errorf("commit object: %w", err) 203 + } 204 + 205 + tree, err := c.Tree() 206 + if err != nil { 207 + return nil, fmt.Errorf("tree: %w", err) 208 + } 209 + 210 + // read .gitmodules file 211 + modulesEntry, err := tree.FindEntry(".gitmodules") 212 + if err != nil { 213 + return nil, fmt.Errorf("%w: %w", ErrMissingGitModules, err) 214 + } 215 + 216 + modulesFile, err := tree.TreeEntryFile(modulesEntry) 217 + if err != nil { 218 + return nil, fmt.Errorf("%w: failed to read file: %w", ErrInvalidGitModules, err) 219 + } 220 + 221 + content, err := modulesFile.Contents() 222 + if err != nil { 223 + return nil, fmt.Errorf("%w: failed to read contents: %w", ErrInvalidGitModules, err) 224 + } 225 + 226 + // parse .gitmodules 227 + modules := config.NewModules() 228 + if err = modules.Unmarshal([]byte(content)); err != nil { 229 + return nil, fmt.Errorf("%w: failed to parse: %w", ErrInvalidGitModules, err) 230 + } 231 + 232 + return modules, nil 233 + } 234 + 235 + func (g *GitRepo) Submodule(path string) (*config.Submodule, error) { 236 + modules, err := g.Submodules() 237 + if err != nil { 238 + return nil, err 239 + } 240 + 241 + for _, submodule := range modules.Submodules { 242 + if submodule.Path == path { 243 + return submodule, nil 244 + } 245 + } 246 + 247 + // path is not a submodule 248 + return nil, ErrNotSubmodule 191 249 } 192 250 193 251 func (g *GitRepo) Branch(name string) (*plumbing.Reference, error) {
+13 -1
knotserver/git/service/service.go
··· 95 95 return c.RunService(cmd) 96 96 } 97 97 98 + func (c *ServiceCommand) UploadArchive() error { 99 + cmd := exec.Command("git", []string{ 100 + "upload-archive", 101 + ".", 102 + }...) 103 + 104 + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} 105 + cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol)) 106 + cmd.Dir = c.Dir 107 + 108 + return c.RunService(cmd) 109 + } 110 + 98 111 func (c *ServiceCommand) UploadPack() error { 99 112 cmd := exec.Command("git", []string{ 100 - "-c", "uploadpack.allowFilter=true", 101 113 "upload-pack", 102 114 "--stateless-rpc", 103 115 ".",
+4 -13
knotserver/git/tree.go
··· 7 7 "path" 8 8 "time" 9 9 10 + "github.com/go-git/go-git/v5/plumbing/filemode" 10 11 "github.com/go-git/go-git/v5/plumbing/object" 11 12 "tangled.org/core/types" 12 13 ) ··· 53 54 } 54 55 55 56 for _, e := range subtree.Entries { 56 - mode, _ := e.Mode.ToOSFileMode() 57 57 sz, _ := subtree.Size(e.Name) 58 - 59 58 fpath := path.Join(parent, e.Name) 60 59 61 60 var lastCommit *types.LastCommitInfo ··· 69 68 70 69 nts = append(nts, types.NiceTree{ 71 70 Name: e.Name, 72 - Mode: mode.String(), 73 - IsFile: e.Mode.IsFile(), 71 + Mode: e.Mode.String(), 74 72 Size: sz, 75 73 LastCommit: lastCommit, 76 74 }) ··· 126 124 default: 127 125 } 128 126 129 - mode, err := e.Mode.ToOSFileMode() 130 - if err != nil { 131 - // TODO: log this 132 - continue 133 - } 134 - 135 127 if e.Mode.IsFile() { 136 - err = cb(e, currentTree, root) 137 - if errors.Is(err, TerminateWalk) { 128 + if err := cb(e, currentTree, root); errors.Is(err, TerminateWalk) { 138 129 return err 139 130 } 140 131 } 141 132 142 133 // e is a directory 143 - if mode.IsDir() { 134 + if e.Mode == filemode.Dir { 144 135 subtree, err := currentTree.Tree(e.Name) 145 136 if err != nil { 146 137 return fmt.Errorf("sub tree %s: %w", e.Name, err)
+47
knotserver/git.go
··· 56 56 } 57 57 } 58 58 59 + func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) { 60 + did := chi.URLParam(r, "did") 61 + name := chi.URLParam(r, "name") 62 + repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name)) 63 + if err != nil { 64 + gitError(w, err.Error(), http.StatusInternalServerError) 65 + h.l.Error("git: failed to secure join repo path", "handler", "UploadArchive", "error", err) 66 + return 67 + } 68 + 69 + const expectedContentType = "application/x-git-upload-archive-request" 70 + contentType := r.Header.Get("Content-Type") 71 + if contentType != expectedContentType { 72 + gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType) 73 + } 74 + 75 + var bodyReader io.ReadCloser = r.Body 76 + if r.Header.Get("Content-Encoding") == "gzip" { 77 + gzipReader, err := gzip.NewReader(r.Body) 78 + if err != nil { 79 + gitError(w, err.Error(), http.StatusInternalServerError) 80 + h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err) 81 + return 82 + } 83 + defer gzipReader.Close() 84 + bodyReader = gzipReader 85 + } 86 + 87 + w.Header().Set("Content-Type", "application/x-git-upload-archive-result") 88 + 89 + h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo) 90 + 91 + cmd := service.ServiceCommand{ 92 + GitProtocol: r.Header.Get("Git-Protocol"), 93 + Dir: repo, 94 + Stdout: w, 95 + Stdin: bodyReader, 96 + } 97 + 98 + w.WriteHeader(http.StatusOK) 99 + 100 + if err := cmd.UploadArchive(); err != nil { 101 + h.l.Error("git: failed to execute git-upload-archive", "handler", "UploadArchive", "error", err) 102 + return 103 + } 104 + } 105 + 59 106 func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) { 60 107 did := chi.URLParam(r, "did") 61 108 name := chi.URLParam(r, "name")
+1 -1
knotserver/ingester.go
··· 161 161 162 162 var pipeline workflow.RawPipeline 163 163 for _, e := range workflowDir { 164 - if !e.IsFile { 164 + if !e.IsFile() { 165 165 continue 166 166 } 167 167
+1 -1
knotserver/internal.go
··· 277 277 278 278 var pipeline workflow.RawPipeline 279 279 for _, e := range workflowDir { 280 - if !e.IsFile { 280 + if !e.IsFile() { 281 281 continue 282 282 } 283 283
+1
knotserver/router.go
··· 82 82 r.Route("/{name}", func(r chi.Router) { 83 83 // routes for git operations 84 84 r.Get("/info/refs", h.InfoRefs) 85 + r.Post("/git-upload-archive", h.UploadArchive) 85 86 r.Post("/git-upload-pack", h.UploadPack) 86 87 r.Post("/git-receive-pack", h.ReceivePack) 87 88 })
+1 -1
knotserver/server.go
··· 64 64 logger.Info("running in dev mode, signature verification is disabled") 65 65 } 66 66 67 - db, err := db.Setup(c.Server.DBPath) 67 + db, err := db.Setup(ctx, c.Server.DBPath) 68 68 if err != nil { 69 69 return fmt.Errorf("failed to load db: %w", err) 70 70 }
+1 -1
knotserver/xrpc/create_repo.go
··· 84 84 repoPath, _ := securejoin.SecureJoin(h.Config.Repo.ScanPath, relativeRepoPath) 85 85 86 86 if data.Source != nil && *data.Source != "" { 87 - err = git.Fork(repoPath, *data.Source) 87 + err = git.Fork(repoPath, *data.Source, h.Config) 88 88 if err != nil { 89 89 l.Error("forking repo", "error", err.Error()) 90 90 writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
+21 -2
knotserver/xrpc/repo_blob.go
··· 42 42 return 43 43 } 44 44 45 + // first check if this path is a submodule 46 + submodule, err := gr.Submodule(treePath) 47 + if err != nil { 48 + // this is okay, continue and try to treat it as a regular file 49 + } else { 50 + response := tangled.RepoBlob_Output{ 51 + Ref: ref, 52 + Path: treePath, 53 + Submodule: &tangled.RepoBlob_Submodule{ 54 + Name: submodule.Name, 55 + Url: submodule.URL, 56 + Branch: &submodule.Branch, 57 + }, 58 + } 59 + writeJson(w, response) 60 + return 61 + } 62 + 45 63 contents, err := gr.RawContent(treePath) 46 64 if err != nil { 47 65 x.Logger.Error("file content", "error", err.Error(), "treePath", treePath) ··· 101 119 var encoding string 102 120 103 121 isBinary := !isTextual(mimeType) 122 + size := int64(len(contents)) 104 123 105 124 if isBinary { 106 125 content = base64.StdEncoding.EncodeToString(contents) ··· 113 132 response := tangled.RepoBlob_Output{ 114 133 Ref: ref, 115 134 Path: treePath, 116 - Content: content, 135 + Content: &content, 117 136 Encoding: &encoding, 118 - Size: &[]int64{int64(len(contents))}[0], 137 + Size: &size, 119 138 IsBinary: &isBinary, 120 139 } 121 140
+6 -1
knotserver/xrpc/repo_log.go
··· 62 62 return 63 63 } 64 64 65 + tcommits := make([]types.Commit, len(commits)) 66 + for i, c := range commits { 67 + tcommits[i].FromGoGitCommit(c) 68 + } 69 + 65 70 // Create response using existing types.RepoLogResponse 66 71 response := types.RepoLogResponse{ 67 - Commits: commits, 72 + Commits: tcommits, 68 73 Ref: ref, 69 74 Page: (offset / limit) + 1, 70 75 PerPage: limit,
+3 -5
knotserver/xrpc/repo_tree.go
··· 67 67 treeEntries := make([]*tangled.RepoTree_TreeEntry, len(files)) 68 68 for i, file := range files { 69 69 entry := &tangled.RepoTree_TreeEntry{ 70 - Name: file.Name, 71 - Mode: file.Mode, 72 - Size: file.Size, 73 - Is_file: file.IsFile, 74 - Is_subtree: file.IsSubtree, 70 + Name: file.Name, 71 + Mode: file.Mode, 72 + Size: file.Size, 75 73 } 76 74 77 75 if file.LastCommit != nil {
+14
lexicons/issue/comment.json
··· 29 29 "replyTo": { 30 30 "type": "string", 31 31 "format": "at-uri" 32 + }, 33 + "mentions": { 34 + "type": "array", 35 + "items": { 36 + "type": "string", 37 + "format": "did" 38 + } 39 + }, 40 + "references": { 41 + "type": "array", 42 + "items": { 43 + "type": "string", 44 + "format": "at-uri" 45 + } 32 46 } 33 47 } 34 48 }
+14
lexicons/issue/issue.json
··· 24 24 "createdAt": { 25 25 "type": "string", 26 26 "format": "datetime" 27 + }, 28 + "mentions": { 29 + "type": "array", 30 + "items": { 31 + "type": "string", 32 + "format": "did" 33 + } 34 + }, 35 + "references": { 36 + "type": "array", 37 + "items": { 38 + "type": "string", 39 + "format": "at-uri" 40 + } 27 41 } 28 42 } 29 43 }
+14
lexicons/pulls/comment.json
··· 25 25 "createdAt": { 26 26 "type": "string", 27 27 "format": "datetime" 28 + }, 29 + "mentions": { 30 + "type": "array", 31 + "items": { 32 + "type": "string", 33 + "format": "did" 34 + } 35 + }, 36 + "references": { 37 + "type": "array", 38 + "items": { 39 + "type": "string", 40 + "format": "at-uri" 41 + } 28 42 } 29 43 } 30 44 }
+22 -2
lexicons/pulls/pull.json
··· 12 12 "required": [ 13 13 "target", 14 14 "title", 15 - "patch", 15 + "patchBlob", 16 16 "createdAt" 17 17 ], 18 18 "properties": { ··· 27 27 "type": "string" 28 28 }, 29 29 "patch": { 30 - "type": "string" 30 + "type": "string", 31 + "description": "(deprecated) use patchBlob instead" 32 + }, 33 + "patchBlob": { 34 + "type": "blob", 35 + "accept": "text/x-patch", 36 + "description": "patch content" 31 37 }, 32 38 "source": { 33 39 "type": "ref", ··· 36 42 "createdAt": { 37 43 "type": "string", 38 44 "format": "datetime" 45 + }, 46 + "mentions": { 47 + "type": "array", 48 + "items": { 49 + "type": "string", 50 + "format": "did" 51 + } 52 + }, 53 + "references": { 54 + "type": "array", 55 + "items": { 56 + "type": "string", 57 + "format": "at-uri" 58 + } 39 59 } 40 60 } 41 61 }
+49 -5
lexicons/repo/blob.json
··· 6 6 "type": "query", 7 7 "parameters": { 8 8 "type": "params", 9 - "required": ["repo", "ref", "path"], 9 + "required": [ 10 + "repo", 11 + "ref", 12 + "path" 13 + ], 10 14 "properties": { 11 15 "repo": { 12 16 "type": "string", ··· 31 35 "encoding": "application/json", 32 36 "schema": { 33 37 "type": "object", 34 - "required": ["ref", "path", "content"], 38 + "required": [ 39 + "ref", 40 + "path" 41 + ], 35 42 "properties": { 36 43 "ref": { 37 44 "type": "string", ··· 48 55 "encoding": { 49 56 "type": "string", 50 57 "description": "Content encoding", 51 - "enum": ["utf-8", "base64"] 58 + "enum": [ 59 + "utf-8", 60 + "base64" 61 + ] 52 62 }, 53 63 "size": { 54 64 "type": "integer", ··· 61 71 "mimeType": { 62 72 "type": "string", 63 73 "description": "MIME type of the file" 74 + }, 75 + "submodule": { 76 + "type": "ref", 77 + "ref": "#submodule", 78 + "description": "Submodule information if path is a submodule" 64 79 }, 65 80 "lastCommit": { 66 81 "type": "ref", ··· 90 105 }, 91 106 "lastCommit": { 92 107 "type": "object", 93 - "required": ["hash", "message", "when"], 108 + "required": [ 109 + "hash", 110 + "message", 111 + "when" 112 + ], 94 113 "properties": { 95 114 "hash": { 96 115 "type": "string", ··· 117 136 }, 118 137 "signature": { 119 138 "type": "object", 120 - "required": ["name", "email", "when"], 139 + "required": [ 140 + "name", 141 + "email", 142 + "when" 143 + ], 121 144 "properties": { 122 145 "name": { 123 146 "type": "string", ··· 131 154 "type": "string", 132 155 "format": "datetime", 133 156 "description": "Author timestamp" 157 + } 158 + } 159 + }, 160 + "submodule": { 161 + "type": "object", 162 + "required": [ 163 + "name", 164 + "url" 165 + ], 166 + "properties": { 167 + "name": { 168 + "type": "string", 169 + "description": "Submodule name" 170 + }, 171 + "url": { 172 + "type": "string", 173 + "description": "Submodule repository URL" 174 + }, 175 + "branch": { 176 + "type": "string", 177 + "description": "Branch to track in the submodule" 134 178 } 135 179 } 136 180 }
+1 -9
lexicons/repo/tree.json
··· 91 91 }, 92 92 "treeEntry": { 93 93 "type": "object", 94 - "required": ["name", "mode", "size", "is_file", "is_subtree"], 94 + "required": ["name", "mode", "size"], 95 95 "properties": { 96 96 "name": { 97 97 "type": "string", ··· 104 104 "size": { 105 105 "type": "integer", 106 106 "description": "File size in bytes" 107 - }, 108 - "is_file": { 109 - "type": "boolean", 110 - "description": "Whether this entry is a file" 111 - }, 112 - "is_subtree": { 113 - "type": "boolean", 114 - "description": "Whether this entry is a directory/subtree" 115 107 }, 116 108 "last_commit": { 117 109 "type": "ref",
-54
local-infra/Caddyfile
··· 1 - { 2 - storage file_system /data/ 3 - debug 4 - pki { 5 - ca localtangled { 6 - name "LocalTangledCA" 7 - } 8 - } 9 - auto_https disable_redirects 10 - } 11 - 12 - plc.tngl.boltless.dev { 13 - tls { 14 - issuer internal { 15 - ca localtangled 16 - } 17 - } 18 - reverse_proxy http://plc:8080 19 - } 20 - 21 - *.pds.tngl.boltless.dev, pds.tngl.boltless.dev { 22 - tls { 23 - issuer internal { 24 - ca localtangled 25 - } 26 - } 27 - reverse_proxy http://pds:3000 28 - } 29 - 30 - jetstream.tngl.boltless.dev { 31 - tls { 32 - issuer internal { 33 - ca localtangled 34 - } 35 - } 36 - reverse_proxy http://jetstream:6008 37 - } 38 - 39 - http://knot.tngl.boltless.dev { 40 - reverse_proxy http://host.docker.internal:6000 41 - } 42 - 43 - https://knot.tngl.boltless.dev { 44 - tls { 45 - issuer internal { 46 - ca localtangled 47 - } 48 - } 49 - reverse_proxy http://host.docker.internal:6000 50 - } 51 - 52 - http://spindle.tngl.boltless.dev { 53 - reverse_proxy http://host.docker.internal:6555 54 - }
-12
local-infra/cert/localtangled/intermediate.crt
··· 1 - -----BEGIN CERTIFICATE----- 2 - MIIBuTCCAV+gAwIBAgIQR5mkZ/TBSWtRFqrMyeVrNDAKBggqhkjOPQQDAjApMScw 3 - JQYDVQQDEx5Mb2NhbFRhbmdsZWRDQSAtIDIwMjUgRUNDIFJvb3QwHhcNMjUxMTAz 4 - MTAyMDA0WhcNMjUxMTEwMTAyMDA0WjAsMSowKAYDVQQDEyFMb2NhbFRhbmdsZWRD 5 - QSAtIEVDQyBJbnRlcm1lZGlhdGUwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARm 6 - 892T608pFY+dmgkEMFdvq9hj+PlR7o7Vogc+Ca5LeHB846PrZJmxdvHW8Up67hP3 7 - ZpmNjnZQvgOEEjLmquvio2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgw 8 - BgEB/wIBADAdBgNVHQ4EFgQUn8d4TdPCYP0r1Jc09QF4/GKkSSowHwYDVR0jBBgw 9 - FoAUKSXx08/YgAxM+u7pYQcs/WHJIRAwCgYIKoZIzj0EAwIDSAAwRQIgTZeKVo6k 10 - ZBZwx2sx+T46LyjYc5xK/DCQJbLWsgoc/lECIQDNtduyds5J/BfBvnVzO/oK9+0H 11 - oRvV+fcWRAQHGKF4Ew== 12 - -----END CERTIFICATE-----
-5
local-infra/cert/localtangled/intermediate.key
··· 1 - -----BEGIN EC PRIVATE KEY----- 2 - MHcCAQEEIOoepsyQeMkbA05rTh3EwvqHWs5tzTTib7r8fyt2fUo8oAoGCCqGSM49 3 - AwEHoUQDQgAEZvPdk+tPKRWPnZoJBDBXb6vYY/j5Ue6O1aIHPgmuS3hwfOOj62SZ 4 - sXbx1vFKeu4T92aZjY52UL4DhBIy5qrr4g== 5 - -----END EC PRIVATE KEY-----
-11
local-infra/cert/localtangled/root.crt
··· 1 - -----BEGIN CERTIFICATE----- 2 - MIIBlTCCATygAwIBAgIRAMDTcwNxYDMgtUNC5LkCeEQwCgYIKoZIzj0EAwIwKTEn 3 - MCUGA1UEAxMeTG9jYWxUYW5nbGVkQ0EgLSAyMDI1IEVDQyBSb290MB4XDTI1MTAx 4 - NzE2MTE0NVoXDTM1MDgyNjE2MTE0NVowKTEnMCUGA1UEAxMeTG9jYWxUYW5nbGVk 5 - Q0EgLSAyMDI1IEVDQyBSb290MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE7rFM 6 - 4oNfT0UMqMuc3L60TCLeTd58WFSUYnKl7R1HOHDWeWZhhoNdWguXJSHhFPiWmQ5E 7 - +fiI7KvDAVQGHzfUAqNFMEMwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYB 8 - Af8CAQEwHQYDVR0OBBYEFCkl8dPP2IAMTPru6WEHLP1hySEQMAoGCCqGSM49BAMC 9 - A0cAMEQCIFjSGjvie1gO/JuNtP2HqeUHQNEh82K1fXdks54up3KEAiBWQDaOYeZ2 10 - zVTiKe8ZQHpH3glXsIS0USsxeKaohMp0zA== 11 - -----END CERTIFICATE-----
-5
local-infra/cert/localtangled/root.key
··· 1 - -----BEGIN EC PRIVATE KEY----- 2 - MHcCAQEEIBqEj1iG3q+OLBgHjWQ3UkvKjq4sy5ej47syIYWn/Ql/oAoGCCqGSM49 3 - AwEHoUQDQgAE7rFM4oNfT0UMqMuc3L60TCLeTd58WFSUYnKl7R1HOHDWeWZhhoNd 4 - WguXJSHhFPiWmQ5E+fiI7KvDAVQGHzfUAg== 5 - -----END EC PRIVATE KEY-----
-75
local-infra/docker-compose.yml
··· 1 - name: tangled-local-infra 2 - services: 3 - caddy: 4 - container_name: caddy 5 - image: caddy:2 6 - depends_on: 7 - - pds 8 - restart: unless-stopped 9 - cap_add: 10 - - NET_ADMIN 11 - ports: 12 - - "80:80" 13 - - "443:443" 14 - - "443:443/udp" 15 - volumes: 16 - - ./Caddyfile:/etc/caddy/Caddyfile 17 - - ./cert/localtangled:/data/pki/authorities/localtangled 18 - - caddy_data:/data 19 - - caddy_config:/config 20 - 21 - plc: 22 - image: ghcr.io/bluesky-social/did-method-plc:plc-f2ab7516bac5bc0f3f86842fa94e996bd1b3815b 23 - # did-method-plc only provides linux/amd64 24 - platform: linux/amd64 25 - container_name: plc 26 - restart: unless-stopped 27 - depends_on: 28 - - plc_db 29 - environment: 30 - DEBUG_MODE: 1 31 - LOG_ENABLED: "true" 32 - LOG_LEVEL: "debug" 33 - LOG_DESTINATION: 1 34 - DB_CREDS_JSON: &DB_CREDS_JSON '{"username":"pg","password":"password","host":"plc_db","port":5432}' 35 - DB_MIGRATE_CREDS_JSON: *DB_CREDS_JSON 36 - PLC_VERSION: 0.0.1 37 - PORT: 8080 38 - 39 - plc_db: 40 - image: postgres:14.4-alpine 41 - container_name: plc_db 42 - environment: 43 - - POSTGRES_USER=pg 44 - - POSTGRES_PASSWORD=password 45 - - PGPORT=5432 46 - volumes: 47 - - plc:/var/lib/postgresql/data 48 - 49 - pds: 50 - container_name: pds 51 - image: ghcr.io/bluesky-social/pds:0.4 52 - restart: unless-stopped 53 - volumes: 54 - - pds:/pds 55 - env_file: 56 - - ./pds.env 57 - 58 - jetstream: 59 - container_name: jetstream 60 - image: ghcr.io/bluesky-social/jetstream:sha-0ab10bd 61 - restart: unless-stopped 62 - volumes: 63 - - jetstream:/data 64 - environment: 65 - - JETSTREAM_DATA_DIR=/data 66 - # livness check interval to restart when no events are received (default: 15sec) 67 - - JETSTREAM_LIVENESS_TTL=300s 68 - - JETSTREAM_WS_URL=ws://pds:3000/xrpc/com.atproto.sync.subscribeRepos 69 - 70 - volumes: 71 - caddy_config: 72 - caddy_data: 73 - plc: 74 - pds: 75 - jetstream:
-17
local-infra/pds.env
··· 1 - PDS_JWT_SECRET=8cae8bffcc73d9932819650791e4e89a 2 - PDS_ADMIN_PASSWORD=d6a902588cd93bee1af83f924f60cfd3 3 - PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX=2e92e336a50a618458e1097d94a1db86ec3fd8829d7735020cbae80625c761d7 4 - 5 - LOG_ENABLED=true 6 - 7 - # PDS_BSKY_APP_VIEW_DID=did:web:api.bsky.app 8 - # PDS_BSKY_APP_VIEW_URL=https://api.bsky.app 9 - 10 - PDS_DATA_DIRECTORY=/pds 11 - PDS_BLOBSTORE_DISK_LOCATION=/pds/blocks 12 - 13 - PDS_DID_PLC_URL=http://plc:8080 14 - PDS_HOSTNAME=pds.tngl.boltless.dev 15 - 16 - # PDS_REPORT_SERVICE_DID=did:plc:ar7c4by46qjdydhdevvrndac 17 - # PDS_REPORT_SERVICE_URL=https://mod.bsky.app
-9
local-infra/readme.md
··· 1 - run compose 2 - ``` 3 - docker compose up -d 4 - ``` 5 - 6 - trust the cert (macOS) 7 - ``` 8 - sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain ./local-infra/cert/localtangled/root.crt 9 - ```
-63
local-infra/scripts/create-test-account.sh
··· 1 - #!/bin/bash 2 - set -o errexit 3 - set -o nounset 4 - set -o pipefail 5 - 6 - source "$(dirname "$0")/../pds.env" 7 - 8 - # curl a URL and fail if the request fails. 9 - function curl_cmd_get { 10 - curl --fail --silent --show-error "$@" 11 - } 12 - 13 - # curl a URL and fail if the request fails. 14 - function curl_cmd_post { 15 - curl --fail --silent --show-error --request POST --header "Content-Type: application/json" "$@" 16 - } 17 - 18 - # curl a URL but do not fail if the request fails. 19 - function curl_cmd_post_nofail { 20 - curl --silent --show-error --request POST --header "Content-Type: application/json" "$@" 21 - } 22 - 23 - USERNAME="${1:-}" 24 - 25 - if [[ "${USERNAME}" == "" ]]; then 26 - read -p "Enter a username: " USERNAME 27 - fi 28 - 29 - if [[ "${USERNAME}" == "" ]]; then 30 - echo "ERROR: missing USERNAME parameter." >/dev/stderr 31 - echo "Usage: $0 ${SUBCOMMAND} <USERNAME>" >/dev/stderr 32 - exit 1 33 - fi 34 - 35 - PASSWORD="password" 36 - INVITE_CODE="$(curl_cmd_post \ 37 - --user "admin:${PDS_ADMIN_PASSWORD}" \ 38 - --data '{"useCount": 1}' \ 39 - "https://${PDS_HOSTNAME}/xrpc/com.atproto.server.createInviteCode" | jq --raw-output '.code' 40 - )" 41 - RESULT="$(curl_cmd_post_nofail \ 42 - --data "{\"email\":\"${USERNAME}@${PDS_HOSTNAME}\", \"handle\":\"${USERNAME}.${PDS_HOSTNAME}\", \"password\":\"${PASSWORD}\", \"inviteCode\":\"${INVITE_CODE}\"}" \ 43 - "https://${PDS_HOSTNAME}/xrpc/com.atproto.server.createAccount" 44 - )" 45 - 46 - DID="$(echo $RESULT | jq --raw-output '.did')" 47 - if [[ "${DID}" != did:* ]]; then 48 - ERR="$(echo ${RESULT} | jq --raw-output '.message')" 49 - echo "ERROR: ${ERR}" >/dev/stderr 50 - echo "Usage: $0 <EMAIL> <HANDLE>" >/dev/stderr 51 - exit 1 52 - fi 53 - 54 - echo 55 - echo "Account created successfully!" 56 - echo "-----------------------------" 57 - echo "Handle : ${USERNAME}.${PDS_HOSTNAME}" 58 - echo "DID : ${DID}" 59 - echo "Password : ${PASSWORD}" 60 - echo "-----------------------------" 61 - echo "This is a test account with an insecure password." 62 - echo "Make sure it's only used for development." 63 - echo
+3 -30
nix/gomod2nix.toml
··· 165 165 [mod."github.com/davecgh/go-spew"] 166 166 version = "v1.1.2-0.20180830191138-d8f796af33cc" 167 167 hash = "sha256-fV9oI51xjHdOmEx6+dlq7Ku2Ag+m/bmbzPo6A4Y74qc=" 168 - [mod."github.com/decred/dcrd/dcrec/secp256k1/v4"] 169 - version = "v4.4.0" 170 - hash = "sha256-qrhEIwhDll3cxoVpMbm1NQ9/HTI42S7ms8Buzlo5HCg=" 171 168 [mod."github.com/dgraph-io/ristretto"] 172 169 version = "v0.2.0" 173 170 hash = "sha256-bnpxX+oO/Qf7IJevA0gsbloVoqRx+5bh7RQ9d9eLNYw=" ··· 373 370 [mod."github.com/klauspost/cpuid/v2"] 374 371 version = "v2.3.0" 375 372 hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc=" 376 - [mod."github.com/lestrrat-go/blackmagic"] 377 - version = "v1.0.4" 378 - hash = "sha256-HmWOpwoPDNMwLdOi7onNn3Sb+ZsAa3Ai3gVBbXmQ0e8=" 379 - [mod."github.com/lestrrat-go/httpcc"] 380 - version = "v1.0.1" 381 - hash = "sha256-SMRSwJpqDIs/xL0l2e8vP0W65qtCHX2wigcOeqPJmos=" 382 - [mod."github.com/lestrrat-go/httprc"] 383 - version = "v1.0.6" 384 - hash = "sha256-mfZzePEhrmyyu/avEBd2MsDXyto8dq5+fyu5lA8GUWM=" 385 - [mod."github.com/lestrrat-go/iter"] 386 - version = "v1.0.2" 387 - hash = "sha256-30tErRf7Qu/NOAt1YURXY/XJSA6sCr6hYQfO8QqHrtw=" 388 - [mod."github.com/lestrrat-go/jwx/v2"] 389 - version = "v2.1.6" 390 - hash = "sha256-0LszXRZIba+X8AOrs3T4uanAUafBdlVB8/MpUNEFpbc=" 391 - [mod."github.com/lestrrat-go/option"] 392 - version = "v1.0.1" 393 - hash = "sha256-jVcIYYVsxElIS/l2akEw32vdEPR8+anR6oeT1FoYULI=" 394 373 [mod."github.com/lucasb-eyer/go-colorful"] 395 374 version = "v1.2.0" 396 375 hash = "sha256-Gg9dDJFCTaHrKHRR1SrJgZ8fWieJkybljybkI9x0gyE=" ··· 511 490 [mod."github.com/ryanuber/go-glob"] 512 491 version = "v1.0.0" 513 492 hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY=" 514 - [mod."github.com/segmentio/asm"] 515 - version = "v1.2.0" 516 - hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs=" 517 493 [mod."github.com/sergi/go-diff"] 518 494 version = "v1.1.0" 519 495 hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY=" ··· 548 524 [mod."github.com/whyrusleeping/cbor-gen"] 549 525 version = "v0.3.1" 550 526 hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc=" 551 - [mod."github.com/wyatt915/goldmark-treeblood"] 552 - version = "v0.0.1" 553 - hash = "sha256-hAVFaktO02MiiqZFffr8ZlvFEfwxw4Y84OZ2t7e5G7g=" 554 - [mod."github.com/wyatt915/treeblood"] 555 - version = "v0.1.16" 556 - hash = "sha256-T68sa+iVx0qY7dDjXEAJvRWQEGXYIpUsf9tcWwO1tIw=" 557 527 [mod."github.com/xo/terminfo"] 558 528 version = "v0.0.0-20220910002029-abceb7e1c41e" 559 529 hash = "sha256-GyCDxxMQhXA3Pi/TsWXpA8cX5akEoZV7CFx4RO3rARU=" 560 530 [mod."github.com/yuin/goldmark"] 561 531 version = "v1.7.13" 562 532 hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE=" 533 + [mod."github.com/yuin/goldmark-emoji"] 534 + version = "v1.0.6" 535 + hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY=" 563 536 [mod."github.com/yuin/goldmark-highlighting/v2"] 564 537 version = "v2.0.0-20230729083705-37449abec8cc" 565 538 hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
+278 -12
nix/modules/appview.nix
··· 13 13 default = false; 14 14 description = "Enable tangled appview"; 15 15 }; 16 + 16 17 package = mkOption { 17 18 type = types.package; 18 19 description = "Package to use for the appview"; 19 20 }; 21 + 22 + # core configuration 20 23 port = mkOption { 21 - type = types.int; 24 + type = types.port; 22 25 default = 3000; 23 26 description = "Port to run the appview on"; 24 27 }; 28 + 29 + listenAddr = mkOption { 30 + type = types.str; 31 + default = "0.0.0.0:${toString cfg.port}"; 32 + description = "Listen address for the appview service"; 33 + }; 34 + 35 + dbPath = mkOption { 36 + type = types.str; 37 + default = "/var/lib/appview/appview.db"; 38 + description = "Path to the SQLite database file"; 39 + }; 40 + 41 + appviewHost = mkOption { 42 + type = types.str; 43 + default = "https://tangled.org"; 44 + example = "https://example.com"; 45 + description = "Public host URL for the appview instance"; 46 + }; 47 + 48 + appviewName = mkOption { 49 + type = types.str; 50 + default = "Tangled"; 51 + description = "Display name for the appview instance"; 52 + }; 53 + 54 + dev = mkOption { 55 + type = types.bool; 56 + default = false; 57 + description = "Enable development mode"; 58 + }; 59 + 60 + disallowedNicknamesFile = mkOption { 61 + type = types.nullOr types.path; 62 + default = null; 63 + description = "Path to file containing disallowed nicknames"; 64 + }; 65 + 66 + # redis configuration 67 + redis = { 68 + addr = mkOption { 69 + type = types.str; 70 + default = "localhost:6379"; 71 + description = "Redis server address"; 72 + }; 73 + 74 + db = mkOption { 75 + type = types.int; 76 + default = 0; 77 + description = "Redis database number"; 78 + }; 79 + }; 80 + 81 + # jetstream configuration 82 + jetstream = { 83 + endpoint = mkOption { 84 + type = types.str; 85 + default = "wss://jetstream1.us-east.bsky.network/subscribe"; 86 + description = "Jetstream WebSocket endpoint"; 87 + }; 88 + }; 89 + 90 + # knotstream consumer configuration 91 + knotstream = { 92 + retryInterval = mkOption { 93 + type = types.str; 94 + default = "60s"; 95 + description = "Initial retry interval for knotstream consumer"; 96 + }; 97 + 98 + maxRetryInterval = mkOption { 99 + type = types.str; 100 + default = "120m"; 101 + description = "Maximum retry interval for knotstream consumer"; 102 + }; 103 + 104 + connectionTimeout = mkOption { 105 + type = types.str; 106 + default = "5s"; 107 + description = "Connection timeout for knotstream consumer"; 108 + }; 109 + 110 + workerCount = mkOption { 111 + type = types.int; 112 + default = 64; 113 + description = "Number of workers for knotstream consumer"; 114 + }; 115 + 116 + queueSize = mkOption { 117 + type = types.int; 118 + default = 100; 119 + description = "Queue size for knotstream consumer"; 120 + }; 121 + }; 122 + 123 + # spindlestream consumer configuration 124 + spindlestream = { 125 + retryInterval = mkOption { 126 + type = types.str; 127 + default = "60s"; 128 + description = "Initial retry interval for spindlestream consumer"; 129 + }; 130 + 131 + maxRetryInterval = mkOption { 132 + type = types.str; 133 + default = "120m"; 134 + description = "Maximum retry interval for spindlestream consumer"; 135 + }; 136 + 137 + connectionTimeout = mkOption { 138 + type = types.str; 139 + default = "5s"; 140 + description = "Connection timeout for spindlestream consumer"; 141 + }; 142 + 143 + workerCount = mkOption { 144 + type = types.int; 145 + default = 64; 146 + description = "Number of workers for spindlestream consumer"; 147 + }; 148 + 149 + queueSize = 
mkOption { 150 + type = types.int; 151 + default = 100; 152 + description = "Queue size for spindlestream consumer"; 153 + }; 154 + }; 155 + 156 + # resend configuration 157 + resend = { 158 + sentFrom = mkOption { 159 + type = types.str; 160 + default = "noreply@notifs.tangled.sh"; 161 + description = "Email address to send notifications from"; 162 + }; 163 + }; 164 + 165 + # posthog configuration 166 + posthog = { 167 + endpoint = mkOption { 168 + type = types.str; 169 + default = "https://eu.i.posthog.com"; 170 + description = "PostHog API endpoint"; 171 + }; 172 + }; 173 + 174 + # camo configuration 175 + camo = { 176 + host = mkOption { 177 + type = types.str; 178 + default = "https://camo.tangled.sh"; 179 + description = "Camo proxy host URL"; 180 + }; 181 + }; 182 + 183 + # avatar configuration 184 + avatar = { 185 + host = mkOption { 186 + type = types.str; 187 + default = "https://avatar.tangled.sh"; 188 + description = "Avatar service host URL"; 189 + }; 190 + }; 191 + 192 + plc = { 193 + url = mkOption { 194 + type = types.str; 195 + default = "https://plc.directory"; 196 + description = "PLC directory URL"; 197 + }; 198 + }; 199 + 200 + pds = { 201 + host = mkOption { 202 + type = types.str; 203 + default = "https://tngl.sh"; 204 + description = "PDS host URL"; 205 + }; 206 + }; 207 + 208 + label = { 209 + defaults = mkOption { 210 + type = types.listOf types.str; 211 + default = [ 212 + "at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/wontfix" 213 + "at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/good-first-issue" 214 + "at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/duplicate" 215 + "at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/documentation" 216 + "at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/assignee" 217 + ]; 218 + description = "Default label definitions"; 219 + }; 220 + 221 + goodFirstIssue = mkOption { 222 + type = types.str; 223 + default = "at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/good-first-issue"; 224 + description = "Good first issue label definition"; 225 + }; 226 + }; 227 + 25 228 environmentFile = mkOption { 26 229 type = with types; nullOr path; 27 230 default = null; 28 - example = "/etc-/appview.env"; 231 + example = "/etc/appview.env"; 29 232 description = '' 30 233 Additional environment file as defined in {manpage}`systemd.exec(5)`. 31 234 32 - Sensitive secrets such as {env}`TANGLED_COOKIE_SECRET` may be 33 - passed to the service without makeing them world readable in the 34 - nix store. 35 - 235 + Sensitive secrets such as {env}`TANGLED_COOKIE_SECRET`, 236 + {env}`TANGLED_OAUTH_CLIENT_SECRET`, {env}`TANGLED_RESEND_API_KEY`, 237 + {env}`TANGLED_CAMO_SHARED_SECRET`, {env}`TANGLED_AVATAR_SHARED_SECRET`, 238 + {env}`TANGLED_REDIS_PASS`, {env}`TANGLED_PDS_ADMIN_SECRET`, 239 + {env}`TANGLED_CLOUDFLARE_API_TOKEN`, {env}`TANGLED_CLOUDFLARE_ZONE_ID`, 240 + {env}`TANGLED_CLOUDFLARE_TURNSTILE_SITE_KEY`, 241 + {env}`TANGLED_CLOUDFLARE_TURNSTILE_SECRET_KEY`, 242 + {env}`TANGLED_POSTHOG_API_KEY`, {env}`TANGLED_APP_PASSWORD`, 243 + and {env}`TANGLED_ALT_APP_PASSWORD` may be passed to the service 244 + without making them world readable in the nix store. 
36 245 ''; 37 246 }; 38 247 }; ··· 47 256 systemd.services.appview = { 48 257 description = "tangled appview service"; 49 258 wantedBy = ["multi-user.target"]; 50 - after = ["redis-appview.service"]; 259 + after = ["redis-appview.service" "network-online.target"]; 51 260 requires = ["redis-appview.service"]; 261 + wants = ["network-online.target"]; 52 262 53 263 serviceConfig = { 54 - ListenStream = "0.0.0.0:${toString cfg.port}"; 264 + Type = "simple"; 55 265 ExecStart = "${cfg.package}/bin/appview"; 56 266 Restart = "always"; 57 - EnvironmentFile = optional (cfg.environmentFile != null) cfg.environmentFile; 58 - }; 267 + RestartSec = "10s"; 268 + EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile; 269 + 270 + # state directory 271 + StateDirectory = "appview"; 272 + WorkingDirectory = "/var/lib/appview"; 59 273 60 - environment = { 61 - TANGLED_DB_PATH = "appview.db"; 274 + # security hardening 275 + NoNewPrivileges = true; 276 + PrivateTmp = true; 277 + ProtectSystem = "strict"; 278 + ProtectHome = true; 279 + ReadWritePaths = ["/var/lib/appview"]; 62 280 }; 281 + 282 + environment = 283 + { 284 + TANGLED_DB_PATH = cfg.dbPath; 285 + TANGLED_LISTEN_ADDR = cfg.listenAddr; 286 + TANGLED_APPVIEW_HOST = cfg.appviewHost; 287 + TANGLED_APPVIEW_NAME = cfg.appviewName; 288 + TANGLED_DEV = 289 + if cfg.dev 290 + then "true" 291 + else "false"; 292 + } 293 + // optionalAttrs (cfg.disallowedNicknamesFile != null) { 294 + TANGLED_DISALLOWED_NICKNAMES_FILE = cfg.disallowedNicknamesFile; 295 + } 296 + // { 297 + TANGLED_REDIS_ADDR = cfg.redis.addr; 298 + TANGLED_REDIS_DB = toString cfg.redis.db; 299 + 300 + TANGLED_JETSTREAM_ENDPOINT = cfg.jetstream.endpoint; 301 + 302 + TANGLED_KNOTSTREAM_RETRY_INTERVAL = cfg.knotstream.retryInterval; 303 + TANGLED_KNOTSTREAM_MAX_RETRY_INTERVAL = cfg.knotstream.maxRetryInterval; 304 + TANGLED_KNOTSTREAM_CONNECTION_TIMEOUT = cfg.knotstream.connectionTimeout; 305 + TANGLED_KNOTSTREAM_WORKER_COUNT = toString cfg.knotstream.workerCount; 306 + TANGLED_KNOTSTREAM_QUEUE_SIZE = toString cfg.knotstream.queueSize; 307 + 308 + TANGLED_SPINDLESTREAM_RETRY_INTERVAL = cfg.spindlestream.retryInterval; 309 + TANGLED_SPINDLESTREAM_MAX_RETRY_INTERVAL = cfg.spindlestream.maxRetryInterval; 310 + TANGLED_SPINDLESTREAM_CONNECTION_TIMEOUT = cfg.spindlestream.connectionTimeout; 311 + TANGLED_SPINDLESTREAM_WORKER_COUNT = toString cfg.spindlestream.workerCount; 312 + TANGLED_SPINDLESTREAM_QUEUE_SIZE = toString cfg.spindlestream.queueSize; 313 + 314 + TANGLED_RESEND_SENT_FROM = cfg.resend.sentFrom; 315 + 316 + TANGLED_POSTHOG_ENDPOINT = cfg.posthog.endpoint; 317 + 318 + TANGLED_CAMO_HOST = cfg.camo.host; 319 + 320 + TANGLED_AVATAR_HOST = cfg.avatar.host; 321 + 322 + TANGLED_PLC_URL = cfg.plc.url; 323 + 324 + TANGLED_PDS_HOST = cfg.pds.host; 325 + 326 + TANGLED_LABEL_DEFAULTS = concatStringsSep "," cfg.label.defaults; 327 + TANGLED_LABEL_GFI = cfg.label.goodFirstIssue; 328 + }; 63 329 }; 64 330 }; 65 331 }
+60 -2
nix/modules/knot.nix
··· 51 51 description = "Path where repositories are scanned from"; 52 52 }; 53 53 54 + readme = mkOption { 55 + type = types.listOf types.str; 56 + default = [ 57 + "README.md" 58 + "readme.md" 59 + "README" 60 + "readme" 61 + "README.markdown" 62 + "readme.markdown" 63 + "README.txt" 64 + "readme.txt" 65 + "README.rst" 66 + "readme.rst" 67 + "README.org" 68 + "readme.org" 69 + "README.asciidoc" 70 + "readme.asciidoc" 71 + ]; 72 + description = "List of README filenames to look for (in priority order)"; 73 + }; 74 + 54 75 mainBranch = mkOption { 55 76 type = types.str; 56 77 default = "main"; ··· 58 79 }; 59 80 }; 60 81 82 + git = { 83 + userName = mkOption { 84 + type = types.str; 85 + default = "Tangled"; 86 + description = "Git user name used as committer"; 87 + }; 88 + 89 + userEmail = mkOption { 90 + type = types.str; 91 + default = "noreply@tangled.org"; 92 + description = "Git user email used as committer"; 93 + }; 94 + }; 95 + 61 96 motd = mkOption { 62 97 type = types.nullOr types.str; 63 98 default = null; ··· 123 158 description = "Jetstream endpoint to subscribe to"; 124 159 }; 125 160 161 + logDids = mkOption { 162 + type = types.bool; 163 + default = true; 164 + description = "Enable logging of DIDs"; 165 + }; 166 + 126 167 dev = mkOption { 127 168 type = types.bool; 128 169 default = false; ··· 154 195 Match User ${cfg.gitUser} 155 196 AuthorizedKeysCommand /etc/ssh/keyfetch_wrapper 156 197 AuthorizedKeysCommandUser nobody 198 + ChallengeResponseAuthentication no 199 + PasswordAuthentication no 157 200 ''; 158 201 }; 159 202 ··· 190 233 mkdir -p "${cfg.stateDir}/.config/git" 191 234 cat > "${cfg.stateDir}/.config/git/config" << EOF 192 235 [user] 193 - name = Git User 194 - email = git@example.com 236 + name = ${cfg.git.userName} 237 + email = ${cfg.git.userEmail} 195 238 [receive] 196 239 advertisePushOptions = true 240 + [uploadpack] 241 + allowFilter = true 197 242 EOF 198 243 ${setMotd} 199 244 chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.stateDir}" ··· 205 250 WorkingDirectory = cfg.stateDir; 206 251 Environment = [ 207 252 "KNOT_REPO_SCAN_PATH=${cfg.repo.scanPath}" 253 + "KNOT_REPO_README=${concatStringsSep "," cfg.repo.readme}" 208 254 "KNOT_REPO_MAIN_BRANCH=${cfg.repo.mainBranch}" 255 + "KNOT_GIT_USER_NAME=${cfg.git.userName}" 256 + "KNOT_GIT_USER_EMAIL=${cfg.git.userEmail}" 209 257 "APPVIEW_ENDPOINT=${cfg.appviewEndpoint}" 210 258 "KNOT_SERVER_INTERNAL_LISTEN_ADDR=${cfg.server.internalListenAddr}" 211 259 "KNOT_SERVER_LISTEN_ADDR=${cfg.server.listenAddr}" ··· 214 262 "KNOT_SERVER_PLC_URL=${cfg.server.plcUrl}" 215 263 "KNOT_SERVER_JETSTREAM_ENDPOINT=${cfg.server.jetstreamEndpoint}" 216 264 "KNOT_SERVER_OWNER=${cfg.server.owner}" 265 + "KNOT_SERVER_LOG_DIDS=${ 266 + if cfg.server.logDids 267 + then "true" 268 + else "false" 269 + }" 270 + "KNOT_SERVER_DEV=${ 271 + if cfg.server.dev 272 + then "true" 273 + else "false" 274 + }" 217 275 ]; 218 276 ExecStart = "${cfg.package}/bin/knot server"; 219 277 Restart = "always";
+2
nix/pkgs/appview-static-files.nix
··· 5 5 lucide-src, 6 6 inter-fonts-src, 7 7 ibm-plex-mono-src, 8 + actor-typeahead-src, 8 9 sqlite-lib, 9 10 tailwindcss, 10 11 src, ··· 24 25 cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 fonts/ 25 26 cp -f ${inter-fonts-src}/InterVariable*.ttf fonts/ 26 27 cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 fonts/ 28 + cp -f ${actor-typeahead-src}/actor-typeahead.js . 27 29 # tailwindcss -c $src/tailwind.config.js -i $src/input.css -o tw.css won't work 28 30 # for whatever reason (produces broken css), so we are doing this instead 29 31 cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/tw.css
+41
nix/pkgs/docs.nix
··· 1 + { 2 + pandoc, 3 + tailwindcss, 4 + runCommandLocal, 5 + inter-fonts-src, 6 + ibm-plex-mono-src, 7 + lucide-src, 8 + src, 9 + }: 10 + runCommandLocal "docs" {} '' 11 + mkdir -p working 12 + 13 + # copy templates, themes, styles, filters to working directory 14 + cp ${src}/docs/*.html working/ 15 + cp ${src}/docs/*.theme working/ 16 + cp ${src}/docs/*.css working/ 17 + 18 + # icons 19 + cp -rf ${lucide-src}/*.svg working/ 20 + 21 + # content 22 + ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \ 23 + -o $out/ \ 24 + -t chunkedhtml \ 25 + --variable toc \ 26 + --toc-depth=2 \ 27 + --css=stylesheet.css \ 28 + --chunk-template="%i.html" \ 29 + --highlight-style=working/highlight.theme \ 30 + --template=working/template.html 31 + 32 + # fonts 33 + mkdir -p $out/static/fonts 34 + cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/ 35 + cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/ 36 + cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/ 37 + cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/ 38 + 39 + # styles 40 + cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css 41 + ''
+1 -1
nix/pkgs/knot-unwrapped.nix
··· 4 4 sqlite-lib, 5 5 src, 6 6 }: let 7 - version = "1.9.1-alpha"; 7 + version = "1.11.0-alpha"; 8 8 in 9 9 buildGoApplication { 10 10 pname = "knot";
+7 -5
nix/pkgs/sqlite-lib.nix
··· 1 1 { 2 - gcc, 3 2 stdenv, 4 3 sqlite-lib-src, 5 4 }: 6 5 stdenv.mkDerivation { 7 6 name = "sqlite-lib"; 8 7 src = sqlite-lib-src; 9 - nativeBuildInputs = [gcc]; 8 + 10 9 buildPhase = '' 11 - gcc -c sqlite3.c 12 - ar rcs libsqlite3.a sqlite3.o 13 - ranlib libsqlite3.a 10 + $CC -c sqlite3.c 11 + $AR rcs libsqlite3.a sqlite3.o 12 + $RANLIB libsqlite3.a 13 + ''; 14 + 15 + installPhase = '' 14 16 mkdir -p $out/include $out/lib 15 17 cp *.h $out/include 16 18 cp libsqlite3.a $out/lib
+6 -11
nix/vm.nix
··· 8 8 var = builtins.getEnv name; 9 9 in 10 10 if var == "" 11 - then throw "\$${name} must be defined, see docs/hacking.md for more details" 11 + then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details" 12 12 else var; 13 13 envVarOr = name: default: let 14 14 var = builtins.getEnv name; ··· 48 48 # knot 49 49 { 50 50 from = "host"; 51 - host.port = 6000; 52 - guest.port = 6000; 51 + host.port = 6444; 52 + guest.port = 6444; 53 53 } 54 54 # spindle 55 55 { ··· 79 79 }; 80 80 # This is fine because any and all ports that are forwarded to host are explicitly marked above, we don't need a separate guest firewall 81 81 networking.firewall.enable = false; 82 - services.dnsmasq.enable = true; 83 - services.dnsmasq.settings.address = "/tngl.boltless.dev/10.0.2.2"; 84 - security.pki.certificates = [ 85 - (builtins.readFile ../local-infra/cert/localtangled/root.crt) 86 - ]; 87 82 time.timeZone = "Europe/London"; 88 83 services.getty.autologinUser = "root"; 89 84 environment.systemPackages = with pkgs; [curl vim git sqlite litecli]; ··· 92 87 motd = "Welcome to the development knot!\n"; 93 88 server = { 94 89 owner = envVar "TANGLED_VM_KNOT_OWNER"; 95 - hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6000"; 90 + hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6444"; 96 91 plcUrl = plcUrl; 97 92 jetstreamEndpoint = jetstream; 98 - listenAddr = "0.0.0.0:6000"; 93 + listenAddr = "0.0.0.0:6444"; 99 94 }; 100 95 }; 101 96 services.tangled.spindle = { 102 97 enable = true; 103 98 server = { 104 99 owner = envVar "TANGLED_VM_SPINDLE_OWNER"; 105 - hostname = envVarOr "TANGLED_VM_SPINDLE_OWNER" "localhost:6555"; 100 + hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555"; 106 101 plcUrl = plcUrl; 107 102 jetstreamEndpoint = jetstream; 108 103 listenAddr = "0.0.0.0:6555";
+122
orm/orm.go
··· 1 + package orm 2 + 3 + import ( 4 + "context" 5 + "database/sql" 6 + "fmt" 7 + "log/slog" 8 + "reflect" 9 + "strings" 10 + ) 11 + 12 + type migrationFn = func(*sql.Tx) error 13 + 14 + func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error { 15 + logger = logger.With("migration", name) 16 + 17 + tx, err := c.BeginTx(context.Background(), nil) 18 + if err != nil { 19 + return err 20 + } 21 + defer tx.Rollback() 22 + 23 + var exists bool 24 + err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists) 25 + if err != nil { 26 + return err 27 + } 28 + 29 + if !exists { 30 + // run migration 31 + err = migrationFn(tx) 32 + if err != nil { 33 + logger.Error("failed to run migration", "err", err) 34 + return err 35 + } 36 + 37 + // mark migration as complete 38 + _, err = tx.Exec("insert into migrations (name) values (?)", name) 39 + if err != nil { 40 + logger.Error("failed to mark migration as complete", "err", err) 41 + return err 42 + } 43 + 44 + // commit the transaction 45 + if err := tx.Commit(); err != nil { 46 + return err 47 + } 48 + 49 + logger.Info("migration applied successfully") 50 + } else { 51 + logger.Warn("skipped migration, already applied") 52 + } 53 + 54 + return nil 55 + } 56 + 57 + type Filter struct { 58 + Key string 59 + arg any 60 + Cmp string 61 + } 62 + 63 + func newFilter(key, cmp string, arg any) Filter { 64 + return Filter{ 65 + Key: key, 66 + arg: arg, 67 + Cmp: cmp, 68 + } 69 + } 70 + 71 + func FilterEq(key string, arg any) Filter { return newFilter(key, "=", arg) } 72 + func FilterNotEq(key string, arg any) Filter { return newFilter(key, "<>", arg) } 73 + func FilterGte(key string, arg any) Filter { return newFilter(key, ">=", arg) } 74 + func FilterLte(key string, arg any) Filter { return newFilter(key, "<=", arg) } 75 + func FilterIs(key string, arg any) Filter { return newFilter(key, "is", arg) } 76 + func FilterIsNot(key string, arg any) Filter { return newFilter(key, "is not", arg) } 77 + func FilterIn(key string, arg any) Filter { return newFilter(key, "in", arg) } 78 + func FilterLike(key string, arg any) Filter { return newFilter(key, "like", arg) } 79 + func FilterNotLike(key string, arg any) Filter { return newFilter(key, "not like", arg) } 80 + func FilterContains(key string, arg any) Filter { 81 + return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg)) 82 + } 83 + 84 + func (f Filter) Condition() string { 85 + rv := reflect.ValueOf(f.arg) 86 + kind := rv.Kind() 87 + 88 + // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)` 89 + if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 90 + if rv.Len() == 0 { 91 + // always false 92 + return "1 = 0" 93 + } 94 + 95 + placeholders := make([]string, rv.Len()) 96 + for i := range placeholders { 97 + placeholders[i] = "?" 98 + } 99 + 100 + return fmt.Sprintf("%s %s (%s)", f.Key, f.Cmp, strings.Join(placeholders, ", ")) 101 + } 102 + 103 + return fmt.Sprintf("%s %s ?", f.Key, f.Cmp) 104 + } 105 + 106 + func (f Filter) Arg() []any { 107 + rv := reflect.ValueOf(f.arg) 108 + kind := rv.Kind() 109 + if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array { 110 + if rv.Len() == 0 { 111 + return nil 112 + } 113 + 114 + out := make([]any, rv.Len()) 115 + for i := range rv.Len() { 116 + out[i] = rv.Index(i).Interface() 117 + } 118 + return out 119 + } 120 + 121 + return []any{f.arg} 122 + }
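A minimal usage sketch of the Filter helpers introduced in orm/orm.go above. The import path tangled.org/core/orm and the query assembly are assumptions for illustration only; the events table name is borrowed from knotserver/db/db.go and the filter values are made up.

package main

import (
	"fmt"
	"strings"

	"tangled.org/core/orm" // assumed import path
)

func main() {
	// each Filter compiles to a condition string plus its placeholder args;
	// FilterIn over a slice expands to one placeholder per element.
	filters := []orm.Filter{
		orm.FilterEq("nsid", "sh.tangled.repo"),      // "nsid = ?"
		orm.FilterGte("created", 1700000000),         // "created >= ?"
		orm.FilterIn("rkey", []string{"abc", "def"}), // "rkey in (?, ?)"
	}

	var conds []string
	var args []any
	for _, f := range filters {
		conds = append(conds, f.Condition())
		args = append(args, f.Arg()...)
	}

	query := "select * from events where " + strings.Join(conds, " and ")
	fmt.Println(query) // select * from events where nsid = ? and created >= ? and rkey in (?, ?)
	fmt.Println(args)  // [sh.tangled.repo 1700000000 abc def]
}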
-1
patchutil/patchutil.go
··· 296 296 } 297 297 298 298 nd := types.NiceDiff{} 299 - nd.Commit.Parent = targetBranch 300 299 301 300 for _, d := range diffs { 302 301 ndiff := types.Diff{}
+8
rbac/rbac.go
··· 285 285 return e.E.Enforce(user, domain, repo, "repo:delete") 286 286 } 287 287 288 + func (e *Enforcer) IsRepoOwner(user, domain, repo string) (bool, error) { 289 + return e.E.Enforce(user, domain, repo, "repo:owner") 290 + } 291 + 292 + func (e *Enforcer) IsRepoCollaborator(user, domain, repo string) (bool, error) { 293 + return e.E.Enforce(user, domain, repo, "repo:collaborator") 294 + } 295 + 288 296 func (e *Enforcer) IsPushAllowed(user, domain, repo string) (bool, error) { 289 297 return e.E.Enforce(user, domain, repo, "repo:push") 290 298 }
+3 -3
readme.md
··· 10 10 11 11 ## docs 12 12 13 - * [knot hosting guide](/docs/knot-hosting.md) 14 - * [contributing guide](/docs/contributing.md) **please read before opening a PR!** 15 - * [hacking on tangled](/docs/hacking.md) 13 + - [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide) 14 + - [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!** 15 + - [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled) 16 16 17 17 ## security 18 18
+31
sets/gen.go
··· 1 + package sets 2 + 3 + import ( 4 + "math/rand" 5 + "reflect" 6 + "testing/quick" 7 + ) 8 + 9 + func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value { 10 + s := New[T]() 11 + 12 + var zero T 13 + itemType := reflect.TypeOf(zero) 14 + 15 + for { 16 + if s.Len() >= size { 17 + break 18 + } 19 + 20 + item, ok := quick.Value(itemType, rand) 21 + if !ok { 22 + continue 23 + } 24 + 25 + if val, ok := item.Interface().(T); ok { 26 + s.Insert(val) 27 + } 28 + } 29 + 30 + return reflect.ValueOf(s) 31 + }
+35
sets/readme.txt
··· 1 + sets 2 + ---- 3 + set data structure for go with generics and iterators. the 4 + api is supposed to mimic rust's std::collections::HashSet api. 5 + 6 + s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4})) 7 + s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6})) 8 + 9 + union := sets.Collect(s1.Union(s2)) 10 + intersect := sets.Collect(s1.Intersection(s2)) 11 + diff := sets.Collect(s1.Difference(s2)) 12 + symdiff := sets.Collect(s1.SymmetricDifference(s2)) 13 + 14 + s1.Len() // 4 15 + s1.Contains(1) // true 16 + s1.IsEmpty() // false 17 + s1.IsSubset(s2) // true 18 + s1.IsSuperset(s2) // false 19 + s1.IsDisjoint(s2) // false 20 + 21 + if inserted := s1.Insert(1); inserted { 22 + // newly added, was not in the set 23 + } 24 + 25 + if existed := s1.Remove(1); existed { 26 + // existed in set, now removed 27 + } 28 + 29 + 30 + testing 31 + ------- 32 + includes property-based tests using the wonderful 33 + testing/quick package! 34 + 35 + go test -v
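a minimal sketch of one such property, relying on the Set[T] Generate method from sets/gen.go so testing/quick can produce random sets (the test name is illustrative):

    func TestPropertyInsertThenContains(t *testing.T) {
        f := func(s Set[int], item int) bool {
            c := s.Clone()
            c.Insert(item)
            return c.Contains(item)
        }
        if err := quick.Check(f, nil); err != nil {
            t.Error(err)
        }
    }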
+174
sets/set.go
··· 1 + package sets 2 + 3 + import ( 4 + "iter" 5 + "maps" 6 + ) 7 + 8 + type Set[T comparable] struct { 9 + data map[T]struct{} 10 + } 11 + 12 + func New[T comparable]() Set[T] { 13 + return Set[T]{ 14 + data: make(map[T]struct{}), 15 + } 16 + } 17 + 18 + func (s *Set[T]) Insert(item T) bool { 19 + _, exists := s.data[item] 20 + s.data[item] = struct{}{} 21 + return !exists 22 + } 23 + 24 + func Singleton[T comparable](item T) Set[T] { 25 + n := New[T]() 26 + _ = n.Insert(item) 27 + return n 28 + } 29 + 30 + func (s *Set[T]) Remove(item T) bool { 31 + _, exists := s.data[item] 32 + if exists { 33 + delete(s.data, item) 34 + } 35 + return exists 36 + } 37 + 38 + func (s Set[T]) Contains(item T) bool { 39 + _, exists := s.data[item] 40 + return exists 41 + } 42 + 43 + func (s Set[T]) Len() int { 44 + return len(s.data) 45 + } 46 + 47 + func (s Set[T]) IsEmpty() bool { 48 + return len(s.data) == 0 49 + } 50 + 51 + func (s *Set[T]) Clear() { 52 + s.data = make(map[T]struct{}) 53 + } 54 + 55 + func (s Set[T]) All() iter.Seq[T] { 56 + return func(yield func(T) bool) { 57 + for item := range s.data { 58 + if !yield(item) { 59 + return 60 + } 61 + } 62 + } 63 + } 64 + 65 + func (s Set[T]) Clone() Set[T] { 66 + return Set[T]{ 67 + data: maps.Clone(s.data), 68 + } 69 + } 70 + 71 + func (s Set[T]) Union(other Set[T]) iter.Seq[T] { 72 + if s.Len() >= other.Len() { 73 + return chain(s.All(), other.Difference(s)) 74 + } else { 75 + return chain(other.All(), s.Difference(other)) 76 + } 77 + } 78 + 79 + func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] { 80 + return func(yield func(T) bool) { 81 + for _, seq := range seqs { 82 + for item := range seq { 83 + if !yield(item) { 84 + return 85 + } 86 + } 87 + } 88 + } 89 + } 90 + 91 + func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] { 92 + return func(yield func(T) bool) { 93 + for item := range s.data { 94 + if other.Contains(item) { 95 + if !yield(item) { 96 + return 97 + } 98 + } 99 + } 100 + } 101 + } 102 + 103 + func (s Set[T]) Difference(other Set[T]) iter.Seq[T] { 104 + return func(yield func(T) bool) { 105 + for item := range s.data { 106 + if !other.Contains(item) { 107 + if !yield(item) { 108 + return 109 + } 110 + } 111 + } 112 + } 113 + } 114 + 115 + func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] { 116 + return func(yield func(T) bool) { 117 + for item := range s.data { 118 + if !other.Contains(item) { 119 + if !yield(item) { 120 + return 121 + } 122 + } 123 + } 124 + for item := range other.data { 125 + if !s.Contains(item) { 126 + if !yield(item) { 127 + return 128 + } 129 + } 130 + } 131 + } 132 + } 133 + 134 + func (s Set[T]) IsSubset(other Set[T]) bool { 135 + for item := range s.data { 136 + if !other.Contains(item) { 137 + return false 138 + } 139 + } 140 + return true 141 + } 142 + 143 + func (s Set[T]) IsSuperset(other Set[T]) bool { 144 + return other.IsSubset(s) 145 + } 146 + 147 + func (s Set[T]) IsDisjoint(other Set[T]) bool { 148 + for item := range s.data { 149 + if other.Contains(item) { 150 + return false 151 + } 152 + } 153 + return true 154 + } 155 + 156 + func (s Set[T]) Equal(other Set[T]) bool { 157 + if s.Len() != other.Len() { 158 + return false 159 + } 160 + for item := range s.data { 161 + if !other.Contains(item) { 162 + return false 163 + } 164 + } 165 + return true 166 + } 167 + 168 + func Collect[T comparable](seq iter.Seq[T]) Set[T] { 169 + result := New[T]() 170 + for item := range seq { 171 + result.Insert(item) 172 + } 173 + return result 174 + }
+411
sets/set_test.go
··· 1 + package sets 2 + 3 + import ( 4 + "slices" 5 + "testing" 6 + "testing/quick" 7 + ) 8 + 9 + func TestNew(t *testing.T) { 10 + s := New[int]() 11 + if s.Len() != 0 { 12 + t.Errorf("New set should be empty, got length %d", s.Len()) 13 + } 14 + if !s.IsEmpty() { 15 + t.Error("New set should be empty") 16 + } 17 + } 18 + 19 + func TestFromSlice(t *testing.T) { 20 + s := Collect(slices.Values([]int{1, 2, 3, 2, 1})) 21 + if s.Len() != 3 { 22 + t.Errorf("Expected length 3, got %d", s.Len()) 23 + } 24 + if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) { 25 + t.Error("Set should contain all unique elements from slice") 26 + } 27 + } 28 + 29 + func TestInsert(t *testing.T) { 30 + s := New[string]() 31 + 32 + if !s.Insert("hello") { 33 + t.Error("First insert should return true") 34 + } 35 + if s.Insert("hello") { 36 + t.Error("Duplicate insert should return false") 37 + } 38 + if s.Len() != 1 { 39 + t.Errorf("Expected length 1, got %d", s.Len()) 40 + } 41 + } 42 + 43 + func TestRemove(t *testing.T) { 44 + s := Collect(slices.Values([]int{1, 2, 3})) 45 + 46 + if !s.Remove(2) { 47 + t.Error("Remove existing element should return true") 48 + } 49 + if s.Remove(2) { 50 + t.Error("Remove non-existing element should return false") 51 + } 52 + if s.Contains(2) { 53 + t.Error("Element should be removed") 54 + } 55 + if s.Len() != 2 { 56 + t.Errorf("Expected length 2, got %d", s.Len()) 57 + } 58 + } 59 + 60 + func TestContains(t *testing.T) { 61 + s := Collect(slices.Values([]int{1, 2, 3})) 62 + 63 + if !s.Contains(1) { 64 + t.Error("Should contain 1") 65 + } 66 + if s.Contains(4) { 67 + t.Error("Should not contain 4") 68 + } 69 + } 70 + 71 + func TestClear(t *testing.T) { 72 + s := Collect(slices.Values([]int{1, 2, 3})) 73 + s.Clear() 74 + 75 + if !s.IsEmpty() { 76 + t.Error("Set should be empty after clear") 77 + } 78 + if s.Len() != 0 { 79 + t.Errorf("Expected length 0, got %d", s.Len()) 80 + } 81 + } 82 + 83 + func TestIterator(t *testing.T) { 84 + s := Collect(slices.Values([]int{1, 2, 3})) 85 + var items []int 86 + 87 + for item := range s.All() { 88 + items = append(items, item) 89 + } 90 + 91 + slices.Sort(items) 92 + expected := []int{1, 2, 3} 93 + if !slices.Equal(items, expected) { 94 + t.Errorf("Expected %v, got %v", expected, items) 95 + } 96 + } 97 + 98 + func TestClone(t *testing.T) { 99 + s1 := Collect(slices.Values([]int{1, 2, 3})) 100 + s2 := s1.Clone() 101 + 102 + if !s1.Equal(s2) { 103 + t.Error("Cloned set should be equal to original") 104 + } 105 + 106 + s2.Insert(4) 107 + if s1.Contains(4) { 108 + t.Error("Modifying clone should not affect original") 109 + } 110 + } 111 + 112 + func TestUnion(t *testing.T) { 113 + s1 := Collect(slices.Values([]int{1, 2})) 114 + s2 := Collect(slices.Values([]int{2, 3})) 115 + 116 + result := Collect(s1.Union(s2)) 117 + expected := Collect(slices.Values([]int{1, 2, 3})) 118 + 119 + if !result.Equal(expected) { 120 + t.Errorf("Expected %v, got %v", expected, result) 121 + } 122 + } 123 + 124 + func TestIntersection(t *testing.T) { 125 + s1 := Collect(slices.Values([]int{1, 2, 3})) 126 + s2 := Collect(slices.Values([]int{2, 3, 4})) 127 + 128 + expected := Collect(slices.Values([]int{2, 3})) 129 + result := Collect(s1.Intersection(s2)) 130 + 131 + if !result.Equal(expected) { 132 + t.Errorf("Expected %v, got %v", expected, result) 133 + } 134 + } 135 + 136 + func TestDifference(t *testing.T) { 137 + s1 := Collect(slices.Values([]int{1, 2, 3})) 138 + s2 := Collect(slices.Values([]int{2, 3, 4})) 139 + 140 + expected := 
Collect(slices.Values([]int{1})) 141 + result := Collect(s1.Difference(s2)) 142 + 143 + if !result.Equal(expected) { 144 + t.Errorf("Expected %v, got %v", expected, result) 145 + } 146 + } 147 + 148 + func TestSymmetricDifference(t *testing.T) { 149 + s1 := Collect(slices.Values([]int{1, 2, 3})) 150 + s2 := Collect(slices.Values([]int{2, 3, 4})) 151 + 152 + expected := Collect(slices.Values([]int{1, 4})) 153 + result := Collect(s1.SymmetricDifference(s2)) 154 + 155 + if !result.Equal(expected) { 156 + t.Errorf("Expected %v, got %v", expected, result) 157 + } 158 + } 159 + 160 + func TestSymmetricDifferenceCommutativeProperty(t *testing.T) { 161 + s1 := Collect(slices.Values([]int{1, 2, 3})) 162 + s2 := Collect(slices.Values([]int{2, 3, 4})) 163 + 164 + result1 := Collect(s1.SymmetricDifference(s2)) 165 + result2 := Collect(s2.SymmetricDifference(s1)) 166 + 167 + if !result1.Equal(result2) { 168 + t.Errorf("Expected %v, got %v", result1, result2) 169 + } 170 + } 171 + 172 + func TestIsSubset(t *testing.T) { 173 + s1 := Collect(slices.Values([]int{1, 2})) 174 + s2 := Collect(slices.Values([]int{1, 2, 3})) 175 + 176 + if !s1.IsSubset(s2) { 177 + t.Error("s1 should be subset of s2") 178 + } 179 + if s2.IsSubset(s1) { 180 + t.Error("s2 should not be subset of s1") 181 + } 182 + } 183 + 184 + func TestIsSuperset(t *testing.T) { 185 + s1 := Collect(slices.Values([]int{1, 2, 3})) 186 + s2 := Collect(slices.Values([]int{1, 2})) 187 + 188 + if !s1.IsSuperset(s2) { 189 + t.Error("s1 should be superset of s2") 190 + } 191 + if s2.IsSuperset(s1) { 192 + t.Error("s2 should not be superset of s1") 193 + } 194 + } 195 + 196 + func TestIsDisjoint(t *testing.T) { 197 + s1 := Collect(slices.Values([]int{1, 2})) 198 + s2 := Collect(slices.Values([]int{3, 4})) 199 + s3 := Collect(slices.Values([]int{2, 3})) 200 + 201 + if !s1.IsDisjoint(s2) { 202 + t.Error("s1 and s2 should be disjoint") 203 + } 204 + if s1.IsDisjoint(s3) { 205 + t.Error("s1 and s3 should not be disjoint") 206 + } 207 + } 208 + 209 + func TestEqual(t *testing.T) { 210 + s1 := Collect(slices.Values([]int{1, 2, 3})) 211 + s2 := Collect(slices.Values([]int{3, 2, 1})) 212 + s3 := Collect(slices.Values([]int{1, 2})) 213 + 214 + if !s1.Equal(s2) { 215 + t.Error("s1 and s2 should be equal") 216 + } 217 + if s1.Equal(s3) { 218 + t.Error("s1 and s3 should not be equal") 219 + } 220 + } 221 + 222 + func TestCollect(t *testing.T) { 223 + s1 := Collect(slices.Values([]int{1, 2})) 224 + s2 := Collect(slices.Values([]int{2, 3})) 225 + 226 + unionSet := Collect(s1.Union(s2)) 227 + if unionSet.Len() != 3 { 228 + t.Errorf("Expected union set length 3, got %d", unionSet.Len()) 229 + } 230 + if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) { 231 + t.Error("Union set should contain 1, 2, and 3") 232 + } 233 + 234 + diffSet := Collect(s1.Difference(s2)) 235 + if diffSet.Len() != 1 { 236 + t.Errorf("Expected difference set length 1, got %d", diffSet.Len()) 237 + } 238 + if !diffSet.Contains(1) { 239 + t.Error("Difference set should contain 1") 240 + } 241 + } 242 + 243 + func TestPropertySingleonLen(t *testing.T) { 244 + f := func(item int) bool { 245 + single := Singleton(item) 246 + return single.Len() == 1 247 + } 248 + 249 + if err := quick.Check(f, nil); err != nil { 250 + t.Error(err) 251 + } 252 + } 253 + 254 + func TestPropertyInsertIdempotent(t *testing.T) { 255 + f := func(s Set[int], item int) bool { 256 + clone := s.Clone() 257 + 258 + clone.Insert(item) 259 + firstLen := clone.Len() 260 + 261 + clone.Insert(item) 262 + 
secondLen := clone.Len() 263 + 264 + return firstLen == secondLen 265 + } 266 + 267 + if err := quick.Check(f, nil); err != nil { 268 + t.Error(err) 269 + } 270 + } 271 + 272 + func TestPropertyUnionCommutative(t *testing.T) { 273 + f := func(s1 Set[int], s2 Set[int]) bool { 274 + union1 := Collect(s1.Union(s2)) 275 + union2 := Collect(s2.Union(s1)) 276 + return union1.Equal(union2) 277 + } 278 + 279 + if err := quick.Check(f, nil); err != nil { 280 + t.Error(err) 281 + } 282 + } 283 + 284 + func TestPropertyIntersectionCommutative(t *testing.T) { 285 + f := func(s1 Set[int], s2 Set[int]) bool { 286 + inter1 := Collect(s1.Intersection(s2)) 287 + inter2 := Collect(s2.Intersection(s1)) 288 + return inter1.Equal(inter2) 289 + } 290 + 291 + if err := quick.Check(f, nil); err != nil { 292 + t.Error(err) 293 + } 294 + } 295 + 296 + func TestPropertyCloneEquals(t *testing.T) { 297 + f := func(s Set[int]) bool { 298 + clone := s.Clone() 299 + return s.Equal(clone) 300 + } 301 + 302 + if err := quick.Check(f, nil); err != nil { 303 + t.Error(err) 304 + } 305 + } 306 + 307 + func TestPropertyIntersectionIsSubset(t *testing.T) { 308 + f := func(s1 Set[int], s2 Set[int]) bool { 309 + inter := Collect(s1.Intersection(s2)) 310 + return inter.IsSubset(s1) && inter.IsSubset(s2) 311 + } 312 + 313 + if err := quick.Check(f, nil); err != nil { 314 + t.Error(err) 315 + } 316 + } 317 + 318 + func TestPropertyUnionIsSuperset(t *testing.T) { 319 + f := func(s1 Set[int], s2 Set[int]) bool { 320 + union := Collect(s1.Union(s2)) 321 + return union.IsSuperset(s1) && union.IsSuperset(s2) 322 + } 323 + 324 + if err := quick.Check(f, nil); err != nil { 325 + t.Error(err) 326 + } 327 + } 328 + 329 + func TestPropertyDifferenceDisjoint(t *testing.T) { 330 + f := func(s1 Set[int], s2 Set[int]) bool { 331 + diff := Collect(s1.Difference(s2)) 332 + return diff.IsDisjoint(s2) 333 + } 334 + 335 + if err := quick.Check(f, nil); err != nil { 336 + t.Error(err) 337 + } 338 + } 339 + 340 + func TestPropertySymmetricDifferenceCommutative(t *testing.T) { 341 + f := func(s1 Set[int], s2 Set[int]) bool { 342 + symDiff1 := Collect(s1.SymmetricDifference(s2)) 343 + symDiff2 := Collect(s2.SymmetricDifference(s1)) 344 + return symDiff1.Equal(symDiff2) 345 + } 346 + 347 + if err := quick.Check(f, nil); err != nil { 348 + t.Error(err) 349 + } 350 + } 351 + 352 + func TestPropertyRemoveWorks(t *testing.T) { 353 + f := func(s Set[int], item int) bool { 354 + clone := s.Clone() 355 + clone.Insert(item) 356 + clone.Remove(item) 357 + return !clone.Contains(item) 358 + } 359 + 360 + if err := quick.Check(f, nil); err != nil { 361 + t.Error(err) 362 + } 363 + } 364 + 365 + func TestPropertyClearEmpty(t *testing.T) { 366 + f := func(s Set[int]) bool { 367 + s.Clear() 368 + return s.IsEmpty() && s.Len() == 0 369 + } 370 + 371 + if err := quick.Check(f, nil); err != nil { 372 + t.Error(err) 373 + } 374 + } 375 + 376 + func TestPropertyIsSubsetReflexive(t *testing.T) { 377 + f := func(s Set[int]) bool { 378 + return s.IsSubset(s) 379 + } 380 + 381 + if err := quick.Check(f, nil); err != nil { 382 + t.Error(err) 383 + } 384 + } 385 + 386 + func TestPropertyDeMorganUnion(t *testing.T) { 387 + f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool { 388 + // create a universe that contains both sets 389 + u := universe.Clone() 390 + for item := range s1.All() { 391 + u.Insert(item) 392 + } 393 + for item := range s2.All() { 394 + u.Insert(item) 395 + } 396 + 397 + // (A u B)' = A' n B' 398 + union := Collect(s1.Union(s2)) 399 + complementUnion := 
Collect(u.Difference(union)) 400 + 401 + complementS1 := Collect(u.Difference(s1)) 402 + complementS2 := Collect(u.Difference(s2)) 403 + intersectionComplements := Collect(complementS1.Intersection(complementS2)) 404 + 405 + return complementUnion.Equal(intersectionComplements) 406 + } 407 + 408 + if err := quick.Check(f, nil); err != nil { 409 + t.Error(err) 410 + } 411 + }
+1 -1
spindle/config/config.go
··· 13 13 DBPath string `env:"DB_PATH, default=spindle.db"` 14 14 Hostname string `env:"HOSTNAME, required"` 15 15 JetstreamEndpoint string `env:"JETSTREAM_ENDPOINT, default=wss://jetstream1.us-west.bsky.network/subscribe"` 16 - PlcUrl string `env:"PLC_URL, default=plc.directory"` 16 + PlcUrl string `env:"PLC_URL, default=https://plc.directory"` 17 17 Dev bool `env:"DEV, default=false"` 18 18 Owner string `env:"OWNER, required"` 19 19 Secrets Secrets `env:",prefix=SECRETS_"`
+1
spindle/db/repos.go
··· 16 16 if err != nil { 17 17 return nil, err 18 18 } 19 + defer rows.Close() 19 20 20 21 var knots []string 21 22 for rows.Next() {
+22 -21
spindle/engine/engine.go
··· 3 3 import ( 4 4 "context" 5 5 "errors" 6 - "fmt" 7 6 "log/slog" 7 + "sync" 8 8 9 9 securejoin "github.com/cyphar/filepath-securejoin" 10 - "golang.org/x/sync/errgroup" 11 10 "tangled.org/core/notifier" 12 11 "tangled.org/core/spindle/config" 13 12 "tangled.org/core/spindle/db" ··· 31 30 } 32 31 } 33 32 34 - eg, ctx := errgroup.WithContext(ctx) 33 + var wg sync.WaitGroup 35 34 for eng, wfs := range pipeline.Workflows { 36 35 workflowTimeout := eng.WorkflowTimeout() 37 36 l.Info("using workflow timeout", "timeout", workflowTimeout) 38 37 39 38 for _, w := range wfs { 40 - eg.Go(func() error { 39 + wg.Add(1) 40 + go func() { 41 + defer wg.Done() 42 + 41 43 wid := models.WorkflowId{ 42 44 PipelineId: pipelineId, 43 45 Name: w.Name, ··· 45 47 46 48 err := db.StatusRunning(wid, n) 47 49 if err != nil { 48 - return err 50 + l.Error("failed to set workflow status to running", "wid", wid, "err", err) 51 + return 49 52 } 50 53 51 54 err = eng.SetupWorkflow(ctx, wid, &w) ··· 61 64 62 65 dbErr := db.StatusFailed(wid, err.Error(), -1, n) 63 66 if dbErr != nil { 64 - return dbErr 67 + l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr) 65 68 } 66 - return err 69 + return 67 70 } 68 71 defer eng.DestroyWorkflow(ctx, wid) 69 72 70 - wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid) 73 + secretValues := make([]string, len(allSecrets)) 74 + for i, s := range allSecrets { 75 + secretValues[i] = s.Value 76 + } 77 + wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues) 71 78 if err != nil { 72 79 l.Warn("failed to setup step logger; logs will not be persisted", "error", err) 73 80 wfLogger = nil ··· 99 106 if errors.Is(err, ErrTimedOut) { 100 107 dbErr := db.StatusTimeout(wid, n) 101 108 if dbErr != nil { 102 - return dbErr 109 + l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr) 103 110 } 104 111 } else { 105 112 dbErr := db.StatusFailed(wid, err.Error(), -1, n) 106 113 if dbErr != nil { 107 - return dbErr 114 + l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr) 108 115 } 109 116 } 110 - 111 - return fmt.Errorf("starting steps image: %w", err) 117 + return 112 118 } 113 119 } 114 120 115 121 err = db.StatusSuccess(wid, n) 116 122 if err != nil { 117 - return err 123 + l.Error("failed to set workflow status to success", "wid", wid, "err", err) 118 124 } 119 - 120 - return nil 121 - }) 125 + }() 122 126 } 123 127 } 124 128 125 - if err := eg.Wait(); err != nil { 126 - l.Error("failed to run one or more workflows", "err", err) 127 - } else { 128 - l.Info("successfully ran full pipeline") 129 - } 129 + wg.Wait() 130 + l.Info("all workflows completed") 130 131 }
+10 -9
spindle/engines/nixery/engine.go
··· 73 73 type addlFields struct { 74 74 image string 75 75 container string 76 - env map[string]string 77 76 } 78 77 79 78 func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) { ··· 103 102 swf.Steps = append(swf.Steps, sstep) 104 103 } 105 104 swf.Name = twf.Name 106 - addl.env = dwf.Environment 105 + swf.Environment = dwf.Environment 107 106 addl.image = workflowImage(dwf.Dependencies, e.cfg.NixeryPipelines.Nixery) 108 107 109 108 setup := &setupSteps{} 110 109 111 110 setup.addStep(nixConfStep()) 112 - setup.addStep(cloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev)) 111 + setup.addStep(models.BuildCloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev)) 113 112 // this step could be empty 114 113 if s := dependencyStep(dwf.Dependencies); s != nil { 115 114 setup.addStep(*s) ··· 288 287 289 288 func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error { 290 289 addl := w.Data.(addlFields) 291 - workflowEnvs := ConstructEnvs(addl.env) 290 + workflowEnvs := ConstructEnvs(w.Environment) 292 291 // TODO(winter): should SetupWorkflow also have secret access? 293 292 // IMO yes, but probably worth thinking on. 294 293 for _, s := range secrets { 295 294 workflowEnvs.AddEnv(s.Key, s.Value) 296 295 } 297 296 298 - step := w.Steps[idx].(Step) 297 + step := w.Steps[idx] 299 298 300 299 select { 301 300 case <-ctx.Done(): ··· 304 303 } 305 304 306 305 envs := append(EnvVars(nil), workflowEnvs...) 307 - for k, v := range step.environment { 308 - envs.AddEnv(k, v) 306 + if nixStep, ok := step.(Step); ok { 307 + for k, v := range nixStep.environment { 308 + envs.AddEnv(k, v) 309 + } 309 310 } 310 311 envs.AddEnv("HOME", homeDir) 311 312 312 313 mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{ 313 - Cmd: []string{"bash", "-c", step.command}, 314 + Cmd: []string{"bash", "-c", step.Command()}, 314 315 AttachStdout: true, 315 316 AttachStderr: true, 316 317 Env: envs, ··· 333 334 // Docker doesn't provide an API to kill an exec run 334 335 // (sure, we could grab the PID and kill it ourselves, 335 336 // but that's wasted effort) 336 - e.l.Warn("step timed out", "step", step.Name) 337 + e.l.Warn("step timed out", "step", step.Name()) 337 338 338 339 <-tailDone 339 340
-73
spindle/engines/nixery/setup_steps.go
··· 2 2 3 3 import ( 4 4 "fmt" 5 - "path" 6 5 "strings" 7 - 8 - "tangled.org/core/api/tangled" 9 - "tangled.org/core/workflow" 10 6 ) 11 7 12 8 func nixConfStep() Step { ··· 17 13 command: setupCmd, 18 14 name: "Configure Nix", 19 15 } 20 - } 21 - 22 - // cloneOptsAsSteps processes clone options and adds corresponding steps 23 - // to the beginning of the workflow's step list if cloning is not skipped. 24 - // 25 - // the steps to do here are: 26 - // - git init 27 - // - git remote add origin <url> 28 - // - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha> 29 - // - git checkout FETCH_HEAD 30 - func cloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) Step { 31 - if twf.Clone.Skip { 32 - return Step{} 33 - } 34 - 35 - var commands []string 36 - 37 - // initialize git repo in workspace 38 - commands = append(commands, "git init") 39 - 40 - // add repo as git remote 41 - scheme := "https://" 42 - if dev { 43 - scheme = "http://" 44 - tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal") 45 - } 46 - url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo) 47 - commands = append(commands, fmt.Sprintf("git remote add origin %s", url)) 48 - 49 - // run git fetch 50 - { 51 - var fetchArgs []string 52 - 53 - // default clone depth is 1 54 - depth := 1 55 - if twf.Clone.Depth > 1 { 56 - depth = int(twf.Clone.Depth) 57 - } 58 - fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth)) 59 - 60 - // optionally recurse submodules 61 - if twf.Clone.Submodules { 62 - fetchArgs = append(fetchArgs, "--recurse-submodules=yes") 63 - } 64 - 65 - // set remote to fetch from 66 - fetchArgs = append(fetchArgs, "origin") 67 - 68 - // set revision to checkout 69 - switch workflow.TriggerKind(tr.Kind) { 70 - case workflow.TriggerKindManual: 71 - // TODO: unimplemented 72 - case workflow.TriggerKindPush: 73 - fetchArgs = append(fetchArgs, tr.Push.NewSha) 74 - case workflow.TriggerKindPullRequest: 75 - fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha) 76 - } 77 - 78 - commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " "))) 79 - } 80 - 81 - // run git checkout 82 - commands = append(commands, "git checkout FETCH_HEAD") 83 - 84 - cloneStep := Step{ 85 - command: strings.Join(commands, "\n"), 86 - name: "Clone repository into workspace", 87 - } 88 - return cloneStep 89 16 } 90 17 91 18 // dependencyStep processes dependencies defined in the workflow.
+150
spindle/models/clone.go
··· 1 + package models 2 + 3 + import ( 4 + "fmt" 5 + "strings" 6 + 7 + "tangled.org/core/api/tangled" 8 + "tangled.org/core/workflow" 9 + ) 10 + 11 + type CloneStep struct { 12 + name string 13 + kind StepKind 14 + commands []string 15 + } 16 + 17 + func (s CloneStep) Name() string { 18 + return s.name 19 + } 20 + 21 + func (s CloneStep) Commands() []string { 22 + return s.commands 23 + } 24 + 25 + func (s CloneStep) Command() string { 26 + return strings.Join(s.commands, "\n") 27 + } 28 + 29 + func (s CloneStep) Kind() StepKind { 30 + return s.kind 31 + } 32 + 33 + // BuildCloneStep generates git clone commands. 34 + // The caller must ensure the current working directory is set to the desired 35 + // workspace directory before executing these commands. 36 + // 37 + // The generated commands are: 38 + // - git init 39 + // - git remote add origin <url> 40 + // - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha> 41 + // - git checkout FETCH_HEAD 42 + // 43 + // Supports all trigger types (push, PR, manual) and clone options. 44 + func BuildCloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) CloneStep { 45 + if twf.Clone != nil && twf.Clone.Skip { 46 + return CloneStep{} 47 + } 48 + 49 + commitSHA, err := extractCommitSHA(tr) 50 + if err != nil { 51 + return CloneStep{ 52 + kind: StepKindSystem, 53 + name: "Clone repository into workspace (error)", 54 + commands: []string{fmt.Sprintf("echo 'Failed to get clone info: %s' && exit 1", err.Error())}, 55 + } 56 + } 57 + 58 + repoURL := BuildRepoURL(tr.Repo, dev) 59 + 60 + var cloneOpts tangled.Pipeline_CloneOpts 61 + if twf.Clone != nil { 62 + cloneOpts = *twf.Clone 63 + } 64 + fetchArgs := buildFetchArgs(cloneOpts, commitSHA) 65 + 66 + return CloneStep{ 67 + kind: StepKindSystem, 68 + name: "Clone repository into workspace", 69 + commands: []string{ 70 + "git init", 71 + fmt.Sprintf("git remote add origin %s", repoURL), 72 + fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")), 73 + "git checkout FETCH_HEAD", 74 + }, 75 + } 76 + } 77 + 78 + // extractCommitSHA extracts the commit SHA from trigger metadata based on trigger type 79 + func extractCommitSHA(tr tangled.Pipeline_TriggerMetadata) (string, error) { 80 + switch workflow.TriggerKind(tr.Kind) { 81 + case workflow.TriggerKindPush: 82 + if tr.Push == nil { 83 + return "", fmt.Errorf("push trigger metadata is nil") 84 + } 85 + return tr.Push.NewSha, nil 86 + 87 + case workflow.TriggerKindPullRequest: 88 + if tr.PullRequest == nil { 89 + return "", fmt.Errorf("pull request trigger metadata is nil") 90 + } 91 + return tr.PullRequest.SourceSha, nil 92 + 93 + case workflow.TriggerKindManual: 94 + // Manual triggers don't have an explicit SHA in the metadata 95 + // For now, return empty string - could be enhanced to fetch from default branch 96 + // TODO: Implement manual trigger SHA resolution (fetch default branch HEAD) 97 + return "", nil 98 + 99 + default: 100 + return "", fmt.Errorf("unknown trigger kind: %s", tr.Kind) 101 + } 102 + } 103 + 104 + // BuildRepoURL constructs the repository URL from repo metadata. 
105 + func BuildRepoURL(repo *tangled.Pipeline_TriggerRepo, devMode bool) string { 106 + if repo == nil { 107 + return "" 108 + } 109 + 110 + scheme := "https://" 111 + if devMode { 112 + scheme = "http://" 113 + } 114 + 115 + // Get host from knot 116 + host := repo.Knot 117 + 118 + // In dev mode, replace localhost with host.docker.internal for Docker networking 119 + if devMode && strings.Contains(host, "localhost") { 120 + host = strings.ReplaceAll(host, "localhost", "host.docker.internal") 121 + } 122 + 123 + // Build URL: {scheme}{knot}/{did}/{repo} 124 + return fmt.Sprintf("%s%s/%s/%s", scheme, host, repo.Did, repo.Repo) 125 + } 126 + 127 + // buildFetchArgs constructs the arguments for git fetch based on clone options 128 + func buildFetchArgs(clone tangled.Pipeline_CloneOpts, sha string) []string { 129 + args := []string{} 130 + 131 + // Set fetch depth (default to 1 for shallow clone) 132 + depth := clone.Depth 133 + if depth == 0 { 134 + depth = 1 135 + } 136 + args = append(args, fmt.Sprintf("--depth=%d", depth)) 137 + 138 + // Add submodules if requested 139 + if clone.Submodules { 140 + args = append(args, "--recurse-submodules=yes") 141 + } 142 + 143 + // Add remote and SHA 144 + args = append(args, "origin") 145 + if sha != "" { 146 + args = append(args, sha) 147 + } 148 + 149 + return args 150 + }
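For a push trigger at sha abc123 on example.com/did:plc:user123/my-repo with default clone options and dev mode off, BuildCloneStep produces commands along these lines (mirrored by the tests below):

    git init
    git remote add origin https://example.com/did:plc:user123/my-repo
    git fetch --depth=1 origin abc123
    git checkout FETCH_HEAD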
+371
spindle/models/clone_test.go
··· 1 + package models 2 + 3 + import ( 4 + "strings" 5 + "testing" 6 + 7 + "tangled.org/core/api/tangled" 8 + "tangled.org/core/workflow" 9 + ) 10 + 11 + func TestBuildCloneStep_PushTrigger(t *testing.T) { 12 + twf := tangled.Pipeline_Workflow{ 13 + Clone: &tangled.Pipeline_CloneOpts{ 14 + Depth: 1, 15 + Submodules: false, 16 + Skip: false, 17 + }, 18 + } 19 + tr := tangled.Pipeline_TriggerMetadata{ 20 + Kind: string(workflow.TriggerKindPush), 21 + Push: &tangled.Pipeline_PushTriggerData{ 22 + NewSha: "abc123", 23 + OldSha: "def456", 24 + Ref: "refs/heads/main", 25 + }, 26 + Repo: &tangled.Pipeline_TriggerRepo{ 27 + Knot: "example.com", 28 + Did: "did:plc:user123", 29 + Repo: "my-repo", 30 + }, 31 + } 32 + 33 + step := BuildCloneStep(twf, tr, false) 34 + 35 + if step.Kind() != StepKindSystem { 36 + t.Errorf("Expected StepKindSystem, got %v", step.Kind()) 37 + } 38 + 39 + if step.Name() != "Clone repository into workspace" { 40 + t.Errorf("Expected 'Clone repository into workspace', got '%s'", step.Name()) 41 + } 42 + 43 + commands := step.Commands() 44 + if len(commands) != 4 { 45 + t.Errorf("Expected 4 commands, got %d", len(commands)) 46 + } 47 + 48 + // Verify commands contain expected git operations 49 + allCmds := strings.Join(commands, " ") 50 + if !strings.Contains(allCmds, "git init") { 51 + t.Error("Commands should contain 'git init'") 52 + } 53 + if !strings.Contains(allCmds, "git remote add origin") { 54 + t.Error("Commands should contain 'git remote add origin'") 55 + } 56 + if !strings.Contains(allCmds, "git fetch") { 57 + t.Error("Commands should contain 'git fetch'") 58 + } 59 + if !strings.Contains(allCmds, "abc123") { 60 + t.Error("Commands should contain commit SHA") 61 + } 62 + if !strings.Contains(allCmds, "git checkout FETCH_HEAD") { 63 + t.Error("Commands should contain 'git checkout FETCH_HEAD'") 64 + } 65 + if !strings.Contains(allCmds, "https://example.com/did:plc:user123/my-repo") { 66 + t.Error("Commands should contain expected repo URL") 67 + } 68 + } 69 + 70 + func TestBuildCloneStep_PullRequestTrigger(t *testing.T) { 71 + twf := tangled.Pipeline_Workflow{ 72 + Clone: &tangled.Pipeline_CloneOpts{ 73 + Depth: 1, 74 + Skip: false, 75 + }, 76 + } 77 + tr := tangled.Pipeline_TriggerMetadata{ 78 + Kind: string(workflow.TriggerKindPullRequest), 79 + PullRequest: &tangled.Pipeline_PullRequestTriggerData{ 80 + SourceSha: "pr-sha-789", 81 + SourceBranch: "feature-branch", 82 + TargetBranch: "main", 83 + Action: "opened", 84 + }, 85 + Repo: &tangled.Pipeline_TriggerRepo{ 86 + Knot: "example.com", 87 + Did: "did:plc:user123", 88 + Repo: "my-repo", 89 + }, 90 + } 91 + 92 + step := BuildCloneStep(twf, tr, false) 93 + 94 + allCmds := strings.Join(step.Commands(), " ") 95 + if !strings.Contains(allCmds, "pr-sha-789") { 96 + t.Error("Commands should contain PR commit SHA") 97 + } 98 + } 99 + 100 + func TestBuildCloneStep_ManualTrigger(t *testing.T) { 101 + twf := tangled.Pipeline_Workflow{ 102 + Clone: &tangled.Pipeline_CloneOpts{ 103 + Depth: 1, 104 + Skip: false, 105 + }, 106 + } 107 + tr := tangled.Pipeline_TriggerMetadata{ 108 + Kind: string(workflow.TriggerKindManual), 109 + Manual: &tangled.Pipeline_ManualTriggerData{ 110 + Inputs: nil, 111 + }, 112 + Repo: &tangled.Pipeline_TriggerRepo{ 113 + Knot: "example.com", 114 + Did: "did:plc:user123", 115 + Repo: "my-repo", 116 + }, 117 + } 118 + 119 + step := BuildCloneStep(twf, tr, false) 120 + 121 + // Manual triggers don't have a SHA yet (TODO), so git fetch won't include a SHA 122 + allCmds := strings.Join(step.Commands(), 
" ") 123 + // Should still have basic git commands 124 + if !strings.Contains(allCmds, "git init") { 125 + t.Error("Commands should contain 'git init'") 126 + } 127 + if !strings.Contains(allCmds, "git fetch") { 128 + t.Error("Commands should contain 'git fetch'") 129 + } 130 + } 131 + 132 + func TestBuildCloneStep_SkipFlag(t *testing.T) { 133 + twf := tangled.Pipeline_Workflow{ 134 + Clone: &tangled.Pipeline_CloneOpts{ 135 + Skip: true, 136 + }, 137 + } 138 + tr := tangled.Pipeline_TriggerMetadata{ 139 + Kind: string(workflow.TriggerKindPush), 140 + Push: &tangled.Pipeline_PushTriggerData{ 141 + NewSha: "abc123", 142 + }, 143 + Repo: &tangled.Pipeline_TriggerRepo{ 144 + Knot: "example.com", 145 + Did: "did:plc:user123", 146 + Repo: "my-repo", 147 + }, 148 + } 149 + 150 + step := BuildCloneStep(twf, tr, false) 151 + 152 + // Empty step when skip is true 153 + if step.Name() != "" { 154 + t.Error("Expected empty step name when Skip is true") 155 + } 156 + if len(step.Commands()) != 0 { 157 + t.Errorf("Expected no commands when Skip is true, got %d commands", len(step.Commands())) 158 + } 159 + } 160 + 161 + func TestBuildCloneStep_DevMode(t *testing.T) { 162 + twf := tangled.Pipeline_Workflow{ 163 + Clone: &tangled.Pipeline_CloneOpts{ 164 + Depth: 1, 165 + Skip: false, 166 + }, 167 + } 168 + tr := tangled.Pipeline_TriggerMetadata{ 169 + Kind: string(workflow.TriggerKindPush), 170 + Push: &tangled.Pipeline_PushTriggerData{ 171 + NewSha: "abc123", 172 + }, 173 + Repo: &tangled.Pipeline_TriggerRepo{ 174 + Knot: "localhost:3000", 175 + Did: "did:plc:user123", 176 + Repo: "my-repo", 177 + }, 178 + } 179 + 180 + step := BuildCloneStep(twf, tr, true) 181 + 182 + // In dev mode, should use http:// and replace localhost with host.docker.internal 183 + allCmds := strings.Join(step.Commands(), " ") 184 + expectedURL := "http://host.docker.internal:3000/did:plc:user123/my-repo" 185 + if !strings.Contains(allCmds, expectedURL) { 186 + t.Errorf("Expected dev mode URL '%s' in commands", expectedURL) 187 + } 188 + } 189 + 190 + func TestBuildCloneStep_DepthAndSubmodules(t *testing.T) { 191 + twf := tangled.Pipeline_Workflow{ 192 + Clone: &tangled.Pipeline_CloneOpts{ 193 + Depth: 10, 194 + Submodules: true, 195 + Skip: false, 196 + }, 197 + } 198 + tr := tangled.Pipeline_TriggerMetadata{ 199 + Kind: string(workflow.TriggerKindPush), 200 + Push: &tangled.Pipeline_PushTriggerData{ 201 + NewSha: "abc123", 202 + }, 203 + Repo: &tangled.Pipeline_TriggerRepo{ 204 + Knot: "example.com", 205 + Did: "did:plc:user123", 206 + Repo: "my-repo", 207 + }, 208 + } 209 + 210 + step := BuildCloneStep(twf, tr, false) 211 + 212 + allCmds := strings.Join(step.Commands(), " ") 213 + if !strings.Contains(allCmds, "--depth=10") { 214 + t.Error("Commands should contain '--depth=10'") 215 + } 216 + 217 + if !strings.Contains(allCmds, "--recurse-submodules=yes") { 218 + t.Error("Commands should contain '--recurse-submodules=yes'") 219 + } 220 + } 221 + 222 + func TestBuildCloneStep_DefaultDepth(t *testing.T) { 223 + twf := tangled.Pipeline_Workflow{ 224 + Clone: &tangled.Pipeline_CloneOpts{ 225 + Depth: 0, // Default should be 1 226 + Skip: false, 227 + }, 228 + } 229 + tr := tangled.Pipeline_TriggerMetadata{ 230 + Kind: string(workflow.TriggerKindPush), 231 + Push: &tangled.Pipeline_PushTriggerData{ 232 + NewSha: "abc123", 233 + }, 234 + Repo: &tangled.Pipeline_TriggerRepo{ 235 + Knot: "example.com", 236 + Did: "did:plc:user123", 237 + Repo: "my-repo", 238 + }, 239 + } 240 + 241 + step := BuildCloneStep(twf, tr, false) 242 + 243 + 
allCmds := strings.Join(step.Commands(), " ") 244 + if !strings.Contains(allCmds, "--depth=1") { 245 + t.Error("Commands should default to '--depth=1'") 246 + } 247 + } 248 + 249 + func TestBuildCloneStep_NilPushData(t *testing.T) { 250 + twf := tangled.Pipeline_Workflow{ 251 + Clone: &tangled.Pipeline_CloneOpts{ 252 + Depth: 1, 253 + Skip: false, 254 + }, 255 + } 256 + tr := tangled.Pipeline_TriggerMetadata{ 257 + Kind: string(workflow.TriggerKindPush), 258 + Push: nil, // Nil push data should create error step 259 + Repo: &tangled.Pipeline_TriggerRepo{ 260 + Knot: "example.com", 261 + Did: "did:plc:user123", 262 + Repo: "my-repo", 263 + }, 264 + } 265 + 266 + step := BuildCloneStep(twf, tr, false) 267 + 268 + // Should return an error step 269 + if !strings.Contains(step.Name(), "error") { 270 + t.Error("Expected error in step name when push data is nil") 271 + } 272 + 273 + allCmds := strings.Join(step.Commands(), " ") 274 + if !strings.Contains(allCmds, "Failed to get clone info") { 275 + t.Error("Commands should contain error message") 276 + } 277 + if !strings.Contains(allCmds, "exit 1") { 278 + t.Error("Commands should exit with error") 279 + } 280 + } 281 + 282 + func TestBuildCloneStep_NilPRData(t *testing.T) { 283 + twf := tangled.Pipeline_Workflow{ 284 + Clone: &tangled.Pipeline_CloneOpts{ 285 + Depth: 1, 286 + Skip: false, 287 + }, 288 + } 289 + tr := tangled.Pipeline_TriggerMetadata{ 290 + Kind: string(workflow.TriggerKindPullRequest), 291 + PullRequest: nil, // Nil PR data should create error step 292 + Repo: &tangled.Pipeline_TriggerRepo{ 293 + Knot: "example.com", 294 + Did: "did:plc:user123", 295 + Repo: "my-repo", 296 + }, 297 + } 298 + 299 + step := BuildCloneStep(twf, tr, false) 300 + 301 + // Should return an error step 302 + if !strings.Contains(step.Name(), "error") { 303 + t.Error("Expected error in step name when pull request data is nil") 304 + } 305 + 306 + allCmds := strings.Join(step.Commands(), " ") 307 + if !strings.Contains(allCmds, "Failed to get clone info") { 308 + t.Error("Commands should contain error message") 309 + } 310 + } 311 + 312 + func TestBuildCloneStep_UnknownTriggerKind(t *testing.T) { 313 + twf := tangled.Pipeline_Workflow{ 314 + Clone: &tangled.Pipeline_CloneOpts{ 315 + Depth: 1, 316 + Skip: false, 317 + }, 318 + } 319 + tr := tangled.Pipeline_TriggerMetadata{ 320 + Kind: "unknown_trigger", 321 + Repo: &tangled.Pipeline_TriggerRepo{ 322 + Knot: "example.com", 323 + Did: "did:plc:user123", 324 + Repo: "my-repo", 325 + }, 326 + } 327 + 328 + step := BuildCloneStep(twf, tr, false) 329 + 330 + // Should return an error step 331 + if !strings.Contains(step.Name(), "error") { 332 + t.Error("Expected error in step name for unknown trigger kind") 333 + } 334 + 335 + allCmds := strings.Join(step.Commands(), " ") 336 + if !strings.Contains(allCmds, "unknown trigger kind") { 337 + t.Error("Commands should contain error message about unknown trigger kind") 338 + } 339 + } 340 + 341 + func TestBuildCloneStep_NilCloneOpts(t *testing.T) { 342 + twf := tangled.Pipeline_Workflow{ 343 + Clone: nil, // Nil clone options should use defaults 344 + } 345 + tr := tangled.Pipeline_TriggerMetadata{ 346 + Kind: string(workflow.TriggerKindPush), 347 + Push: &tangled.Pipeline_PushTriggerData{ 348 + NewSha: "abc123", 349 + }, 350 + Repo: &tangled.Pipeline_TriggerRepo{ 351 + Knot: "example.com", 352 + Did: "did:plc:user123", 353 + Repo: "my-repo", 354 + }, 355 + } 356 + 357 + step := BuildCloneStep(twf, tr, false) 358 + 359 + // Should still work with default options 
360 + if step.Kind() != StepKindSystem { 361 + t.Errorf("Expected StepKindSystem, got %v", step.Kind()) 362 + } 363 + 364 + allCmds := strings.Join(step.Commands(), " ") 365 + if !strings.Contains(allCmds, "--depth=1") { 366 + t.Error("Commands should default to '--depth=1' when Clone is nil") 367 + } 368 + if !strings.Contains(allCmds, "git init") { 369 + t.Error("Commands should contain 'git init'") 370 + } 371 + }
+6 -1
spindle/models/logger.go
··· 12 12 type WorkflowLogger struct { 13 13 file *os.File 14 14 encoder *json.Encoder 15 + mask *SecretMask 15 16 } 16 17 17 - func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) { 18 + func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) { 18 19 path := LogFilePath(baseDir, wid) 19 20 20 21 file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) ··· 25 26 return &WorkflowLogger{ 26 27 file: file, 27 28 encoder: json.NewEncoder(file), 29 + mask: NewSecretMask(secretValues), 28 30 }, nil 29 31 } 30 32 ··· 62 64 63 65 func (w *dataWriter) Write(p []byte) (int, error) { 64 66 line := strings.TrimRight(string(p), "\r\n") 67 + if w.logger.mask != nil { 68 + line = w.logger.mask.Mask(line) 69 + } 65 70 entry := NewDataLogLine(w.idx, line, w.stream) 66 71 if err := w.logger.encoder.Encode(entry); err != nil { 67 72 return 0, err
+4 -3
spindle/models/pipeline.go
··· 22 22 ) 23 23 24 24 type Workflow struct { 25 - Steps []Step 26 - Name string 27 - Data any 25 + Steps []Step 26 + Name string 27 + Data any 28 + Environment map[string]string 28 29 }
+77
spindle/models/pipeline_env.go
··· 1 + package models 2 + 3 + import ( 4 + "strings" 5 + 6 + "github.com/go-git/go-git/v5/plumbing" 7 + "tangled.org/core/api/tangled" 8 + "tangled.org/core/workflow" 9 + ) 10 + 11 + // PipelineEnvVars extracts environment variables from pipeline trigger metadata. 12 + // These are framework-provided variables that are injected into workflow steps. 13 + func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string { 14 + if tr == nil { 15 + return nil 16 + } 17 + 18 + env := make(map[string]string) 19 + 20 + // Standard CI environment variable 21 + env["CI"] = "true" 22 + 23 + env["TANGLED_PIPELINE_ID"] = pipelineId.Rkey 24 + 25 + // Repo info 26 + if tr.Repo != nil { 27 + env["TANGLED_REPO_KNOT"] = tr.Repo.Knot 28 + env["TANGLED_REPO_DID"] = tr.Repo.Did 29 + env["TANGLED_REPO_NAME"] = tr.Repo.Repo 30 + env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch 31 + env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode) 32 + } 33 + 34 + switch workflow.TriggerKind(tr.Kind) { 35 + case workflow.TriggerKindPush: 36 + if tr.Push != nil { 37 + refName := plumbing.ReferenceName(tr.Push.Ref) 38 + refType := "branch" 39 + if refName.IsTag() { 40 + refType = "tag" 41 + } 42 + 43 + env["TANGLED_REF"] = tr.Push.Ref 44 + env["TANGLED_REF_NAME"] = refName.Short() 45 + env["TANGLED_REF_TYPE"] = refType 46 + env["TANGLED_SHA"] = tr.Push.NewSha 47 + env["TANGLED_COMMIT_SHA"] = tr.Push.NewSha 48 + } 49 + 50 + case workflow.TriggerKindPullRequest: 51 + if tr.PullRequest != nil { 52 + // For PRs, the "ref" is the source branch 53 + env["TANGLED_REF"] = "refs/heads/" + tr.PullRequest.SourceBranch 54 + env["TANGLED_REF_NAME"] = tr.PullRequest.SourceBranch 55 + env["TANGLED_REF_TYPE"] = "branch" 56 + env["TANGLED_SHA"] = tr.PullRequest.SourceSha 57 + env["TANGLED_COMMIT_SHA"] = tr.PullRequest.SourceSha 58 + 59 + // PR-specific variables 60 + env["TANGLED_PR_SOURCE_BRANCH"] = tr.PullRequest.SourceBranch 61 + env["TANGLED_PR_TARGET_BRANCH"] = tr.PullRequest.TargetBranch 62 + env["TANGLED_PR_SOURCE_SHA"] = tr.PullRequest.SourceSha 63 + env["TANGLED_PR_ACTION"] = tr.PullRequest.Action 64 + } 65 + 66 + case workflow.TriggerKindManual: 67 + // Manual triggers may not have ref/sha info 68 + // Include any manual inputs if present 69 + if tr.Manual != nil { 70 + for _, pair := range tr.Manual.Inputs { 71 + env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value 72 + } 73 + } 74 + } 75 + 76 + return env 77 + }
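As a quick reference, a push to refs/heads/main at sha abc123 on example.com/did:plc:user123/my-repo (dev mode off) yields roughly the following environment, as exercised by the tests below:

    CI=true
    TANGLED_PIPELINE_ID=<pipeline rkey>
    TANGLED_REPO_KNOT=example.com
    TANGLED_REPO_DID=did:plc:user123
    TANGLED_REPO_NAME=my-repo
    TANGLED_REPO_DEFAULT_BRANCH=main
    TANGLED_REPO_URL=https://example.com/did:plc:user123/my-repo
    TANGLED_REF=refs/heads/main
    TANGLED_REF_NAME=main
    TANGLED_REF_TYPE=branch
    TANGLED_SHA=abc123
    TANGLED_COMMIT_SHA=abc123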
+260
spindle/models/pipeline_env_test.go
··· 1 + package models 2 + 3 + import ( 4 + "testing" 5 + 6 + "tangled.org/core/api/tangled" 7 + "tangled.org/core/workflow" 8 + ) 9 + 10 + func TestPipelineEnvVars_PushBranch(t *testing.T) { 11 + tr := &tangled.Pipeline_TriggerMetadata{ 12 + Kind: string(workflow.TriggerKindPush), 13 + Push: &tangled.Pipeline_PushTriggerData{ 14 + NewSha: "abc123def456", 15 + OldSha: "000000000000", 16 + Ref: "refs/heads/main", 17 + }, 18 + Repo: &tangled.Pipeline_TriggerRepo{ 19 + Knot: "example.com", 20 + Did: "did:plc:user123", 21 + Repo: "my-repo", 22 + DefaultBranch: "main", 23 + }, 24 + } 25 + id := PipelineId{ 26 + Knot: "example.com", 27 + Rkey: "123123", 28 + } 29 + env := PipelineEnvVars(tr, id, false) 30 + 31 + // Check standard CI variable 32 + if env["CI"] != "true" { 33 + t.Errorf("Expected CI='true', got '%s'", env["CI"]) 34 + } 35 + 36 + // Check ref variables 37 + if env["TANGLED_REF"] != "refs/heads/main" { 38 + t.Errorf("Expected TANGLED_REF='refs/heads/main', got '%s'", env["TANGLED_REF"]) 39 + } 40 + if env["TANGLED_REF_NAME"] != "main" { 41 + t.Errorf("Expected TANGLED_REF_NAME='main', got '%s'", env["TANGLED_REF_NAME"]) 42 + } 43 + if env["TANGLED_REF_TYPE"] != "branch" { 44 + t.Errorf("Expected TANGLED_REF_TYPE='branch', got '%s'", env["TANGLED_REF_TYPE"]) 45 + } 46 + 47 + // Check SHA variables 48 + if env["TANGLED_SHA"] != "abc123def456" { 49 + t.Errorf("Expected TANGLED_SHA='abc123def456', got '%s'", env["TANGLED_SHA"]) 50 + } 51 + if env["TANGLED_COMMIT_SHA"] != "abc123def456" { 52 + t.Errorf("Expected TANGLED_COMMIT_SHA='abc123def456', got '%s'", env["TANGLED_COMMIT_SHA"]) 53 + } 54 + 55 + // Check repo variables 56 + if env["TANGLED_REPO_KNOT"] != "example.com" { 57 + t.Errorf("Expected TANGLED_REPO_KNOT='example.com', got '%s'", env["TANGLED_REPO_KNOT"]) 58 + } 59 + if env["TANGLED_REPO_DID"] != "did:plc:user123" { 60 + t.Errorf("Expected TANGLED_REPO_DID='did:plc:user123', got '%s'", env["TANGLED_REPO_DID"]) 61 + } 62 + if env["TANGLED_REPO_NAME"] != "my-repo" { 63 + t.Errorf("Expected TANGLED_REPO_NAME='my-repo', got '%s'", env["TANGLED_REPO_NAME"]) 64 + } 65 + if env["TANGLED_REPO_DEFAULT_BRANCH"] != "main" { 66 + t.Errorf("Expected TANGLED_REPO_DEFAULT_BRANCH='main', got '%s'", env["TANGLED_REPO_DEFAULT_BRANCH"]) 67 + } 68 + if env["TANGLED_REPO_URL"] != "https://example.com/did:plc:user123/my-repo" { 69 + t.Errorf("Expected TANGLED_REPO_URL='https://example.com/did:plc:user123/my-repo', got '%s'", env["TANGLED_REPO_URL"]) 70 + } 71 + } 72 + 73 + func TestPipelineEnvVars_PushTag(t *testing.T) { 74 + tr := &tangled.Pipeline_TriggerMetadata{ 75 + Kind: string(workflow.TriggerKindPush), 76 + Push: &tangled.Pipeline_PushTriggerData{ 77 + NewSha: "abc123def456", 78 + OldSha: "000000000000", 79 + Ref: "refs/tags/v1.2.3", 80 + }, 81 + Repo: &tangled.Pipeline_TriggerRepo{ 82 + Knot: "example.com", 83 + Did: "did:plc:user123", 84 + Repo: "my-repo", 85 + }, 86 + } 87 + id := PipelineId{ 88 + Knot: "example.com", 89 + Rkey: "123123", 90 + } 91 + env := PipelineEnvVars(tr, id, false) 92 + 93 + if env["TANGLED_REF"] != "refs/tags/v1.2.3" { 94 + t.Errorf("Expected TANGLED_REF='refs/tags/v1.2.3', got '%s'", env["TANGLED_REF"]) 95 + } 96 + if env["TANGLED_REF_NAME"] != "v1.2.3" { 97 + t.Errorf("Expected TANGLED_REF_NAME='v1.2.3', got '%s'", env["TANGLED_REF_NAME"]) 98 + } 99 + if env["TANGLED_REF_TYPE"] != "tag" { 100 + t.Errorf("Expected TANGLED_REF_TYPE='tag', got '%s'", env["TANGLED_REF_TYPE"]) 101 + } 102 + } 103 + 104 + func TestPipelineEnvVars_PullRequest(t *testing.T) { 105 + tr 
:= &tangled.Pipeline_TriggerMetadata{ 106 + Kind: string(workflow.TriggerKindPullRequest), 107 + PullRequest: &tangled.Pipeline_PullRequestTriggerData{ 108 + SourceBranch: "feature-branch", 109 + TargetBranch: "main", 110 + SourceSha: "pr-sha-789", 111 + Action: "opened", 112 + }, 113 + Repo: &tangled.Pipeline_TriggerRepo{ 114 + Knot: "example.com", 115 + Did: "did:plc:user123", 116 + Repo: "my-repo", 117 + }, 118 + } 119 + id := PipelineId{ 120 + Knot: "example.com", 121 + Rkey: "123123", 122 + } 123 + env := PipelineEnvVars(tr, id, false) 124 + 125 + // Check ref variables for PR 126 + if env["TANGLED_REF"] != "refs/heads/feature-branch" { 127 + t.Errorf("Expected TANGLED_REF='refs/heads/feature-branch', got '%s'", env["TANGLED_REF"]) 128 + } 129 + if env["TANGLED_REF_NAME"] != "feature-branch" { 130 + t.Errorf("Expected TANGLED_REF_NAME='feature-branch', got '%s'", env["TANGLED_REF_NAME"]) 131 + } 132 + if env["TANGLED_REF_TYPE"] != "branch" { 133 + t.Errorf("Expected TANGLED_REF_TYPE='branch', got '%s'", env["TANGLED_REF_TYPE"]) 134 + } 135 + 136 + // Check SHA variables 137 + if env["TANGLED_SHA"] != "pr-sha-789" { 138 + t.Errorf("Expected TANGLED_SHA='pr-sha-789', got '%s'", env["TANGLED_SHA"]) 139 + } 140 + if env["TANGLED_COMMIT_SHA"] != "pr-sha-789" { 141 + t.Errorf("Expected TANGLED_COMMIT_SHA='pr-sha-789', got '%s'", env["TANGLED_COMMIT_SHA"]) 142 + } 143 + 144 + // Check PR-specific variables 145 + if env["TANGLED_PR_SOURCE_BRANCH"] != "feature-branch" { 146 + t.Errorf("Expected TANGLED_PR_SOURCE_BRANCH='feature-branch', got '%s'", env["TANGLED_PR_SOURCE_BRANCH"]) 147 + } 148 + if env["TANGLED_PR_TARGET_BRANCH"] != "main" { 149 + t.Errorf("Expected TANGLED_PR_TARGET_BRANCH='main', got '%s'", env["TANGLED_PR_TARGET_BRANCH"]) 150 + } 151 + if env["TANGLED_PR_SOURCE_SHA"] != "pr-sha-789" { 152 + t.Errorf("Expected TANGLED_PR_SOURCE_SHA='pr-sha-789', got '%s'", env["TANGLED_PR_SOURCE_SHA"]) 153 + } 154 + if env["TANGLED_PR_ACTION"] != "opened" { 155 + t.Errorf("Expected TANGLED_PR_ACTION='opened', got '%s'", env["TANGLED_PR_ACTION"]) 156 + } 157 + } 158 + 159 + func TestPipelineEnvVars_ManualWithInputs(t *testing.T) { 160 + tr := &tangled.Pipeline_TriggerMetadata{ 161 + Kind: string(workflow.TriggerKindManual), 162 + Manual: &tangled.Pipeline_ManualTriggerData{ 163 + Inputs: []*tangled.Pipeline_Pair{ 164 + {Key: "version", Value: "1.0.0"}, 165 + {Key: "environment", Value: "production"}, 166 + }, 167 + }, 168 + Repo: &tangled.Pipeline_TriggerRepo{ 169 + Knot: "example.com", 170 + Did: "did:plc:user123", 171 + Repo: "my-repo", 172 + }, 173 + } 174 + id := PipelineId{ 175 + Knot: "example.com", 176 + Rkey: "123123", 177 + } 178 + env := PipelineEnvVars(tr, id, false) 179 + 180 + // Check manual input variables 181 + if env["TANGLED_INPUT_VERSION"] != "1.0.0" { 182 + t.Errorf("Expected TANGLED_INPUT_VERSION='1.0.0', got '%s'", env["TANGLED_INPUT_VERSION"]) 183 + } 184 + if env["TANGLED_INPUT_ENVIRONMENT"] != "production" { 185 + t.Errorf("Expected TANGLED_INPUT_ENVIRONMENT='production', got '%s'", env["TANGLED_INPUT_ENVIRONMENT"]) 186 + } 187 + 188 + // Manual triggers shouldn't have ref/sha variables 189 + if _, ok := env["TANGLED_REF"]; ok { 190 + t.Error("Manual trigger should not have TANGLED_REF") 191 + } 192 + if _, ok := env["TANGLED_SHA"]; ok { 193 + t.Error("Manual trigger should not have TANGLED_SHA") 194 + } 195 + } 196 + 197 + func TestPipelineEnvVars_DevMode(t *testing.T) { 198 + tr := &tangled.Pipeline_TriggerMetadata{ 199 + Kind: string(workflow.TriggerKindPush), 200 + 
Push: &tangled.Pipeline_PushTriggerData{ 201 + NewSha: "abc123", 202 + Ref: "refs/heads/main", 203 + }, 204 + Repo: &tangled.Pipeline_TriggerRepo{ 205 + Knot: "localhost:3000", 206 + Did: "did:plc:user123", 207 + Repo: "my-repo", 208 + }, 209 + } 210 + id := PipelineId{ 211 + Knot: "example.com", 212 + Rkey: "123123", 213 + } 214 + env := PipelineEnvVars(tr, id, true) 215 + 216 + // Dev mode should use http:// and replace localhost with host.docker.internal 217 + expectedURL := "http://host.docker.internal:3000/did:plc:user123/my-repo" 218 + if env["TANGLED_REPO_URL"] != expectedURL { 219 + t.Errorf("Expected TANGLED_REPO_URL='%s', got '%s'", expectedURL, env["TANGLED_REPO_URL"]) 220 + } 221 + } 222 + 223 + func TestPipelineEnvVars_NilTrigger(t *testing.T) { 224 + id := PipelineId{ 225 + Knot: "example.com", 226 + Rkey: "123123", 227 + } 228 + env := PipelineEnvVars(nil, id, false) 229 + 230 + if env != nil { 231 + t.Error("Expected nil env for nil trigger") 232 + } 233 + } 234 + 235 + func TestPipelineEnvVars_NilPushData(t *testing.T) { 236 + tr := &tangled.Pipeline_TriggerMetadata{ 237 + Kind: string(workflow.TriggerKindPush), 238 + Push: nil, 239 + Repo: &tangled.Pipeline_TriggerRepo{ 240 + Knot: "example.com", 241 + Did: "did:plc:user123", 242 + Repo: "my-repo", 243 + }, 244 + } 245 + id := PipelineId{ 246 + Knot: "example.com", 247 + Rkey: "123123", 248 + } 249 + env := PipelineEnvVars(tr, id, false) 250 + 251 + // Should still have repo variables 252 + if env["TANGLED_REPO_KNOT"] != "example.com" { 253 + t.Errorf("Expected TANGLED_REPO_KNOT='example.com', got '%s'", env["TANGLED_REPO_KNOT"]) 254 + } 255 + 256 + // Should not have ref/sha variables 257 + if _, ok := env["TANGLED_REF"]; ok { 258 + t.Error("Should not have TANGLED_REF when push data is nil") 259 + } 260 + }
+51
spindle/models/secret_mask.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "strings" 6 + ) 7 + 8 + // SecretMask replaces secret values in strings with "***". 9 + type SecretMask struct { 10 + replacer *strings.Replacer 11 + } 12 + 13 + // NewSecretMask creates a mask for the given secret values. 14 + // Also registers base64-encoded variants of each secret. 15 + func NewSecretMask(values []string) *SecretMask { 16 + var pairs []string 17 + 18 + for _, value := range values { 19 + if value == "" { 20 + continue 21 + } 22 + 23 + pairs = append(pairs, value, "***") 24 + 25 + b64 := base64.StdEncoding.EncodeToString([]byte(value)) 26 + if b64 != value { 27 + pairs = append(pairs, b64, "***") 28 + } 29 + 30 + b64NoPad := strings.TrimRight(b64, "=") 31 + if b64NoPad != b64 && b64NoPad != value { 32 + pairs = append(pairs, b64NoPad, "***") 33 + } 34 + } 35 + 36 + if len(pairs) == 0 { 37 + return nil 38 + } 39 + 40 + return &SecretMask{ 41 + replacer: strings.NewReplacer(pairs...), 42 + } 43 + } 44 + 45 + // Mask replaces all registered secret values with "***". 46 + func (m *SecretMask) Mask(input string) string { 47 + if m == nil || m.replacer == nil { 48 + return input 49 + } 50 + return m.replacer.Replace(input) 51 + }
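A short usage sketch; the secret value is illustrative, and aHVudGVyMg== is its base64 encoding:

    mask := models.NewSecretMask([]string{"hunter2"})
    mask.Mask("token=hunter2")      // "token=***"
    mask.Mask("auth: aHVudGVyMg==") // "auth: ***" (base64-encoded secret)
    mask.Mask("auth: aHVudGVyMg")   // "auth: ***" (unpadded base64)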
+135
spindle/models/secret_mask_test.go
··· 1 + package models 2 + 3 + import ( 4 + "encoding/base64" 5 + "testing" 6 + ) 7 + 8 + func TestSecretMask_BasicMasking(t *testing.T) { 9 + mask := NewSecretMask([]string{"mysecret123"}) 10 + 11 + input := "The password is mysecret123 in this log" 12 + expected := "The password is *** in this log" 13 + 14 + result := mask.Mask(input) 15 + if result != expected { 16 + t.Errorf("expected %q, got %q", expected, result) 17 + } 18 + } 19 + 20 + func TestSecretMask_Base64Encoded(t *testing.T) { 21 + secret := "mysecret123" 22 + mask := NewSecretMask([]string{secret}) 23 + 24 + b64 := base64.StdEncoding.EncodeToString([]byte(secret)) 25 + input := "Encoded: " + b64 26 + expected := "Encoded: ***" 27 + 28 + result := mask.Mask(input) 29 + if result != expected { 30 + t.Errorf("expected %q, got %q", expected, result) 31 + } 32 + } 33 + 34 + func TestSecretMask_Base64NoPadding(t *testing.T) { 35 + // "test" encodes to "dGVzdA==" with padding 36 + secret := "test" 37 + mask := NewSecretMask([]string{secret}) 38 + 39 + b64NoPad := "dGVzdA" // base64 without padding 40 + input := "Token: " + b64NoPad 41 + expected := "Token: ***" 42 + 43 + result := mask.Mask(input) 44 + if result != expected { 45 + t.Errorf("expected %q, got %q", expected, result) 46 + } 47 + } 48 + 49 + func TestSecretMask_MultipleSecrets(t *testing.T) { 50 + mask := NewSecretMask([]string{"password1", "apikey123"}) 51 + 52 + input := "Using password1 and apikey123 for auth" 53 + expected := "Using *** and *** for auth" 54 + 55 + result := mask.Mask(input) 56 + if result != expected { 57 + t.Errorf("expected %q, got %q", expected, result) 58 + } 59 + } 60 + 61 + func TestSecretMask_MultipleOccurrences(t *testing.T) { 62 + mask := NewSecretMask([]string{"secret"}) 63 + 64 + input := "secret appears twice: secret" 65 + expected := "*** appears twice: ***" 66 + 67 + result := mask.Mask(input) 68 + if result != expected { 69 + t.Errorf("expected %q, got %q", expected, result) 70 + } 71 + } 72 + 73 + func TestSecretMask_ShortValues(t *testing.T) { 74 + mask := NewSecretMask([]string{"abc", "xy", ""}) 75 + 76 + if mask == nil { 77 + t.Fatal("expected non-nil mask") 78 + } 79 + 80 + input := "abc xy test" 81 + expected := "*** *** test" 82 + result := mask.Mask(input) 83 + if result != expected { 84 + t.Errorf("expected %q, got %q", expected, result) 85 + } 86 + } 87 + 88 + func TestSecretMask_NilMask(t *testing.T) { 89 + var mask *SecretMask 90 + 91 + input := "some input text" 92 + result := mask.Mask(input) 93 + if result != input { 94 + t.Errorf("expected %q, got %q", input, result) 95 + } 96 + } 97 + 98 + func TestSecretMask_EmptyInput(t *testing.T) { 99 + mask := NewSecretMask([]string{"secret"}) 100 + 101 + result := mask.Mask("") 102 + if result != "" { 103 + t.Errorf("expected empty string, got %q", result) 104 + } 105 + } 106 + 107 + func TestSecretMask_NoMatch(t *testing.T) { 108 + mask := NewSecretMask([]string{"secretvalue"}) 109 + 110 + input := "nothing to mask here" 111 + result := mask.Mask(input) 112 + if result != input { 113 + t.Errorf("expected %q, got %q", input, result) 114 + } 115 + } 116 + 117 + func TestSecretMask_EmptySecretsList(t *testing.T) { 118 + mask := NewSecretMask([]string{}) 119 + 120 + if mask != nil { 121 + t.Error("expected nil mask for empty secrets list") 122 + } 123 + } 124 + 125 + func TestSecretMask_EmptySecretsFiltered(t *testing.T) { 126 + mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"}) 127 + 128 + input := "Using validpassword here" 129 + expected := "Using *** here" 130 + 
131 + result := mask.Mask(input) 132 + if result != expected { 133 + t.Errorf("expected %q, got %q", expected, result) 134 + } 135 + }
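The tests above pin down the masking contract without showing the type itself. Below is a minimal sketch that would satisfy them; the shipped models.SecretMask may be implemented differently. The assumed behavior: every non-empty secret is replaced with ***, along with its standard base64 encoding both padded and unpadded; an all-empty secret list yields a nil mask; and Mask on a nil receiver or empty input is a no-op.

package models

import (
	"encoding/base64"
	"strings"
)

// SecretMask replaces known secret values (and their base64 forms) with ***.
type SecretMask struct {
	replacer *strings.Replacer
}

// NewSecretMask builds a mask from the given secrets, skipping empty strings.
// It returns nil when no usable secrets remain, which callers must tolerate.
func NewSecretMask(secrets []string) *SecretMask {
	var pairs []string
	for _, s := range secrets {
		if s == "" {
			continue
		}
		b64 := base64.StdEncoding.EncodeToString([]byte(s))
		pairs = append(pairs,
			s, "***",
			b64, "***", // padded base64 form
			strings.TrimRight(b64, "="), "***", // unpadded base64 form
		)
	}
	if len(pairs) == 0 {
		return nil
	}
	return &SecretMask{replacer: strings.NewReplacer(pairs...)}
}

// Mask rewrites all occurrences of the configured secrets in input.
// A nil receiver passes the input through unchanged.
func (m *SecretMask) Mask(input string) string {
	if m == nil || input == "" {
		return input
	}
	return m.replacer.Replace(input)
}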
+1 -1
spindle/motd
··· 20 20 ** 21 21 ******** 22 22 23 - This is a spindle server. More info at https://tangled.sh/@tangled.sh/core/tree/master/docs/spindle 23 + This is a spindle server. More info at https://docs.tangled.org/spindles.html#spindles 24 24 25 25 Most API routes are under /xrpc/
+15 -7
spindle/secrets/openbao.go
··· 13 13 ) 14 14 15 15 type OpenBaoManager struct { 16 - client *vault.Client 17 - mountPath string 18 - logger *slog.Logger 16 + client *vault.Client 17 + mountPath string 18 + logger *slog.Logger 19 + connectionTimeout time.Duration 19 20 } 20 21 21 22 type OpenBaoManagerOpt func(*OpenBaoManager) ··· 26 27 } 27 28 } 28 29 30 + func WithConnectionTimeout(timeout time.Duration) OpenBaoManagerOpt { 31 + return func(v *OpenBaoManager) { 32 + v.connectionTimeout = timeout 33 + } 34 + } 35 + 29 36 // NewOpenBaoManager creates a new OpenBao manager that connects to a Bao Proxy 30 37 // The proxyAddress should point to the local Bao Proxy (e.g., "http://127.0.0.1:8200") 31 38 // The proxy handles all authentication automatically via Auto-Auth ··· 43 50 } 44 51 45 52 manager := &OpenBaoManager{ 46 - client: client, 47 - mountPath: "spindle", // default KV v2 mount path 48 - logger: logger, 53 + client: client, 54 + mountPath: "spindle", // default KV v2 mount path 55 + logger: logger, 56 + connectionTimeout: 10 * time.Second, // default connection timeout 49 57 } 50 58 51 59 for _, opt := range opts { ··· 62 70 63 71 // testConnection verifies that we can connect to the proxy 64 72 func (v *OpenBaoManager) testConnection() error { 65 - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 73 + ctx, cancel := context.WithTimeout(context.Background(), v.connectionTimeout) 66 74 defer cancel() 67 75 68 76 // try token self-lookup as a quick way to verify proxy works
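The new WithConnectionTimeout option lets callers shorten the 10-second default used by the proxy connectivity check. A hedged wiring sketch follows; the proxy address and mount name are placeholders and the secrets import path is assumed from the repo layout.

package main

import (
	"log"
	"log/slog"
	"os"
	"time"

	"tangled.org/core/spindle/secrets" // assumed import path
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Point at the local Bao Proxy; it handles authentication via Auto-Auth.
	mgr, err := secrets.NewOpenBaoManager(
		"http://127.0.0.1:8200",
		logger,
		secrets.WithMountPath("spindle"),             // KV v2 mount (the default)
		secrets.WithConnectionTimeout(3*time.Second), // override the 10s default
	)
	if err != nil {
		log.Fatalf("openbao: %v", err)
	}
	_ = mgr
}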
+5 -2
spindle/secrets/openbao_test.go
··· 152 152 for _, tt := range tests { 153 153 t.Run(tt.name, func(t *testing.T) { 154 154 logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) 155 - manager, err := NewOpenBaoManager(tt.proxyAddr, logger, tt.opts...) 155 + // Use shorter timeout for tests to avoid long waits 156 + opts := append(tt.opts, WithConnectionTimeout(1*time.Second)) 157 + manager, err := NewOpenBaoManager(tt.proxyAddr, logger, opts...) 156 158 157 159 if tt.expectError { 158 160 assert.Error(t, err) ··· 596 598 597 599 // All these will fail because no real proxy is running 598 600 // but we can test that the configuration is properly accepted 599 - manager, err := NewOpenBaoManager(tt.proxyAddr, logger) 601 + // Use shorter timeout for tests to avoid long waits 602 + manager, err := NewOpenBaoManager(tt.proxyAddr, logger, WithConnectionTimeout(1*time.Second)) 600 603 assert.Error(t, err) // Expected because no real proxy 601 604 assert.Nil(t, manager) 602 605 assert.Contains(t, err.Error(), "failed to connect to bao proxy")
+117 -43
spindle/server.go
··· 6 6 "encoding/json" 7 7 "fmt" 8 8 "log/slog" 9 + "maps" 9 10 "net/http" 11 + "sync" 10 12 11 13 "github.com/go-chi/chi/v5" 12 14 "tangled.org/core/api/tangled" ··· 29 31 ) 30 32 31 33 //go:embed motd 32 - var motd []byte 34 + var defaultMotd []byte 33 35 34 36 const ( 35 37 rbacDomain = "thisserver" ··· 46 48 cfg *config.Config 47 49 ks *eventconsumer.Consumer 48 50 res *idresolver.Resolver 49 - vault secrets.Manager 51 + vault secrets.Manager 52 + motd []byte 53 + motdMu sync.RWMutex 50 54 } 51 55 52 - func Run(ctx context.Context) error { 56 + // New creates a new Spindle server with the provided configuration and engines. 57 + func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) { 53 58 logger := log.FromContext(ctx) 54 59 55 - cfg, err := config.Load(ctx) 56 - if err != nil { 57 - return fmt.Errorf("failed to load config: %w", err) 58 - } 59 - 60 60 d, err := db.Make(cfg.Server.DBPath) 61 61 if err != nil { 62 - return fmt.Errorf("failed to setup db: %w", err) 62 + return nil, fmt.Errorf("failed to setup db: %w", err) 63 63 } 64 64 65 65 e, err := rbac.NewEnforcer(cfg.Server.DBPath) 66 66 if err != nil { 67 - return fmt.Errorf("failed to setup rbac enforcer: %w", err) 67 + return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err) 68 68 } 69 69 e.E.EnableAutoSave(true) 70 70 ··· 74 74 switch cfg.Server.Secrets.Provider { 75 75 case "openbao": 76 76 if cfg.Server.Secrets.OpenBao.ProxyAddr == "" { 77 - return fmt.Errorf("openbao proxy address is required when using openbao secrets provider") 77 + return nil, fmt.Errorf("openbao proxy address is required when using openbao secrets provider") 78 78 } 79 79 vault, err = secrets.NewOpenBaoManager( 80 80 cfg.Server.Secrets.OpenBao.ProxyAddr, ··· 82 82 secrets.WithMountPath(cfg.Server.Secrets.OpenBao.Mount), 83 83 ) 84 84 if err != nil { 85 - return fmt.Errorf("failed to setup openbao secrets provider: %w", err) 85 + return nil, fmt.Errorf("failed to setup openbao secrets provider: %w", err) 86 86 } 87 87 logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount) 88 88 case "sqlite", "": 89 89 vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets")) 90 90 if err != nil { 91 - return fmt.Errorf("failed to setup sqlite secrets provider: %w", err) 91 + return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err) 92 92 } 93 93 logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath) 94 94 default: 95 - return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider) 96 - } 97 - 98 - nixeryEng, err := nixery.New(ctx, cfg) 99 - if err != nil { 100 - return err 95 + return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider) 101 96 } 102 97 103 98 jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount) ··· 110 105 } 111 106 jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true) 112 107 if err != nil { 113 - return fmt.Errorf("failed to setup jetstream client: %w", err) 108 + return nil, fmt.Errorf("failed to setup jetstream client: %w", err) 114 109 } 115 110 jc.AddDid(cfg.Server.Owner) 116 111 117 112 // Check if the spindle knows about any Dids; 118 113 dids, err := d.GetAllDids() 119 114 if err != nil { 120 - return fmt.Errorf("failed to get all dids: %w", err) 115 + return nil, fmt.Errorf("failed to 
get all dids: %w", err) 121 116 } 122 117 for _, d := range dids { 123 118 jc.AddDid(d) ··· 125 120 126 121 resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl) 127 122 128 - spindle := Spindle{ 123 + spindle := &Spindle{ 129 124 jc: jc, 130 125 e: e, 131 126 db: d, 132 127 l: logger, 133 128 n: &n, 134 - engs: map[string]models.Engine{"nixery": nixeryEng}, 129 + engs: engines, 135 130 jq: jq, 136 131 cfg: cfg, 137 132 res: resolver, 138 133 vault: vault, 134 + motd: defaultMotd, 139 135 } 140 136 141 137 err = e.AddSpindle(rbacDomain) 142 138 if err != nil { 143 - return fmt.Errorf("failed to set rbac domain: %w", err) 139 + return nil, fmt.Errorf("failed to set rbac domain: %w", err) 144 140 } 145 141 err = spindle.configureOwner() 146 142 if err != nil { 147 - return err 143 + return nil, err 148 144 } 149 145 logger.Info("owner set", "did", cfg.Server.Owner) 150 146 151 - // starts a job queue runner in the background 152 - jq.Start() 153 - defer jq.Stop() 154 - 155 - // Stop vault token renewal if it implements Stopper 156 - if stopper, ok := vault.(secrets.Stopper); ok { 157 - defer stopper.Stop() 158 - } 159 - 160 147 cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath) 161 148 if err != nil { 162 - return fmt.Errorf("failed to setup sqlite3 cursor store: %w", err) 149 + return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err) 163 150 } 164 151 165 152 err = jc.StartJetstream(ctx, spindle.ingest()) 166 153 if err != nil { 167 - return fmt.Errorf("failed to start jetstream consumer: %w", err) 154 + return nil, fmt.Errorf("failed to start jetstream consumer: %w", err) 168 155 } 169 156 170 157 // for each incoming sh.tangled.pipeline, we execute ··· 177 164 ccfg.CursorStore = cursorStore 178 165 knownKnots, err := d.Knots() 179 166 if err != nil { 180 - return err 167 + return nil, err 181 168 } 182 169 for _, knot := range knownKnots { 183 170 logger.Info("adding source start", "knot", knot) ··· 185 172 } 186 173 spindle.ks = eventconsumer.NewConsumer(*ccfg) 187 174 175 + return spindle, nil 176 + } 177 + 178 + // DB returns the database instance. 179 + func (s *Spindle) DB() *db.DB { 180 + return s.db 181 + } 182 + 183 + // Queue returns the job queue instance. 184 + func (s *Spindle) Queue() *queue.Queue { 185 + return s.jq 186 + } 187 + 188 + // Engines returns the map of available engines. 189 + func (s *Spindle) Engines() map[string]models.Engine { 190 + return s.engs 191 + } 192 + 193 + // Vault returns the secrets manager instance. 194 + func (s *Spindle) Vault() secrets.Manager { 195 + return s.vault 196 + } 197 + 198 + // Notifier returns the notifier instance. 199 + func (s *Spindle) Notifier() *notifier.Notifier { 200 + return s.n 201 + } 202 + 203 + // Enforcer returns the RBAC enforcer instance. 204 + func (s *Spindle) Enforcer() *rbac.Enforcer { 205 + return s.e 206 + } 207 + 208 + // SetMotdContent sets custom MOTD content, replacing the embedded default. 209 + func (s *Spindle) SetMotdContent(content []byte) { 210 + s.motdMu.Lock() 211 + defer s.motdMu.Unlock() 212 + s.motd = content 213 + } 214 + 215 + // GetMotdContent returns the current MOTD content. 216 + func (s *Spindle) GetMotdContent() []byte { 217 + s.motdMu.RLock() 218 + defer s.motdMu.RUnlock() 219 + return s.motd 220 + } 221 + 222 + // Start starts the Spindle server (blocking). 
223 + func (s *Spindle) Start(ctx context.Context) error { 224 + // starts a job queue runner in the background 225 + s.jq.Start() 226 + defer s.jq.Stop() 227 + 228 + // Stop vault token renewal if it implements Stopper 229 + if stopper, ok := s.vault.(secrets.Stopper); ok { 230 + defer stopper.Stop() 231 + } 232 + 188 233 go func() { 189 - logger.Info("starting knot event consumer") 190 - spindle.ks.Start(ctx) 234 + s.l.Info("starting knot event consumer") 235 + s.ks.Start(ctx) 191 236 }() 192 237 193 - logger.Info("starting spindle server", "address", cfg.Server.ListenAddr) 194 - logger.Error("server error", "error", http.ListenAndServe(cfg.Server.ListenAddr, spindle.Router())) 238 + s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr) 239 + return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router()) 240 + } 195 241 196 - return nil 242 + func Run(ctx context.Context) error { 243 + cfg, err := config.Load(ctx) 244 + if err != nil { 245 + return fmt.Errorf("failed to load config: %w", err) 246 + } 247 + 248 + nixeryEng, err := nixery.New(ctx, cfg) 249 + if err != nil { 250 + return err 251 + } 252 + 253 + s, err := New(ctx, cfg, map[string]models.Engine{ 254 + "nixery": nixeryEng, 255 + }) 256 + if err != nil { 257 + return err 258 + } 259 + 260 + return s.Start(ctx) 197 261 } 198 262 199 263 func (s *Spindle) Router() http.Handler { 200 264 mux := chi.NewRouter() 201 265 202 266 mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 203 - w.Write(motd) 267 + w.Write(s.GetMotdContent()) 204 268 }) 205 269 mux.HandleFunc("/events", s.Events) 206 270 mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs) ··· 266 330 267 331 workflows := make(map[models.Engine][]models.Workflow) 268 332 333 + // Build pipeline environment variables once for all workflows 334 + pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev) 335 + 269 336 for _, w := range tpl.Workflows { 270 337 if w != nil { 271 338 if _, ok := s.engs[w.Engine]; !ok { ··· 290 357 if err != nil { 291 358 return err 292 359 } 360 + 361 + // inject TANGLED_* env vars after InitWorkflow 362 + // This prevents user-defined env vars from overriding them 363 + if ewf.Environment == nil { 364 + ewf.Environment = make(map[string]string) 365 + } 366 + maps.Copy(ewf.Environment, pipelineEnv) 293 367 294 368 workflows[eng] = append(workflows[eng], *ewf) 295 369
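server.go now separates construction (New) from serving (Start), exposes accessors for the database, queue, engines, vault, notifier, and enforcer, and makes the MOTD settable, while Run keeps wiring the default nixery engine. A hedged sketch of embedding a spindle this way follows; the import paths and the empty engine map are assumptions for illustration only.

package main

import (
	"context"
	"log"

	"tangled.org/core/spindle" // assumed import paths for this sketch
	"tangled.org/core/spindle/config"
	"tangled.org/core/spindle/models"
)

func main() {
	ctx := context.Background()

	cfg, err := config.Load(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Engines are injected by the caller instead of being hardcoded;
	// an empty map is used here purely for illustration.
	s, err := spindle.New(ctx, cfg, map[string]models.Engine{})
	if err != nil {
		log.Fatal(err)
	}

	// Replace the embedded default MOTD before serving.
	s.SetMotdContent([]byte("welcome to my spindle\n"))

	// Start blocks: it runs the job queue, the knot event consumer,
	// and the HTTP server.
	if err := s.Start(ctx); err != nil {
		log.Fatal(err)
	}
}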
+5
spindle/stream.go
··· 213 213 if err := conn.WriteMessage(websocket.TextMessage, []byte(line.Text)); err != nil { 214 214 return fmt.Errorf("failed to write to websocket: %w", err) 215 215 } 216 + case <-time.After(30 * time.Second): 217 + // send a keep-alive 218 + if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil { 219 + return fmt.Errorf("failed to write control: %w", err) 220 + } 216 221 } 217 222 } 218 223 }
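The 30-second ping keeps idle log streams from being dropped by intermediaries and lets the server detect dead clients. A hedged client-side sketch using gorilla/websocket follows (the host is a placeholder; the path shape matches the /logs/{knot}/{rkey}/{name} route): the default ping handler answers the server's keep-alive pings automatically as long as the read loop keeps running.

package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Placeholder host and path segments.
	url := "wss://spindle.example.com/logs/knot.example.com/3abc123/build"

	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// ReadMessage also services control frames, so the default ping handler
	// replies to the server's 30s keep-alive pings for us.
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Printf("stream closed: %v", err)
			return
		}
		fmt.Println(string(msg))
	}
}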
+1 -1
tailwind.config.js
··· 2 2 const colors = require("tailwindcss/colors"); 3 3 4 4 module.exports = { 5 - content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"], 5 + content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"], 6 6 darkMode: "media", 7 7 theme: { 8 8 container: {
+199
types/commit.go
··· 1 + package types 2 + 3 + import ( 4 + "bytes" 5 + "encoding/json" 6 + "fmt" 7 + "maps" 8 + "regexp" 9 + "strings" 10 + 11 + "github.com/go-git/go-git/v5/plumbing" 12 + "github.com/go-git/go-git/v5/plumbing/object" 13 + ) 14 + 15 + type Commit struct { 16 + // hash of the commit object. 17 + Hash plumbing.Hash `json:"hash,omitempty"` 18 + 19 + // author is the original author of the commit. 20 + Author object.Signature `json:"author"` 21 + 22 + // committer is the one performing the commit, might be different from author. 23 + Committer object.Signature `json:"committer"` 24 + 25 + // message is the commit message, contains arbitrary text. 26 + Message string `json:"message"` 27 + 28 + // treehash is the hash of the root tree of the commit. 29 + Tree string `json:"tree"` 30 + 31 + // parents are the hashes of the parent commits of the commit. 32 + ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"` 33 + 34 + // pgpsignature is the pgp signature of the commit. 35 + PGPSignature string `json:"pgp_signature,omitempty"` 36 + 37 + // mergetag is the embedded tag object when a merge commit is created by 38 + // merging a signed tag. 39 + MergeTag string `json:"merge_tag,omitempty"` 40 + 41 + // changeid is a unique identifier for the change (e.g., gerrit change-id). 42 + ChangeId string `json:"change_id,omitempty"` 43 + 44 + // extraheaders contains additional headers not captured by other fields. 45 + ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"` 46 + 47 + // deprecated: kept for backwards compatibility with old json format. 48 + This string `json:"this,omitempty"` 49 + 50 + // deprecated: kept for backwards compatibility with old json format. 51 + Parent string `json:"parent,omitempty"` 52 + } 53 + 54 + // types.Commit is an unify two commit structs: 55 + // - git.object.Commit from 56 + // - types.NiceDiff.commit 57 + // 58 + // to do this in backwards compatible fashion, we define the base struct 59 + // to use the same fields as NiceDiff.Commit, and then we also unmarshal 60 + // the struct fields from go-git structs, this custom unmarshal makes sense 61 + // of both representations and unifies them to have maximal data in either 62 + // form. 
63 + func (c *Commit) UnmarshalJSON(data []byte) error { 64 + type Alias Commit 65 + 66 + aux := &struct { 67 + *object.Commit 68 + *Alias 69 + }{ 70 + Alias: (*Alias)(c), 71 + } 72 + 73 + if err := json.Unmarshal(data, aux); err != nil { 74 + return err 75 + } 76 + 77 + c.FromGoGitCommit(aux.Commit) 78 + 79 + return nil 80 + } 81 + 82 + // fill in as much of Commit as possible from the given go-git commit 83 + func (c *Commit) FromGoGitCommit(gc *object.Commit) { 84 + if gc == nil { 85 + return 86 + } 87 + 88 + if c.Hash.IsZero() { 89 + c.Hash = gc.Hash 90 + } 91 + if c.This == "" { 92 + c.This = gc.Hash.String() 93 + } 94 + if isEmptySignature(c.Author) { 95 + c.Author = gc.Author 96 + } 97 + if isEmptySignature(c.Committer) { 98 + c.Committer = gc.Committer 99 + } 100 + if c.Message == "" { 101 + c.Message = gc.Message 102 + } 103 + if c.Tree == "" { 104 + c.Tree = gc.TreeHash.String() 105 + } 106 + if c.PGPSignature == "" { 107 + c.PGPSignature = gc.PGPSignature 108 + } 109 + if c.MergeTag == "" { 110 + c.MergeTag = gc.MergeTag 111 + } 112 + 113 + if len(c.ParentHashes) == 0 { 114 + c.ParentHashes = gc.ParentHashes 115 + } 116 + if c.Parent == "" && len(gc.ParentHashes) > 0 { 117 + c.Parent = gc.ParentHashes[0].String() 118 + } 119 + 120 + if len(c.ExtraHeaders) == 0 { 121 + c.ExtraHeaders = make(map[string][]byte) 122 + maps.Copy(c.ExtraHeaders, gc.ExtraHeaders) 123 + } 124 + 125 + if c.ChangeId == "" { 126 + if v, ok := gc.ExtraHeaders["change-id"]; ok { 127 + c.ChangeId = string(v) 128 + } 129 + } 130 + } 131 + 132 + func isEmptySignature(s object.Signature) bool { 133 + return s.Email == "" && s.Name == "" && s.When.IsZero() 134 + } 135 + 136 + // produce a verifiable payload from this commit's metadata 137 + func (c *Commit) Payload() string { 138 + author := bytes.NewBuffer([]byte{}) 139 + c.Author.Encode(author) 140 + 141 + committer := bytes.NewBuffer([]byte{}) 142 + c.Committer.Encode(committer) 143 + 144 + payload := strings.Builder{} 145 + 146 + fmt.Fprintf(&payload, "tree %s\n", c.Tree) 147 + 148 + if len(c.ParentHashes) > 0 { 149 + for _, p := range c.ParentHashes { 150 + fmt.Fprintf(&payload, "parent %s\n", p.String()) 151 + } 152 + } else { 153 + // present for backwards compatibility 154 + fmt.Fprintf(&payload, "parent %s\n", c.Parent) 155 + } 156 + 157 + fmt.Fprintf(&payload, "author %s\n", author.String()) 158 + fmt.Fprintf(&payload, "committer %s\n", committer.String()) 159 + 160 + if c.ChangeId != "" { 161 + fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId) 162 + } else if v, ok := c.ExtraHeaders["change-id"]; ok { 163 + fmt.Fprintf(&payload, "change-id %s\n", string(v)) 164 + } 165 + 166 + fmt.Fprintf(&payload, "\n%s", c.Message) 167 + 168 + return payload.String() 169 + } 170 + 171 + var ( 172 + coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`) 173 + ) 174 + 175 + func (commit Commit) CoAuthors() []object.Signature { 176 + var coAuthors []object.Signature 177 + seen := make(map[string]bool) 178 + matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1) 179 + 180 + for _, match := range matches { 181 + if len(match) >= 3 { 182 + name := strings.TrimSpace(match[1]) 183 + email := strings.TrimSpace(match[2]) 184 + 185 + if seen[email] { 186 + continue 187 + } 188 + seen[email] = true 189 + 190 + coAuthors = append(coAuthors, object.Signature{ 191 + Name: name, 192 + Email: email, 193 + When: commit.Committer.When, 194 + }) 195 + } 196 + } 197 + 198 + return coAuthors 199 + }
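CoAuthors extracts Co-authored-by trailers from the commit message, deduplicates them by email, and stamps each signature with the committer time. A small usage sketch (the types import path is assumed):

package main

import (
	"fmt"
	"time"

	"github.com/go-git/go-git/v5/plumbing/object"
	"tangled.org/core/types" // assumed import path
)

func main() {
	c := types.Commit{
		Committer: object.Signature{Name: "bob", Email: "bob@example.com", When: time.Now()},
		Message: "fix parser\n\n" +
			"Co-authored-by: Alice <alice@example.com>\n" +
			"Co-authored-by: Alice <alice@example.com>\n", // duplicate email is dropped
	}

	for _, a := range c.CoAuthors() {
		// prints once: Alice alice@example.com, with When taken from the committer
		fmt.Println(a.Name, a.Email, a.When.Format(time.RFC3339))
	}
}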
+5 -12
types/diff.go
··· 2 2 3 3 import ( 4 4 "github.com/bluekeyes/go-gitdiff/gitdiff" 5 - "github.com/go-git/go-git/v5/plumbing/object" 6 5 ) 7 6 8 7 type DiffOpts struct { ··· 43 42 44 43 // A nicer git diff representation. 45 44 type NiceDiff struct { 46 - Commit struct { 47 - Message string `json:"message"` 48 - Author object.Signature `json:"author"` 49 - This string `json:"this"` 50 - Parent string `json:"parent"` 51 - PGPSignature string `json:"pgp_signature"` 52 - Committer object.Signature `json:"committer"` 53 - Tree string `json:"tree"` 54 - ChangedId string `json:"change_id"` 55 - } `json:"commit"` 56 - Stat struct { 45 + Commit Commit `json:"commit"` 46 + Stat struct { 57 47 FilesChanged int `json:"files_changed"` 58 48 Insertions int `json:"insertions"` 59 49 Deletions int `json:"deletions"` ··· 84 74 85 75 // used by html elements as a unique ID for hrefs 86 76 func (d *Diff) Id() string { 77 + if d.IsDelete { 78 + return d.Name.Old 79 + } 87 80 return d.Name.New 88 81 } 89 82
+112
types/diff_test.go
··· 1 + package types 2 + 3 + import "testing" 4 + 5 + func TestDiffId(t *testing.T) { 6 + tests := []struct { 7 + name string 8 + diff Diff 9 + expected string 10 + }{ 11 + { 12 + name: "regular file uses new name", 13 + diff: Diff{ 14 + Name: struct { 15 + Old string `json:"old"` 16 + New string `json:"new"` 17 + }{Old: "", New: "src/main.go"}, 18 + }, 19 + expected: "src/main.go", 20 + }, 21 + { 22 + name: "new file uses new name", 23 + diff: Diff{ 24 + Name: struct { 25 + Old string `json:"old"` 26 + New string `json:"new"` 27 + }{Old: "", New: "src/new.go"}, 28 + IsNew: true, 29 + }, 30 + expected: "src/new.go", 31 + }, 32 + { 33 + name: "deleted file uses old name", 34 + diff: Diff{ 35 + Name: struct { 36 + Old string `json:"old"` 37 + New string `json:"new"` 38 + }{Old: "src/deleted.go", New: ""}, 39 + IsDelete: true, 40 + }, 41 + expected: "src/deleted.go", 42 + }, 43 + { 44 + name: "renamed file uses new name", 45 + diff: Diff{ 46 + Name: struct { 47 + Old string `json:"old"` 48 + New string `json:"new"` 49 + }{Old: "src/old.go", New: "src/renamed.go"}, 50 + IsRename: true, 51 + }, 52 + expected: "src/renamed.go", 53 + }, 54 + } 55 + 56 + for _, tt := range tests { 57 + t.Run(tt.name, func(t *testing.T) { 58 + if got := tt.diff.Id(); got != tt.expected { 59 + t.Errorf("Diff.Id() = %q, want %q", got, tt.expected) 60 + } 61 + }) 62 + } 63 + } 64 + 65 + func TestChangedFilesMatchesDiffId(t *testing.T) { 66 + // ChangedFiles() must return values matching each Diff's Id() 67 + // so that sidebar links point to the correct anchors. 68 + // Tests existing, deleted, new, and renamed files. 69 + nd := NiceDiff{ 70 + Diff: []Diff{ 71 + { 72 + Name: struct { 73 + Old string `json:"old"` 74 + New string `json:"new"` 75 + }{Old: "", New: "src/modified.go"}, 76 + }, 77 + { 78 + Name: struct { 79 + Old string `json:"old"` 80 + New string `json:"new"` 81 + }{Old: "src/deleted.go", New: ""}, 82 + IsDelete: true, 83 + }, 84 + { 85 + Name: struct { 86 + Old string `json:"old"` 87 + New string `json:"new"` 88 + }{Old: "", New: "src/new.go"}, 89 + IsNew: true, 90 + }, 91 + { 92 + Name: struct { 93 + Old string `json:"old"` 94 + New string `json:"new"` 95 + }{Old: "src/old.go", New: "src/renamed.go"}, 96 + IsRename: true, 97 + }, 98 + }, 99 + } 100 + 101 + changedFiles := nd.ChangedFiles() 102 + 103 + if len(changedFiles) != len(nd.Diff) { 104 + t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff)) 105 + } 106 + 107 + for i, diff := range nd.Diff { 108 + if changedFiles[i] != diff.Id() { 109 + t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id()) 110 + } 111 + } 112 + }
+39 -18
types/repo.go
··· 1 1 package types 2 2 3 3 import ( 4 + "encoding/json" 5 + 4 6 "github.com/bluekeyes/go-gitdiff/gitdiff" 5 7 "github.com/go-git/go-git/v5/plumbing/object" 6 8 ) 7 9 8 10 type RepoIndexResponse struct { 9 - IsEmpty bool `json:"is_empty"` 10 - Ref string `json:"ref,omitempty"` 11 - Readme string `json:"readme,omitempty"` 12 - ReadmeFileName string `json:"readme_file_name,omitempty"` 13 - Commits []*object.Commit `json:"commits,omitempty"` 14 - Description string `json:"description,omitempty"` 15 - Files []NiceTree `json:"files,omitempty"` 16 - Branches []Branch `json:"branches,omitempty"` 17 - Tags []*TagReference `json:"tags,omitempty"` 18 - TotalCommits int `json:"total_commits,omitempty"` 11 + IsEmpty bool `json:"is_empty"` 12 + Ref string `json:"ref,omitempty"` 13 + Readme string `json:"readme,omitempty"` 14 + ReadmeFileName string `json:"readme_file_name,omitempty"` 15 + Commits []Commit `json:"commits,omitempty"` 16 + Description string `json:"description,omitempty"` 17 + Files []NiceTree `json:"files,omitempty"` 18 + Branches []Branch `json:"branches,omitempty"` 19 + Tags []*TagReference `json:"tags,omitempty"` 20 + TotalCommits int `json:"total_commits,omitempty"` 19 21 } 20 22 21 23 type RepoLogResponse struct { 22 - Commits []*object.Commit `json:"commits,omitempty"` 23 - Ref string `json:"ref,omitempty"` 24 - Description string `json:"description,omitempty"` 25 - Log bool `json:"log,omitempty"` 26 - Total int `json:"total,omitempty"` 27 - Page int `json:"page,omitempty"` 28 - PerPage int `json:"per_page,omitempty"` 24 + Commits []Commit `json:"commits,omitempty"` 25 + Ref string `json:"ref,omitempty"` 26 + Description string `json:"description,omitempty"` 27 + Log bool `json:"log,omitempty"` 28 + Total int `json:"total,omitempty"` 29 + Page int `json:"page,omitempty"` 30 + PerPage int `json:"per_page,omitempty"` 29 31 } 30 32 31 33 type RepoCommitResponse struct { ··· 66 68 type Branch struct { 67 69 Reference `json:"reference"` 68 70 Commit *object.Commit `json:"commit,omitempty"` 69 - IsDefault bool `json:"is_deafult,omitempty"` 71 + IsDefault bool `json:"is_default,omitempty"` 72 + } 73 + 74 + func (b *Branch) UnmarshalJSON(data []byte) error { 75 + aux := &struct { 76 + Reference `json:"reference"` 77 + Commit *object.Commit `json:"commit,omitempty"` 78 + IsDefault bool `json:"is_default,omitempty"` 79 + MispelledIsDefault bool `json:"is_deafult,omitempty"` // mispelled name 80 + }{} 81 + 82 + if err := json.Unmarshal(data, aux); err != nil { 83 + return err 84 + } 85 + 86 + b.Reference = aux.Reference 87 + b.Commit = aux.Commit 88 + b.IsDefault = aux.IsDefault || aux.MispelledIsDefault // whichever was set 89 + 90 + return nil 70 91 } 71 92 72 93 type RepoTagsResponse struct {
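Branch now marshals the correctly spelled is_default key, and its custom UnmarshalJSON still honors the old misspelled is_deafult, so responses from older knots keep decoding. A small illustration (the types import path is assumed and the reference field names are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"tangled.org/core/types" // assumed import path
)

func main() {
	// Payload shape from an older knot that still emits the misspelled key.
	legacy := []byte(`{"reference": {"name": "refs/heads/main"}, "is_deafult": true}`)

	var b types.Branch
	if err := json.Unmarshal(legacy, &b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(b.IsDefault) // true: either spelling of the key is accepted
}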
+88 -5
types/tree.go
··· 1 1 package types 2 2 3 3 import ( 4 + "fmt" 5 + "os" 4 6 "time" 5 7 6 8 "github.com/go-git/go-git/v5/plumbing" 9 + "github.com/go-git/go-git/v5/plumbing/filemode" 7 10 ) 8 11 9 12 // A nicer git tree representation. 10 13 type NiceTree struct { 11 14 // Relative path 12 - Name string `json:"name"` 13 - Mode string `json:"mode"` 14 - Size int64 `json:"size"` 15 - IsFile bool `json:"is_file"` 16 - IsSubtree bool `json:"is_subtree"` 15 + Name string `json:"name"` 16 + Mode string `json:"mode"` 17 + Size int64 `json:"size"` 17 18 18 19 LastCommit *LastCommitInfo `json:"last_commit,omitempty"` 20 + } 21 + 22 + func (t *NiceTree) FileMode() (filemode.FileMode, error) { 23 + if numericMode, err := filemode.New(t.Mode); err == nil { 24 + return numericMode, nil 25 + } 26 + 27 + // TODO: this is here for backwards compat, can be removed in future versions 28 + osMode, err := parseModeString(t.Mode) 29 + if err != nil { 30 + return filemode.Empty, nil 31 + } 32 + 33 + conv, err := filemode.NewFromOSFileMode(osMode) 34 + if err != nil { 35 + return filemode.Empty, nil 36 + } 37 + 38 + return conv, nil 39 + } 40 + 41 + // ParseFileModeString parses a file mode string like "-rw-r--r--" 42 + // and returns an os.FileMode 43 + func parseModeString(modeStr string) (os.FileMode, error) { 44 + if len(modeStr) != 10 { 45 + return 0, fmt.Errorf("invalid mode string length: expected 10, got %d", len(modeStr)) 46 + } 47 + 48 + var mode os.FileMode 49 + 50 + // Parse file type (first character) 51 + switch modeStr[0] { 52 + case 'd': 53 + mode |= os.ModeDir 54 + case 'l': 55 + mode |= os.ModeSymlink 56 + case '-': 57 + // regular file 58 + default: 59 + return 0, fmt.Errorf("unknown file type: %c", modeStr[0]) 60 + } 61 + 62 + // parse permissions for owner, group, and other 63 + perms := modeStr[1:] 64 + shifts := []int{6, 3, 0} // bit shifts for owner, group, other 65 + 66 + for i := range 3 { 67 + offset := i * 3 68 + shift := shifts[i] 69 + 70 + if perms[offset] == 'r' { 71 + mode |= os.FileMode(4 << shift) 72 + } 73 + if perms[offset+1] == 'w' { 74 + mode |= os.FileMode(2 << shift) 75 + } 76 + if perms[offset+2] == 'x' { 77 + mode |= os.FileMode(1 << shift) 78 + } 79 + } 80 + 81 + return mode, nil 82 + } 83 + 84 + func (t *NiceTree) IsFile() bool { 85 + m, err := t.FileMode() 86 + 87 + if err != nil { 88 + return false 89 + } 90 + 91 + return m.IsFile() 92 + } 93 + 94 + func (t *NiceTree) IsSubmodule() bool { 95 + m, err := t.FileMode() 96 + 97 + if err != nil { 98 + return false 99 + } 100 + 101 + return m == filemode.Submodule 19 102 } 20 103 21 104 type LastCommitInfo struct {
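NiceTree now carries only the mode string and derives IsFile and IsSubmodule from it; FileMode accepts both the numeric git form (e.g. "100644") and, for backwards compatibility, the older "-rw-r--r--" style. A small sketch (the types import path is assumed; note that FileMode as diffed above reports parse failures as filemode.Empty rather than an error):

package main

import (
	"fmt"

	"tangled.org/core/types" // assumed import path
)

func main() {
	entries := []types.NiceTree{
		{Name: "README.md", Mode: "100644"},       // numeric git mode
		{Name: "scripts/run", Mode: "-rwxr-xr-x"}, // legacy mode string, still accepted
		{Name: "vendor/dep", Mode: "160000"},      // gitlink, i.e. a submodule
	}

	for _, e := range entries {
		m, _ := e.FileMode()
		fmt.Println(e.Name, m, "file:", e.IsFile(), "submodule:", e.IsSubmodule())
	}
}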