api/tangled/cbor_gen.go (+649, -8)
···
6938
6938
}
6939
6939
6940
6940
cw := cbg.NewCborWriter(w)
6941
-
fieldCount := 5
6941
+
fieldCount := 7
6942
6942
6943
6943
if t.Body == nil {
6944
+
fieldCount--
6945
+
}
6946
+
6947
+
if t.Mentions == nil {
6948
+
fieldCount--
6949
+
}
6950
+
6951
+
if t.References == nil {
6944
6952
fieldCount--
6945
6953
}
6946
6954
···
7045
7053
return err
7046
7054
}
7047
7055
7056
+
// t.Mentions ([]string) (slice)
7057
+
if t.Mentions != nil {
7058
+
7059
+
if len("mentions") > 1000000 {
7060
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
7061
+
}
7062
+
7063
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
7064
+
return err
7065
+
}
7066
+
if _, err := cw.WriteString(string("mentions")); err != nil {
7067
+
return err
7068
+
}
7069
+
7070
+
if len(t.Mentions) > 8192 {
7071
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
7072
+
}
7073
+
7074
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
7075
+
return err
7076
+
}
7077
+
for _, v := range t.Mentions {
7078
+
if len(v) > 1000000 {
7079
+
return xerrors.Errorf("Value in field v was too long")
7080
+
}
7081
+
7082
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
7083
+
return err
7084
+
}
7085
+
if _, err := cw.WriteString(string(v)); err != nil {
7086
+
return err
7087
+
}
7088
+
7089
+
}
7090
+
}
7091
+
7048
7092
// t.CreatedAt (string) (string)
7049
7093
if len("createdAt") > 1000000 {
7050
7094
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
7067
7111
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
7068
7112
return err
7069
7113
}
7114
+
7115
+
// t.References ([]string) (slice)
7116
+
if t.References != nil {
7117
+
7118
+
if len("references") > 1000000 {
7119
+
return xerrors.Errorf("Value in field \"references\" was too long")
7120
+
}
7121
+
7122
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
7123
+
return err
7124
+
}
7125
+
if _, err := cw.WriteString(string("references")); err != nil {
7126
+
return err
7127
+
}
7128
+
7129
+
if len(t.References) > 8192 {
7130
+
return xerrors.Errorf("Slice value in field t.References was too long")
7131
+
}
7132
+
7133
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
7134
+
return err
7135
+
}
7136
+
for _, v := range t.References {
7137
+
if len(v) > 1000000 {
7138
+
return xerrors.Errorf("Value in field v was too long")
7139
+
}
7140
+
7141
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
7142
+
return err
7143
+
}
7144
+
if _, err := cw.WriteString(string(v)); err != nil {
7145
+
return err
7146
+
}
7147
+
7148
+
}
7149
+
}
7070
7150
return nil
7071
7151
}
7072
7152
···
7095
7175
7096
7176
n := extra
7097
7177
7098
-
nameBuf := make([]byte, 9)
7178
+
nameBuf := make([]byte, 10)
7099
7179
for i := uint64(0); i < n; i++ {
7100
7180
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
7101
7181
if err != nil {
···
7164
7244
}
7165
7245
7166
7246
t.Title = string(sval)
7247
+
}
7248
+
// t.Mentions ([]string) (slice)
7249
+
case "mentions":
7250
+
7251
+
maj, extra, err = cr.ReadHeader()
7252
+
if err != nil {
7253
+
return err
7254
+
}
7255
+
7256
+
if extra > 8192 {
7257
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
7258
+
}
7259
+
7260
+
if maj != cbg.MajArray {
7261
+
return fmt.Errorf("expected cbor array")
7262
+
}
7263
+
7264
+
if extra > 0 {
7265
+
t.Mentions = make([]string, extra)
7266
+
}
7267
+
7268
+
for i := 0; i < int(extra); i++ {
7269
+
{
7270
+
var maj byte
7271
+
var extra uint64
7272
+
var err error
7273
+
_ = maj
7274
+
_ = extra
7275
+
_ = err
7276
+
7277
+
{
7278
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
7279
+
if err != nil {
7280
+
return err
7281
+
}
7282
+
7283
+
t.Mentions[i] = string(sval)
7284
+
}
7285
+
7286
+
}
7167
7287
}
7168
7288
// t.CreatedAt (string) (string)
7169
7289
case "createdAt":
···
7176
7296
7177
7297
t.CreatedAt = string(sval)
7178
7298
}
7299
+
// t.References ([]string) (slice)
7300
+
case "references":
7301
+
7302
+
maj, extra, err = cr.ReadHeader()
7303
+
if err != nil {
7304
+
return err
7305
+
}
7306
+
7307
+
if extra > 8192 {
7308
+
return fmt.Errorf("t.References: array too large (%d)", extra)
7309
+
}
7310
+
7311
+
if maj != cbg.MajArray {
7312
+
return fmt.Errorf("expected cbor array")
7313
+
}
7314
+
7315
+
if extra > 0 {
7316
+
t.References = make([]string, extra)
7317
+
}
7318
+
7319
+
for i := 0; i < int(extra); i++ {
7320
+
{
7321
+
var maj byte
7322
+
var extra uint64
7323
+
var err error
7324
+
_ = maj
7325
+
_ = extra
7326
+
_ = err
7327
+
7328
+
{
7329
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
7330
+
if err != nil {
7331
+
return err
7332
+
}
7333
+
7334
+
t.References[i] = string(sval)
7335
+
}
7336
+
7337
+
}
7338
+
}
7179
7339
7180
7340
default:
7181
7341
// Field doesn't exist on this type, so ignore it
···
7194
7354
}
7195
7355
7196
7356
cw := cbg.NewCborWriter(w)
7197
-
fieldCount := 5
7357
+
fieldCount := 7
7358
+
7359
+
if t.Mentions == nil {
7360
+
fieldCount--
7361
+
}
7362
+
7363
+
if t.References == nil {
7364
+
fieldCount--
7365
+
}
7198
7366
7199
7367
if t.ReplyTo == nil {
7200
7368
fieldCount--
···
7301
7469
}
7302
7470
}
7303
7471
7472
+
// t.Mentions ([]string) (slice)
7473
+
if t.Mentions != nil {
7474
+
7475
+
if len("mentions") > 1000000 {
7476
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
7477
+
}
7478
+
7479
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
7480
+
return err
7481
+
}
7482
+
if _, err := cw.WriteString(string("mentions")); err != nil {
7483
+
return err
7484
+
}
7485
+
7486
+
if len(t.Mentions) > 8192 {
7487
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
7488
+
}
7489
+
7490
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
7491
+
return err
7492
+
}
7493
+
for _, v := range t.Mentions {
7494
+
if len(v) > 1000000 {
7495
+
return xerrors.Errorf("Value in field v was too long")
7496
+
}
7497
+
7498
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
7499
+
return err
7500
+
}
7501
+
if _, err := cw.WriteString(string(v)); err != nil {
7502
+
return err
7503
+
}
7504
+
7505
+
}
7506
+
}
7507
+
7304
7508
// t.CreatedAt (string) (string)
7305
7509
if len("createdAt") > 1000000 {
7306
7510
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
7323
7527
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
7324
7528
return err
7325
7529
}
7530
+
7531
+
// t.References ([]string) (slice)
7532
+
if t.References != nil {
7533
+
7534
+
if len("references") > 1000000 {
7535
+
return xerrors.Errorf("Value in field \"references\" was too long")
7536
+
}
7537
+
7538
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
7539
+
return err
7540
+
}
7541
+
if _, err := cw.WriteString(string("references")); err != nil {
7542
+
return err
7543
+
}
7544
+
7545
+
if len(t.References) > 8192 {
7546
+
return xerrors.Errorf("Slice value in field t.References was too long")
7547
+
}
7548
+
7549
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
7550
+
return err
7551
+
}
7552
+
for _, v := range t.References {
7553
+
if len(v) > 1000000 {
7554
+
return xerrors.Errorf("Value in field v was too long")
7555
+
}
7556
+
7557
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
7558
+
return err
7559
+
}
7560
+
if _, err := cw.WriteString(string(v)); err != nil {
7561
+
return err
7562
+
}
7563
+
7564
+
}
7565
+
}
7326
7566
return nil
7327
7567
}
7328
7568
···
7351
7591
7352
7592
n := extra
7353
7593
7354
-
nameBuf := make([]byte, 9)
7594
+
nameBuf := make([]byte, 10)
7355
7595
for i := uint64(0); i < n; i++ {
7356
7596
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
7357
7597
if err != nil {
···
7419
7659
}
7420
7660
7421
7661
t.ReplyTo = (*string)(&sval)
7662
+
}
7663
+
}
7664
+
// t.Mentions ([]string) (slice)
7665
+
case "mentions":
7666
+
7667
+
maj, extra, err = cr.ReadHeader()
7668
+
if err != nil {
7669
+
return err
7670
+
}
7671
+
7672
+
if extra > 8192 {
7673
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
7674
+
}
7675
+
7676
+
if maj != cbg.MajArray {
7677
+
return fmt.Errorf("expected cbor array")
7678
+
}
7679
+
7680
+
if extra > 0 {
7681
+
t.Mentions = make([]string, extra)
7682
+
}
7683
+
7684
+
for i := 0; i < int(extra); i++ {
7685
+
{
7686
+
var maj byte
7687
+
var extra uint64
7688
+
var err error
7689
+
_ = maj
7690
+
_ = extra
7691
+
_ = err
7692
+
7693
+
{
7694
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
7695
+
if err != nil {
7696
+
return err
7697
+
}
7698
+
7699
+
t.Mentions[i] = string(sval)
7700
+
}
7701
+
7422
7702
}
7423
7703
}
7424
7704
// t.CreatedAt (string) (string)
···
7431
7711
}
7432
7712
7433
7713
t.CreatedAt = string(sval)
7714
+
}
7715
+
// t.References ([]string) (slice)
7716
+
case "references":
7717
+
7718
+
maj, extra, err = cr.ReadHeader()
7719
+
if err != nil {
7720
+
return err
7721
+
}
7722
+
7723
+
if extra > 8192 {
7724
+
return fmt.Errorf("t.References: array too large (%d)", extra)
7725
+
}
7726
+
7727
+
if maj != cbg.MajArray {
7728
+
return fmt.Errorf("expected cbor array")
7729
+
}
7730
+
7731
+
if extra > 0 {
7732
+
t.References = make([]string, extra)
7733
+
}
7734
+
7735
+
for i := 0; i < int(extra); i++ {
7736
+
{
7737
+
var maj byte
7738
+
var extra uint64
7739
+
var err error
7740
+
_ = maj
7741
+
_ = extra
7742
+
_ = err
7743
+
7744
+
{
7745
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
7746
+
if err != nil {
7747
+
return err
7748
+
}
7749
+
7750
+
t.References[i] = string(sval)
7751
+
}
7752
+
7753
+
}
7434
7754
}
7435
7755
7436
7756
default:
···
7614
7934
}
7615
7935
7616
7936
cw := cbg.NewCborWriter(w)
7617
-
fieldCount := 7
7937
+
fieldCount := 9
7618
7938
7619
7939
if t.Body == nil {
7940
+
fieldCount--
7941
+
}
7942
+
7943
+
if t.Mentions == nil {
7944
+
fieldCount--
7945
+
}
7946
+
7947
+
if t.References == nil {
7620
7948
fieldCount--
7621
7949
}
7622
7950
···
7760
8088
return err
7761
8089
}
7762
8090
8091
+
// t.Mentions ([]string) (slice)
8092
+
if t.Mentions != nil {
8093
+
8094
+
if len("mentions") > 1000000 {
8095
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
8096
+
}
8097
+
8098
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
8099
+
return err
8100
+
}
8101
+
if _, err := cw.WriteString(string("mentions")); err != nil {
8102
+
return err
8103
+
}
8104
+
8105
+
if len(t.Mentions) > 8192 {
8106
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
8107
+
}
8108
+
8109
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
8110
+
return err
8111
+
}
8112
+
for _, v := range t.Mentions {
8113
+
if len(v) > 1000000 {
8114
+
return xerrors.Errorf("Value in field v was too long")
8115
+
}
8116
+
8117
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
8118
+
return err
8119
+
}
8120
+
if _, err := cw.WriteString(string(v)); err != nil {
8121
+
return err
8122
+
}
8123
+
8124
+
}
8125
+
}
8126
+
7763
8127
// t.CreatedAt (string) (string)
7764
8128
if len("createdAt") > 1000000 {
7765
8129
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
7782
8146
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
7783
8147
return err
7784
8148
}
8149
+
8150
+
// t.References ([]string) (slice)
8151
+
if t.References != nil {
8152
+
8153
+
if len("references") > 1000000 {
8154
+
return xerrors.Errorf("Value in field \"references\" was too long")
8155
+
}
8156
+
8157
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
8158
+
return err
8159
+
}
8160
+
if _, err := cw.WriteString(string("references")); err != nil {
8161
+
return err
8162
+
}
8163
+
8164
+
if len(t.References) > 8192 {
8165
+
return xerrors.Errorf("Slice value in field t.References was too long")
8166
+
}
8167
+
8168
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
8169
+
return err
8170
+
}
8171
+
for _, v := range t.References {
8172
+
if len(v) > 1000000 {
8173
+
return xerrors.Errorf("Value in field v was too long")
8174
+
}
8175
+
8176
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
8177
+
return err
8178
+
}
8179
+
if _, err := cw.WriteString(string(v)); err != nil {
8180
+
return err
8181
+
}
8182
+
8183
+
}
8184
+
}
7785
8185
return nil
7786
8186
}
7787
8187
···
7810
8210
7811
8211
n := extra
7812
8212
7813
-
nameBuf := make([]byte, 9)
8213
+
nameBuf := make([]byte, 10)
7814
8214
for i := uint64(0); i < n; i++ {
7815
8215
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
7816
8216
if err != nil {
···
7919
8319
}
7920
8320
}
7921
8321
8322
+
}
8323
+
// t.Mentions ([]string) (slice)
8324
+
case "mentions":
8325
+
8326
+
maj, extra, err = cr.ReadHeader()
8327
+
if err != nil {
8328
+
return err
8329
+
}
8330
+
8331
+
if extra > 8192 {
8332
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
8333
+
}
8334
+
8335
+
if maj != cbg.MajArray {
8336
+
return fmt.Errorf("expected cbor array")
8337
+
}
8338
+
8339
+
if extra > 0 {
8340
+
t.Mentions = make([]string, extra)
8341
+
}
8342
+
8343
+
for i := 0; i < int(extra); i++ {
8344
+
{
8345
+
var maj byte
8346
+
var extra uint64
8347
+
var err error
8348
+
_ = maj
8349
+
_ = extra
8350
+
_ = err
8351
+
8352
+
{
8353
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8354
+
if err != nil {
8355
+
return err
8356
+
}
8357
+
8358
+
t.Mentions[i] = string(sval)
8359
+
}
8360
+
8361
+
}
7922
8362
}
7923
8363
// t.CreatedAt (string) (string)
7924
8364
case "createdAt":
···
7931
8371
7932
8372
t.CreatedAt = string(sval)
7933
8373
}
8374
+
// t.References ([]string) (slice)
8375
+
case "references":
8376
+
8377
+
maj, extra, err = cr.ReadHeader()
8378
+
if err != nil {
8379
+
return err
8380
+
}
8381
+
8382
+
if extra > 8192 {
8383
+
return fmt.Errorf("t.References: array too large (%d)", extra)
8384
+
}
8385
+
8386
+
if maj != cbg.MajArray {
8387
+
return fmt.Errorf("expected cbor array")
8388
+
}
8389
+
8390
+
if extra > 0 {
8391
+
t.References = make([]string, extra)
8392
+
}
8393
+
8394
+
for i := 0; i < int(extra); i++ {
8395
+
{
8396
+
var maj byte
8397
+
var extra uint64
8398
+
var err error
8399
+
_ = maj
8400
+
_ = extra
8401
+
_ = err
8402
+
8403
+
{
8404
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8405
+
if err != nil {
8406
+
return err
8407
+
}
8408
+
8409
+
t.References[i] = string(sval)
8410
+
}
8411
+
8412
+
}
8413
+
}
7934
8414
7935
8415
default:
7936
8416
// Field doesn't exist on this type, so ignore it
···
7949
8429
}
7950
8430
7951
8431
cw := cbg.NewCborWriter(w)
8432
+
fieldCount := 6
7952
8433
7953
-
if _, err := cw.Write([]byte{164}); err != nil {
8434
+
if t.Mentions == nil {
8435
+
fieldCount--
8436
+
}
8437
+
8438
+
if t.References == nil {
8439
+
fieldCount--
8440
+
}
8441
+
8442
+
if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
7954
8443
return err
7955
8444
}
7956
8445
···
8019
8508
return err
8020
8509
}
8021
8510
8511
+
// t.Mentions ([]string) (slice)
8512
+
if t.Mentions != nil {
8513
+
8514
+
if len("mentions") > 1000000 {
8515
+
return xerrors.Errorf("Value in field \"mentions\" was too long")
8516
+
}
8517
+
8518
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mentions"))); err != nil {
8519
+
return err
8520
+
}
8521
+
if _, err := cw.WriteString(string("mentions")); err != nil {
8522
+
return err
8523
+
}
8524
+
8525
+
if len(t.Mentions) > 8192 {
8526
+
return xerrors.Errorf("Slice value in field t.Mentions was too long")
8527
+
}
8528
+
8529
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Mentions))); err != nil {
8530
+
return err
8531
+
}
8532
+
for _, v := range t.Mentions {
8533
+
if len(v) > 1000000 {
8534
+
return xerrors.Errorf("Value in field v was too long")
8535
+
}
8536
+
8537
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
8538
+
return err
8539
+
}
8540
+
if _, err := cw.WriteString(string(v)); err != nil {
8541
+
return err
8542
+
}
8543
+
8544
+
}
8545
+
}
8546
+
8022
8547
// t.CreatedAt (string) (string)
8023
8548
if len("createdAt") > 1000000 {
8024
8549
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
8040
8565
}
8041
8566
if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
8042
8567
return err
8568
+
}
8569
+
8570
+
// t.References ([]string) (slice)
8571
+
if t.References != nil {
8572
+
8573
+
if len("references") > 1000000 {
8574
+
return xerrors.Errorf("Value in field \"references\" was too long")
8575
+
}
8576
+
8577
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("references"))); err != nil {
8578
+
return err
8579
+
}
8580
+
if _, err := cw.WriteString(string("references")); err != nil {
8581
+
return err
8582
+
}
8583
+
8584
+
if len(t.References) > 8192 {
8585
+
return xerrors.Errorf("Slice value in field t.References was too long")
8586
+
}
8587
+
8588
+
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.References))); err != nil {
8589
+
return err
8590
+
}
8591
+
for _, v := range t.References {
8592
+
if len(v) > 1000000 {
8593
+
return xerrors.Errorf("Value in field v was too long")
8594
+
}
8595
+
8596
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
8597
+
return err
8598
+
}
8599
+
if _, err := cw.WriteString(string(v)); err != nil {
8600
+
return err
8601
+
}
8602
+
8603
+
}
8043
8604
}
8044
8605
return nil
8045
8606
}
···
8069
8630
8070
8631
n := extra
8071
8632
8072
-
nameBuf := make([]byte, 9)
8633
+
nameBuf := make([]byte, 10)
8073
8634
for i := uint64(0); i < n; i++ {
8074
8635
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
8075
8636
if err != nil {
···
8118
8679
8119
8680
t.LexiconTypeID = string(sval)
8120
8681
}
8682
+
// t.Mentions ([]string) (slice)
8683
+
case "mentions":
8684
+
8685
+
maj, extra, err = cr.ReadHeader()
8686
+
if err != nil {
8687
+
return err
8688
+
}
8689
+
8690
+
if extra > 8192 {
8691
+
return fmt.Errorf("t.Mentions: array too large (%d)", extra)
8692
+
}
8693
+
8694
+
if maj != cbg.MajArray {
8695
+
return fmt.Errorf("expected cbor array")
8696
+
}
8697
+
8698
+
if extra > 0 {
8699
+
t.Mentions = make([]string, extra)
8700
+
}
8701
+
8702
+
for i := 0; i < int(extra); i++ {
8703
+
{
8704
+
var maj byte
8705
+
var extra uint64
8706
+
var err error
8707
+
_ = maj
8708
+
_ = extra
8709
+
_ = err
8710
+
8711
+
{
8712
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8713
+
if err != nil {
8714
+
return err
8715
+
}
8716
+
8717
+
t.Mentions[i] = string(sval)
8718
+
}
8719
+
8720
+
}
8721
+
}
8121
8722
// t.CreatedAt (string) (string)
8122
8723
case "createdAt":
8123
8724
···
8128
8729
}
8129
8730
8130
8731
t.CreatedAt = string(sval)
8732
+
}
8733
+
// t.References ([]string) (slice)
8734
+
case "references":
8735
+
8736
+
maj, extra, err = cr.ReadHeader()
8737
+
if err != nil {
8738
+
return err
8739
+
}
8740
+
8741
+
if extra > 8192 {
8742
+
return fmt.Errorf("t.References: array too large (%d)", extra)
8743
+
}
8744
+
8745
+
if maj != cbg.MajArray {
8746
+
return fmt.Errorf("expected cbor array")
8747
+
}
8748
+
8749
+
if extra > 0 {
8750
+
t.References = make([]string, extra)
8751
+
}
8752
+
8753
+
for i := 0; i < int(extra); i++ {
8754
+
{
8755
+
var maj byte
8756
+
var extra uint64
8757
+
var err error
8758
+
_ = maj
8759
+
_ = extra
8760
+
_ = err
8761
+
8762
+
{
8763
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8764
+
if err != nil {
8765
+
return err
8766
+
}
8767
+
8768
+
t.References[i] = string(sval)
8769
+
}
8770
+
8771
+
}
8131
8772
}
8132
8773
8133
8774
default:
api/tangled/issuecomment.go (+7, -5)
···
 } //
 // RECORDTYPE: RepoIssueComment
 type RepoIssueComment struct {
-	LexiconTypeID string  `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
-	Body          string  `json:"body" cborgen:"body"`
-	CreatedAt     string  `json:"createdAt" cborgen:"createdAt"`
-	Issue         string  `json:"issue" cborgen:"issue"`
-	ReplyTo       *string `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
+	Body          string   `json:"body" cborgen:"body"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Issue         string   `json:"issue" cborgen:"issue"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+	ReplyTo       *string  `json:"replyTo,omitempty" cborgen:"replyTo,omitempty"`
 }
api/tangled/pipelinecancelPipeline.go (+34)
···
+// Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.
+
+package tangled
+
+// schema: sh.tangled.pipeline.cancelPipeline
+
+import (
+	"context"
+
+	"github.com/bluesky-social/indigo/lex/util"
+)
+
+const (
+	PipelineCancelPipelineNSID = "sh.tangled.pipeline.cancelPipeline"
+)
+
+// PipelineCancelPipeline_Input is the input argument to a sh.tangled.pipeline.cancelPipeline call.
+type PipelineCancelPipeline_Input struct {
+	// pipeline: pipeline at-uri
+	Pipeline string `json:"pipeline" cborgen:"pipeline"`
+	// repo: repo at-uri, spindle can't resolve repo from pipeline at-uri yet
+	Repo string `json:"repo" cborgen:"repo"`
+	// workflow: workflow name
+	Workflow string `json:"workflow" cborgen:"workflow"`
+}
+
+// PipelineCancelPipeline calls the XRPC method "sh.tangled.pipeline.cancelPipeline".
+func PipelineCancelPipeline(ctx context.Context, c util.LexClient, input *PipelineCancelPipeline_Input) error {
+	if err := c.LexDo(ctx, util.Procedure, "application/json", "sh.tangled.pipeline.cancelPipeline", nil, input, nil); err != nil {
+		return err
+	}
+
+	return nil
+}
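For orientation, a minimal caller sketch for the new wrapper; the util.LexClient wiring, the at-uris, and the workflow name are assumptions, not part of this diff:

```go
package main

import (
	"context"

	"github.com/bluesky-social/indigo/lex/util"
	"tangled.org/core/api/tangled"
)

// cancelWorkflow is a hypothetical caller: it assumes you already hold a
// util.LexClient pointed at a spindle's XRPC endpoint; the at-uris and the
// workflow name below are placeholders.
func cancelWorkflow(ctx context.Context, c util.LexClient) error {
	return tangled.PipelineCancelPipeline(ctx, c, &tangled.PipelineCancelPipeline_Input{
		Pipeline: "at://did:plc:example/sh.tangled.pipeline/3kpipeline", // pipeline at-uri (placeholder)
		Repo:     "at://did:plc:example/sh.tangled.repo/3krepo",         // repo at-uri (placeholder)
		Workflow: "build",                                               // workflow name (placeholder)
	})
}
```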
api/tangled/pullcomment.go (+6, -4)
···
 } //
 // RECORDTYPE: RepoPullComment
 type RepoPullComment struct {
-	LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
-	Body          string `json:"body" cborgen:"body"`
-	CreatedAt     string `json:"createdAt" cborgen:"createdAt"`
-	Pull          string `json:"pull" cborgen:"pull"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.pull.comment" cborgen:"$type,const=sh.tangled.repo.pull.comment"`
+	Body          string   `json:"body" cborgen:"body"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	Pull          string   `json:"pull" cborgen:"pull"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
 }
api/tangled/repoissue.go (+7, -5)
···
 } //
 // RECORDTYPE: RepoIssue
 type RepoIssue struct {
-	LexiconTypeID string  `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
-	Body          *string `json:"body,omitempty" cborgen:"body,omitempty"`
-	CreatedAt     string  `json:"createdAt" cborgen:"createdAt"`
-	Repo          string  `json:"repo" cborgen:"repo"`
-	Title         string  `json:"title" cborgen:"title"`
+	LexiconTypeID string   `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
+	Body          *string  `json:"body,omitempty" cborgen:"body,omitempty"`
+	CreatedAt     string   `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+	References    []string `json:"references,omitempty" cborgen:"references,omitempty"`
+	Repo          string   `json:"repo" cborgen:"repo"`
+	Title         string   `json:"title" cborgen:"title"`
 }
api/tangled/repopull.go (+2)
···
 	LexiconTypeID string           `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
 	Body          *string          `json:"body,omitempty" cborgen:"body,omitempty"`
 	CreatedAt     string           `json:"createdAt" cborgen:"createdAt"`
+	Mentions      []string         `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
 	Patch         string           `json:"patch" cborgen:"patch"`
+	References    []string         `json:"references,omitempty" cborgen:"references,omitempty"`
 	Source        *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
 	Target        *RepoPull_Target `json:"target" cborgen:"target"`
 	Title         string           `json:"title" cborgen:"title"`
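Taken together, the four record types above gain the same pair of optional fields. A hedged sketch of how a producer might populate them; the DIDs and at-uris are placeholders, and the assumption that mentions hold DIDs while references hold at-uris is inferred from the appview code further down, not stated in these generated files:

```go
package main

import "tangled.org/core/api/tangled"

// Placeholder values throughout; with both slices left nil the fields are
// dropped from the JSON and CBOR encodings ("omitempty" in both tags).
var issue = tangled.RepoIssue{
	Title:      "flaky CI on main",
	Repo:       "at://did:plc:example/sh.tangled.repo/3krepo",
	CreatedAt:  "2025-01-01T00:00:00Z",
	Mentions:   []string{"did:plc:alice"},                                        // mentioned users (assumed DIDs)
	References: []string{"at://did:plc:example/sh.tangled.repo.issue/3kissue"},   // referenced records (assumed at-uris)
}
```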
appview/commitverify/verify.go (+6, -45)
···
 import (
 	"log"
 
-	"github.com/go-git/go-git/v5/plumbing/object"
 	"tangled.org/core/appview/db"
 	"tangled.org/core/appview/models"
 	"tangled.org/core/crypto"
···
 	return ""
 }
 
-func GetVerifiedObjectCommits(e db.Execer, emailToDid map[string]string, commits []*object.Commit) (VerifiedCommits, error) {
-	ndCommits := []types.NiceDiff{}
-	for _, commit := range commits {
-		ndCommits = append(ndCommits, ObjectCommitToNiceDiff(commit))
-	}
-	return GetVerifiedCommits(e, emailToDid, ndCommits)
-}
-
-func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.NiceDiff) (VerifiedCommits, error) {
+func GetVerifiedCommits(e db.Execer, emailToDid map[string]string, ndCommits []types.Commit) (VerifiedCommits, error) {
 	vcs := VerifiedCommits{}
 
 	didPubkeyCache := make(map[string][]models.PublicKey)
 
 	for _, commit := range ndCommits {
-		c := commit.Commit
-
-		committerEmail := c.Committer.Email
+		committerEmail := commit.Committer.Email
 		if did, exists := emailToDid[committerEmail]; exists {
 			// check if we've already fetched public keys for this did
 			pubKeys, ok := didPubkeyCache[did]
···
 			}
 
 			// try to verify with any associated pubkeys
+			payload := commit.Payload()
+			signature := commit.PGPSignature
 			for _, pk := range pubKeys {
-				if _, ok := crypto.VerifyCommitSignature(pk.Key, commit); ok {
+				if _, ok := crypto.VerifySignature([]byte(pk.Key), []byte(signature), []byte(payload)); ok {
 
 					fp, err := crypto.SSHFingerprint(pk.Key)
 					if err != nil {
 						log.Println("error computing ssh fingerprint:", err)
 					}
 
-					vc := verifiedCommit{fingerprint: fp, hash: c.This}
+					vc := verifiedCommit{fingerprint: fp, hash: commit.This}
 					vcs[vc] = struct{}{}
 					break
 				}
···
 
 	return vcs, nil
 }
-
-// ObjectCommitToNiceDiff is a compatibility function to convert a
-// commit object into a NiceDiff structure.
-func ObjectCommitToNiceDiff(c *object.Commit) types.NiceDiff {
-	var niceDiff types.NiceDiff
-
-	// set commit information
-	niceDiff.Commit.Message = c.Message
-	niceDiff.Commit.Author = c.Author
-	niceDiff.Commit.This = c.Hash.String()
-	niceDiff.Commit.Committer = c.Committer
-	niceDiff.Commit.Tree = c.TreeHash.String()
-	niceDiff.Commit.PGPSignature = c.PGPSignature
-
-	changeId, ok := c.ExtraHeaders["change-id"]
-	if ok {
-		niceDiff.Commit.ChangedId = string(changeId)
-	}
-
-	// set parent hash if available
-	if len(c.ParentHashes) > 0 {
-		niceDiff.Commit.Parent = c.ParentHashes[0].String()
-	}
-
-	// XXX: Stats and Diff fields are typically populated
-	// after fetching the actual diff information, which isn't
-	// directly available in the commit object itself.
-
-	return niceDiff
-}
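The compatibility path through go-git's object.Commit is gone: callers hand GetVerifiedCommits a []types.Commit directly and the signature is checked against the commit's own Payload()/PGPSignature. A hypothetical call site; the package paths and surrounding plumbing are assumptions, only the GetVerifiedCommits signature comes from this diff:

```go
package main

import (
	"tangled.org/core/appview/commitverify"
	"tangled.org/core/appview/db"
	"tangled.org/core/types"
)

// verifyPush assumes emailToDid (committer email -> DID) and the commits
// slice were fetched elsewhere in the appview.
func verifyPush(e db.Execer, emailToDid map[string]string, commits []types.Commit) error {
	verified, err := commitverify.GetVerifiedCommits(e, emailToDid, commits)
	if err != nil {
		return err
	}
	_ = verified // set of (ssh fingerprint, commit hash) pairs that verified
	return nil
}
```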
appview/db/artifact.go (+3, -2)
···
 	"github.com/go-git/go-git/v5/plumbing"
 	"github.com/ipfs/go-cid"
 	"tangled.org/core/appview/models"
+	"tangled.org/core/orm"
 )
 
 func AddArtifact(e Execer, artifact models.Artifact) error {
···
 	return err
 }
 
-func GetArtifact(e Execer, filters ...filter) ([]models.Artifact, error) {
+func GetArtifact(e Execer, filters ...orm.Filter) ([]models.Artifact, error) {
 	var artifacts []models.Artifact
 
 	var conditions []string
···
 	return artifacts, nil
 }
 
-func DeleteArtifact(e Execer, filters ...filter) error {
+func DeleteArtifact(e Execer, filters ...orm.Filter) error {
 	var conditions []string
 	var args []any
 	for _, filter := range filters {
appview/db/collaborators.go (+4, -3)
···
 	"time"
 
 	"tangled.org/core/appview/models"
+	"tangled.org/core/orm"
 )
 
 func AddCollaborator(e Execer, c models.Collaborator) error {
···
 	return err
 }
 
-func DeleteCollaborator(e Execer, filters ...filter) error {
+func DeleteCollaborator(e Execer, filters ...orm.Filter) error {
 	var conditions []string
 	var args []any
 	for _, filter := range filters {
···
 		return nil, nil
 	}
 
-	return GetRepos(e, 0, FilterIn("at_uri", repoAts))
+	return GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
 }
 
-func GetCollaborators(e Execer, filters ...filter) ([]models.Collaborator, error) {
+func GetCollaborators(e Execer, filters ...orm.Filter) ([]models.Collaborator, error) {
 	var collaborators []models.Collaborator
 	var conditions []string
 	var args []any
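This is the pattern repeated through the rest of the diff: the db package's private filter helpers (FilterEq, FilterIn, ...) move to a shared orm package, and every query function now accepts ...orm.Filter. A small sketch of the new call style; the wrapper function is illustrative, while GetCollaborators, orm.FilterEq, and the "repo_at" column name are all taken from this diff:

```go
package main

import (
	"tangled.org/core/appview/db"
	"tangled.org/core/appview/models"
	"tangled.org/core/orm"
)

// listRepoCollaborators is an illustrative wrapper around the new
// filter-based query API.
func listRepoCollaborators(e db.Execer, repoAt string) ([]models.Collaborator, error) {
	return db.GetCollaborators(e, orm.FilterEq("repo_at", repoAt))
}
```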
appview/db/db.go (+69, -136)
···
 import (
 	"context"
 	"database/sql"
-	"fmt"
 	"log/slog"
-	"reflect"
 	"strings"
 
 	_ "github.com/mattn/go-sqlite3"
 	"tangled.org/core/log"
+	"tangled.org/core/orm"
 )
 
 type DB struct {
···
 		email_notifications integer not null default 0
 	);
 
+	create table if not exists reference_links (
+		id integer primary key autoincrement,
+		from_at text not null,
+		to_at text not null,
+		unique (from_at, to_at)
+	);
+
 	create table if not exists migrations (
 		id integer primary key autoincrement,
 		name text unique
···
 	-- indexes for better performance
 	create index if not exists idx_notifications_recipient_created on notifications(recipient_did, created desc);
 	create index if not exists idx_notifications_recipient_read on notifications(recipient_did, read);
-	create index if not exists idx_stars_created on stars(created);
-	create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
+	create index if not exists idx_references_from_at on reference_links(from_at);
+	create index if not exists idx_references_to_at on reference_links(to_at);
 	`)
 	if err != nil {
 		return nil, err
 	}
 
 	// run migrations
-	runMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-description-to-repos", func(tx *sql.Tx) error {
 		tx.Exec(`
 			alter table repos add column description text check (length(description) <= 200);
 		`)
 		return nil
 	})
 
-	runMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
 		// add unconstrained column
 		_, err := tx.Exec(`
 			alter table public_keys
···
 		return nil
 	})
 
-	runMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-rkey-to-comments", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table comments drop column comment_at;
 			alter table comments add column rkey text;
···
 		return err
 	})
 
-	runMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table comments add column deleted text; -- timestamp
 			alter table comments add column edited text; -- timestamp
···
 		return err
 	})
 
-	runMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table pulls add column source_branch text;
 			alter table pulls add column source_repo_at text;
···
 		return err
 	})
 
-	runMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-source-to-repos", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table repos add column source text;
 		`)
···
 	//
 	// [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
 	conn.ExecContext(ctx, "pragma foreign_keys = off;")
-	runMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			create table pulls_new (
 				-- identifiers
···
 	})
 	conn.ExecContext(ctx, "pragma foreign_keys = on;")
 
-	runMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-spindle-to-repos", func(tx *sql.Tx) error {
 		tx.Exec(`
 			alter table repos add column spindle text;
 		`)
···
 	// drop all knot secrets, add unique constraint to knots
 	//
 	// knots will henceforth use service auth for signed requests
-	runMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "no-more-secrets", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			create table registrations_new (
 				id integer primary key autoincrement,
···
 	})
 
 	// recreate and add rkey + created columns with default constraint
-	runMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "rework-collaborators-table", func(tx *sql.Tx) error {
 		// create new table
 		// - repo_at instead of repo integer
 		// - rkey field
···
 		return err
 	})
 
-	runMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-rkey-to-issues", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table issues add column rkey text not null default '';
 
···
 	})
 
 	// repurpose the read-only column to "needs-upgrade"
-	runMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "rename-registrations-read-only-to-needs-upgrade", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table registrations rename column read_only to needs_upgrade;
 		`)
···
 	})
 
 	// require all knots to upgrade after the release of total xrpc
-	runMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "migrate-knots-to-total-xrpc", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			update registrations set needs_upgrade = 1;
 		`)
···
 	})
 
 	// require all knots to upgrade after the release of total xrpc
-	runMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "migrate-spindles-to-xrpc-owner", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table spindles add column needs_upgrade integer not null default 0;
 		`)
···
 	//
 	// disable foreign-keys for the next migration
 	conn.ExecContext(ctx, "pragma foreign_keys = off;")
-	runMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "remove-issue-at-from-issues", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			create table if not exists issues_new (
 				-- identifiers
···
 	// - new columns
 	// * column "reply_to" which can be any other comment
 	// * column "at-uri" which is a generated column
-	runMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "rework-issue-comments", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			create table if not exists issue_comments (
 				-- identifiers
···
 	//
 	// disable foreign-keys for the next migration
 	conn.ExecContext(ctx, "pragma foreign_keys = off;")
-	runMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-at-uri-to-pulls", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			create table if not exists pulls_new (
 				-- identifiers
···
 	//
 	// disable foreign-keys for the next migration
 	conn.ExecContext(ctx, "pragma foreign_keys = off;")
-	runMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "remove-repo-at-pull-id-from-pull-submissions", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			create table if not exists pull_submissions_new (
 				-- identifiers
···
 
 	// knots may report the combined patch for a comparison, we can store that on the appview side
 	// (but not on the pds record), because calculating the combined patch requires a git index
-	runMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-combined-column-submissions", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table pull_submissions add column combined text;
 		`)
 		return err
 	})
 
-	runMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-pronouns-profile", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table profile add column pronouns text;
 		`)
 		return err
 	})
 
-	runMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-meta-column-repos", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table repos add column website text;
 			alter table repos add column topics text;
···
 		return err
 	})
 
-	runMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
+	orm.RunMigration(conn, logger, "add-usermentioned-preference", func(tx *sql.Tx) error {
 		_, err := tx.Exec(`
 			alter table notification_preferences add column user_mentioned integer not null default 1;
 		`)
 		return err
 	})
 
-	return &DB{
-		db,
-		logger,
-	}, nil
-}
+	// remove the foreign key constraints from stars.
+	orm.RunMigration(conn, logger, "generalize-stars-subject", func(tx *sql.Tx) error {
+		_, err := tx.Exec(`
+			create table stars_new (
+				id integer primary key autoincrement,
+				did text not null,
+				rkey text not null,
+
+				subject_at text not null,
 
-type migrationFn = func(*sql.Tx) error
+				created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
+				unique(did, rkey),
+				unique(did, subject_at)
+			);
 
-func runMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
-	logger = logger.With("migration", name)
+			insert into stars_new (
+				id,
+				did,
+				rkey,
+				subject_at,
+				created
+			)
+			select
+				id,
+				starred_by_did,
+				rkey,
+				repo_at,
+				created
+			from stars;
 
-	tx, err := c.BeginTx(context.Background(), nil)
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
+			drop table stars;
+			alter table stars_new rename to stars;
 
-	var exists bool
-	err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
-	if err != nil {
+			create index if not exists idx_stars_created on stars(created);
+			create index if not exists idx_stars_subject_at_created on stars(subject_at, created);
+		`)
 		return err
-	}
-
-	if !exists {
-		// run migration
-		err = migrationFn(tx)
-		if err != nil {
-			logger.Error("failed to run migration", "err", err)
-			return err
-		}
-
-		// mark migration as complete
-		_, err = tx.Exec("insert into migrations (name) values (?)", name)
-		if err != nil {
-			logger.Error("failed to mark migration as complete", "err", err)
-			return err
-		}
+	})
 
-		// commit the transaction
-		if err := tx.Commit(); err != nil {
-			return err
-		}
-
-		logger.Info("migration applied successfully")
-	} else {
-		logger.Warn("skipped migration, already applied")
-	}
-
-	return nil
+	return &DB{
+		db,
+		logger,
+	}, nil
 }
 
 func (d *DB) Close() error {
 	return d.DB.Close()
 }
-
-type filter struct {
-	key string
-	arg any
-	cmp string
-}
-
-func newFilter(key, cmp string, arg any) filter {
-	return filter{
-		key: key,
-		arg: arg,
-		cmp: cmp,
-	}
-}
-
-func FilterEq(key string, arg any) filter      { return newFilter(key, "=", arg) }
-func FilterNotEq(key string, arg any) filter   { return newFilter(key, "<>", arg) }
-func FilterGte(key string, arg any) filter     { return newFilter(key, ">=", arg) }
-func FilterLte(key string, arg any) filter     { return newFilter(key, "<=", arg) }
-func FilterIs(key string, arg any) filter      { return newFilter(key, "is", arg) }
-func FilterIsNot(key string, arg any) filter   { return newFilter(key, "is not", arg) }
-func FilterIn(key string, arg any) filter      { return newFilter(key, "in", arg) }
-func FilterLike(key string, arg any) filter    { return newFilter(key, "like", arg) }
-func FilterNotLike(key string, arg any) filter { return newFilter(key, "not like", arg) }
-func FilterContains(key string, arg any) filter {
-	return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
-}
-
-func (f filter) Condition() string {
-	rv := reflect.ValueOf(f.arg)
-	kind := rv.Kind()
-
-	// if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
-	if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
-		if rv.Len() == 0 {
-			// always false
-			return "1 = 0"
-		}
-
-		placeholders := make([]string, rv.Len())
-		for i := range placeholders {
-			placeholders[i] = "?"
-		}
-
-		return fmt.Sprintf("%s %s (%s)", f.key, f.cmp, strings.Join(placeholders, ", "))
-	}
-
-	return fmt.Sprintf("%s %s ?", f.key, f.cmp)
-}
-
-func (f filter) Arg() []any {
-	rv := reflect.ValueOf(f.arg)
-	kind := rv.Kind()
-	if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
-		if rv.Len() == 0 {
-			return nil
-		}
-
-		out := make([]any, rv.Len())
-		for i := range rv.Len() {
-			out[i] = rv.Index(i).Interface()
-		}
-		return out
-	}
-
-	return []any{f.arg}
-}
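The new reference_links table is what backs the references field on issues, comments, and pulls: one row per from_at → to_at edge, deduplicated by the unique pair and indexed in both directions. The helpers that write it (putReferences, deleteReferences, GetReferencesAll) are not shown in this diff, so the following is only a guess at the shape of the write path, grounded in the schema above:

```go
package main

import "database/sql"

// linkReference is a guessed sketch, not the real putReferences: it records
// that the record at fromAt refers to the record at toAt, relying on the
// unique(from_at, to_at) constraint to make re-indexing idempotent.
func linkReference(tx *sql.Tx, fromAt, toAt string) error {
	_, err := tx.Exec(
		`insert into reference_links (from_at, to_at)
		 values (?, ?)
		 on conflict (from_at, to_at) do nothing`,
		fromAt, toAt,
	)
	return err
}
```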
appview/db/follow.go (+6, -3)
···
 	"time"
 
 	"tangled.org/core/appview/models"
+	"tangled.org/core/orm"
 )
 
 func AddFollow(e Execer, follow *models.Follow) error {
···
 	return result, nil
 }
 
-func GetFollows(e Execer, limit int, filters ...filter) ([]models.Follow, error) {
+func GetFollows(e Execer, limit int, filters ...orm.Filter) ([]models.Follow, error) {
 	var follows []models.Follow
 
 	var conditions []string
···
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
+
 	for rows.Next() {
 		var follow models.Follow
 		var followedAt string
···
 }
 
 func GetFollowers(e Execer, did string) ([]models.Follow, error) {
-	return GetFollows(e, 0, FilterEq("subject_did", did))
+	return GetFollows(e, 0, orm.FilterEq("subject_did", did))
 }
 
 func GetFollowing(e Execer, did string) ([]models.Follow, error) {
-	return GetFollows(e, 0, FilterEq("user_did", did))
+	return GetFollows(e, 0, orm.FilterEq("user_did", did))
 }
 
 func getFollowStatuses(e Execer, userDid string, subjectDids []string) (map[string]models.FollowStatus, error) {
appview/db/issues.go (+93, -36)
···
 	"time"
 
 	"github.com/bluesky-social/indigo/atproto/syntax"
+	"tangled.org/core/api/tangled"
 	"tangled.org/core/appview/models"
 	"tangled.org/core/appview/pagination"
+	"tangled.org/core/orm"
 )
 
 func PutIssue(tx *sql.Tx, issue *models.Issue) error {
···
 
 	issues, err := GetIssues(
 		tx,
-		FilterEq("did", issue.Did),
-		FilterEq("rkey", issue.Rkey),
+		orm.FilterEq("did", issue.Did),
+		orm.FilterEq("rkey", issue.Rkey),
 	)
 	switch {
 	case err != nil:
···
 		returning rowid, issue_id
 	`, issue.RepoAt, issue.Did, issue.Rkey, newIssueId, issue.Title, issue.Body)
 
-	return row.Scan(&issue.Id, &issue.IssueId)
+	err = row.Scan(&issue.Id, &issue.IssueId)
+	if err != nil {
+		return fmt.Errorf("scan row: %w", err)
+	}
+
+	if err := putReferences(tx, issue.AtUri(), issue.References); err != nil {
+		return fmt.Errorf("put reference_links: %w", err)
+	}
+	return nil
 }
 
 func updateIssue(tx *sql.Tx, issue *models.Issue) error {
···
 		set title = ?, body = ?, edited = ?
 		where did = ? and rkey = ?
 	`, issue.Title, issue.Body, time.Now().Format(time.RFC3339), issue.Did, issue.Rkey)
-	return err
+	if err != nil {
+		return err
+	}
+
+	if err := putReferences(tx, issue.AtUri(), issue.References); err != nil {
+		return fmt.Errorf("put reference_links: %w", err)
+	}
+	return nil
 }
 
-func GetIssuesPaginated(e Execer, page pagination.Page, filters ...filter) ([]models.Issue, error) {
+func GetIssuesPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]models.Issue, error) {
 	issueMap := make(map[string]*models.Issue) // at-uri -> issue
 
 	var conditions []string
···
 		whereClause = " where " + strings.Join(conditions, " and ")
 	}
 
-	pLower := FilterGte("row_num", page.Offset+1)
-	pUpper := FilterLte("row_num", page.Offset+page.Limit)
+	pLower := orm.FilterGte("row_num", page.Offset+1)
+	pUpper := orm.FilterLte("row_num", page.Offset+page.Limit)
 
 	pageClause := ""
 	if page.Limit > 0 {
···
 		repoAts = append(repoAts, string(issue.RepoAt))
 	}
 
-	repos, err := GetRepos(e, 0, FilterIn("at_uri", repoAts))
+	repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoAts))
 	if err != nil {
 		return nil, fmt.Errorf("failed to build repo mappings: %w", err)
 	}
···
 	// collect comments
 	issueAts := slices.Collect(maps.Keys(issueMap))
 
-	comments, err := GetIssueComments(e, FilterIn("issue_at", issueAts))
+	comments, err := GetIssueComments(e, orm.FilterIn("issue_at", issueAts))
 	if err != nil {
 		return nil, fmt.Errorf("failed to query comments: %w", err)
 	}
···
 	}
 
 	// collect allLabels for each issue
-	allLabels, err := GetLabels(e, FilterIn("subject", issueAts))
+	allLabels, err := GetLabels(e, orm.FilterIn("subject", issueAts))
 	if err != nil {
 		return nil, fmt.Errorf("failed to query labels: %w", err)
 	}
···
 		}
 	}
 
+	// collect references for each issue
+	allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", issueAts))
+	if err != nil {
+		return nil, fmt.Errorf("failed to query reference_links: %w", err)
+	}
+	for issueAt, references := range allReferencs {
+		if issue, ok := issueMap[issueAt.String()]; ok {
+			issue.References = references
+		}
+	}
+
 	var issues []models.Issue
 	for _, i := range issueMap {
 		issues = append(issues, *i)
···
 	issues, err := GetIssuesPaginated(
 		e,
 		pagination.Page{},
-		FilterEq("repo_at", repoAt),
-		FilterEq("issue_id", issueId),
+		orm.FilterEq("repo_at", repoAt),
+		orm.FilterEq("issue_id", issueId),
 	)
 	if err != nil {
 		return nil, err
···
 	return &issues[0], nil
 }
 
-func GetIssues(e Execer, filters ...filter) ([]models.Issue, error) {
+func GetIssues(e Execer, filters ...orm.Filter) ([]models.Issue, error) {
 	return GetIssuesPaginated(e, pagination.Page{}, filters...)
 }
 
···
 func GetIssueIDs(e Execer, opts models.IssueSearchOptions) ([]int64, error) {
 	var ids []int64
 
-	var filters []filter
+	var filters []orm.Filter
 	openValue := 0
 	if opts.IsOpen {
 		openValue = 1
 	}
-	filters = append(filters, FilterEq("open", openValue))
+	filters = append(filters, orm.FilterEq("open", openValue))
 	if opts.RepoAt != "" {
-		filters = append(filters, FilterEq("repo_at", opts.RepoAt))
+		filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
 	}
 
 	var conditions []string
···
 	return ids, nil
 }
 
-func AddIssueComment(e Execer, c models.IssueComment) (int64, error) {
-	result, err := e.Exec(
+func AddIssueComment(tx *sql.Tx, c models.IssueComment) (int64, error) {
+	result, err := tx.Exec(
 		`insert into issue_comments (
 			did,
 			rkey,
···
 		return 0, err
 	}
 
+	if err := putReferences(tx, c.AtUri(), c.References); err != nil {
+		return 0, fmt.Errorf("put reference_links: %w", err)
+	}
+
 	return id, nil
 }
 
-func DeleteIssueComments(e Execer, filters ...filter) error {
+func DeleteIssueComments(e Execer, filters ...orm.Filter) error {
 	var conditions []string
 	var args []any
 	for _, filter := range filters {
···
 	return err
 }
 
-func GetIssueComments(e Execer, filters ...filter) ([]models.IssueComment, error) {
-	var comments []models.IssueComment
+func GetIssueComments(e Execer, filters ...orm.Filter) ([]models.IssueComment, error) {
+	commentMap := make(map[string]*models.IssueComment)
 
 	var conditions []string
 	var args []any
···
 	if err != nil {
 		return nil, err
 	}
+	defer rows.Close()
 
 	for rows.Next() {
 		var comment models.IssueComment
···
 			comment.ReplyTo = &replyTo.V
 		}
 
-		comments = append(comments, comment)
+		atUri := comment.AtUri().String()
+		commentMap[atUri] = &comment
 	}
 
 	if err = rows.Err(); err != nil {
 		return nil, err
 	}
 
+	// collect references for each comments
+	commentAts := slices.Collect(maps.Keys(commentMap))
+	allReferencs, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
+	if err != nil {
+		return nil, fmt.Errorf("failed to query reference_links: %w", err)
+	}
+	for commentAt, references := range allReferencs {
+		if comment, ok := commentMap[commentAt.String()]; ok {
+			comment.References = references
+		}
+	}
+
+	var comments []models.IssueComment
+	for _, c := range commentMap {
+		comments = append(comments, *c)
+	}
+
+	sort.Slice(comments, func(i, j int) bool {
+		return comments[i].Created.After(comments[j].Created)
+	})
+
 	return comments, nil
 }
 
-func DeleteIssues(e Execer, filters ...filter) error {
-	var conditions []string
-	var args []any
-	for _, filter := range filters {
-		conditions = append(conditions, filter.Condition())
-		args = append(args, filter.Arg()...)
+func DeleteIssues(tx *sql.Tx, did, rkey string) error {
+	_, err := tx.Exec(
+		`delete from issues
+		where did = ? and rkey = ?`,
+		did,
+		rkey,
+	)
+	if err != nil {
+		return fmt.Errorf("delete issue: %w", err)
 	}
 
-	whereClause := ""
-	if conditions != nil {
-		whereClause = " where " + strings.Join(conditions, " and ")
+	uri := syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", did, tangled.RepoIssueNSID, rkey))
+	err = deleteReferences(tx, uri)
+	if err != nil {
+		return fmt.Errorf("delete reference_links: %w", err)
 	}
 
-	query := fmt.Sprintf(`delete from issues %s`, whereClause)
-	_, err := e.Exec(query, args...)
-	return err
+	return nil
 }
 
-func CloseIssues(e Execer, filters ...filter) error {
+func CloseIssues(e Execer, filters ...orm.Filter) error {
 	var conditions []string
 	var args []any
 	for _, filter := range filters {
···
 	return err
 }
 
-func ReopenIssues(e Execer, filters ...filter) error {
+func ReopenIssues(e Execer, filters ...orm.Filter) error {
 	var conditions []string
 	var args []any
 	for _, filter := range filters {
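Note the changed shape of DeleteIssues: it now requires a transaction and the record's did/rkey, so the issue row and its reference_links rows are removed together. A hypothetical caller; the surrounding *sql.DB plumbing is assumed, only db.DeleteIssues(tx, did, rkey) itself comes from this diff:

```go
package main

import (
	"database/sql"
	"fmt"

	"tangled.org/core/appview/db"
)

// removeIssue deletes an issue and its reference_links rows in one
// transaction, using the new DeleteIssues signature.
func removeIssue(sqlDB *sql.DB, did, rkey string) error {
	tx, err := sqlDB.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	if err := db.DeleteIssues(tx, did, rkey); err != nil {
		return fmt.Errorf("delete issue: %w", err)
	}
	return tx.Commit()
}
```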
appview/db/label.go (+8, -7)
···
 
 	"github.com/bluesky-social/indigo/atproto/syntax"
 	"tangled.org/core/appview/models"
+	"tangled.org/core/orm"
 )
 
 // no updating type for now
···
 	return id, nil
 }
 
-func DeleteLabelDefinition(e Execer, filters ...filter) error {
+func DeleteLabelDefinition(e Execer, filters ...orm.Filter) error {
 	var conditions []string
 	var args []any
 	for _, filter := range filters {
···
 	return err
 }
 
-func GetLabelDefinitions(e Execer, filters ...filter) ([]models.LabelDefinition, error) {
+func GetLabelDefinitions(e Execer, filters ...orm.Filter) ([]models.LabelDefinition, error) {
 	var labelDefinitions []models.LabelDefinition
 	var conditions []string
 	var args []any
···
 }
 
 // helper to get exactly one label def
-func GetLabelDefinition(e Execer, filters ...filter) (*models.LabelDefinition, error) {
+func GetLabelDefinition(e Execer, filters ...orm.Filter) (*models.LabelDefinition, error) {
 	labels, err := GetLabelDefinitions(e, filters...)
 	if err != nil {
 		return nil, err
···
 	return id, nil
 }
 
-func GetLabelOps(e Execer, filters ...filter) ([]models.LabelOp, error) {
+func GetLabelOps(e Execer, filters ...orm.Filter) ([]models.LabelOp, error) {
 	var labelOps []models.LabelOp
 	var conditions []string
 	var args []any
···
 }
 
 // get labels for a given list of subject URIs
-func GetLabels(e Execer, filters ...filter) (map[syntax.ATURI]models.LabelState, error) {
+func GetLabels(e Execer, filters ...orm.Filter) (map[syntax.ATURI]models.LabelState, error) {
 	ops, err := GetLabelOps(e, filters...)
 	if err != nil {
 		return nil, err
···
 	}
 	labelAts := slices.Collect(maps.Keys(labelAtSet))
 
-	actx, err := NewLabelApplicationCtx(e, FilterIn("at_uri", labelAts))
+	actx, err := NewLabelApplicationCtx(e, orm.FilterIn("at_uri", labelAts))
 	if err != nil {
 		return nil, err
 	}
···
 	return results, nil
 }
 
-func NewLabelApplicationCtx(e Execer, filters ...filter) (*models.LabelApplicationCtx, error) {
+func NewLabelApplicationCtx(e Execer, filters ...orm.Filter) (*models.LabelApplicationCtx, error) {
 	labels, err := GetLabelDefinitions(e, filters...)
 	if err != nil {
 		return nil, err
+6
-5
appview/db/language.go
···
7
7
8
8
"github.com/bluesky-social/indigo/atproto/syntax"
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetRepoLanguages(e Execer, filters ...filter) ([]models.RepoLanguage, error) {
13
+
func GetRepoLanguages(e Execer, filters ...orm.Filter) ([]models.RepoLanguage, error) {
13
14
var conditions []string
14
15
var args []any
15
16
for _, filter := range filters {
···
27
28
whereClause,
28
29
)
29
30
rows, err := e.Query(query, args...)
30
-
31
31
if err != nil {
32
32
return nil, fmt.Errorf("failed to execute query: %w ", err)
33
33
}
34
+
defer rows.Close()
34
35
35
36
var langs []models.RepoLanguage
36
37
for rows.Next() {
···
85
86
return nil
86
87
}
87
88
88
-
func DeleteRepoLanguages(e Execer, filters ...filter) error {
89
+
func DeleteRepoLanguages(e Execer, filters ...orm.Filter) error {
89
90
var conditions []string
90
91
var args []any
91
92
for _, filter := range filters {
···
107
108
func UpdateRepoLanguages(tx *sql.Tx, repoAt syntax.ATURI, ref string, langs []models.RepoLanguage) error {
108
109
err := DeleteRepoLanguages(
109
110
tx,
110
-
FilterEq("repo_at", repoAt),
111
-
FilterEq("ref", ref),
111
+
orm.FilterEq("repo_at", repoAt),
112
+
orm.FilterEq("ref", ref),
112
113
)
113
114
if err != nil {
114
115
return fmt.Errorf("failed to delete existing languages: %w", err)
+14
-13
appview/db/notifications.go
···
11
11
"github.com/bluesky-social/indigo/atproto/syntax"
12
12
"tangled.org/core/appview/models"
13
13
"tangled.org/core/appview/pagination"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
func CreateNotification(e Execer, notification *models.Notification) error {
···
44
45
}
45
46
46
47
// GetNotificationsPaginated retrieves notifications with filters and pagination
47
-
func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...filter) ([]*models.Notification, error) {
48
+
func GetNotificationsPaginated(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.Notification, error) {
48
49
var conditions []string
49
50
var args []any
50
51
···
113
114
}
114
115
115
116
// GetNotificationsWithEntities retrieves notifications with their related entities
116
-
func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...filter) ([]*models.NotificationWithEntity, error) {
117
+
func GetNotificationsWithEntities(e Execer, page pagination.Page, filters ...orm.Filter) ([]*models.NotificationWithEntity, error) {
117
118
var conditions []string
118
119
var args []any
119
120
···
256
257
}
257
258
258
259
// GetNotifications retrieves notifications with filters
259
-
func GetNotifications(e Execer, filters ...filter) ([]*models.Notification, error) {
260
+
func GetNotifications(e Execer, filters ...orm.Filter) ([]*models.Notification, error) {
260
261
return GetNotificationsPaginated(e, pagination.FirstPage(), filters...)
261
262
}
262
263
263
-
func CountNotifications(e Execer, filters ...filter) (int64, error) {
264
+
func CountNotifications(e Execer, filters ...orm.Filter) (int64, error) {
264
265
var conditions []string
265
266
var args []any
266
267
for _, filter := range filters {
···
285
286
}
286
287
287
288
func MarkNotificationRead(e Execer, notificationID int64, userDID string) error {
288
-
idFilter := FilterEq("id", notificationID)
289
-
recipientFilter := FilterEq("recipient_did", userDID)
289
+
idFilter := orm.FilterEq("id", notificationID)
290
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
290
291
291
292
query := fmt.Sprintf(`
292
293
UPDATE notifications
···
314
315
}
315
316
316
317
func MarkAllNotificationsRead(e Execer, userDID string) error {
317
-
recipientFilter := FilterEq("recipient_did", userDID)
318
-
readFilter := FilterEq("read", 0)
318
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
319
+
readFilter := orm.FilterEq("read", 0)
319
320
320
321
query := fmt.Sprintf(`
321
322
UPDATE notifications
···
334
335
}
335
336
336
337
func DeleteNotification(e Execer, notificationID int64, userDID string) error {
337
-
idFilter := FilterEq("id", notificationID)
338
-
recipientFilter := FilterEq("recipient_did", userDID)
338
+
idFilter := orm.FilterEq("id", notificationID)
339
+
recipientFilter := orm.FilterEq("recipient_did", userDID)
339
340
340
341
query := fmt.Sprintf(`
341
342
DELETE FROM notifications
···
362
363
}
363
364
364
365
func GetNotificationPreference(e Execer, userDid string) (*models.NotificationPreferences, error) {
365
-
prefs, err := GetNotificationPreferences(e, FilterEq("user_did", userDid))
366
+
prefs, err := GetNotificationPreferences(e, orm.FilterEq("user_did", userDid))
366
367
if err != nil {
367
368
return nil, err
368
369
}
···
375
376
return p, nil
376
377
}
377
378
378
-
func GetNotificationPreferences(e Execer, filters ...filter) (map[syntax.DID]*models.NotificationPreferences, error) {
379
+
func GetNotificationPreferences(e Execer, filters ...orm.Filter) (map[syntax.DID]*models.NotificationPreferences, error) {
379
380
prefsMap := make(map[syntax.DID]*models.NotificationPreferences)
380
381
381
382
var conditions []string
···
483
484
484
485
func (d *DB) ClearOldNotifications(ctx context.Context, olderThan time.Duration) error {
485
486
cutoff := time.Now().Add(-olderThan)
486
-
createdFilter := FilterLte("created", cutoff)
487
+
createdFilter := orm.FilterLte("created", cutoff)
487
488
488
489
query := fmt.Sprintf(`
489
490
DELETE FROM notifications
+12
-11
appview/db/pipeline.go
···
6
6
"strings"
7
7
"time"
8
8
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
10
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
10
12
)
11
13
12
-
func GetPipelines(e Execer, filters ...filter) ([]models.Pipeline, error) {
14
+
func GetPipelines(e Execer, filters ...orm.Filter) ([]models.Pipeline, error) {
13
15
var pipelines []models.Pipeline
14
16
15
17
var conditions []string
···
168
170
169
171
// this is a mega query, but the most useful one:
170
172
// get N pipelines, for each one get the latest status of its N workflows
171
-
func GetPipelineStatuses(e Execer, limit int, filters ...filter) ([]models.Pipeline, error) {
173
+
func GetPipelineStatuses(e Execer, limit int, filters ...orm.Filter) ([]models.Pipeline, error) {
172
174
var conditions []string
173
175
var args []any
174
176
for _, filter := range filters {
175
-
filter.key = "p." + filter.key // the table is aliased in the query to `p`
177
+
filter.Key = "p." + filter.Key // the table is aliased in the query to `p`
176
178
conditions = append(conditions, filter.Condition())
177
179
args = append(args, filter.Arg()...)
178
180
}
···
215
217
}
216
218
defer rows.Close()
217
219
218
-
pipelines := make(map[string]models.Pipeline)
220
+
pipelines := make(map[syntax.ATURI]models.Pipeline)
219
221
for rows.Next() {
220
222
var p models.Pipeline
221
223
var t models.Trigger
···
252
254
p.Trigger = &t
253
255
p.Statuses = make(map[string]models.WorkflowStatus)
254
256
255
-
k := fmt.Sprintf("%s/%s", p.Knot, p.Rkey)
256
-
pipelines[k] = p
257
+
pipelines[p.AtUri()] = p
257
258
}
258
259
259
260
// get all statuses
···
264
265
conditions = nil
265
266
args = nil
266
267
for _, p := range pipelines {
267
-
knotFilter := FilterEq("pipeline_knot", p.Knot)
268
-
rkeyFilter := FilterEq("pipeline_rkey", p.Rkey)
268
+
knotFilter := orm.FilterEq("pipeline_knot", p.Knot)
269
+
rkeyFilter := orm.FilterEq("pipeline_rkey", p.Rkey)
269
270
conditions = append(conditions, fmt.Sprintf("(%s and %s)", knotFilter.Condition(), rkeyFilter.Condition()))
270
271
args = append(args, p.Knot)
271
272
args = append(args, p.Rkey)
···
313
314
return nil, fmt.Errorf("invalid status created timestamp %q: %w", created, err)
314
315
}
315
316
316
-
key := fmt.Sprintf("%s/%s", ps.PipelineKnot, ps.PipelineRkey)
317
+
pipelineAt := ps.PipelineAt()
317
318
318
319
// extract
319
-
pipeline, ok := pipelines[key]
320
+
pipeline, ok := pipelines[pipelineAt]
320
321
if !ok {
321
322
continue
322
323
}
···
330
331
331
332
// reassign
332
333
pipeline.Statuses[ps.Workflow] = statuses
333
-
pipelines[key] = pipeline
334
+
pipelines[pipelineAt] = pipeline
334
335
}
335
336
336
337
var all []models.Pipeline
+11
-5
appview/db/profile.go
···
11
11
12
12
"github.com/bluesky-social/indigo/atproto/syntax"
13
13
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
const TimeframeMonths = 7
···
44
45
45
46
issues, err := GetIssues(
46
47
e,
47
-
FilterEq("did", forDid),
48
-
FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
48
+
orm.FilterEq("did", forDid),
49
+
orm.FilterGte("created", time.Now().AddDate(0, -TimeframeMonths, 0)),
49
50
)
50
51
if err != nil {
51
52
return nil, fmt.Errorf("error getting issues by owner did: %w", err)
···
65
66
*items = append(*items, &issue)
66
67
}
67
68
68
-
repos, err := GetRepos(e, 0, FilterEq("did", forDid))
69
+
repos, err := GetRepos(e, 0, orm.FilterEq("did", forDid))
69
70
if err != nil {
70
71
return nil, fmt.Errorf("error getting all repos by did: %w", err)
71
72
}
···
199
200
return tx.Commit()
200
201
}
201
202
202
-
func GetProfiles(e Execer, filters ...filter) (map[string]*models.Profile, error) {
203
+
func GetProfiles(e Execer, filters ...orm.Filter) (map[string]*models.Profile, error) {
203
204
var conditions []string
204
205
var args []any
205
206
for _, filter := range filters {
···
229
230
if err != nil {
230
231
return nil, err
231
232
}
233
+
defer rows.Close()
232
234
233
235
profileMap := make(map[string]*models.Profile)
234
236
for rows.Next() {
···
269
271
if err != nil {
270
272
return nil, err
271
273
}
274
+
defer rows.Close()
275
+
272
276
idxs := make(map[string]int)
273
277
for did := range profileMap {
274
278
idxs[did] = 0
···
289
293
if err != nil {
290
294
return nil, err
291
295
}
296
+
defer rows.Close()
297
+
292
298
idxs = make(map[string]int)
293
299
for did := range profileMap {
294
300
idxs[did] = 0
···
441
447
}
442
448
443
449
// ensure all pinned repos are either own repos or collaborating repos
444
-
repos, err := GetRepos(e, 0, FilterEq("did", profile.Did))
450
+
repos, err := GetRepos(e, 0, orm.FilterEq("did", profile.Did))
445
451
if err != nil {
446
452
log.Printf("getting repos for %s: %s", profile.Did, err)
447
453
}
+69
-24
appview/db/pulls.go
···
13
13
14
14
"github.com/bluesky-social/indigo/atproto/syntax"
15
15
"tangled.org/core/appview/models"
16
+
"tangled.org/core/orm"
16
17
)
17
18
18
19
func NewPull(tx *sql.Tx, pull *models.Pull) error {
···
93
94
insert into pull_submissions (pull_at, round_number, patch, combined, source_rev)
94
95
values (?, ?, ?, ?, ?)
95
96
`, pull.AtUri(), 0, pull.Submissions[0].Patch, pull.Submissions[0].Combined, pull.Submissions[0].SourceRev)
96
-
return err
97
+
if err != nil {
98
+
return err
99
+
}
100
+
101
+
if err := putReferences(tx, pull.AtUri(), pull.References); err != nil {
102
+
return fmt.Errorf("put reference_links: %w", err)
103
+
}
104
+
105
+
return nil
97
106
}
98
107
99
108
func GetPullAt(e Execer, repoAt syntax.ATURI, pullId int) (syntax.ATURI, error) {
···
110
119
return pullId - 1, err
111
120
}
112
121
113
-
func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*models.Pull, error) {
122
+
func GetPullsWithLimit(e Execer, limit int, filters ...orm.Filter) ([]*models.Pull, error) {
114
123
pulls := make(map[syntax.ATURI]*models.Pull)
115
124
116
125
var conditions []string
···
221
230
for _, p := range pulls {
222
231
pullAts = append(pullAts, p.AtUri())
223
232
}
224
-
submissionsMap, err := GetPullSubmissions(e, FilterIn("pull_at", pullAts))
233
+
submissionsMap, err := GetPullSubmissions(e, orm.FilterIn("pull_at", pullAts))
225
234
if err != nil {
226
235
return nil, fmt.Errorf("failed to get submissions: %w", err)
227
236
}
···
233
242
}
234
243
235
244
// collect allLabels for each issue
236
-
allLabels, err := GetLabels(e, FilterIn("subject", pullAts))
245
+
allLabels, err := GetLabels(e, orm.FilterIn("subject", pullAts))
237
246
if err != nil {
238
247
return nil, fmt.Errorf("failed to query labels: %w", err)
239
248
}
···
250
259
sourceAts = append(sourceAts, *p.PullSource.RepoAt)
251
260
}
252
261
}
253
-
sourceRepos, err := GetRepos(e, 0, FilterIn("at_uri", sourceAts))
262
+
sourceRepos, err := GetRepos(e, 0, orm.FilterIn("at_uri", sourceAts))
254
263
if err != nil && !errors.Is(err, sql.ErrNoRows) {
255
264
return nil, fmt.Errorf("failed to get source repos: %w", err)
256
265
}
···
266
275
}
267
276
}
268
277
278
+
allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", pullAts))
279
+
if err != nil {
280
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
281
+
}
282
+
for pullAt, references := range allReferences {
283
+
if pull, ok := pulls[pullAt]; ok {
284
+
pull.References = references
285
+
}
286
+
}
287
+
269
288
orderedByPullId := []*models.Pull{}
270
289
for _, p := range pulls {
271
290
orderedByPullId = append(orderedByPullId, p)
···
277
296
return orderedByPullId, nil
278
297
}
279
298
280
-
func GetPulls(e Execer, filters ...filter) ([]*models.Pull, error) {
299
+
func GetPulls(e Execer, filters ...orm.Filter) ([]*models.Pull, error) {
281
300
return GetPullsWithLimit(e, 0, filters...)
282
301
}
283
302
284
303
func GetPullIDs(e Execer, opts models.PullSearchOptions) ([]int64, error) {
285
304
var ids []int64
286
305
287
-
var filters []filter
288
-
filters = append(filters, FilterEq("state", opts.State))
306
+
var filters []orm.Filter
307
+
filters = append(filters, orm.FilterEq("state", opts.State))
289
308
if opts.RepoAt != "" {
290
-
filters = append(filters, FilterEq("repo_at", opts.RepoAt))
309
+
filters = append(filters, orm.FilterEq("repo_at", opts.RepoAt))
291
310
}
292
311
293
312
var conditions []string
···
343
362
}
344
363
345
364
func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*models.Pull, error) {
346
-
pulls, err := GetPullsWithLimit(e, 1, FilterEq("repo_at", repoAt), FilterEq("pull_id", pullId))
365
+
pulls, err := GetPullsWithLimit(e, 1, orm.FilterEq("repo_at", repoAt), orm.FilterEq("pull_id", pullId))
347
366
if err != nil {
348
367
return nil, err
349
368
}
···
355
374
}
356
375
357
376
// mapping from pull -> pull submissions
358
-
func GetPullSubmissions(e Execer, filters ...filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
377
+
func GetPullSubmissions(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]*models.PullSubmission, error) {
359
378
var conditions []string
360
379
var args []any
361
380
for _, filter := range filters {
···
430
449
431
450
// Get comments for all submissions using GetPullComments
432
451
submissionIds := slices.Collect(maps.Keys(submissionMap))
433
-
comments, err := GetPullComments(e, FilterIn("submission_id", submissionIds))
452
+
comments, err := GetPullComments(e, orm.FilterIn("submission_id", submissionIds))
434
453
if err != nil {
435
-
return nil, err
454
+
return nil, fmt.Errorf("failed to get pull comments: %w", err)
436
455
}
437
456
for _, comment := range comments {
438
457
if submission, ok := submissionMap[comment.SubmissionId]; ok {
···
456
475
return m, nil
457
476
}
458
477
459
-
func GetPullComments(e Execer, filters ...filter) ([]models.PullComment, error) {
478
+
func GetPullComments(e Execer, filters ...orm.Filter) ([]models.PullComment, error) {
460
479
var conditions []string
461
480
var args []any
462
481
for _, filter := range filters {
···
492
511
}
493
512
defer rows.Close()
494
513
495
-
var comments []models.PullComment
514
+
commentMap := make(map[string]*models.PullComment)
496
515
for rows.Next() {
497
516
var comment models.PullComment
498
517
var createdAt string
···
514
533
comment.Created = t
515
534
}
516
535
517
-
comments = append(comments, comment)
536
+
atUri := comment.AtUri().String()
537
+
commentMap[atUri] = &comment
518
538
}
519
539
520
540
if err := rows.Err(); err != nil {
521
541
return nil, err
522
542
}
523
543
544
+
// collect references for each comment
545
+
commentAts := slices.Collect(maps.Keys(commentMap))
546
+
allReferences, err := GetReferencesAll(e, orm.FilterIn("from_at", commentAts))
547
+
if err != nil {
548
+
return nil, fmt.Errorf("failed to query reference_links: %w", err)
549
+
}
550
+
for commentAt, references := range allReferences {
551
+
if comment, ok := commentMap[commentAt.String()]; ok {
552
+
comment.References = references
553
+
}
554
+
}
555
+
556
+
var comments []models.PullComment
557
+
for _, c := range commentMap {
558
+
comments = append(comments, *c)
559
+
}
560
+
561
+
sort.Slice(comments, func(i, j int) bool {
562
+
return comments[i].Created.Before(comments[j].Created)
563
+
})
564
+
524
565
return comments, nil
525
566
}
526
567
···
600
641
return pulls, nil
601
642
}
602
643
603
-
func NewPullComment(e Execer, comment *models.PullComment) (int64, error) {
644
+
func NewPullComment(tx *sql.Tx, comment *models.PullComment) (int64, error) {
604
645
query := `insert into pull_comments (owner_did, repo_at, submission_id, comment_at, pull_id, body) values (?, ?, ?, ?, ?, ?)`
605
-
res, err := e.Exec(
646
+
res, err := tx.Exec(
606
647
query,
607
648
comment.OwnerDid,
608
649
comment.RepoAt,
···
618
659
i, err := res.LastInsertId()
619
660
if err != nil {
620
661
return 0, err
662
+
}
663
+
664
+
if err := putReferences(tx, comment.AtUri(), comment.References); err != nil {
665
+
return 0, fmt.Errorf("put reference_links: %w", err)
621
666
}
622
667
623
668
return i, nil
···
664
709
return err
665
710
}
666
711
667
-
func SetPullParentChangeId(e Execer, parentChangeId string, filters ...filter) error {
712
+
func SetPullParentChangeId(e Execer, parentChangeId string, filters ...orm.Filter) error {
668
713
var conditions []string
669
714
var args []any
670
715
···
688
733
689
734
// Only used when stacking to update contents in the event of a rebase (the interdiff should be empty).
690
735
// otherwise submissions are immutable
691
-
func UpdatePull(e Execer, newPatch, sourceRev string, filters ...filter) error {
736
+
func UpdatePull(e Execer, newPatch, sourceRev string, filters ...orm.Filter) error {
692
737
var conditions []string
693
738
var args []any
694
739
···
746
791
func GetStack(e Execer, stackId string) (models.Stack, error) {
747
792
unorderedPulls, err := GetPulls(
748
793
e,
749
-
FilterEq("stack_id", stackId),
750
-
FilterNotEq("state", models.PullDeleted),
794
+
orm.FilterEq("stack_id", stackId),
795
+
orm.FilterNotEq("state", models.PullDeleted),
751
796
)
752
797
if err != nil {
753
798
return nil, err
···
791
836
func GetAbandonedPulls(e Execer, stackId string) ([]*models.Pull, error) {
792
837
pulls, err := GetPulls(
793
838
e,
794
-
FilterEq("stack_id", stackId),
795
-
FilterEq("state", models.PullDeleted),
839
+
orm.FilterEq("stack_id", stackId),
840
+
orm.FilterEq("state", models.PullDeleted),
796
841
)
797
842
if err != nil {
798
843
return nil, err
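
Note on the signature change above: NewPullComment now takes a *sql.Tx rather than an Execer, so the comment row and its reference_links land in one transaction. A minimal caller sketch, not part of the diff; the database handle d, the ctx, and the pre-resolved refs slice are assumptions:

// sketch: persist a pull comment and its reference links atomically
tx, err := d.BeginTx(ctx, nil)
if err != nil {
	return err
}
defer tx.Rollback()

comment.References = refs // []syntax.ATURI, e.g. resolved via db.ValidateReferenceLinks
if _, err := db.NewPullComment(tx, &comment); err != nil {
	return err
}
return tx.Commit()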
+2
-1
appview/db/punchcard.go
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
13
// this adds to the existing count
···
20
21
return err
21
22
}
22
23
23
-
func MakePunchcard(e Execer, filters ...filter) (*models.Punchcard, error) {
24
+
func MakePunchcard(e Execer, filters ...orm.Filter) (*models.Punchcard, error) {
24
25
punchcard := &models.Punchcard{}
25
26
now := time.Now()
26
27
startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
+463
appview/db/reference.go
···
1
+
package db
2
+
3
+
import (
4
+
"database/sql"
5
+
"fmt"
6
+
"strings"
7
+
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"tangled.org/core/api/tangled"
10
+
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
12
+
)
13
+
14
+
// ValidateReferenceLinks resolves refLinks to Issue/PR/IssueComment/PullComment ATURIs.
15
+
// It will ignore missing refLinks.
16
+
func ValidateReferenceLinks(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
17
+
var (
18
+
issueRefs []models.ReferenceLink
19
+
pullRefs []models.ReferenceLink
20
+
)
21
+
for _, ref := range refLinks {
22
+
switch ref.Kind {
23
+
case models.RefKindIssue:
24
+
issueRefs = append(issueRefs, ref)
25
+
case models.RefKindPull:
26
+
pullRefs = append(pullRefs, ref)
27
+
}
28
+
}
29
+
issueUris, err := findIssueReferences(e, issueRefs)
30
+
if err != nil {
31
+
return nil, fmt.Errorf("find issue references: %w", err)
32
+
}
33
+
pullUris, err := findPullReferences(e, pullRefs)
34
+
if err != nil {
35
+
return nil, fmt.Errorf("find pull references: %w", err)
36
+
}
37
+
38
+
return append(issueUris, pullUris...), nil
39
+
}
40
+
41
+
func findIssueReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
42
+
if len(refLinks) == 0 {
43
+
return nil, nil
44
+
}
45
+
vals := make([]string, len(refLinks))
46
+
args := make([]any, 0, len(refLinks)*4)
47
+
for i, ref := range refLinks {
48
+
vals[i] = "(?, ?, ?, ?)"
49
+
args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId)
50
+
}
51
+
query := fmt.Sprintf(
52
+
`with input(owner_did, name, issue_id, comment_id) as (
53
+
values %s
54
+
)
55
+
select
56
+
i.did, i.rkey,
57
+
c.did, c.rkey
58
+
from input inp
59
+
join repos r
60
+
on r.did = inp.owner_did
61
+
and r.name = inp.name
62
+
join issues i
63
+
on i.repo_at = r.at_uri
64
+
and i.issue_id = inp.issue_id
65
+
left join issue_comments c
66
+
on inp.comment_id is not null
67
+
and c.issue_at = i.at_uri
68
+
and c.id = inp.comment_id
69
+
`,
70
+
strings.Join(vals, ","),
71
+
)
72
+
rows, err := e.Query(query, args...)
73
+
if err != nil {
74
+
return nil, err
75
+
}
76
+
defer rows.Close()
77
+
78
+
var uris []syntax.ATURI
79
+
80
+
for rows.Next() {
81
+
// Scan rows
82
+
var issueOwner, issueRkey string
83
+
var commentOwner, commentRkey sql.NullString
84
+
var uri syntax.ATURI
85
+
if err := rows.Scan(&issueOwner, &issueRkey, &commentOwner, &commentRkey); err != nil {
86
+
return nil, err
87
+
}
88
+
if commentOwner.Valid && commentRkey.Valid {
89
+
uri = syntax.ATURI(fmt.Sprintf(
90
+
"at://%s/%s/%s",
91
+
commentOwner.String,
92
+
tangled.RepoIssueCommentNSID,
93
+
commentRkey.String,
94
+
))
95
+
} else {
96
+
uri = syntax.ATURI(fmt.Sprintf(
97
+
"at://%s/%s/%s",
98
+
issueOwner,
99
+
tangled.RepoIssueNSID,
100
+
issueRkey,
101
+
))
102
+
}
103
+
uris = append(uris, uri)
104
+
}
105
+
if err := rows.Err(); err != nil {
106
+
return nil, fmt.Errorf("iterate rows: %w", err)
107
+
}
108
+
109
+
return uris, nil
110
+
}
111
+
112
+
func findPullReferences(e Execer, refLinks []models.ReferenceLink) ([]syntax.ATURI, error) {
113
+
if len(refLinks) == 0 {
114
+
return nil, nil
115
+
}
116
+
vals := make([]string, len(refLinks))
117
+
args := make([]any, 0, len(refLinks)*4)
118
+
for i, ref := range refLinks {
119
+
vals[i] = "(?, ?, ?, ?)"
120
+
args = append(args, ref.Handle, ref.Repo, ref.SubjectId, ref.CommentId)
121
+
}
122
+
query := fmt.Sprintf(
123
+
`with input(owner_did, name, pull_id, comment_id) as (
124
+
values %s
125
+
)
126
+
select
127
+
p.owner_did, p.rkey,
128
+
c.comment_at
129
+
from input inp
130
+
join repos r
131
+
on r.did = inp.owner_did
132
+
and r.name = inp.name
133
+
join pulls p
134
+
on p.repo_at = r.at_uri
135
+
and p.pull_id = inp.pull_id
136
+
left join pull_comments c
137
+
on inp.comment_id is not null
138
+
and c.repo_at = r.at_uri and c.pull_id = p.pull_id
139
+
and c.id = inp.comment_id
140
+
`,
141
+
strings.Join(vals, ","),
142
+
)
143
+
rows, err := e.Query(query, args...)
144
+
if err != nil {
145
+
return nil, err
146
+
}
147
+
defer rows.Close()
148
+
149
+
var uris []syntax.ATURI
150
+
151
+
for rows.Next() {
152
+
// Scan rows
153
+
var pullOwner, pullRkey string
154
+
var commentUri sql.NullString
155
+
var uri syntax.ATURI
156
+
if err := rows.Scan(&pullOwner, &pullRkey, &commentUri); err != nil {
157
+
return nil, err
158
+
}
159
+
if commentUri.Valid {
160
+
// comment_at already stores the full at-uri
161
+
uri = syntax.ATURI(commentUri.String)
162
+
} else {
163
+
uri = syntax.ATURI(fmt.Sprintf(
164
+
"at://%s/%s/%s",
165
+
pullOwner,
166
+
tangled.RepoPullNSID,
167
+
pullRkey,
168
+
))
169
+
}
170
+
uris = append(uris, uri)
171
+
}
172
+
return uris, nil
173
+
}
174
+
175
+
func putReferences(tx *sql.Tx, fromAt syntax.ATURI, references []syntax.ATURI) error {
176
+
err := deleteReferences(tx, fromAt)
177
+
if err != nil {
178
+
return fmt.Errorf("delete old reference_links: %w", err)
179
+
}
180
+
if len(references) == 0 {
181
+
return nil
182
+
}
183
+
184
+
values := make([]string, 0, len(references))
185
+
args := make([]any, 0, len(references)*2)
186
+
for _, ref := range references {
187
+
values = append(values, "(?, ?)")
188
+
args = append(args, fromAt, ref)
189
+
}
190
+
_, err = tx.Exec(
191
+
fmt.Sprintf(
192
+
`insert into reference_links (from_at, to_at)
193
+
values %s`,
194
+
strings.Join(values, ","),
195
+
),
196
+
args...,
197
+
)
198
+
if err != nil {
199
+
return fmt.Errorf("insert new reference_links: %w", err)
200
+
}
201
+
return nil
202
+
}
203
+
204
+
func deleteReferences(tx *sql.Tx, fromAt syntax.ATURI) error {
205
+
_, err := tx.Exec(`delete from reference_links where from_at = ?`, fromAt)
206
+
return err
207
+
}
208
+
209
+
func GetReferencesAll(e Execer, filters ...orm.Filter) (map[syntax.ATURI][]syntax.ATURI, error) {
210
+
var (
211
+
conditions []string
212
+
args []any
213
+
)
214
+
for _, filter := range filters {
215
+
conditions = append(conditions, filter.Condition())
216
+
args = append(args, filter.Arg()...)
217
+
}
218
+
219
+
whereClause := ""
220
+
if conditions != nil {
221
+
whereClause = " where " + strings.Join(conditions, " and ")
222
+
}
223
+
224
+
rows, err := e.Query(
225
+
fmt.Sprintf(
226
+
`select from_at, to_at from reference_links %s`,
227
+
whereClause,
228
+
),
229
+
args...,
230
+
)
231
+
if err != nil {
232
+
return nil, fmt.Errorf("query reference_links: %w", err)
233
+
}
234
+
defer rows.Close()
235
+
236
+
result := make(map[syntax.ATURI][]syntax.ATURI)
237
+
238
+
for rows.Next() {
239
+
var from, to syntax.ATURI
240
+
if err := rows.Scan(&from, &to); err != nil {
241
+
return nil, fmt.Errorf("scan row: %w", err)
242
+
}
243
+
244
+
result[from] = append(result[from], to)
245
+
}
246
+
if err := rows.Err(); err != nil {
247
+
return nil, fmt.Errorf("iterate rows: %w", err)
248
+
}
249
+
250
+
return result, nil
251
+
}
252
+
253
+
func GetBacklinks(e Execer, target syntax.ATURI) ([]models.RichReferenceLink, error) {
254
+
rows, err := e.Query(
255
+
`select from_at from reference_links
256
+
where to_at = ?`,
257
+
target,
258
+
)
259
+
if err != nil {
260
+
return nil, fmt.Errorf("query backlinks: %w", err)
261
+
}
262
+
defer rows.Close()
263
+
264
+
var (
265
+
backlinks []models.RichReferenceLink
266
+
backlinksMap = make(map[string][]syntax.ATURI)
267
+
)
268
+
for rows.Next() {
269
+
var from syntax.ATURI
270
+
if err := rows.Scan(&from); err != nil {
271
+
return nil, fmt.Errorf("scan row: %w", err)
272
+
}
273
+
nsid := from.Collection().String()
274
+
backlinksMap[nsid] = append(backlinksMap[nsid], from)
275
+
}
276
+
if err := rows.Err(); err != nil {
277
+
return nil, fmt.Errorf("iterate rows: %w", err)
278
+
}
279
+
280
+
var ls []models.RichReferenceLink
281
+
ls, err = getIssueBacklinks(e, backlinksMap[tangled.RepoIssueNSID])
282
+
if err != nil {
283
+
return nil, fmt.Errorf("get issue backlinks: %w", err)
284
+
}
285
+
backlinks = append(backlinks, ls...)
286
+
ls, err = getIssueCommentBacklinks(e, backlinksMap[tangled.RepoIssueCommentNSID])
287
+
if err != nil {
288
+
return nil, fmt.Errorf("get issue_comment backlinks: %w", err)
289
+
}
290
+
backlinks = append(backlinks, ls...)
291
+
ls, err = getPullBacklinks(e, backlinksMap[tangled.RepoPullNSID])
292
+
if err != nil {
293
+
return nil, fmt.Errorf("get pull backlinks: %w", err)
294
+
}
295
+
backlinks = append(backlinks, ls...)
296
+
ls, err = getPullCommentBacklinks(e, backlinksMap[tangled.RepoPullCommentNSID])
297
+
if err != nil {
298
+
return nil, fmt.Errorf("get pull_comment backlinks: %w", err)
299
+
}
300
+
backlinks = append(backlinks, ls...)
301
+
302
+
return backlinks, nil
303
+
}
304
+
305
+
func getIssueBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
306
+
if len(aturis) == 0 {
307
+
return nil, nil
308
+
}
309
+
vals := make([]string, len(aturis))
310
+
args := make([]any, 0, len(aturis)*2)
311
+
for i, aturi := range aturis {
312
+
vals[i] = "(?, ?)"
313
+
did := aturi.Authority().String()
314
+
rkey := aturi.RecordKey().String()
315
+
args = append(args, did, rkey)
316
+
}
317
+
rows, err := e.Query(
318
+
fmt.Sprintf(
319
+
`select r.did, r.name, i.issue_id, i.title, i.open
320
+
from issues i
321
+
join repos r
322
+
on r.at_uri = i.repo_at
323
+
where (i.did, i.rkey) in (%s)`,
324
+
strings.Join(vals, ","),
325
+
),
326
+
args...,
327
+
)
328
+
if err != nil {
329
+
return nil, err
330
+
}
331
+
defer rows.Close()
332
+
var refLinks []models.RichReferenceLink
333
+
for rows.Next() {
334
+
var l models.RichReferenceLink
335
+
l.Kind = models.RefKindIssue
336
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil {
337
+
return nil, err
338
+
}
339
+
refLinks = append(refLinks, l)
340
+
}
341
+
if err := rows.Err(); err != nil {
342
+
return nil, fmt.Errorf("iterate rows: %w", err)
343
+
}
344
+
return refLinks, nil
345
+
}
346
+
347
+
func getIssueCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
348
+
if len(aturis) == 0 {
349
+
return nil, nil
350
+
}
351
+
filter := orm.FilterIn("c.at_uri", aturis)
352
+
rows, err := e.Query(
353
+
fmt.Sprintf(
354
+
`select r.did, r.name, i.issue_id, c.id, i.title, i.open
355
+
from issue_comments c
356
+
join issues i
357
+
on i.at_uri = c.issue_at
358
+
join repos r
359
+
on r.at_uri = i.repo_at
360
+
where %s`,
361
+
filter.Condition(),
362
+
),
363
+
filter.Arg()...,
364
+
)
365
+
if err != nil {
366
+
return nil, err
367
+
}
368
+
defer rows.Close()
369
+
var refLinks []models.RichReferenceLink
370
+
for rows.Next() {
371
+
var l models.RichReferenceLink
372
+
l.Kind = models.RefKindIssue
373
+
l.CommentId = new(int)
374
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil {
375
+
return nil, err
376
+
}
377
+
refLinks = append(refLinks, l)
378
+
}
379
+
if err := rows.Err(); err != nil {
380
+
return nil, fmt.Errorf("iterate rows: %w", err)
381
+
}
382
+
return refLinks, nil
383
+
}
384
+
385
+
func getPullBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
386
+
if len(aturis) == 0 {
387
+
return nil, nil
388
+
}
389
+
vals := make([]string, len(aturis))
390
+
args := make([]any, 0, len(aturis)*2)
391
+
for i, aturi := range aturis {
392
+
vals[i] = "(?, ?)"
393
+
did := aturi.Authority().String()
394
+
rkey := aturi.RecordKey().String()
395
+
args = append(args, did, rkey)
396
+
}
397
+
rows, err := e.Query(
398
+
fmt.Sprintf(
399
+
`select r.did, r.name, p.pull_id, p.title, p.state
400
+
from pulls p
401
+
join repos r
402
+
on r.at_uri = p.repo_at
403
+
where (p.owner_did, p.rkey) in (%s)`,
404
+
strings.Join(vals, ","),
405
+
),
406
+
args...,
407
+
)
408
+
if err != nil {
409
+
return nil, err
410
+
}
411
+
defer rows.Close()
412
+
var refLinks []models.RichReferenceLink
413
+
for rows.Next() {
414
+
var l models.RichReferenceLink
415
+
l.Kind = models.RefKindPull
416
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, &l.Title, &l.State); err != nil {
417
+
return nil, err
418
+
}
419
+
refLinks = append(refLinks, l)
420
+
}
421
+
if err := rows.Err(); err != nil {
422
+
return nil, fmt.Errorf("iterate rows: %w", err)
423
+
}
424
+
return refLinks, nil
425
+
}
426
+
427
+
func getPullCommentBacklinks(e Execer, aturis []syntax.ATURI) ([]models.RichReferenceLink, error) {
428
+
if len(aturis) == 0 {
429
+
return nil, nil
430
+
}
431
+
filter := orm.FilterIn("c.comment_at", aturis)
432
+
rows, err := e.Query(
433
+
fmt.Sprintf(
434
+
`select r.did, r.name, p.pull_id, c.id, p.title, p.state
435
+
from repos r
436
+
join pulls p
437
+
on r.at_uri = p.repo_at
438
+
join pull_comments c
439
+
on r.at_uri = c.repo_at and p.pull_id = c.pull_id
440
+
where %s`,
441
+
filter.Condition(),
442
+
),
443
+
filter.Arg()...,
444
+
)
445
+
if err != nil {
446
+
return nil, err
447
+
}
448
+
defer rows.Close()
449
+
var refLinks []models.RichReferenceLink
450
+
for rows.Next() {
451
+
var l models.RichReferenceLink
452
+
l.Kind = models.RefKindPull
453
+
l.CommentId = new(int)
454
+
if err := rows.Scan(&l.Handle, &l.Repo, &l.SubjectId, l.CommentId, &l.Title, &l.State); err != nil {
455
+
return nil, err
456
+
}
457
+
refLinks = append(refLinks, l)
458
+
}
459
+
if err := rows.Err(); err != nil {
460
+
return nil, fmt.Errorf("iterate rows: %w", err)
461
+
}
462
+
return refLinks, nil
463
+
}
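
A hedged usage sketch tying the new helpers in this file together (not part of the diff). ValidateReferenceLinks, GetBacklinks, and the RichReferenceLink fields come from reference.go above; the surrounding function name and the idea that the caller has already parsed []models.ReferenceLink out of a comment body are assumptions:

// sketch: resolve body references, then render backlinks on the target record
func exampleReferenceFlow(e Execer, parsed []models.ReferenceLink, target syntax.ATURI) error {
	// handle/repo/#id links become at-uris; unknown links are silently dropped
	refs, err := ValidateReferenceLinks(e, parsed)
	if err != nil {
		return err
	}
	_ = refs // persisted via putReferences inside NewPull / NewPullComment in the same tx

	// later, list everything that points at the target issue or pull
	backlinks, err := GetBacklinks(e, target)
	if err != nil {
		return err
	}
	for _, b := range backlinks {
		fmt.Printf("%s/%s#%d (kind %v)\n", b.Handle, b.Repo, b.SubjectId, b.Kind)
	}
	return nil
}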
+5
-3
appview/db/registration.go
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetRegistrations(e Execer, filters ...filter) ([]models.Registration, error) {
13
+
func GetRegistrations(e Execer, filters ...orm.Filter) ([]models.Registration, error) {
13
14
var registrations []models.Registration
14
15
15
16
var conditions []string
···
37
38
if err != nil {
38
39
return nil, err
39
40
}
41
+
defer rows.Close()
40
42
41
43
for rows.Next() {
42
44
var createdAt string
···
69
71
return registrations, nil
70
72
}
71
73
72
-
func MarkRegistered(e Execer, filters ...filter) error {
74
+
func MarkRegistered(e Execer, filters ...orm.Filter) error {
73
75
var conditions []string
74
76
var args []any
75
77
for _, filter := range filters {
···
94
96
return err
95
97
}
96
98
97
-
func DeleteKnot(e Execer, filters ...filter) error {
99
+
func DeleteKnot(e Execer, filters ...orm.Filter) error {
98
100
var conditions []string
99
101
var args []any
100
102
for _, filter := range filters {
+32
-37
appview/db/repos.go
···
10
10
"time"
11
11
12
12
"github.com/bluesky-social/indigo/atproto/syntax"
13
-
securejoin "github.com/cyphar/filepath-securejoin"
14
-
"tangled.org/core/api/tangled"
15
13
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
16
15
)
17
16
18
-
type Repo struct {
19
-
Id int64
20
-
Did string
21
-
Name string
22
-
Knot string
23
-
Rkey string
24
-
Created time.Time
25
-
Description string
26
-
Spindle string
27
-
28
-
// optionally, populate this when querying for reverse mappings
29
-
RepoStats *models.RepoStats
30
-
31
-
// optional
32
-
Source string
33
-
}
34
-
35
-
func (r Repo) RepoAt() syntax.ATURI {
36
-
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.RepoNSID, r.Rkey))
37
-
}
38
-
39
-
func (r Repo) DidSlashRepo() string {
40
-
p, _ := securejoin.SecureJoin(r.Did, r.Name)
41
-
return p
42
-
}
43
-
44
-
func GetRepos(e Execer, limit int, filters ...filter) ([]models.Repo, error) {
17
+
func GetRepos(e Execer, limit int, filters ...orm.Filter) ([]models.Repo, error) {
45
18
repoMap := make(map[syntax.ATURI]*models.Repo)
46
19
47
20
var conditions []string
···
83
56
limitClause,
84
57
)
85
58
rows, err := e.Query(repoQuery, args...)
86
-
87
59
if err != nil {
88
60
return nil, fmt.Errorf("failed to execute repo query: %w ", err)
89
61
}
62
+
defer rows.Close()
90
63
91
64
for rows.Next() {
92
65
var repo models.Repo
···
155
128
if err != nil {
156
129
return nil, fmt.Errorf("failed to execute labels query: %w ", err)
157
130
}
131
+
defer rows.Close()
132
+
158
133
for rows.Next() {
159
134
var repoat, labelat string
160
135
if err := rows.Scan(&repoat, &labelat); err != nil {
···
183
158
from repo_languages
184
159
where repo_at in (%s)
185
160
and is_default_ref = 1
161
+
and language <> ''
186
162
)
187
163
where rn = 1
188
164
`,
···
192
168
if err != nil {
193
169
return nil, fmt.Errorf("failed to execute lang query: %w ", err)
194
170
}
171
+
defer rows.Close()
172
+
195
173
for rows.Next() {
196
174
var repoat, lang string
197
175
if err := rows.Scan(&repoat, &lang); err != nil {
···
208
186
209
187
starCountQuery := fmt.Sprintf(
210
188
`select
211
-
repo_at, count(1)
189
+
subject_at, count(1)
212
190
from stars
213
-
where repo_at in (%s)
214
-
group by repo_at`,
191
+
where subject_at in (%s)
192
+
group by subject_at`,
215
193
inClause,
216
194
)
217
195
rows, err = e.Query(starCountQuery, args...)
218
196
if err != nil {
219
197
return nil, fmt.Errorf("failed to execute star-count query: %w ", err)
220
198
}
199
+
defer rows.Close()
200
+
221
201
for rows.Next() {
222
202
var repoat string
223
203
var count int
···
247
227
if err != nil {
248
228
return nil, fmt.Errorf("failed to execute issue-count query: %w ", err)
249
229
}
230
+
defer rows.Close()
231
+
250
232
for rows.Next() {
251
233
var repoat string
252
234
var open, closed int
···
288
270
if err != nil {
289
271
return nil, fmt.Errorf("failed to execute pulls-count query: %w ", err)
290
272
}
273
+
defer rows.Close()
274
+
291
275
for rows.Next() {
292
276
var repoat string
293
277
var open, merged, closed, deleted int
···
322
306
}
323
307
324
308
// helper to get exactly one repo
325
-
func GetRepo(e Execer, filters ...filter) (*models.Repo, error) {
309
+
func GetRepo(e Execer, filters ...orm.Filter) (*models.Repo, error) {
326
310
repos, err := GetRepos(e, 0, filters...)
327
311
if err != nil {
328
312
return nil, err
···
339
323
return &repos[0], nil
340
324
}
341
325
342
-
func CountRepos(e Execer, filters ...filter) (int64, error) {
326
+
func CountRepos(e Execer, filters ...orm.Filter) (int64, error) {
343
327
var conditions []string
344
328
var args []any
345
329
for _, filter := range filters {
···
439
423
return nullableSource.String, nil
440
424
}
441
425
426
+
func GetRepoSourceRepo(e Execer, repoAt syntax.ATURI) (*models.Repo, error) {
427
+
source, err := GetRepoSource(e, repoAt)
428
+
if source == "" || errors.Is(err, sql.ErrNoRows) {
429
+
return nil, nil
430
+
}
431
+
if err != nil {
432
+
return nil, err
433
+
}
434
+
return GetRepoByAtUri(e, source)
435
+
}
436
+
442
437
func GetForksByDid(e Execer, did string) ([]models.Repo, error) {
443
438
var repos []models.Repo
444
439
···
559
554
return err
560
555
}
561
556
562
-
func UnsubscribeLabel(e Execer, filters ...filter) error {
557
+
func UnsubscribeLabel(e Execer, filters ...orm.Filter) error {
563
558
var conditions []string
564
559
var args []any
565
560
for _, filter := range filters {
···
577
572
return err
578
573
}
579
574
580
-
func GetRepoLabels(e Execer, filters ...filter) ([]models.RepoLabel, error) {
575
+
func GetRepoLabels(e Execer, filters ...orm.Filter) ([]models.RepoLabel, error) {
581
576
var conditions []string
582
577
var args []any
583
578
for _, filter := range filters {
+6
-5
appview/db/spindle.go
···
7
7
"time"
8
8
9
9
"tangled.org/core/appview/models"
10
+
"tangled.org/core/orm"
10
11
)
11
12
12
-
func GetSpindles(e Execer, filters ...filter) ([]models.Spindle, error) {
13
+
func GetSpindles(e Execer, filters ...orm.Filter) ([]models.Spindle, error) {
13
14
var spindles []models.Spindle
14
15
15
16
var conditions []string
···
91
92
return err
92
93
}
93
94
94
-
func VerifySpindle(e Execer, filters ...filter) (int64, error) {
95
+
func VerifySpindle(e Execer, filters ...orm.Filter) (int64, error) {
95
96
var conditions []string
96
97
var args []any
97
98
for _, filter := range filters {
···
114
115
return res.RowsAffected()
115
116
}
116
117
117
-
func DeleteSpindle(e Execer, filters ...filter) error {
118
+
func DeleteSpindle(e Execer, filters ...orm.Filter) error {
118
119
var conditions []string
119
120
var args []any
120
121
for _, filter := range filters {
···
144
145
return err
145
146
}
146
147
147
-
func RemoveSpindleMember(e Execer, filters ...filter) error {
148
+
func RemoveSpindleMember(e Execer, filters ...orm.Filter) error {
148
149
var conditions []string
149
150
var args []any
150
151
for _, filter := range filters {
···
163
164
return err
164
165
}
165
166
166
-
func GetSpindleMembers(e Execer, filters ...filter) ([]models.SpindleMember, error) {
167
+
func GetSpindleMembers(e Execer, filters ...orm.Filter) ([]models.SpindleMember, error) {
167
168
var members []models.SpindleMember
168
169
169
170
var conditions []string
+44
-102
appview/db/star.go
···
11
11
12
12
"github.com/bluesky-social/indigo/atproto/syntax"
13
13
"tangled.org/core/appview/models"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
func AddStar(e Execer, star *models.Star) error {
17
-
query := `insert or ignore into stars (starred_by_did, repo_at, rkey) values (?, ?, ?)`
18
+
query := `insert or ignore into stars (did, subject_at, rkey) values (?, ?, ?)`
18
19
_, err := e.Exec(
19
20
query,
20
-
star.StarredByDid,
21
+
star.Did,
21
22
star.RepoAt.String(),
22
23
star.Rkey,
23
24
)
···
25
26
}
26
27
27
28
// Get a star record
28
-
func GetStar(e Execer, starredByDid string, repoAt syntax.ATURI) (*models.Star, error) {
29
+
func GetStar(e Execer, did string, subjectAt syntax.ATURI) (*models.Star, error) {
29
30
query := `
30
-
select starred_by_did, repo_at, created, rkey
31
+
select did, subject_at, created, rkey
31
32
from stars
32
-
where starred_by_did = ? and repo_at = ?`
33
-
row := e.QueryRow(query, starredByDid, repoAt)
33
+
where did = ? and subject_at = ?`
34
+
row := e.QueryRow(query, did, subjectAt)
34
35
35
36
var star models.Star
36
37
var created string
37
-
err := row.Scan(&star.StarredByDid, &star.RepoAt, &created, &star.Rkey)
38
+
err := row.Scan(&star.Did, &star.RepoAt, &created, &star.Rkey)
38
39
if err != nil {
39
40
return nil, err
40
41
}
···
51
52
}
52
53
53
54
// Remove a star
54
-
func DeleteStar(e Execer, starredByDid string, repoAt syntax.ATURI) error {
55
-
_, err := e.Exec(`delete from stars where starred_by_did = ? and repo_at = ?`, starredByDid, repoAt)
55
+
func DeleteStar(e Execer, did string, subjectAt syntax.ATURI) error {
56
+
_, err := e.Exec(`delete from stars where did = ? and subject_at = ?`, did, subjectAt)
56
57
return err
57
58
}
58
59
59
60
// Remove a star
60
-
func DeleteStarByRkey(e Execer, starredByDid string, rkey string) error {
61
-
_, err := e.Exec(`delete from stars where starred_by_did = ? and rkey = ?`, starredByDid, rkey)
61
+
func DeleteStarByRkey(e Execer, did string, rkey string) error {
62
+
_, err := e.Exec(`delete from stars where did = ? and rkey = ?`, did, rkey)
62
63
return err
63
64
}
64
65
65
-
func GetStarCount(e Execer, repoAt syntax.ATURI) (int, error) {
66
+
func GetStarCount(e Execer, subjectAt syntax.ATURI) (int, error) {
66
67
stars := 0
67
68
err := e.QueryRow(
68
-
`select count(starred_by_did) from stars where repo_at = ?`, repoAt).Scan(&stars)
69
+
`select count(did) from stars where subject_at = ?`, subjectAt).Scan(&stars)
69
70
if err != nil {
70
71
return 0, err
71
72
}
···
89
90
}
90
91
91
92
query := fmt.Sprintf(`
92
-
SELECT repo_at
93
+
SELECT subject_at
93
94
FROM stars
94
-
WHERE starred_by_did = ? AND repo_at IN (%s)
95
+
WHERE did = ? AND subject_at IN (%s)
95
96
`, strings.Join(placeholders, ","))
96
97
97
98
rows, err := e.Query(query, args...)
···
118
119
return result, nil
119
120
}
120
121
121
-
func GetStarStatus(e Execer, userDid string, repoAt syntax.ATURI) bool {
122
-
statuses, err := getStarStatuses(e, userDid, []syntax.ATURI{repoAt})
122
+
func GetStarStatus(e Execer, userDid string, subjectAt syntax.ATURI) bool {
123
+
statuses, err := getStarStatuses(e, userDid, []syntax.ATURI{subjectAt})
123
124
if err != nil {
124
125
return false
125
126
}
126
-
return statuses[repoAt.String()]
127
+
return statuses[subjectAt.String()]
127
128
}
128
129
129
130
// GetStarStatuses returns a map of repo URIs to star status for a given user
130
-
func GetStarStatuses(e Execer, userDid string, repoAts []syntax.ATURI) (map[string]bool, error) {
131
-
return getStarStatuses(e, userDid, repoAts)
131
+
func GetStarStatuses(e Execer, userDid string, subjectAts []syntax.ATURI) (map[string]bool, error) {
132
+
return getStarStatuses(e, userDid, subjectAts)
132
133
}
133
-
func GetStars(e Execer, limit int, filters ...filter) ([]models.Star, error) {
134
+
135
+
// GetRepoStars return a list of stars each holding target repository.
136
+
// If there isn't known repo with starred at-uri, those stars will be ignored.
137
+
func GetRepoStars(e Execer, limit int, filters ...orm.Filter) ([]models.RepoStar, error) {
134
138
var conditions []string
135
139
var args []any
136
140
for _, filter := range filters {
···
149
153
}
150
154
151
155
repoQuery := fmt.Sprintf(
152
-
`select starred_by_did, repo_at, created, rkey
156
+
`select did, subject_at, created, rkey
153
157
from stars
154
158
%s
155
159
order by created desc
···
161
165
if err != nil {
162
166
return nil, err
163
167
}
168
+
defer rows.Close()
164
169
165
170
starMap := make(map[string][]models.Star)
166
171
for rows.Next() {
167
172
var star models.Star
168
173
var created string
169
-
err := rows.Scan(&star.StarredByDid, &star.RepoAt, &created, &star.Rkey)
174
+
err := rows.Scan(&star.Did, &star.RepoAt, &created, &star.Rkey)
170
175
if err != nil {
171
176
return nil, err
172
177
}
···
192
197
return nil, nil
193
198
}
194
199
195
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", args))
200
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", args))
196
201
if err != nil {
197
202
return nil, err
198
203
}
199
204
205
+
var repoStars []models.RepoStar
200
206
for _, r := range repos {
201
207
if stars, ok := starMap[string(r.RepoAt())]; ok {
202
-
for i := range stars {
203
-
stars[i].Repo = &r
208
+
for _, star := range stars {
209
+
repoStars = append(repoStars, models.RepoStar{
210
+
Star: star,
211
+
Repo: &r,
212
+
})
204
213
}
205
214
}
206
215
}
207
216
208
-
var stars []models.Star
209
-
for _, s := range starMap {
210
-
stars = append(stars, s...)
211
-
}
212
-
213
-
slices.SortFunc(stars, func(a, b models.Star) int {
217
+
slices.SortFunc(repoStars, func(a, b models.RepoStar) int {
214
218
if a.Created.After(b.Created) {
215
219
return -1
216
220
}
···
220
224
return 0
221
225
})
222
226
223
-
return stars, nil
227
+
return repoStars, nil
224
228
}
225
229
226
-
func CountStars(e Execer, filters ...filter) (int64, error) {
230
+
func CountStars(e Execer, filters ...orm.Filter) (int64, error) {
227
231
var conditions []string
228
232
var args []any
229
233
for _, filter := range filters {
···
247
251
return count, nil
248
252
}
249
253
250
-
func GetAllStars(e Execer, limit int) ([]models.Star, error) {
251
-
var stars []models.Star
252
-
253
-
rows, err := e.Query(`
254
-
select
255
-
s.starred_by_did,
256
-
s.repo_at,
257
-
s.rkey,
258
-
s.created,
259
-
r.did,
260
-
r.name,
261
-
r.knot,
262
-
r.rkey,
263
-
r.created
264
-
from stars s
265
-
join repos r on s.repo_at = r.at_uri
266
-
`)
267
-
268
-
if err != nil {
269
-
return nil, err
270
-
}
271
-
defer rows.Close()
272
-
273
-
for rows.Next() {
274
-
var star models.Star
275
-
var repo models.Repo
276
-
var starCreatedAt, repoCreatedAt string
277
-
278
-
if err := rows.Scan(
279
-
&star.StarredByDid,
280
-
&star.RepoAt,
281
-
&star.Rkey,
282
-
&starCreatedAt,
283
-
&repo.Did,
284
-
&repo.Name,
285
-
&repo.Knot,
286
-
&repo.Rkey,
287
-
&repoCreatedAt,
288
-
); err != nil {
289
-
return nil, err
290
-
}
291
-
292
-
star.Created, err = time.Parse(time.RFC3339, starCreatedAt)
293
-
if err != nil {
294
-
star.Created = time.Now()
295
-
}
296
-
repo.Created, err = time.Parse(time.RFC3339, repoCreatedAt)
297
-
if err != nil {
298
-
repo.Created = time.Now()
299
-
}
300
-
star.Repo = &repo
301
-
302
-
stars = append(stars, star)
303
-
}
304
-
305
-
if err := rows.Err(); err != nil {
306
-
return nil, err
307
-
}
308
-
309
-
return stars, nil
310
-
}
311
-
312
254
// GetTopStarredReposLastWeek returns the top 8 most starred repositories from the last week
313
255
func GetTopStarredReposLastWeek(e Execer) ([]models.Repo, error) {
314
256
// first, get the top repo URIs by star count from the last week
315
257
query := `
316
258
with recent_starred_repos as (
317
-
select distinct repo_at
259
+
select distinct subject_at
318
260
from stars
319
261
where created >= datetime('now', '-7 days')
320
262
),
321
263
repo_star_counts as (
322
264
select
323
-
s.repo_at,
265
+
s.subject_at,
324
266
count(*) as stars_gained_last_week
325
267
from stars s
326
-
join recent_starred_repos rsr on s.repo_at = rsr.repo_at
268
+
join recent_starred_repos rsr on s.subject_at = rsr.subject_at
327
269
where s.created >= datetime('now', '-7 days')
328
-
group by s.repo_at
270
+
group by s.subject_at
329
271
)
330
-
select rsc.repo_at
272
+
select rsc.subject_at
331
273
from repo_star_counts rsc
332
274
order by rsc.stars_gained_last_week desc
333
275
limit 8
···
358
300
}
359
301
360
302
// get full repo data
361
-
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris))
303
+
repos, err := GetRepos(e, 0, orm.FilterIn("at_uri", repoUris))
362
304
if err != nil {
363
305
return nil, err
364
306
}
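
With GetStars and GetAllStars removed, callers that need the joined repository now go through GetRepoStars. A short sketch, not part of the diff; the "did" column name is the one the reworked stars schema above uses:

// sketch: latest 30 stars by one user, each carrying its repo
func exampleRecentStars(e Execer, userDid string) ([]models.RepoStar, error) {
	return GetRepoStars(e, 30, orm.FilterEq("did", userDid))
}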
+4
-3
appview/db/strings.go
···
8
8
"time"
9
9
10
10
"tangled.org/core/appview/models"
11
+
"tangled.org/core/orm"
11
12
)
12
13
13
14
func AddString(e Execer, s models.String) error {
···
44
45
return err
45
46
}
46
47
47
-
func GetStrings(e Execer, limit int, filters ...filter) ([]models.String, error) {
48
+
func GetStrings(e Execer, limit int, filters ...orm.Filter) ([]models.String, error) {
48
49
var all []models.String
49
50
50
51
var conditions []string
···
127
128
return all, nil
128
129
}
129
130
130
-
func CountStrings(e Execer, filters ...filter) (int64, error) {
131
+
func CountStrings(e Execer, filters ...orm.Filter) (int64, error) {
131
132
var conditions []string
132
133
var args []any
133
134
for _, filter := range filters {
···
151
152
return count, nil
152
153
}
153
154
154
-
func DeleteString(e Execer, filters ...filter) error {
155
+
func DeleteString(e Execer, filters ...orm.Filter) error {
155
156
var conditions []string
156
157
var args []any
157
158
for _, filter := range filters {
+11
-20
appview/db/timeline.go
···
5
5
6
6
"github.com/bluesky-social/indigo/atproto/syntax"
7
7
"tangled.org/core/appview/models"
8
+
"tangled.org/core/orm"
8
9
)
9
10
10
11
// TODO: this gathers heterogenous events from different sources and aggregates
···
84
85
}
85
86
86
87
func getTimelineRepos(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
87
-
filters := make([]filter, 0)
88
+
filters := make([]orm.Filter, 0)
88
89
if userIsFollowing != nil {
89
-
filters = append(filters, FilterIn("did", userIsFollowing))
90
+
filters = append(filters, orm.FilterIn("did", userIsFollowing))
90
91
}
91
92
92
93
repos, err := GetRepos(e, limit, filters...)
···
104
105
105
106
var origRepos []models.Repo
106
107
if args != nil {
107
-
origRepos, err = GetRepos(e, 0, FilterIn("at_uri", args))
108
+
origRepos, err = GetRepos(e, 0, orm.FilterIn("at_uri", args))
108
109
}
109
110
if err != nil {
110
111
return nil, err
···
144
145
}
145
146
146
147
func getTimelineStars(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
147
-
filters := make([]filter, 0)
148
+
filters := make([]orm.Filter, 0)
148
149
if userIsFollowing != nil {
149
-
filters = append(filters, FilterIn("starred_by_did", userIsFollowing))
150
+
filters = append(filters, orm.FilterIn("did", userIsFollowing))
150
151
}
151
152
152
-
stars, err := GetStars(e, limit, filters...)
153
+
stars, err := GetRepoStars(e, limit, filters...)
153
154
if err != nil {
154
155
return nil, err
155
156
}
156
157
157
-
// filter star records without a repo
158
-
n := 0
159
-
for _, s := range stars {
160
-
if s.Repo != nil {
161
-
stars[n] = s
162
-
n++
163
-
}
164
-
}
165
-
stars = stars[:n]
166
-
167
158
var repos []models.Repo
168
159
for _, s := range stars {
169
160
repos = append(repos, *s.Repo)
···
179
170
isStarred, starCount := getRepoStarInfo(s.Repo, starStatuses)
180
171
181
172
events = append(events, models.TimelineEvent{
182
-
Star: &s,
173
+
RepoStar: &s,
183
174
EventAt: s.Created,
184
175
IsStarred: isStarred,
185
176
StarCount: starCount,
···
190
181
}
191
182
192
183
func getTimelineFollows(e Execer, limit int, loggedInUserDid string, userIsFollowing []string) ([]models.TimelineEvent, error) {
193
-
filters := make([]filter, 0)
184
+
filters := make([]orm.Filter, 0)
194
185
if userIsFollowing != nil {
195
-
filters = append(filters, FilterIn("user_did", userIsFollowing))
186
+
filters = append(filters, orm.FilterIn("user_did", userIsFollowing))
196
187
}
197
188
198
189
follows, err := GetFollows(e, limit, filters...)
···
209
200
return nil, nil
210
201
}
211
202
212
-
profiles, err := GetProfiles(e, FilterIn("did", subjects))
203
+
profiles, err := GetProfiles(e, orm.FilterIn("did", subjects))
213
204
if err != nil {
214
205
return nil, err
215
206
}
+7
-12
appview/email/email.go
···
3
3
import (
4
4
"fmt"
5
5
"net"
6
-
"regexp"
6
+
"net/mail"
7
7
"strings"
8
8
9
9
"github.com/resend/resend-go/v2"
···
34
34
}
35
35
36
36
func IsValidEmail(email string) bool {
37
-
// Basic length check
38
-
if len(email) < 3 || len(email) > 254 {
37
+
// Reject whitespace (ParseAddress normalizes it away)
38
+
if strings.ContainsAny(email, " \t\n\r") {
39
39
return false
40
40
}
41
41
42
-
// Regular expression for email validation (RFC 5322 compliant)
43
-
pattern := `^[a-zA-Z0-9.!#$%&'*+/=?^_\x60{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$`
44
-
45
-
// Compile regex
46
-
regex := regexp.MustCompile(pattern)
47
-
48
-
// Check if email matches regex pattern
49
-
if !regex.MatchString(email) {
42
+
// Use stdlib RFC 5322 parser
43
+
addr, err := mail.ParseAddress(email)
44
+
if err != nil {
50
45
return false
51
46
}
52
47
53
48
// Split email into local and domain parts
54
-
parts := strings.Split(email, "@")
49
+
parts := strings.Split(addr.Address, "@")
55
50
domain := parts[1]
56
51
57
52
mx, err := net.LookupMX(domain)
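
The whitespace pre-check above matters because mail.ParseAddress is more lenient than a bare-address validator; a small illustration, not part of the diff:

// ParseAddress accepts display-name forms and returns the normalized address,
// which is why the raw input is rejected on whitespace before parsing.
addr, err := mail.ParseAddress("User Name <user@example.com>")
if err == nil {
	fmt.Println(addr.Address) // user@example.com
}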
+53
appview/email/email_test.go
···
1
+
package email
2
+
3
+
import (
4
+
"testing"
5
+
)
6
+
7
+
func TestIsValidEmail(t *testing.T) {
8
+
tests := []struct {
9
+
name string
10
+
email string
11
+
want bool
12
+
}{
13
+
// Valid emails using RFC 2606 reserved domains
14
+
{"standard email", "user@example.com", true},
15
+
{"single char local", "a@example.com", true},
16
+
{"dot in middle", "first.last@example.com", true},
17
+
{"multiple dots", "a.b.c@example.com", true},
18
+
{"plus tag", "user+tag@example.com", true},
19
+
{"numbers", "user123@example.com", true},
20
+
{"example.org", "user@example.org", true},
21
+
{"example.net", "user@example.net", true},
22
+
23
+
// Invalid format - rejected by mail.ParseAddress
24
+
{"empty string", "", false},
25
+
{"no at sign", "userexample.com", false},
26
+
{"no domain", "user@", false},
27
+
{"no local part", "@example.com", false},
28
+
{"double at", "user@@example.com", false},
29
+
{"just at sign", "@", false},
30
+
{"leading dot", ".user@example.com", false},
31
+
{"trailing dot", "user.@example.com", false},
32
+
{"consecutive dots", "user..name@example.com", false},
33
+
34
+
// Whitespace - rejected before parsing
35
+
{"space in local", "user @example.com", false},
36
+
{"space in domain", "user@ example.com", false},
37
+
{"tab", "user\t@example.com", false},
38
+
{"newline", "user\n@example.com", false},
39
+
40
+
// MX lookup - using RFC 2606 reserved TLDs (guaranteed no MX)
41
+
{"invalid TLD", "user@example.invalid", false},
42
+
{"test TLD", "user@mail.test", false},
43
+
}
44
+
45
+
for _, tt := range tests {
46
+
t.Run(tt.name, func(t *testing.T) {
47
+
got := IsValidEmail(tt.email)
48
+
if got != tt.want {
49
+
t.Errorf("IsValidEmail(%q) = %v, want %v", tt.email, got, tt.want)
50
+
}
51
+
})
52
+
}
53
+
}
+50
-32
appview/ingester.go
···
21
21
"tangled.org/core/appview/serververify"
22
22
"tangled.org/core/appview/validator"
23
23
"tangled.org/core/idresolver"
24
+
"tangled.org/core/orm"
24
25
"tangled.org/core/rbac"
25
26
)
26
27
···
121
122
return err
122
123
}
123
124
err = db.AddStar(i.Db, &models.Star{
124
-
StarredByDid: did,
125
-
RepoAt: subjectUri,
126
-
Rkey: e.Commit.RKey,
125
+
Did: did,
126
+
RepoAt: subjectUri,
127
+
Rkey: e.Commit.RKey,
127
128
})
128
129
case jmodels.CommitOperationDelete:
129
130
err = db.DeleteStarByRkey(i.Db, did, e.Commit.RKey)
···
253
254
254
255
err = db.AddArtifact(i.Db, artifact)
255
256
case jmodels.CommitOperationDelete:
256
-
err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey))
257
+
err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey))
257
258
}
258
259
259
260
if err != nil {
···
350
351
351
352
err = db.UpsertProfile(tx, &profile)
352
353
case jmodels.CommitOperationDelete:
353
-
err = db.DeleteArtifact(i.Db, db.FilterEq("did", did), db.FilterEq("rkey", e.Commit.RKey))
354
+
err = db.DeleteArtifact(i.Db, orm.FilterEq("did", did), orm.FilterEq("rkey", e.Commit.RKey))
354
355
}
355
356
356
357
if err != nil {
···
424
425
// get record from db first
425
426
members, err := db.GetSpindleMembers(
426
427
ddb,
427
-
db.FilterEq("did", did),
428
-
db.FilterEq("rkey", rkey),
428
+
orm.FilterEq("did", did),
429
+
orm.FilterEq("rkey", rkey),
429
430
)
430
431
if err != nil || len(members) != 1 {
431
432
return fmt.Errorf("failed to get member: %w, len(members) = %d", err, len(members))
···
440
441
// remove record by rkey && update enforcer
441
442
if err = db.RemoveSpindleMember(
442
443
tx,
443
-
db.FilterEq("did", did),
444
-
db.FilterEq("rkey", rkey),
444
+
orm.FilterEq("did", did),
445
+
orm.FilterEq("rkey", rkey),
445
446
); err != nil {
446
447
return fmt.Errorf("failed to remove from db: %w", err)
447
448
}
···
523
524
// get record from db first
524
525
spindles, err := db.GetSpindles(
525
526
ddb,
526
-
db.FilterEq("owner", did),
527
-
db.FilterEq("instance", instance),
527
+
orm.FilterEq("owner", did),
528
+
orm.FilterEq("instance", instance),
528
529
)
529
530
if err != nil || len(spindles) != 1 {
530
531
return fmt.Errorf("failed to get spindles: %w, len(spindles) = %d", err, len(spindles))
···
543
544
// remove spindle members first
544
545
err = db.RemoveSpindleMember(
545
546
tx,
546
-
db.FilterEq("owner", did),
547
-
db.FilterEq("instance", instance),
547
+
orm.FilterEq("owner", did),
548
+
orm.FilterEq("instance", instance),
548
549
)
549
550
if err != nil {
550
551
return err
···
552
553
553
554
err = db.DeleteSpindle(
554
555
tx,
555
-
db.FilterEq("owner", did),
556
-
db.FilterEq("instance", instance),
556
+
orm.FilterEq("owner", did),
557
+
orm.FilterEq("instance", instance),
557
558
)
558
559
if err != nil {
559
560
return err
···
621
622
case jmodels.CommitOperationDelete:
622
623
if err := db.DeleteString(
623
624
ddb,
624
-
db.FilterEq("did", did),
625
-
db.FilterEq("rkey", rkey),
625
+
orm.FilterEq("did", did),
626
+
orm.FilterEq("rkey", rkey),
626
627
); err != nil {
627
628
l.Error("failed to delete", "err", err)
628
629
return fmt.Errorf("failed to delete string record: %w", err)
···
740
741
// get record from db first
741
742
registrations, err := db.GetRegistrations(
742
743
ddb,
743
-
db.FilterEq("domain", domain),
744
-
db.FilterEq("did", did),
744
+
orm.FilterEq("domain", domain),
745
+
orm.FilterEq("did", did),
745
746
)
746
747
if err != nil {
747
748
return fmt.Errorf("failed to get registration: %w", err)
···
762
763
763
764
err = db.DeleteKnot(
764
765
tx,
765
-
db.FilterEq("did", did),
766
-
db.FilterEq("domain", domain),
766
+
orm.FilterEq("did", did),
767
+
orm.FilterEq("domain", domain),
767
768
)
768
769
if err != nil {
769
770
return err
···
841
842
return nil
842
843
843
844
case jmodels.CommitOperationDelete:
845
+
tx, err := ddb.BeginTx(ctx, nil)
846
+
if err != nil {
847
+
l.Error("failed to begin transaction", "err", err)
848
+
return err
849
+
}
850
+
defer tx.Rollback()
851
+
844
852
if err := db.DeleteIssues(
845
-
ddb,
846
-
db.FilterEq("did", did),
847
-
db.FilterEq("rkey", rkey),
853
+
tx,
854
+
did,
855
+
rkey,
848
856
); err != nil {
849
857
l.Error("failed to delete", "err", err)
850
858
return fmt.Errorf("failed to delete issue record: %w", err)
859
+
}
860
+
if err := tx.Commit(); err != nil {
861
+
l.Error("failed to commit txn", "err", err)
862
+
return err
851
863
}
852
864
853
865
return nil
···
888
900
return fmt.Errorf("failed to validate comment: %w", err)
889
901
}
890
902
891
-
_, err = db.AddIssueComment(ddb, *comment)
903
+
tx, err := ddb.Begin()
904
+
if err != nil {
905
+
return fmt.Errorf("failed to start transaction: %w", err)
906
+
}
907
+
defer tx.Rollback()
908
+
909
+
_, err = db.AddIssueComment(tx, *comment)
892
910
if err != nil {
893
911
return fmt.Errorf("failed to create issue comment: %w", err)
894
912
}
895
913
896
-
return nil
914
+
return tx.Commit()
897
915
898
916
case jmodels.CommitOperationDelete:
899
917
if err := db.DeleteIssueComments(
900
918
ddb,
901
-
db.FilterEq("did", did),
902
-
db.FilterEq("rkey", rkey),
919
+
orm.FilterEq("did", did),
920
+
orm.FilterEq("rkey", rkey),
903
921
); err != nil {
904
922
return fmt.Errorf("failed to delete issue comment record: %w", err)
905
923
}
···
952
970
case jmodels.CommitOperationDelete:
953
971
if err := db.DeleteLabelDefinition(
954
972
ddb,
955
-
db.FilterEq("did", did),
956
-
db.FilterEq("rkey", rkey),
973
+
orm.FilterEq("did", did),
974
+
orm.FilterEq("rkey", rkey),
957
975
); err != nil {
958
976
return fmt.Errorf("failed to delete labeldef record: %w", err)
959
977
}
···
993
1011
var repo *models.Repo
994
1012
switch collection {
995
1013
case tangled.RepoIssueNSID:
996
-
i, err := db.GetIssues(ddb, db.FilterEq("at_uri", subject))
1014
+
i, err := db.GetIssues(ddb, orm.FilterEq("at_uri", subject))
997
1015
if err != nil || len(i) != 1 {
998
1016
return fmt.Errorf("failed to find subject: %w || subject count %d", err, len(i))
999
1017
}
···
1002
1020
return fmt.Errorf("unsupport label subject: %s", collection)
1003
1021
}
1004
1022
1005
-
actx, err := db.NewLabelApplicationCtx(ddb, db.FilterIn("at_uri", repo.Labels))
1023
+
actx, err := db.NewLabelApplicationCtx(ddb, orm.FilterIn("at_uri", repo.Labels))
1006
1024
if err != nil {
1007
1025
return fmt.Errorf("failed to build label application ctx: %w", err)
1008
1026
}
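
The delete paths above now follow a begin / defer-rollback / commit shape. A self-contained sketch of that pattern with database/sql; the SQL statement and table name are placeholders, not the appview's real queries.

package ingester_sketch

import (
	"context"
	"database/sql"
	"fmt"
)

// deleteIssueTx mirrors the transaction shape used around db.DeleteIssues in
// this hunk.
func deleteIssueTx(ctx context.Context, ddb *sql.DB, did, rkey string) error {
	tx, err := ddb.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback becomes a no-op once Commit succeeds.
	defer tx.Rollback()

	if _, err := tx.ExecContext(ctx,
		"DELETE FROM issues WHERE did = ? AND rkey = ?", did, rkey, // placeholder SQL
	); err != nil {
		return fmt.Errorf("failed to delete issue record: %w", err)
	}
	return tx.Commit()
}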
+152 -135 appview/issues/issues.go
···
7
7
"fmt"
8
8
"log/slog"
9
9
"net/http"
10
-
"slices"
11
10
"time"
12
11
13
12
comatproto "github.com/bluesky-social/indigo/api/atproto"
···
20
19
"tangled.org/core/appview/config"
21
20
"tangled.org/core/appview/db"
22
21
issues_indexer "tangled.org/core/appview/indexer/issues"
22
+
"tangled.org/core/appview/mentions"
23
23
"tangled.org/core/appview/models"
24
24
"tangled.org/core/appview/notify"
25
25
"tangled.org/core/appview/oauth"
26
26
"tangled.org/core/appview/pages"
27
-
"tangled.org/core/appview/pages/markup"
27
+
"tangled.org/core/appview/pages/repoinfo"
28
28
"tangled.org/core/appview/pagination"
29
29
"tangled.org/core/appview/reporesolver"
30
30
"tangled.org/core/appview/validator"
31
31
"tangled.org/core/idresolver"
32
+
"tangled.org/core/orm"
33
+
"tangled.org/core/rbac"
32
34
"tangled.org/core/tid"
33
35
)
34
36
35
37
type Issues struct {
36
-
oauth *oauth.OAuth
37
-
repoResolver *reporesolver.RepoResolver
38
-
pages *pages.Pages
39
-
idResolver *idresolver.Resolver
40
-
db *db.DB
41
-
config *config.Config
42
-
notifier notify.Notifier
43
-
logger *slog.Logger
44
-
validator *validator.Validator
45
-
indexer *issues_indexer.Indexer
38
+
oauth *oauth.OAuth
39
+
repoResolver *reporesolver.RepoResolver
40
+
enforcer *rbac.Enforcer
41
+
pages *pages.Pages
42
+
idResolver *idresolver.Resolver
43
+
mentionsResolver *mentions.Resolver
44
+
db *db.DB
45
+
config *config.Config
46
+
notifier notify.Notifier
47
+
logger *slog.Logger
48
+
validator *validator.Validator
49
+
indexer *issues_indexer.Indexer
46
50
}
47
51
48
52
func New(
49
53
oauth *oauth.OAuth,
50
54
repoResolver *reporesolver.RepoResolver,
55
+
enforcer *rbac.Enforcer,
51
56
pages *pages.Pages,
52
57
idResolver *idresolver.Resolver,
58
+
mentionsResolver *mentions.Resolver,
53
59
db *db.DB,
54
60
config *config.Config,
55
61
notifier notify.Notifier,
···
58
64
logger *slog.Logger,
59
65
) *Issues {
60
66
return &Issues{
61
-
oauth: oauth,
62
-
repoResolver: repoResolver,
63
-
pages: pages,
64
-
idResolver: idResolver,
65
-
db: db,
66
-
config: config,
67
-
notifier: notifier,
68
-
logger: logger,
69
-
validator: validator,
70
-
indexer: indexer,
67
+
oauth: oauth,
68
+
repoResolver: repoResolver,
69
+
enforcer: enforcer,
70
+
pages: pages,
71
+
idResolver: idResolver,
72
+
mentionsResolver: mentionsResolver,
73
+
db: db,
74
+
config: config,
75
+
notifier: notifier,
76
+
logger: logger,
77
+
validator: validator,
78
+
indexer: indexer,
71
79
}
72
80
}
73
81
···
97
105
userReactions = db.GetReactionStatusMap(rp.db, user.Did, issue.AtUri())
98
106
}
99
107
108
+
backlinks, err := db.GetBacklinks(rp.db, issue.AtUri())
109
+
if err != nil {
110
+
l.Error("failed to fetch backlinks", "err", err)
111
+
rp.pages.Error503(w)
112
+
return
113
+
}
114
+
100
115
labelDefs, err := db.GetLabelDefinitions(
101
116
rp.db,
102
-
db.FilterIn("at_uri", f.Repo.Labels),
103
-
db.FilterContains("scope", tangled.RepoIssueNSID),
117
+
orm.FilterIn("at_uri", f.Labels),
118
+
orm.FilterContains("scope", tangled.RepoIssueNSID),
104
119
)
105
120
if err != nil {
106
121
l.Error("failed to fetch labels", "err", err)
···
115
130
116
131
rp.pages.RepoSingleIssue(w, pages.RepoSingleIssueParams{
117
132
LoggedInUser: user,
118
-
RepoInfo: f.RepoInfo(user),
133
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
119
134
Issue: issue,
120
135
CommentList: issue.CommentList(),
136
+
Backlinks: backlinks,
121
137
OrderedReactionKinds: models.OrderedReactionKinds,
122
138
Reactions: reactionMap,
123
139
UserReacted: userReactions,
···
128
144
func (rp *Issues) EditIssue(w http.ResponseWriter, r *http.Request) {
129
145
l := rp.logger.With("handler", "EditIssue")
130
146
user := rp.oauth.GetUser(r)
131
-
f, err := rp.repoResolver.Resolve(r)
132
-
if err != nil {
133
-
l.Error("failed to get repo and knot", "err", err)
134
-
return
135
-
}
136
147
137
148
issue, ok := r.Context().Value("issue").(*models.Issue)
138
149
if !ok {
···
145
156
case http.MethodGet:
146
157
rp.pages.EditIssueFragment(w, pages.EditIssueParams{
147
158
LoggedInUser: user,
148
-
RepoInfo: f.RepoInfo(user),
159
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
149
160
Issue: issue,
150
161
})
151
162
case http.MethodPost:
···
153
164
newIssue := issue
154
165
newIssue.Title = r.FormValue("title")
155
166
newIssue.Body = r.FormValue("body")
167
+
newIssue.Mentions, newIssue.References = rp.mentionsResolver.Resolve(r.Context(), newIssue.Body)
156
168
157
169
if err := rp.validator.ValidateIssue(newIssue); err != nil {
158
170
l.Error("validation error", "err", err)
···
222
234
l := rp.logger.With("handler", "DeleteIssue")
223
235
noticeId := "issue-actions-error"
224
236
225
-
user := rp.oauth.GetUser(r)
226
-
227
237
f, err := rp.repoResolver.Resolve(r)
228
238
if err != nil {
229
239
l.Error("failed to get repo and knot", "err", err)
···
238
248
}
239
249
l = l.With("did", issue.Did, "rkey", issue.Rkey)
240
250
251
+
tx, err := rp.db.Begin()
252
+
if err != nil {
253
+
l.Error("failed to start transaction", "err", err)
254
+
rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.")
255
+
return
256
+
}
257
+
defer tx.Rollback()
258
+
241
259
// delete from PDS
242
260
client, err := rp.oauth.AuthorizedClient(r)
243
261
if err != nil {
···
258
276
}
259
277
260
278
// delete from db
261
-
if err := db.DeleteIssues(rp.db, db.FilterEq("id", issue.Id)); err != nil {
279
+
if err := db.DeleteIssues(tx, issue.Did, issue.Rkey); err != nil {
262
280
l.Error("failed to delete issue", "err", err)
263
281
rp.pages.Notice(w, noticeId, "Failed to delete issue.")
264
282
return
265
283
}
284
+
tx.Commit()
266
285
267
286
rp.notifier.DeleteIssue(r.Context(), issue)
268
287
269
288
// return to all issues page
270
-
rp.pages.HxRedirect(w, "/"+f.RepoInfo(user).FullName()+"/issues")
289
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
290
+
rp.pages.HxRedirect(w, "/"+ownerSlashRepo+"/issues")
271
291
}
272
292
273
293
func (rp *Issues) CloseIssue(w http.ResponseWriter, r *http.Request) {
···
286
306
return
287
307
}
288
308
289
-
collaborators, err := f.Collaborators(r.Context())
290
-
if err != nil {
291
-
l.Error("failed to fetch repo collaborators", "err", err)
292
-
}
293
-
isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool {
294
-
return user.Did == collab.Did
295
-
})
309
+
roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
310
+
isRepoOwner := roles.IsOwner()
311
+
isCollaborator := roles.IsCollaborator()
296
312
isIssueOwner := user.Did == issue.Did
297
313
298
314
// TODO: make this more granular
299
-
if isIssueOwner || isCollaborator {
315
+
if isIssueOwner || isRepoOwner || isCollaborator {
300
316
err = db.CloseIssues(
301
317
rp.db,
302
-
db.FilterEq("id", issue.Id),
318
+
orm.FilterEq("id", issue.Id),
303
319
)
304
320
if err != nil {
305
321
l.Error("failed to close issue", "err", err)
···
312
328
// notify about the issue closure
313
329
rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue)
314
330
315
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId))
331
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
332
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId))
316
333
return
317
334
} else {
318
335
l.Error("user is not permitted to close issue")
···
337
354
return
338
355
}
339
356
340
-
collaborators, err := f.Collaborators(r.Context())
341
-
if err != nil {
342
-
l.Error("failed to fetch repo collaborators", "err", err)
343
-
}
344
-
isCollaborator := slices.ContainsFunc(collaborators, func(collab pages.Collaborator) bool {
345
-
return user.Did == collab.Did
346
-
})
357
+
roles := repoinfo.RolesInRepo{Roles: rp.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
358
+
isRepoOwner := roles.IsOwner()
359
+
isCollaborator := roles.IsCollaborator()
347
360
isIssueOwner := user.Did == issue.Did
348
361
349
-
if isCollaborator || isIssueOwner {
362
+
if isCollaborator || isRepoOwner || isIssueOwner {
350
363
err := db.ReopenIssues(
351
364
rp.db,
352
-
db.FilterEq("id", issue.Id),
365
+
orm.FilterEq("id", issue.Id),
353
366
)
354
367
if err != nil {
355
368
l.Error("failed to reopen issue", "err", err)
···
362
375
// notify about the issue reopen
363
376
rp.notifier.NewIssueState(r.Context(), syntax.DID(user.Did), issue)
364
377
365
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId))
378
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
379
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId))
366
380
return
367
381
} else {
368
382
l.Error("user is not the owner of the repo")
···
398
412
if replyToUri != "" {
399
413
replyTo = &replyToUri
400
414
}
415
+
416
+
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
401
417
402
418
comment := models.IssueComment{
403
-
Did: user.Did,
404
-
Rkey: tid.TID(),
405
-
IssueAt: issue.AtUri().String(),
406
-
ReplyTo: replyTo,
407
-
Body: body,
408
-
Created: time.Now(),
419
+
Did: user.Did,
420
+
Rkey: tid.TID(),
421
+
IssueAt: issue.AtUri().String(),
422
+
ReplyTo: replyTo,
423
+
Body: body,
424
+
Created: time.Now(),
425
+
Mentions: mentions,
426
+
References: references,
409
427
}
410
428
if err = rp.validator.ValidateIssueComment(&comment); err != nil {
411
429
l.Error("failed to validate comment", "err", err)
···
442
460
}
443
461
}()
444
462
445
-
commentId, err := db.AddIssueComment(rp.db, comment)
463
+
tx, err := rp.db.Begin()
464
+
if err != nil {
465
+
l.Error("failed to start transaction", "err", err)
466
+
rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.")
467
+
return
468
+
}
469
+
defer tx.Rollback()
470
+
471
+
commentId, err := db.AddIssueComment(tx, comment)
446
472
if err != nil {
447
473
l.Error("failed to create comment", "err", err)
448
474
rp.pages.Notice(w, "issue-comment", "Failed to create comment.")
449
475
return
450
476
}
477
+
err = tx.Commit()
478
+
if err != nil {
479
+
l.Error("failed to commit transaction", "err", err)
480
+
rp.pages.Notice(w, "issue-comment", "Failed to create comment, try again later.")
481
+
return
482
+
}
451
483
452
484
// reset atUri to make rollback a no-op
453
485
atUri = ""
···
455
487
// notify about the new comment
456
488
comment.Id = commentId
457
489
458
-
rawMentions := markup.FindUserMentions(comment.Body)
459
-
idents := rp.idResolver.ResolveIdents(r.Context(), rawMentions)
460
-
l.Debug("parsed mentions", "raw", rawMentions, "idents", idents)
461
-
var mentions []syntax.DID
462
-
for _, ident := range idents {
463
-
if ident != nil && !ident.Handle.IsInvalidHandle() {
464
-
mentions = append(mentions, ident.DID)
465
-
}
466
-
}
467
490
rp.notifier.NewIssueComment(r.Context(), &comment, mentions)
468
491
469
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", f.OwnerSlashRepo(), issue.IssueId, commentId))
492
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
493
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d#comment-%d", ownerSlashRepo, issue.IssueId, commentId))
470
494
}
471
495
472
496
func (rp *Issues) IssueComment(w http.ResponseWriter, r *http.Request) {
473
497
l := rp.logger.With("handler", "IssueComment")
474
498
user := rp.oauth.GetUser(r)
475
-
f, err := rp.repoResolver.Resolve(r)
476
-
if err != nil {
477
-
l.Error("failed to get repo and knot", "err", err)
478
-
return
479
-
}
480
499
481
500
issue, ok := r.Context().Value("issue").(*models.Issue)
482
501
if !ok {
···
488
507
commentId := chi.URLParam(r, "commentId")
489
508
comments, err := db.GetIssueComments(
490
509
rp.db,
491
-
db.FilterEq("id", commentId),
510
+
orm.FilterEq("id", commentId),
492
511
)
493
512
if err != nil {
494
513
l.Error("failed to fetch comment", "id", commentId)
···
504
523
505
524
rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{
506
525
LoggedInUser: user,
507
-
RepoInfo: f.RepoInfo(user),
526
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
508
527
Issue: issue,
509
528
Comment: &comment,
510
529
})
···
513
532
func (rp *Issues) EditIssueComment(w http.ResponseWriter, r *http.Request) {
514
533
l := rp.logger.With("handler", "EditIssueComment")
515
534
user := rp.oauth.GetUser(r)
516
-
f, err := rp.repoResolver.Resolve(r)
517
-
if err != nil {
518
-
l.Error("failed to get repo and knot", "err", err)
519
-
return
520
-
}
521
535
522
536
issue, ok := r.Context().Value("issue").(*models.Issue)
523
537
if !ok {
···
529
543
commentId := chi.URLParam(r, "commentId")
530
544
comments, err := db.GetIssueComments(
531
545
rp.db,
532
-
db.FilterEq("id", commentId),
546
+
orm.FilterEq("id", commentId),
533
547
)
534
548
if err != nil {
535
549
l.Error("failed to fetch comment", "id", commentId)
···
553
567
case http.MethodGet:
554
568
rp.pages.EditIssueCommentFragment(w, pages.EditIssueCommentParams{
555
569
LoggedInUser: user,
556
-
RepoInfo: f.RepoInfo(user),
570
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
557
571
Issue: issue,
558
572
Comment: &comment,
559
573
})
···
571
585
newComment := comment
572
586
newComment.Body = newBody
573
587
newComment.Edited = &now
588
+
newComment.Mentions, newComment.References = rp.mentionsResolver.Resolve(r.Context(), newBody)
589
+
574
590
record := newComment.AsRecord()
575
591
576
-
_, err = db.AddIssueComment(rp.db, newComment)
592
+
tx, err := rp.db.Begin()
593
+
if err != nil {
594
+
l.Error("failed to start transaction", "err", err)
595
+
rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.")
596
+
return
597
+
}
598
+
defer tx.Rollback()
599
+
600
+
_, err = db.AddIssueComment(tx, newComment)
577
601
if err != nil {
578
602
l.Error("failed to perferom update-description query", "err", err)
579
603
rp.pages.Notice(w, "repo-notice", "Failed to update description, try again later.")
580
604
return
581
605
}
606
+
tx.Commit()
582
607
583
608
// rkey is optional, it was introduced later
584
609
if newComment.Rkey != "" {
···
607
632
// return new comment body with htmx
608
633
rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{
609
634
LoggedInUser: user,
610
-
RepoInfo: f.RepoInfo(user),
635
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
611
636
Issue: issue,
612
637
Comment: &newComment,
613
638
})
···
617
642
func (rp *Issues) ReplyIssueCommentPlaceholder(w http.ResponseWriter, r *http.Request) {
618
643
l := rp.logger.With("handler", "ReplyIssueCommentPlaceholder")
619
644
user := rp.oauth.GetUser(r)
620
-
f, err := rp.repoResolver.Resolve(r)
621
-
if err != nil {
622
-
l.Error("failed to get repo and knot", "err", err)
623
-
return
624
-
}
625
645
626
646
issue, ok := r.Context().Value("issue").(*models.Issue)
627
647
if !ok {
···
633
653
commentId := chi.URLParam(r, "commentId")
634
654
comments, err := db.GetIssueComments(
635
655
rp.db,
636
-
db.FilterEq("id", commentId),
656
+
orm.FilterEq("id", commentId),
637
657
)
638
658
if err != nil {
639
659
l.Error("failed to fetch comment", "id", commentId)
···
649
669
650
670
rp.pages.ReplyIssueCommentPlaceholderFragment(w, pages.ReplyIssueCommentPlaceholderParams{
651
671
LoggedInUser: user,
652
-
RepoInfo: f.RepoInfo(user),
672
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
653
673
Issue: issue,
654
674
Comment: &comment,
655
675
})
···
658
678
func (rp *Issues) ReplyIssueComment(w http.ResponseWriter, r *http.Request) {
659
679
l := rp.logger.With("handler", "ReplyIssueComment")
660
680
user := rp.oauth.GetUser(r)
661
-
f, err := rp.repoResolver.Resolve(r)
662
-
if err != nil {
663
-
l.Error("failed to get repo and knot", "err", err)
664
-
return
665
-
}
666
681
667
682
issue, ok := r.Context().Value("issue").(*models.Issue)
668
683
if !ok {
···
674
689
commentId := chi.URLParam(r, "commentId")
675
690
comments, err := db.GetIssueComments(
676
691
rp.db,
677
-
db.FilterEq("id", commentId),
692
+
orm.FilterEq("id", commentId),
678
693
)
679
694
if err != nil {
680
695
l.Error("failed to fetch comment", "id", commentId)
···
690
705
691
706
rp.pages.ReplyIssueCommentFragment(w, pages.ReplyIssueCommentParams{
692
707
LoggedInUser: user,
693
-
RepoInfo: f.RepoInfo(user),
708
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
694
709
Issue: issue,
695
710
Comment: &comment,
696
711
})
···
699
714
func (rp *Issues) DeleteIssueComment(w http.ResponseWriter, r *http.Request) {
700
715
l := rp.logger.With("handler", "DeleteIssueComment")
701
716
user := rp.oauth.GetUser(r)
702
-
f, err := rp.repoResolver.Resolve(r)
703
-
if err != nil {
704
-
l.Error("failed to get repo and knot", "err", err)
705
-
return
706
-
}
707
717
708
718
issue, ok := r.Context().Value("issue").(*models.Issue)
709
719
if !ok {
···
715
725
commentId := chi.URLParam(r, "commentId")
716
726
comments, err := db.GetIssueComments(
717
727
rp.db,
718
-
db.FilterEq("id", commentId),
728
+
orm.FilterEq("id", commentId),
719
729
)
720
730
if err != nil {
721
731
l.Error("failed to fetch comment", "id", commentId)
···
742
752
743
753
// optimistic deletion
744
754
deleted := time.Now()
745
-
err = db.DeleteIssueComments(rp.db, db.FilterEq("id", comment.Id))
755
+
err = db.DeleteIssueComments(rp.db, orm.FilterEq("id", comment.Id))
746
756
if err != nil {
747
757
l.Error("failed to delete comment", "err", err)
748
758
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment")
···
774
784
// htmx fragment of comment after deletion
775
785
rp.pages.IssueCommentBodyFragment(w, pages.IssueCommentBodyParams{
776
786
LoggedInUser: user,
777
-
RepoInfo: f.RepoInfo(user),
787
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
778
788
Issue: issue,
779
789
Comment: &comment,
780
790
})
···
804
814
return
805
815
}
806
816
817
+
totalIssues := 0
818
+
if isOpen {
819
+
totalIssues = f.RepoStats.IssueCount.Open
820
+
} else {
821
+
totalIssues = f.RepoStats.IssueCount.Closed
822
+
}
823
+
807
824
keyword := params.Get("q")
808
825
809
826
var issues []models.Issue
···
820
837
return
821
838
}
822
839
l.Debug("searched issues with indexer", "count", len(res.Hits))
840
+
totalIssues = int(res.Total)
823
841
824
842
issues, err = db.GetIssues(
825
843
rp.db,
826
-
db.FilterIn("id", res.Hits),
844
+
orm.FilterIn("id", res.Hits),
827
845
)
828
846
if err != nil {
829
847
l.Error("failed to get issues", "err", err)
···
839
857
issues, err = db.GetIssuesPaginated(
840
858
rp.db,
841
859
page,
842
-
db.FilterEq("repo_at", f.RepoAt()),
843
-
db.FilterEq("open", openInt),
860
+
orm.FilterEq("repo_at", f.RepoAt()),
861
+
orm.FilterEq("open", openInt),
844
862
)
845
863
if err != nil {
846
864
l.Error("failed to get issues", "err", err)
···
851
869
852
870
labelDefs, err := db.GetLabelDefinitions(
853
871
rp.db,
854
-
db.FilterIn("at_uri", f.Repo.Labels),
855
-
db.FilterContains("scope", tangled.RepoIssueNSID),
872
+
orm.FilterIn("at_uri", f.Labels),
873
+
orm.FilterContains("scope", tangled.RepoIssueNSID),
856
874
)
857
875
if err != nil {
858
876
l.Error("failed to fetch labels", "err", err)
···
867
885
868
886
rp.pages.RepoIssues(w, pages.RepoIssuesParams{
869
887
LoggedInUser: rp.oauth.GetUser(r),
870
-
RepoInfo: f.RepoInfo(user),
888
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
871
889
Issues: issues,
890
+
IssueCount: totalIssues,
872
891
LabelDefs: defs,
873
892
FilteringByOpen: isOpen,
874
893
FilterQuery: keyword,
···
890
909
case http.MethodGet:
891
910
rp.pages.RepoNewIssue(w, pages.RepoNewIssueParams{
892
911
LoggedInUser: user,
893
-
RepoInfo: f.RepoInfo(user),
912
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
894
913
})
895
914
case http.MethodPost:
915
+
body := r.FormValue("body")
916
+
mentions, references := rp.mentionsResolver.Resolve(r.Context(), body)
917
+
896
918
issue := &models.Issue{
897
-
RepoAt: f.RepoAt(),
898
-
Rkey: tid.TID(),
899
-
Title: r.FormValue("title"),
900
-
Body: r.FormValue("body"),
901
-
Open: true,
902
-
Did: user.Did,
903
-
Created: time.Now(),
904
-
Repo: &f.Repo,
919
+
RepoAt: f.RepoAt(),
920
+
Rkey: tid.TID(),
921
+
Title: r.FormValue("title"),
922
+
Body: body,
923
+
Open: true,
924
+
Did: user.Did,
925
+
Created: time.Now(),
926
+
Mentions: mentions,
927
+
References: references,
928
+
Repo: f,
905
929
}
906
930
907
931
if err := rp.validator.ValidateIssue(issue); err != nil {
···
969
993
// everything is successful, do not rollback the atproto record
970
994
atUri = ""
971
995
972
-
rawMentions := markup.FindUserMentions(issue.Body)
973
-
idents := rp.idResolver.ResolveIdents(r.Context(), rawMentions)
974
-
l.Debug("parsed mentions", "raw", rawMentions, "idents", idents)
975
-
var mentions []syntax.DID
976
-
for _, ident := range idents {
977
-
if ident != nil && !ident.Handle.IsInvalidHandle() {
978
-
mentions = append(mentions, ident.DID)
979
-
}
980
-
}
981
996
rp.notifier.NewIssue(r.Context(), issue, mentions)
982
-
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", f.OwnerSlashRepo(), issue.IssueId))
997
+
998
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
999
+
rp.pages.HxLocation(w, fmt.Sprintf("/%s/issues/%d", ownerSlashRepo, issue.IssueId))
983
1000
return
984
1001
}
985
1002
}
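
The collaborator-slice checks above are replaced with enforcer-backed roles. A stand-alone sketch of the resulting gate; the rolesInRepo type and role strings below are stand-ins for illustration only, the real helpers live in appview/pages/repoinfo and rbac.

package main

import "fmt"

// Stand-in for repoinfo.RolesInRepo; the role strings are assumptions made
// for this sketch, not the actual values used by the enforcer.
type rolesInRepo struct{ roles []string }

func (r rolesInRepo) has(role string) bool {
	for _, x := range r.roles {
		if x == role {
			return true
		}
	}
	return false
}

func (r rolesInRepo) isOwner() bool        { return r.has("repo:owner") }
func (r rolesInRepo) isCollaborator() bool { return r.has("repo:collaborator") }

func main() {
	issueOwner, viewer := "did:plc:alice", "did:plc:bob"
	roles := rolesInRepo{roles: []string{"repo:collaborator"}}

	// Mirrors the CloseIssue/ReopenIssue gate: issue author, repo owner,
	// or collaborator may change issue state.
	canModerate := viewer == issueOwner || roles.isOwner() || roles.isCollaborator()
	fmt.Println("can close/reopen:", canModerate)
}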
+3 -3 appview/issues/opengraph.go
···
232
232
233
233
// Get owner handle for avatar
234
234
var ownerHandle string
235
-
owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Repo.Did)
235
+
owner, err := rp.idResolver.ResolveIdent(r.Context(), f.Did)
236
236
if err != nil {
237
-
ownerHandle = f.Repo.Did
237
+
ownerHandle = f.Did
238
238
} else {
239
239
ownerHandle = "@" + owner.Handle.String()
240
240
}
241
241
242
-
card, err := rp.drawIssueSummaryCard(issue, &f.Repo, commentCount, ownerHandle)
242
+
card, err := rp.drawIssueSummaryCard(issue, f, commentCount, ownerHandle)
243
243
if err != nil {
244
244
log.Println("failed to draw issue summary card", err)
245
245
http.Error(w, "failed to draw issue summary card", http.StatusInternalServerError)
+37 -19 appview/knots/knots.go
···
21
21
"tangled.org/core/appview/xrpcclient"
22
22
"tangled.org/core/eventconsumer"
23
23
"tangled.org/core/idresolver"
24
+
"tangled.org/core/orm"
24
25
"tangled.org/core/rbac"
25
26
"tangled.org/core/tid"
26
27
···
39
40
Knotstream *eventconsumer.Consumer
40
41
}
41
42
43
+
type tab = map[string]any
44
+
45
+
var (
46
+
knotsTabs []tab = []tab{
47
+
{"Name": "profile", "Icon": "user"},
48
+
{"Name": "keys", "Icon": "key"},
49
+
{"Name": "emails", "Icon": "mail"},
50
+
{"Name": "notifications", "Icon": "bell"},
51
+
{"Name": "knots", "Icon": "volleyball"},
52
+
{"Name": "spindles", "Icon": "spool"},
53
+
}
54
+
)
55
+
42
56
func (k *Knots) Router() http.Handler {
43
57
r := chi.NewRouter()
44
58
···
59
73
user := k.OAuth.GetUser(r)
60
74
registrations, err := db.GetRegistrations(
61
75
k.Db,
62
-
db.FilterEq("did", user.Did),
76
+
orm.FilterEq("did", user.Did),
63
77
)
64
78
if err != nil {
65
79
k.Logger.Error("failed to fetch knot registrations", "err", err)
···
70
84
k.Pages.Knots(w, pages.KnotsParams{
71
85
LoggedInUser: user,
72
86
Registrations: registrations,
87
+
Tabs: knotsTabs,
88
+
Tab: "knots",
73
89
})
74
90
}
75
91
···
87
103
88
104
registrations, err := db.GetRegistrations(
89
105
k.Db,
90
-
db.FilterEq("did", user.Did),
91
-
db.FilterEq("domain", domain),
106
+
orm.FilterEq("did", user.Did),
107
+
orm.FilterEq("domain", domain),
92
108
)
93
109
if err != nil {
94
110
l.Error("failed to get registrations", "err", err)
···
112
128
repos, err := db.GetRepos(
113
129
k.Db,
114
130
0,
115
-
db.FilterEq("knot", domain),
131
+
orm.FilterEq("knot", domain),
116
132
)
117
133
if err != nil {
118
134
l.Error("failed to get knot repos", "err", err)
···
132
148
Members: members,
133
149
Repos: repoMap,
134
150
IsOwner: true,
151
+
Tabs: knotsTabs,
152
+
Tab: "knots",
135
153
})
136
154
}
137
155
···
276
294
// get record from db first
277
295
registrations, err := db.GetRegistrations(
278
296
k.Db,
279
-
db.FilterEq("did", user.Did),
280
-
db.FilterEq("domain", domain),
297
+
orm.FilterEq("did", user.Did),
298
+
orm.FilterEq("domain", domain),
281
299
)
282
300
if err != nil {
283
301
l.Error("failed to get registration", "err", err)
···
304
322
305
323
err = db.DeleteKnot(
306
324
tx,
307
-
db.FilterEq("did", user.Did),
308
-
db.FilterEq("domain", domain),
325
+
orm.FilterEq("did", user.Did),
326
+
orm.FilterEq("domain", domain),
309
327
)
310
328
if err != nil {
311
329
l.Error("failed to delete registration", "err", err)
···
385
403
// get record from db first
386
404
registrations, err := db.GetRegistrations(
387
405
k.Db,
388
-
db.FilterEq("did", user.Did),
389
-
db.FilterEq("domain", domain),
406
+
orm.FilterEq("did", user.Did),
407
+
orm.FilterEq("domain", domain),
390
408
)
391
409
if err != nil {
392
410
l.Error("failed to get registration", "err", err)
···
476
494
// Get updated registration to show
477
495
registrations, err = db.GetRegistrations(
478
496
k.Db,
479
-
db.FilterEq("did", user.Did),
480
-
db.FilterEq("domain", domain),
497
+
orm.FilterEq("did", user.Did),
498
+
orm.FilterEq("domain", domain),
481
499
)
482
500
if err != nil {
483
501
l.Error("failed to get registration", "err", err)
···
512
530
513
531
registrations, err := db.GetRegistrations(
514
532
k.Db,
515
-
db.FilterEq("did", user.Did),
516
-
db.FilterEq("domain", domain),
517
-
db.FilterIsNot("registered", "null"),
533
+
orm.FilterEq("did", user.Did),
534
+
orm.FilterEq("domain", domain),
535
+
orm.FilterIsNot("registered", "null"),
518
536
)
519
537
if err != nil {
520
538
l.Error("failed to get registration", "err", err)
···
596
614
}
597
615
598
616
// success
599
-
k.Pages.HxRedirect(w, fmt.Sprintf("/knots/%s", domain))
617
+
k.Pages.HxRedirect(w, fmt.Sprintf("/settings/knots/%s", domain))
600
618
}
601
619
602
620
func (k *Knots) removeMember(w http.ResponseWriter, r *http.Request) {
···
620
638
621
639
registrations, err := db.GetRegistrations(
622
640
k.Db,
623
-
db.FilterEq("did", user.Did),
624
-
db.FilterEq("domain", domain),
625
-
db.FilterIsNot("registered", "null"),
641
+
orm.FilterEq("did", user.Did),
642
+
orm.FilterEq("domain", domain),
643
+
orm.FilterIsNot("registered", "null"),
626
644
)
627
645
if err != nil {
628
646
l.Error("failed to get registration", "err", err)
+5 -4 appview/labels/labels.go
···
16
16
"tangled.org/core/appview/oauth"
17
17
"tangled.org/core/appview/pages"
18
18
"tangled.org/core/appview/validator"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/rbac"
20
21
"tangled.org/core/tid"
21
22
···
88
89
repoAt := r.Form.Get("repo")
89
90
subjectUri := r.Form.Get("subject")
90
91
91
-
repo, err := db.GetRepo(l.db, db.FilterEq("at_uri", repoAt))
92
+
repo, err := db.GetRepo(l.db, orm.FilterEq("at_uri", repoAt))
92
93
if err != nil {
93
94
fail("Failed to get repository.", err)
94
95
return
95
96
}
96
97
97
98
// find all the labels that this repo subscribes to
98
-
repoLabels, err := db.GetRepoLabels(l.db, db.FilterEq("repo_at", repoAt))
99
+
repoLabels, err := db.GetRepoLabels(l.db, orm.FilterEq("repo_at", repoAt))
99
100
if err != nil {
100
101
fail("Failed to get labels for this repository.", err)
101
102
return
···
106
107
labelAts = append(labelAts, rl.LabelAt.String())
107
108
}
108
109
109
-
actx, err := db.NewLabelApplicationCtx(l.db, db.FilterIn("at_uri", labelAts))
110
+
actx, err := db.NewLabelApplicationCtx(l.db, orm.FilterIn("at_uri", labelAts))
110
111
if err != nil {
111
112
fail("Invalid form data.", err)
112
113
return
113
114
}
114
115
115
116
// calculate the start state by applying already known labels
116
-
existingOps, err := db.GetLabelOps(l.db, db.FilterEq("subject", subjectUri))
117
+
existingOps, err := db.GetLabelOps(l.db, orm.FilterEq("subject", subjectUri))
117
118
if err != nil {
118
119
fail("Invalid form data.", err)
119
120
return
+67 appview/mentions/resolver.go
···
1
+
package mentions
2
+
3
+
import (
4
+
"context"
5
+
"log/slog"
6
+
7
+
"github.com/bluesky-social/indigo/atproto/syntax"
8
+
"tangled.org/core/appview/config"
9
+
"tangled.org/core/appview/db"
10
+
"tangled.org/core/appview/models"
11
+
"tangled.org/core/appview/pages/markup"
12
+
"tangled.org/core/idresolver"
13
+
)
14
+
15
+
type Resolver struct {
16
+
config *config.Config
17
+
idResolver *idresolver.Resolver
18
+
execer db.Execer
19
+
logger *slog.Logger
20
+
}
21
+
22
+
func New(
23
+
config *config.Config,
24
+
idResolver *idresolver.Resolver,
25
+
execer db.Execer,
26
+
logger *slog.Logger,
27
+
) *Resolver {
28
+
return &Resolver{
29
+
config,
30
+
idResolver,
31
+
execer,
32
+
logger,
33
+
}
34
+
}
35
+
36
+
func (r *Resolver) Resolve(ctx context.Context, source string) ([]syntax.DID, []syntax.ATURI) {
37
+
l := r.logger.With("method", "Resolve")
38
+
39
+
rawMentions, rawRefs := markup.FindReferences(r.config.Core.AppviewHost, source)
40
+
l.Debug("found possible references", "mentions", rawMentions, "refs", rawRefs)
41
+
42
+
idents := r.idResolver.ResolveIdents(ctx, rawMentions)
43
+
var mentions []syntax.DID
44
+
for _, ident := range idents {
45
+
if ident != nil && !ident.Handle.IsInvalidHandle() {
46
+
mentions = append(mentions, ident.DID)
47
+
}
48
+
}
49
+
l.Debug("found mentions", "mentions", mentions)
50
+
51
+
var resolvedRefs []models.ReferenceLink
52
+
for _, rawRef := range rawRefs {
53
+
ident, err := r.idResolver.ResolveIdent(ctx, rawRef.Handle)
54
+
if err != nil || ident == nil || ident.Handle.IsInvalidHandle() {
55
+
continue
56
+
}
57
+
rawRef.Handle = string(ident.DID)
58
+
resolvedRefs = append(resolvedRefs, rawRef)
59
+
}
60
+
aturiRefs, err := db.ValidateReferenceLinks(r.execer, resolvedRefs)
61
+
if err != nil {
62
+
l.Error("failed running query", "err", err)
63
+
}
64
+
l.Debug("found references", "refs", aturiRefs)
65
+
66
+
return mentions, aturiRefs
67
+
}
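
A usage sketch for the new resolver as the issue and comment handlers above call it; the wiring of config, identity resolver, execer and logger is assumed to happen at appview start-up, mirroring issues.New.

package mentions_sketch

import (
	"context"
	"fmt"
	"log/slog"

	"tangled.org/core/appview/config"
	"tangled.org/core/appview/db"
	"tangled.org/core/appview/mentions"
	"tangled.org/core/idresolver"
)

// resolveBody shows the call shape used by the handlers above: one call per
// issue/comment body, returning mentioned DIDs and referenced AT-URIs.
func resolveBody(ctx context.Context, cfg *config.Config, ids *idresolver.Resolver, execer db.Execer, body string) {
	r := mentions.New(cfg, ids, execer, slog.Default())
	dids, refs := r.Resolve(ctx, body)
	fmt.Println("mentions:", dids)
	fmt.Println("references:", refs)
}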
+5 -4 appview/middleware/middleware.go
···
18
18
"tangled.org/core/appview/pagination"
19
19
"tangled.org/core/appview/reporesolver"
20
20
"tangled.org/core/idresolver"
21
+
"tangled.org/core/orm"
21
22
"tangled.org/core/rbac"
22
23
)
23
24
···
164
165
ok, err := mw.enforcer.E.Enforce(actor.Did, f.Knot, f.DidSlashRepo(), requiredPerm)
165
166
if err != nil || !ok {
166
167
// we need a logged in user
167
-
log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.OwnerSlashRepo())
168
+
log.Printf("%s does not have perms of a %s in repo %s", actor.Did, requiredPerm, f.DidSlashRepo())
168
169
http.Error(w, "Forbiden", http.StatusUnauthorized)
169
170
return
170
171
}
···
217
218
218
219
repo, err := db.GetRepo(
219
220
mw.db,
220
-
db.FilterEq("did", id.DID.String()),
221
-
db.FilterEq("name", repoName),
221
+
orm.FilterEq("did", id.DID.String()),
222
+
orm.FilterEq("name", repoName),
222
223
)
223
224
if err != nil {
224
225
log.Println("failed to resolve repo", "err", err)
···
327
328
return
328
329
}
329
330
330
-
fullName := f.OwnerHandle() + "/" + f.Name
331
+
fullName := reporesolver.GetBaseRepoPath(r, f)
331
332
332
333
if r.Header.Get("User-Agent") == "Go-http-client/1.1" {
333
334
if r.URL.Query().Get("go-get") == "1" {
+70 -34 appview/models/issue.go
···
10
10
)
11
11
12
12
type Issue struct {
13
-
Id int64
14
-
Did string
15
-
Rkey string
16
-
RepoAt syntax.ATURI
17
-
IssueId int
18
-
Created time.Time
19
-
Edited *time.Time
20
-
Deleted *time.Time
21
-
Title string
22
-
Body string
23
-
Open bool
13
+
Id int64
14
+
Did string
15
+
Rkey string
16
+
RepoAt syntax.ATURI
17
+
IssueId int
18
+
Created time.Time
19
+
Edited *time.Time
20
+
Deleted *time.Time
21
+
Title string
22
+
Body string
23
+
Open bool
24
+
Mentions []syntax.DID
25
+
References []syntax.ATURI
24
26
25
27
// optionally, populate this when querying for reverse mappings
26
28
// like comment counts, parent repo etc.
···
34
36
}
35
37
36
38
func (i *Issue) AsRecord() tangled.RepoIssue {
39
+
mentions := make([]string, len(i.Mentions))
40
+
for i, did := range i.Mentions {
41
+
mentions[i] = string(did)
42
+
}
43
+
references := make([]string, len(i.References))
44
+
for i, uri := range i.References {
45
+
references[i] = string(uri)
46
+
}
37
47
return tangled.RepoIssue{
38
-
Repo: i.RepoAt.String(),
39
-
Title: i.Title,
40
-
Body: &i.Body,
41
-
CreatedAt: i.Created.Format(time.RFC3339),
48
+
Repo: i.RepoAt.String(),
49
+
Title: i.Title,
50
+
Body: &i.Body,
51
+
Mentions: mentions,
52
+
References: references,
53
+
CreatedAt: i.Created.Format(time.RFC3339),
42
54
}
43
55
}
44
56
···
161
173
}
162
174
163
175
type IssueComment struct {
164
-
Id int64
165
-
Did string
166
-
Rkey string
167
-
IssueAt string
168
-
ReplyTo *string
169
-
Body string
170
-
Created time.Time
171
-
Edited *time.Time
172
-
Deleted *time.Time
176
+
Id int64
177
+
Did string
178
+
Rkey string
179
+
IssueAt string
180
+
ReplyTo *string
181
+
Body string
182
+
Created time.Time
183
+
Edited *time.Time
184
+
Deleted *time.Time
185
+
Mentions []syntax.DID
186
+
References []syntax.ATURI
173
187
}
174
188
175
189
func (i *IssueComment) AtUri() syntax.ATURI {
···
177
191
}
178
192
179
193
func (i *IssueComment) AsRecord() tangled.RepoIssueComment {
194
+
mentions := make([]string, len(i.Mentions))
195
+
for i, did := range i.Mentions {
196
+
mentions[i] = string(did)
197
+
}
198
+
references := make([]string, len(i.References))
199
+
for i, uri := range i.References {
200
+
references[i] = string(uri)
201
+
}
180
202
return tangled.RepoIssueComment{
181
-
Body: i.Body,
182
-
Issue: i.IssueAt,
183
-
CreatedAt: i.Created.Format(time.RFC3339),
184
-
ReplyTo: i.ReplyTo,
203
+
Body: i.Body,
204
+
Issue: i.IssueAt,
205
+
CreatedAt: i.Created.Format(time.RFC3339),
206
+
ReplyTo: i.ReplyTo,
207
+
Mentions: mentions,
208
+
References: references,
185
209
}
186
210
}
187
211
···
205
229
return nil, err
206
230
}
207
231
232
+
i := record
233
+
mentions := make([]syntax.DID, len(record.Mentions))
234
+
for i, did := range record.Mentions {
235
+
mentions[i] = syntax.DID(did)
236
+
}
237
+
references := make([]syntax.ATURI, len(record.References))
238
+
for i, uri := range i.References {
239
+
references[i] = syntax.ATURI(uri)
240
+
}
241
+
208
242
comment := IssueComment{
209
-
Did: ownerDid,
210
-
Rkey: rkey,
211
-
Body: record.Body,
212
-
IssueAt: record.Issue,
213
-
ReplyTo: record.ReplyTo,
214
-
Created: created,
243
+
Did: ownerDid,
244
+
Rkey: rkey,
245
+
Body: record.Body,
246
+
IssueAt: record.Issue,
247
+
ReplyTo: record.ReplyTo,
248
+
Created: created,
249
+
Mentions: mentions,
250
+
References: references,
215
251
}
216
252
217
253
return &comment, nil
+10 appview/models/pipeline.go
···
1
1
package models
2
2
3
3
import (
4
+
"fmt"
4
5
"slices"
5
6
"time"
6
7
7
8
"github.com/bluesky-social/indigo/atproto/syntax"
8
9
"github.com/go-git/go-git/v5/plumbing"
10
+
"tangled.org/core/api/tangled"
9
11
spindle "tangled.org/core/spindle/models"
10
12
"tangled.org/core/workflow"
11
13
)
···
23
25
// populate when querying for reverse mappings
24
26
Trigger *Trigger
25
27
Statuses map[string]WorkflowStatus
28
+
}
29
+
30
+
func (p *Pipeline) AtUri() syntax.ATURI {
31
+
return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", p.Knot, tangled.PipelineNSID, p.Rkey))
26
32
}
27
33
28
34
type WorkflowStatus struct {
···
128
134
Error *string
129
135
ExitCode int
130
136
}
137
+
138
+
func (ps *PipelineStatus) PipelineAt() syntax.ATURI {
139
+
return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", ps.PipelineKnot, tangled.PipelineNSID, ps.PipelineRkey))
140
+
}
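
For context, the AT-URI these new helpers produce has the shape shown below; the literal NSID is an assumption about the value of tangled.PipelineNSID, and the knot/rkey are placeholders.

package main

import "fmt"

func main() {
	// Illustrative values only; the real method is models.(*Pipeline).AtUri.
	knot, rkey := "knot.example.com", "3kabc123xyz"
	nsid := "sh.tangled.pipeline" // assumed value of tangled.PipelineNSID
	fmt.Printf("at://did:web:%s/%s/%s\n", knot, nsid, rkey)
	// at://did:web:knot.example.com/sh.tangled.pipeline/3kabc123xyz
}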
+3 -1 appview/models/profile.go
···
111
111
}
112
112
113
113
type ByMonth struct {
114
+
Commits int
114
115
RepoEvents []RepoEvent
115
116
IssueEvents IssueEvents
116
117
PullEvents PullEvents
···
119
120
func (b ByMonth) IsEmpty() bool {
120
121
return len(b.RepoEvents) == 0 &&
121
122
len(b.IssueEvents.Items) == 0 &&
122
-
len(b.PullEvents.Items) == 0
123
+
len(b.PullEvents.Items) == 0 &&
124
+
b.Commits == 0
123
125
}
124
126
125
127
type IssueEvents struct {
+41 -3 appview/models/pull.go
···
66
66
TargetBranch string
67
67
State PullState
68
68
Submissions []*PullSubmission
69
+
Mentions []syntax.DID
70
+
References []syntax.ATURI
69
71
70
72
// stacking
71
73
StackId string // nullable string
···
92
94
source.Repo = &s
93
95
}
94
96
}
97
+
mentions := make([]string, len(p.Mentions))
98
+
for i, did := range p.Mentions {
99
+
mentions[i] = string(did)
100
+
}
101
+
references := make([]string, len(p.References))
102
+
for i, uri := range p.References {
103
+
references[i] = string(uri)
104
+
}
95
105
96
106
record := tangled.RepoPull{
97
-
Title: p.Title,
98
-
Body: &p.Body,
99
-
CreatedAt: p.Created.Format(time.RFC3339),
107
+
Title: p.Title,
108
+
Body: &p.Body,
109
+
Mentions: mentions,
110
+
References: references,
111
+
CreatedAt: p.Created.Format(time.RFC3339),
100
112
Target: &tangled.RepoPull_Target{
101
113
Repo: p.RepoAt.String(),
102
114
Branch: p.TargetBranch,
···
146
158
147
159
// content
148
160
Body string
161
+
162
+
// meta
163
+
Mentions []syntax.DID
164
+
References []syntax.ATURI
149
165
150
166
// meta
151
167
Created time.Time
152
168
}
169
+
170
+
func (p *PullComment) AtUri() syntax.ATURI {
171
+
return syntax.ATURI(p.CommentAt)
172
+
}
173
+
174
+
// func (p *PullComment) AsRecord() tangled.RepoPullComment {
175
+
// mentions := make([]string, len(p.Mentions))
176
+
// for i, did := range p.Mentions {
177
+
// mentions[i] = string(did)
178
+
// }
179
+
// references := make([]string, len(p.References))
180
+
// for i, uri := range p.References {
181
+
// references[i] = string(uri)
182
+
// }
183
+
// return tangled.RepoPullComment{
184
+
// Pull: p.PullAt,
185
+
// Body: p.Body,
186
+
// Mentions: mentions,
187
+
// References: references,
188
+
// CreatedAt: p.Created.Format(time.RFC3339),
189
+
// }
190
+
// }
153
191
154
192
func (p *Pull) LastRoundNumber() int {
155
193
return len(p.Submissions) - 1
+49 appview/models/reference.go
···
1
+
package models
2
+
3
+
import "fmt"
4
+
5
+
type RefKind int
6
+
7
+
const (
8
+
RefKindIssue RefKind = iota
9
+
RefKindPull
10
+
)
11
+
12
+
func (k RefKind) String() string {
13
+
if k == RefKindIssue {
14
+
return "issues"
15
+
} else {
16
+
return "pulls"
17
+
}
18
+
}
19
+
20
+
// /@alice.com/cool-proj/issues/123
21
+
// /@alice.com/cool-proj/issues/123#comment-321
22
+
type ReferenceLink struct {
23
+
Handle string
24
+
Repo string
25
+
Kind RefKind
26
+
SubjectId int
27
+
CommentId *int
28
+
}
29
+
30
+
func (l ReferenceLink) String() string {
31
+
comment := ""
32
+
if l.CommentId != nil {
33
+
comment = fmt.Sprintf("#comment-%d", *l.CommentId)
34
+
}
35
+
return fmt.Sprintf("/%s/%s/%s/%d%s",
36
+
l.Handle,
37
+
l.Repo,
38
+
l.Kind.String(),
39
+
l.SubjectId,
40
+
comment,
41
+
)
42
+
}
43
+
44
+
type RichReferenceLink struct {
45
+
ReferenceLink
46
+
Title string
47
+
// reusing PullState for both issue & PR
48
+
State PullState
49
+
}
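
A quick usage example for the new reference type, following the path shape documented in the comment above; the handle, repo and ids are illustrative values.

package main

import (
	"fmt"

	"tangled.org/core/appview/models"
)

func main() {
	commentId := 321
	ref := models.ReferenceLink{
		Handle:    "@alice.com", // the resolver later swaps this for the resolved DID
		Repo:      "cool-proj",
		Kind:      models.RefKindIssue,
		SubjectId: 123,
		CommentId: &commentId,
	}
	// ReferenceLink implements Stringer.
	fmt.Println(ref) // /@alice.com/cool-proj/issues/123#comment-321
}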
+14 -5 appview/models/star.go
···
7
7
)
8
8
9
9
type Star struct {
10
-
StarredByDid string
11
-
RepoAt syntax.ATURI
12
-
Created time.Time
13
-
Rkey string
10
+
Did string
11
+
RepoAt syntax.ATURI
12
+
Created time.Time
13
+
Rkey string
14
+
}
14
15
15
-
// optionally, populate this when querying for reverse mappings
16
+
// RepoStar is used for reverse mapping to repos
17
+
type RepoStar struct {
18
+
Star
16
19
Repo *Repo
17
20
}
21
+
22
+
// StringStar is used for reverse mapping to strings
23
+
type StringStar struct {
24
+
Star
25
+
String *String
26
+
}
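
The reverse-mapping types now embed Star, so the common fields are promoted onto RepoStar and StringStar; a small illustration with placeholder values.

package main

import (
	"fmt"

	"tangled.org/core/appview/models"
)

func main() {
	rs := models.RepoStar{
		Star: models.Star{Did: "did:plc:alice", Rkey: "3kstar123"},
		// Repo is populated when querying for reverse mappings.
	}
	// Did and Rkey are promoted from the embedded Star.
	fmt.Println(rs.Did, rs.Rkey)
}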
+1 -1 appview/models/string.go
+1 -1 appview/models/timeline.go
+5 -4 appview/notifications/notifications.go
···
11
11
"tangled.org/core/appview/oauth"
12
12
"tangled.org/core/appview/pages"
13
13
"tangled.org/core/appview/pagination"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
type Notifications struct {
···
53
54
54
55
total, err := db.CountNotifications(
55
56
n.db,
56
-
db.FilterEq("recipient_did", user.Did),
57
+
orm.FilterEq("recipient_did", user.Did),
57
58
)
58
59
if err != nil {
59
60
l.Error("failed to get total notifications", "err", err)
···
64
65
notifications, err := db.GetNotificationsWithEntities(
65
66
n.db,
66
67
page,
67
-
db.FilterEq("recipient_did", user.Did),
68
+
orm.FilterEq("recipient_did", user.Did),
68
69
)
69
70
if err != nil {
70
71
l.Error("failed to get notifications", "err", err)
···
96
97
97
98
count, err := db.CountNotifications(
98
99
n.db,
99
-
db.FilterEq("recipient_did", user.Did),
100
-
db.FilterEq("read", 0),
100
+
orm.FilterEq("recipient_did", user.Did),
101
+
orm.FilterEq("read", 0),
101
102
)
102
103
if err != nil {
103
104
http.Error(w, "Failed to get unread count", http.StatusInternalServerError)
+83 -67 appview/notify/db/db.go
···
3
3
import (
4
4
"context"
5
5
"log"
6
-
"maps"
7
6
"slices"
8
7
9
8
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"tangled.org/core/api/tangled"
10
10
"tangled.org/core/appview/db"
11
11
"tangled.org/core/appview/models"
12
12
"tangled.org/core/appview/notify"
13
13
"tangled.org/core/idresolver"
14
+
"tangled.org/core/orm"
15
+
"tangled.org/core/sets"
14
16
)
15
17
16
18
const (
17
-
maxMentions = 5
19
+
maxMentions = 8
18
20
)
19
21
20
22
type databaseNotifier struct {
···
36
38
}
37
39
38
40
func (n *databaseNotifier) NewStar(ctx context.Context, star *models.Star) {
41
+
if star.RepoAt.Collection().String() != tangled.RepoNSID {
42
+
// skip string stars for now
43
+
return
44
+
}
39
45
var err error
40
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(star.RepoAt)))
46
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(star.RepoAt)))
41
47
if err != nil {
42
48
log.Printf("NewStar: failed to get repos: %v", err)
43
49
return
44
50
}
45
51
46
-
actorDid := syntax.DID(star.StarredByDid)
47
-
recipients := []syntax.DID{syntax.DID(repo.Did)}
52
+
actorDid := syntax.DID(star.Did)
53
+
recipients := sets.Singleton(syntax.DID(repo.Did))
48
54
eventType := models.NotificationTypeRepoStarred
49
55
entityType := "repo"
50
56
entityId := star.RepoAt.String()
···
69
75
}
70
76
71
77
func (n *databaseNotifier) NewIssue(ctx context.Context, issue *models.Issue, mentions []syntax.DID) {
72
-
73
-
// build the recipients list
74
-
// - owner of the repo
75
-
// - collaborators in the repo
76
-
var recipients []syntax.DID
77
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
78
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
78
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
79
79
if err != nil {
80
80
log.Printf("failed to fetch collaborators: %v", err)
81
81
return
82
82
}
83
+
84
+
// build the recipients list
85
+
// - owner of the repo
86
+
// - collaborators in the repo
87
+
// - remove users already mentioned
88
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
83
89
for _, c := range collaborators {
84
-
recipients = append(recipients, c.SubjectDid)
90
+
recipients.Insert(c.SubjectDid)
91
+
}
92
+
for _, m := range mentions {
93
+
recipients.Remove(m)
85
94
}
86
95
87
96
actorDid := syntax.DID(issue.Did)
···
103
112
)
104
113
n.notifyEvent(
105
114
actorDid,
106
-
mentions,
115
+
sets.Collect(slices.Values(mentions)),
107
116
models.NotificationTypeUserMentioned,
108
117
entityType,
109
118
entityId,
···
114
123
}
115
124
116
125
func (n *databaseNotifier) NewIssueComment(ctx context.Context, comment *models.IssueComment, mentions []syntax.DID) {
117
-
issues, err := db.GetIssues(n.db, db.FilterEq("at_uri", comment.IssueAt))
126
+
issues, err := db.GetIssues(n.db, orm.FilterEq("at_uri", comment.IssueAt))
118
127
if err != nil {
119
128
log.Printf("NewIssueComment: failed to get issues: %v", err)
120
129
return
···
125
134
}
126
135
issue := issues[0]
127
136
128
-
var recipients []syntax.DID
129
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
137
+
// built the recipients list:
138
+
// - the owner of the repo
139
+
// - | if the comment is a reply -> everybody on that thread
140
+
// | if the comment is a top level -> just the issue owner
141
+
// - remove mentioned users from the recipients list
142
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
130
143
131
144
if comment.IsReply() {
132
145
// if this comment is a reply, then notify everybody in that thread
133
146
parentAtUri := *comment.ReplyTo
134
-
allThreads := issue.CommentList()
135
147
136
148
// find the parent thread, and add all DIDs from here to the recipient list
137
-
for _, t := range allThreads {
149
+
for _, t := range issue.CommentList() {
138
150
if t.Self.AtUri().String() == parentAtUri {
139
-
recipients = append(recipients, t.Participants()...)
151
+
for _, p := range t.Participants() {
152
+
recipients.Insert(p)
153
+
}
140
154
}
141
155
}
142
156
} else {
143
157
// not a reply, notify just the issue author
144
-
recipients = append(recipients, syntax.DID(issue.Did))
158
+
recipients.Insert(syntax.DID(issue.Did))
159
+
}
160
+
161
+
for _, m := range mentions {
162
+
recipients.Remove(m)
145
163
}
146
164
147
165
actorDid := syntax.DID(comment.Did)
···
163
181
)
164
182
n.notifyEvent(
165
183
actorDid,
166
-
mentions,
184
+
sets.Collect(slices.Values(mentions)),
167
185
models.NotificationTypeUserMentioned,
168
186
entityType,
169
187
entityId,
···
179
197
180
198
func (n *databaseNotifier) NewFollow(ctx context.Context, follow *models.Follow) {
181
199
actorDid := syntax.DID(follow.UserDid)
182
-
recipients := []syntax.DID{syntax.DID(follow.SubjectDid)}
200
+
recipients := sets.Singleton(syntax.DID(follow.SubjectDid))
183
201
eventType := models.NotificationTypeFollowed
184
202
entityType := "follow"
185
203
entityId := follow.UserDid
···
202
220
}
203
221
204
222
func (n *databaseNotifier) NewPull(ctx context.Context, pull *models.Pull) {
205
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
223
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
206
224
if err != nil {
207
225
log.Printf("NewPull: failed to get repos: %v", err)
208
226
return
209
227
}
210
-
211
-
// build the recipients list
212
-
// - owner of the repo
213
-
// - collaborators in the repo
214
-
var recipients []syntax.DID
215
-
recipients = append(recipients, syntax.DID(repo.Did))
216
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
228
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
217
229
if err != nil {
218
230
log.Printf("failed to fetch collaborators: %v", err)
219
231
return
220
232
}
233
+
234
+
// build the recipients list
235
+
// - owner of the repo
236
+
// - collaborators in the repo
237
+
recipients := sets.Singleton(syntax.DID(repo.Did))
221
238
for _, c := range collaborators {
222
-
recipients = append(recipients, c.SubjectDid)
239
+
recipients.Insert(c.SubjectDid)
223
240
}
224
241
225
242
actorDid := syntax.DID(pull.OwnerDid)
···
253
270
return
254
271
}
255
272
256
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", comment.RepoAt))
273
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", comment.RepoAt))
257
274
if err != nil {
258
275
log.Printf("NewPullComment: failed to get repos: %v", err)
259
276
return
···
262
279
// build up the recipients list:
263
280
// - repo owner
264
281
// - all pull participants
265
-
var recipients []syntax.DID
266
-
recipients = append(recipients, syntax.DID(repo.Did))
282
+
// - remove those already mentioned
283
+
recipients := sets.Singleton(syntax.DID(repo.Did))
267
284
for _, p := range pull.Participants() {
268
-
recipients = append(recipients, syntax.DID(p))
285
+
recipients.Insert(syntax.DID(p))
286
+
}
287
+
for _, m := range mentions {
288
+
recipients.Remove(m)
269
289
}
270
290
271
291
actorDid := syntax.DID(comment.OwnerDid)
···
289
309
)
290
310
n.notifyEvent(
291
311
actorDid,
292
-
mentions,
312
+
sets.Collect(slices.Values(mentions)),
293
313
models.NotificationTypeUserMentioned,
294
314
entityType,
295
315
entityId,
···
316
336
}
317
337
318
338
func (n *databaseNotifier) NewIssueState(ctx context.Context, actor syntax.DID, issue *models.Issue) {
319
-
// build up the recipients list:
320
-
// - repo owner
321
-
// - repo collaborators
322
-
// - all issue participants
323
-
var recipients []syntax.DID
324
-
recipients = append(recipients, syntax.DID(issue.Repo.Did))
325
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", issue.Repo.RepoAt()))
339
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", issue.Repo.RepoAt()))
326
340
if err != nil {
327
341
log.Printf("failed to fetch collaborators: %v", err)
328
342
return
329
343
}
344
+
345
+
// build up the recipients list:
346
+
// - repo owner
347
+
// - repo collaborators
348
+
// - all issue participants
349
+
recipients := sets.Singleton(syntax.DID(issue.Repo.Did))
330
350
for _, c := range collaborators {
331
-
recipients = append(recipients, c.SubjectDid)
351
+
recipients.Insert(c.SubjectDid)
332
352
}
333
353
for _, p := range issue.Participants() {
334
-
recipients = append(recipients, syntax.DID(p))
354
+
recipients.Insert(syntax.DID(p))
335
355
}
336
356
337
357
entityType := "pull"
···
361
381
362
382
func (n *databaseNotifier) NewPullState(ctx context.Context, actor syntax.DID, pull *models.Pull) {
363
383
// Get repo details
364
-
repo, err := db.GetRepo(n.db, db.FilterEq("at_uri", string(pull.RepoAt)))
384
+
repo, err := db.GetRepo(n.db, orm.FilterEq("at_uri", string(pull.RepoAt)))
365
385
if err != nil {
366
386
log.Printf("NewPullState: failed to get repos: %v", err)
367
387
return
368
388
}
369
389
370
-
// build up the recipients list:
371
-
// - repo owner
372
-
// - all pull participants
373
-
var recipients []syntax.DID
374
-
recipients = append(recipients, syntax.DID(repo.Did))
375
-
collaborators, err := db.GetCollaborators(n.db, db.FilterEq("repo_at", repo.RepoAt()))
390
+
collaborators, err := db.GetCollaborators(n.db, orm.FilterEq("repo_at", repo.RepoAt()))
376
391
if err != nil {
377
392
log.Printf("failed to fetch collaborators: %v", err)
378
393
return
379
394
}
395
+
396
+
// build up the recipients list:
397
+
// - repo owner
398
+
// - all pull participants
399
+
recipients := sets.Singleton(syntax.DID(repo.Did))
380
400
for _, c := range collaborators {
381
-
recipients = append(recipients, c.SubjectDid)
401
+
recipients.Insert(c.SubjectDid)
382
402
}
383
403
for _, p := range pull.Participants() {
384
-
recipients = append(recipients, syntax.DID(p))
404
+
recipients.Insert(syntax.DID(p))
385
405
}
386
406
387
407
entityType := "pull"
···
417
437
418
438
func (n *databaseNotifier) notifyEvent(
419
439
actorDid syntax.DID,
420
-
recipients []syntax.DID,
440
+
recipients sets.Set[syntax.DID],
421
441
eventType models.NotificationType,
422
442
entityType string,
423
443
entityId string,
···
425
445
issueId *int64,
426
446
pullId *int64,
427
447
) {
428
-
if eventType == models.NotificationTypeUserMentioned && len(recipients) > maxMentions {
429
-
recipients = recipients[:maxMentions]
430
-
}
431
-
recipientSet := make(map[syntax.DID]struct{})
432
-
for _, did := range recipients {
433
-
// everybody except actor themselves
434
-
if did != actorDid {
435
-
recipientSet[did] = struct{}{}
436
-
}
448
+
// if the user is attempting to mention more than maxMentions users, this is probably spam; do not notify anyone
449
+
if eventType == models.NotificationTypeUserMentioned && recipients.Len() > maxMentions {
450
+
return
437
451
}
438
452
453
+
recipients.Remove(actorDid)
454
+
439
455
prefMap, err := db.GetNotificationPreferences(
440
456
n.db,
441
-
db.FilterIn("user_did", slices.Collect(maps.Keys(recipientSet))),
457
+
orm.FilterIn("user_did", slices.Collect(recipients.All())),
442
458
)
443
459
if err != nil {
444
460
// failed to get prefs for users
···
454
470
defer tx.Rollback()
455
471
456
472
// filter based on preferences
457
-
for recipientDid := range recipientSet {
473
+
for recipientDid := range recipients.All() {
458
474
prefs, ok := prefMap[recipientDid]
459
475
if !ok {
460
476
prefs = models.DefaultNotificationPreferences(recipientDid)
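A minimal sketch of the new set-based recipient flow, assuming the set helpers used above (Singleton, Insert, Remove) live at tangled.org/core/sets; the helper name and the DIDs below are illustrative, not part of the change.

package example

import (
	"github.com/bluesky-social/indigo/atproto/syntax"

	"tangled.org/core/sets" // assumed import path for the set helpers used in the hunks above
)

// buildRecipients mirrors the notifier's new flow: start from the repo owner,
// add participants (duplicates collapse in the set), then drop anyone who was
// @-mentioned (they get a separate mention notification) and the actor.
func buildRecipients(repoDid string, participants []string, mentions []syntax.DID, actor syntax.DID) sets.Set[syntax.DID] {
	recipients := sets.Singleton(syntax.DID(repoDid))
	for _, p := range participants {
		recipients.Insert(syntax.DID(p))
	}
	for _, m := range mentions {
		recipients.Remove(m)
	}
	recipients.Remove(actor)
	return recipients
}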
-1
appview/notify/merged_notifier.go
+2
-2
appview/notify/posthog/notifier.go
···
37
37
38
38
func (n *posthogNotifier) NewStar(ctx context.Context, star *models.Star) {
39
39
err := n.client.Enqueue(posthog.Capture{
40
-
DistinctId: star.StarredByDid,
40
+
DistinctId: star.Did,
41
41
Event: "star",
42
42
Properties: posthog.Properties{"repo_at": star.RepoAt.String()},
43
43
})
···
48
48
49
49
func (n *posthogNotifier) DeleteStar(ctx context.Context, star *models.Star) {
50
50
err := n.client.Enqueue(posthog.Capture{
51
-
DistinctId: star.StarredByDid,
51
+
DistinctId: star.Did,
52
52
Event: "unstar",
53
53
Properties: posthog.Properties{"repo_at": star.RepoAt.String()},
54
54
})
+3
-2
appview/oauth/handler.go
···
16
16
"tangled.org/core/api/tangled"
17
17
"tangled.org/core/appview/db"
18
18
"tangled.org/core/consts"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/tid"
20
21
)
21
22
···
97
98
// and create an sh.tangled.spindle.member record with that
98
99
spindleMembers, err := db.GetSpindleMembers(
99
100
o.Db,
100
-
db.FilterEq("instance", "spindle.tangled.sh"),
101
-
db.FilterEq("subject", did),
101
+
orm.FilterEq("instance", "spindle.tangled.sh"),
102
+
orm.FilterEq("subject", did),
102
103
)
103
104
if err != nil {
104
105
l.Error("failed to get spindle members", "err", err)
+15
-2
appview/oauth/oauth.go
···
202
202
exp int64
203
203
lxm string
204
204
dev bool
205
+
timeout time.Duration
205
206
}
206
207
207
208
type ServiceClientOpt func(*ServiceClientOpts)
209
+
210
+
func DefaultServiceClientOpts() ServiceClientOpts {
211
+
return ServiceClientOpts{
212
+
timeout: time.Second * 5,
213
+
}
214
+
}
208
215
209
216
func WithService(service string) ServiceClientOpt {
210
217
return func(s *ServiceClientOpts) {
···
233
240
}
234
241
}
235
242
243
+
func WithTimeout(timeout time.Duration) ServiceClientOpt {
244
+
return func(s *ServiceClientOpts) {
245
+
s.timeout = timeout
246
+
}
247
+
}
248
+
236
249
func (s *ServiceClientOpts) Audience() string {
237
250
return fmt.Sprintf("did:web:%s", s.service)
238
251
}
···
247
260
}
248
261
249
262
func (o *OAuth) ServiceClient(r *http.Request, os ...ServiceClientOpt) (*xrpc.Client, error) {
250
-
opts := ServiceClientOpts{}
263
+
opts := DefaultServiceClientOpts()
251
264
for _, o := range os {
252
265
o(&opts)
253
266
}
···
274
287
},
275
288
Host: opts.Host(),
276
289
Client: &http.Client{
277
-
Timeout: time.Second * 5,
290
+
Timeout: opts.timeout,
278
291
},
279
292
}, nil
280
293
}
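A hedged usage sketch of the new timeout option: ServiceClient, WithService, WithTimeout and the 5-second default come from the hunk above, while the import path and the caller are assumptions.

package example

import (
	"net/http"
	"time"

	"tangled.org/core/appview/oauth" // assumed import path for the package shown above
)

// proxyWithLongerTimeout shows how a caller can now override the default
// 5s client timeout from DefaultServiceClientOpts for a slower service.
func proxyWithLongerTimeout(o *oauth.OAuth, r *http.Request) error {
	client, err := o.ServiceClient(
		r,
		oauth.WithService("spindle.tangled.sh"), // audience becomes did:web:spindle.tangled.sh
		oauth.WithTimeout(15*time.Second),       // overrides the 5s default
	)
	if err != nil {
		return err
	}
	_ = client // use the xrpc client as before
	return nil
}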
+44
-10
appview/pages/funcmap.go
···
22
22
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
23
23
"github.com/alecthomas/chroma/v2/lexers"
24
24
"github.com/alecthomas/chroma/v2/styles"
25
-
"github.com/bluesky-social/indigo/atproto/syntax"
26
25
"github.com/dustin/go-humanize"
27
26
"github.com/go-enry/go-enry/v2"
28
27
"github.com/yuin/goldmark"
28
+
emoji "github.com/yuin/goldmark-emoji"
29
29
"tangled.org/core/appview/filetree"
30
+
"tangled.org/core/appview/models"
30
31
"tangled.org/core/appview/pages/markup"
31
32
"tangled.org/core/crypto"
32
33
)
···
71
72
}
72
73
73
74
return identity.Handle.String()
75
+
},
76
+
"ownerSlashRepo": func(repo *models.Repo) string {
77
+
ownerId, err := p.resolver.ResolveIdent(context.Background(), repo.Did)
78
+
if err != nil {
79
+
return repo.DidSlashRepo()
80
+
}
81
+
handle := ownerId.Handle
82
+
if handle != "" && !handle.IsInvalidHandle() {
83
+
return string(handle) + "/" + repo.Name
84
+
}
85
+
return repo.DidSlashRepo()
74
86
},
75
87
"truncateAt30": func(s string) string {
76
88
if len(s) <= 30 {
···
100
112
"sub": func(a, b int) int {
101
113
return a - b
102
114
},
115
+
"mul": func(a, b int) int {
116
+
return a * b
117
+
},
118
+
"div": func(a, b int) int {
119
+
return a / b
120
+
},
121
+
"mod": func(a, b int) int {
122
+
return a % b
123
+
},
103
124
"f64": func(a int) float64 {
104
125
return float64(a)
105
126
},
···
132
153
133
154
return b
134
155
},
135
-
"didOrHandle": func(did, handle string) string {
136
-
if handle != "" && handle != syntax.HandleInvalid.String() {
137
-
return handle
138
-
} else {
139
-
return did
140
-
}
141
-
},
142
156
"assoc": func(values ...string) ([][]string, error) {
143
157
if len(values)%2 != 0 {
144
158
return nil, fmt.Errorf("invalid assoc call, must have an even number of arguments")
···
149
163
}
150
164
return pairs, nil
151
165
},
152
-
"append": func(s []string, values ...string) []string {
166
+
"append": func(s []any, values ...any) []any {
153
167
s = append(s, values...)
154
168
return s
155
169
},
···
248
262
},
249
263
"description": func(text string) template.HTML {
250
264
p.rctx.RendererType = markup.RendererTypeDefault
251
-
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New())
265
+
htmlString := p.rctx.RenderMarkdownWith(text, goldmark.New(
266
+
goldmark.WithExtensions(
267
+
emoji.Emoji,
268
+
),
269
+
))
252
270
sanitized := p.rctx.SanitizeDescription(htmlString)
253
271
return template.HTML(sanitized)
254
272
},
···
370
388
}
371
389
}
372
390
391
+
func (p *Pages) resolveDid(did string) string {
392
+
identity, err := p.resolver.ResolveIdent(context.Background(), did)
393
+
394
+
if err != nil {
395
+
return did
396
+
}
397
+
398
+
if identity.Handle.IsInvalidHandle() {
399
+
return "handle.invalid"
400
+
}
401
+
402
+
return identity.Handle.String()
403
+
}
404
+
373
405
func (p *Pages) AvatarUrl(handle, size string) string {
374
406
handle = strings.TrimPrefix(handle, "@")
407
+
408
+
handle = p.resolveDid(handle)
375
409
376
410
secret := p.avatar.SharedSecret
377
411
h := hmac.New(sha256.New, []byte(secret))
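With goldmark-emoji wired into the description renderer above, :shortcodes: in descriptions turn into Unicode emoji; a standalone sketch using the same extension, independent of the appview's renderer context (the input string is made up):

package main

import (
	"bytes"
	"fmt"

	"github.com/yuin/goldmark"
	emoji "github.com/yuin/goldmark-emoji"
)

func main() {
	md := goldmark.New(goldmark.WithExtensions(emoji.Emoji))
	var buf bytes.Buffer
	if err := md.Convert([]byte("shipped :tada:"), &buf); err != nil {
		panic(err)
	}
	// with the extension's default settings this should print the emoji
	// character in place of the shortcode, e.g. <p>shipped 🎉</p>
	fmt.Println(buf.String())
}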
+12
-2
appview/pages/markup/extension/atlink.go
···
35
35
return KindAt
36
36
}
37
37
38
-
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9.-]+)(\b)`)
38
+
var atRegexp = regexp.MustCompile(`(^|\s|\()(@)([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\b)`)
39
+
var markdownLinkRegexp = regexp.MustCompile(`(?ms)\[.*\]\(.*\)`)
39
40
40
41
type atParser struct{}
41
42
···
55
56
if m == nil {
56
57
return nil
57
58
}
59
+
60
+
// Check for all links in the markdown to see if the handle found is inside one
61
+
linksIndexes := markdownLinkRegexp.FindAllIndex(block.Source(), -1)
62
+
for _, linkMatch := range linksIndexes {
63
+
if linkMatch[0] < segment.Start && segment.Start < linkMatch[1] {
64
+
return nil
65
+
}
66
+
}
67
+
58
68
atSegment := text.NewSegment(segment.Start, segment.Start+m[1])
59
69
block.Advance(m[1])
60
70
node := &AtNode{}
···
87
97
88
98
func (r *atHtmlRenderer) renderAt(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
89
99
if entering {
90
-
w.WriteString(`<a href="/@`)
100
+
w.WriteString(`<a href="/`)
91
101
w.WriteString(n.(*AtNode).Handle)
92
102
w.WriteString(`" class="mention">`)
93
103
} else {
+2
-26
appview/pages/markup/markdown.go
···
12
12
13
13
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
14
14
"github.com/alecthomas/chroma/v2/styles"
15
-
treeblood "github.com/wyatt915/goldmark-treeblood"
16
15
"github.com/yuin/goldmark"
16
+
"github.com/yuin/goldmark-emoji"
17
17
highlighting "github.com/yuin/goldmark-highlighting/v2"
18
18
"github.com/yuin/goldmark/ast"
19
19
"github.com/yuin/goldmark/extension"
···
65
65
extension.NewFootnote(
66
66
extension.WithFootnoteIDPrefix([]byte("footnote")),
67
67
),
68
-
treeblood.MathML(),
69
68
callout.CalloutExtention,
70
69
textension.AtExt,
70
+
emoji.Emoji,
71
71
),
72
72
goldmark.WithParserOptions(
73
73
parser.WithAutoHeadingID(),
···
302
302
}
303
303
304
304
return path.Join(rctx.CurrentDir, dst)
305
-
}
306
-
307
-
// FindUserMentions returns Set of user handles from given markup soruce.
308
-
// It doesn't guarntee unique DIDs
309
-
func FindUserMentions(source string) []string {
310
-
var (
311
-
mentions []string
312
-
mentionsSet = make(map[string]struct{})
313
-
md = NewMarkdown()
314
-
sourceBytes = []byte(source)
315
-
root = md.Parser().Parse(text.NewReader(sourceBytes))
316
-
)
317
-
ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
318
-
if entering && n.Kind() == textension.KindAt {
319
-
handle := n.(*textension.AtNode).Handle
320
-
mentionsSet[handle] = struct{}{}
321
-
return ast.WalkSkipChildren, nil
322
-
}
323
-
return ast.WalkContinue, nil
324
-
})
325
-
for handle := range mentionsSet {
326
-
mentions = append(mentions, handle)
327
-
}
328
-
return mentions
329
305
}
330
306
331
307
func isAbsoluteUrl(link string) bool {
+121
appview/pages/markup/markdown_test.go
···
1
+
package markup
2
+
3
+
import (
4
+
"bytes"
5
+
"testing"
6
+
)
7
+
8
+
func TestAtExtension_Rendering(t *testing.T) {
9
+
tests := []struct {
10
+
name string
11
+
markdown string
12
+
expected string
13
+
}{
14
+
{
15
+
name: "renders simple at mention",
16
+
markdown: "Hello @user.tngl.sh!",
17
+
expected: `<p>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>!</p>`,
18
+
},
19
+
{
20
+
name: "renders multiple at mentions",
21
+
markdown: "Hi @alice.tngl.sh and @bob.example.com",
22
+
expected: `<p>Hi <a href="/alice.tngl.sh" class="mention">@alice.tngl.sh</a> and <a href="/bob.example.com" class="mention">@bob.example.com</a></p>`,
23
+
},
24
+
{
25
+
name: "renders at mention in parentheses",
26
+
markdown: "Check this out (@user.tngl.sh)",
27
+
expected: `<p>Check this out (<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>)</p>`,
28
+
},
29
+
{
30
+
name: "does not render email",
31
+
markdown: "Contact me at test@example.com",
32
+
expected: `<p>Contact me at <a href="mailto:test@example.com">test@example.com</a></p>`,
33
+
},
34
+
{
35
+
name: "renders at mention with hyphen",
36
+
markdown: "Follow @user-name.tngl.sh",
37
+
expected: `<p>Follow <a href="/user-name.tngl.sh" class="mention">@user-name.tngl.sh</a></p>`,
38
+
},
39
+
{
40
+
name: "renders at mention with numbers",
41
+
markdown: "@user123.test456.social",
42
+
expected: `<p><a href="/user123.test456.social" class="mention">@user123.test456.social</a></p>`,
43
+
},
44
+
{
45
+
name: "at mention at start of line",
46
+
markdown: "@user.tngl.sh is cool",
47
+
expected: `<p><a href="/user.tngl.sh" class="mention">@user.tngl.sh</a> is cool</p>`,
48
+
},
49
+
}
50
+
51
+
for _, tt := range tests {
52
+
t.Run(tt.name, func(t *testing.T) {
53
+
md := NewMarkdown()
54
+
55
+
var buf bytes.Buffer
56
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
57
+
t.Fatalf("failed to convert markdown: %v", err)
58
+
}
59
+
60
+
result := buf.String()
61
+
if result != tt.expected+"\n" {
62
+
t.Errorf("expected:\n%s\ngot:\n%s", tt.expected, result)
63
+
}
64
+
})
65
+
}
66
+
}
67
+
68
+
func TestAtExtension_WithOtherMarkdown(t *testing.T) {
69
+
tests := []struct {
70
+
name string
71
+
markdown string
72
+
contains string
73
+
}{
74
+
{
75
+
name: "at mention with bold",
76
+
markdown: "**Hello @user.tngl.sh**",
77
+
contains: `<strong>Hello <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></strong>`,
78
+
},
79
+
{
80
+
name: "at mention with italic",
81
+
markdown: "*Check @user.tngl.sh*",
82
+
contains: `<em>Check <a href="/user.tngl.sh" class="mention">@user.tngl.sh</a></em>`,
83
+
},
84
+
{
85
+
name: "at mention in list",
86
+
markdown: "- Item 1\n- @user.tngl.sh\n- Item 3",
87
+
contains: `<a href="/user.tngl.sh" class="mention">@user.tngl.sh</a>`,
88
+
},
89
+
{
90
+
name: "at mention in link",
91
+
markdown: "[@regnault.dev](https://regnault.dev)",
92
+
contains: `<a href="https://regnault.dev">@regnault.dev</a>`,
93
+
},
94
+
{
95
+
name: "at mention in link again",
96
+
markdown: "[check out @regnault.dev](https://regnault.dev)",
97
+
contains: `<a href="https://regnault.dev">check out @regnault.dev</a>`,
98
+
},
99
+
{
100
+
name: "at mention in link again, multiline",
101
+
markdown: "[\ncheck out @regnault.dev](https://regnault.dev)",
102
+
contains: "<a href=\"https://regnault.dev\">\ncheck out @regnault.dev</a>",
103
+
},
104
+
}
105
+
106
+
for _, tt := range tests {
107
+
t.Run(tt.name, func(t *testing.T) {
108
+
md := NewMarkdown()
109
+
110
+
var buf bytes.Buffer
111
+
if err := md.Convert([]byte(tt.markdown), &buf); err != nil {
112
+
t.Fatalf("failed to convert markdown: %v", err)
113
+
}
114
+
115
+
result := buf.String()
116
+
if !bytes.Contains([]byte(result), []byte(tt.contains)) {
117
+
t.Errorf("expected output to contain:\n%s\ngot:\n%s", tt.contains, result)
118
+
}
119
+
})
120
+
}
121
+
}
+124
appview/pages/markup/reference_link.go
···
1
+
package markup
2
+
3
+
import (
4
+
"maps"
5
+
"net/url"
6
+
"path"
7
+
"slices"
8
+
"strconv"
9
+
"strings"
10
+
11
+
"github.com/yuin/goldmark/ast"
12
+
"github.com/yuin/goldmark/text"
13
+
"tangled.org/core/appview/models"
14
+
textension "tangled.org/core/appview/pages/markup/extension"
15
+
)
16
+
17
+
// FindReferences collects all links referencing tangled-related objects
18
+
// like issues, PRs, comments or even @-mentions
19
+
// This function doesn't actually check for the existence of records in the DB
20
+
// or the PDS; it merely returns a list of what are presumed to be references.
21
+
func FindReferences(baseUrl string, source string) ([]string, []models.ReferenceLink) {
22
+
var (
23
+
refLinkSet = make(map[models.ReferenceLink]struct{})
24
+
mentionsSet = make(map[string]struct{})
25
+
md = NewMarkdown()
26
+
sourceBytes = []byte(source)
27
+
root = md.Parser().Parse(text.NewReader(sourceBytes))
28
+
)
29
+
// trim the url scheme; whether the link uses http or https shouldn't matter here
30
+
baseUrl = strings.TrimPrefix(baseUrl, "https://")
31
+
baseUrl = strings.TrimPrefix(baseUrl, "http://")
32
+
33
+
ast.Walk(root, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
34
+
if !entering {
35
+
return ast.WalkContinue, nil
36
+
}
37
+
switch n.Kind() {
38
+
case textension.KindAt:
39
+
handle := n.(*textension.AtNode).Handle
40
+
mentionsSet[handle] = struct{}{}
41
+
return ast.WalkSkipChildren, nil
42
+
case ast.KindLink:
43
+
dest := string(n.(*ast.Link).Destination)
44
+
ref := parseTangledLink(baseUrl, dest)
45
+
if ref != nil {
46
+
refLinkSet[*ref] = struct{}{}
47
+
}
48
+
return ast.WalkSkipChildren, nil
49
+
case ast.KindAutoLink:
50
+
an := n.(*ast.AutoLink)
51
+
if an.AutoLinkType == ast.AutoLinkURL {
52
+
dest := string(an.URL(sourceBytes))
53
+
ref := parseTangledLink(baseUrl, dest)
54
+
if ref != nil {
55
+
refLinkSet[*ref] = struct{}{}
56
+
}
57
+
}
58
+
return ast.WalkSkipChildren, nil
59
+
}
60
+
return ast.WalkContinue, nil
61
+
})
62
+
mentions := slices.Collect(maps.Keys(mentionsSet))
63
+
references := slices.Collect(maps.Keys(refLinkSet))
64
+
return mentions, references
65
+
}
66
+
67
+
func parseTangledLink(baseHost string, urlStr string) *models.ReferenceLink {
68
+
u, err := url.Parse(urlStr)
69
+
if err != nil {
70
+
return nil
71
+
}
72
+
73
+
if u.Host != "" && !strings.EqualFold(u.Host, baseHost) {
74
+
return nil
75
+
}
76
+
77
+
p := path.Clean(u.Path)
78
+
parts := strings.FieldsFunc(p, func(r rune) bool { return r == '/' })
79
+
if len(parts) < 4 {
80
+
// need at least: handle / repo / kind / id
81
+
return nil
82
+
}
83
+
84
+
var (
85
+
handle = parts[0]
86
+
repo = parts[1]
87
+
kindSeg = parts[2]
88
+
subjectSeg = parts[3]
89
+
)
90
+
91
+
handle = strings.TrimPrefix(handle, "@")
92
+
93
+
var kind models.RefKind
94
+
switch kindSeg {
95
+
case "issues":
96
+
kind = models.RefKindIssue
97
+
case "pulls":
98
+
kind = models.RefKindPull
99
+
default:
100
+
return nil
101
+
}
102
+
103
+
subjectId, err := strconv.Atoi(subjectSeg)
104
+
if err != nil {
105
+
return nil
106
+
}
107
+
var commentId *int
108
+
if u.Fragment != "" {
109
+
if strings.HasPrefix(u.Fragment, "comment-") {
110
+
commentIdStr := u.Fragment[len("comment-"):]
111
+
if id, err := strconv.Atoi(commentIdStr); err == nil {
112
+
commentId = &id
113
+
}
114
+
}
115
+
}
116
+
117
+
return &models.ReferenceLink{
118
+
Handle: handle,
119
+
Repo: repo,
120
+
Kind: kind,
121
+
SubjectId: subjectId,
122
+
CommentId: commentId,
123
+
}
124
+
}
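The #comment-<n> fragment handling above isn't exercised by the tests below; a hedged sketch of what it should produce, with a made-up host, repo and IDs:

package main

import (
	"fmt"

	"tangled.org/core/appview/pages/markup"
)

func main() {
	src := "see [this thread](https://tangled.org/@alice.tngl.sh/coolproj/issues/7#comment-12)"
	mentions, refs := markup.FindReferences("https://tangled.org", src)
	fmt.Println(mentions) // empty: the link text contains no @-mention
	for _, r := range refs {
		if r.CommentId != nil {
			// expected: alice.tngl.sh coolproj 7 12 (Kind should be RefKindIssue)
			fmt.Println(r.Handle, r.Repo, r.SubjectId, *r.CommentId)
		}
	}
}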
+42
appview/pages/markup/reference_link_test.go
···
1
+
package markup_test
2
+
3
+
import (
4
+
"testing"
5
+
6
+
"github.com/stretchr/testify/assert"
7
+
"tangled.org/core/appview/models"
8
+
"tangled.org/core/appview/pages/markup"
9
+
)
10
+
11
+
func TestMarkupParsing(t *testing.T) {
12
+
tests := []struct {
13
+
name string
14
+
source string
15
+
wantHandles []string
16
+
wantRefLinks []models.ReferenceLink
17
+
}{
18
+
{
19
+
name: "normal link",
20
+
source: `[link](http://127.0.0.1:3000/alice.pds.tngl.boltless.dev/coolproj/issues/1)`,
21
+
wantHandles: make([]string, 0),
22
+
wantRefLinks: []models.ReferenceLink{
23
+
{Handle: "alice.pds.tngl.boltless.dev", Repo: "coolproj", Kind: models.RefKindIssue, SubjectId: 1, CommentId: nil},
24
+
},
25
+
},
26
+
{
27
+
name: "commonmark style autolink",
28
+
source: `<http://127.0.0.1:3000/alice.pds.tngl.boltless.dev/coolproj/issues/1>`,
29
+
wantHandles: make([]string, 0),
30
+
wantRefLinks: []models.ReferenceLink{
31
+
{Handle: "alice.pds.tngl.boltless.dev", Repo: "coolproj", Kind: models.RefKindIssue, SubjectId: 1, CommentId: nil},
32
+
},
33
+
},
34
+
}
35
+
for _, tt := range tests {
36
+
t.Run(tt.name, func(t *testing.T) {
37
+
handles, refLinks := markup.FindReferences("http://127.0.0.1:3000", tt.source)
38
+
assert.ElementsMatch(t, tt.wantHandles, handles)
39
+
assert.ElementsMatch(t, tt.wantRefLinks, refLinks)
40
+
})
41
+
}
42
+
}
+29
-18
appview/pages/pages.go
···
31
31
"github.com/bluesky-social/indigo/atproto/identity"
32
32
"github.com/bluesky-social/indigo/atproto/syntax"
33
33
"github.com/go-git/go-git/v5/plumbing"
34
-
"github.com/go-git/go-git/v5/plumbing/object"
35
34
)
36
35
37
36
//go:embed templates/* static legal
···
407
406
type KnotsParams struct {
408
407
LoggedInUser *oauth.User
409
408
Registrations []models.Registration
409
+
Tabs []map[string]any
410
+
Tab string
410
411
}
411
412
412
413
func (p *Pages) Knots(w io.Writer, params KnotsParams) error {
···
419
420
Members []string
420
421
Repos map[string][]models.Repo
421
422
IsOwner bool
423
+
Tabs []map[string]any
424
+
Tab string
422
425
}
423
426
424
427
func (p *Pages) Knot(w io.Writer, params KnotParams) error {
···
436
439
type SpindlesParams struct {
437
440
LoggedInUser *oauth.User
438
441
Spindles []models.Spindle
442
+
Tabs []map[string]any
443
+
Tab string
439
444
}
440
445
441
446
func (p *Pages) Spindles(w io.Writer, params SpindlesParams) error {
···
444
449
445
450
type SpindleListingParams struct {
446
451
models.Spindle
452
+
Tabs []map[string]any
453
+
Tab string
447
454
}
448
455
449
456
func (p *Pages) SpindleListing(w io.Writer, params SpindleListingParams) error {
···
455
462
Spindle models.Spindle
456
463
Members []string
457
464
Repos map[string][]models.Repo
465
+
Tabs []map[string]any
466
+
Tab string
458
467
}
459
468
460
469
func (p *Pages) SpindleDashboard(w io.Writer, params SpindleDashboardParams) error {
···
482
491
483
492
type ProfileCard struct {
484
493
UserDid string
485
-
UserHandle string
486
494
FollowStatus models.FollowStatus
487
495
Punchcard *models.Punchcard
488
496
Profile *models.Profile
···
625
633
return p.executePlain("user/fragments/editPins", w, params)
626
634
}
627
635
628
-
type RepoStarFragmentParams struct {
636
+
type StarBtnFragmentParams struct {
629
637
IsStarred bool
630
-
RepoAt syntax.ATURI
631
-
Stats models.RepoStats
638
+
SubjectAt syntax.ATURI
639
+
StarCount int
632
640
}
633
641
634
-
func (p *Pages) RepoStarFragment(w io.Writer, params RepoStarFragmentParams) error {
635
-
return p.executePlain("repo/fragments/repoStar", w, params)
642
+
func (p *Pages) StarBtnFragment(w io.Writer, params StarBtnFragmentParams) error {
643
+
return p.executePlain("fragments/starBtn-oob", w, params)
636
644
}
637
645
638
646
type RepoIndexParams struct {
···
640
648
RepoInfo repoinfo.RepoInfo
641
649
Active string
642
650
TagMap map[string][]string
643
-
CommitsTrunc []*object.Commit
651
+
CommitsTrunc []types.Commit
644
652
TagsTrunc []*types.TagReference
645
653
BranchesTrunc []types.Branch
646
654
// ForkInfo *types.ForkInfo
···
831
839
}
832
840
833
841
type Collaborator struct {
834
-
Did string
835
-
Handle string
836
-
Role string
842
+
Did string
843
+
Role string
837
844
}
838
845
839
846
type RepoSettingsParams struct {
···
908
915
RepoInfo repoinfo.RepoInfo
909
916
Active string
910
917
Issues []models.Issue
918
+
IssueCount int
911
919
LabelDefs map[string]*models.LabelDefinition
912
920
Page pagination.Page
913
921
FilteringByOpen bool
···
925
933
Active string
926
934
Issue *models.Issue
927
935
CommentList []models.CommentListItem
936
+
Backlinks []models.RichReferenceLink
928
937
LabelDefs map[string]*models.LabelDefinition
929
938
930
939
OrderedReactionKinds []models.ReactionKind
···
1078
1087
Pull *models.Pull
1079
1088
Stack models.Stack
1080
1089
AbandonedPulls []*models.Pull
1090
+
Backlinks []models.RichReferenceLink
1081
1091
BranchDeleteStatus *models.BranchDeleteStatus
1082
1092
MergeCheck types.MergeCheckResponse
1083
1093
ResubmitCheck ResubmitResult
···
1249
1259
return p.executePlain("repo/fragments/compareAllowPull", w, params)
1250
1260
}
1251
1261
1252
-
type RepoCompareDiffParams struct {
1253
-
LoggedInUser *oauth.User
1254
-
RepoInfo repoinfo.RepoInfo
1255
-
Diff types.NiceDiff
1262
+
type RepoCompareDiffFragmentParams struct {
1263
+
Diff types.NiceDiff
1264
+
DiffOpts types.DiffOpts
1256
1265
}
1257
1266
1258
-
func (p *Pages) RepoCompareDiff(w io.Writer, params RepoCompareDiffParams) error {
1259
-
return p.executePlain("repo/fragments/diff", w, []any{params.RepoInfo.FullName, ¶ms.Diff})
1267
+
func (p *Pages) RepoCompareDiffFragment(w io.Writer, params RepoCompareDiffFragmentParams) error {
1268
+
return p.executePlain("repo/fragments/diff", w, []any{¶ms.Diff, ¶ms.DiffOpts})
1260
1269
}
1261
1270
1262
1271
type LabelPanelParams struct {
···
1376
1385
ShowRendered bool
1377
1386
RenderToggle bool
1378
1387
RenderedContents template.HTML
1379
-
String models.String
1388
+
String *models.String
1380
1389
Stats models.StringStats
1390
+
IsStarred bool
1391
+
StarCount int
1381
1392
Owner identity.Identity
1382
1393
}
1383
1394
+25
-22
appview/pages/repoinfo/repoinfo.go
···
1
1
package repoinfo
2
2
3
3
import (
4
+
"fmt"
4
5
"path"
5
6
"slices"
6
7
7
8
"github.com/bluesky-social/indigo/atproto/syntax"
9
+
"tangled.org/core/api/tangled"
8
10
"tangled.org/core/appview/models"
9
11
"tangled.org/core/appview/state/userutil"
10
12
)
11
13
12
-
func (r RepoInfo) Owner() string {
14
+
func (r RepoInfo) owner() string {
13
15
if r.OwnerHandle != "" {
14
16
return r.OwnerHandle
15
17
} else {
···
18
20
}
19
21
20
22
func (r RepoInfo) FullName() string {
21
-
return path.Join(r.Owner(), r.Name)
23
+
return path.Join(r.owner(), r.Name)
22
24
}
23
25
24
-
func (r RepoInfo) OwnerWithoutAt() string {
26
+
func (r RepoInfo) ownerWithoutAt() string {
25
27
if r.OwnerHandle != "" {
26
28
return r.OwnerHandle
27
29
} else {
···
30
32
}
31
33
32
34
func (r RepoInfo) FullNameWithoutAt() string {
33
-
return path.Join(r.OwnerWithoutAt(), r.Name)
35
+
return path.Join(r.ownerWithoutAt(), r.Name)
34
36
}
35
37
36
38
func (r RepoInfo) GetTabs() [][]string {
···
48
50
return tabs
49
51
}
50
52
53
+
func (r RepoInfo) RepoAt() syntax.ATURI {
54
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.OwnerDid, tangled.RepoNSID, r.Rkey))
55
+
}
56
+
51
57
type RepoInfo struct {
52
-
Name string
53
-
Rkey string
54
-
OwnerDid string
55
-
OwnerHandle string
56
-
Description string
57
-
Website string
58
-
Topics []string
59
-
Knot string
60
-
Spindle string
61
-
RepoAt syntax.ATURI
62
-
IsStarred bool
63
-
Stats models.RepoStats
64
-
Roles RolesInRepo
65
-
Source *models.Repo
66
-
SourceHandle string
67
-
Ref string
68
-
DisableFork bool
69
-
CurrentDir string
58
+
Name string
59
+
Rkey string
60
+
OwnerDid string
61
+
OwnerHandle string
62
+
Description string
63
+
Website string
64
+
Topics []string
65
+
Knot string
66
+
Spindle string
67
+
IsStarred bool
68
+
Stats models.RepoStats
69
+
Roles RolesInRepo
70
+
Source *models.Repo
71
+
Ref string
72
+
CurrentDir string
70
73
}
71
74
72
75
// each tab on a repo could have some metadata:
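With RepoAt() now derived from OwnerDid and Rkey instead of being stored on the struct, callers only need those two fields populated; a small illustrative check (the DID and rkey are made up, and the NSID comes from the tangled.RepoNSID constant referenced above):

package main

import (
	"fmt"

	"tangled.org/core/appview/pages/repoinfo"
)

func main() {
	r := repoinfo.RepoInfo{
		Name:     "coolproj",
		Rkey:     "3kabc123",        // hypothetical record key
		OwnerDid: "did:plc:example", // hypothetical owner DID
	}
	// prints an AT-URI of the form at://<OwnerDid>/<tangled.RepoNSID>/<Rkey>
	fmt.Println(r.RepoAt())
}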
+5
appview/pages/templates/fragments/starBtn-oob.html
+26
appview/pages/templates/fragments/starBtn.html
···
1
+
{{ define "fragments/starBtn" }}
2
+
{{/* NOTE: this fragment is always replaced with hx-swap-oob */}}
3
+
<button
4
+
id="starBtn"
5
+
class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group"
6
+
data-star-subject-at="{{ .SubjectAt }}"
7
+
{{ if .IsStarred }}
8
+
hx-delete="/star?subject={{ .SubjectAt }}&countHint={{ .StarCount }}"
9
+
{{ else }}
10
+
hx-post="/star?subject={{ .SubjectAt }}&countHint={{ .StarCount }}"
11
+
{{ end }}
12
+
13
+
hx-trigger="click"
14
+
hx-disabled-elt="#starBtn"
15
+
>
16
+
{{ if .IsStarred }}
17
+
{{ i "star" "w-4 h-4 fill-current" }}
18
+
{{ else }}
19
+
{{ i "star" "w-4 h-4" }}
20
+
{{ end }}
21
+
<span class="text-sm">
22
+
{{ .StarCount }}
23
+
</span>
24
+
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
25
+
</button>
26
+
{{ end }}
+8
appview/pages/templates/fragments/tabSelector.html
···
2
2
{{ $name := .Name }}
3
3
{{ $all := .Values }}
4
4
{{ $active := .Active }}
5
+
{{ $include := .Include }}
5
6
<div class="flex justify-between divide-x divide-gray-200 dark:divide-gray-700 rounded border border-gray-200 dark:border-gray-700 overflow-hidden">
6
7
{{ $activeTab := "bg-white dark:bg-gray-700 shadow-sm" }}
7
8
{{ $inactiveTab := "bg-gray-100 dark:bg-gray-800 shadow-inner" }}
8
9
{{ range $index, $value := $all }}
9
10
{{ $isActive := eq $value.Key $active }}
10
11
<a href="?{{ $name }}={{ $value.Key }}"
12
+
{{ if $include }}
13
+
hx-get="?{{ $name }}={{ $value.Key }}"
14
+
hx-include="{{ $include }}"
15
+
hx-push-url="true"
16
+
hx-target="body"
17
+
hx-on:htmx:config-request="if(!event.detail.parameters.q) delete event.detail.parameters.q"
18
+
{{ end }}
11
19
class="p-2 whitespace-nowrap flex justify-center items-center gap-2 text-sm w-full block hover:no-underline text-center {{ if $isActive }} {{$activeTab }} {{ else }} {{ $inactiveTab }} {{ end }}">
12
20
{{ if $value.Icon }}
13
21
{{ i $value.Icon "size-4" }}
+22
appview/pages/templates/fragments/tinyAvatarList.html
···
1
+
{{ define "fragments/tinyAvatarList" }}
2
+
{{ $all := .all }}
3
+
{{ $classes := .classes }}
4
+
{{ $ps := take $all 5 }}
5
+
<div class="inline-flex items-center -space-x-3">
6
+
{{ $c := "z-50 z-40 z-30 z-20 z-10" }}
7
+
{{ range $i, $p := $ps }}
8
+
<img
9
+
src="{{ tinyAvatar . }}"
10
+
alt=""
11
+
class="rounded-full size-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0 {{ $classes }}"
12
+
/>
13
+
{{ end }}
14
+
15
+
{{ if gt (len $all) 5 }}
16
+
<span class="pl-4 text-gray-500 dark:text-gray-400 text-sm">
17
+
+{{ sub (len $all) 5 }}
18
+
</span>
19
+
{{ end }}
20
+
</div>
21
+
{{ end }}
22
+
+23
-7
appview/pages/templates/knots/dashboard.html
···
1
-
{{ define "title" }}{{ .Registration.Domain }} · knots{{ end }}
1
+
{{ define "title" }}{{ .Registration.Domain }} · {{ .Tab }} settings{{ end }}
2
2
3
3
{{ define "content" }}
4
-
<div class="px-6 py-4">
4
+
<div class="p-6">
5
+
<p class="text-xl font-bold dark:text-white">Settings</p>
6
+
</div>
7
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
8
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
9
+
<div class="col-span-1">
10
+
{{ template "user/settings/fragments/sidebar" . }}
11
+
</div>
12
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
13
+
{{ template "knotDash" . }}
14
+
</div>
15
+
</section>
16
+
</div>
17
+
{{ end }}
18
+
19
+
{{ define "knotDash" }}
20
+
<div>
5
21
<div class="flex justify-between items-center">
6
-
<h1 class="text-xl font-bold dark:text-white">{{ .Registration.Domain }}</h1>
22
+
<h2 class="text-sm pb-2 uppercase font-bold">{{ .Tab }} · {{ .Registration.Domain }}</h2>
7
23
<div id="right-side" class="flex gap-2">
8
24
{{ $style := "px-2 py-1 rounded flex items-center flex-shrink-0 gap-2" }}
9
25
{{ $isOwner := and .LoggedInUser (eq .LoggedInUser.Did .Registration.ByDid) }}
···
35
51
</div>
36
52
37
53
{{ if .Members }}
38
-
<section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
54
+
<section class="bg-white dark:bg-gray-800 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
39
55
<div class="flex flex-col gap-2">
40
56
{{ block "member" . }} {{ end }}
41
57
</div>
···
79
95
<button
80
96
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
81
97
title="Delete knot"
82
-
hx-delete="/knots/{{ .Domain }}"
98
+
hx-delete="/settings/knots/{{ .Domain }}"
83
99
hx-swap="outerHTML"
84
100
hx-confirm="Are you sure you want to delete the knot '{{ .Domain }}'?"
85
101
hx-headers='{"shouldRedirect": "true"}'
···
95
111
<button
96
112
class="btn gap-2 group"
97
113
title="Retry knot verification"
98
-
hx-post="/knots/{{ .Domain }}/retry"
114
+
hx-post="/settings/knots/{{ .Domain }}/retry"
99
115
hx-swap="none"
100
116
hx-headers='{"shouldRefresh": "true"}'
101
117
>
···
113
129
<button
114
130
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
115
131
title="Remove member"
116
-
hx-post="/knots/{{ $root.Registration.Domain }}/remove"
132
+
hx-post="/settings/knots/{{ $root.Registration.Domain }}/remove"
117
133
hx-swap="none"
118
134
hx-vals='{"member": "{{$member}}" }'
119
135
hx-confirm="Are you sure you want to remove {{ $memberHandle }} from this knot?"
+1
-1
appview/pages/templates/knots/fragments/addMemberModal.html
+3
-3
appview/pages/templates/knots/fragments/knotListing.html
···
7
7
8
8
{{ define "knotLeftSide" }}
9
9
{{ if .Registered }}
10
-
<a href="/knots/{{ .Domain }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
10
+
<a href="/settings/knots/{{ .Domain }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
11
11
{{ i "hard-drive" "w-4 h-4" }}
12
12
<span class="hover:underline">
13
13
{{ .Domain }}
···
56
56
<button
57
57
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
58
58
title="Delete knot"
59
-
hx-delete="/knots/{{ .Domain }}"
59
+
hx-delete="/settings/knots/{{ .Domain }}"
60
60
hx-swap="outerHTML"
61
61
hx-target="#knot-{{.Id}}"
62
62
hx-confirm="Are you sure you want to delete the knot '{{ .Domain }}'?"
···
72
72
<button
73
73
class="btn gap-2 group"
74
74
title="Retry knot verification"
75
-
hx-post="/knots/{{ .Domain }}/retry"
75
+
hx-post="/settings/knots/{{ .Domain }}/retry"
76
76
hx-swap="none"
77
77
hx-target="#knot-{{.Id}}"
78
78
>
+42
-11
appview/pages/templates/knots/index.html
···
1
-
{{ define "title" }}knots{{ end }}
1
+
{{ define "title" }}{{ .Tab }} settings{{ end }}
2
2
3
3
{{ define "content" }}
4
-
<div class="px-6 py-4 flex items-center justify-between gap-4 align-bottom">
5
-
<h1 class="text-xl font-bold dark:text-white">Knots</h1>
6
-
<span class="flex items-center gap-1">
7
-
{{ i "book" "w-3 h-3" }}
8
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/knot-hosting.md">docs</a>
9
-
</span>
10
-
</div>
4
+
<div class="p-6">
5
+
<p class="text-xl font-bold dark:text-white">Settings</p>
6
+
</div>
7
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
8
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
9
+
<div class="col-span-1">
10
+
{{ template "user/settings/fragments/sidebar" . }}
11
+
</div>
12
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
13
+
{{ template "knotsList" . }}
14
+
</div>
15
+
</section>
16
+
</div>
17
+
{{ end }}
18
+
19
+
{{ define "knotsList" }}
20
+
<div class="grid grid-cols-1 md:grid-cols-3 gap-4 items-center">
21
+
<div class="col-span-1 md:col-span-2">
22
+
<h2 class="text-sm pb-2 uppercase font-bold">Knots</h2>
23
+
{{ block "about" . }} {{ end }}
24
+
</div>
25
+
<div class="col-span-1 md:col-span-1 md:justify-self-end">
26
+
{{ template "docsButton" . }}
27
+
</div>
28
+
</div>
11
29
12
-
<section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
30
+
<section>
13
31
<div class="flex flex-col gap-6">
14
-
{{ block "about" . }} {{ end }}
15
32
{{ block "list" . }} {{ end }}
16
33
{{ block "register" . }} {{ end }}
17
34
</div>
···
50
67
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a knot</h2>
51
68
<p class="mb-2 dark:text-gray-300">Enter the hostname of your knot to get started.</p>
52
69
<form
53
-
hx-post="/knots/register"
70
+
hx-post="/settings/knots/register"
54
71
class="max-w-2xl mb-2 space-y-4"
55
72
hx-indicator="#register-button"
56
73
hx-swap="none"
···
84
101
85
102
</section>
86
103
{{ end }}
104
+
105
+
{{ define "docsButton" }}
106
+
<a
107
+
class="btn flex items-center gap-2"
108
+
href="https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide">
109
+
{{ i "book" "size-4" }}
110
+
docs
111
+
</a>
112
+
<div
113
+
id="add-email-modal"
114
+
popover
115
+
class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50">
116
+
</div>
117
+
{{ end }}
-2
appview/pages/templates/layouts/fragments/topbar.html
···
61
61
<a href="/{{ $user }}">profile</a>
62
62
<a href="/{{ $user }}?tab=repos">repositories</a>
63
63
<a href="/{{ $user }}?tab=strings">strings</a>
64
-
<a href="/knots">knots</a>
65
-
<a href="/spindles">spindles</a>
66
64
<a href="/settings">settings</a>
67
65
<a href="#"
68
66
hx-post="/logout"
+8
-7
appview/pages/templates/layouts/profilebase.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }}{{ end }}
2
2
3
3
{{ define "extrameta" }}
4
-
{{ $avatarUrl := fullAvatar .Card.UserHandle }}
5
-
<meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" />
4
+
{{ $handle := resolve .Card.UserDid }}
5
+
{{ $avatarUrl := fullAvatar $handle }}
6
+
<meta property="og:title" content="{{ $handle }}" />
6
7
<meta property="og:type" content="profile" />
7
-
<meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}?tab={{ .Active }}" />
8
-
<meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" />
8
+
<meta property="og:url" content="https://tangled.org/{{ $handle }}?tab={{ .Active }}" />
9
+
<meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" />
9
10
<meta property="og:image" content="{{ $avatarUrl }}" />
10
11
<meta property="og:image:width" content="512" />
11
12
<meta property="og:image:height" content="512" />
12
13
13
14
<meta name="twitter:card" content="summary" />
14
-
<meta name="twitter:title" content="{{ or .Card.UserHandle .Card.UserDid }}" />
15
-
<meta name="twitter:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" />
15
+
<meta name="twitter:title" content="{{ $handle }}" />
16
+
<meta name="twitter:description" content="{{ or .Card.Profile.Description $handle }}" />
16
17
<meta name="twitter:image" content="{{ $avatarUrl }}" />
17
18
{{ end }}
18
19
+4
-1
appview/pages/templates/layouts/repobase.html
···
49
49
</div>
50
50
51
51
<div class="w-full sm:w-fit grid grid-cols-3 gap-2 z-auto">
52
-
{{ template "repo/fragments/repoStar" .RepoInfo }}
52
+
{{ template "fragments/starBtn"
53
+
(dict "SubjectAt" .RepoInfo.RepoAt
54
+
"IsStarred" .RepoInfo.IsStarred
55
+
"StarCount" .RepoInfo.Stats.StarCount) }}
53
56
<a
54
57
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
55
58
hx-boost="true"
+35
-10
appview/pages/templates/repo/commit.html
···
25
25
</div>
26
26
27
27
<div class="flex flex-wrap items-center space-x-2">
28
-
<p class="flex flex-wrap items-center gap-2 text-sm text-gray-500 dark:text-gray-300">
29
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
30
-
31
-
{{ if $did }}
32
-
{{ template "user/fragments/picHandleLink" $did }}
33
-
{{ else }}
34
-
<a href="mailto:{{ $commit.Author.Email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $commit.Author.Name }}</a>
35
-
{{ end }}
28
+
<p class="flex flex-wrap items-center gap-1 text-sm text-gray-500 dark:text-gray-300">
29
+
{{ template "attribution" . }}
36
30
37
31
<span class="px-1 select-none before:content-['\00B7']"></span>
38
-
{{ template "repo/fragments/time" $commit.Author.When }}
32
+
{{ template "repo/fragments/time" $commit.Committer.When }}
39
33
<span class="px-1 select-none before:content-['\00B7']"></span>
40
34
41
35
<a href="/{{ $repo }}/commit/{{ $commit.This }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ slice $commit.This 0 8 }}</a>
···
79
73
</section>
80
74
{{end}}
81
75
76
+
{{ define "attribution" }}
77
+
{{ $commit := .Diff.Commit }}
78
+
{{ $showCommitter := true }}
79
+
{{ if eq $commit.Author.Email $commit.Committer.Email }}
80
+
{{ $showCommitter = false }}
81
+
{{ end }}
82
+
83
+
{{ if $showCommitter }}
84
+
authored by {{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid) }}
85
+
{{ range $commit.CoAuthors }}
86
+
{{ template "attributedUser" (list .Email .Name $.EmailToDid) }}
87
+
{{ end }}
88
+
and committed by {{ template "attributedUser" (list $commit.Committer.Email $commit.Committer.Name $.EmailToDid) }}
89
+
{{ else }}
90
+
{{ template "attributedUser" (list $commit.Author.Email $commit.Author.Name $.EmailToDid )}}
91
+
{{ end }}
92
+
{{ end }}
93
+
94
+
{{ define "attributedUser" }}
95
+
{{ $email := index . 0 }}
96
+
{{ $name := index . 1 }}
97
+
{{ $map := index . 2 }}
98
+
{{ $did := index $map $email }}
99
+
100
+
{{ if $did }}
101
+
{{ template "user/fragments/picHandleLink" $did }}
102
+
{{ else }}
103
+
<a href="mailto:{{ $email }}" class="no-underline hover:underline text-gray-500 dark:text-gray-300">{{ $name }}</a>
104
+
{{ end }}
105
+
{{ end }}
106
+
82
107
{{ define "topbarLayout" }}
83
108
<header class="col-span-full" style="z-index: 20;">
84
109
{{ template "layouts/fragments/topbar" . }}
···
111
136
{{ end }}
112
137
113
138
{{ define "contentAfter" }}
114
-
{{ template "repo/fragments/diff" (list .RepoInfo.FullName .Diff .DiffOpts) }}
139
+
{{ template "repo/fragments/diff" (list .Diff .DiffOpts) }}
115
140
{{end}}
116
141
117
142
{{ define "contentAfterLeft" }}
+1
-1
appview/pages/templates/repo/compare/compare.html
+2
-2
appview/pages/templates/repo/empty.html
···
26
26
{{ else if (and .LoggedInUser (eq .LoggedInUser.Did .RepoInfo.OwnerDid)) }}
27
27
{{ $knot := .RepoInfo.Knot }}
28
28
{{ if eq $knot "knot1.tangled.sh" }}
29
-
{{ $knot = "tangled.sh" }}
29
+
{{ $knot = "tangled.org" }}
30
30
{{ end }}
31
31
<div class="w-full flex place-content-center">
32
32
<div class="py-6 w-fit flex flex-col gap-4">
···
35
35
36
36
<p><span class="{{$bullet}}">1</span>First, generate a new <a href="https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key" class="underline">SSH key pair</a>.</p>
37
37
<p><span class="{{$bullet}}">2</span>Then add the public key to your account from the <a href="/settings" class="underline">settings</a> page.</p>
38
-
<p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code></p>
38
+
<p><span class="{{$bullet}}">3</span>Configure your remote to <code>git@{{ $knot | stripPort }}:{{ resolve .RepoInfo.OwnerDid }}/{{ .RepoInfo.Name }}</code></p>
39
39
<p><span class="{{$bullet}}">4</span>Push!</p>
40
40
</div>
41
41
</div>
+2
-1
appview/pages/templates/repo/fork.html
···
25
25
value="{{ . }}"
26
26
class="mr-2"
27
27
id="domain-{{ . }}"
28
+
{{if eq (len $.Knots) 1}}checked{{end}}
28
29
/>
29
30
<label for="domain-{{ . }}" class="dark:text-white">{{ . }}</label>
30
31
</div>
···
33
34
{{ end }}
34
35
</div>
35
36
</div>
36
-
<p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/knots" class="underline">Learn how to register your own knot.</a></p>
37
+
<p class="text-sm text-gray-500 dark:text-gray-400">A knot hosts repository data. <a href="/settings/knots" class="underline">Learn how to register your own knot.</a></p>
37
38
</fieldset>
38
39
39
40
<div class="space-y-2">
+49
appview/pages/templates/repo/fragments/backlinks.html
···
1
+
{{ define "repo/fragments/backlinks" }}
2
+
{{ if .Backlinks }}
3
+
<div id="at-uri-panel" class="px-2 md:px-0">
4
+
<div>
5
+
<span class="text-sm py-1 font-bold text-gray-500 dark:text-gray-400">Referenced by</span>
6
+
</div>
7
+
<ul>
8
+
{{ range .Backlinks }}
9
+
<li>
10
+
{{ $repoOwner := resolve .Handle }}
11
+
{{ $repoName := .Repo }}
12
+
{{ $repoUrl := printf "%s/%s" $repoOwner $repoName }}
13
+
<div class="flex flex-col">
14
+
<div class="flex gap-2 items-center">
15
+
{{ if .State.IsClosed }}
16
+
<span class="text-gray-500 dark:text-gray-400">
17
+
{{ i "ban" "size-3" }}
18
+
</span>
19
+
{{ else if eq .Kind.String "issues" }}
20
+
<span class="text-green-600 dark:text-green-500">
21
+
{{ i "circle-dot" "size-3" }}
22
+
</span>
23
+
{{ else if .State.IsOpen }}
24
+
<span class="text-green-600 dark:text-green-500">
25
+
{{ i "git-pull-request" "size-3" }}
26
+
</span>
27
+
{{ else if .State.IsMerged }}
28
+
<span class="text-purple-600 dark:text-purple-500">
29
+
{{ i "git-merge" "size-3" }}
30
+
</span>
31
+
{{ else }}
32
+
<span class="text-gray-600 dark:text-gray-300">
33
+
{{ i "git-pull-request-closed" "size-3" }}
34
+
</span>
35
+
{{ end }}
36
+
<a href="{{ . }}" class="line-clamp-1 text-sm"><span class="text-gray-500 dark:text-gray-400">#{{ .SubjectId }}</span> {{ .Title }}</a>
37
+
</div>
38
+
{{ if not (eq $.RepoInfo.FullName $repoUrl) }}
39
+
<div>
40
+
<span>on <a href="/{{ $repoUrl }}">{{ $repoUrl }}</a></span>
41
+
</div>
42
+
{{ end }}
43
+
</div>
44
+
</li>
45
+
{{ end }}
46
+
</ul>
47
+
</div>
48
+
{{ end }}
49
+
{{ end }}
+3
-2
appview/pages/templates/repo/fragments/cloneDropdown.html
···
43
43
44
44
<!-- SSH Clone -->
45
45
<div class="mb-3">
46
+
{{ $repoOwnerHandle := resolve .RepoInfo.OwnerDid }}
46
47
<label class="block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1">SSH</label>
47
48
<div class="flex items-center border border-gray-300 dark:border-gray-600 rounded">
48
49
<code
49
50
class="flex-1 px-3 py-2 text-sm bg-gray-50 dark:bg-gray-700 text-gray-900 dark:text-gray-100 rounded-l select-all cursor-pointer whitespace-nowrap overflow-x-auto"
50
51
onclick="window.getSelection().selectAllChildren(this)"
51
-
data-url="git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}"
52
-
>git@{{ $knot | stripPort }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code>
52
+
data-url="git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}"
53
+
>git@{{ $knot | stripPort }}:{{ $repoOwnerHandle }}/{{ .RepoInfo.Name }}</code>
53
54
<button
54
55
onclick="copyToClipboard(this, this.previousElementSibling.getAttribute('data-url'))"
55
56
class="px-3 py-2 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200 border-l border-gray-300 dark:border-gray-600"
+2
-3
appview/pages/templates/repo/fragments/diff.html
+15
-1
appview/pages/templates/repo/fragments/editLabelPanel.html
···
170
170
{{ $fieldName := $def.AtUri }}
171
171
{{ $valueType := $def.ValueType }}
172
172
{{ $value := .value }}
173
+
173
174
{{ if $valueType.IsDidFormat }}
174
175
{{ $value = trimPrefix (resolve .value) "@" }}
176
+
<actor-typeahead>
177
+
<input
178
+
autocapitalize="none"
179
+
autocorrect="off"
180
+
autocomplete="off"
181
+
placeholder="user.tngl.sh"
182
+
value="{{$value}}"
183
+
name="{{$fieldName}}"
184
+
type="text"
185
+
class="p-1 w-full text-sm"
186
+
/>
187
+
</actor-typeahead>
188
+
{{ else }}
189
+
<input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}">
175
190
{{ end }}
176
-
<input class="p-1 w-full" type="text" name="{{$fieldName}}" value="{{$value}}">
177
191
{{ end }}
178
192
179
193
{{ define "nullTypeInput" }}
+1
-16
appview/pages/templates/repo/fragments/participants.html
···
6
6
<span class="font-bold text-gray-500 dark:text-gray-400 capitalize">Participants</span>
7
7
<span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 ml-1">{{ len $all }}</span>
8
8
</div>
9
-
<div class="flex items-center -space-x-3 mt-2">
10
-
{{ $c := "z-50 z-40 z-30 z-20 z-10" }}
11
-
{{ range $i, $p := $ps }}
12
-
<img
13
-
src="{{ tinyAvatar . }}"
14
-
alt=""
15
-
class="rounded-full h-8 w-8 mr-1 border-2 border-gray-100 dark:border-gray-900 z-{{sub 5 $i}}0"
16
-
/>
17
-
{{ end }}
18
-
19
-
{{ if gt (len $all) 5 }}
20
-
<span class="pl-4 text-gray-500 dark:text-gray-400 text-sm">
21
-
+{{ sub (len $all) 5 }}
22
-
</span>
23
-
{{ end }}
24
-
</div>
9
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "w-8 h-8") }}
25
10
</div>
26
11
{{ end }}
-26
appview/pages/templates/repo/fragments/repoStar.html
···
1
-
{{ define "repo/fragments/repoStar" }}
2
-
<button
3
-
id="starBtn"
4
-
class="btn disabled:opacity-50 disabled:cursor-not-allowed flex gap-2 items-center group"
5
-
{{ if .IsStarred }}
6
-
hx-delete="/star?subject={{ .RepoAt }}&countHint={{ .Stats.StarCount }}"
7
-
{{ else }}
8
-
hx-post="/star?subject={{ .RepoAt }}&countHint={{ .Stats.StarCount }}"
9
-
{{ end }}
10
-
11
-
hx-trigger="click"
12
-
hx-target="this"
13
-
hx-swap="outerHTML"
14
-
hx-disabled-elt="#starBtn"
15
-
>
16
-
{{ if .IsStarred }}
17
-
{{ i "star" "w-4 h-4 fill-current" }}
18
-
{{ else }}
19
-
{{ i "star" "w-4 h-4" }}
20
-
{{ end }}
21
-
<span class="text-sm">
22
-
{{ .Stats.StarCount }}
23
-
</span>
24
-
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
25
-
</button>
26
-
{{ end }}
+31
-9
appview/pages/templates/repo/index.html
···
14
14
{{ end }}
15
15
<div class="flex items-center justify-between pb-5">
16
16
{{ block "branchSelector" . }}{{ end }}
17
-
<div class="flex md:hidden items-center gap-2">
17
+
<div class="flex md:hidden items-center gap-3">
18
18
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold">
19
19
{{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }}
20
20
</a>
···
47
47
<div class="px-4 py-2 border-b border-gray-200 dark:border-gray-600 flex items-center gap-4 flex-wrap">
48
48
{{ range $value := .Languages }}
49
49
<div
50
-
class="flex flex-grow items-center gap-2 text-xs align-items-center justify-center"
50
+
class="flex items-center gap-2 text-xs align-items-center justify-center"
51
51
>
52
52
{{ template "repo/fragments/colorBall" (dict "color" (langColor $value.Name)) }}
53
53
<div>{{ or $value.Name "Other" }}
···
66
66
67
67
{{ define "branchSelector" }}
68
68
<div class="flex gap-2 items-center justify-between w-full">
69
-
<div class="flex gap-2 items-center">
69
+
<div class="flex gap-2 items-stretch">
70
70
<select
71
71
onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)"
72
72
class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700"
···
228
228
<span
229
229
class="mx-1 before:content-['ยท'] before:select-none"
230
230
></span>
231
-
<span>
232
-
{{ $did := index $.EmailToDid .Author.Email }}
233
-
<a href="{{ if $did }}/{{ resolve $did }}{{ else }}mailto:{{ .Author.Email }}{{ end }}"
234
-
class="text-gray-500 dark:text-gray-400 no-underline hover:underline"
235
-
>{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ .Author.Name }}{{ end }}</a>
236
-
</span>
231
+
{{ template "attribution" (list . $.EmailToDid) }}
237
232
<div class="inline-block px-1 select-none after:content-['ยท']"></div>
238
233
{{ template "repo/fragments/time" .Committer.When }}
239
234
···
259
254
{{ end }}
260
255
</div>
261
256
</div>
257
+
{{ end }}
258
+
259
+
{{ define "attribution" }}
260
+
{{ $commit := index . 0 }}
261
+
{{ $map := index . 1 }}
262
+
<span class="flex items-center">
263
+
{{ $author := index $map $commit.Author.Email }}
264
+
{{ $coauthors := $commit.CoAuthors }}
265
+
{{ $all := list }}
266
+
267
+
{{ if $author }}
268
+
{{ $all = append $all $author }}
269
+
{{ end }}
270
+
{{ range $coauthors }}
271
+
{{ $co := index $map .Email }}
272
+
{{ if $co }}
273
+
{{ $all = append $all $co }}
274
+
{{ end }}
275
+
{{ end }}
276
+
277
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }}
278
+
<a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
279
+
class="no-underline hover:underline">
280
+
{{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }}
281
+
{{ if $coauthors }} +{{ length $coauthors }}{{ end }}
282
+
</a>
283
+
</span>
262
284
{{ end }}
263
285
264
286
{{ define "branchList" }}
+2
-2
appview/pages/templates/repo/issues/fragments/issueCommentHeader.html
···
19
19
{{ end }}
20
20
21
21
{{ define "timestamp" }}
22
-
<a href="#{{ .Comment.Id }}"
22
+
<a href="#comment-{{ .Comment.Id }}"
23
23
class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-400 hover:underline no-underline"
24
-
id="{{ .Comment.Id }}">
24
+
id="comment-{{ .Comment.Id }}">
25
25
{{ if .Comment.Deleted }}
26
26
{{ template "repo/fragments/shortTimeAgo" .Comment.Deleted }}
27
27
{{ else if .Comment.Edited }}
+3
appview/pages/templates/repo/issues/issue.html
···
20
20
"Subject" $.Issue.AtUri
21
21
"State" $.Issue.Labels) }}
22
22
{{ template "repo/fragments/participants" $.Issue.Participants }}
23
+
{{ template "repo/fragments/backlinks"
24
+
(dict "RepoInfo" $.RepoInfo
25
+
"Backlinks" $.Backlinks) }}
23
26
{{ template "repo/fragments/externalLinkPanel" $.Issue.AtUri }}
24
27
</div>
25
28
</div>
+116
-35
appview/pages/templates/repo/issues/issues.html
···
30
30
<div class="grid gap-2 grid-cols-[auto_1fr_auto] grid-row-2">
31
31
<form class="flex relative col-span-3 sm:col-span-1 sm:col-start-2" method="GET">
32
32
<input type="hidden" name="state" value="{{ if .FilteringByOpen }}open{{ else }}closed{{ end }}">
33
-
<div class="absolute left-3 top-1/2 -translate-y-1/2 text-gray-400 pointer-events-none">
34
-
{{ i "search" "w-4 h-4" }}
33
+
<div class="flex-1 flex relative">
34
+
<input
35
+
id="search-q"
36
+
class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer"
37
+
type="text"
38
+
name="q"
39
+
value="{{ .FilterQuery }}"
40
+
placeholder=" "
41
+
>
42
+
<a
43
+
href="?state={{ if .FilteringByOpen }}open{{ else }}closed{{ end }}"
44
+
class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 hidden peer-[:not(:placeholder-shown)]:block"
45
+
>
46
+
{{ i "x" "w-4 h-4" }}
47
+
</a>
35
48
</div>
36
-
<input class="flex-1 p-1 pl-10 pr-10 peer" type="text" name="q" value="{{ .FilterQuery }}" placeholder=" ">
37
-
<a
38
-
href="?state={{ if .FilteringByOpen }}open{{ else }}closed{{ end }}"
39
-
class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 hidden peer-[:not(:placeholder-shown)]:block"
49
+
<button
50
+
type="submit"
51
+
class="p-2 text-gray-400 border rounded-r border-gray-400 dark:border-gray-600"
40
52
>
41
-
{{ i "x" "w-4 h-4" }}
42
-
</a>
53
+
{{ i "search" "w-4 h-4" }}
54
+
</button>
43
55
</form>
44
56
<div class="sm:row-start-1">
45
-
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }}
57
+
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }}
46
58
</div>
47
59
<a
48
60
href="/{{ .RepoInfo.FullName }}/issues/new"
···
59
71
<div class="mt-2">
60
72
{{ template "repo/issues/fragments/issueListing" (dict "Issues" .Issues "RepoPrefix" .RepoInfo.FullName "LabelDefs" .LabelDefs) }}
61
73
</div>
62
-
{{ block "pagination" . }} {{ end }}
74
+
{{if gt .IssueCount .Page.Limit }}
75
+
{{ block "pagination" . }} {{ end }}
76
+
{{ end }}
63
77
{{ end }}
64
78
65
79
{{ define "pagination" }}
66
-
<div class="flex justify-end mt-4 gap-2">
67
-
{{ $currentState := "closed" }}
68
-
{{ if .FilteringByOpen }}
69
-
{{ $currentState = "open" }}
70
-
{{ end }}
80
+
<div class="flex justify-center items-center mt-4 gap-2">
81
+
{{ $currentState := "closed" }}
82
+
{{ if .FilteringByOpen }}
83
+
{{ $currentState = "open" }}
84
+
{{ end }}
85
+
86
+
{{ $prev := .Page.Previous.Offset }}
87
+
{{ $next := .Page.Next.Offset }}
88
+
{{ $lastPage := sub .IssueCount (mod .IssueCount .Page.Limit) }}
71
89
90
+
<a
91
+
class="
92
+
btn flex items-center gap-2 no-underline hover:no-underline
93
+
dark:text-white dark:hover:bg-gray-700
94
+
{{ if le .Page.Offset 0 }}
95
+
cursor-not-allowed opacity-50
96
+
{{ end }}
97
+
"
72
98
{{ if gt .Page.Offset 0 }}
73
-
{{ $prev := .Page.Previous }}
74
-
<a
75
-
class="btn flex items-center gap-2 no-underline hover:no-underline dark:text-white dark:hover:bg-gray-700"
76
-
hx-boost="true"
77
-
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev.Offset }}&limit={{ $prev.Limit }}"
78
-
>
79
-
{{ i "chevron-left" "w-4 h-4" }}
80
-
previous
81
-
</a>
82
-
{{ else }}
83
-
<div></div>
99
+
hx-boost="true"
100
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev }}&limit={{ .Page.Limit }}"
84
101
{{ end }}
102
+
>
103
+
{{ i "chevron-left" "w-4 h-4" }}
104
+
previous
105
+
</a>
85
106
107
+
<!-- don't show the first page link if the current page is the first page -->
108
+
{{ if gt .Page.Offset 0 }}
109
+
<a
110
+
hx-boost="true"
111
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset=0&limit={{ .Page.Limit }}"
112
+
>
113
+
1
114
+
</a>
115
+
{{ end }}
116
+
117
+
<!-- if previous page is not first or second page (prev > limit) -->
118
+
{{ if gt $prev .Page.Limit }}
119
+
<span>...</span>
120
+
{{ end }}
121
+
122
+
<!-- if previous page is not the first page -->
123
+
{{ if gt $prev 0 }}
124
+
<a
125
+
hx-boost="true"
126
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $prev }}&limit={{ .Page.Limit }}"
127
+
>
128
+
{{ add (div $prev .Page.Limit) 1 }}
129
+
</a>
130
+
{{ end }}
131
+
132
+
<!-- current page. this is always visible -->
133
+
<span class="font-bold">
134
+
{{ add (div .Page.Offset .Page.Limit) 1 }}
135
+
</span>
136
+
137
+
<!-- if next page is not last page -->
138
+
{{ if lt $next $lastPage }}
139
+
<a
140
+
hx-boost="true"
141
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next }}&limit={{ .Page.Limit }}"
142
+
>
143
+
{{ add (div $next .Page.Limit) 1 }}
144
+
</a>
145
+
{{ end }}
146
+
147
+
<!-- if next page is not the second-to-last or last page (next < issues - 2 * limit) -->
148
+
{{ if lt ($next) (sub .IssueCount (mul (2) .Page.Limit)) }}
149
+
<span>...</span>
150
+
{{ end }}
151
+
152
+
<!-- if it's not the last page -->
153
+
{{ if lt .Page.Offset $lastPage }}
154
+
<a
155
+
hx-boost="true"
156
+
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $lastPage }}&limit={{ .Page.Limit }}"
157
+
>
158
+
{{ add (div $lastPage .Page.Limit) 1 }}
159
+
</a>
160
+
{{ end }}
161
+
162
+
<a
163
+
class="
164
+
btn flex items-center gap-2 no-underline hover:no-underline
165
+
dark:text-white dark:hover:bg-gray-700
166
+
{{ if ne (len .Issues) .Page.Limit }}
167
+
cursor-not-allowed opacity-50
168
+
{{ end }}
169
+
"
86
170
{{ if eq (len .Issues) .Page.Limit }}
87
-
{{ $next := .Page.Next }}
88
-
<a
89
-
class="btn flex items-center gap-2 no-underline hover:no-underline dark:text-white dark:hover:bg-gray-700"
90
-
hx-boost="true"
91
-
href = "/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next.Offset }}&limit={{ $next.Limit }}"
92
-
>
93
-
next
94
-
{{ i "chevron-right" "w-4 h-4" }}
95
-
</a>
171
+
hx-boost="true"
172
+
href="/{{ $.RepoInfo.FullName }}/issues?state={{ $currentState }}&q={{ .FilterQuery }}&offset={{ $next }}&limit={{ .Page.Limit }}"
96
173
{{ end }}
174
+
>
175
+
next
176
+
{{ i "chevron-right" "w-4 h-4" }}
177
+
</a>
97
178
</div>
98
179
{{ end }}
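The pagination block above derives page labels from raw offsets: $lastPage is the offset of the final page, and each link is numbered offset / limit + 1. A minimal Go sketch of the same arithmetic, using illustrative values only (these names are not the appview's actual types):

package main

import "fmt"

func main() {
	// Illustrative values: 47 issues listed 10 per page.
	issueCount, limit := 47, 10

	// Offset of the last page, mirroring
	// {{ $lastPage := sub .IssueCount (mod .IssueCount .Page.Limit) }}.
	lastPage := issueCount - issueCount%limit // 40

	for offset := 0; offset <= lastPage; offset += limit {
		// Page label, mirroring {{ add (div .Page.Offset .Page.Limit) 1 }}.
		fmt.Printf("offset=%d -> page %d\n", offset, offset/limit+1)
	}
	// Prints offset=0 -> page 1 through offset=40 -> page 5.
}

The previous and next buttons are disabled by comparing .Page.Offset against 0 and len .Issues against .Page.Limit, matching the conditions in the template above.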
+40
-23
appview/pages/templates/repo/log.html
···
17
17
<div class="hidden md:flex md:flex-col divide-y divide-gray-200 dark:divide-gray-700">
18
18
{{ $grid := "grid grid-cols-14 gap-4" }}
19
19
<div class="{{ $grid }}">
20
-
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2">Author</div>
20
+
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Author</div>
21
21
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-3">Commit</div>
22
22
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-6">Message</div>
23
-
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-1"></div>
24
23
<div class="py-2 text-sm text-left text-gray-700 dark:text-gray-300 uppercase font-bold col-span-2 justify-self-end">Date</div>
25
24
</div>
26
25
{{ range $index, $commit := .Commits }}
27
26
{{ $messageParts := splitN $commit.Message "\n\n" 2 }}
28
27
<div class="{{ $grid }} py-3">
29
-
<div class="align-top truncate col-span-2">
30
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
31
-
{{ if $did }}
32
-
{{ template "user/fragments/picHandleLink" $did }}
33
-
{{ else }}
34
-
<a href="mailto:{{ $commit.Author.Email }}" class="text-gray-700 dark:text-gray-300 no-underline hover:underline">{{ $commit.Author.Name }}</a>
35
-
{{ end }}
28
+
<div class="align-top col-span-3">
29
+
{{ template "attribution" (list $commit $.EmailToDid) }}
36
30
</div>
37
31
<div class="align-top font-mono flex items-start col-span-3">
38
32
{{ $verified := $.VerifiedCommits.IsVerified $commit.Hash.String }}
···
61
55
<div class="align-top col-span-6">
62
56
<div>
63
57
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ $commit.Hash.String }}" class="dark:text-white no-underline hover:underline">{{ index $messageParts 0 }}</a>
58
+
64
59
{{ if gt (len $messageParts) 1 }}
65
60
<button class="py-1/2 px-1 bg-gray-200 hover:bg-gray-400 dark:bg-gray-700 dark:hover:bg-gray-600 rounded" hx-on:click="this.parentElement.nextElementSibling.classList.toggle('hidden')">{{ i "ellipsis" "w-3 h-3" }}</button>
66
61
{{ end }}
···
72
67
</span>
73
68
{{ end }}
74
69
{{ end }}
70
+
71
+
<!-- ci status -->
72
+
<span class="text-xs">
73
+
{{ $pipeline := index $.Pipelines .Hash.String }}
74
+
{{ if and $pipeline (gt (len $pipeline.Statuses) 0) }}
75
+
{{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }}
76
+
{{ end }}
77
+
</span>
75
78
</div>
76
79
77
80
{{ if gt (len $messageParts) 1 }}
78
81
<p class="hidden mt-1 text-sm text-gray-600 dark:text-gray-400">{{ nl2br (index $messageParts 1) }}</p>
79
82
{{ end }}
80
-
</div>
81
-
<div class="align-top col-span-1">
82
-
<!-- ci status -->
83
-
{{ $pipeline := index $.Pipelines .Hash.String }}
84
-
{{ if and $pipeline (gt (len $pipeline.Statuses) 0) }}
85
-
{{ template "repo/pipelines/fragments/pipelineSymbolLong" (dict "Pipeline" $pipeline "RepoInfo" $.RepoInfo) }}
86
-
{{ end }}
87
83
</div>
88
84
<div class="align-top justify-self-end text-gray-500 dark:text-gray-400 col-span-2">{{ template "repo/fragments/shortTimeAgo" $commit.Committer.When }}</div>
89
85
</div>
···
152
148
</a>
153
149
</span>
154
150
<span class="mx-2 before:content-['ยท'] before:select-none"></span>
155
-
<span>
156
-
{{ $did := index $.EmailToDid $commit.Author.Email }}
157
-
<a href="{{ if $did }}/{{ $did }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
158
-
class="text-gray-500 dark:text-gray-400 no-underline hover:underline">
159
-
{{ if $did }}{{ template "user/fragments/picHandleLink" $did }}{{ else }}{{ $commit.Author.Name }}{{ end }}
160
-
</a>
161
-
</span>
151
+
{{ template "attribution" (list $commit $.EmailToDid) }}
162
152
<div class="inline-block px-1 select-none after:content-['ยท']"></div>
163
153
<span>{{ template "repo/fragments/shortTime" $commit.Committer.When }}</span>
164
154
···
176
166
</div>
177
167
</section>
178
168
169
+
{{ end }}
170
+
171
+
{{ define "attribution" }}
172
+
{{ $commit := index . 0 }}
173
+
{{ $map := index . 1 }}
174
+
<span class="flex items-center gap-1">
175
+
{{ $author := index $map $commit.Author.Email }}
176
+
{{ $coauthors := $commit.CoAuthors }}
177
+
{{ $all := list }}
178
+
179
+
{{ if $author }}
180
+
{{ $all = append $all $author }}
181
+
{{ end }}
182
+
{{ range $coauthors }}
183
+
{{ $co := index $map .Email }}
184
+
{{ if $co }}
185
+
{{ $all = append $all $co }}
186
+
{{ end }}
187
+
{{ end }}
188
+
189
+
{{ template "fragments/tinyAvatarList" (dict "all" $all "classes" "size-6") }}
190
+
<a href="{{ if $author }}/{{ $author }}{{ else }}mailto:{{ $commit.Author.Email }}{{ end }}"
191
+
class="no-underline hover:underline">
192
+
{{ if $author }}{{ resolve $author }}{{ else }}{{ $commit.Author.Name }}{{ end }}
193
+
{{ if $coauthors }} +{{ length $coauthors }}{{ end }}
194
+
</a>
195
+
</span>
179
196
{{ end }}
180
197
181
198
{{ define "repoAfter" }}
+2
-1
appview/pages/templates/repo/new.html
···
155
155
class="mr-2"
156
156
id="domain-{{ . }}"
157
157
required
158
+
{{if eq (len $.Knots) 1}}checked{{end}}
158
159
/>
159
160
<label for="domain-{{ . }}" class="dark:text-white lowercase">{{ . }}</label>
160
161
</div>
···
164
165
</div>
165
166
<p class="text-sm text-gray-500 dark:text-gray-400 mt-1">
166
167
A knot hosts repository data and handles Git operations.
167
-
You can also <a href="/knots" class="underline">register your own knot</a>.
168
+
You can also <a href="/settings/knots" class="underline">register your own knot</a>.
168
169
</p>
169
170
</div>
170
171
{{ end }}
+1
-1
appview/pages/templates/repo/pipelines/pipelines.html
···
23
23
</p>
24
24
<p>
25
25
<span class="{{ $bullet }}">2</span>Configure your CI/CD
26
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/pipeline.md" class="underline">pipeline</a>.
26
+
<a href="https://docs.tangled.org/spindles.html#pipelines" class="underline">pipeline</a>.
27
27
</p>
28
28
<p><span class="{{ $bullet }}">3</span>Trigger a workflow with a push or a pull-request!</p>
29
29
</div>
+14
appview/pages/templates/repo/pipelines/workflow.html
···
12
12
{{ block "sidebar" . }} {{ end }}
13
13
</div>
14
14
<div class="col-span-1 md:col-span-3">
15
+
<!-- TODO(boltless): explicitly check for pipeline cancel permission -->
16
+
{{ if $.RepoInfo.Roles.IsOwner }}
17
+
<div class="flex justify-between mb-2">
18
+
<div id="workflow-error" class="text-red-500 dark:text-red-400"></div>
19
+
<button
20
+
class="btn"
21
+
hx-post="/{{ $.RepoInfo.FullName }}/pipelines/{{ .Pipeline.Id }}/workflow/{{ .Workflow }}/cancel"
22
+
hx-swap="none"
23
+
{{ if (index .Pipeline.Statuses .Workflow).Latest.Status.IsFinish -}}
24
+
disabled
25
+
{{- end }}
26
+
>Cancel</button>
27
+
</div>
28
+
{{ end }}
15
29
{{ block "logs" . }} {{ end }}
16
30
</div>
17
31
</section>
+1
-1
appview/pages/templates/repo/pulls/patch.html
+3
appview/pages/templates/repo/pulls/pull.html
···
21
21
"Subject" $.Pull.AtUri
22
22
"State" $.Pull.Labels) }}
23
23
{{ template "repo/fragments/participants" $.Pull.Participants }}
24
+
{{ template "repo/fragments/backlinks"
25
+
(dict "RepoInfo" $.RepoInfo
26
+
"Backlinks" $.Backlinks) }}
24
27
{{ template "repo/fragments/externalLinkPanel" $.Pull.AtUri }}
25
28
</div>
26
29
</div>
+21
-9
appview/pages/templates/repo/pulls/pulls.html
···
36
36
<div class="grid gap-2 grid-cols-[auto_1fr_auto] grid-row-2">
37
37
<form class="flex relative col-span-3 sm:col-span-1 sm:col-start-2" method="GET">
38
38
<input type="hidden" name="state" value="{{ .FilteringBy.String }}">
39
-
<div class="absolute left-3 top-1/2 -translate-y-1/2 text-gray-400 pointer-events-none">
40
-
{{ i "search" "w-4 h-4" }}
39
+
<div class="flex-1 flex relative">
40
+
<input
41
+
id="search-q"
42
+
class="flex-1 py-1 pl-2 pr-10 mr-[-1px] rounded-r-none focus:border-0 focus:outline-none focus:ring focus:ring-blue-400 ring-inset peer"
43
+
type="text"
44
+
name="q"
45
+
value="{{ .FilterQuery }}"
46
+
placeholder=" "
47
+
>
48
+
<a
49
+
href="?state={{ .FilteringBy.String }}"
50
+
class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 hidden peer-[:not(:placeholder-shown)]:block"
51
+
>
52
+
{{ i "x" "w-4 h-4" }}
53
+
</a>
41
54
</div>
42
-
<input class="flex-1 p-1 pl-10 pr-10 peer" type="text" name="q" value="{{ .FilterQuery }}" placeholder=" ">
43
-
<a
44
-
href="?state={{ .FilteringBy.String }}"
45
-
class="absolute right-3 top-1/2 -translate-y-1/2 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 hidden peer-[:not(:placeholder-shown)]:block"
55
+
<button
56
+
type="submit"
57
+
class="p-2 text-gray-400 border rounded-r border-gray-400 dark:border-gray-600"
46
58
>
47
-
{{ i "x" "w-4 h-4" }}
48
-
</a>
59
+
{{ i "search" "w-4 h-4" }}
60
+
</button>
49
61
</form>
50
62
<div class="sm:row-start-1">
51
-
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active) }}
63
+
{{ template "fragments/tabSelector" (dict "Name" "state" "Values" $values "Active" $active "Include" "#search-q") }}
52
64
</div>
53
65
<a
54
66
href="/{{ .RepoInfo.FullName }}/pulls/new"
+5
-4
appview/pages/templates/repo/settings/access.html
···
29
29
{{ template "addCollaboratorButton" . }}
30
30
{{ end }}
31
31
{{ range .Collaborators }}
32
+
{{ $handle := resolve .Did }}
32
33
<div class="border border-gray-200 dark:border-gray-700 rounded p-4">
33
34
<div class="flex items-center gap-3">
34
35
<img
35
-
src="{{ fullAvatar .Handle }}"
36
-
alt="{{ .Handle }}"
36
+
src="{{ fullAvatar $handle }}"
37
+
alt="{{ $handle }}"
37
38
class="rounded-full h-10 w-10 border border-gray-300 dark:border-gray-600 flex-shrink-0"/>
38
39
39
40
<div class="flex-1 min-w-0">
40
-
<a href="/{{ .Handle }}" class="block truncate">
41
-
{{ didOrHandle .Did .Handle }}
41
+
<a href="/{{ $handle }}" class="block truncate">
42
+
{{ $handle }}
42
43
</a>
43
44
<p class="text-sm text-gray-500 dark:text-gray-400">{{ .Role }}</p>
44
45
</div>
+1
-1
appview/pages/templates/repo/settings/pipelines.html
···
22
22
<p class="text-gray-500 dark:text-gray-400">
23
23
Choose a spindle to execute your workflows on. Only repository owners
24
24
can configure spindles. Spindles can be self-hosted,
25
-
<a class="text-gray-500 dark:text-gray-400 underline" href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">
25
+
<a class="text-gray-500 dark:text-gray-400 underline" href="https://docs.tangled.org/spindles.html#self-hosting-guide">
26
26
click to learn more.
27
27
</a>
28
28
</p>
+22
-6
appview/pages/templates/spindles/dashboard.html
···
1
-
{{ define "title" }}{{.Spindle.Instance}} · spindles{{ end }}
1
+
{{ define "title" }}{{.Spindle.Instance}} · {{ .Tab }} settings{{ end }}
2
2
3
3
{{ define "content" }}
4
-
<div class="px-6 py-4">
4
+
<div class="p-6">
5
+
<p class="text-xl font-bold dark:text-white">Settings</p>
6
+
</div>
7
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
8
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
9
+
<div class="col-span-1">
10
+
{{ template "user/settings/fragments/sidebar" . }}
11
+
</div>
12
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
13
+
{{ template "spindleDash" . }}
14
+
</div>
15
+
</section>
16
+
</div>
17
+
{{ end }}
18
+
19
+
{{ define "spindleDash" }}
20
+
<div>
5
21
<div class="flex justify-between items-center">
6
-
<h1 class="text-xl font-bold dark:text-white">{{ .Spindle.Instance }}</h1>
22
+
<h2 class="text-sm pb-2 uppercase font-bold">{{ .Tab }} · {{ .Spindle.Instance }}</h2>
7
23
<div id="right-side" class="flex gap-2">
8
24
{{ $style := "px-2 py-1 rounded flex items-center flex-shrink-0 gap-2" }}
9
25
{{ $isOwner := and .LoggedInUser (eq .LoggedInUser.Did .Spindle.Owner) }}
···
71
87
<button
72
88
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
73
89
title="Delete spindle"
74
-
hx-delete="/spindles/{{ .Instance }}"
90
+
hx-delete="/settings/spindles/{{ .Instance }}"
75
91
hx-swap="outerHTML"
76
92
hx-confirm="Are you sure you want to delete the spindle '{{ .Instance }}'?"
77
93
hx-headers='{"shouldRedirect": "true"}'
···
87
103
<button
88
104
class="btn gap-2 group"
89
105
title="Retry spindle verification"
90
-
hx-post="/spindles/{{ .Instance }}/retry"
106
+
hx-post="/settings/spindles/{{ .Instance }}/retry"
91
107
hx-swap="none"
92
108
hx-headers='{"shouldRefresh": "true"}'
93
109
>
···
104
120
<button
105
121
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
106
122
title="Remove member"
107
-
hx-post="/spindles/{{ $root.Spindle.Instance }}/remove"
123
+
hx-post="/settings/spindles/{{ $root.Spindle.Instance }}/remove"
108
124
hx-swap="none"
109
125
hx-vals='{"member": "{{$member}}" }'
110
126
hx-confirm="Are you sure you want to remove {{ resolve $member }} from this instance?"
+1
-1
appview/pages/templates/spindles/fragments/addMemberModal.html
+3
-3
appview/pages/templates/spindles/fragments/spindleListing.html
···
7
7
8
8
{{ define "spindleLeftSide" }}
9
9
{{ if .Verified }}
10
-
<a href="/spindles/{{ .Instance }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
10
+
<a href="/settings/spindles/{{ .Instance }}" class="hover:no-underline flex items-center gap-2 min-w-0 max-w-[60%]">
11
11
{{ i "hard-drive" "w-4 h-4" }}
12
12
<span class="hover:underline">
13
13
{{ .Instance }}
···
50
50
<button
51
51
class="btn text-red-500 hover:text-red-700 dark:text-red-400 dark:hover:text-red-300 gap-2 group"
52
52
title="Delete spindle"
53
-
hx-delete="/spindles/{{ .Instance }}"
53
+
hx-delete="/settings/spindles/{{ .Instance }}"
54
54
hx-swap="outerHTML"
55
55
hx-target="#spindle-{{.Id}}"
56
56
hx-confirm="Are you sure you want to delete the spindle '{{ .Instance }}'?"
···
66
66
<button
67
67
class="btn gap-2 group"
68
68
title="Retry spindle verification"
69
-
hx-post="/spindles/{{ .Instance }}/retry"
69
+
hx-post="/settings/spindles/{{ .Instance }}/retry"
70
70
hx-swap="none"
71
71
hx-target="#spindle-{{.Id}}"
72
72
>
+90
-59
appview/pages/templates/spindles/index.html
···
1
-
{{ define "title" }}spindles{{ end }}
1
+
{{ define "title" }}{{ .Tab }} settings{{ end }}
2
2
3
3
{{ define "content" }}
4
-
<div class="px-6 py-4 flex items-center justify-between gap-4 align-bottom">
5
-
<h1 class="text-xl font-bold dark:text-white">Spindles</h1>
6
-
<span class="flex items-center gap-1">
7
-
{{ i "book" "w-3 h-3" }}
8
-
<a href="https://tangled.org/@tangled.org/core/blob/master/docs/spindle/hosting.md">docs</a>
9
-
</span>
4
+
<div class="p-6">
5
+
<p class="text-xl font-bold dark:text-white">Settings</p>
6
+
</div>
7
+
<div class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
8
+
<section class="w-full grid grid-cols-1 md:grid-cols-4 gap-6">
9
+
<div class="col-span-1">
10
+
{{ template "user/settings/fragments/sidebar" . }}
11
+
</div>
12
+
<div class="col-span-1 md:col-span-3 flex flex-col gap-6">
13
+
{{ template "spindleList" . }}
14
+
</div>
15
+
</section>
16
+
</div>
17
+
{{ end }}
18
+
19
+
{{ define "spindleList" }}
20
+
<div class="grid grid-cols-1 md:grid-cols-3 gap-4 items-center">
21
+
<div class="col-span-1 md:col-span-2">
22
+
<h2 class="text-sm pb-2 uppercase font-bold">Spindle</h2>
23
+
{{ block "about" . }} {{ end }}
24
+
</div>
25
+
<div class="col-span-1 md:col-span-1 md:justify-self-end">
26
+
{{ template "docsButton" . }}
27
+
</div>
10
28
</div>
11
29
12
-
<section class="bg-white dark:bg-gray-800 p-6 rounded relative w-full mx-auto drop-shadow-sm dark:text-white">
30
+
<section>
13
31
<div class="flex flex-col gap-6">
14
-
{{ block "about" . }} {{ end }}
15
32
{{ block "list" . }} {{ end }}
16
33
{{ block "register" . }} {{ end }}
17
34
</div>
···
20
37
21
38
{{ define "about" }}
22
39
<section class="rounded flex items-center gap-2">
23
-
<p class="text-gray-500 dark:text-gray-400">
24
-
Spindles are small CI runners.
25
-
</p>
40
+
<p class="text-gray-500 dark:text-gray-400">
41
+
Spindles are small CI runners.
42
+
</p>
26
43
</section>
27
44
{{ end }}
28
45
29
46
{{ define "list" }}
30
-
<section class="rounded w-full flex flex-col gap-2">
31
-
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">your spindles</h2>
32
-
<div class="flex flex-col rounded border border-gray-200 dark:border-gray-700 w-full">
33
-
{{ range $spindle := .Spindles }}
34
-
{{ template "spindles/fragments/spindleListing" . }}
35
-
{{ else }}
36
-
<div class="flex items-center justify-center p-2 border-b border-gray-200 dark:border-gray-700 text-gray-500">
37
-
no spindles registered yet
38
-
</div>
39
-
{{ end }}
47
+
<section class="rounded w-full flex flex-col gap-2">
48
+
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">your spindles</h2>
49
+
<div class="flex flex-col rounded border border-gray-200 dark:border-gray-700 w-full">
50
+
{{ range $spindle := .Spindles }}
51
+
{{ template "spindles/fragments/spindleListing" . }}
52
+
{{ else }}
53
+
<div class="flex items-center justify-center p-2 border-b border-gray-200 dark:border-gray-700 text-gray-500">
54
+
no spindles registered yet
40
55
</div>
41
-
<div id="operation-error" class="text-red-500 dark:text-red-400"></div>
42
-
</section>
56
+
{{ end }}
57
+
</div>
58
+
<div id="operation-error" class="text-red-500 dark:text-red-400"></div>
59
+
</section>
43
60
{{ end }}
44
61
45
62
{{ define "register" }}
46
-
<section class="rounded w-full lg:w-fit flex flex-col gap-2">
47
-
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a spindle</h2>
48
-
<p class="mb-2 dark:text-gray-300">Enter the hostname of your spindle to get started.</p>
49
-
<form
50
-
hx-post="/spindles/register"
51
-
class="max-w-2xl mb-2 space-y-4"
52
-
hx-indicator="#register-button"
53
-
hx-swap="none"
54
-
>
55
-
<div class="flex gap-2">
56
-
<input
57
-
type="text"
58
-
id="instance"
59
-
name="instance"
60
-
placeholder="spindle.example.com"
61
-
required
62
-
class="flex-1 w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded"
63
-
>
64
-
<button
65
-
type="submit"
66
-
id="register-button"
67
-
class="btn rounded flex items-center py-2 dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600 group"
68
-
>
69
-
<span class="inline-flex items-center gap-2">
70
-
{{ i "plus" "w-4 h-4" }}
71
-
register
72
-
</span>
73
-
<span class="pl-2 hidden group-[.htmx-request]:inline">
74
-
{{ i "loader-circle" "w-4 h-4 animate-spin" }}
75
-
</span>
76
-
</button>
77
-
</div>
63
+
<section class="rounded w-full lg:w-fit flex flex-col gap-2">
64
+
<h2 class="text-sm font-bold py-2 uppercase dark:text-gray-300">register a spindle</h2>
65
+
<p class="mb-2 dark:text-gray-300">Enter the hostname of your spindle to get started.</p>
66
+
<form
67
+
hx-post="/settings/spindles/register"
68
+
class="max-w-2xl mb-2 space-y-4"
69
+
hx-indicator="#register-button"
70
+
hx-swap="none"
71
+
>
72
+
<div class="flex gap-2">
73
+
<input
74
+
type="text"
75
+
id="instance"
76
+
name="instance"
77
+
placeholder="spindle.example.com"
78
+
required
79
+
class="flex-1 w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded"
80
+
>
81
+
<button
82
+
type="submit"
83
+
id="register-button"
84
+
class="btn rounded flex items-center py-2 dark:bg-gray-700 dark:text-white dark:hover:bg-gray-600 group"
85
+
>
86
+
<span class="inline-flex items-center gap-2">
87
+
{{ i "plus" "w-4 h-4" }}
88
+
register
89
+
</span>
90
+
<span class="pl-2 hidden group-[.htmx-request]:inline">
91
+
{{ i "loader-circle" "w-4 h-4 animate-spin" }}
92
+
</span>
93
+
</button>
94
+
</div>
78
95
79
-
<div id="register-error" class="dark:text-red-400"></div>
80
-
</form>
96
+
<div id="register-error" class="dark:text-red-400"></div>
97
+
</form>
98
+
99
+
</section>
100
+
{{ end }}
81
101
82
-
</section>
102
+
{{ define "docsButton" }}
103
+
<a
104
+
class="btn flex items-center gap-2"
105
+
href="https://docs.tangled.org/spindles.html#self-hosting-guide">
106
+
{{ i "book" "size-4" }}
107
+
docs
108
+
</a>
109
+
<div
110
+
id="add-email-modal"
111
+
popover
112
+
class="bg-white w-full md:w-96 dark:bg-gray-800 p-4 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white backdrop:bg-gray-400/50 dark:backdrop:bg-gray-800/50">
113
+
</div>
83
114
{{ end }}
+6
-5
appview/pages/templates/strings/dashboard.html
···
1
-
{{ define "title" }}strings by {{ or .Card.UserHandle .Card.UserDid }}{{ end }}
1
+
{{ define "title" }}strings by {{ resolve .Card.UserDid }}{{ end }}
2
2
3
3
{{ define "extrameta" }}
4
-
<meta property="og:title" content="{{ or .Card.UserHandle .Card.UserDid }}" />
4
+
{{ $handle := resolve .Card.UserDid }}
5
+
<meta property="og:title" content="{{ $handle }}" />
5
6
<meta property="og:type" content="profile" />
6
-
<meta property="og:url" content="https://tangled.org/{{ or .Card.UserHandle .Card.UserDid }}" />
7
-
<meta property="og:description" content="{{ or .Card.Profile.Description .Card.UserHandle .Card.UserDid }}" />
7
+
<meta property="og:url" content="https://tangled.org/{{ $handle }}" />
8
+
<meta property="og:description" content="{{ or .Card.Profile.Description $handle }}" />
8
9
{{ end }}
9
10
10
11
···
35
36
{{ $s := index . 1 }}
36
37
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800">
37
38
<div class="font-medium dark:text-white flex gap-2 items-center">
38
-
<a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
39
+
<a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
39
40
</div>
40
41
{{ with $s.Description }}
41
42
<div class="text-gray-600 dark:text-gray-300 text-sm">
+11
-7
appview/pages/templates/strings/string.html
···
1
-
{{ define "title" }}{{ .String.Filename }} ยท by {{ didOrHandle .Owner.DID.String .Owner.Handle.String }}{{ end }}
1
+
{{ define "title" }}{{ .String.Filename }} ยท by {{ resolve .Owner.DID.String }}{{ end }}
2
2
3
3
{{ define "extrameta" }}
4
-
{{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }}
4
+
{{ $ownerId := resolve .Owner.DID.String }}
5
5
<meta property="og:title" content="{{ .String.Filename }} ยท by {{ $ownerId }}" />
6
6
<meta property="og:type" content="object" />
7
7
<meta property="og:url" content="https://tangled.org/strings/{{ $ownerId }}/{{ .String.Rkey }}" />
···
9
9
{{ end }}
10
10
11
11
{{ define "content" }}
12
-
{{ $ownerId := didOrHandle .Owner.DID.String .Owner.Handle.String }}
12
+
{{ $ownerId := resolve .Owner.DID.String }}
13
13
<section id="string-header" class="mb-4 py-2 px-6 dark:text-white">
14
14
<div class="text-lg flex items-center justify-between">
15
15
<div>
···
17
17
<span class="select-none">/</span>
18
18
<a href="/strings/{{ $ownerId }}/{{ .String.Rkey }}" class="font-bold">{{ .String.Filename }}</a>
19
19
</div>
20
-
{{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }}
21
-
<div class="flex gap-2 text-base">
20
+
<div class="flex gap-2 items-stretch text-base">
21
+
{{ if and .LoggedInUser (eq .LoggedInUser.Did .String.Did) }}
22
22
<a class="btn flex items-center gap-2 no-underline hover:no-underline p-2 group"
23
23
hx-boost="true"
24
24
href="/strings/{{ .String.Did }}/{{ .String.Rkey }}/edit">
···
37
37
<span class="hidden md:inline">delete</span>
38
38
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
39
39
</button>
40
-
</div>
41
-
{{ end }}
40
+
{{ end }}
41
+
{{ template "fragments/starBtn"
42
+
(dict "SubjectAt" .String.AtUri
43
+
"IsStarred" .IsStarred
44
+
"StarCount" .StarCount) }}
45
+
</div>
42
46
</div>
43
47
<span>
44
48
{{ with .String.Description }}
+1
-2
appview/pages/templates/timeline/fragments/goodfirstissues.html
···
3
3
<a href="/goodfirstissues" class="no-underline hover:no-underline">
4
4
<div class="flex items-center justify-between gap-2 bg-purple-200 dark:bg-purple-900 border border-purple-400 dark:border-purple-500 rounded mb-4 py-4 px-6 ">
5
5
<div class="flex-1 flex flex-col gap-2">
6
-
<div class="text-purple-500 dark:text-purple-400">Oct 2025</div>
7
6
<p>
8
-
Make your first contribution to an open-source project this October.
7
+
Make your first contribution to an open-source project.
9
8
<em>good-first-issue</em> helps new contributors find easy ways to
10
9
start contributing to open-source projects.
11
10
</p>
+5
-5
appview/pages/templates/timeline/fragments/timeline.html
···
14
14
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
15
15
{{ if .Repo }}
16
16
{{ template "timeline/fragments/repoEvent" (list $ .) }}
17
-
{{ else if .Star }}
17
+
{{ else if .RepoStar }}
18
18
{{ template "timeline/fragments/starEvent" (list $ .) }}
19
19
{{ else if .Follow }}
20
20
{{ template "timeline/fragments/followEvent" (list $ .) }}
···
52
52
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $repo.Created }}</span>
53
53
</div>
54
54
{{ with $repo }}
55
-
{{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "RepoAt" .RepoAt "Stats" (dict "StarCount" $event.StarCount))) }}
55
+
{{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "SubjectAt" .RepoAt "StarCount" $event.StarCount)) }}
56
56
{{ end }}
57
57
{{ end }}
58
58
59
59
{{ define "timeline/fragments/starEvent" }}
60
60
{{ $root := index . 0 }}
61
61
{{ $event := index . 1 }}
62
-
{{ $star := $event.Star }}
62
+
{{ $star := $event.RepoStar }}
63
63
{{ with $star }}
64
-
{{ $starrerHandle := resolve .StarredByDid }}
64
+
{{ $starrerHandle := resolve .Did }}
65
65
{{ $repoOwnerHandle := resolve .Repo.Did }}
66
66
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
67
67
{{ template "user/fragments/picHandleLink" $starrerHandle }}
···
72
72
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" .Created }}</span>
73
73
</div>
74
74
{{ with .Repo }}
75
-
{{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "RepoAt" .RepoAt "Stats" (dict "StarCount" $event.StarCount))) }}
75
+
{{ template "user/fragments/repoCard" (list $root . true true (dict "IsStarred" $event.IsStarred "SubjectAt" .RepoAt "StarCount" $event.StarCount)) }}
76
76
{{ end }}
77
77
{{ end }}
78
78
{{ end }}
+4
-2
appview/pages/templates/user/followers.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท followers {{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }} ยท followers {{ end }}
2
2
3
3
{{ define "profileContent" }}
4
4
<div id="all-followers" class="md:col-span-8 order-2 md:order-2">
···
19
19
"FollowersCount" .FollowersCount
20
20
"FollowingCount" .FollowingCount) }}
21
21
{{ else }}
22
-
<p class="px-6 dark:text-white">This user does not have any followers yet.</p>
22
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
23
+
<span>This user does not have any followers yet.</span>
24
+
</div>
23
25
{{ end }}
24
26
</div>
25
27
{{ end }}
+4
-2
appview/pages/templates/user/following.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท following {{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }} ยท following {{ end }}
2
2
3
3
{{ define "profileContent" }}
4
4
<div id="all-following" class="md:col-span-8 order-2 md:order-2">
···
19
19
"FollowersCount" .FollowersCount
20
20
"FollowingCount" .FollowingCount) }}
21
21
{{ else }}
22
-
<p class="px-6 dark:text-white">This user does not follow anyone yet.</p>
22
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
23
+
<span>This user does not follow anyone yet.</span>
24
+
</div>
23
25
{{ end }}
24
26
</div>
25
27
{{ end }}
+2
-2
appview/pages/templates/user/fragments/followCard.html
···
6
6
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $userIdent }}" alt="{{ $userIdent }}" />
7
7
</div>
8
8
9
-
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full">
9
+
<div class="flex flex-col md:flex-row md:items-center md:justify-between gap-2 w-full min-w-0">
10
10
<div class="flex-1 min-h-0 justify-around flex flex-col">
11
11
<a href="/{{ $userIdent }}">
12
12
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $userIdent | truncateAt30 }}</span>
13
13
</a>
14
14
{{ with .Profile }}
15
-
<p class="text-sm pb-2 md:pb-2">{{.Description}}</p>
15
+
<p class="text-sm pb-2 md:pb-2 break-words">{{.Description}}</p>
16
16
{{ end }}
17
17
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full">
18
18
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
+1
-1
appview/pages/templates/user/fragments/profileCard.html
···
1
1
{{ define "user/fragments/profileCard" }}
2
-
{{ $userIdent := didOrHandle .UserDid .UserHandle }}
2
+
{{ $userIdent := resolve .UserDid }}
3
3
<div class="grid grid-cols-3 md:grid-cols-1 gap-1 items-center">
4
4
<div id="avatar" class="col-span-1 flex justify-center items-center">
5
5
<div class="w-3/4 aspect-square relative">
+2
-1
appview/pages/templates/user/fragments/repoCard.html
···
1
1
{{ define "user/fragments/repoCard" }}
2
+
{{/* root, repo, fullName [,starButton [,starData]] */}}
2
3
{{ $root := index . 0 }}
3
4
{{ $repo := index . 1 }}
4
5
{{ $fullName := index . 2 }}
···
29
30
</div>
30
31
{{ if and $starButton $root.LoggedInUser }}
31
32
<div class="shrink-0">
32
-
{{ template "repo/fragments/repoStar" $starData }}
33
+
{{ template "fragments/starBtn" $starData }}
33
34
</div>
34
35
{{ end }}
35
36
</div>
+22
-4
appview/pages/templates/user/overview.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }}{{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }}{{ end }}
2
2
3
3
{{ define "profileContent" }}
4
4
<div id="all-repos" class="md:col-span-4 order-2 md:order-2">
···
16
16
<p class="text-sm font-bold px-2 pb-4 dark:text-white">ACTIVITY</p>
17
17
<div class="flex flex-col gap-4 relative">
18
18
{{ if .ProfileTimeline.IsEmpty }}
19
-
<p class="dark:text-white">This user does not have any activity yet.</p>
19
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
20
+
<span class="flex items-center gap-2">
21
+
This user does not have any activity yet.
22
+
</span>
23
+
</div>
20
24
{{ end }}
21
25
22
26
{{ with .ProfileTimeline }}
···
33
37
</p>
34
38
35
39
<div class="flex flex-col gap-1">
40
+
{{ block "commits" .Commits }} {{ end }}
36
41
{{ block "repoEvents" .RepoEvents }} {{ end }}
37
42
{{ block "issueEvents" .IssueEvents }} {{ end }}
38
43
{{ block "pullEvents" .PullEvents }} {{ end }}
···
43
48
{{ end }}
44
49
{{ end }}
45
50
</div>
51
+
{{ end }}
52
+
53
+
{{ define "commits" }}
54
+
{{ if . }}
55
+
<div class="flex flex-wrap items-center gap-1">
56
+
{{ i "git-commit-horizontal" "size-5" }}
57
+
created {{ . }} commits
58
+
</div>
59
+
{{ end }}
46
60
{{ end }}
47
61
48
62
{{ define "repoEvents" }}
···
224
238
{{ define "ownRepos" }}
225
239
<div>
226
240
<div class="text-sm font-bold px-2 pb-4 dark:text-white flex items-center gap-2">
227
-
<a href="/@{{ or $.Card.UserHandle $.Card.UserDid }}?tab=repos"
241
+
<a href="/{{ resolve $.Card.UserDid }}?tab=repos"
228
242
class="flex text-black dark:text-white items-center gap-2 no-underline hover:no-underline group">
229
243
<span>PINNED REPOS</span>
230
244
</a>
···
244
258
{{ template "user/fragments/repoCard" (list $ . false) }}
245
259
</div>
246
260
{{ else }}
247
-
<p class="dark:text-white">This user does not have any pinned repos.</p>
261
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
262
+
<span class="flex items-center gap-2">
263
+
This user does not have any pinned repos.
264
+
</span>
265
+
</div>
248
266
{{ end }}
249
267
</div>
250
268
</div>
+4
-2
appview/pages/templates/user/repos.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท repos {{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }} ยท repos {{ end }}
2
2
3
3
{{ define "profileContent" }}
4
4
<div id="all-repos" class="md:col-span-8 order-2 md:order-2">
···
13
13
{{ template "user/fragments/repoCard" (list $ . false) }}
14
14
</div>
15
15
{{ else }}
16
-
<p class="px-6 dark:text-white">This user does not have any repos yet.</p>
16
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
17
+
<span>This user does not have any repos yet.</span>
18
+
</div>
17
19
{{ end }}
18
20
</div>
19
21
{{ end }}
+1
-1
appview/pages/templates/user/settings/notifications.html
···
151
151
</div>
152
152
</div>
153
153
<label class="flex items-center gap-2">
154
-
<input type="checkbox" name="mentioned" {{if .Preferences.UserMentioned}}checked{{end}}>
154
+
<input type="checkbox" name="user_mentioned" {{if .Preferences.UserMentioned}}checked{{end}}>
155
155
</label>
156
156
</div>
157
157
+9
-6
appview/pages/templates/user/signup.html
···
43
43
page to complete your registration.
44
44
</span>
45
45
<div class="w-full mt-4 text-center">
46
-
<div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}"></div>
46
+
<div class="cf-turnstile" data-sitekey="{{ .CloudflareSiteKey }}" data-size="flexible"></div>
47
47
</div>
48
48
<button class="btn text-base w-full my-2 mt-6" type="submit" id="signup-button" tabindex="7" >
49
49
<span>join now</span>
50
50
</button>
51
+
<p class="text-sm text-gray-500">
52
+
Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>.
53
+
</p>
54
+
55
+
<p id="signup-msg" class="error w-full"></p>
56
+
<p class="text-sm text-gray-500 pt-4">
57
+
By signing up, you agree to our <a href="/terms" class="underline">Terms of Service</a> and <a href="/privacy" class="underline">Privacy Policy</a>.
58
+
</p>
51
59
</form>
52
-
<p class="text-sm text-gray-500">
53
-
Already have an AT Protocol account? <a href="/login" class="underline">Login to Tangled</a>.
54
-
</p>
55
-
56
-
<p id="signup-msg" class="error w-full"></p>
57
60
</main>
58
61
</body>
59
62
</html>
+4
-2
appview/pages/templates/user/starred.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท repos {{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }} ยท repos {{ end }}
2
2
3
3
{{ define "profileContent" }}
4
4
<div id="all-repos" class="md:col-span-8 order-2 md:order-2">
···
13
13
{{ template "user/fragments/repoCard" (list $ . true) }}
14
14
</div>
15
15
{{ else }}
16
-
<p class="px-6 dark:text-white">This user does not have any starred repos yet.</p>
16
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
17
+
<span>This user does not have any starred repos yet.</span>
18
+
</div>
17
19
{{ end }}
18
20
</div>
19
21
{{ end }}
+5
-3
appview/pages/templates/user/strings.html
···
1
-
{{ define "title" }}{{ or .Card.UserHandle .Card.UserDid }} ยท strings {{ end }}
1
+
{{ define "title" }}{{ resolve .Card.UserDid }} ยท strings {{ end }}
2
2
3
3
{{ define "profileContent" }}
4
4
<div id="all-strings" class="md:col-span-8 order-2 md:order-2">
···
13
13
{{ template "singleString" (list $ .) }}
14
14
</div>
15
15
{{ else }}
16
-
<p class="px-6 dark:text-white">This user does not have any strings yet.</p>
16
+
<div class="text-base text-gray-500 flex items-center justify-center italic p-12 border border-gray-200 dark:border-gray-700 rounded">
17
+
<span>This user does not have any strings yet.</span>
18
+
</div>
17
19
{{ end }}
18
20
</div>
19
21
{{ end }}
···
23
25
{{ $s := index . 1 }}
24
26
<div class="py-4 px-6 rounded bg-white dark:bg-gray-800">
25
27
<div class="font-medium dark:text-white flex gap-2 items-center">
26
-
<a href="/strings/{{ or $root.Card.UserHandle $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
28
+
<a href="/strings/{{ resolve $root.Card.UserDid }}/{{ $s.Rkey }}">{{ $s.Filename }}</a>
27
29
</div>
28
30
{{ with $s.Description }}
29
31
<div class="text-gray-600 dark:text-gray-300 text-sm">
+102
-23
appview/pipelines/pipelines.go
···
4
4
"bytes"
5
5
"context"
6
6
"encoding/json"
7
+
"fmt"
7
8
"log/slog"
8
9
"net/http"
9
10
"strings"
10
11
"time"
11
12
13
+
"tangled.org/core/api/tangled"
12
14
"tangled.org/core/appview/config"
13
15
"tangled.org/core/appview/db"
16
+
"tangled.org/core/appview/middleware"
17
+
"tangled.org/core/appview/models"
14
18
"tangled.org/core/appview/oauth"
15
19
"tangled.org/core/appview/pages"
16
20
"tangled.org/core/appview/reporesolver"
17
21
"tangled.org/core/eventconsumer"
18
22
"tangled.org/core/idresolver"
23
+
"tangled.org/core/orm"
19
24
"tangled.org/core/rbac"
20
25
spindlemodel "tangled.org/core/spindle/models"
21
26
···
35
40
logger *slog.Logger
36
41
}
37
42
38
-
func (p *Pipelines) Router() http.Handler {
43
+
func (p *Pipelines) Router(mw *middleware.Middleware) http.Handler {
39
44
r := chi.NewRouter()
40
45
r.Get("/", p.Index)
41
46
r.Get("/{pipeline}/workflow/{workflow}", p.Workflow)
42
47
r.Get("/{pipeline}/workflow/{workflow}/logs", p.Logs)
48
+
r.
49
+
With(mw.RepoPermissionMiddleware("repo:owner")).
50
+
Post("/{pipeline}/workflow/{workflow}/cancel", p.Cancel)
43
51
44
52
return r
45
53
}
···
78
86
return
79
87
}
80
88
81
-
repoInfo := f.RepoInfo(user)
82
-
83
89
ps, err := db.GetPipelineStatuses(
84
90
p.db,
85
91
30,
86
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
87
-
db.FilterEq("repo_name", repoInfo.Name),
88
-
db.FilterEq("knot", repoInfo.Knot),
92
+
orm.FilterEq("repo_owner", f.Did),
93
+
orm.FilterEq("repo_name", f.Name),
94
+
orm.FilterEq("knot", f.Knot),
89
95
)
90
96
if err != nil {
91
97
l.Error("failed to query db", "err", err)
···
94
100
95
101
p.pages.Pipelines(w, pages.PipelinesParams{
96
102
LoggedInUser: user,
97
-
RepoInfo: repoInfo,
103
+
RepoInfo: p.repoResolver.GetRepoInfo(r, user),
98
104
Pipelines: ps,
99
105
})
100
106
}
···
109
115
return
110
116
}
111
117
112
-
repoInfo := f.RepoInfo(user)
113
-
114
118
pipelineId := chi.URLParam(r, "pipeline")
115
119
if pipelineId == "" {
116
120
l.Error("empty pipeline ID")
···
126
130
ps, err := db.GetPipelineStatuses(
127
131
p.db,
128
132
1,
129
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
130
-
db.FilterEq("repo_name", repoInfo.Name),
131
-
db.FilterEq("knot", repoInfo.Knot),
132
-
db.FilterEq("id", pipelineId),
133
+
orm.FilterEq("repo_owner", f.Did),
134
+
orm.FilterEq("repo_name", f.Name),
135
+
orm.FilterEq("knot", f.Knot),
136
+
orm.FilterEq("id", pipelineId),
133
137
)
134
138
if err != nil {
135
139
l.Error("failed to query db", "err", err)
···
145
149
146
150
p.pages.Workflow(w, pages.WorkflowParams{
147
151
LoggedInUser: user,
148
-
RepoInfo: repoInfo,
152
+
RepoInfo: p.repoResolver.GetRepoInfo(r, user),
149
153
Pipeline: singlePipeline,
150
154
Workflow: workflow,
151
155
})
···
176
180
ctx, cancel := context.WithCancel(r.Context())
177
181
defer cancel()
178
182
179
-
user := p.oauth.GetUser(r)
180
183
f, err := p.repoResolver.Resolve(r)
181
184
if err != nil {
182
185
l.Error("failed to get repo and knot", "err", err)
183
186
http.Error(w, "bad repo/knot", http.StatusBadRequest)
184
187
return
185
188
}
186
-
187
-
repoInfo := f.RepoInfo(user)
188
189
189
190
pipelineId := chi.URLParam(r, "pipeline")
190
191
workflow := chi.URLParam(r, "workflow")
···
196
197
ps, err := db.GetPipelineStatuses(
197
198
p.db,
198
199
1,
199
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
200
-
db.FilterEq("repo_name", repoInfo.Name),
201
-
db.FilterEq("knot", repoInfo.Knot),
202
-
db.FilterEq("id", pipelineId),
200
+
orm.FilterEq("repo_owner", f.Did),
201
+
orm.FilterEq("repo_name", f.Name),
202
+
orm.FilterEq("knot", f.Knot),
203
+
orm.FilterEq("id", pipelineId),
203
204
)
204
205
if err != nil || len(ps) != 1 {
205
206
l.Error("pipeline query failed", "err", err, "count", len(ps))
···
208
209
}
209
210
210
211
singlePipeline := ps[0]
211
-
spindle := repoInfo.Spindle
212
-
knot := repoInfo.Knot
212
+
spindle := f.Spindle
213
+
knot := f.Knot
213
214
rkey := singlePipeline.Rkey
214
215
215
216
if spindle == "" || knot == "" || rkey == "" {
···
320
321
}
321
322
}
322
323
}
324
+
}
325
+
326
+
func (p *Pipelines) Cancel(w http.ResponseWriter, r *http.Request) {
327
+
l := p.logger.With("handler", "Cancel")
328
+
329
+
var (
330
+
pipelineId = chi.URLParam(r, "pipeline")
331
+
workflow = chi.URLParam(r, "workflow")
332
+
)
333
+
if pipelineId == "" || workflow == "" {
334
+
http.Error(w, "missing pipeline ID or workflow", http.StatusBadRequest)
335
+
return
336
+
}
337
+
338
+
f, err := p.repoResolver.Resolve(r)
339
+
if err != nil {
340
+
l.Error("failed to get repo and knot", "err", err)
341
+
http.Error(w, "bad repo/knot", http.StatusBadRequest)
342
+
return
343
+
}
344
+
345
+
pipeline, err := func() (models.Pipeline, error) {
346
+
ps, err := db.GetPipelineStatuses(
347
+
p.db,
348
+
1,
349
+
orm.FilterEq("repo_owner", f.Did),
350
+
orm.FilterEq("repo_name", f.Name),
351
+
orm.FilterEq("knot", f.Knot),
352
+
orm.FilterEq("id", pipelineId),
353
+
)
354
+
if err != nil {
355
+
return models.Pipeline{}, err
356
+
}
357
+
if len(ps) != 1 {
358
+
return models.Pipeline{}, fmt.Errorf("wrong pipeline count %d", len(ps))
359
+
}
360
+
return ps[0], nil
361
+
}()
362
+
if err != nil {
363
+
l.Error("pipeline query failed", "err", err)
364
+
http.Error(w, "pipeline not found", http.StatusNotFound)
365
+
}
366
+
var (
367
+
spindle = f.Spindle
368
+
knot = f.Knot
369
+
rkey = pipeline.Rkey
370
+
)
371
+
372
+
if spindle == "" || knot == "" || rkey == "" {
373
+
http.Error(w, "invalid repo info", http.StatusBadRequest)
374
+
return
375
+
}
376
+
377
+
spindleClient, err := p.oauth.ServiceClient(
378
+
r,
379
+
oauth.WithService(f.Spindle),
380
+
oauth.WithLxm(tangled.PipelineCancelPipelineNSID),
381
+
oauth.WithDev(p.config.Core.Dev),
382
+
oauth.WithTimeout(time.Second*30), // workflow cleanup usually takes time
383
+
)
384
+
385
+
err = tangled.PipelineCancelPipeline(
386
+
r.Context(),
387
+
spindleClient,
388
+
&tangled.PipelineCancelPipeline_Input{
389
+
Repo: string(f.RepoAt()),
390
+
Pipeline: pipeline.AtUri().String(),
391
+
Workflow: workflow,
392
+
},
393
+
)
395
+
errorId := "workflow-error"
396
+
if err != nil {
397
+
l.Error("failed to cancel workflow", "err", err)
398
+
p.pages.Notice(w, errorId, "Failed to cancel workflow")
399
+
return
400
+
}
401
+
l.Debug("canceled pipeline", "uri", pipeline.AtUri())
323
402
}
324
403
325
404
// either a message or an error
+3
-2
appview/pulls/opengraph.go
···
13
13
"tangled.org/core/appview/db"
14
14
"tangled.org/core/appview/models"
15
15
"tangled.org/core/appview/ogcard"
16
+
"tangled.org/core/orm"
16
17
"tangled.org/core/patchutil"
17
18
"tangled.org/core/types"
18
19
)
···
276
277
}
277
278
278
279
// Get comment count from database
279
-
comments, err := db.GetPullComments(s.db, db.FilterEq("pull_id", pull.ID))
280
+
comments, err := db.GetPullComments(s.db, orm.FilterEq("pull_id", pull.ID))
280
281
if err != nil {
281
282
log.Printf("failed to get pull comments: %v", err)
282
283
}
···
293
294
filesChanged = niceDiff.Stat.FilesChanged
294
295
}
295
296
296
-
card, err := s.drawPullSummaryCard(pull, &f.Repo, commentCount, diffStats, filesChanged)
297
+
card, err := s.drawPullSummaryCard(pull, f, commentCount, diffStats, filesChanged)
297
298
if err != nil {
298
299
log.Println("failed to draw pull summary card", err)
299
300
http.Error(w, "failed to draw pull summary card", http.StatusInternalServerError)
+146
-142
appview/pulls/pulls.go
···
1
1
package pulls
2
2
3
3
import (
4
+
"context"
4
5
"database/sql"
5
6
"encoding/json"
6
7
"errors"
···
18
19
"tangled.org/core/appview/config"
19
20
"tangled.org/core/appview/db"
20
21
pulls_indexer "tangled.org/core/appview/indexer/pulls"
22
+
"tangled.org/core/appview/mentions"
21
23
"tangled.org/core/appview/models"
22
24
"tangled.org/core/appview/notify"
23
25
"tangled.org/core/appview/oauth"
24
26
"tangled.org/core/appview/pages"
25
27
"tangled.org/core/appview/pages/markup"
28
+
"tangled.org/core/appview/pages/repoinfo"
26
29
"tangled.org/core/appview/reporesolver"
27
30
"tangled.org/core/appview/validator"
28
31
"tangled.org/core/appview/xrpcclient"
29
32
"tangled.org/core/idresolver"
33
+
"tangled.org/core/orm"
30
34
"tangled.org/core/patchutil"
31
35
"tangled.org/core/rbac"
32
36
"tangled.org/core/tid"
···
41
45
)
42
46
43
47
type Pulls struct {
44
-
oauth *oauth.OAuth
45
-
repoResolver *reporesolver.RepoResolver
46
-
pages *pages.Pages
47
-
idResolver *idresolver.Resolver
48
-
db *db.DB
49
-
config *config.Config
50
-
notifier notify.Notifier
51
-
enforcer *rbac.Enforcer
52
-
logger *slog.Logger
53
-
validator *validator.Validator
54
-
indexer *pulls_indexer.Indexer
48
+
oauth *oauth.OAuth
49
+
repoResolver *reporesolver.RepoResolver
50
+
pages *pages.Pages
51
+
idResolver *idresolver.Resolver
52
+
mentionsResolver *mentions.Resolver
53
+
db *db.DB
54
+
config *config.Config
55
+
notifier notify.Notifier
56
+
enforcer *rbac.Enforcer
57
+
logger *slog.Logger
58
+
validator *validator.Validator
59
+
indexer *pulls_indexer.Indexer
55
60
}
56
61
57
62
func New(
···
59
64
repoResolver *reporesolver.RepoResolver,
60
65
pages *pages.Pages,
61
66
resolver *idresolver.Resolver,
67
+
mentionsResolver *mentions.Resolver,
62
68
db *db.DB,
63
69
config *config.Config,
64
70
notifier notify.Notifier,
···
68
74
logger *slog.Logger,
69
75
) *Pulls {
70
76
return &Pulls{
71
-
oauth: oauth,
72
-
repoResolver: repoResolver,
73
-
pages: pages,
74
-
idResolver: resolver,
75
-
db: db,
76
-
config: config,
77
-
notifier: notifier,
78
-
enforcer: enforcer,
79
-
logger: logger,
80
-
validator: validator,
81
-
indexer: indexer,
77
+
oauth: oauth,
78
+
repoResolver: repoResolver,
79
+
pages: pages,
80
+
idResolver: resolver,
81
+
mentionsResolver: mentionsResolver,
82
+
db: db,
83
+
config: config,
84
+
notifier: notifier,
85
+
enforcer: enforcer,
86
+
logger: logger,
87
+
validator: validator,
88
+
indexer: indexer,
82
89
}
83
90
}
84
91
···
123
130
124
131
s.pages.PullActionsFragment(w, pages.PullActionsParams{
125
132
LoggedInUser: user,
126
-
RepoInfo: f.RepoInfo(user),
133
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
127
134
Pull: pull,
128
135
RoundNumber: roundNumber,
129
136
MergeCheck: mergeCheckResponse,
···
150
157
return
151
158
}
152
159
160
+
backlinks, err := db.GetBacklinks(s.db, pull.AtUri())
161
+
if err != nil {
162
+
log.Println("failed to get pull backlinks", err)
163
+
s.pages.Notice(w, "pull-error", "Failed to get pull. Try again later.")
164
+
return
165
+
}
166
+
153
167
// can be nil if this pull is not stacked
154
168
stack, _ := r.Context().Value("stack").(models.Stack)
155
169
abandonedPulls, _ := r.Context().Value("abandonedPulls").([]*models.Pull)
···
160
174
if user != nil && user.Did == pull.OwnerDid {
161
175
resubmitResult = s.resubmitCheck(r, f, pull, stack)
162
176
}
163
-
164
-
repoInfo := f.RepoInfo(user)
165
177
166
178
m := make(map[string]models.Pipeline)
167
179
···
179
191
ps, err := db.GetPipelineStatuses(
180
192
s.db,
181
193
len(shas),
182
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
183
-
db.FilterEq("repo_name", repoInfo.Name),
184
-
db.FilterEq("knot", repoInfo.Knot),
185
-
db.FilterIn("sha", shas),
194
+
orm.FilterEq("repo_owner", f.Did),
195
+
orm.FilterEq("repo_name", f.Name),
196
+
orm.FilterEq("knot", f.Knot),
197
+
orm.FilterIn("sha", shas),
186
198
)
187
199
if err != nil {
188
200
log.Printf("failed to fetch pipeline statuses: %s", err)
···
206
218
207
219
labelDefs, err := db.GetLabelDefinitions(
208
220
s.db,
209
-
db.FilterIn("at_uri", f.Repo.Labels),
210
-
db.FilterContains("scope", tangled.RepoPullNSID),
221
+
orm.FilterIn("at_uri", f.Labels),
222
+
orm.FilterContains("scope", tangled.RepoPullNSID),
211
223
)
212
224
if err != nil {
213
225
log.Println("failed to fetch labels", err)
···
222
234
223
235
s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{
224
236
LoggedInUser: user,
225
-
RepoInfo: repoInfo,
237
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
226
238
Pull: pull,
227
239
Stack: stack,
228
240
AbandonedPulls: abandonedPulls,
241
+
Backlinks: backlinks,
229
242
BranchDeleteStatus: branchDeleteStatus,
230
243
MergeCheck: mergeCheckResponse,
231
244
ResubmitCheck: resubmitResult,
···
239
252
})
240
253
}
241
254
242
-
func (s *Pulls) mergeCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse {
255
+
func (s *Pulls) mergeCheck(r *http.Request, f *models.Repo, pull *models.Pull, stack models.Stack) types.MergeCheckResponse {
243
256
if pull.State == models.PullMerged {
244
257
return types.MergeCheckResponse{}
245
258
}
···
268
281
r.Context(),
269
282
&xrpcc,
270
283
&tangled.RepoMergeCheck_Input{
271
-
Did: f.OwnerDid(),
284
+
Did: f.Did,
272
285
Name: f.Name,
273
286
Branch: pull.TargetBranch,
274
287
Patch: patch,
···
306
319
return result
307
320
}
308
321
309
-
func (s *Pulls) branchDeleteStatus(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull) *models.BranchDeleteStatus {
322
+
func (s *Pulls) branchDeleteStatus(r *http.Request, repo *models.Repo, pull *models.Pull) *models.BranchDeleteStatus {
310
323
if pull.State != models.PullMerged {
311
324
return nil
312
325
}
···
317
330
}
318
331
319
332
var branch string
320
-
var repo *models.Repo
321
333
// check if the branch exists
322
334
// NOTE: appview could cache branches/tags etc. for every repo by listening for gitRefUpdates
323
335
if pull.IsBranchBased() {
324
336
branch = pull.PullSource.Branch
325
-
repo = &f.Repo
326
337
} else if pull.IsForkBased() {
327
338
branch = pull.PullSource.Branch
328
339
repo = pull.PullSource.Repo
···
361
372
}
362
373
}
363
374
364
-
func (s *Pulls) resubmitCheck(r *http.Request, f *reporesolver.ResolvedRepo, pull *models.Pull, stack models.Stack) pages.ResubmitResult {
375
+
func (s *Pulls) resubmitCheck(r *http.Request, repo *models.Repo, pull *models.Pull, stack models.Stack) pages.ResubmitResult {
365
376
if pull.State == models.PullMerged || pull.State == models.PullDeleted || pull.PullSource == nil {
366
377
return pages.Unknown
367
378
}
···
381
392
repoName = sourceRepo.Name
382
393
} else {
383
394
// pulls within the same repo
384
-
knot = f.Knot
385
-
ownerDid = f.OwnerDid()
386
-
repoName = f.Name
395
+
knot = repo.Knot
396
+
ownerDid = repo.Did
397
+
repoName = repo.Name
387
398
}
388
399
389
400
scheme := "http"
···
395
406
Host: host,
396
407
}
397
408
398
-
repo := fmt.Sprintf("%s/%s", ownerDid, repoName)
399
-
branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, repo)
409
+
didSlashName := fmt.Sprintf("%s/%s", ownerDid, repoName)
410
+
branchResp, err := tangled.RepoBranch(r.Context(), xrpcc, pull.PullSource.Branch, didSlashName)
400
411
if err != nil {
401
412
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
402
413
log.Println("failed to call XRPC repo.branches", xrpcerr)
···
424
435
425
436
func (s *Pulls) RepoPullPatch(w http.ResponseWriter, r *http.Request) {
426
437
user := s.oauth.GetUser(r)
427
-
f, err := s.repoResolver.Resolve(r)
428
-
if err != nil {
429
-
log.Println("failed to get repo and knot", err)
430
-
return
431
-
}
432
438
433
439
var diffOpts types.DiffOpts
434
440
if d := r.URL.Query().Get("diff"); d == "split" {
···
457
463
458
464
s.pages.RepoPullPatchPage(w, pages.RepoPullPatchParams{
459
465
LoggedInUser: user,
460
-
RepoInfo: f.RepoInfo(user),
466
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
461
467
Pull: pull,
462
468
Stack: stack,
463
469
Round: roundIdInt,
···
471
477
func (s *Pulls) RepoPullInterdiff(w http.ResponseWriter, r *http.Request) {
472
478
user := s.oauth.GetUser(r)
473
479
474
-
f, err := s.repoResolver.Resolve(r)
475
-
if err != nil {
476
-
log.Println("failed to get repo and knot", err)
477
-
return
478
-
}
479
-
480
480
var diffOpts types.DiffOpts
481
481
if d := r.URL.Query().Get("diff"); d == "split" {
482
482
diffOpts.Split = true
···
521
521
522
522
s.pages.RepoPullInterdiffPage(w, pages.RepoPullInterdiffParams{
523
523
LoggedInUser: s.oauth.GetUser(r),
524
-
RepoInfo: f.RepoInfo(user),
524
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
525
525
Pull: pull,
526
526
Round: roundIdInt,
527
527
Interdiff: interdiff,
···
598
598
599
599
pulls, err := db.GetPulls(
600
600
s.db,
601
-
db.FilterIn("id", ids),
601
+
orm.FilterIn("id", ids),
602
602
)
603
603
if err != nil {
604
604
log.Println("failed to get pulls", err)
···
646
646
}
647
647
pulls = pulls[:n]
648
648
649
-
repoInfo := f.RepoInfo(user)
650
649
ps, err := db.GetPipelineStatuses(
651
650
s.db,
652
651
len(shas),
653
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
654
-
db.FilterEq("repo_name", repoInfo.Name),
655
-
db.FilterEq("knot", repoInfo.Knot),
656
-
db.FilterIn("sha", shas),
652
+
orm.FilterEq("repo_owner", f.Did),
653
+
orm.FilterEq("repo_name", f.Name),
654
+
orm.FilterEq("knot", f.Knot),
655
+
orm.FilterIn("sha", shas),
657
656
)
658
657
if err != nil {
659
658
log.Printf("failed to fetch pipeline statuses: %s", err)
···
666
665
667
666
labelDefs, err := db.GetLabelDefinitions(
668
667
s.db,
669
-
db.FilterIn("at_uri", f.Repo.Labels),
670
-
db.FilterContains("scope", tangled.RepoPullNSID),
668
+
orm.FilterIn("at_uri", f.Labels),
669
+
orm.FilterContains("scope", tangled.RepoPullNSID),
671
670
)
672
671
if err != nil {
673
672
log.Println("failed to fetch labels", err)
···
682
681
683
682
s.pages.RepoPulls(w, pages.RepoPullsParams{
684
683
LoggedInUser: s.oauth.GetUser(r),
685
-
RepoInfo: f.RepoInfo(user),
684
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
686
685
Pulls: pulls,
687
686
LabelDefs: defs,
688
687
FilteringBy: state,
···
693
692
}
694
693
695
694
func (s *Pulls) PullComment(w http.ResponseWriter, r *http.Request) {
696
-
l := s.logger.With("handler", "PullComment")
697
695
user := s.oauth.GetUser(r)
698
696
f, err := s.repoResolver.Resolve(r)
699
697
if err != nil {
···
720
718
case http.MethodGet:
721
719
s.pages.PullNewCommentFragment(w, pages.PullNewCommentParams{
722
720
LoggedInUser: user,
723
-
RepoInfo: f.RepoInfo(user),
721
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
724
722
Pull: pull,
725
723
RoundNumber: roundNumber,
726
724
})
···
731
729
s.pages.Notice(w, "pull", "Comment body is required")
732
730
return
733
731
}
732
+
733
+
mentions, references := s.mentionsResolver.Resolve(r.Context(), body)
734
734
735
735
// Start a transaction
736
736
tx, err := s.db.BeginTx(r.Context(), nil)
···
774
774
Body: body,
775
775
CommentAt: atResp.Uri,
776
776
SubmissionId: pull.Submissions[roundNumber].ID,
777
+
Mentions: mentions,
778
+
References: references,
777
779
}
778
780
779
781
// Create the pull comment in the database with the commentAt field
···
791
793
return
792
794
}
793
795
794
-
rawMentions := markup.FindUserMentions(comment.Body)
795
-
idents := s.idResolver.ResolveIdents(r.Context(), rawMentions)
796
-
l.Debug("parsed mentions", "raw", rawMentions, "idents", idents)
797
-
var mentions []syntax.DID
798
-
for _, ident := range idents {
799
-
if ident != nil && !ident.Handle.IsInvalidHandle() {
800
-
mentions = append(mentions, ident.DID)
801
-
}
802
-
}
803
796
s.notifier.NewPullComment(r.Context(), comment, mentions)
804
797
805
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", f.OwnerSlashRepo(), pull.PullId, commentId))
798
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
799
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d#comment-%d", ownerSlashRepo, pull.PullId, commentId))
806
800
return
807
801
}
808
802
}
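
The comment handler above now delegates mention and reference extraction to a shared s.mentionsResolver.Resolve(ctx, body) call instead of parsing @-handles inline with markup.FindUserMentions and resolving them per handler. The resolver's implementation is not part of this diff; the following is only a rough, self-contained sketch of the shape such a resolver could take, with illustrative regular expressions and a hypothetical Resolver type.

package mentions

import (
	"context"
	"regexp"
)

// Resolver extracts @-mentions and issue/PR references from free-form text.
// The patterns below are illustrative only; the real resolver presumably also
// resolves handles to DIDs via an identity resolver.
type Resolver struct {
	mentionRe   *regexp.Regexp
	referenceRe *regexp.Regexp
}

func NewResolver() *Resolver {
	return &Resolver{
		// e.g. "@alice.example.com"
		mentionRe: regexp.MustCompile(`@([a-zA-Z0-9.-]+)`),
		// e.g. "owner/repo#12" style references; purely hypothetical
		referenceRe: regexp.MustCompile(`([\w.:-]+/[\w.-]+)#(\d+)`),
	}
}

// Resolve returns the raw mention handles and reference strings found in body.
func (r *Resolver) Resolve(ctx context.Context, body string) (mentions, references []string) {
	for _, m := range r.mentionRe.FindAllStringSubmatch(body, -1) {
		mentions = append(mentions, m[1])
	}
	for _, m := range r.referenceRe.FindAllStringSubmatch(body, -1) {
		references = append(references, m[0])
	}
	return mentions, references
}
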
···
826
820
Host: host,
827
821
}
828
822
829
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
823
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
830
824
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
831
825
if err != nil {
832
826
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
853
847
854
848
s.pages.RepoNewPull(w, pages.RepoNewPullParams{
855
849
LoggedInUser: user,
856
-
RepoInfo: f.RepoInfo(user),
850
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
857
851
Branches: result.Branches,
858
852
Strategy: strategy,
859
853
SourceBranch: sourceBranch,
···
876
870
}
877
871
878
872
// Determine PR type based on input parameters
879
-
isPushAllowed := f.RepoInfo(user).Roles.IsPushAllowed()
873
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
874
+
isPushAllowed := roles.IsPushAllowed()
880
875
isBranchBased := isPushAllowed && sourceBranch != "" && fromFork == ""
881
876
isForkBased := fromFork != "" && sourceBranch != ""
882
877
isPatchBased := patch != "" && !isBranchBased && !isForkBased
···
974
969
func (s *Pulls) handleBranchBasedPull(
975
970
w http.ResponseWriter,
976
971
r *http.Request,
977
-
f *reporesolver.ResolvedRepo,
972
+
repo *models.Repo,
978
973
user *oauth.User,
979
974
title,
980
975
body,
···
986
981
if !s.config.Core.Dev {
987
982
scheme = "https"
988
983
}
989
-
host := fmt.Sprintf("%s://%s", scheme, f.Knot)
984
+
host := fmt.Sprintf("%s://%s", scheme, repo.Knot)
990
985
xrpcc := &indigoxrpc.Client{
991
986
Host: host,
992
987
}
993
988
994
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
995
-
xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, targetBranch, sourceBranch)
989
+
didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
990
+
xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, didSlashRepo, targetBranch, sourceBranch)
996
991
if err != nil {
997
992
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
998
993
log.Println("failed to call XRPC repo.compare", xrpcerr)
···
1029
1024
Sha: comparison.Rev2,
1030
1025
}
1031
1026
1032
-
s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
1027
+
s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
1033
1028
}
1034
1029
1035
-
func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) {
1030
+
func (s *Pulls) handlePatchBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, title, body, targetBranch, patch string, isStacked bool) {
1036
1031
if err := s.validator.ValidatePatch(&patch); err != nil {
1037
1032
s.logger.Error("patch validation failed", "err", err)
1038
1033
s.pages.Notice(w, "pull", "Invalid patch format. Please provide a valid diff.")
1039
1034
return
1040
1035
}
1041
1036
1042
-
s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked)
1037
+
s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, "", "", nil, nil, isStacked)
1043
1038
}
1044
1039
1045
-
func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, f *reporesolver.ResolvedRepo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) {
1040
+
func (s *Pulls) handleForkBasedPull(w http.ResponseWriter, r *http.Request, repo *models.Repo, user *oauth.User, forkRepo string, title, body, targetBranch, sourceBranch string, isStacked bool) {
1046
1041
repoString := strings.SplitN(forkRepo, "/", 2)
1047
1042
forkOwnerDid := repoString[0]
1048
1043
repoName := repoString[1]
···
1144
1139
Sha: sourceRev,
1145
1140
}
1146
1141
1147
-
s.createPullRequest(w, r, f, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
1142
+
s.createPullRequest(w, r, repo, user, title, body, targetBranch, patch, combined, sourceRev, pullSource, recordPullSource, isStacked)
1148
1143
}
1149
1144
1150
1145
func (s *Pulls) createPullRequest(
1151
1146
w http.ResponseWriter,
1152
1147
r *http.Request,
1153
-
f *reporesolver.ResolvedRepo,
1148
+
repo *models.Repo,
1154
1149
user *oauth.User,
1155
1150
title, body, targetBranch string,
1156
1151
patch string,
···
1165
1160
s.createStackedPullRequest(
1166
1161
w,
1167
1162
r,
1168
-
f,
1163
+
repo,
1169
1164
user,
1170
1165
targetBranch,
1171
1166
patch,
···
1211
1206
}
1212
1207
}
1213
1208
1209
+
mentions, references := s.mentionsResolver.Resolve(r.Context(), body)
1210
+
1214
1211
rkey := tid.TID()
1215
1212
initialSubmission := models.PullSubmission{
1216
1213
Patch: patch,
···
1222
1219
Body: body,
1223
1220
TargetBranch: targetBranch,
1224
1221
OwnerDid: user.Did,
1225
-
RepoAt: f.RepoAt(),
1222
+
RepoAt: repo.RepoAt(),
1226
1223
Rkey: rkey,
1224
+
Mentions: mentions,
1225
+
References: references,
1227
1226
Submissions: []*models.PullSubmission{
1228
1227
&initialSubmission,
1229
1228
},
···
1235
1234
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1236
1235
return
1237
1236
}
1238
-
pullId, err := db.NextPullId(tx, f.RepoAt())
1237
+
pullId, err := db.NextPullId(tx, repo.RepoAt())
1239
1238
if err != nil {
1240
1239
log.Println("failed to get pull id", err)
1241
1240
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
···
1250
1249
Val: &tangled.RepoPull{
1251
1250
Title: title,
1252
1251
Target: &tangled.RepoPull_Target{
1253
-
Repo: string(f.RepoAt()),
1252
+
Repo: string(repo.RepoAt()),
1254
1253
Branch: targetBranch,
1255
1254
},
1256
1255
Patch: patch,
···
1273
1272
1274
1273
s.notifier.NewPull(r.Context(), pull)
1275
1274
1276
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pullId))
1275
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
1276
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pullId))
1277
1277
}
1278
1278
1279
1279
func (s *Pulls) createStackedPullRequest(
1280
1280
w http.ResponseWriter,
1281
1281
r *http.Request,
1282
-
f *reporesolver.ResolvedRepo,
1282
+
repo *models.Repo,
1283
1283
user *oauth.User,
1284
1284
targetBranch string,
1285
1285
patch string,
···
1311
1311
1312
1312
// build a stack out of this patch
1313
1313
stackId := uuid.New()
1314
-
stack, err := newStack(f, user, targetBranch, patch, pullSource, stackId.String())
1314
+
stack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pullSource, stackId.String())
1315
1315
if err != nil {
1316
1316
log.Println("failed to create stack", err)
1317
1317
s.pages.Notice(w, "pull", fmt.Sprintf("Failed to create stack: %v", err))
···
1366
1366
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1367
1367
return
1368
1368
}
1369
+
1369
1370
}
1370
1371
1371
1372
if err = tx.Commit(); err != nil {
···
1374
1375
return
1375
1376
}
1376
1377
1377
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", f.OwnerSlashRepo()))
1378
+
// notify about each pull
1379
+
//
1380
+
// this is performed after tx.Commit, because it could result in a locked DB otherwise
1381
+
for _, p := range stack {
1382
+
s.notifier.NewPull(r.Context(), p)
1383
+
}
1384
+
1385
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
1386
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls", ownerSlashRepo))
1378
1387
}
1379
1388
1380
1389
func (s *Pulls) ValidatePatch(w http.ResponseWriter, r *http.Request) {
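
The stacked-pull path above intentionally fires s.notifier.NewPull only after tx.Commit, since running notification side effects while the write transaction is still open can leave the database locked. A minimal stand-alone sketch of that ordering, assuming a plain *sql.DB, a hypothetical pulls table, and a notify callback as stand-ins for the appview's real types:

package example

import (
	"context"
	"database/sql"
)

// createThenNotify writes rows inside one transaction and runs side effects
// (notifications) only after the transaction has committed, so the notifier
// never contends with the still-open write transaction.
func createThenNotify(ctx context.Context, db *sql.DB, titles []string, notify func(string)) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit has succeeded

	for _, t := range titles {
		if _, err := tx.ExecContext(ctx, `INSERT INTO pulls (title) VALUES (?)`, t); err != nil {
			return err
		}
	}

	if err := tx.Commit(); err != nil {
		return err
	}

	// side effects strictly after commit
	for _, t := range titles {
		notify(t)
	}
	return nil
}
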
···
1405
1414
1406
1415
func (s *Pulls) PatchUploadFragment(w http.ResponseWriter, r *http.Request) {
1407
1416
user := s.oauth.GetUser(r)
1408
-
f, err := s.repoResolver.Resolve(r)
1409
-
if err != nil {
1410
-
log.Println("failed to get repo and knot", err)
1411
-
return
1412
-
}
1413
1417
1414
1418
s.pages.PullPatchUploadFragment(w, pages.PullPatchUploadParams{
1415
-
RepoInfo: f.RepoInfo(user),
1419
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
1416
1420
})
1417
1421
}
1418
1422
···
1433
1437
Host: host,
1434
1438
}
1435
1439
1436
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
1440
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
1437
1441
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
1438
1442
if err != nil {
1439
1443
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
1466
1470
}
1467
1471
1468
1472
s.pages.PullCompareBranchesFragment(w, pages.PullCompareBranchesParams{
1469
-
RepoInfo: f.RepoInfo(user),
1473
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
1470
1474
Branches: withoutDefault,
1471
1475
})
1472
1476
}
1473
1477
1474
1478
func (s *Pulls) CompareForksFragment(w http.ResponseWriter, r *http.Request) {
1475
1479
user := s.oauth.GetUser(r)
1476
-
f, err := s.repoResolver.Resolve(r)
1477
-
if err != nil {
1478
-
log.Println("failed to get repo and knot", err)
1479
-
return
1480
-
}
1481
1480
1482
1481
forks, err := db.GetForksByDid(s.db, user.Did)
1483
1482
if err != nil {
···
1486
1485
}
1487
1486
1488
1487
s.pages.PullCompareForkFragment(w, pages.PullCompareForkParams{
1489
-
RepoInfo: f.RepoInfo(user),
1488
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
1490
1489
Forks: forks,
1491
1490
Selected: r.URL.Query().Get("fork"),
1492
1491
})
···
1508
1507
// fork repo
1509
1508
repo, err := db.GetRepo(
1510
1509
s.db,
1511
-
db.FilterEq("did", forkOwnerDid),
1512
-
db.FilterEq("name", forkName),
1510
+
orm.FilterEq("did", forkOwnerDid),
1511
+
orm.FilterEq("name", forkName),
1513
1512
)
1514
1513
if err != nil {
1515
1514
log.Println("failed to get repo", "did", forkOwnerDid, "name", forkName, "err", err)
···
1554
1553
Host: targetHost,
1555
1554
}
1556
1555
1557
-
targetRepo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
1556
+
targetRepo := fmt.Sprintf("%s/%s", f.Did, f.Name)
1558
1557
targetXrpcBytes, err := tangled.RepoBranches(r.Context(), targetXrpcc, "", 0, targetRepo)
1559
1558
if err != nil {
1560
1559
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
1579
1578
})
1580
1579
1581
1580
s.pages.PullCompareForkBranchesFragment(w, pages.PullCompareForkBranchesParams{
1582
-
RepoInfo: f.RepoInfo(user),
1581
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
1583
1582
SourceBranches: sourceBranches.Branches,
1584
1583
TargetBranches: targetBranches.Branches,
1585
1584
})
···
1587
1586
1588
1587
func (s *Pulls) ResubmitPull(w http.ResponseWriter, r *http.Request) {
1589
1588
user := s.oauth.GetUser(r)
1590
-
f, err := s.repoResolver.Resolve(r)
1591
-
if err != nil {
1592
-
log.Println("failed to get repo and knot", err)
1593
-
return
1594
-
}
1595
1589
1596
1590
pull, ok := r.Context().Value("pull").(*models.Pull)
1597
1591
if !ok {
···
1603
1597
switch r.Method {
1604
1598
case http.MethodGet:
1605
1599
s.pages.PullResubmitFragment(w, pages.PullResubmitParams{
1606
-
RepoInfo: f.RepoInfo(user),
1600
+
RepoInfo: s.repoResolver.GetRepoInfo(r, user),
1607
1601
Pull: pull,
1608
1602
})
1609
1603
return
···
1670
1664
return
1671
1665
}
1672
1666
1673
-
if !f.RepoInfo(user).Roles.IsPushAllowed() {
1667
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
1668
+
if !roles.IsPushAllowed() {
1674
1669
log.Println("unauthorized user")
1675
1670
w.WriteHeader(http.StatusUnauthorized)
1676
1671
return
···
1685
1680
Host: host,
1686
1681
}
1687
1682
1688
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
1683
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
1689
1684
xrpcBytes, err := tangled.RepoCompare(r.Context(), xrpcc, repo, pull.TargetBranch, pull.PullSource.Branch)
1690
1685
if err != nil {
1691
1686
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
1812
1807
func (s *Pulls) resubmitPullHelper(
1813
1808
w http.ResponseWriter,
1814
1809
r *http.Request,
1815
-
f *reporesolver.ResolvedRepo,
1810
+
repo *models.Repo,
1816
1811
user *oauth.User,
1817
1812
pull *models.Pull,
1818
1813
patch string,
···
1821
1816
) {
1822
1817
if pull.IsStacked() {
1823
1818
log.Println("resubmitting stacked PR")
1824
-
s.resubmitStackedPullHelper(w, r, f, user, pull, patch, pull.StackId)
1819
+
s.resubmitStackedPullHelper(w, r, repo, user, pull, patch, pull.StackId)
1825
1820
return
1826
1821
}
1827
1822
···
1901
1896
Val: &tangled.RepoPull{
1902
1897
Title: pull.Title,
1903
1898
Target: &tangled.RepoPull_Target{
1904
-
Repo: string(f.RepoAt()),
1899
+
Repo: string(repo.RepoAt()),
1905
1900
Branch: pull.TargetBranch,
1906
1901
},
1907
1902
Patch: patch, // new patch
···
1922
1917
return
1923
1918
}
1924
1919
1925
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
1920
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
1921
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
1926
1922
}
1927
1923
1928
1924
func (s *Pulls) resubmitStackedPullHelper(
1929
1925
w http.ResponseWriter,
1930
1926
r *http.Request,
1931
-
f *reporesolver.ResolvedRepo,
1927
+
repo *models.Repo,
1932
1928
user *oauth.User,
1933
1929
pull *models.Pull,
1934
1930
patch string,
···
1937
1933
targetBranch := pull.TargetBranch
1938
1934
1939
1935
origStack, _ := r.Context().Value("stack").(models.Stack)
1940
-
newStack, err := newStack(f, user, targetBranch, patch, pull.PullSource, stackId)
1936
+
newStack, err := s.newStack(r.Context(), repo, user, targetBranch, patch, pull.PullSource, stackId)
1941
1937
if err != nil {
1942
1938
log.Println("failed to create resubmitted stack", err)
1943
1939
s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···
2079
2075
tx,
2080
2076
p.ParentChangeId,
2081
2077
// these should be enough filters to be unique per-stack
2082
-
db.FilterEq("repo_at", p.RepoAt.String()),
2083
-
db.FilterEq("owner_did", p.OwnerDid),
2084
-
db.FilterEq("change_id", p.ChangeId),
2078
+
orm.FilterEq("repo_at", p.RepoAt.String()),
2079
+
orm.FilterEq("owner_did", p.OwnerDid),
2080
+
orm.FilterEq("change_id", p.ChangeId),
2085
2081
)
2086
2082
2087
2083
if err != nil {
···
2115
2111
return
2116
2112
}
2117
2113
2118
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
2114
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, repo)
2115
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
2119
2116
}
2120
2117
2121
2118
func (s *Pulls) MergePull(w http.ResponseWriter, r *http.Request) {
···
2168
2165
2169
2166
authorName := ident.Handle.String()
2170
2167
mergeInput := &tangled.RepoMerge_Input{
2171
-
Did: f.OwnerDid(),
2168
+
Did: f.Did,
2172
2169
Name: f.Name,
2173
2170
Branch: pull.TargetBranch,
2174
2171
Patch: patch,
···
2233
2230
s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p)
2234
2231
}
2235
2232
2236
-
s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId))
2233
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
2234
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
2237
2235
}
2238
2236
2239
2237
func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) {
···
2253
2251
}
2254
2252
2255
2253
// auth filter: only owner or collaborators can close
2256
-
roles := f.RolesInRepo(user)
2254
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
2257
2255
isOwner := roles.IsOwner()
2258
2256
isCollaborator := roles.IsCollaborator()
2259
2257
isPullAuthor := user.Did == pull.OwnerDid
···
2305
2303
s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p)
2306
2304
}
2307
2305
2308
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
2306
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
2307
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
2309
2308
}
2310
2309
2311
2310
func (s *Pulls) ReopenPull(w http.ResponseWriter, r *http.Request) {
···
2326
2325
}
2327
2326
2328
2327
// auth filter: only owner or collaborators can close
2329
-
roles := f.RolesInRepo(user)
2328
+
roles := repoinfo.RolesInRepo{Roles: s.enforcer.GetPermissionsInRepo(user.Did, f.Knot, f.DidSlashRepo())}
2330
2329
isOwner := roles.IsOwner()
2331
2330
isCollaborator := roles.IsCollaborator()
2332
2331
isPullAuthor := user.Did == pull.OwnerDid
···
2378
2377
s.notifier.NewPullState(r.Context(), syntax.DID(user.Did), p)
2379
2378
}
2380
2379
2381
-
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", f.OwnerSlashRepo(), pull.PullId))
2380
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
2381
+
s.pages.HxLocation(w, fmt.Sprintf("/%s/pulls/%d", ownerSlashRepo, pull.PullId))
2382
2382
}
2383
2383
2384
-
func newStack(f *reporesolver.ResolvedRepo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) {
2384
+
func (s *Pulls) newStack(ctx context.Context, repo *models.Repo, user *oauth.User, targetBranch, patch string, pullSource *models.PullSource, stackId string) (models.Stack, error) {
2385
2385
formatPatches, err := patchutil.ExtractPatches(patch)
2386
2386
if err != nil {
2387
2387
return nil, fmt.Errorf("Failed to extract patches: %v", err)
···
2406
2406
body := fp.Body
2407
2407
rkey := tid.TID()
2408
2408
2409
+
mentions, references := s.mentionsResolver.Resolve(ctx, body)
2410
+
2409
2411
initialSubmission := models.PullSubmission{
2410
2412
Patch: fp.Raw,
2411
2413
SourceRev: fp.SHA,
···
2416
2418
Body: body,
2417
2419
TargetBranch: targetBranch,
2418
2420
OwnerDid: user.Did,
2419
-
RepoAt: f.RepoAt(),
2421
+
RepoAt: repo.RepoAt(),
2420
2422
Rkey: rkey,
2423
+
Mentions: mentions,
2424
+
References: references,
2421
2425
Submissions: []*models.PullSubmission{
2422
2426
&initialSubmission,
2423
2427
},
+2
-2
appview/repo/archive.go
···
31
31
xrpcc := &indigoxrpc.Client{
32
32
Host: host,
33
33
}
34
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
35
-
archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, repo)
34
+
didSlashRepo := f.DidSlashRepo()
35
+
archiveBytes, err := tangled.RepoArchive(r.Context(), xrpcc, "tar.gz", "", ref, didSlashRepo)
36
36
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
37
37
l.Error("failed to call XRPC repo.archive", "err", xrpcerr)
38
38
rp.pages.Error503(w)
+21
-14
appview/repo/artifact.go
···
14
14
"tangled.org/core/appview/db"
15
15
"tangled.org/core/appview/models"
16
16
"tangled.org/core/appview/pages"
17
-
"tangled.org/core/appview/reporesolver"
18
17
"tangled.org/core/appview/xrpcclient"
18
+
"tangled.org/core/orm"
19
19
"tangled.org/core/tid"
20
20
"tangled.org/core/types"
21
21
···
131
131
132
132
rp.pages.RepoArtifactFragment(w, pages.RepoArtifactParams{
133
133
LoggedInUser: user,
134
-
RepoInfo: f.RepoInfo(user),
134
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
135
135
Artifact: artifact,
136
136
})
137
137
}
···
156
156
157
157
artifacts, err := db.GetArtifact(
158
158
rp.db,
159
-
db.FilterEq("repo_at", f.RepoAt()),
160
-
db.FilterEq("tag", tag.Tag.Hash[:]),
161
-
db.FilterEq("name", filename),
159
+
orm.FilterEq("repo_at", f.RepoAt()),
160
+
orm.FilterEq("tag", tag.Tag.Hash[:]),
161
+
orm.FilterEq("name", filename),
162
162
)
163
163
if err != nil {
164
164
log.Println("failed to get artifacts", err)
···
174
174
175
175
artifact := artifacts[0]
176
176
177
-
ownerPds := f.OwnerId.PDSEndpoint()
177
+
ownerId, err := rp.idResolver.ResolveIdent(r.Context(), f.Did)
178
+
if err != nil {
179
+
log.Println("failed to resolve repo owner did", f.Did, err)
180
+
http.Error(w, "repository owner not found", http.StatusNotFound)
181
+
return
182
+
}
183
+
184
+
ownerPds := ownerId.PDSEndpoint()
178
185
url, _ := url.Parse(fmt.Sprintf("%s/xrpc/com.atproto.sync.getBlob", ownerPds))
179
186
q := url.Query()
180
187
q.Set("cid", artifact.BlobCid.String())
···
228
235
229
236
artifacts, err := db.GetArtifact(
230
237
rp.db,
231
-
db.FilterEq("repo_at", f.RepoAt()),
232
-
db.FilterEq("tag", tag[:]),
233
-
db.FilterEq("name", filename),
238
+
orm.FilterEq("repo_at", f.RepoAt()),
239
+
orm.FilterEq("tag", tag[:]),
240
+
orm.FilterEq("name", filename),
234
241
)
235
242
if err != nil {
236
243
log.Println("failed to get artifacts", err)
···
270
277
defer tx.Rollback()
271
278
272
279
err = db.DeleteArtifact(tx,
273
-
db.FilterEq("repo_at", f.RepoAt()),
274
-
db.FilterEq("tag", artifact.Tag[:]),
275
-
db.FilterEq("name", filename),
280
+
orm.FilterEq("repo_at", f.RepoAt()),
281
+
orm.FilterEq("tag", artifact.Tag[:]),
282
+
orm.FilterEq("name", filename),
276
283
)
277
284
if err != nil {
278
285
log.Println("failed to remove artifact record from db", err)
···
290
297
w.Write([]byte{})
291
298
}
292
299
293
-
func (rp *Repo) resolveTag(ctx context.Context, f *reporesolver.ResolvedRepo, tagParam string) (*types.TagReference, error) {
300
+
func (rp *Repo) resolveTag(ctx context.Context, f *models.Repo, tagParam string) (*types.TagReference, error) {
294
301
tagParam, err := url.QueryUnescape(tagParam)
295
302
if err != nil {
296
303
return nil, err
···
305
312
Host: host,
306
313
}
307
314
308
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
315
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
309
316
xrpcBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo)
310
317
if err != nil {
311
318
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
+11
-9
appview/repo/blob.go
···
54
54
xrpcc := &indigoxrpc.Client{
55
55
Host: host,
56
56
}
57
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name)
57
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
58
58
resp, err := tangled.RepoBlob(r.Context(), xrpcc, filePath, false, ref, repo)
59
59
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
60
60
l.Error("failed to call XRPC repo.blob", "err", xrpcerr)
···
62
62
return
63
63
}
64
64
65
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
66
+
65
67
// Use XRPC response directly instead of converting to internal types
66
68
var breadcrumbs [][]string
67
-
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))})
69
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))})
68
70
if filePath != "" {
69
71
for idx, elem := range strings.Split(filePath, "/") {
70
72
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))})
···
78
80
79
81
rp.pages.RepoBlob(w, pages.RepoBlobParams{
80
82
LoggedInUser: user,
81
-
RepoInfo: f.RepoInfo(user),
83
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
82
84
BreadCrumbs: breadcrumbs,
83
85
BlobView: blobView,
84
86
RepoBlob_Output: resp,
···
105
107
if !rp.config.Core.Dev {
106
108
scheme = "https"
107
109
}
108
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Repo.Name)
110
+
repo := f.DidSlashRepo()
109
111
baseURL := &url.URL{
110
112
Scheme: scheme,
111
113
Host: f.Knot,
···
176
178
}
177
179
178
180
// NewBlobView creates a BlobView from the XRPC response
179
-
func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, f *reporesolver.ResolvedRepo, ref, filePath string, queryParams url.Values) models.BlobView {
181
+
func NewBlobView(resp *tangled.RepoBlob_Output, config *config.Config, repo *models.Repo, ref, filePath string, queryParams url.Values) models.BlobView {
180
182
view := models.BlobView{
181
183
Contents: "",
182
184
Lines: 0,
···
198
200
199
201
// Determine if binary
200
202
if resp.IsBinary != nil && *resp.IsBinary {
201
-
view.ContentSrc = generateBlobURL(config, f, ref, filePath)
203
+
view.ContentSrc = generateBlobURL(config, repo, ref, filePath)
202
204
ext := strings.ToLower(filepath.Ext(resp.Path))
203
205
204
206
switch ext {
···
250
252
return view
251
253
}
252
254
253
-
func generateBlobURL(config *config.Config, f *reporesolver.ResolvedRepo, ref, filePath string) string {
255
+
func generateBlobURL(config *config.Config, repo *models.Repo, ref, filePath string) string {
254
256
scheme := "http"
255
257
if !config.Core.Dev {
256
258
scheme = "https"
257
259
}
258
260
259
-
repoName := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
261
+
repoName := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
260
262
baseURL := &url.URL{
261
263
Scheme: scheme,
262
-
Host: f.Knot,
264
+
Host: repo.Knot,
263
265
Path: "/xrpc/sh.tangled.repo.blob",
264
266
}
265
267
query := baseURL.Query()
+2
-2
appview/repo/branches.go
···
29
29
xrpcc := &indigoxrpc.Client{
30
30
Host: host,
31
31
}
32
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
32
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
33
33
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
34
34
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
35
35
l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
···
46
46
user := rp.oauth.GetUser(r)
47
47
rp.pages.RepoBranches(w, pages.RepoBranchesParams{
48
48
LoggedInUser: user,
49
-
RepoInfo: f.RepoInfo(user),
49
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
50
50
RepoBranchesResponse: result,
51
51
})
52
52
}
+4
-8
appview/repo/compare.go
···
36
36
Host: host,
37
37
}
38
38
39
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
39
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
40
40
branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
41
41
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
42
42
l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
···
88
88
return
89
89
}
90
90
91
-
repoinfo := f.RepoInfo(user)
92
-
93
91
rp.pages.RepoCompareNew(w, pages.RepoCompareNewParams{
94
92
LoggedInUser: user,
95
-
RepoInfo: repoinfo,
93
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
96
94
Branches: branches,
97
95
Tags: tags.Tags,
98
96
Base: base,
···
151
149
Host: host,
152
150
}
153
151
154
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
152
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
155
153
156
154
branchBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
157
155
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
···
202
200
diff = patchutil.AsNiceDiff(formatPatch.FormatPatchRaw, base)
203
201
}
204
202
205
-
repoinfo := f.RepoInfo(user)
206
-
207
203
rp.pages.RepoCompare(w, pages.RepoCompareParams{
208
204
LoggedInUser: user,
209
-
RepoInfo: repoinfo,
205
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
210
206
Branches: branches.Branches,
211
207
Tags: tags.Tags,
212
208
Base: base,
+24
-17
appview/repo/feed.go
···
11
11
"tangled.org/core/appview/db"
12
12
"tangled.org/core/appview/models"
13
13
"tangled.org/core/appview/pagination"
14
-
"tangled.org/core/appview/reporesolver"
14
+
"tangled.org/core/orm"
15
15
16
+
"github.com/bluesky-social/indigo/atproto/identity"
16
17
"github.com/bluesky-social/indigo/atproto/syntax"
17
18
"github.com/gorilla/feeds"
18
19
)
19
20
20
-
func (rp *Repo) getRepoFeed(ctx context.Context, f *reporesolver.ResolvedRepo) (*feeds.Feed, error) {
21
+
func (rp *Repo) getRepoFeed(ctx context.Context, repo *models.Repo, ownerSlashRepo string) (*feeds.Feed, error) {
21
22
const feedLimitPerType = 100
22
23
23
-
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt()))
24
+
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, orm.FilterEq("repo_at", repo.RepoAt()))
24
25
if err != nil {
25
26
return nil, err
26
27
}
···
28
29
issues, err := db.GetIssuesPaginated(
29
30
rp.db,
30
31
pagination.Page{Limit: feedLimitPerType},
31
-
db.FilterEq("repo_at", f.RepoAt()),
32
+
orm.FilterEq("repo_at", repo.RepoAt()),
32
33
)
33
34
if err != nil {
34
35
return nil, err
35
36
}
36
37
37
38
feed := &feeds.Feed{
38
-
Title: fmt.Sprintf("activity feed for %s", f.OwnerSlashRepo()),
39
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, f.OwnerSlashRepo()), Type: "text/html", Rel: "alternate"},
39
+
Title: fmt.Sprintf("activity feed for @%s", ownerSlashRepo),
40
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, ownerSlashRepo), Type: "text/html", Rel: "alternate"},
40
41
Items: make([]*feeds.Item, 0),
41
42
Updated: time.UnixMilli(0),
42
43
}
43
44
44
45
for _, pull := range pulls {
45
-
items, err := rp.createPullItems(ctx, pull, f)
46
+
items, err := rp.createPullItems(ctx, pull, repo, ownerSlashRepo)
46
47
if err != nil {
47
48
return nil, err
48
49
}
···
50
51
}
51
52
52
53
for _, issue := range issues {
53
-
item, err := rp.createIssueItem(ctx, issue, f)
54
+
item, err := rp.createIssueItem(ctx, issue, repo, ownerSlashRepo)
54
55
if err != nil {
55
56
return nil, err
56
57
}
···
71
72
return feed, nil
72
73
}
73
74
74
-
func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, f *reporesolver.ResolvedRepo) ([]*feeds.Item, error) {
75
+
func (rp *Repo) createPullItems(ctx context.Context, pull *models.Pull, repo *models.Repo, ownerSlashRepo string) ([]*feeds.Item, error) {
75
76
owner, err := rp.idResolver.ResolveIdent(ctx, pull.OwnerDid)
76
77
if err != nil {
77
78
return nil, err
···
80
81
var items []*feeds.Item
81
82
82
83
state := rp.getPullState(pull)
83
-
description := rp.buildPullDescription(owner.Handle, state, pull, f.OwnerSlashRepo())
84
+
description := rp.buildPullDescription(owner.Handle, state, pull, ownerSlashRepo)
84
85
85
86
mainItem := &feeds.Item{
86
87
Title: fmt.Sprintf("[PR #%d] %s", pull.PullId, pull.Title),
87
88
Description: description,
88
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId)},
89
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId)},
89
90
Created: pull.Created,
90
91
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
91
92
}
···
98
99
99
100
roundItem := &feeds.Item{
100
101
Title: fmt.Sprintf("[PR #%d] %s (round #%d)", pull.PullId, pull.Title, round.RoundNumber),
101
-
Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in %s", owner.Handle, round.RoundNumber, pull.PullId, f.OwnerSlashRepo()),
102
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId, round.RoundNumber)},
102
+
Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in @%s", owner.Handle, round.RoundNumber, pull.PullId, ownerSlashRepo),
103
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, ownerSlashRepo, pull.PullId, round.RoundNumber)},
103
104
Created: round.Created,
104
105
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
105
106
}
···
109
110
return items, nil
110
111
}
111
112
112
-
func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, f *reporesolver.ResolvedRepo) (*feeds.Item, error) {
113
+
func (rp *Repo) createIssueItem(ctx context.Context, issue models.Issue, repo *models.Repo, ownerSlashRepo string) (*feeds.Item, error) {
113
114
owner, err := rp.idResolver.ResolveIdent(ctx, issue.Did)
114
115
if err != nil {
115
116
return nil, err
···
122
123
123
124
return &feeds.Item{
124
125
Title: fmt.Sprintf("[Issue #%d] %s", issue.IssueId, issue.Title),
125
-
Description: fmt.Sprintf("@%s %s issue #%d in %s", owner.Handle, state, issue.IssueId, f.OwnerSlashRepo()),
126
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), issue.IssueId)},
126
+
Description: fmt.Sprintf("@%s %s issue #%d in @%s", owner.Handle, state, issue.IssueId, ownerSlashRepo),
127
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, ownerSlashRepo, issue.IssueId)},
127
128
Created: issue.Created,
128
129
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
129
130
}, nil
···
152
153
log.Println("failed to fully resolve repo:", err)
153
154
return
154
155
}
156
+
repoOwnerId, ok := r.Context().Value("resolvedId").(identity.Identity)
157
+
if !ok || repoOwnerId.Handle.IsInvalidHandle() {
158
+
log.Println("failed to get resolved repo owner id")
159
+
return
160
+
}
161
+
ownerSlashRepo := repoOwnerId.Handle.String() + "/" + f.Name
155
162
156
-
feed, err := rp.getRepoFeed(r.Context(), f)
163
+
feed, err := rp.getRepoFeed(r.Context(), f, ownerSlashRepo)
157
164
if err != nil {
158
165
log.Println("failed to get repo feed:", err)
159
166
rp.pages.Error500(w)
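
getRepoFeed and its helpers now receive the owner-slash-repo string (resolved handle plus repo name) rather than a ResolvedRepo. For context, a stand-alone example of assembling such a feed with github.com/gorilla/feeds; the host, handle, and repo values below are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/gorilla/feeds"
)

func main() {
	ownerSlashRepo := "alice.example.com/widget" // placeholder

	feed := &feeds.Feed{
		Title:   fmt.Sprintf("activity feed for @%s", ownerSlashRepo),
		Link:    &feeds.Link{Href: "https://tangled.example/" + ownerSlashRepo, Rel: "alternate", Type: "text/html"},
		Updated: time.UnixMilli(0),
	}

	// one item per pull/issue event, mirroring the shape used above
	feed.Items = append(feed.Items, &feeds.Item{
		Title:       "[PR #1] example pull",
		Description: "@alice.example.com opened PR #1 in @" + ownerSlashRepo,
		Link:        &feeds.Link{Href: "https://tangled.example/" + ownerSlashRepo + "/pulls/1"},
		Created:     time.Now(),
		Author:      &feeds.Author{Name: "@alice.example.com"},
	})

	atom, err := feed.ToAtom()
	if err != nil {
		panic(err)
	}
	fmt.Println(atom)
}
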
+18
-19
appview/repo/index.go
···
22
22
"tangled.org/core/appview/db"
23
23
"tangled.org/core/appview/models"
24
24
"tangled.org/core/appview/pages"
25
-
"tangled.org/core/appview/reporesolver"
26
25
"tangled.org/core/appview/xrpcclient"
26
+
"tangled.org/core/orm"
27
27
"tangled.org/core/types"
28
28
29
29
"github.com/go-chi/chi/v5"
···
52
52
}
53
53
54
54
user := rp.oauth.GetUser(r)
55
-
repoInfo := f.RepoInfo(user)
56
55
57
56
// Build index response from multiple XRPC calls
58
57
result, err := rp.buildIndexResponse(r.Context(), xrpcc, f, ref)
···
62
61
rp.pages.RepoIndexPage(w, pages.RepoIndexParams{
63
62
LoggedInUser: user,
64
63
NeedsKnotUpgrade: true,
65
-
RepoInfo: repoInfo,
64
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
66
65
})
67
66
return
68
67
}
···
124
123
l.Error("failed to get email to did map", "err", err)
125
124
}
126
125
127
-
vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, commitsTrunc)
126
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, commitsTrunc)
128
127
if err != nil {
129
128
l.Error("failed to GetVerifiedObjectCommits", "err", err)
130
129
}
···
140
139
for _, c := range commitsTrunc {
141
140
shas = append(shas, c.Hash.String())
142
141
}
143
-
pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas)
142
+
pipelines, err := getPipelineStatuses(rp.db, f, shas)
144
143
if err != nil {
145
144
l.Error("failed to fetch pipeline statuses", "err", err)
146
145
// non-fatal
···
148
147
149
148
rp.pages.RepoIndexPage(w, pages.RepoIndexParams{
150
149
LoggedInUser: user,
151
-
RepoInfo: repoInfo,
150
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
152
151
TagMap: tagMap,
153
152
RepoIndexResponse: *result,
154
153
CommitsTrunc: commitsTrunc,
···
165
164
func (rp *Repo) getLanguageInfo(
166
165
ctx context.Context,
167
166
l *slog.Logger,
168
-
f *reporesolver.ResolvedRepo,
167
+
repo *models.Repo,
169
168
xrpcc *indigoxrpc.Client,
170
169
currentRef string,
171
170
isDefaultRef bool,
···
173
172
// first attempt to fetch from db
174
173
langs, err := db.GetRepoLanguages(
175
174
rp.db,
176
-
db.FilterEq("repo_at", f.RepoAt()),
177
-
db.FilterEq("ref", currentRef),
175
+
orm.FilterEq("repo_at", repo.RepoAt()),
176
+
orm.FilterEq("ref", currentRef),
178
177
)
179
178
180
179
if err != nil || langs == nil {
181
180
// non-fatal, fetch langs from ks via XRPC
182
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
183
-
ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, repo)
181
+
didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
182
+
ls, err := tangled.RepoLanguages(ctx, xrpcc, currentRef, didSlashRepo)
184
183
if err != nil {
185
184
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
186
185
l.Error("failed to call XRPC repo.languages", "err", xrpcerr)
···
195
194
196
195
for _, lang := range ls.Languages {
197
196
langs = append(langs, models.RepoLanguage{
198
-
RepoAt: f.RepoAt(),
197
+
RepoAt: repo.RepoAt(),
199
198
Ref: currentRef,
200
199
IsDefaultRef: isDefaultRef,
201
200
Language: lang.Name,
···
210
209
defer tx.Rollback()
211
210
212
211
// update appview's cache
213
-
err = db.UpdateRepoLanguages(tx, f.RepoAt(), currentRef, langs)
212
+
err = db.UpdateRepoLanguages(tx, repo.RepoAt(), currentRef, langs)
214
213
if err != nil {
215
214
// non-fatal
216
215
l.Error("failed to cache lang results", "err", err)
···
255
254
}
256
255
257
256
// buildIndexResponse creates a RepoIndexResponse by combining multiple xrpc calls in parallel
258
-
func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, f *reporesolver.ResolvedRepo, ref string) (*types.RepoIndexResponse, error) {
259
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
257
+
func (rp *Repo) buildIndexResponse(ctx context.Context, xrpcc *indigoxrpc.Client, repo *models.Repo, ref string) (*types.RepoIndexResponse, error) {
258
+
didSlashRepo := fmt.Sprintf("%s/%s", repo.Did, repo.Name)
260
259
261
260
// first get branches to determine the ref if not specified
262
-
branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, repo)
261
+
branchesBytes, err := tangled.RepoBranches(ctx, xrpcc, "", 0, didSlashRepo)
263
262
if err != nil {
264
263
return nil, fmt.Errorf("failed to call repoBranches: %w", err)
265
264
}
···
303
302
wg.Add(1)
304
303
go func() {
305
304
defer wg.Done()
306
-
tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, repo)
305
+
tagsBytes, err := tangled.RepoTags(ctx, xrpcc, "", 0, didSlashRepo)
307
306
if err != nil {
308
307
errs = errors.Join(errs, fmt.Errorf("failed to call repoTags: %w", err))
309
308
return
···
318
317
wg.Add(1)
319
318
go func() {
320
319
defer wg.Done()
321
-
resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, repo)
320
+
resp, err := tangled.RepoTree(ctx, xrpcc, "", ref, didSlashRepo)
322
321
if err != nil {
323
322
errs = errors.Join(errs, fmt.Errorf("failed to call repoTree: %w", err))
324
323
return
···
330
329
wg.Add(1)
331
330
go func() {
332
331
defer wg.Done()
333
-
logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, repo)
332
+
logBytes, err := tangled.RepoLog(ctx, xrpcc, "", 50, "", ref, didSlashRepo)
334
333
if err != nil {
335
334
errs = errors.Join(errs, fmt.Errorf("failed to call repoLog: %w", err))
336
335
return
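
buildIndexResponse fans the tags, tree, and log calls out across goroutines and joins any failures with errors.Join. A stand-alone sketch of the same fan-out pattern; the mutex around the joined error is an addition in this sketch (a plain guard for the shared variable), not something taken from the original:

package main

import (
	"errors"
	"fmt"
	"sync"
)

// fanOut runs each task concurrently and returns all failures joined into one error.
func fanOut(tasks []func() error) error {
	var (
		wg   sync.WaitGroup
		mu   sync.Mutex
		errs error
	)
	for _, task := range tasks {
		wg.Add(1)
		go func(t func() error) {
			defer wg.Done()
			if err := t(); err != nil {
				mu.Lock()
				errs = errors.Join(errs, err)
				mu.Unlock()
			}
		}(task)
	}
	wg.Wait()
	return errs
}

func main() {
	err := fanOut([]func() error{
		func() error { return nil },
		func() error { return fmt.Errorf("repoTags failed") },
		func() error { return fmt.Errorf("repoLog failed") },
	})
	fmt.Println(err)
}
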
+8
-11
appview/repo/log.go
···
57
57
cursor = strconv.Itoa(offset)
58
58
}
59
59
60
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
60
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
61
61
xrpcBytes, err := tangled.RepoLog(r.Context(), xrpcc, cursor, limit, "", ref, repo)
62
62
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
63
63
l.Error("failed to call XRPC repo.log", "err", xrpcerr)
···
116
116
l.Error("failed to fetch email to did mapping", "err", err)
117
117
}
118
118
119
-
vc, err := commitverify.GetVerifiedObjectCommits(rp.db, emailToDidMap, xrpcResp.Commits)
119
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, xrpcResp.Commits)
120
120
if err != nil {
121
121
l.Error("failed to GetVerifiedObjectCommits", "err", err)
122
122
}
123
-
124
-
repoInfo := f.RepoInfo(user)
125
123
126
124
var shas []string
127
125
for _, c := range xrpcResp.Commits {
128
126
shas = append(shas, c.Hash.String())
129
127
}
130
-
pipelines, err := getPipelineStatuses(rp.db, repoInfo, shas)
128
+
pipelines, err := getPipelineStatuses(rp.db, f, shas)
131
129
if err != nil {
132
130
l.Error("failed to getPipelineStatuses", "err", err)
133
131
// non-fatal
···
136
134
rp.pages.RepoLog(w, pages.RepoLogParams{
137
135
LoggedInUser: user,
138
136
TagMap: tagMap,
139
-
RepoInfo: repoInfo,
137
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
140
138
RepoLogResponse: xrpcResp,
141
139
EmailToDid: emailToDidMap,
142
140
VerifiedCommits: vc,
···
174
172
Host: host,
175
173
}
176
174
177
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
175
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
178
176
xrpcBytes, err := tangled.RepoDiff(r.Context(), xrpcc, ref, repo)
179
177
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
180
178
l.Error("failed to call XRPC repo.diff", "err", xrpcerr)
···
194
192
l.Error("failed to get email to did mapping", "err", err)
195
193
}
196
194
197
-
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.NiceDiff{*result.Diff})
195
+
vc, err := commitverify.GetVerifiedCommits(rp.db, emailToDidMap, []types.Commit{result.Diff.Commit})
198
196
if err != nil {
199
197
l.Error("failed to GetVerifiedCommits", "err", err)
200
198
}
201
199
202
200
user := rp.oauth.GetUser(r)
203
-
repoInfo := f.RepoInfo(user)
204
-
pipelines, err := getPipelineStatuses(rp.db, repoInfo, []string{result.Diff.Commit.This})
201
+
pipelines, err := getPipelineStatuses(rp.db, f, []string{result.Diff.Commit.This})
205
202
if err != nil {
206
203
l.Error("failed to getPipelineStatuses", "err", err)
207
204
// non-fatal
···
213
210
214
211
rp.pages.RepoCommit(w, pages.RepoCommitParams{
215
212
LoggedInUser: user,
216
-
RepoInfo: f.RepoInfo(user),
213
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
217
214
RepoCommitResponse: result,
218
215
EmailToDid: emailToDidMap,
219
216
VerifiedCommit: vc,
+4
-3
appview/repo/opengraph.go
···
16
16
"tangled.org/core/appview/db"
17
17
"tangled.org/core/appview/models"
18
18
"tangled.org/core/appview/ogcard"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/types"
20
21
)
21
22
···
338
339
var languageStats []types.RepoLanguageDetails
339
340
langs, err := db.GetRepoLanguages(
340
341
rp.db,
341
-
db.FilterEq("repo_at", f.RepoAt()),
342
-
db.FilterEq("is_default_ref", 1),
342
+
orm.FilterEq("repo_at", f.RepoAt()),
343
+
orm.FilterEq("is_default_ref", 1),
343
344
)
344
345
if err != nil {
345
346
log.Printf("failed to get language stats from db: %v", err)
···
374
375
})
375
376
}
376
377
377
-
card, err := rp.drawRepoSummaryCard(&f.Repo, languageStats)
378
+
card, err := rp.drawRepoSummaryCard(f, languageStats)
378
379
if err != nil {
379
380
log.Println("failed to draw repo summary card", err)
380
381
http.Error(w, "failed to draw repo summary card", http.StatusInternalServerError)
+37
-35
appview/repo/repo.go
···
24
24
xrpcclient "tangled.org/core/appview/xrpcclient"
25
25
"tangled.org/core/eventconsumer"
26
26
"tangled.org/core/idresolver"
27
+
"tangled.org/core/orm"
27
28
"tangled.org/core/rbac"
28
29
"tangled.org/core/tid"
29
30
"tangled.org/core/xrpc/serviceauth"
···
118
119
}
119
120
}
120
121
121
-
newRepo := f.Repo
122
+
newRepo := *f
122
123
newRepo.Spindle = newSpindle
123
124
record := newRepo.AsRecord()
124
125
···
257
258
l.Info("wrote label record to PDS")
258
259
259
260
// update the repo to subscribe to this label
260
-
newRepo := f.Repo
261
+
newRepo := *f
261
262
newRepo.Labels = append(newRepo.Labels, aturi)
262
263
repoRecord := newRepo.AsRecord()
263
264
···
345
346
// get form values
346
347
labelId := r.FormValue("label-id")
347
348
348
-
label, err := db.GetLabelDefinition(rp.db, db.FilterEq("id", labelId))
349
+
label, err := db.GetLabelDefinition(rp.db, orm.FilterEq("id", labelId))
349
350
if err != nil {
350
351
fail("Failed to find label definition.", err)
351
352
return
···
369
370
}
370
371
371
372
// update repo record to remove the label reference
372
-
newRepo := f.Repo
373
+
newRepo := *f
373
374
var updated []string
374
375
removedAt := label.AtUri().String()
375
376
for _, l := range newRepo.Labels {
···
409
410
410
411
err = db.UnsubscribeLabel(
411
412
tx,
412
-
db.FilterEq("repo_at", f.RepoAt()),
413
-
db.FilterEq("label_at", removedAt),
413
+
orm.FilterEq("repo_at", f.RepoAt()),
414
+
orm.FilterEq("label_at", removedAt),
414
415
)
415
416
if err != nil {
416
417
fail("Failed to unsubscribe label.", err)
417
418
return
418
419
}
419
420
420
-
err = db.DeleteLabelDefinition(tx, db.FilterEq("id", label.Id))
421
+
err = db.DeleteLabelDefinition(tx, orm.FilterEq("id", label.Id))
421
422
if err != nil {
422
423
fail("Failed to delete label definition.", err)
423
424
return
···
456
457
}
457
458
458
459
labelAts := r.Form["label"]
459
-
_, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts))
460
+
_, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts))
460
461
if err != nil {
461
462
fail("Failed to subscribe to label.", err)
462
463
return
463
464
}
464
465
465
-
newRepo := f.Repo
466
+
newRepo := *f
466
467
newRepo.Labels = append(newRepo.Labels, labelAts...)
467
468
468
469
// dedup
···
477
478
return
478
479
}
479
480
480
-
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey)
481
+
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey)
481
482
if err != nil {
482
483
fail("Failed to update labels, no record found on PDS.", err)
483
484
return
···
542
543
}
543
544
544
545
labelAts := r.Form["label"]
545
-
_, err = db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", labelAts))
546
+
_, err = db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", labelAts))
546
547
if err != nil {
547
548
fail("Failed to unsubscribe to label.", err)
548
549
return
549
550
}
550
551
551
552
// update repo record to remove the label reference
552
-
newRepo := f.Repo
553
+
newRepo := *f
553
554
var updated []string
554
555
for _, l := range newRepo.Labels {
555
556
if !slices.Contains(labelAts, l) {
···
565
566
return
566
567
}
567
568
568
-
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Repo.Did, f.Repo.Rkey)
569
+
ex, err := comatproto.RepoGetRecord(r.Context(), client, "", tangled.RepoNSID, f.Did, f.Rkey)
569
570
if err != nil {
570
571
fail("Failed to update labels, no record found on PDS.", err)
571
572
return
···
582
583
583
584
err = db.UnsubscribeLabel(
584
585
rp.db,
585
-
db.FilterEq("repo_at", f.RepoAt()),
586
-
db.FilterIn("label_at", labelAts),
586
+
orm.FilterEq("repo_at", f.RepoAt()),
587
+
orm.FilterIn("label_at", labelAts),
587
588
)
588
589
if err != nil {
589
590
fail("Failed to unsubscribe label.", err)
···
612
613
613
614
labelDefs, err := db.GetLabelDefinitions(
614
615
rp.db,
615
-
db.FilterIn("at_uri", f.Repo.Labels),
616
-
db.FilterContains("scope", subject.Collection().String()),
616
+
orm.FilterIn("at_uri", f.Labels),
617
+
orm.FilterContains("scope", subject.Collection().String()),
617
618
)
618
619
if err != nil {
619
620
l.Error("failed to fetch label defs", "err", err)
···
625
626
defs[l.AtUri().String()] = &l
626
627
}
627
628
628
-
states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject))
629
+
states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject))
629
630
if err != nil {
630
631
l.Error("failed to build label state", "err", err)
631
632
return
···
635
636
user := rp.oauth.GetUser(r)
636
637
rp.pages.LabelPanel(w, pages.LabelPanelParams{
637
638
LoggedInUser: user,
638
-
RepoInfo: f.RepoInfo(user),
639
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
639
640
Defs: defs,
640
641
Subject: subject.String(),
641
642
State: state,
···
660
661
661
662
labelDefs, err := db.GetLabelDefinitions(
662
663
rp.db,
663
-
db.FilterIn("at_uri", f.Repo.Labels),
664
-
db.FilterContains("scope", subject.Collection().String()),
664
+
orm.FilterIn("at_uri", f.Labels),
665
+
orm.FilterContains("scope", subject.Collection().String()),
665
666
)
666
667
if err != nil {
667
668
l.Error("failed to fetch labels", "err", err)
···
673
674
defs[l.AtUri().String()] = &l
674
675
}
675
676
676
-
states, err := db.GetLabels(rp.db, db.FilterEq("subject", subject))
677
+
states, err := db.GetLabels(rp.db, orm.FilterEq("subject", subject))
677
678
if err != nil {
678
679
l.Error("failed to build label state", "err", err)
679
680
return
···
683
684
user := rp.oauth.GetUser(r)
684
685
rp.pages.EditLabelPanel(w, pages.EditLabelPanelParams{
685
686
LoggedInUser: user,
686
-
RepoInfo: f.RepoInfo(user),
687
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
687
688
Defs: defs,
688
689
Subject: subject.String(),
689
690
State: state,
···
864
865
r.Context(),
865
866
client,
866
867
&tangled.RepoDelete_Input{
867
-
Did: f.OwnerDid(),
868
+
Did: f.Did,
868
869
Name: f.Name,
869
870
Rkey: f.Rkey,
870
871
},
···
902
903
l.Info("removed collaborators")
903
904
904
905
// remove repo RBAC
905
-
err = rp.enforcer.RemoveRepo(f.OwnerDid(), f.Knot, f.DidSlashRepo())
906
+
err = rp.enforcer.RemoveRepo(f.Did, f.Knot, f.DidSlashRepo())
906
907
if err != nil {
907
908
rp.pages.Notice(w, noticeId, "Failed to update RBAC rules")
908
909
return
909
910
}
910
911
911
912
// remove repo from db
912
-
err = db.RemoveRepo(tx, f.OwnerDid(), f.Name)
913
+
err = db.RemoveRepo(tx, f.Did, f.Name)
913
914
if err != nil {
914
915
rp.pages.Notice(w, noticeId, "Failed to update appview")
915
916
return
···
930
931
return
931
932
}
932
933
933
-
rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.OwnerDid()))
934
+
rp.pages.HxRedirect(w, fmt.Sprintf("/%s", f.Did))
934
935
}
935
936
936
937
func (rp *Repo) SyncRepoFork(w http.ResponseWriter, r *http.Request) {
···
959
960
return
960
961
}
961
962
962
-
repoInfo := f.RepoInfo(user)
963
-
if repoInfo.Source == nil {
963
+
if f.Source == "" {
964
964
rp.pages.Notice(w, "repo", "This repository is not a fork.")
965
965
return
966
966
}
···
971
971
&tangled.RepoForkSync_Input{
972
972
Did: user.Did,
973
973
Name: f.Name,
974
-
Source: repoInfo.Source.RepoAt().String(),
974
+
Source: f.Source,
975
975
Branch: ref,
976
976
},
977
977
)
···
1007
1007
rp.pages.ForkRepo(w, pages.ForkRepoParams{
1008
1008
LoggedInUser: user,
1009
1009
Knots: knots,
1010
-
RepoInfo: f.RepoInfo(user),
1010
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
1011
1011
})
1012
1012
1013
1013
case http.MethodPost:
···
1037
1037
// in the user's account.
1038
1038
existingRepo, err := db.GetRepo(
1039
1039
rp.db,
1040
-
db.FilterEq("did", user.Did),
1041
-
db.FilterEq("name", forkName),
1040
+
orm.FilterEq("did", user.Did),
1041
+
orm.FilterEq("name", forkName),
1042
1042
)
1043
1043
if err != nil {
1044
1044
if !errors.Is(err, sql.ErrNoRows) {
···
1058
1058
uri = "http"
1059
1059
}
1060
1060
1061
-
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name)
1061
+
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.Did, f.Name)
1062
1062
l = l.With("cloneUrl", forkSourceUrl)
1063
1063
1064
1064
sourceAt := f.RepoAt().String()
···
1071
1071
Knot: targetKnot,
1072
1072
Rkey: rkey,
1073
1073
Source: sourceAt,
1074
-
Description: f.Repo.Description,
1074
+
Description: f.Description,
1075
1075
Created: time.Now(),
1076
1076
Labels: rp.config.Label.DefaultLabelDefs,
1077
1077
}
···
1130
1130
}
1131
1131
defer rollback()
1132
1132
1133
+
// TODO: this could coordinate better with the knot to receive a clone status
1133
1134
client, err := rp.oauth.ServiceClient(
1134
1135
r,
1135
1136
oauth.WithService(targetKnot),
1136
1137
oauth.WithLxm(tangled.RepoCreateNSID),
1137
1138
oauth.WithDev(rp.config.Core.Dev),
1139
+
oauth.WithTimeout(time.Second*20), // big repos take time to clone
1138
1140
)
1139
1141
if err != nil {
1140
1142
l.Error("could not create service client", "err", err)
+17
-19
appview/repo/repo_util.go
···
1
1
package repo
2
2
3
3
import (
4
+
"maps"
4
5
"slices"
5
6
"sort"
6
7
"strings"
7
8
8
9
"tangled.org/core/appview/db"
9
10
"tangled.org/core/appview/models"
10
-
"tangled.org/core/appview/pages/repoinfo"
11
+
"tangled.org/core/orm"
11
12
"tangled.org/core/types"
12
-
13
-
"github.com/go-git/go-git/v5/plumbing/object"
14
13
)
15
14
16
15
func sortFiles(files []types.NiceTree) {
···
43
42
})
44
43
}
45
44
46
-
func uniqueEmails(commits []*object.Commit) []string {
45
+
func uniqueEmails(commits []types.Commit) []string {
47
46
emails := make(map[string]struct{})
48
47
for _, commit := range commits {
49
-
if commit.Author.Email != "" {
50
-
emails[commit.Author.Email] = struct{}{}
51
-
}
52
-
if commit.Committer.Email != "" {
53
-
emails[commit.Committer.Email] = struct{}{}
48
+
emails[commit.Author.Email] = struct{}{}
49
+
emails[commit.Committer.Email] = struct{}{}
50
+
for _, c := range commit.CoAuthors() {
51
+
emails[c.Email] = struct{}{}
54
52
}
55
53
}
56
-
var uniqueEmails []string
57
-
for email := range emails {
58
-
uniqueEmails = append(uniqueEmails, email)
59
-
}
60
-
return uniqueEmails
54
+
55
+
// delete empty emails, if any, from the set
56
+
delete(emails, "")
57
+
58
+
return slices.Collect(maps.Keys(emails))
61
59
}
62
60
63
61
func balanceIndexItems(commitCount, branchCount, tagCount, fileCount int) (commitsTrunc int, branchesTrunc int, tagsTrunc int) {
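
The rewritten uniqueEmails above collects author, committer, and co-author addresses into a set, drops the empty key once with delete, and materializes the keys via maps.Keys plus slices.Collect (Go 1.23+). A runnable version with stand-in types, since types.Commit and its CoAuthors method live elsewhere in the repo:

package main

import (
	"fmt"
	"maps"
	"slices"
)

// Signature is a stand-in for the author/committer info on types.Commit.
type Signature struct {
	Name, Email string
}

// Commit is a minimal stand-in; the real type also carries hashes, messages, etc.
type Commit struct {
	Author, Committer Signature
	coAuthors         []Signature
}

func (c Commit) CoAuthors() []Signature { return c.coAuthors }

// uniqueEmails collects every distinct author, committer, and co-author email,
// discarding empty strings via a single delete on the set.
func uniqueEmails(commits []Commit) []string {
	emails := make(map[string]struct{})
	for _, commit := range commits {
		emails[commit.Author.Email] = struct{}{}
		emails[commit.Committer.Email] = struct{}{}
		for _, c := range commit.CoAuthors() {
			emails[c.Email] = struct{}{}
		}
	}
	delete(emails, "") // drop empty emails, if any
	return slices.Collect(maps.Keys(emails))
}

func main() {
	commits := []Commit{
		{Author: Signature{Email: "a@example.com"}, Committer: Signature{Email: "b@example.com"}},
		{Author: Signature{Email: "a@example.com"}, Committer: Signature{}},
	}
	fmt.Println(uniqueEmails(commits))
}
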
···
93
91
// golang is so blessed that it requires 35 lines of imperative code for this
94
92
func getPipelineStatuses(
95
93
d *db.DB,
96
-
repoInfo repoinfo.RepoInfo,
94
+
repo *models.Repo,
97
95
shas []string,
98
96
) (map[string]models.Pipeline, error) {
99
97
m := make(map[string]models.Pipeline)
···
105
103
ps, err := db.GetPipelineStatuses(
106
104
d,
107
105
len(shas),
108
-
db.FilterEq("repo_owner", repoInfo.OwnerDid),
109
-
db.FilterEq("repo_name", repoInfo.Name),
110
-
db.FilterEq("knot", repoInfo.Knot),
111
-
db.FilterIn("sha", shas),
106
+
orm.FilterEq("repo_owner", repo.Did),
107
+
orm.FilterEq("repo_name", repo.Name),
108
+
orm.FilterEq("knot", repo.Knot),
109
+
orm.FilterIn("sha", shas),
112
110
)
113
111
if err != nil {
114
112
return nil, err
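
Throughout this diff the db.FilterEq/FilterIn helpers are swapped for their counterparts in the new tangled.org/core/orm package. The orm API itself is not shown here, so the snippet below is only a loose, self-contained illustration of the general idea (an Eq/In filter value that renders to a SQL predicate plus its bound arguments), not the project's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// filter is a minimal stand-in for an Eq/In-style query filter: it carries a
// SQL predicate and the arguments bound to it.
type filter struct {
	clause string
	args   []any
}

func FilterEq(col string, val any) filter {
	return filter{clause: fmt.Sprintf("%s = ?", col), args: []any{val}}
}

func FilterIn(col string, vals []string) filter {
	ph := strings.TrimSuffix(strings.Repeat("?, ", len(vals)), ", ")
	args := make([]any, len(vals))
	for i, v := range vals {
		args[i] = v
	}
	return filter{clause: fmt.Sprintf("%s IN (%s)", col, ph), args: args}
}

// where combines filters into a WHERE clause and the flattened argument list.
func where(filters ...filter) (string, []any) {
	var clauses []string
	var args []any
	for _, f := range filters {
		clauses = append(clauses, f.clause)
		args = append(args, f.args...)
	}
	return "WHERE " + strings.Join(clauses, " AND "), args
}

func main() {
	clause, args := where(
		FilterEq("repo_owner", "did:plc:example"),
		FilterEq("knot", "knot.example.com"),
		FilterIn("sha", []string{"abc123", "def456"}),
	)
	fmt.Println(clause, args)
}
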
+40
-11
appview/repo/settings.go
···
10
10
11
11
"tangled.org/core/api/tangled"
12
12
"tangled.org/core/appview/db"
13
+
"tangled.org/core/appview/models"
13
14
"tangled.org/core/appview/oauth"
14
15
"tangled.org/core/appview/pages"
15
16
xrpcclient "tangled.org/core/appview/xrpcclient"
17
+
"tangled.org/core/orm"
16
18
"tangled.org/core/types"
17
19
18
20
comatproto "github.com/bluesky-social/indigo/api/atproto"
···
194
196
Host: host,
195
197
}
196
198
197
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
199
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
198
200
xrpcBytes, err := tangled.RepoBranches(r.Context(), xrpcc, "", 0, repo)
199
201
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
200
202
l.Error("failed to call XRPC repo.branches", "err", xrpcerr)
···
209
211
return
210
212
}
211
213
212
-
defaultLabels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs))
214
+
defaultLabels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", rp.config.Label.DefaultLabelDefs))
213
215
if err != nil {
214
216
l.Error("failed to fetch labels", "err", err)
215
217
rp.pages.Error503(w)
216
218
return
217
219
}
218
220
219
-
labels, err := db.GetLabelDefinitions(rp.db, db.FilterIn("at_uri", f.Repo.Labels))
221
+
labels, err := db.GetLabelDefinitions(rp.db, orm.FilterIn("at_uri", f.Labels))
220
222
if err != nil {
221
223
l.Error("failed to fetch labels", "err", err)
222
224
rp.pages.Error503(w)
···
237
239
labels = labels[:n]
238
240
239
241
subscribedLabels := make(map[string]struct{})
240
-
for _, l := range f.Repo.Labels {
242
+
for _, l := range f.Labels {
241
243
subscribedLabels[l] = struct{}{}
242
244
}
243
245
···
254
256
255
257
rp.pages.RepoGeneralSettings(w, pages.RepoGeneralSettingsParams{
256
258
LoggedInUser: user,
257
-
RepoInfo: f.RepoInfo(user),
259
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
258
260
Branches: result.Branches,
259
261
Labels: labels,
260
262
DefaultLabels: defaultLabels,
···
271
273
f, err := rp.repoResolver.Resolve(r)
272
274
user := rp.oauth.GetUser(r)
273
275
274
-
repoCollaborators, err := f.Collaborators(r.Context())
276
+
collaborators, err := func(repo *models.Repo) ([]pages.Collaborator, error) {
277
+
repoCollaborators, err := rp.enforcer.E.GetImplicitUsersForResourceByDomain(repo.DidSlashRepo(), repo.Knot)
278
+
if err != nil {
279
+
return nil, err
280
+
}
281
+
var collaborators []pages.Collaborator
282
+
for _, item := range repoCollaborators {
283
+
// currently only two roles: owner and member
284
+
var role string
285
+
switch item[3] {
286
+
case "repo:owner":
287
+
role = "owner"
288
+
case "repo:collaborator":
289
+
role = "collaborator"
290
+
default:
291
+
continue
292
+
}
293
+
294
+
did := item[0]
295
+
296
+
c := pages.Collaborator{
297
+
Did: did,
298
+
Role: role,
299
+
}
300
+
collaborators = append(collaborators, c)
301
+
}
302
+
return collaborators, nil
303
+
}(f)
275
304
if err != nil {
276
305
l.Error("failed to get collaborators", "err", err)
277
306
}
278
307
279
308
rp.pages.RepoAccessSettings(w, pages.RepoAccessSettingsParams{
280
309
LoggedInUser: user,
281
-
RepoInfo: f.RepoInfo(user),
310
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
282
311
Tabs: settingsTabs,
283
312
Tab: "access",
284
-
Collaborators: repoCollaborators,
313
+
Collaborators: collaborators,
285
314
})
286
315
}
287
316
···
292
321
user := rp.oauth.GetUser(r)
293
322
294
323
// all spindles that the repo owner is a member of
295
-
spindles, err := rp.enforcer.GetSpindlesForUser(f.OwnerDid())
324
+
spindles, err := rp.enforcer.GetSpindlesForUser(f.Did)
296
325
if err != nil {
297
326
l.Error("failed to fetch spindles", "err", err)
298
327
return
···
339
368
340
369
rp.pages.RepoPipelineSettings(w, pages.RepoPipelineSettingsParams{
341
370
LoggedInUser: user,
342
-
RepoInfo: f.RepoInfo(user),
371
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
343
372
Tabs: settingsTabs,
344
373
Tab: "pipelines",
345
374
Spindles: spindles,
···
388
417
}
389
418
l.Debug("got", "topicsStr", topicStr, "topics", topics)
390
419
391
-
newRepo := f.Repo
420
+
newRepo := *f
392
421
newRepo.Description = description
393
422
newRepo.Website = website
394
423
newRepo.Topics = topics
+6
-4
appview/repo/tree.go
···
9
9
10
10
"tangled.org/core/api/tangled"
11
11
"tangled.org/core/appview/pages"
12
+
"tangled.org/core/appview/reporesolver"
12
13
xrpcclient "tangled.org/core/appview/xrpcclient"
13
14
"tangled.org/core/types"
14
15
···
39
40
xrpcc := &indigoxrpc.Client{
40
41
Host: host,
41
42
}
42
-
repo := fmt.Sprintf("%s/%s", f.OwnerDid(), f.Name)
43
+
repo := fmt.Sprintf("%s/%s", f.Did, f.Name)
43
44
xrpcResp, err := tangled.RepoTree(r.Context(), xrpcc, treePath, ref, repo)
44
45
if xrpcerr := xrpcclient.HandleXrpcErr(err); xrpcerr != nil {
45
46
l.Error("failed to call XRPC repo.tree", "err", xrpcerr)
···
79
80
result.ReadmeFileName = xrpcResp.Readme.Filename
80
81
result.Readme = xrpcResp.Readme.Contents
81
82
}
83
+
ownerSlashRepo := reporesolver.GetBaseRepoPath(r, f)
82
84
// redirects tree paths trying to access a blob; in this case the result.Files is unpopulated,
83
85
// so we can safely redirect to the "parent" (which is the same file).
84
86
if len(result.Files) == 0 && result.Parent == treePath {
85
-
redirectTo := fmt.Sprintf("/%s/blob/%s/%s", f.OwnerSlashRepo(), url.PathEscape(ref), result.Parent)
87
+
redirectTo := fmt.Sprintf("/%s/blob/%s/%s", ownerSlashRepo, url.PathEscape(ref), result.Parent)
86
88
http.Redirect(w, r, redirectTo, http.StatusFound)
87
89
return
88
90
}
89
91
user := rp.oauth.GetUser(r)
90
92
var breadcrumbs [][]string
91
-
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), url.PathEscape(ref))})
93
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", ownerSlashRepo, url.PathEscape(ref))})
92
94
if treePath != "" {
93
95
for idx, elem := range strings.Split(treePath, "/") {
94
96
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], url.PathEscape(elem))})
···
100
102
LoggedInUser: user,
101
103
BreadCrumbs: breadcrumbs,
102
104
TreePath: treePath,
103
-
RepoInfo: f.RepoInfo(user),
105
+
RepoInfo: rp.repoResolver.GetRepoInfo(r, user),
104
106
RepoTreeResponse: result,
105
107
})
106
108
}
+76
-164
appview/reporesolver/resolver.go
···
1
1
package reporesolver
2
2
3
3
import (
4
-
"context"
5
-
"database/sql"
6
-
"errors"
7
4
"fmt"
8
5
"log"
9
6
"net/http"
···
12
9
"strings"
13
10
14
11
"github.com/bluesky-social/indigo/atproto/identity"
15
-
securejoin "github.com/cyphar/filepath-securejoin"
16
12
"github.com/go-chi/chi/v5"
17
13
"tangled.org/core/appview/config"
18
14
"tangled.org/core/appview/db"
19
15
"tangled.org/core/appview/models"
20
16
"tangled.org/core/appview/oauth"
21
-
"tangled.org/core/appview/pages"
22
17
"tangled.org/core/appview/pages/repoinfo"
23
-
"tangled.org/core/idresolver"
24
18
"tangled.org/core/rbac"
25
19
)
26
20
27
-
type ResolvedRepo struct {
28
-
models.Repo
29
-
OwnerId identity.Identity
30
-
CurrentDir string
31
-
Ref string
32
-
33
-
rr *RepoResolver
21
+
type RepoResolver struct {
22
+
config *config.Config
23
+
enforcer *rbac.Enforcer
24
+
execer db.Execer
34
25
}
35
26
36
-
type RepoResolver struct {
37
-
config *config.Config
38
-
enforcer *rbac.Enforcer
39
-
idResolver *idresolver.Resolver
40
-
execer db.Execer
27
+
func New(config *config.Config, enforcer *rbac.Enforcer, execer db.Execer) *RepoResolver {
28
+
return &RepoResolver{config: config, enforcer: enforcer, execer: execer}
41
29
}
42
30
43
-
func New(config *config.Config, enforcer *rbac.Enforcer, resolver *idresolver.Resolver, execer db.Execer) *RepoResolver {
44
-
return &RepoResolver{config: config, enforcer: enforcer, idResolver: resolver, execer: execer}
31
+
// NOTE: this... should not even be here. the entire package will be removed in future refactor
32
+
func GetBaseRepoPath(r *http.Request, repo *models.Repo) string {
33
+
var (
34
+
user = chi.URLParam(r, "user")
35
+
name = chi.URLParam(r, "repo")
36
+
)
37
+
if user == "" || name == "" {
38
+
return repo.DidSlashRepo()
39
+
}
40
+
return path.Join(user, name)
45
41
}
46
42
47
-
func (rr *RepoResolver) Resolve(r *http.Request) (*ResolvedRepo, error) {
43
+
// TODO: move this out of `RepoResolver` struct
44
+
func (rr *RepoResolver) Resolve(r *http.Request) (*models.Repo, error) {
48
45
repo, ok := r.Context().Value("repo").(*models.Repo)
49
46
if !ok {
50
47
log.Println("malformed middleware: `repo` not exist in context")
51
48
return nil, fmt.Errorf("malformed middleware")
52
49
}
53
-
id, ok := r.Context().Value("resolvedId").(identity.Identity)
54
-
if !ok {
55
-
log.Println("malformed middleware")
56
-
return nil, fmt.Errorf("malformed middleware")
57
-
}
58
50
59
-
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
60
-
ref := chi.URLParam(r, "ref")
61
-
62
-
return &ResolvedRepo{
63
-
Repo: *repo,
64
-
OwnerId: id,
65
-
CurrentDir: currentDir,
66
-
Ref: ref,
67
-
68
-
rr: rr,
69
-
}, nil
70
-
}
71
-
72
-
func (f *ResolvedRepo) OwnerDid() string {
73
-
return f.OwnerId.DID.String()
74
-
}
75
-
76
-
func (f *ResolvedRepo) OwnerHandle() string {
77
-
return f.OwnerId.Handle.String()
51
+
return repo, nil
78
52
}
79
53
80
-
func (f *ResolvedRepo) OwnerSlashRepo() string {
81
-
handle := f.OwnerId.Handle
82
-
83
-
var p string
84
-
if handle != "" && !handle.IsInvalidHandle() {
85
-
p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.Name)
86
-
} else {
87
-
p, _ = securejoin.SecureJoin(f.OwnerDid(), f.Name)
54
+
// 1. [x] replace `RepoInfo` to `reporesolver.GetRepoInfo(r *http.Request, repo, user)`
55
+
// 2. [x] remove `rr`, `CurrentDir`, `Ref` fields from `ResolvedRepo`
56
+
// 3. [x] remove `ResolvedRepo`
57
+
// 4. [ ] replace reporesolver to reposervice
58
+
func (rr *RepoResolver) GetRepoInfo(r *http.Request, user *oauth.User) repoinfo.RepoInfo {
59
+
ownerId, ook := r.Context().Value("resolvedId").(identity.Identity)
60
+
repo, rok := r.Context().Value("repo").(*models.Repo)
61
+
if !ook || !rok {
62
+
log.Println("malformed request, failed to get repo from context")
88
63
}
89
64
90
-
return p
91
-
}
65
+
// get dir/ref
66
+
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
67
+
ref := chi.URLParam(r, "ref")
92
68
93
-
func (f *ResolvedRepo) Collaborators(ctx context.Context) ([]pages.Collaborator, error) {
94
-
repoCollaborators, err := f.rr.enforcer.E.GetImplicitUsersForResourceByDomain(f.DidSlashRepo(), f.Knot)
95
-
if err != nil {
96
-
return nil, err
69
+
repoAt := repo.RepoAt()
70
+
isStarred := false
71
+
roles := repoinfo.RolesInRepo{}
72
+
if user != nil {
73
+
isStarred = db.GetStarStatus(rr.execer, user.Did, repoAt)
74
+
roles.Roles = rr.enforcer.GetPermissionsInRepo(user.Did, repo.Knot, repo.DidSlashRepo())
97
75
}
98
76
99
-
var collaborators []pages.Collaborator
100
-
for _, item := range repoCollaborators {
101
-
// currently only two roles: owner and member
102
-
var role string
103
-
switch item[3] {
104
-
case "repo:owner":
105
-
role = "owner"
106
-
case "repo:collaborator":
107
-
role = "collaborator"
108
-
default:
109
-
continue
77
+
stats := repo.RepoStats
78
+
if stats == nil {
79
+
starCount, err := db.GetStarCount(rr.execer, repoAt)
80
+
if err != nil {
81
+
log.Println("failed to get star count for ", repoAt)
110
82
}
111
-
112
-
did := item[0]
113
-
114
-
c := pages.Collaborator{
115
-
Did: did,
116
-
Handle: "",
117
-
Role: role,
83
+
issueCount, err := db.GetIssueCount(rr.execer, repoAt)
84
+
if err != nil {
85
+
log.Println("failed to get issue count for ", repoAt)
118
86
}
119
-
collaborators = append(collaborators, c)
120
-
}
121
-
122
-
// populate all collborators with handles
123
-
identsToResolve := make([]string, len(collaborators))
124
-
for i, collab := range collaborators {
125
-
identsToResolve[i] = collab.Did
126
-
}
127
-
128
-
resolvedIdents := f.rr.idResolver.ResolveIdents(ctx, identsToResolve)
129
-
for i, resolved := range resolvedIdents {
130
-
if resolved != nil {
131
-
collaborators[i].Handle = resolved.Handle.String()
87
+
pullCount, err := db.GetPullCount(rr.execer, repoAt)
88
+
if err != nil {
89
+
log.Println("failed to get pull count for ", repoAt)
132
90
}
133
-
}
134
-
135
-
return collaborators, nil
136
-
}
137
-
138
-
// this function is a bit weird since it now returns RepoInfo from an entirely different
139
-
// package. we should refactor this or get rid of RepoInfo entirely.
140
-
func (f *ResolvedRepo) RepoInfo(user *oauth.User) repoinfo.RepoInfo {
141
-
repoAt := f.RepoAt()
142
-
isStarred := false
143
-
if user != nil {
144
-
isStarred = db.GetStarStatus(f.rr.execer, user.Did, repoAt)
145
-
}
146
-
147
-
starCount, err := db.GetStarCount(f.rr.execer, repoAt)
148
-
if err != nil {
149
-
log.Println("failed to get star count for ", repoAt)
150
-
}
151
-
issueCount, err := db.GetIssueCount(f.rr.execer, repoAt)
152
-
if err != nil {
153
-
log.Println("failed to get issue count for ", repoAt)
154
-
}
155
-
pullCount, err := db.GetPullCount(f.rr.execer, repoAt)
156
-
if err != nil {
157
-
log.Println("failed to get issue count for ", repoAt)
158
-
}
159
-
source, err := db.GetRepoSource(f.rr.execer, repoAt)
160
-
if errors.Is(err, sql.ErrNoRows) {
161
-
source = ""
162
-
} else if err != nil {
163
-
log.Println("failed to get repo source for ", repoAt, err)
91
+
stats = &models.RepoStats{
92
+
StarCount: starCount,
93
+
IssueCount: issueCount,
94
+
PullCount: pullCount,
95
+
}
164
96
}
165
97
166
98
var sourceRepo *models.Repo
167
-
if source != "" {
168
-
sourceRepo, err = db.GetRepoByAtUri(f.rr.execer, source)
99
+
var err error
100
+
if repo.Source != "" {
101
+
sourceRepo, err = db.GetRepoByAtUri(rr.execer, repo.Source)
169
102
if err != nil {
170
103
log.Println("failed to get repo by at uri", err)
171
104
}
172
105
}
173
106
174
-
var sourceHandle *identity.Identity
175
-
if sourceRepo != nil {
176
-
sourceHandle, err = f.rr.idResolver.ResolveIdent(context.Background(), sourceRepo.Did)
177
-
if err != nil {
178
-
log.Println("failed to resolve source repo", err)
179
-
}
180
-
}
107
+
repoInfo := repoinfo.RepoInfo{
108
+
// this is basically a models.Repo
109
+
OwnerDid: ownerId.DID.String(),
110
+
OwnerHandle: ownerId.Handle.String(),
111
+
Name: repo.Name,
112
+
Rkey: repo.Rkey,
113
+
Description: repo.Description,
114
+
Website: repo.Website,
115
+
Topics: repo.Topics,
116
+
Knot: repo.Knot,
117
+
Spindle: repo.Spindle,
118
+
Stats: *stats,
181
119
182
-
knot := f.Knot
120
+
// fork repo upstream
121
+
Source: sourceRepo,
183
122
184
-
repoInfo := repoinfo.RepoInfo{
185
-
OwnerDid: f.OwnerDid(),
186
-
OwnerHandle: f.OwnerHandle(),
187
-
Name: f.Name,
188
-
Rkey: f.Repo.Rkey,
189
-
RepoAt: repoAt,
190
-
Description: f.Description,
191
-
Website: f.Website,
192
-
Topics: f.Topics,
193
-
IsStarred: isStarred,
194
-
Knot: knot,
195
-
Spindle: f.Spindle,
196
-
Roles: f.RolesInRepo(user),
197
-
Stats: models.RepoStats{
198
-
StarCount: starCount,
199
-
IssueCount: issueCount,
200
-
PullCount: pullCount,
201
-
},
202
-
CurrentDir: f.CurrentDir,
203
-
Ref: f.Ref,
204
-
}
123
+
// page context
124
+
CurrentDir: currentDir,
125
+
Ref: ref,
205
126
206
-
if sourceRepo != nil {
207
-
repoInfo.Source = sourceRepo
208
-
repoInfo.SourceHandle = sourceHandle.Handle.String()
127
+
// info related to the session
128
+
IsStarred: isStarred,
129
+
Roles: roles,
209
130
}
210
131
211
132
return repoInfo
212
-
}
213
-
214
-
func (f *ResolvedRepo) RolesInRepo(u *oauth.User) repoinfo.RolesInRepo {
215
-
if u != nil {
216
-
r := f.rr.enforcer.GetPermissionsInRepo(u.Did, f.Knot, f.DidSlashRepo())
217
-
return repoinfo.RolesInRepo{Roles: r}
218
-
} else {
219
-
return repoinfo.RolesInRepo{}
220
-
}
221
133
}
222
134
223
135
// extractPathAfterRef gets the actual repository path
+5
-4
appview/serververify/verify.go
···
9
9
"tangled.org/core/api/tangled"
10
10
"tangled.org/core/appview/db"
11
11
"tangled.org/core/appview/xrpcclient"
12
+
"tangled.org/core/orm"
12
13
"tangled.org/core/rbac"
13
14
)
14
15
···
76
77
// mark this spindle as verified in the db
77
78
rowId, err := db.VerifySpindle(
78
79
tx,
79
-
db.FilterEq("owner", owner),
80
-
db.FilterEq("instance", instance),
80
+
orm.FilterEq("owner", owner),
81
+
orm.FilterEq("instance", instance),
81
82
)
82
83
if err != nil {
83
84
return 0, fmt.Errorf("failed to write to DB: %w", err)
···
115
116
// mark as registered
116
117
err = db.MarkRegistered(
117
118
tx,
118
-
db.FilterEq("did", owner),
119
-
db.FilterEq("domain", domain),
119
+
orm.FilterEq("did", owner),
120
+
orm.FilterEq("domain", domain),
120
121
)
121
122
if err != nil {
122
123
return fmt.Errorf("failed to register domain: %w", err)
+2
appview/settings/settings.go
+44
-26
appview/spindles/spindles.go
···
20
20
"tangled.org/core/appview/serververify"
21
21
"tangled.org/core/appview/xrpcclient"
22
22
"tangled.org/core/idresolver"
23
+
"tangled.org/core/orm"
23
24
"tangled.org/core/rbac"
24
25
"tangled.org/core/tid"
25
26
···
38
39
Logger *slog.Logger
39
40
}
40
41
42
+
type tab = map[string]any
43
+
44
+
var (
45
+
spindlesTabs []tab = []tab{
46
+
{"Name": "profile", "Icon": "user"},
47
+
{"Name": "keys", "Icon": "key"},
48
+
{"Name": "emails", "Icon": "mail"},
49
+
{"Name": "notifications", "Icon": "bell"},
50
+
{"Name": "knots", "Icon": "volleyball"},
51
+
{"Name": "spindles", "Icon": "spool"},
52
+
}
53
+
)
54
+
41
55
func (s *Spindles) Router() http.Handler {
42
56
r := chi.NewRouter()
43
57
···
58
72
user := s.OAuth.GetUser(r)
59
73
all, err := db.GetSpindles(
60
74
s.Db,
61
-
db.FilterEq("owner", user.Did),
75
+
orm.FilterEq("owner", user.Did),
62
76
)
63
77
if err != nil {
64
78
s.Logger.Error("failed to fetch spindles", "err", err)
···
69
83
s.Pages.Spindles(w, pages.SpindlesParams{
70
84
LoggedInUser: user,
71
85
Spindles: all,
86
+
Tabs: spindlesTabs,
87
+
Tab: "spindles",
72
88
})
73
89
}
74
90
···
86
102
87
103
spindles, err := db.GetSpindles(
88
104
s.Db,
89
-
db.FilterEq("instance", instance),
90
-
db.FilterEq("owner", user.Did),
91
-
db.FilterIsNot("verified", "null"),
105
+
orm.FilterEq("instance", instance),
106
+
orm.FilterEq("owner", user.Did),
107
+
orm.FilterIsNot("verified", "null"),
92
108
)
93
109
if err != nil || len(spindles) != 1 {
94
110
l.Error("failed to get spindle", "err", err, "len(spindles)", len(spindles))
···
108
124
repos, err := db.GetRepos(
109
125
s.Db,
110
126
0,
111
-
db.FilterEq("spindle", instance),
127
+
orm.FilterEq("spindle", instance),
112
128
)
113
129
if err != nil {
114
130
l.Error("failed to get spindle repos", "err", err)
···
127
143
Spindle: spindle,
128
144
Members: members,
129
145
Repos: repoMap,
146
+
Tabs: spindlesTabs,
147
+
Tab: "spindles",
130
148
})
131
149
}
132
150
···
273
291
274
292
spindles, err := db.GetSpindles(
275
293
s.Db,
276
-
db.FilterEq("owner", user.Did),
277
-
db.FilterEq("instance", instance),
294
+
orm.FilterEq("owner", user.Did),
295
+
orm.FilterEq("instance", instance),
278
296
)
279
297
if err != nil || len(spindles) != 1 {
280
298
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
302
320
// remove spindle members first
303
321
err = db.RemoveSpindleMember(
304
322
tx,
305
-
db.FilterEq("did", user.Did),
306
-
db.FilterEq("instance", instance),
323
+
orm.FilterEq("did", user.Did),
324
+
orm.FilterEq("instance", instance),
307
325
)
308
326
if err != nil {
309
327
l.Error("failed to remove spindle members", "err", err)
···
313
331
314
332
err = db.DeleteSpindle(
315
333
tx,
316
-
db.FilterEq("owner", user.Did),
317
-
db.FilterEq("instance", instance),
334
+
orm.FilterEq("owner", user.Did),
335
+
orm.FilterEq("instance", instance),
318
336
)
319
337
if err != nil {
320
338
l.Error("failed to delete spindle", "err", err)
···
365
383
366
384
shouldRedirect := r.Header.Get("shouldRedirect")
367
385
if shouldRedirect == "true" {
368
-
s.Pages.HxRedirect(w, "/spindles")
386
+
s.Pages.HxRedirect(w, "/settings/spindles")
369
387
return
370
388
}
371
389
···
393
411
394
412
spindles, err := db.GetSpindles(
395
413
s.Db,
396
-
db.FilterEq("owner", user.Did),
397
-
db.FilterEq("instance", instance),
414
+
orm.FilterEq("owner", user.Did),
415
+
orm.FilterEq("instance", instance),
398
416
)
399
417
if err != nil || len(spindles) != 1 {
400
418
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
436
454
437
455
verifiedSpindle, err := db.GetSpindles(
438
456
s.Db,
439
-
db.FilterEq("id", rowId),
457
+
orm.FilterEq("id", rowId),
440
458
)
441
459
if err != nil || len(verifiedSpindle) != 1 {
442
460
l.Error("failed get new spindle", "err", err)
···
469
487
470
488
spindles, err := db.GetSpindles(
471
489
s.Db,
472
-
db.FilterEq("owner", user.Did),
473
-
db.FilterEq("instance", instance),
490
+
orm.FilterEq("owner", user.Did),
491
+
orm.FilterEq("instance", instance),
474
492
)
475
493
if err != nil || len(spindles) != 1 {
476
494
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
581
599
}
582
600
583
601
// success
584
-
s.Pages.HxRedirect(w, fmt.Sprintf("/spindles/%s", instance))
602
+
s.Pages.HxRedirect(w, fmt.Sprintf("/settings/spindles/%s", instance))
585
603
}
586
604
587
605
func (s *Spindles) removeMember(w http.ResponseWriter, r *http.Request) {
···
605
623
606
624
spindles, err := db.GetSpindles(
607
625
s.Db,
608
-
db.FilterEq("owner", user.Did),
609
-
db.FilterEq("instance", instance),
626
+
orm.FilterEq("owner", user.Did),
627
+
orm.FilterEq("instance", instance),
610
628
)
611
629
if err != nil || len(spindles) != 1 {
612
630
l.Error("failed to retrieve instance", "err", err, "len(spindles)", len(spindles))
···
655
673
// get the record from the DB first:
656
674
members, err := db.GetSpindleMembers(
657
675
s.Db,
658
-
db.FilterEq("did", user.Did),
659
-
db.FilterEq("instance", instance),
660
-
db.FilterEq("subject", memberId.DID),
676
+
orm.FilterEq("did", user.Did),
677
+
orm.FilterEq("instance", instance),
678
+
orm.FilterEq("subject", memberId.DID),
661
679
)
662
680
if err != nil || len(members) != 1 {
663
681
l.Error("failed to get member", "err", err)
···
668
686
// remove from db
669
687
if err = db.RemoveSpindleMember(
670
688
tx,
671
-
db.FilterEq("did", user.Did),
672
-
db.FilterEq("instance", instance),
673
-
db.FilterEq("subject", memberId.DID),
689
+
orm.FilterEq("did", user.Did),
690
+
orm.FilterEq("instance", instance),
691
+
orm.FilterEq("subject", memberId.DID),
674
692
); err != nil {
675
693
l.Error("failed to remove spindle member", "err", err)
676
694
fail()
+6
-5
appview/state/gfi.go
···
11
11
"tangled.org/core/appview/pages"
12
12
"tangled.org/core/appview/pagination"
13
13
"tangled.org/core/consts"
14
+
"tangled.org/core/orm"
14
15
)
15
16
16
17
func (s *State) GoodFirstIssues(w http.ResponseWriter, r *http.Request) {
···
20
21
21
22
goodFirstIssueLabel := s.config.Label.GoodFirstIssue
22
23
23
-
gfiLabelDef, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", goodFirstIssueLabel))
24
+
gfiLabelDef, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", goodFirstIssueLabel))
24
25
if err != nil {
25
26
log.Println("failed to get gfi label def", err)
26
27
s.pages.Error500(w)
27
28
return
28
29
}
29
30
30
-
repoLabels, err := db.GetRepoLabels(s.db, db.FilterEq("label_at", goodFirstIssueLabel))
31
+
repoLabels, err := db.GetRepoLabels(s.db, orm.FilterEq("label_at", goodFirstIssueLabel))
31
32
if err != nil {
32
33
log.Println("failed to get repo labels", err)
33
34
s.pages.Error503(w)
···
55
56
pagination.Page{
56
57
Limit: 500,
57
58
},
58
-
db.FilterIn("repo_at", repoUris),
59
-
db.FilterEq("open", 1),
59
+
orm.FilterIn("repo_at", repoUris),
60
+
orm.FilterEq("open", 1),
60
61
)
61
62
if err != nil {
62
63
log.Println("failed to get issues", err)
···
132
133
}
133
134
134
135
if len(uriList) > 0 {
135
-
allLabelDefs, err = db.GetLabelDefinitions(s.db, db.FilterIn("at_uri", uriList))
136
+
allLabelDefs, err = db.GetLabelDefinitions(s.db, orm.FilterIn("at_uri", uriList))
136
137
if err != nil {
137
138
log.Println("failed to fetch labels", err)
138
139
}
+17
appview/state/git_http.go
···
25
25
26
26
}
27
27
28
+
func (s *State) UploadArchive(w http.ResponseWriter, r *http.Request) {
29
+
user, ok := r.Context().Value("resolvedId").(identity.Identity)
30
+
if !ok {
31
+
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
32
+
return
33
+
}
34
+
repo := r.Context().Value("repo").(*models.Repo)
35
+
36
+
scheme := "https"
37
+
if s.config.Core.Dev {
38
+
scheme = "http"
39
+
}
40
+
41
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-archive?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42
+
s.proxyRequest(w, r, targetURL)
43
+
}
44
+
28
45
func (s *State) UploadPack(w http.ResponseWriter, r *http.Request) {
29
46
user, ok := r.Context().Value("resolvedId").(identity.Identity)
30
47
if !ok {
+4
-89
appview/state/knotstream.go
···
16
16
ec "tangled.org/core/eventconsumer"
17
17
"tangled.org/core/eventconsumer/cursor"
18
18
"tangled.org/core/log"
19
+
"tangled.org/core/orm"
19
20
"tangled.org/core/rbac"
20
-
"tangled.org/core/workflow"
21
21
22
-
"github.com/bluesky-social/indigo/atproto/syntax"
23
22
"github.com/go-git/go-git/v5/plumbing"
24
23
"github.com/posthog/posthog-go"
25
24
)
···
30
29
31
30
knots, err := db.GetRegistrations(
32
31
d,
33
-
db.FilterIsNot("registered", "null"),
32
+
orm.FilterIsNot("registered", "null"),
34
33
)
35
34
if err != nil {
36
35
return nil, err
···
66
65
switch msg.Nsid {
67
66
case tangled.GitRefUpdateNSID:
68
67
return ingestRefUpdate(d, enforcer, posthog, dev, source, msg)
69
-
case tangled.PipelineNSID:
70
-
return ingestPipeline(d, source, msg)
71
68
}
72
69
73
70
return nil
···
143
140
repos, err := db.GetRepos(
144
141
d,
145
142
0,
146
-
db.FilterEq("did", record.RepoDid),
147
-
db.FilterEq("name", record.RepoName),
143
+
orm.FilterEq("did", record.RepoDid),
144
+
orm.FilterEq("name", record.RepoName),
148
145
)
149
146
if err != nil {
150
147
return fmt.Errorf("failed to look for repo in DB (%s/%s): %w", record.RepoDid, record.RepoName, err)
···
189
186
190
187
return tx.Commit()
191
188
}
192
-
193
-
func ingestPipeline(d *db.DB, source ec.Source, msg ec.Message) error {
194
-
var record tangled.Pipeline
195
-
err := json.Unmarshal(msg.EventJson, &record)
196
-
if err != nil {
197
-
return err
198
-
}
199
-
200
-
if record.TriggerMetadata == nil {
201
-
return fmt.Errorf("empty trigger metadata: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
202
-
}
203
-
204
-
if record.TriggerMetadata.Repo == nil {
205
-
return fmt.Errorf("empty repo: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
206
-
}
207
-
208
-
// does this repo have a spindle configured?
209
-
repos, err := db.GetRepos(
210
-
d,
211
-
0,
212
-
db.FilterEq("did", record.TriggerMetadata.Repo.Did),
213
-
db.FilterEq("name", record.TriggerMetadata.Repo.Repo),
214
-
)
215
-
if err != nil {
216
-
return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
217
-
}
218
-
if len(repos) != 1 {
219
-
return fmt.Errorf("incorrect number of repos returned: %d (expected 1)", len(repos))
220
-
}
221
-
if repos[0].Spindle == "" {
222
-
return fmt.Errorf("repo does not have a spindle configured yet: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
223
-
}
224
-
225
-
// trigger info
226
-
var trigger models.Trigger
227
-
var sha string
228
-
trigger.Kind = workflow.TriggerKind(record.TriggerMetadata.Kind)
229
-
switch trigger.Kind {
230
-
case workflow.TriggerKindPush:
231
-
trigger.PushRef = &record.TriggerMetadata.Push.Ref
232
-
trigger.PushNewSha = &record.TriggerMetadata.Push.NewSha
233
-
trigger.PushOldSha = &record.TriggerMetadata.Push.OldSha
234
-
sha = *trigger.PushNewSha
235
-
case workflow.TriggerKindPullRequest:
236
-
trigger.PRSourceBranch = &record.TriggerMetadata.PullRequest.SourceBranch
237
-
trigger.PRTargetBranch = &record.TriggerMetadata.PullRequest.TargetBranch
238
-
trigger.PRSourceSha = &record.TriggerMetadata.PullRequest.SourceSha
239
-
trigger.PRAction = &record.TriggerMetadata.PullRequest.Action
240
-
sha = *trigger.PRSourceSha
241
-
}
242
-
243
-
tx, err := d.Begin()
244
-
if err != nil {
245
-
return fmt.Errorf("failed to start txn: %w", err)
246
-
}
247
-
248
-
triggerId, err := db.AddTrigger(tx, trigger)
249
-
if err != nil {
250
-
return fmt.Errorf("failed to add trigger entry: %w", err)
251
-
}
252
-
253
-
pipeline := models.Pipeline{
254
-
Rkey: msg.Rkey,
255
-
Knot: source.Key(),
256
-
RepoOwner: syntax.DID(record.TriggerMetadata.Repo.Did),
257
-
RepoName: record.TriggerMetadata.Repo.Repo,
258
-
TriggerId: int(triggerId),
259
-
Sha: sha,
260
-
}
261
-
262
-
err = db.AddPipeline(tx, pipeline)
263
-
if err != nil {
264
-
return fmt.Errorf("failed to add pipeline: %w", err)
265
-
}
266
-
267
-
err = tx.Commit()
268
-
if err != nil {
269
-
return fmt.Errorf("failed to commit txn: %w", err)
270
-
}
271
-
272
-
return nil
273
-
}
+28
-21
appview/state/profile.go
···
19
19
"tangled.org/core/appview/db"
20
20
"tangled.org/core/appview/models"
21
21
"tangled.org/core/appview/pages"
22
+
"tangled.org/core/orm"
22
23
)
23
24
24
25
func (s *State) Profile(w http.ResponseWriter, r *http.Request) {
···
56
57
return nil, fmt.Errorf("failed to get profile: %w", err)
57
58
}
58
59
59
-
repoCount, err := db.CountRepos(s.db, db.FilterEq("did", did))
60
+
repoCount, err := db.CountRepos(s.db, orm.FilterEq("did", did))
60
61
if err != nil {
61
62
return nil, fmt.Errorf("failed to get repo count: %w", err)
62
63
}
63
64
64
-
stringCount, err := db.CountStrings(s.db, db.FilterEq("did", did))
65
+
stringCount, err := db.CountStrings(s.db, orm.FilterEq("did", did))
65
66
if err != nil {
66
67
return nil, fmt.Errorf("failed to get string count: %w", err)
67
68
}
68
69
69
-
starredCount, err := db.CountStars(s.db, db.FilterEq("starred_by_did", did))
70
+
starredCount, err := db.CountStars(s.db, orm.FilterEq("did", did))
70
71
if err != nil {
71
72
return nil, fmt.Errorf("failed to get starred repo count: %w", err)
72
73
}
···
86
87
startOfYear := time.Date(now.Year(), 1, 1, 0, 0, 0, 0, time.UTC)
87
88
punchcard, err := db.MakePunchcard(
88
89
s.db,
89
-
db.FilterEq("did", did),
90
-
db.FilterGte("date", startOfYear.Format(time.DateOnly)),
91
-
db.FilterLte("date", now.Format(time.DateOnly)),
90
+
orm.FilterEq("did", did),
91
+
orm.FilterGte("date", startOfYear.Format(time.DateOnly)),
92
+
orm.FilterLte("date", now.Format(time.DateOnly)),
92
93
)
93
94
if err != nil {
94
95
return nil, fmt.Errorf("failed to get punchcard for %s: %w", did, err)
···
96
97
97
98
return &pages.ProfileCard{
98
99
UserDid: did,
99
-
UserHandle: ident.Handle.String(),
100
100
Profile: profile,
101
101
FollowStatus: followStatus,
102
102
Stats: pages.ProfileStats{
···
119
119
s.pages.Error500(w)
120
120
return
121
121
}
122
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
122
+
l = l.With("profileDid", profile.UserDid)
123
123
124
124
repos, err := db.GetRepos(
125
125
s.db,
126
126
0,
127
-
db.FilterEq("did", profile.UserDid),
127
+
orm.FilterEq("did", profile.UserDid),
128
128
)
129
129
if err != nil {
130
130
l.Error("failed to fetch repos", "err", err)
···
162
162
l.Error("failed to create timeline", "err", err)
163
163
}
164
164
165
+
// populate commit counts in the timeline, using the punchcard
166
+
currentMonth := time.Now().Month()
167
+
for _, p := range profile.Punchcard.Punches {
168
+
idx := currentMonth - p.Date.Month()
169
+
if int(idx) < len(timeline.ByMonth) {
170
+
timeline.ByMonth[idx].Commits += p.Count
171
+
}
172
+
}
173
+
165
174
s.pages.ProfileOverview(w, pages.ProfileOverviewParams{
166
175
LoggedInUser: s.oauth.GetUser(r),
167
176
Card: profile,
···
180
189
s.pages.Error500(w)
181
190
return
182
191
}
183
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
192
+
l = l.With("profileDid", profile.UserDid)
184
193
185
194
repos, err := db.GetRepos(
186
195
s.db,
187
196
0,
188
-
db.FilterEq("did", profile.UserDid),
197
+
orm.FilterEq("did", profile.UserDid),
189
198
)
190
199
if err != nil {
191
200
l.Error("failed to get repos", "err", err)
···
209
218
s.pages.Error500(w)
210
219
return
211
220
}
212
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
221
+
l = l.With("profileDid", profile.UserDid)
213
222
214
-
stars, err := db.GetStars(s.db, 0, db.FilterEq("starred_by_did", profile.UserDid))
223
+
stars, err := db.GetRepoStars(s.db, 0, orm.FilterEq("did", profile.UserDid))
215
224
if err != nil {
216
225
l.Error("failed to get stars", "err", err)
217
226
s.pages.Error500(w)
···
219
228
}
220
229
var repos []models.Repo
221
230
for _, s := range stars {
222
-
if s.Repo != nil {
223
-
repos = append(repos, *s.Repo)
224
-
}
231
+
repos = append(repos, *s.Repo)
225
232
}
226
233
227
234
err = s.pages.ProfileStarred(w, pages.ProfileStarredParams{
···
240
247
s.pages.Error500(w)
241
248
return
242
249
}
243
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
250
+
l = l.With("profileDid", profile.UserDid)
244
251
245
-
strings, err := db.GetStrings(s.db, 0, db.FilterEq("did", profile.UserDid))
252
+
strings, err := db.GetStrings(s.db, 0, orm.FilterEq("did", profile.UserDid))
246
253
if err != nil {
247
254
l.Error("failed to get strings", "err", err)
248
255
s.pages.Error500(w)
···
272
279
if err != nil {
273
280
return nil, err
274
281
}
275
-
l = l.With("profileDid", profile.UserDid, "profileHandle", profile.UserHandle)
282
+
l = l.With("profileDid", profile.UserDid)
276
283
277
284
loggedInUser := s.oauth.GetUser(r)
278
285
params := FollowsPageParams{
···
294
301
followDids = append(followDids, extractDid(follow))
295
302
}
296
303
297
-
profiles, err := db.GetProfiles(s.db, db.FilterIn("did", followDids))
304
+
profiles, err := db.GetProfiles(s.db, orm.FilterIn("did", followDids))
298
305
if err != nil {
299
306
l.Error("failed to get profiles", "followDids", followDids, "err", err)
300
307
return ¶ms, err
···
697
704
log.Printf("getting profile data for %s: %s", user.Did, err)
698
705
}
699
706
700
-
repos, err := db.GetRepos(s.db, 0, db.FilterEq("did", user.Did))
707
+
repos, err := db.GetRepos(s.db, 0, orm.FilterEq("did", user.Did))
701
708
if err != nil {
702
709
log.Printf("getting repos for %s: %s", user.Did, err)
703
710
}
+11
-5
appview/state/router.go
···
96
96
r.Mount("/", s.RepoRouter(mw))
97
97
r.Mount("/issues", s.IssuesRouter(mw))
98
98
r.Mount("/pulls", s.PullsRouter(mw))
99
-
r.Mount("/pipelines", s.PipelinesRouter())
99
+
r.Mount("/pipelines", s.PipelinesRouter(mw))
100
100
r.Mount("/labels", s.LabelsRouter())
101
101
102
102
// These routes get proxied to the knot
103
103
r.Get("/info/refs", s.InfoRefs)
104
+
r.Post("/git-upload-archive", s.UploadArchive)
104
105
r.Post("/git-upload-pack", s.UploadPack)
105
106
r.Post("/git-receive-pack", s.ReceivePack)
106
107
···
166
167
167
168
r.Mount("/settings", s.SettingsRouter())
168
169
r.Mount("/strings", s.StringsRouter(mw))
169
-
r.Mount("/knots", s.KnotsRouter())
170
-
r.Mount("/spindles", s.SpindlesRouter())
170
+
171
+
r.Mount("/settings/knots", s.KnotsRouter())
172
+
r.Mount("/settings/spindles", s.SpindlesRouter())
173
+
171
174
r.Mount("/notifications", s.NotificationsRouter(mw))
172
175
173
176
r.Mount("/signup", s.SignupRouter())
···
261
264
issues := issues.New(
262
265
s.oauth,
263
266
s.repoResolver,
267
+
s.enforcer,
264
268
s.pages,
265
269
s.idResolver,
270
+
s.mentionsResolver,
266
271
s.db,
267
272
s.config,
268
273
s.notifier,
···
279
284
s.repoResolver,
280
285
s.pages,
281
286
s.idResolver,
287
+
s.mentionsResolver,
282
288
s.db,
283
289
s.config,
284
290
s.notifier,
···
307
313
return repo.Router(mw)
308
314
}
309
315
310
-
func (s *State) PipelinesRouter() http.Handler {
316
+
func (s *State) PipelinesRouter(mw *middleware.Middleware) http.Handler {
311
317
pipes := pipelines.New(
312
318
s.oauth,
313
319
s.repoResolver,
···
319
325
s.enforcer,
320
326
log.SubLogger(s.logger, "pipelines"),
321
327
)
322
-
return pipes.Router()
328
+
return pipes.Router(mw)
323
329
}
324
330
325
331
func (s *State) LabelsRouter() http.Handler {
+91
-1
appview/state/spindlestream.go
···
17
17
ec "tangled.org/core/eventconsumer"
18
18
"tangled.org/core/eventconsumer/cursor"
19
19
"tangled.org/core/log"
20
+
"tangled.org/core/orm"
20
21
"tangled.org/core/rbac"
21
22
spindle "tangled.org/core/spindle/models"
23
+
"tangled.org/core/workflow"
22
24
)
23
25
24
26
func Spindlestream(ctx context.Context, c *config.Config, d *db.DB, enforcer *rbac.Enforcer) (*ec.Consumer, error) {
···
27
29
28
30
spindles, err := db.GetSpindles(
29
31
d,
30
-
db.FilterIsNot("verified", "null"),
32
+
orm.FilterIsNot("verified", "null"),
31
33
)
32
34
if err != nil {
33
35
return nil, err
···
61
63
func spindleIngester(ctx context.Context, logger *slog.Logger, d *db.DB) ec.ProcessFunc {
62
64
return func(ctx context.Context, source ec.Source, msg ec.Message) error {
63
65
switch msg.Nsid {
66
+
case tangled.PipelineNSID:
67
+
return ingestPipeline(logger, d, source, msg)
64
68
case tangled.PipelineStatusNSID:
65
69
return ingestPipelineStatus(ctx, logger, d, source, msg)
66
70
}
67
71
68
72
return nil
69
73
}
74
+
}
75
+
76
+
func ingestPipeline(l *slog.Logger, d *db.DB, source ec.Source, msg ec.Message) error {
77
+
var record tangled.Pipeline
78
+
err := json.Unmarshal(msg.EventJson, &record)
79
+
if err != nil {
80
+
return err
81
+
}
82
+
83
+
if record.TriggerMetadata == nil {
84
+
return fmt.Errorf("empty trigger metadata: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
85
+
}
86
+
87
+
if record.TriggerMetadata.Repo == nil {
88
+
return fmt.Errorf("empty repo: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
89
+
}
90
+
91
+
// does this repo have a spindle configured?
92
+
repos, err := db.GetRepos(
93
+
d,
94
+
0,
95
+
orm.FilterEq("did", record.TriggerMetadata.Repo.Did),
96
+
orm.FilterEq("name", record.TriggerMetadata.Repo.Repo),
97
+
)
98
+
if err != nil {
99
+
return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
100
+
}
101
+
if len(repos) != 1 {
102
+
return fmt.Errorf("incorrect number of repos returned: %d (expected 1)", len(repos))
103
+
}
104
+
if repos[0].Spindle == "" {
105
+
return fmt.Errorf("repo does not have a spindle configured yet: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
106
+
}
107
+
108
+
// trigger info
109
+
var trigger models.Trigger
110
+
var sha string
111
+
trigger.Kind = workflow.TriggerKind(record.TriggerMetadata.Kind)
112
+
switch trigger.Kind {
113
+
case workflow.TriggerKindPush:
114
+
trigger.PushRef = &record.TriggerMetadata.Push.Ref
115
+
trigger.PushNewSha = &record.TriggerMetadata.Push.NewSha
116
+
trigger.PushOldSha = &record.TriggerMetadata.Push.OldSha
117
+
sha = *trigger.PushNewSha
118
+
case workflow.TriggerKindPullRequest:
119
+
trigger.PRSourceBranch = &record.TriggerMetadata.PullRequest.SourceBranch
120
+
trigger.PRTargetBranch = &record.TriggerMetadata.PullRequest.TargetBranch
121
+
trigger.PRSourceSha = &record.TriggerMetadata.PullRequest.SourceSha
122
+
trigger.PRAction = &record.TriggerMetadata.PullRequest.Action
123
+
sha = *trigger.PRSourceSha
124
+
}
125
+
126
+
tx, err := d.Begin()
127
+
if err != nil {
128
+
return fmt.Errorf("failed to start txn: %w", err)
129
+
}
130
+
131
+
triggerId, err := db.AddTrigger(tx, trigger)
132
+
if err != nil {
133
+
return fmt.Errorf("failed to add trigger entry: %w", err)
134
+
}
135
+
136
+
// TODO: we shouldn't even use knot to identify pipelines
137
+
knot := record.TriggerMetadata.Repo.Knot
138
+
pipeline := models.Pipeline{
139
+
Rkey: msg.Rkey,
140
+
Knot: knot,
141
+
RepoOwner: syntax.DID(record.TriggerMetadata.Repo.Did),
142
+
RepoName: record.TriggerMetadata.Repo.Repo,
143
+
TriggerId: int(triggerId),
144
+
Sha: sha,
145
+
}
146
+
147
+
err = db.AddPipeline(tx, pipeline)
148
+
if err != nil {
149
+
return fmt.Errorf("failed to add pipeline: %w", err)
150
+
}
151
+
152
+
err = tx.Commit()
153
+
if err != nil {
154
+
return fmt.Errorf("failed to commit txn: %w", err)
155
+
}
156
+
157
+
l.Info("added pipeline", "pipeline", pipeline)
158
+
159
+
return nil
70
160
}
71
161
72
162
func ingestPipelineStatus(ctx context.Context, logger *slog.Logger, d *db.DB, source ec.Source, msg ec.Message) error {
+9
-13
appview/state/star.go
···
57
57
log.Println("created atproto record: ", resp.Uri)
58
58
59
59
star := &models.Star{
60
-
StarredByDid: currentUser.Did,
61
-
RepoAt: subjectUri,
62
-
Rkey: rkey,
60
+
Did: currentUser.Did,
61
+
RepoAt: subjectUri,
62
+
Rkey: rkey,
63
63
}
64
64
65
65
err = db.AddStar(s.db, star)
···
75
75
76
76
s.notifier.NewStar(r.Context(), star)
77
77
78
-
s.pages.RepoStarFragment(w, pages.RepoStarFragmentParams{
78
+
s.pages.StarBtnFragment(w, pages.StarBtnFragmentParams{
79
79
IsStarred: true,
80
-
RepoAt: subjectUri,
81
-
Stats: models.RepoStats{
82
-
StarCount: starCount,
83
-
},
80
+
SubjectAt: subjectUri,
81
+
StarCount: starCount,
84
82
})
85
83
86
84
return
···
117
115
118
116
s.notifier.DeleteStar(r.Context(), star)
119
117
120
-
s.pages.RepoStarFragment(w, pages.RepoStarFragmentParams{
118
+
s.pages.StarBtnFragment(w, pages.StarBtnFragmentParams{
121
119
IsStarred: false,
122
-
RepoAt: subjectUri,
123
-
Stats: models.RepoStats{
124
-
StarCount: starCount,
125
-
},
120
+
SubjectAt: subjectUri,
121
+
StarCount: starCount,
126
122
})
127
123
128
124
return
+30
-24
appview/state/state.go
···
15
15
"tangled.org/core/appview/config"
16
16
"tangled.org/core/appview/db"
17
17
"tangled.org/core/appview/indexer"
18
+
"tangled.org/core/appview/mentions"
18
19
"tangled.org/core/appview/models"
19
20
"tangled.org/core/appview/notify"
20
21
dbnotify "tangled.org/core/appview/notify/db"
···
29
30
"tangled.org/core/jetstream"
30
31
"tangled.org/core/log"
31
32
tlog "tangled.org/core/log"
33
+
"tangled.org/core/orm"
32
34
"tangled.org/core/rbac"
33
35
"tangled.org/core/tid"
34
36
···
42
44
)
43
45
44
46
type State struct {
45
-
db *db.DB
46
-
notifier notify.Notifier
47
-
indexer *indexer.Indexer
48
-
oauth *oauth.OAuth
49
-
enforcer *rbac.Enforcer
50
-
pages *pages.Pages
51
-
idResolver *idresolver.Resolver
52
-
posthog posthog.Client
53
-
jc *jetstream.JetstreamClient
54
-
config *config.Config
55
-
repoResolver *reporesolver.RepoResolver
56
-
knotstream *eventconsumer.Consumer
57
-
spindlestream *eventconsumer.Consumer
58
-
logger *slog.Logger
59
-
validator *validator.Validator
47
+
db *db.DB
48
+
notifier notify.Notifier
49
+
indexer *indexer.Indexer
50
+
oauth *oauth.OAuth
51
+
enforcer *rbac.Enforcer
52
+
pages *pages.Pages
53
+
idResolver *idresolver.Resolver
54
+
mentionsResolver *mentions.Resolver
55
+
posthog posthog.Client
56
+
jc *jetstream.JetstreamClient
57
+
config *config.Config
58
+
repoResolver *reporesolver.RepoResolver
59
+
knotstream *eventconsumer.Consumer
60
+
spindlestream *eventconsumer.Consumer
61
+
logger *slog.Logger
62
+
validator *validator.Validator
60
63
}
61
64
62
65
func Make(ctx context.Context, config *config.Config) (*State, error) {
···
96
99
}
97
100
validator := validator.New(d, res, enforcer)
98
101
99
-
repoResolver := reporesolver.New(config, enforcer, res, d)
102
+
repoResolver := reporesolver.New(config, enforcer, d)
103
+
104
+
mentionsResolver := mentions.New(config, res, d, log.SubLogger(logger, "mentionsResolver"))
100
105
101
106
wrapper := db.DbWrapper{Execer: d}
102
107
jc, err := jetstream.NewJetstreamClient(
···
178
183
enforcer,
179
184
pages,
180
185
res,
186
+
mentionsResolver,
181
187
posthog,
182
188
jc,
183
189
config,
···
294
300
return
295
301
}
296
302
297
-
gfiLabel, err := db.GetLabelDefinition(s.db, db.FilterEq("at_uri", s.config.Label.GoodFirstIssue))
303
+
gfiLabel, err := db.GetLabelDefinition(s.db, orm.FilterEq("at_uri", s.config.Label.GoodFirstIssue))
298
304
if err != nil {
299
305
// non-fatal
300
306
}
···
318
324
319
325
regs, err := db.GetRegistrations(
320
326
s.db,
321
-
db.FilterEq("did", user.Did),
322
-
db.FilterEq("needs_upgrade", 1),
327
+
orm.FilterEq("did", user.Did),
328
+
orm.FilterEq("needs_upgrade", 1),
323
329
)
324
330
if err != nil {
325
331
l.Error("non-fatal: failed to get registrations", "err", err)
···
327
333
328
334
spindles, err := db.GetSpindles(
329
335
s.db,
330
-
db.FilterEq("owner", user.Did),
331
-
db.FilterEq("needs_upgrade", 1),
336
+
orm.FilterEq("owner", user.Did),
337
+
orm.FilterEq("needs_upgrade", 1),
332
338
)
333
339
if err != nil {
334
340
l.Error("non-fatal: failed to get spindles", "err", err)
···
499
505
// Check for existing repos
500
506
existingRepo, err := db.GetRepo(
501
507
s.db,
502
-
db.FilterEq("did", user.Did),
503
-
db.FilterEq("name", repoName),
508
+
orm.FilterEq("did", user.Did),
509
+
orm.FilterEq("name", repoName),
504
510
)
505
511
if err == nil && existingRepo != nil {
506
512
l.Info("repo exists")
···
660
666
}
661
667
662
668
func BackfillDefaultDefs(e db.Execer, r *idresolver.Resolver, defaults []string) error {
663
-
defaultLabels, err := db.GetLabelDefinitions(e, db.FilterIn("at_uri", defaults))
669
+
defaultLabels, err := db.GetLabelDefinitions(e, orm.FilterIn("at_uri", defaults))
664
670
if err != nil {
665
671
return err
666
672
}
+21
-8
appview/strings/strings.go
···
17
17
"tangled.org/core/appview/pages"
18
18
"tangled.org/core/appview/pages/markup"
19
19
"tangled.org/core/idresolver"
20
+
"tangled.org/core/orm"
20
21
"tangled.org/core/tid"
21
22
22
23
"github.com/bluesky-social/indigo/api/atproto"
···
108
109
strings, err := db.GetStrings(
109
110
s.Db,
110
111
0,
111
-
db.FilterEq("did", id.DID),
112
-
db.FilterEq("rkey", rkey),
112
+
orm.FilterEq("did", id.DID),
113
+
orm.FilterEq("rkey", rkey),
113
114
)
114
115
if err != nil {
115
116
l.Error("failed to fetch string", "err", err)
···
148
149
showRendered = r.URL.Query().Get("code") != "true"
149
150
}
150
151
152
+
starCount, err := db.GetStarCount(s.Db, string.AtUri())
153
+
if err != nil {
154
+
l.Error("failed to get star count", "err", err)
155
+
}
156
+
user := s.OAuth.GetUser(r)
157
+
isStarred := false
158
+
if user != nil {
159
+
isStarred = db.GetStarStatus(s.Db, user.Did, string.AtUri())
160
+
}
161
+
151
162
s.Pages.SingleString(w, pages.SingleStringParams{
152
-
LoggedInUser: s.OAuth.GetUser(r),
163
+
LoggedInUser: user,
153
164
RenderToggle: renderToggle,
154
165
ShowRendered: showRendered,
155
-
String: string,
166
+
String: &string,
156
167
Stats: string.Stats(),
168
+
IsStarred: isStarred,
169
+
StarCount: starCount,
157
170
Owner: id,
158
171
})
159
172
}
···
187
200
all, err := db.GetStrings(
188
201
s.Db,
189
202
0,
190
-
db.FilterEq("did", id.DID),
191
-
db.FilterEq("rkey", rkey),
203
+
orm.FilterEq("did", id.DID),
204
+
orm.FilterEq("rkey", rkey),
192
205
)
193
206
if err != nil {
194
207
l.Error("failed to fetch string", "err", err)
···
396
409
397
410
if err := db.DeleteString(
398
411
s.Db,
399
-
db.FilterEq("did", user.Did),
400
-
db.FilterEq("rkey", rkey),
412
+
orm.FilterEq("did", user.Did),
413
+
orm.FilterEq("rkey", rkey),
401
414
); err != nil {
402
415
fail("Failed to delete string.", err)
403
416
return
+2
-1
appview/validator/issue.go
···
6
6
7
7
"tangled.org/core/appview/db"
8
8
"tangled.org/core/appview/models"
9
+
"tangled.org/core/orm"
9
10
)
10
11
11
12
func (v *Validator) ValidateIssueComment(comment *models.IssueComment) error {
12
13
// if comments have parents, only ingest ones that are 1 level deep
13
14
if comment.ReplyTo != nil {
14
-
parents, err := db.GetIssueComments(v.db, db.FilterEq("at_uri", *comment.ReplyTo))
15
+
parents, err := db.GetIssueComments(v.db, orm.FilterEq("at_uri", *comment.ReplyTo))
15
16
if err != nil {
16
17
return fmt.Errorf("failed to fetch parent comment: %w", err)
17
18
}
+1
-34
crypto/verify.go
···
5
5
"crypto/sha256"
6
6
"encoding/base64"
7
7
"fmt"
8
-
"strings"
9
8
10
9
"github.com/hiddeco/sshsig"
11
10
"golang.org/x/crypto/ssh"
12
-
"tangled.org/core/types"
13
11
)
14
12
15
13
func VerifySignature(pubKey, signature, payload []byte) (error, bool) {
···
28
26
// multiple algorithms but sha-512 is most secure, and git's ssh signing defaults
29
27
// to sha-512 for all key types anyway.
30
28
err = sshsig.Verify(buf, sig, pub, sshsig.HashSHA512, "git")
31
-
return err, err == nil
32
-
}
33
29
34
-
// VerifyCommitSignature reconstructs the payload used to sign a commit. This is
35
-
// essentially the git cat-file output but without the gpgsig header.
36
-
//
37
-
// Caveats: signature verification will fail on commits with more than one parent,
38
-
// i.e. merge commits, because types.NiceDiff doesn't carry more than one Parent field
39
-
// and we are unable to reconstruct the payload correctly.
40
-
//
41
-
// Ideally this should directly operate on an *object.Commit.
42
-
func VerifyCommitSignature(pubKey string, commit types.NiceDiff) (error, bool) {
43
-
signature := commit.Commit.PGPSignature
44
-
45
-
author := bytes.NewBuffer([]byte{})
46
-
committer := bytes.NewBuffer([]byte{})
47
-
commit.Commit.Author.Encode(author)
48
-
commit.Commit.Committer.Encode(committer)
49
-
50
-
payload := strings.Builder{}
51
-
52
-
fmt.Fprintf(&payload, "tree %s\n", commit.Commit.Tree)
53
-
if commit.Commit.Parent != "" {
54
-
fmt.Fprintf(&payload, "parent %s\n", commit.Commit.Parent)
55
-
}
56
-
fmt.Fprintf(&payload, "author %s\n", author.String())
57
-
fmt.Fprintf(&payload, "committer %s\n", committer.String())
58
-
if commit.Commit.ChangedId != "" {
59
-
fmt.Fprintf(&payload, "change-id %s\n", commit.Commit.ChangedId)
60
-
}
61
-
fmt.Fprintf(&payload, "\n%s", commit.Commit.Message)
62
-
63
-
return VerifySignature([]byte(pubKey), []byte(signature), []byte(payload.String()))
30
+
return err, err == nil
64
31
}
65
32
66
33
// SSHFingerprint computes the fingerprint of the supplied ssh pubkey.
+1529
docs/DOCS.md
···
1
+
---
2
+
title: Tangled docs
3
+
author: The Tangled Contributors
4
+
date: Sun, 21 Dec 2025
5
+
---
6
+
7
+
# Introduction
8
+
9
+
Tangled is a decentralized code hosting and collaboration
10
+
platform. Every component of Tangled is open-source and
11
+
self-hostable. [tangled.org](https://tangled.org) also
12
+
provides hosting and CI services that are free to use.
13
+
14
+
There are several models for decentralized code
15
+
collaboration platforms, ranging from ActivityPub's
16
+
(Forgejo) federated model, to Radicle's entirely P2P model.
17
+
Our approach attempts to be the best of both worlds by
18
+
adopting the AT Protocol, a protocol for building decentralized
19
+
social applications with a central identity.
20
+
21
+
Our approach to this is the idea of "knots". Knots are
22
+
lightweight, headless servers that enable users to host Git
23
+
repositories with ease. Knots are designed for either single
24
+
or multi-tenant use, which is perfect for self-hosting on a
25
+
Raspberry Pi at home, or larger "community" servers. By
26
+
default, Tangled provides managed knots where you can host
27
+
your repositories for free.
28
+
29
+
The appview at tangled.org acts as a consolidated "view"
30
+
into the whole network, allowing users to access, clone and
31
+
contribute to repositories hosted across different knots
32
+
seamlessly.
33
+
34
+
# Quick start guide
35
+
36
+
## Login or sign up
37
+
38
+
You can [log in](https://tangled.org) using your AT Protocol
39
+
account. If you are unclear on what that means, simply head
40
+
to the [signup](https://tangled.org/signup) page and create
41
+
an account. By doing so, you will be choosing Tangled as
42
+
your account provider (you will be granted a handle of the
43
+
form `user.tngl.sh`).
44
+
45
+
In the AT Protocol network, users are free to choose their account
46
+
provider (known as a "Personal Data Service", or PDS), and
47
+
log in to applications that support AT accounts.
48
+
49
+
You can think of it as "one account for all of the atmosphere"!
50
+
51
+
If you already have an AT account (you may have one if you
52
+
signed up to Bluesky, for example), you can log in with the
53
+
same handle on Tangled (so just use `user.bsky.social` on
54
+
the login page).
55
+
56
+
## Add an SSH key
57
+
58
+
Once you are logged in, you can start creating repositories
59
+
and pushing code. Tangled supports pushing git repositories
60
+
over SSH.
61
+
62
+
First, you'll need to generate an SSH key if you don't
63
+
already have one:
64
+
65
+
```bash
66
+
ssh-keygen -t ed25519 -C "foo@bar.com"
67
+
```
68
+
69
+
When prompted, save the key to the default location
70
+
(`~/.ssh/id_ed25519`) and optionally set a passphrase.
71
+
72
+
Copy your public key to your clipboard:
73
+
74
+
```bash
75
+
# on X11
76
+
cat ~/.ssh/id_ed25519.pub | xclip -sel c
77
+
78
+
# on wayland
79
+
cat ~/.ssh/id_ed25519.pub | wl-copy
80
+
81
+
# on macos
82
+
cat ~/.ssh/id_ed25519.pub | pbcopy
83
+
```
84
+
85
+
Now, navigate to 'Settings' -> 'Keys' and hit 'Add Key',
86
+
paste your public key, give it a descriptive name, and hit
87
+
save.
88
+
89
+
## Create a repository
90
+
91
+
Once your SSH key is added, create your first repository:
92
+
93
+
1. Hit the green `+` icon on the topbar, and select
94
+
repository
95
+
2. Enter a repository name
96
+
3. Add a description
97
+
4. Choose a knotserver to host this repository on
98
+
5. Hit create
99
+
100
+
Knots are self-hostable, lightweight Git servers that can
101
+
host your repository. Unlike traditional code forges, your
102
+
code can live on any server. Read the [Knots](TODO) section
103
+
for more.
104
+
105
+
## Configure SSH
106
+
107
+
To ensure Git uses the correct SSH key and connects smoothly
108
+
to Tangled, add this configuration to your `~/.ssh/config`
109
+
file:
110
+
111
+
```
112
+
Host tangled.org
113
+
Hostname tangled.org
114
+
User git
115
+
IdentityFile ~/.ssh/id_ed25519
116
+
AddressFamily inet
117
+
```
118
+
119
+
This tells SSH to use your specific key when connecting to
120
+
Tangled and prevents authentication issues if you have
121
+
multiple SSH keys.
122
+
123
+
Note that this configuration only works for knotservers that
124
+
are hosted by tangled.org. If you use a custom knot, refer
125
+
to the [Knots](TODO) section.
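For a self-hosted knot, the same idea applies. Here is a rough sketch, assuming your knot is reachable at the placeholder hostname `knot.example.com` (substitute your own hostname and key path):

```
Host knot.example.com
    Hostname knot.example.com
    User git
    IdentityFile ~/.ssh/id_ed25519
    AddressFamily inet
```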
126
+
127
+
## Push your first repository
128
+
129
+
Initialize a new Git repository:
130
+
131
+
```bash
132
+
mkdir my-project
133
+
cd my-project
134
+
135
+
git init
136
+
echo "# My Project" > README.md
137
+
```
138
+
139
+
Add some content and push!
140
+
141
+
```bash
142
+
git add README.md
143
+
git commit -m "Initial commit"
144
+
git remote add origin git@tangled.org:user.tngl.sh/my-project
145
+
git push -u origin main
146
+
```
147
+
148
+
That's it! Your code is now hosted on Tangled.
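If you want to sanity-check the push, you can clone the repository back into a separate directory; a quick sketch reusing the example handle and repository name from above:

```bash
# clone into a fresh directory to confirm the push landed
git clone git@tangled.org:user.tngl.sh/my-project my-project-check
```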
149
+
150
+
## Migrating an existing repository
151
+
152
+
Moving your repositories from GitHub, GitLab, Bitbucket, or
153
+
any other Git forge to Tangled is straightforward. You'll
154
+
simply change your repository's remote URL. At the moment,
155
+
Tangled does not have any tooling to migrate data such as
156
+
GitHub issues or pull requests.
157
+
158
+
First, create a new repository on tangled.org as described
159
+
in the [Quick Start Guide](#create-a-repository).
160
+
161
+
Navigate to your existing local repository:
162
+
163
+
```bash
164
+
cd /path/to/your/existing/repo
165
+
```
166
+
167
+
You can inspect your existing Git remote like so:
168
+
169
+
```bash
170
+
git remote -v
171
+
```
172
+
173
+
You'll see something like:
174
+
175
+
```
176
+
origin git@github.com:username/my-project (fetch)
177
+
origin git@github.com:username/my-project (push)
178
+
```
179
+
180
+
Update the remote URL to point to tangled:
181
+
182
+
```bash
183
+
git remote set-url origin git@tangled.org:user.tngl.sh/my-project
184
+
```
185
+
186
+
Verify the change:
187
+
188
+
```bash
189
+
git remote -v
190
+
```
191
+
192
+
You should now see:
193
+
194
+
```
195
+
origin git@tangled.org:user.tngl.sh/my-project (fetch)
196
+
origin git@tangled.org:user.tngl.sh/my-project (push)
197
+
```
198
+
199
+
Push all your branches and tags to Tangled:
200
+
201
+
```bash
202
+
git push -u origin --all
203
+
git push -u origin --tags
204
+
```
205
+
206
+
Your repository is now migrated to Tangled! All commit
207
+
history, branches, and tags have been preserved.
208
+
209
+
## Mirroring a repository to Tangled
210
+
211
+
If you want to maintain your repository on multiple forges
212
+
simultaneously, for example, keeping your primary repository
213
+
on GitHub while mirroring to Tangled for backup or
214
+
redundancy, you can do so by adding multiple remotes.
215
+
216
+
You can configure your local repository to push to both
217
+
Tangled and, say, GitHub. You may already have the following
218
+
setup:
219
+
220
+
```
221
+
$ git remote -v
222
+
origin git@github.com:username/my-project (fetch)
223
+
origin git@github.com:username/my-project (push)
224
+
```
225
+
226
+
Now add Tangled as an additional push URL to the same
227
+
remote:
228
+
229
+
```bash
230
+
git remote set-url --add --push origin git@tangled.org:user.tngl.sh/my-project
231
+
```
232
+
233
+
You also need to re-add the original URL as a push
234
+
destination (Git replaces the push URL when you use `--add`
235
+
the first time):
236
+
237
+
```bash
238
+
git remote set-url --add --push origin git@github.com:username/my-project
239
+
```
240
+
241
+
Verify your configuration:
242
+
243
+
```
244
+
$ git remote -v
245
+
origin git@github.com:username/my-project (fetch)
246
+
origin git@tangled.org:user.tngl.sh/my-project (push)
247
+
origin git@github.com:username/my-project (push)
248
+
```
249
+
250
+
Notice that there's one fetch URL (the primary remote) and
251
+
two push URLs. Now, whenever you push, Git will
252
+
automatically push to both remotes:
253
+
254
+
```bash
255
+
git push origin main
256
+
```
257
+
258
+
This single command pushes your `main` branch to both GitHub
259
+
and Tangled simultaneously.
260
+
261
+
To push all branches and tags:
262
+
263
+
```bash
264
+
git push origin --all
265
+
git push origin --tags
266
+
```
267
+
268
+
If you prefer more control over which remote you push to,
269
+
you can maintain separate remotes:
270
+
271
+
```bash
272
+
git remote add github git@github.com:username/my-project
273
+
git remote add tangled git@tangled.org:username/my-project
274
+
```
275
+
276
+
Then push to each explicitly:
277
+
278
+
```bash
279
+
git push github main
280
+
git push tangled main
281
+
```
282
+
283
+
# Knot self-hosting guide
284
+
285
+
So you want to run your own knot server? Great! Here are a few prerequisites:
286
+
287
+
1. A server of some kind (a VPS, a Raspberry Pi, etc.), preferably running a Linux distribution.
288
+
2. A (sub)domain name. People generally use `knot.example.com`.
289
+
3. A valid SSL certificate for your domain.
290
+
291
+
## NixOS
292
+
293
+
Refer to the [knot
294
+
module](https://tangled.org/tangled.org/core/blob/master/nix/modules/knot.nix)
295
+
for a full list of options. Sample configurations:
296
+
297
+
- [The test VM](https://tangled.org/tangled.org/core/blob/master/nix/vm.nix#L85)
298
+
- [@pyrox.dev/nix](https://tangled.org/pyrox.dev/nix/blob/d19571cc1b5fe01035e1e6951ec8cf8a476b4dee/hosts/marvin/services/tangled.nix#L15-25)
299
+
300
+
## Docker
301
+
302
+
Refer to
303
+
[@tangled.org/knot-docker](https://tangled.org/@tangled.org/knot-docker).
304
+
Note that this is community maintained.
305
+
306
+
## Manual setup
307
+
308
+
First, clone this repository:
309
+
310
+
```
311
+
git clone https://tangled.org/@tangled.org/core
312
+
```
313
+
314
+
Then, build the `knot` CLI. This is the knot administration
315
+
and operation tool. For the purpose of this guide, we're
316
+
only concerned with these subcommands:
317
+
318
+
* `knot server`: the main knot server process, typically
319
+
run as a supervised service
320
+
* `knot guard`: handles role-based access control for git
321
+
over SSH (you'll never have to run this yourself)
322
+
* `knot keys`: fetches SSH keys associated with your knot;
323
+
we'll use this to generate the SSH
324
+
`AuthorizedKeysCommand`
325
+
326
+
```
327
+
cd core
328
+
export CGO_ENABLED=1
329
+
go build -o knot ./cmd/knot
330
+
```
331
+
332
+
Next, move the `knot` binary to a location owned by `root` --
333
+
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
334
+
335
+
```
336
+
sudo mv knot /usr/local/bin/knot
337
+
sudo chown root:root /usr/local/bin/knot
338
+
```
339
+
340
+
This is necessary because SSH `AuthorizedKeysCommand` requires [really
341
+
specific permissions](https://stackoverflow.com/a/27638306). The
342
+
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
343
+
retrieve a user's public SSH keys dynamically for authentication. Let's
344
+
set that up.
345
+
346
+
```
347
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
348
+
Match User git
349
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
350
+
AuthorizedKeysCommandUser nobody
351
+
EOF
352
+
```
353
+
354
+
Then, reload `sshd`:
355
+
356
+
```
357
+
sudo systemctl reload ssh
358
+
```
359
+
360
+
Next, create the `git` user. We'll use the `git` user's home directory
361
+
to store repositories:
362
+
363
+
```
364
+
sudo adduser git
365
+
```
366
+
367
+
Create `/home/git/.knot.env` with the following, updating the values as
368
+
necessary. The `KNOT_SERVER_OWNER` should be set to your
369
+
DID; you can find your DID on the [Settings](https://tangled.org/settings) page.
370
+
371
+
```
372
+
KNOT_REPO_SCAN_PATH=/home/git
373
+
KNOT_SERVER_HOSTNAME=knot.example.com
374
+
APPVIEW_ENDPOINT=https://tangled.org
375
+
KNOT_SERVER_OWNER=did:plc:foobar
376
+
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
377
+
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
378
+
```
379
+
380
+
If you run a Linux distribution that uses systemd, you can use the provided
381
+
service file to run the server. Copy
382
+
[`knotserver.service`](/systemd/knotserver.service)
383
+
to `/etc/systemd/system/`. Then, run:
384
+
385
+
```
386
+
systemctl enable knotserver
387
+
systemctl start knotserver
388
+
```
389
+
390
+
The last step is to configure a reverse proxy like Nginx or Caddy to front your
391
+
knot. Here's an example configuration for Nginx:
392
+
393
+
```
394
+
server {
395
+
listen 80;
396
+
listen [::]:80;
397
+
server_name knot.example.com;
398
+
399
+
location / {
400
+
proxy_pass http://localhost:5555;
401
+
proxy_set_header Host $host;
402
+
proxy_set_header X-Real-IP $remote_addr;
403
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
404
+
proxy_set_header X-Forwarded-Proto $scheme;
405
+
}
406
+
407
+
# wss endpoint for git events
408
+
location /events {
409
+
proxy_set_header X-Forwarded-For $remote_addr;
410
+
proxy_set_header Host $http_host;
411
+
proxy_set_header Upgrade websocket;
412
+
proxy_set_header Connection Upgrade;
413
+
proxy_pass http://localhost:5555;
414
+
}
415
+
# additional config for SSL/TLS go here.
416
+
}
417
+
418
+
```
419
+
420
+
Remember to use Let's Encrypt or similar to procure a certificate for your
421
+
knot domain.
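
If you'd rather use Caddy, a minimal equivalent looks something like the sketch below; Caddy provisions certificates automatically and proxies WebSocket upgrades without extra directives (adjust the domain and port to your setup):

```
knot.example.com {
    reverse_proxy localhost:5555
}
```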
422
+
423
+
You should now have a running knot server! You can finalize
424
+
your registration by hitting the `verify` button on the
425
+
[/settings/knots](https://tangled.org/settings/knots) page. This simply creates
426
+
a record on your PDS to announce the existence of the knot.
427
+
428
+
### Custom paths
429
+
430
+
(This section applies to manual setup only. Docker users should edit the mounts
431
+
in `docker-compose.yml` instead.)
432
+
433
+
Right now, the database and repositories of your knot live in `/home/git`. You
434
+
can move these paths if you'd like to store them in another folder. Be careful
435
+
when adjusting these paths:
436
+
437
+
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
438
+
any possible side effects. Remember to restart it once you're done.
439
+
* Make backups before moving in case something goes wrong.
440
+
* Make sure the `git` user can read and write from the new paths.
441
+
442
+
#### Database
443
+
444
+
As an example, let's say the current database is at `/home/git/knotserver.db`,
445
+
and we want to move it to `/home/git/database/knotserver.db`.
446
+
447
+
Copy the current database to the new location. Make sure to copy the `.db-shm`
448
+
and `.db-wal` files if they exist.
449
+
450
+
```
451
+
mkdir /home/git/database
452
+
cp /home/git/knotserver.db* /home/git/database
453
+
```
454
+
455
+
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
456
+
the new file path (_not_ the directory):
457
+
458
+
```
459
+
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
460
+
```
461
+
462
+
#### Repositories
463
+
464
+
As an example, let's say the repositories are currently in `/home/git`, and we
465
+
want to move them into `/home/git/repositories`.
466
+
467
+
Create the new folder, then move the existing repositories (if there are any):
468
+
469
+
```
470
+
mkdir /home/git/repositories
471
+
# move all DIDs into the new folder; these will vary for you!
472
+
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
473
+
```
474
+
475
+
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
476
+
to the new directory:
477
+
478
+
```
479
+
KNOT_REPO_SCAN_PATH=/home/git/repositories
480
+
```
481
+
482
+
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
483
+
repository path:
484
+
485
+
```
486
+
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
487
+
Match User git
488
+
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
489
+
AuthorizedKeysCommandUser nobody
490
+
EOF
491
+
```
492
+
493
+
Make sure to restart your SSH server!
494
+
495
+
#### MOTD (message of the day)
496
+
497
+
To configure the MOTD used ("Welcome to this knot!" by default), edit the
498
+
`/home/git/motd` file:
499
+
500
+
```
501
+
printf "Hi from this knot!\n" > /home/git/motd
502
+
```
503
+
504
+
Note that you should add a newline at the end if setting a non-empty message
505
+
since the knot won't do this for you.
506
+
507
+
# Spindles
508
+
509
+
## Pipelines
510
+
511
+
Spindle workflows allow you to write CI/CD pipelines in a
512
+
simple format. They're located in the `.tangled/workflows`
513
+
directory at the root of your repository, and are defined
514
+
using YAML.
515
+
516
+
The fields are:
517
+
518
+
- [Trigger](#trigger): A **required** field that defines
519
+
when a workflow should be triggered.
520
+
- [Engine](#engine): A **required** field that defines which
521
+
engine a workflow should run on.
522
+
- [Clone options](#clone-options): An **optional** field
523
+
that defines how the repository should be cloned.
524
+
- [Dependencies](#dependencies): An **optional** field that
525
+
allows you to list dependencies you may need.
526
+
- [Environment](#environment): An **optional** field that
527
+
allows you to define environment variables.
528
+
- [Steps](#steps): An **optional** field that allows you to
529
+
define what steps should run in the workflow.
530
+
531
+
### Trigger
532
+
533
+
The first thing to add to a workflow is the trigger, which
534
+
defines when a workflow runs. This is defined using a `when`
535
+
field, which takes in a list of conditions. Each condition
536
+
has the following fields:
537
+
538
+
- `event`: This is a **required** field that defines when
539
+
your workflow should run. It's a list that can take one or
540
+
more of the following values:
541
+
- `push`: The workflow should run every time a commit is
542
+
pushed to the repository.
543
+
- `pull_request`: The workflow should run every time a
544
+
pull request is made or updated.
545
+
- `manual`: The workflow can be triggered manually.
546
+
- `branch`: Defines which branches the workflow should run
547
+
for. If used with the `push` event, commits to the
548
+
branch(es) listed here will trigger the workflow. If used
549
+
with the `pull_request` event, updates to pull requests
550
+
targeting the branch(es) listed here will trigger the
551
+
workflow. This field has no effect with the `manual`
552
+
event. Supports glob patterns using `*` and `**` (e.g.,
553
+
`main`, `develop`, `release-*`). Either `branch` or `tag`
554
+
(or both) must be specified for `push` events.
555
+
- `tag`: Defines which tags the workflow should run for.
556
+
Only used with the `push` event - when tags matching the
557
+
pattern(s) listed here are pushed, the workflow will
558
+
trigger. This field has no effect with `pull_request` or
559
+
`manual` events. Supports glob patterns using `*` and `**`
560
+
(e.g., `v*`, `v1.*`, `release-**`). Either `branch` or
561
+
`tag` (or both) must be specified for `push` events.
562
+
563
+
For example, if you'd like to define a workflow that runs
564
+
when commits are pushed to the `main` and `develop`
565
+
branches, or when pull requests that target the `main`
566
+
branch are updated, or manually, you can do so with:
567
+
568
+
```yaml
569
+
when:
570
+
- event: ["push", "manual"]
571
+
branch: ["main", "develop"]
572
+
- event: ["pull_request"]
573
+
branch: ["main"]
574
+
```
575
+
576
+
You can also trigger workflows on tag pushes. For instance,
577
+
to run a deployment workflow when tags matching `v*` are
578
+
pushed:
579
+
580
+
```yaml
581
+
when:
582
+
- event: ["push"]
583
+
tag: ["v*"]
584
+
```
585
+
586
+
You can even combine branch and tag patterns in a single
587
+
constraint (the workflow triggers if either matches):
588
+
589
+
```yaml
590
+
when:
591
+
- event: ["push"]
592
+
branch: ["main", "release-*"]
593
+
tag: ["v*", "stable"]
594
+
```
595
+
596
+
### Engine
597
+
598
+
Next is the engine on which the workflow should run, defined
599
+
using the **required** `engine` field. The currently
600
+
supported engines are:
601
+
602
+
- `nixery`: This uses an instance of
603
+
[Nixery](https://nixery.dev) to run steps, which allows
604
+
you to add [dependencies](#dependencies) from
605
+
Nixpkgs (https://github.com/NixOS/nixpkgs). You can
606
+
search for packages on https://search.nixos.org, and
607
+
there's a pretty good chance the package(s) you're looking
608
+
for will be there.
609
+
610
+
Example:
611
+
612
+
```yaml
613
+
engine: "nixery"
614
+
```
615
+
616
+
### Clone options
617
+
618
+
When a workflow starts, the first step is to clone the
619
+
repository. You can customize this behavior using the
620
+
**optional** `clone` field. It has the following fields:
621
+
622
+
- `skip`: Setting this to `true` will skip cloning the
623
+
repository. This can be useful if your workflow is doing
624
+
something that doesn't require anything from the
625
+
repository itself. This is `false` by default.
626
+
- `depth`: This sets the number of commits, or the "clone
627
+
depth", to fetch from the repository. For example, if you
628
+
set this to 2, the last 2 commits will be fetched. By
629
+
default, the depth is set to 1, meaning only the most
630
+
recent commit will be fetched, which is the commit that
631
+
triggered the workflow.
632
+
- `submodules`: If you use Git submodules
633
+
(https://git-scm.com/book/en/v2/Git-Tools-Submodules)
634
+
in your repository, setting this field to `true` will
635
+
recursively fetch all submodules. This is `false` by
636
+
default.
637
+
638
+
The default settings are:
639
+
640
+
```yaml
641
+
clone:
642
+
skip: false
643
+
depth: 1
644
+
submodules: false
645
+
```
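
As an illustration, a workflow that needs more history and pulls in submodules could override those defaults like so:

```yaml
clone:
  depth: 50
  submodules: true
```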
646
+
647
+
### Dependencies
648
+
649
+
Usually when you're running a workflow, you'll need
650
+
additional dependencies. The `dependencies` field lets you
651
+
define which dependencies to get, and from where. It's a
652
+
key-value map, with the key being the registry to fetch
653
+
dependencies from, and the value being the list of
654
+
dependencies to fetch.
655
+
656
+
Say you want to fetch Node.js and Go from `nixpkgs`, and a
657
+
package called `my_pkg` that you've made, hosted in your own
658
+
registry at
659
+
`https://tangled.org/@example.com/my_pkg`. You can define
660
+
those dependencies like so:
661
+
662
+
```yaml
663
+
dependencies:
664
+
# nixpkgs
665
+
nixpkgs:
666
+
- nodejs
667
+
- go
668
+
# custom registry
669
+
git+https://tangled.org/@example.com/my_pkg:
670
+
- my_pkg
671
+
```
672
+
673
+
Now these dependencies are available to use in your
674
+
workflow!
675
+
676
+
### Environment
677
+
678
+
The `environment` field allows you to define environment
679
+
variables that will be available throughout the entire
680
+
workflow. **Do not put secrets here, these environment
681
+
variables are visible to anyone viewing the repository. You
682
+
can add secrets for pipelines in your repository's
683
+
settings.**
684
+
685
+
Example:
686
+
687
+
```yaml
688
+
environment:
689
+
GOOS: "linux"
690
+
GOARCH: "arm64"
691
+
NODE_ENV: "production"
692
+
MY_ENV_VAR: "MY_ENV_VALUE"
693
+
```
694
+
695
+
### Steps
696
+
697
+
The `steps` field allows you to define what steps should run
698
+
in the workflow. It's a list of step objects, each with the
699
+
following fields:
700
+
701
+
- `name`: This field allows you to give your step a name.
702
+
This name is visible in your workflow runs, and is used to
703
+
describe what the step is doing.
704
+
- `command`: This field allows you to define a command to
705
+
run in that step. The step is run in a Bash shell, and the
706
+
logs from the command will be visible in the pipelines
707
+
page on the Tangled website. The
708
+
[dependencies](#dependencies) you added will be available
709
+
to use here.
710
+
- `environment`: Similar to the global
711
+
[environment](#environment) config, this **optional**
712
+
field is a key-value map that allows you to set
713
+
environment variables for the step. **Do not put secrets
714
+
here, these environment variables are visible to anyone
715
+
viewing the repository. You can add secrets for pipelines
716
+
in your repository's settings.**
717
+
718
+
Example:
719
+
720
+
```yaml
721
+
steps:
722
+
- name: "Build backend"
723
+
command: "go build"
724
+
environment:
725
+
GOOS: "darwin"
726
+
GOARCH: "arm64"
727
+
- name: "Build frontend"
728
+
command: "npm run build"
729
+
environment:
730
+
NODE_ENV: "production"
731
+
```
732
+
733
+
### Complete workflow
734
+
735
+
```yaml
736
+
# .tangled/workflows/build.yml
737
+
738
+
when:
739
+
- event: ["push", "manual"]
740
+
branch: ["main", "develop"]
741
+
- event: ["pull_request"]
742
+
branch: ["main"]
743
+
744
+
engine: "nixery"
745
+
746
+
# using the default values
747
+
clone:
748
+
skip: false
749
+
depth: 1
750
+
submodules: false
751
+
752
+
dependencies:
753
+
# nixpkgs
754
+
nixpkgs:
755
+
- nodejs
756
+
- go
757
+
# custom registry
758
+
git+https://tangled.org/@example.com/my_pkg:
759
+
- my_pkg
760
+
761
+
environment:
762
+
GOOS: "linux"
763
+
GOARCH: "arm64"
764
+
NODE_ENV: "production"
765
+
MY_ENV_VAR: "MY_ENV_VALUE"
766
+
767
+
steps:
768
+
- name: "Build backend"
769
+
command: "go build"
770
+
environment:
771
+
GOOS: "darwin"
772
+
GOARCH: "arm64"
773
+
- name: "Build frontend"
774
+
command: "npm run build"
775
+
environment:
776
+
NODE_ENV: "production"
777
+
```
778
+
779
+
If you want another example of a workflow, you can look at
780
+
the one [Tangled uses to build the
781
+
project](https://tangled.org/@tangled.org/core/blob/master/.tangled/workflows/build.yml).
782
+
783
+
## Self-hosting guide
784
+
785
+
### Prerequisites
786
+
787
+
* Go
788
+
* Docker (the only supported backend currently)
789
+
790
+
### Configuration
791
+
792
+
Spindle is configured using environment variables. The following environment variables are available:
793
+
794
+
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
795
+
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
796
+
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
797
+
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
798
+
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
799
+
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
800
+
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
801
+
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
802
+
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
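
For example, a configuration that sets the required values and overrides a couple of the defaults might look like this (the hostname and DID are placeholders):

```shell
export SPINDLE_SERVER_HOSTNAME="spindle.example.com"
export SPINDLE_SERVER_OWNER="did:plc:foobar"
export SPINDLE_SERVER_DB_PATH="/var/lib/spindle/spindle.db"
export SPINDLE_PIPELINES_WORKFLOW_TIMEOUT="15m"
```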
803
+
804
+
### Running spindle
805
+
806
+
1. **Set the environment variables.** For example:
807
+
808
+
```shell
809
+
export SPINDLE_SERVER_HOSTNAME="your-hostname"
810
+
export SPINDLE_SERVER_OWNER="your-did"
811
+
```
812
+
813
+
2. **Build the Spindle binary.**
814
+
815
+
```shell
816
+
cd core
817
+
go mod download
818
+
go build -o cmd/spindle/spindle cmd/spindle/main.go
819
+
```
820
+
821
+
3. **Create the log directory.**
822
+
823
+
```shell
824
+
sudo mkdir -p /var/log/spindle
825
+
sudo chown $USER:$USER -R /var/log/spindle
826
+
```
827
+
828
+
4. **Run the Spindle binary.**
829
+
830
+
```shell
831
+
./cmd/spindle/spindle
832
+
```
833
+
834
+
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
835
+
836
+
## Architecture
837
+
838
+
Spindle is a small CI runner service. Here's a high-level overview of how it operates:
839
+
840
+
* Listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
841
+
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
842
+
* When a new repo record comes through (typically when you add a spindle to a
843
+
repo from the settings), spindle then resolves the underlying knot and
844
+
subscribes to repo events (see:
845
+
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
846
+
* The spindle engine then handles execution of the pipeline, with results and
847
+
logs beamed onto the spindle event stream over WebSocket.
848
+
849
+
### The engine
850
+
851
+
At present, the only supported backend is Docker (and Podman, if Docker
852
+
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
853
+
executes each step in the pipeline in a fresh container, with state persisted
854
+
across steps within the `/tangled/workspace` directory.
855
+
856
+
The base image for the container is constructed on the fly using
857
+
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
858
+
used packages.
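
To get a feel for how Nixery composes images, you can pull one by hand; the image path simply lists the packages to include (this example uses the public nixery.dev instance rather than the spindle default):

```bash
docker pull nixery.dev/shell/git/go
docker run --rm nixery.dev/shell/git/go bash -c 'git --version && go version'
```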
859
+
860
+
The pipeline manifest is [specified here](https://docs.tangled.org/spindles.html#pipelines).
861
+
862
+
## Secrets with openbao
863
+
864
+
This document covers setting up spindle to use OpenBao for secrets
865
+
management via OpenBao Proxy instead of the default SQLite backend.
866
+
867
+
### Overview
868
+
869
+
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
870
+
authentication automatically using AppRole credentials, while spindle
871
+
connects to the local proxy instead of directly to the OpenBao server.
872
+
873
+
This approach provides better security, automatic token renewal, and
874
+
simplified application code.
875
+
876
+
### Installation
877
+
878
+
Install OpenBao from Nixpkgs:
879
+
880
+
```bash
881
+
nix shell nixpkgs#openbao # for a local server
882
+
```
883
+
884
+
### Setup
885
+
886
+
The setup process is documented for both local development and production.
887
+
888
+
#### Local development
889
+
890
+
Start OpenBao in dev mode:
891
+
892
+
```bash
893
+
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
894
+
```
895
+
896
+
This starts OpenBao on `http://localhost:8201` with a root token.
897
+
898
+
Set up environment for bao CLI:
899
+
900
+
```bash
901
+
export BAO_ADDR=http://localhost:8201
902
+
export BAO_TOKEN=root
903
+
```
904
+
905
+
#### Production
906
+
907
+
You would typically use a systemd service with a
908
+
configuration file. Refer to
909
+
[@tangled.org/infra](https://tangled.org/@tangled.org/infra)
910
+
for how this can be achieved using Nix.
911
+
912
+
Then, initialize the bao server:
913
+
914
+
```bash
915
+
bao operator init -key-shares=1 -key-threshold=1
916
+
```
917
+
918
+
This will print out an unseal key and a root token. Save them
919
+
somewhere (like a password manager). Then unseal the vault
920
+
to begin setting it up:
921
+
922
+
```bash
923
+
bao operator unseal <unseal_key>
924
+
```
925
+
926
+
All steps below remain the same across both dev and
927
+
production setups.
928
+
929
+
#### Configure openbao server
930
+
931
+
Create the spindle KV mount:
932
+
933
+
```bash
934
+
bao secrets enable -path=spindle -version=2 kv
935
+
```
936
+
937
+
Set up AppRole authentication and policy:
938
+
939
+
Create a policy file `spindle-policy.hcl`:
940
+
941
+
```hcl
942
+
# Full access to spindle KV v2 data
943
+
path "spindle/data/*" {
944
+
capabilities = ["create", "read", "update", "delete"]
945
+
}
946
+
947
+
# Access to metadata for listing and management
948
+
path "spindle/metadata/*" {
949
+
capabilities = ["list", "read", "delete", "update"]
950
+
}
951
+
952
+
# Allow listing at root level
953
+
path "spindle/" {
954
+
capabilities = ["list"]
955
+
}
956
+
957
+
# Required for connection testing and health checks
958
+
path "auth/token/lookup-self" {
959
+
capabilities = ["read"]
960
+
}
961
+
```
962
+
963
+
Apply the policy and create an AppRole:
964
+
965
+
```bash
966
+
bao policy write spindle-policy spindle-policy.hcl
967
+
bao auth enable approle
968
+
bao write auth/approle/role/spindle \
969
+
token_policies="spindle-policy" \
970
+
token_ttl=1h \
971
+
token_max_ttl=4h \
972
+
bind_secret_id=true \
973
+
secret_id_ttl=0 \
974
+
secret_id_num_uses=0
975
+
```
976
+
977
+
Get the credentials:
978
+
979
+
```bash
980
+
# Get role ID (static)
981
+
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
982
+
983
+
# Generate secret ID
984
+
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
985
+
986
+
echo "Role ID: $ROLE_ID"
987
+
echo "Secret ID: $SECRET_ID"
988
+
```
989
+
990
+
#### Create proxy configuration
991
+
992
+
Create the credential files:
993
+
994
+
```bash
995
+
# Create directory for OpenBao files
996
+
mkdir -p /tmp/openbao
997
+
998
+
# Save credentials
999
+
echo "$ROLE_ID" > /tmp/openbao/role-id
1000
+
echo "$SECRET_ID" > /tmp/openbao/secret-id
1001
+
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
1002
+
```
1003
+
1004
+
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
1005
+
1006
+
```hcl
1007
+
# OpenBao server connection
1008
+
vault {
1009
+
address = "http://localhost:8200"
1010
+
}
1011
+
1012
+
# Auto-Auth using AppRole
1013
+
auto_auth {
1014
+
method "approle" {
1015
+
mount_path = "auth/approle"
1016
+
config = {
1017
+
role_id_file_path = "/tmp/openbao/role-id"
1018
+
secret_id_file_path = "/tmp/openbao/secret-id"
1019
+
}
1020
+
}
1021
+
1022
+
# Optional: write token to file for debugging
1023
+
sink "file" {
1024
+
config = {
1025
+
path = "/tmp/openbao/token"
1026
+
mode = 0640
1027
+
}
1028
+
}
1029
+
}
1030
+
1031
+
# Proxy listener for spindle
1032
+
listener "tcp" {
1033
+
address = "127.0.0.1:8201"
1034
+
tls_disable = true
1035
+
}
1036
+
1037
+
# Enable API proxy with auto-auth token
1038
+
api_proxy {
1039
+
use_auto_auth_token = true
1040
+
}
1041
+
1042
+
# Enable response caching
1043
+
cache {
1044
+
use_auto_auth_token = true
1045
+
}
1046
+
1047
+
# Logging
1048
+
log_level = "info"
1049
+
```
1050
+
1051
+
#### Start the proxy
1052
+
1053
+
Start OpenBao Proxy:
1054
+
1055
+
```bash
1056
+
bao proxy -config=/tmp/openbao/proxy.hcl
1057
+
```
1058
+
1059
+
The proxy will authenticate with OpenBao and start listening on
1060
+
`127.0.0.1:8201`.
1061
+
1062
+
#### Configure spindle
1063
+
1064
+
Set these environment variables for spindle:
1065
+
1066
+
```bash
1067
+
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
1068
+
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
1069
+
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
1070
+
```
1071
+
1072
+
On startup, spindle will now connect to the local proxy,
1073
+
which handles all authentication automatically.
1074
+
1075
+
### Production setup for proxy
1076
+
1077
+
For production, you'll want to run the proxy as a service:
1078
+
1079
+
Place your production configuration in
1080
+
`/etc/openbao/proxy.hcl` with proper TLS settings for the
1081
+
vault connection.
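
As a rough sketch, a unit for this could look like the following (the unit name matches the `journalctl` example in the troubleshooting section below; the binary path and service user are assumptions to adapt):

```
# /etc/systemd/system/openbao-proxy.service
[Unit]
Description=OpenBao Proxy for spindle secrets
After=network-online.target
Wants=network-online.target

[Service]
User=openbao
ExecStart=/usr/local/bin/bao proxy -config=/etc/openbao/proxy.hcl
Restart=on-failure

[Install]
WantedBy=multi-user.target
```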
1082
+
1083
+
### Verifying setup
1084
+
1085
+
Test the proxy directly:
1086
+
1087
+
```bash
1088
+
# Check proxy health
1089
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
1090
+
1091
+
# Test token lookup through proxy
1092
+
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
1093
+
```
1094
+
1095
+
Test OpenBao operations through the server:
1096
+
1097
+
```bash
1098
+
# List all secrets
1099
+
bao kv list spindle/
1100
+
1101
+
# Add a test secret via the spindle API, then check it exists
1102
+
bao kv list spindle/repos/
1103
+
1104
+
# Get a specific secret
1105
+
bao kv get spindle/repos/your_repo_path/SECRET_NAME
1106
+
```
1107
+
1108
+
### How it works
1109
+
1110
+
- Spindle connects to OpenBao Proxy on localhost (typically
1111
+
port 8200 or 8201)
1112
+
- The proxy authenticates with OpenBao using AppRole
1113
+
credentials
1114
+
- All spindle requests go through the proxy, which injects
1115
+
authentication tokens
1116
+
- Secrets are stored at
1117
+
`spindle/repos/{sanitized_repo_path}/{secret_key}`
1118
+
- Repository paths like `did:plc:alice/myrepo` become
1119
+
`did_plc_alice_myrepo`
1120
+
- The proxy handles all token renewal automatically
1121
+
- Spindle no longer manages tokens or authentication
1122
+
directly
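
Putting the path scheme together, a secret belonging to `did:plc:alice/myrepo` can be read straight from the CLI like this (the secret name is just an example):

```bash
bao kv get spindle/repos/did_plc_alice_myrepo/MY_SECRET
```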
1123
+
1124
+
### Troubleshooting
1125
+
1126
+
**Connection refused**: Check that the OpenBao Proxy is
1127
+
running and listening on the configured address.
1128
+
1129
+
**403 errors**: Verify the AppRole credentials are correct
1130
+
and the policy has the necessary permissions.
1131
+
1132
+
**404 route errors**: The spindle KV mount probably doesn't
1133
+
exist; run the mount creation step again.
1134
+
1135
+
**Proxy authentication failures**: Check the proxy logs and
1136
+
verify the role-id and secret-id files are readable and
1137
+
contain valid credentials.
1138
+
1139
+
**Secret not found after writing**: This can indicate policy
1140
+
permission issues. Verify the policy includes both
1141
+
`spindle/data/*` and `spindle/metadata/*` paths with
1142
+
appropriate capabilities.
1143
+
1144
+
Check proxy logs:
1145
+
1146
+
```bash
1147
+
# If running as systemd service
1148
+
journalctl -u openbao-proxy -f
1149
+
1150
+
# If running directly, check the console output
1151
+
```
1152
+
1153
+
Test AppRole authentication manually:
1154
+
1155
+
```bash
1156
+
bao write auth/approle/login \
1157
+
role_id="$(cat /tmp/openbao/role-id)" \
1158
+
secret_id="$(cat /tmp/openbao/secret-id)"
1159
+
```
1160
+
1161
+
# Migrating knots and spindles
1162
+
1163
+
Sometimes, non-backwards compatible changes are made to the
1164
+
knot/spindle XRPC APIs. If you host a knot or a spindle, you
1165
+
will need to follow this guide to upgrade. Typically, this
1166
+
only requires you to deploy the newest version.
1167
+
1168
+
This document is laid out in reverse-chronological order.
1169
+
Newer migration guides are listed first, and older guides
1170
+
are further down the page.
1171
+
1172
+
## Upgrading from v1.8.x
1173
+
1174
+
After v1.8.2, the HTTP API for knots and spindles has been
1175
+
deprecated and replaced with XRPC. Repositories on outdated
1176
+
knots will not be viewable from the appview. Upgrading is
1177
+
straightforward however.
1178
+
1179
+
For knots:
1180
+
1181
+
- Upgrade to the latest tag (v1.9.0 or above)
1182
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1183
+
hit the "retry" button to verify your knot
1184
+
1185
+
For spindles:
1186
+
1187
+
- Upgrade to the latest tag (v1.9.0 or above)
1188
+
- Head to the [spindle
1189
+
dashboard](https://tangled.org/settings/spindles) and hit the
1190
+
"retry" button to verify your spindle
1191
+
1192
+
## Upgrading from v1.7.x
1193
+
1194
+
After v1.7.0, knot secrets have been deprecated. You no
1195
+
longer need a secret from the appview to run a knot. All
1196
+
authorized commands to knots are managed via [Inter-Service
1197
+
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
1198
+
Knots will be read-only until upgraded.
1199
+
1200
+
Upgrading is quite easy, in essence:
1201
+
1202
+
- `KNOT_SERVER_SECRET` is no more; you can remove this
1203
+
environment variable entirely
1204
+
- `KNOT_SERVER_OWNER` is now required on boot; set this to
1205
+
your DID. You can find your DID in the
1206
+
[settings](https://tangled.org/settings) page.
1207
+
- Restart your knot once you have replaced the environment
1208
+
variable
1209
+
- Head to the [knot dashboard](https://tangled.org/settings/knots) and
1210
+
hit the "retry" button to verify your knot. This simply
1211
+
writes a `sh.tangled.knot` record to your PDS.
1212
+
1213
+
If you use the nix module, simply bump the flake to the
1214
+
latest revision, and change your config block like so:
1215
+
1216
+
```diff
1217
+
services.tangled.knot = {
1218
+
enable = true;
1219
+
server = {
1220
+
- secretFile = /path/to/secret;
1221
+
+ owner = "did:plc:foo";
1222
+
};
1223
+
};
1224
+
```
1225
+
1226
+
# Hacking on Tangled
1227
+
1228
+
We highly recommend [installing
1229
+
Nix](https://nixos.org/download/) (the package manager)
1230
+
before working on the codebase. The Nix flake provides a lot
1231
+
of helpers to get started and most importantly, builds and
1232
+
dev shells are entirely deterministic.
1233
+
1234
+
To set up your dev environment:
1235
+
1236
+
```bash
1237
+
nix develop
1238
+
```
1239
+
1240
+
Non-Nix users can look at the `devShell` attribute in the
1241
+
`flake.nix` file to determine necessary dependencies.
1242
+
1243
+
## Running the appview
1244
+
1245
+
The Nix flake also exposes a few `app` attributes (run `nix
1246
+
flake show` to see a full list of what the flake provides),
1247
+
one of the apps runs the appview with the `air`
1248
+
live-reloader:
1249
+
1250
+
```bash
1251
+
TANGLED_DEV=true nix run .#watch-appview
1252
+
1253
+
# TANGLED_DB_PATH might be of interest to point to
1254
+
# different sqlite DBs
1255
+
1256
+
# in a separate shell, you can live-reload tailwind
1257
+
nix run .#watch-tailwind
1258
+
```
1259
+
1260
+
To authenticate with the appview, you will need Redis and
1261
+
OAuth JWKs to be set up:
1262
+
1263
+
```
1264
+
# OAuth JWKs should already be set up by the Nix devshell:
1265
+
echo $TANGLED_OAUTH_CLIENT_SECRET
1266
+
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
1267
+
1268
+
echo $TANGLED_OAUTH_CLIENT_KID
1269
+
1761667908
1270
+
1271
+
# if not, you can set it up yourself:
1272
+
goat key generate -t P-256
1273
+
Key Type: P-256 / secp256r1 / ES256 private key
1274
+
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
1275
+
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
1276
+
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
1277
+
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
1278
+
1279
+
# the secret key from above
1280
+
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
1281
+
1282
+
# Run Redis in a new shell to store OAuth sessions
1283
+
redis-server
1284
+
```
1285
+
1286
+
## Running knots and spindles
1287
+
1288
+
An end-to-end knot setup requires setting up a machine with
1289
+
`sshd`, `AuthorizedKeysCommand`, and a Git user, which is
1290
+
quite cumbersome. So the Nix flake provides a
1291
+
`nixosConfiguration` to do so.
1292
+
1293
+
<details>
1294
+
<summary><strong>macOS users will have to set up a Nix Builder first</strong></summary>
1295
+
1296
+
In order to build Tangled's dev VM on macOS, you will
1297
+
first need to set up a Linux Nix builder. The recommended
1298
+
way to do so is to run a [`darwin.linux-builder`
1299
+
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
1300
+
and to register it in `nix.conf` as a builder for Linux
1301
+
with the same architecture as your Mac (`linux-aarch64` if
1302
+
you are using Apple Silicon).
1303
+
1304
+
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
1305
+
> the Tangled repo so that it doesn't conflict with the other VM. For example,
1306
+
> you can do
1307
+
>
1308
+
> ```shell
1309
+
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
1310
+
> ```
1311
+
>
1312
+
> to store the builder VM in a temporary dir.
1313
+
>
1314
+
> You should read and follow [all the other instructions](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder) to
1315
+
> avoid subtle problems.
1316
+
1317
+
Alternatively, you can use any other method to set up a
1318
+
Linux machine with Nix installed that you can `sudo ssh`
1319
+
into (in other words, root user on your Mac has to be able
1320
+
to ssh into the Linux machine without entering a password)
1321
+
and that has the same architecture as your Mac. See
1322
+
[remote builder
1323
+
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
1324
+
for how to register such a builder in `nix.conf`.
1325
+
1326
+
> WARNING: If you'd like to use
1327
+
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
1328
+
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
1329
+
> ssh` works can be tricky. It seems to be [possible with
1330
+
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
1331
+
1332
+
</details>
1333
+
1334
+
To begin, grab your DID from http://localhost:3000/settings.
1335
+
Then, set `TANGLED_VM_KNOT_OWNER` and
1336
+
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
1337
+
lightweight NixOS VM like so:
1338
+
1339
+
```bash
1340
+
nix run --impure .#vm
1341
+
1342
+
# type `poweroff` at the shell to exit the VM
1343
+
```
1344
+
1345
+
This starts a knot on port 6444 and a spindle on port 6555,
1346
+
with `ssh` exposed on port 2222.
1347
+
1348
+
Once the services are running, head to
1349
+
http://localhost:3000/settings/knots and hit "Verify". It should
1350
+
verify the ownership of the services instantly if everything
1351
+
went smoothly.
1352
+
1353
+
You can push repositories to this VM with this ssh config
1354
+
block on your main machine:
1355
+
1356
+
```bash
1357
+
Host nixos-shell
1358
+
Hostname localhost
1359
+
Port 2222
1360
+
User git
1361
+
IdentityFile ~/.ssh/my_tangled_key
1362
+
```
1363
+
1364
+
Set up a remote called `local-dev` on a git repo:
1365
+
1366
+
```bash
1367
+
git remote add local-dev git@nixos-shell:user/repo
1368
+
git push local-dev main
1369
+
```
1370
+
1371
+
The above VM should already be running a spindle on
1372
+
`localhost:6555`. Head to http://localhost:3000/settings/spindles and
1373
+
hit "Verify". You can then configure each repository to use
1374
+
this spindle and run CI jobs.
1375
+
1376
+
Of interest when debugging spindles:
1377
+
1378
+
```
1379
+
# Service logs from journald:
1380
+
journalctl -xeu spindle
1381
+
1382
+
# CI job logs from disk:
1383
+
ls /var/log/spindle
1384
+
1385
+
# Debugging spindle database:
1386
+
sqlite3 /var/lib/spindle/spindle.db
1387
+
1388
+
# litecli has a nicer REPL interface:
1389
+
litecli /var/lib/spindle/spindle.db
1390
+
```
1391
+
1392
+
If for any reason you wish to disable either one of the
1393
+
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
1394
+
`services.tangled.spindle.enable` (or
1395
+
`services.tangled.knot.enable`) to `false`.
1396
+
1397
+
# Contribution guide
1398
+
1399
+
## Commit guidelines
1400
+
1401
+
We follow a commit style similar to the Go project. Please keep commits:
1402
+
1403
+
* **atomic**: each commit should represent one logical change
1404
+
* **descriptive**: the commit message should clearly describe what the
1405
+
change does and why it's needed
1406
+
1407
+
### Message format
1408
+
1409
+
```
1410
+
<service/top-level directory>/<affected package/directory>: <short summary of change>
1411
+
1412
+
Optional longer description can go here, if necessary. Explain what the
1413
+
change does and why, especially if not obvious. Reference relevant
1414
+
issues or PRs when applicable. These can be links for now since we don't
1415
+
auto-link issues/PRs yet.
1416
+
```
1417
+
1418
+
Here are some examples:
1419
+
1420
+
```
1421
+
appview/state: fix token expiry check in middleware
1422
+
1423
+
The previous check did not account for clock drift, leading to premature
1424
+
token invalidation.
1425
+
```
1426
+
1427
+
```
1428
+
knotserver/git/service: improve error checking in upload-pack
1429
+
```
1430
+
1431
+
1432
+
### General notes
1433
+
1434
+
- PRs get merged "as-is" (fast-forward), like applying a patch-series
1435
+
using `git am`. At present, there is no squashing, so please author
1436
+
your commits as they would appear on `master`, following the above
1437
+
guidelines.
1438
+
- If there is a lot of nesting, for example "appview:
1439
+
pages/templates/repo/fragments: ...", these can be truncated down to
1440
+
just "appview: repo/fragments: ...". If the change affects a lot of
1441
+
subdirectories, you may abbreviate to just the top-level names, e.g.
1442
+
"appview: ..." or "knotserver: ...".
1443
+
- Keep commits lowercased with no trailing period.
1444
+
- Use the imperative mood in the summary line (e.g., "fix bug" not
1445
+
"fixed bug" or "fixes bug").
1446
+
- Try to keep the summary line under 72 characters, but we aren't too
1447
+
fussed about this.
1448
+
- Follow the same formatting for PR titles if filled manually.
1449
+
- Don't include unrelated changes in the same commit.
1450
+
- Avoid noisy commit messages like "wip" or "final fix"; rewrite history
1451
+
before submitting if necessary.
1452
+
1453
+
## Code formatting
1454
+
1455
+
We use a variety of tools to format our code, and multiplex them with
1456
+
[`treefmt`](https://treefmt.com). All you need to do to format your changes
1457
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
1458
+
1459
+
## Proposals for bigger changes
1460
+
1461
+
Small fixes like typos, minor bugs, or trivial refactors can be
1462
+
submitted directly as PRs.
1463
+
1464
+
For larger changes, especially those introducing new features, significant
1465
+
refactoring, or altering system behavior, please open a proposal first. This
1466
+
helps us evaluate the scope, design, and potential impact before implementation.
1467
+
1468
+
Create a new issue titled:
1469
+
1470
+
```
1471
+
proposal: <affected scope>: <summary of change>
1472
+
```
1473
+
1474
+
In the description, explain:
1475
+
1476
+
- What the change is
1477
+
- Why it's needed
1478
+
- How you plan to implement it (roughly)
1479
+
- Any open questions or tradeoffs
1480
+
1481
+
We'll use the issue thread to discuss and refine the idea before moving
1482
+
forward.
1483
+
1484
+
## Developer Certificate of Origin (DCO)
1485
+
1486
+
We require all contributors to certify that they have the right to
1487
+
submit the code they're contributing. To do this, we follow the
1488
+
[Developer Certificate of Origin
1489
+
(DCO)](https://developercertificate.org/).
1490
+
1491
+
By signing your commits, you're stating that the contribution is your
1492
+
own work, or that you have the right to submit it under the project's
1493
+
license. This helps us keep things clean and legally sound.
1494
+
1495
+
To sign your commit, just add the `-s` flag when committing:
1496
+
1497
+
```sh
1498
+
git commit -s -m "your commit message"
1499
+
```
1500
+
1501
+
This appends a line like:
1502
+
1503
+
```
1504
+
Signed-off-by: Your Name <your.email@example.com>
1505
+
```
1506
+
1507
+
We won't merge commits if they aren't signed off. If you forget, you can
1508
+
amend the last commit like this:
1509
+
1510
+
```sh
1511
+
git commit --amend -s
1512
+
```
1513
+
1514
+
If you're submitting a PR with multiple commits, make sure each one is
1515
+
signed.
1516
+
1517
+
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
1518
+
to make it sign off commits in the tangled repo:
1519
+
1520
+
```shell
1521
+
# Safety check, should say "No matching config key..."
1522
+
jj config list templates.commit_trailers
1523
+
# The command below may need to be adjusted if the command above returned something.
1524
+
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
1525
+
```
1526
+
1527
+
Refer to the [jujutsu
1528
+
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
1529
+
for more information.
-136
docs/contributing.md
···
1
-
# tangled contributing guide
2
-
3
-
## commit guidelines
4
-
5
-
We follow a commit style similar to the Go project. Please keep commits:
6
-
7
-
* **atomic**: each commit should represent one logical change
8
-
* **descriptive**: the commit message should clearly describe what the
9
-
change does and why it's needed
10
-
11
-
### message format
12
-
13
-
```
14
-
<service/top-level directory>/<affected package/directory>: <short summary of change>
15
-
16
-
17
-
Optional longer description can go here, if necessary. Explain what the
18
-
change does and why, especially if not obvious. Reference relevant
19
-
issues or PRs when applicable. These can be links for now since we don't
20
-
auto-link issues/PRs yet.
21
-
```
22
-
23
-
Here are some examples:
24
-
25
-
```
26
-
appview/state: fix token expiry check in middleware
27
-
28
-
The previous check did not account for clock drift, leading to premature
29
-
token invalidation.
30
-
```
31
-
32
-
```
33
-
knotserver/git/service: improve error checking in upload-pack
34
-
```
35
-
36
-
37
-
### general notes
38
-
39
-
- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
40
-
using `git am`. At present, there is no squashing -- so please author
41
-
your commits as they would appear on `master`, following the above
42
-
guidelines.
43
-
- If there is a lot of nesting, for example "appview:
44
-
pages/templates/repo/fragments: ...", these can be truncated down to
45
-
just "appview: repo/fragments: ...". If the change affects a lot of
46
-
subdirectories, you may abbreviate to just the top-level names, e.g.
47
-
"appview: ..." or "knotserver: ...".
48
-
- Keep commits lowercased with no trailing period.
49
-
- Use the imperative mood in the summary line (e.g., "fix bug" not
50
-
"fixed bug" or "fixes bug").
51
-
- Try to keep the summary line under 72 characters, but we aren't too
52
-
fussed about this.
53
-
- Follow the same formatting for PR titles if filled manually.
54
-
- Don't include unrelated changes in the same commit.
55
-
- Avoid noisy commit messages like "wip" or "final fix"โrewrite history
56
-
before submitting if necessary.
57
-
58
-
## code formatting
59
-
60
-
We use a variety of tools to format our code, and multiplex them with
61
-
[`treefmt`](https://treefmt.com): all you need to do to format your changes
62
-
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63
-
64
-
## proposals for bigger changes
65
-
66
-
Small fixes like typos, minor bugs, or trivial refactors can be
67
-
submitted directly as PRs.
68
-
69
-
For larger changesโespecially those introducing new features, significant
70
-
refactoring, or altering system behaviorโplease open a proposal first. This
71
-
helps us evaluate the scope, design, and potential impact before implementation.
72
-
73
-
### proposal format
74
-
75
-
Create a new issue titled:
76
-
77
-
```
78
-
proposal: <affected scope>: <summary of change>
79
-
```
80
-
81
-
In the description, explain:
82
-
83
-
- What the change is
84
-
- Why it's needed
85
-
- How you plan to implement it (roughly)
86
-
- Any open questions or tradeoffs
87
-
88
-
We'll use the issue thread to discuss and refine the idea before moving
89
-
forward.
90
-
91
-
## developer certificate of origin (DCO)
92
-
93
-
We require all contributors to certify that they have the right to
94
-
submit the code they're contributing. To do this, we follow the
95
-
[Developer Certificate of Origin
96
-
(DCO)](https://developercertificate.org/).
97
-
98
-
By signing your commits, you're stating that the contribution is your
99
-
own work, or that you have the right to submit it under the project's
100
-
license. This helps us keep things clean and legally sound.
101
-
102
-
To sign your commit, just add the `-s` flag when committing:
103
-
104
-
```sh
105
-
git commit -s -m "your commit message"
106
-
```
107
-
108
-
This appends a line like:
109
-
110
-
```
111
-
Signed-off-by: Your Name <your.email@example.com>
112
-
```
113
-
114
-
We won't merge commits if they aren't signed off. If you forget, you can
115
-
amend the last commit like this:
116
-
117
-
```sh
118
-
git commit --amend -s
119
-
```
120
-
121
-
If you're submitting a PR with multiple commits, make sure each one is
122
-
signed.
123
-
124
-
For [jj](https://jj-vcs.github.io/jj/latest/) users, you can run the following command
125
-
to make it sign off commits in the tangled repo:
126
-
127
-
```shell
128
-
# Safety check, should say "No matching config key..."
129
-
jj config list templates.commit_trailers
130
-
# The command below may need to be adjusted if the command above returned something.
131
-
jj config set --repo templates.commit_trailers "format_signed_off_by_trailer(self)"
132
-
```
133
-
134
-
Refer to the [jj
135
-
documentation](https://jj-vcs.github.io/jj/latest/config/#commit-trailers)
136
-
for more information.
-172
docs/hacking.md
···
1
-
# hacking on tangled
2
-
3
-
We highly recommend [installing
4
-
nix](https://nixos.org/download/) (the package manager)
5
-
before working on the codebase. The nix flake provides a lot
6
-
of helpers to get started and most importantly, builds and
7
-
dev shells are entirely deterministic.
8
-
9
-
To set up your dev environment:
10
-
11
-
```bash
12
-
nix develop
13
-
```
14
-
15
-
Non-nix users can look at the `devShell` attribute in the
16
-
`flake.nix` file to determine necessary dependencies.
17
-
18
-
## running the appview
19
-
20
-
The nix flake also exposes a few `app` attributes (run `nix
21
-
flake show` to see a full list of what the flake provides),
22
-
one of the apps runs the appview with the `air`
23
-
live-reloader:
24
-
25
-
```bash
26
-
TANGLED_DEV=true nix run .#watch-appview
27
-
28
-
# TANGLED_DB_PATH might be of interest to point to
29
-
# different sqlite DBs
30
-
31
-
# in a separate shell, you can live-reload tailwind
32
-
nix run .#watch-tailwind
33
-
```
34
-
35
-
To authenticate with the appview, you will need redis and
36
-
OAUTH JWKs to be setup:
37
-
38
-
```
39
-
# oauth jwks should already be setup by the nix devshell:
40
-
echo $TANGLED_OAUTH_CLIENT_SECRET
41
-
z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
42
-
43
-
echo $TANGLED_OAUTH_CLIENT_KID
44
-
1761667908
45
-
46
-
# if not, you can set it up yourself:
47
-
goat key generate -t P-256
48
-
Key Type: P-256 / secp256r1 / ES256 private key
49
-
Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
50
-
z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
51
-
Public Key (DID Key Syntax): share or publish this (eg, in DID document)
52
-
did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
53
-
54
-
# the secret key from above
55
-
export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
56
-
57
-
# run redis in at a new shell to store oauth sessions
58
-
redis-server
59
-
```
60
-
61
-
## running knots and spindles
62
-
63
-
An end-to-end knot setup requires setting up a machine with
64
-
`sshd`, `AuthorizedKeysCommand`, and git user, which is
65
-
quite cumbersome. So the nix flake provides a
66
-
`nixosConfiguration` to do so.
67
-
68
-
<details>
69
-
<summary><strong>MacOS users will have to setup a Nix Builder first</strong></summary>
70
-
71
-
In order to build Tangled's dev VM on macOS, you will
72
-
first need to set up a Linux Nix builder. The recommended
73
-
way to do so is to run a [`darwin.linux-builder`
74
-
VM](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
75
-
and to register it in `nix.conf` as a builder for Linux
76
-
with the same architecture as your Mac (`linux-aarch64` if
77
-
you are using Apple Silicon).
78
-
79
-
> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
80
-
> the tangled repo so that it doesn't conflict with the other VM. For example,
81
-
> you can do
82
-
>
83
-
> ```shell
84
-
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
85
-
> ```
86
-
>
87
-
> to store the builder VM in a temporary dir.
88
-
>
89
-
> You should read and follow [all the other intructions][darwin builder vm] to
90
-
> avoid subtle problems.
91
-
92
-
Alternatively, you can use any other method to set up a
93
-
Linux machine with `nix` installed that you can `sudo ssh`
94
-
into (in other words, root user on your Mac has to be able
95
-
to ssh into the Linux machine without entering a password)
96
-
and that has the same architecture as your Mac. See
97
-
[remote builder
98
-
instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
99
-
for how to register such a builder in `nix.conf`.
100
-
101
-
> WARNING: If you'd like to use
102
-
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
103
-
> [Orbstack](https://orbstack.dev/), note that setting them up so that `sudo
104
-
> ssh` works can be tricky. It seems to be [possible with
105
-
> Orbstack](https://github.com/orgs/orbstack/discussions/1669).
106
-
107
-
</details>
108
-
109
-
To begin, grab your DID from http://localhost:3000/settings.
110
-
Then, set `TANGLED_VM_KNOT_OWNER` and
111
-
`TANGLED_VM_SPINDLE_OWNER` to your DID. You can now start a
112
-
lightweight NixOS VM like so:
113
-
114
-
```bash
115
-
nix run --impure .#vm
116
-
117
-
# type `poweroff` at the shell to exit the VM
118
-
```
119
-
120
-
This starts a knot on port 6000, a spindle on port 6555
121
-
with `ssh` exposed on port 2222.
122
-
123
-
Once the services are running, head to
124
-
http://localhost:3000/knots and hit verify. It should
125
-
verify the ownership of the services instantly if everything
126
-
went smoothly.
127
-
128
-
You can push repositories to this VM with this ssh config
129
-
block on your main machine:
130
-
131
-
```bash
132
-
Host nixos-shell
133
-
Hostname localhost
134
-
Port 2222
135
-
User git
136
-
IdentityFile ~/.ssh/my_tangled_key
137
-
```
138
-
139
-
Set up a remote called `local-dev` on a git repo:
140
-
141
-
```bash
142
-
git remote add local-dev git@nixos-shell:user/repo
143
-
git push local-dev main
144
-
```
145
-
146
-
### running a spindle
147
-
148
-
The above VM should already be running a spindle on
149
-
`localhost:6555`. Head to http://localhost:3000/spindles and
150
-
hit verify. You can then configure each repository to use
151
-
this spindle and run CI jobs.
152
-
153
-
Of interest when debugging spindles:
154
-
155
-
```
156
-
# service logs from journald:
157
-
journalctl -xeu spindle
158
-
159
-
# CI job logs from disk:
160
-
ls /var/log/spindle
161
-
162
-
# debugging spindle db:
163
-
sqlite3 /var/lib/spindle/spindle.db
164
-
165
-
# litecli has a nicer REPL interface:
166
-
litecli /var/lib/spindle/spindle.db
167
-
```
168
-
169
-
If for any reason you wish to disable either one of the
170
-
services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
171
-
`services.tangled.spindle.enable` (or
172
-
`services.tangled.knot.enable`) to `false`.
+93
docs/highlight.theme
···
1
+
{
2
+
"text-color": null,
3
+
"background-color": null,
4
+
"line-number-color": null,
5
+
"line-number-background-color": null,
6
+
"text-styles": {
7
+
"Annotation": {
8
+
"text-color": null,
9
+
"background-color": null,
10
+
"bold": false,
11
+
"italic": true,
12
+
"underline": false
13
+
},
14
+
"ControlFlow": {
15
+
"text-color": null,
16
+
"background-color": null,
17
+
"bold": true,
18
+
"italic": false,
19
+
"underline": false
20
+
},
21
+
"Error": {
22
+
"text-color": null,
23
+
"background-color": null,
24
+
"bold": true,
25
+
"italic": false,
26
+
"underline": false
27
+
},
28
+
"Alert": {
29
+
"text-color": null,
30
+
"background-color": null,
31
+
"bold": true,
32
+
"italic": false,
33
+
"underline": false
34
+
},
35
+
"Preprocessor": {
36
+
"text-color": null,
37
+
"background-color": null,
38
+
"bold": true,
39
+
"italic": false,
40
+
"underline": false
41
+
},
42
+
"Information": {
43
+
"text-color": null,
44
+
"background-color": null,
45
+
"bold": false,
46
+
"italic": true,
47
+
"underline": false
48
+
},
49
+
"Warning": {
50
+
"text-color": null,
51
+
"background-color": null,
52
+
"bold": false,
53
+
"italic": true,
54
+
"underline": false
55
+
},
56
+
"Documentation": {
57
+
"text-color": null,
58
+
"background-color": null,
59
+
"bold": false,
60
+
"italic": true,
61
+
"underline": false
62
+
},
63
+
"DataType": {
64
+
"text-color": "#8f4e8b",
65
+
"background-color": null,
66
+
"bold": false,
67
+
"italic": false,
68
+
"underline": false
69
+
},
70
+
"Comment": {
71
+
"text-color": null,
72
+
"background-color": null,
73
+
"bold": false,
74
+
"italic": true,
75
+
"underline": false
76
+
},
77
+
"CommentVar": {
78
+
"text-color": null,
79
+
"background-color": null,
80
+
"bold": false,
81
+
"italic": true,
82
+
"underline": false
83
+
},
84
+
"Keyword": {
85
+
"text-color": null,
86
+
"background-color": null,
87
+
"bold": true,
88
+
"italic": false,
89
+
"underline": false
90
+
}
91
+
}
92
+
}
93
+
-214
docs/knot-hosting.md
-214
docs/knot-hosting.md
···
1
-
# knot self-hosting guide
2
-
3
-
So you want to run your own knot server? Great! Here are a few prerequisites:
4
-
5
-
1. A server of some kind (a VPS, a Raspberry Pi, etc.). Preferably running a Linux distribution of some kind.
6
-
2. A (sub)domain name. People generally use `knot.example.com`.
7
-
3. A valid SSL certificate for your domain.
8
-
9
-
There are a couple of ways to get started:
10
-
* NixOS: refer to
11
-
[flake.nix](https://tangled.sh/@tangled.sh/core/blob/master/flake.nix)
12
-
* Docker: Documented at
13
-
[@tangled.sh/knot-docker](https://tangled.sh/@tangled.sh/knot-docker)
14
-
(community maintained: support is not guaranteed!)
15
-
* Manual: Documented below.
16
-
17
-
## manual setup
18
-
19
-
First, clone this repository:
20
-
21
-
```
22
-
git clone https://tangled.org/@tangled.org/core
23
-
```
24
-
25
-
Then, build the `knot` CLI. This is the knot administration and operation tool.
26
-
For the purpose of this guide, we're only concerned with these subcommands:
27
-
28
-
* `knot server`: the main knot server process, typically run as a
29
-
supervised service
30
-
* `knot guard`: handles role-based access control for git over SSH
31
-
(you'll never have to run this yourself)
32
-
* `knot keys`: fetches SSH keys associated with your knot; we'll use
33
-
this to generate the SSH `AuthorizedKeysCommand`
34
-
35
-
```
36
-
cd core
37
-
export CGO_ENABLED=1
38
-
go build -o knot ./cmd/knot
39
-
```
40
-
41
-
Next, move the `knot` binary to a location owned by `root` --
42
-
`/usr/local/bin/` is a good choice. Make sure the binary itself is also owned by `root`:
43
-
44
-
```
45
-
sudo mv knot /usr/local/bin/knot
46
-
sudo chown root:root /usr/local/bin/knot
47
-
```
48
-
49
-
This is necessary because SSH `AuthorizedKeysCommand` requires [really
50
-
specific permissions](https://stackoverflow.com/a/27638306). The
51
-
`AuthorizedKeysCommand` specifies a command that is run by `sshd` to
52
-
retrieve a user's public SSH keys dynamically for authentication. Let's
53
-
set that up.
54
-
55
-
```
56
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
57
-
Match User git
58
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys
59
-
AuthorizedKeysCommandUser nobody
60
-
EOF
61
-
```
62
-
63
-
Then, reload `sshd`:
64
-
65
-
```
66
-
sudo systemctl reload ssh
67
-
```
68
-
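Once the knot server is running (set up below), you can sanity-check the command `sshd` will invoke by running it as the same unprivileged user (a sketch):

```bash
sudo -u nobody /usr/local/bin/knot keys -o authorized-keys
```

If everything is wired up, this prints the registered keys in `authorized_keys` format.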
69
-
Next, create the `git` user. We'll use the `git` user's home directory
70
-
to store repositories:
71
-
72
-
```
73
-
sudo adduser git
74
-
```
75
-
76
-
Create `/home/git/.knot.env` with the following, updating the values as
77
-
necessary. The `KNOT_SERVER_OWNER` should be set to your
78
-
DID; you can find your DID in the [Settings](https://tangled.sh/settings) page.
79
-
80
-
```
81
-
KNOT_REPO_SCAN_PATH=/home/git
82
-
KNOT_SERVER_HOSTNAME=knot.example.com
83
-
APPVIEW_ENDPOINT=https://tangled.sh
84
-
KNOT_SERVER_OWNER=did:plc:foobar
85
-
KNOT_SERVER_INTERNAL_LISTEN_ADDR=127.0.0.1:5444
86
-
KNOT_SERVER_LISTEN_ADDR=127.0.0.1:5555
87
-
```
88
-
89
-
If you run a Linux distribution that uses systemd, you can use the provided
90
-
service file to run the server. Copy
91
-
[`knotserver.service`](/systemd/knotserver.service)
92
-
to `/etc/systemd/system/`. Then, run:
93
-
94
-
```
95
-
systemctl enable knotserver
96
-
systemctl start knotserver
97
-
```
98
-
99
-
The last step is to configure a reverse proxy like Nginx or Caddy to front your
100
-
knot. Here's an example configuration for Nginx:
101
-
102
-
```
103
-
server {
104
-
listen 80;
105
-
listen [::]:80;
106
-
server_name knot.example.com;
107
-
108
-
location / {
109
-
proxy_pass http://localhost:5555;
110
-
proxy_set_header Host $host;
111
-
proxy_set_header X-Real-IP $remote_addr;
112
-
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
113
-
proxy_set_header X-Forwarded-Proto $scheme;
114
-
}
115
-
116
-
# wss endpoint for git events
117
-
location /events {
118
-
proxy_set_header X-Forwarded-For $remote_addr;
119
-
proxy_set_header Host $http_host;
120
-
proxy_set_header Upgrade websocket;
121
-
proxy_set_header Connection Upgrade;
122
-
proxy_pass http://localhost:5555;
123
-
}
124
-
# additional config for SSL/TLS go here.
125
-
}
126
-
127
-
```
128
-
129
-
Remember to use Let's Encrypt or similar to procure a certificate for your
130
-
knot domain.
131
-
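One common way to do this (a sketch, assuming `certbot` with the nginx plugin is installed) is:

```bash
sudo certbot --nginx -d knot.example.com
```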
132
-
You should now have a running knot server! You can finalize
133
-
your registration by hitting the `verify` button on the
134
-
[/knots](https://tangled.org/knots) page. This simply creates
135
-
a record on your PDS to announce the existence of the knot.
136
-
137
-
### custom paths
138
-
139
-
(This section applies to manual setup only. Docker users should edit the mounts
140
-
in `docker-compose.yml` instead.)
141
-
142
-
Right now, the database and repositories of your knot live in `/home/git`. You
143
-
can move these paths if you'd like to store them in another folder. Be careful
144
-
when adjusting these paths:
145
-
146
-
* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
147
-
any possible side effects. Remember to restart it once you're done.
148
-
* Make backups before moving in case something goes wrong.
149
-
* Make sure the `git` user can read and write from the new paths.
150
-
151
-
#### database
152
-
153
-
As an example, let's say the current database is at `/home/git/knotserver.db`,
154
-
and we want to move it to `/home/git/database/knotserver.db`.
155
-
156
-
Copy the current database to the new location. Make sure to copy the `.db-shm`
157
-
and `.db-wal` files if they exist.
158
-
159
-
```
160
-
mkdir /home/git/database
161
-
cp /home/git/knotserver.db* /home/git/database
162
-
```
163
-
164
-
In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
165
-
the new file path (_not_ the directory):
166
-
167
-
```
168
-
KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
169
-
```
170
-
171
-
#### repositories
172
-
173
-
As an example, let's say the repositories are currently in `/home/git`, and we
174
-
want to move them into `/home/git/repositories`.
175
-
176
-
Create the new folder, then move the existing repositories (if there are any):
177
-
178
-
```
179
-
mkdir /home/git/repositories
180
-
# move all DIDs into the new folder; these will vary for you!
181
-
mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
182
-
```
183
-
184
-
In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
185
-
to the new directory:
186
-
187
-
```
188
-
KNOT_REPO_SCAN_PATH=/home/git/repositories
189
-
```
190
-
191
-
Similarly, update your `sshd` `AuthorizedKeysCommand` to use the updated
192
-
repository path:
193
-
194
-
```
195
-
sudo tee /etc/ssh/sshd_config.d/authorized_keys_command.conf <<EOF
196
-
Match User git
197
-
AuthorizedKeysCommand /usr/local/bin/knot keys -o authorized-keys -git-dir /home/git/repositories
198
-
AuthorizedKeysCommandUser nobody
199
-
EOF
200
-
```
201
-
202
-
Make sure to restart your SSH server!
203
-
204
-
#### MOTD (message of the day)
205
-
206
-
To configure the MOTD used ("Welcome to this knot!" by default), edit the
207
-
`/home/git/motd` file:
208
-
209
-
```
210
-
printf "Hi from this knot!\n" > /home/git/motd
211
-
```
212
-
213
-
Note that you should add a newline at the end if setting a non-empty message
214
-
since the knot won't do this for you.
-59
docs/migrations.md
-59
docs/migrations.md
···
1
-
# Migrations
2
-
3
-
This document is laid out in reverse-chronological order.
4
-
Newer migration guides are listed first, and older guides
5
-
are further down the page.
6
-
7
-
## Upgrading from v1.8.x
8
-
9
-
After v1.8.2, the HTTP API for knots and spindles has been
10
-
deprecated and replaced with XRPC. Repositories on outdated
11
-
knots will not be viewable from the appview. Upgrading is
12
-
straightforward, however.
13
-
14
-
For knots:
15
-
16
-
- Upgrade to latest tag (v1.9.0 or above)
17
-
- Head to the [knot dashboard](https://tangled.org/knots) and
18
-
hit the "retry" button to verify your knot
19
-
20
-
For spindles:
21
-
22
-
- Upgrade to latest tag (v1.9.0 or above)
23
-
- Head to the [spindle
24
-
dashboard](https://tangled.org/spindles) and hit the
25
-
"retry" button to verify your spindle
26
-
27
-
## Upgrading from v1.7.x
28
-
29
-
After v1.7.0, knot secrets have been deprecated. You no
30
-
longer need a secret from the appview to run a knot. All
31
-
authorized commands to knots are managed via [Inter-Service
32
-
Authentication](https://atproto.com/specs/xrpc#inter-service-authentication-jwt).
33
-
Knots will be read-only until upgraded.
34
-
35
-
Upgrading is quite easy, in essence:
36
-
37
-
- `KNOT_SERVER_SECRET` is no more; you can remove this
38
-
environment variable entirely (see the sketch after this list)
39
-
- `KNOT_SERVER_OWNER` is now required on boot; set this to
40
-
your DID. You can find your DID in the
41
-
[settings](https://tangled.org/settings) page.
42
-
- Restart your knot once you have replaced the environment
43
-
variable
44
-
- Head to the [knot dashboard](https://tangled.org/knots) and
45
-
hit the "retry" button to verify your knot. This simply
46
-
writes a `sh.tangled.knot` record to your PDS.
47
-
48
-
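For manual setups, the change to `/home/git/.knot.env` is minimal (a sketch; the DID is a placeholder):

```
# remove the old secret:
# KNOT_SERVER_SECRET=...

# add your DID instead:
KNOT_SERVER_OWNER=did:plc:foobar
```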
If you use the nix module, simply bump the flake to the
49
-
latest revision, and change your config block like so:
50
-
51
-
```diff
52
-
services.tangled.knot = {
53
-
enable = true;
54
-
server = {
55
-
- secretFile = /path/to/secret;
56
-
+ owner = "did:plc:foo";
57
-
};
58
-
};
59
-
```
-25
docs/spindle/architecture.md
-25
docs/spindle/architecture.md
···
1
-
# spindle architecture
2
-
3
-
Spindle is a small CI runner service. Here's a high level overview of how it operates:
4
-
5
-
* listens for [`sh.tangled.spindle.member`](/lexicons/spindle/member.json) and
6
-
[`sh.tangled.repo`](/lexicons/repo.json) records on the Jetstream.
7
-
* when a new repo record comes through (typically when you add a spindle to a
8
-
repo from the settings), spindle then resolves the underlying knot and
9
-
subscribes to repo events (see:
10
-
[`sh.tangled.pipeline`](/lexicons/pipeline.json)).
11
-
* the spindle engine then handles execution of the pipeline, with results and
12
-
logs beamed on the spindle event stream over wss
13
-
14
-
### the engine
15
-
16
-
At present, the only supported backend is Docker (and Podman, if Docker
17
-
compatibility is enabled, so that `/run/docker.sock` is created). Spindle
18
-
executes each step in the pipeline in a fresh container, with state persisted
19
-
across steps within the `/tangled/workspace` directory.
20
-
21
-
The base image for the container is constructed on the fly using
22
-
[Nixery](https://nixery.dev), which is handy for caching layers for frequently
23
-
used packages.
24
-
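As an illustration of the Nixery convention (not specific to spindle), an image whose layers bundle a shell, git and Go can be pulled directly:

```bash
# each path segment is a nixpkgs package; "shell" adds bash and common tools
docker pull nixery.dev/shell/git/go
```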
25
-
The pipeline manifest is [specified here](/docs/spindle/pipeline.md).
-52
docs/spindle/hosting.md
-52
docs/spindle/hosting.md
···
1
-
# spindle self-hosting guide
2
-
3
-
## prerequisites
4
-
5
-
* Go
6
-
* Docker (the only supported backend currently)
7
-
8
-
## configuration
9
-
10
-
Spindle is configured using environment variables. The following environment variables are available:
11
-
12
-
* `SPINDLE_SERVER_LISTEN_ADDR`: The address the server listens on (default: `"0.0.0.0:6555"`).
13
-
* `SPINDLE_SERVER_DB_PATH`: The path to the SQLite database file (default: `"spindle.db"`).
14
-
* `SPINDLE_SERVER_HOSTNAME`: The hostname of the server (required).
15
-
* `SPINDLE_SERVER_JETSTREAM_ENDPOINT`: The endpoint of the Jetstream server (default: `"wss://jetstream1.us-west.bsky.network/subscribe"`).
16
-
* `SPINDLE_SERVER_DEV`: A boolean indicating whether the server is running in development mode (default: `false`).
17
-
* `SPINDLE_SERVER_OWNER`: The DID of the owner (required).
18
-
* `SPINDLE_PIPELINES_NIXERY`: The Nixery URL (default: `"nixery.tangled.sh"`).
19
-
* `SPINDLE_PIPELINES_WORKFLOW_TIMEOUT`: The default workflow timeout (default: `"5m"`).
20
-
* `SPINDLE_PIPELINES_LOG_DIR`: The directory to store workflow logs (default: `"/var/log/spindle"`).
21
-
22
-
## running spindle
23
-
24
-
1. **Set the environment variables.** For example:
25
-
26
-
```shell
27
-
export SPINDLE_SERVER_HOSTNAME="your-hostname"
28
-
export SPINDLE_SERVER_OWNER="your-did"
29
-
```
30
-
31
-
2. **Build the Spindle binary.**
32
-
33
-
```shell
34
-
cd core
35
-
go mod download
36
-
go build -o cmd/spindle/spindle cmd/spindle/main.go
37
-
```
38
-
39
-
3. **Create the log directory.**
40
-
41
-
```shell
42
-
sudo mkdir -p /var/log/spindle
43
-
sudo chown $USER:$USER -R /var/log/spindle
44
-
```
45
-
46
-
4. **Run the Spindle binary.**
47
-
48
-
```shell
49
-
./cmd/spindle/spindle
50
-
```
51
-
52
-
Spindle will now start, connect to the Jetstream server, and begin processing pipelines.
-285
docs/spindle/openbao.md
-285
docs/spindle/openbao.md
···
1
-
# spindle secrets with openbao
2
-
3
-
This document covers setting up Spindle to use OpenBao for secrets
4
-
management via OpenBao Proxy instead of the default SQLite backend.
5
-
6
-
## overview
7
-
8
-
Spindle now uses OpenBao Proxy for secrets management. The proxy handles
9
-
authentication automatically using AppRole credentials, while Spindle
10
-
connects to the local proxy instead of directly to the OpenBao server.
11
-
12
-
This approach provides better security, automatic token renewal, and
13
-
simplified application code.
14
-
15
-
## installation
16
-
17
-
Install OpenBao from nixpkgs:
18
-
19
-
```bash
20
-
nix shell nixpkgs#openbao # for a local server
21
-
```
22
-
23
-
## setup
24
-
25
-
The setup process is documented for both local development and production.
26
-
27
-
### local development
28
-
29
-
Start OpenBao in dev mode:
30
-
31
-
```bash
32
-
bao server -dev -dev-root-token-id="root" -dev-listen-address=127.0.0.1:8201
33
-
```
34
-
35
-
This starts OpenBao on `http://localhost:8201` with a root token.
36
-
37
-
Set up environment for bao CLI:
38
-
39
-
```bash
40
-
export BAO_ADDR=http://localhost:8200
41
-
export BAO_TOKEN=root
42
-
```
43
-
44
-
### production
45
-
46
-
You would typically use a systemd service with a configuration file. Refer to
47
-
[@tangled.org/infra](https://tangled.org/@tangled.org/infra) for how this can be
48
-
achieved using Nix.
49
-
50
-
Then, initialize the bao server:
51
-
```bash
52
-
bao operator init -key-shares=1 -key-threshold=1
53
-
```
54
-
55
-
This will print out an unseal key and a root token. Save them somewhere (like a password manager). Then unseal the vault to begin setting it up:
56
-
```bash
57
-
bao operator unseal <unseal_key>
58
-
```
59
-
60
-
All steps below remain the same across both dev and production setups.
61
-
62
-
### configure openbao server
63
-
64
-
Create the spindle KV mount:
65
-
66
-
```bash
67
-
bao secrets enable -path=spindle -version=2 kv
68
-
```
69
-
70
-
Set up AppRole authentication and policy:
71
-
72
-
Create a policy file `spindle-policy.hcl`:
73
-
74
-
```hcl
75
-
# Full access to spindle KV v2 data
76
-
path "spindle/data/*" {
77
-
capabilities = ["create", "read", "update", "delete"]
78
-
}
79
-
80
-
# Access to metadata for listing and management
81
-
path "spindle/metadata/*" {
82
-
capabilities = ["list", "read", "delete", "update"]
83
-
}
84
-
85
-
# Allow listing at root level
86
-
path "spindle/" {
87
-
capabilities = ["list"]
88
-
}
89
-
90
-
# Required for connection testing and health checks
91
-
path "auth/token/lookup-self" {
92
-
capabilities = ["read"]
93
-
}
94
-
```
95
-
96
-
Apply the policy and create an AppRole:
97
-
98
-
```bash
99
-
bao policy write spindle-policy spindle-policy.hcl
100
-
bao auth enable approle
101
-
bao write auth/approle/role/spindle \
102
-
token_policies="spindle-policy" \
103
-
token_ttl=1h \
104
-
token_max_ttl=4h \
105
-
bind_secret_id=true \
106
-
secret_id_ttl=0 \
107
-
secret_id_num_uses=0
108
-
```
109
-
110
-
Get the credentials:
111
-
112
-
```bash
113
-
# Get role ID (static)
114
-
ROLE_ID=$(bao read -field=role_id auth/approle/role/spindle/role-id)
115
-
116
-
# Generate secret ID
117
-
SECRET_ID=$(bao write -f -field=secret_id auth/approle/role/spindle/secret-id)
118
-
119
-
echo "Role ID: $ROLE_ID"
120
-
echo "Secret ID: $SECRET_ID"
121
-
```
122
-
123
-
### create proxy configuration
124
-
125
-
Create the credential files:
126
-
127
-
```bash
128
-
# Create directory for OpenBao files
129
-
mkdir -p /tmp/openbao
130
-
131
-
# Save credentials
132
-
echo "$ROLE_ID" > /tmp/openbao/role-id
133
-
echo "$SECRET_ID" > /tmp/openbao/secret-id
134
-
chmod 600 /tmp/openbao/role-id /tmp/openbao/secret-id
135
-
```
136
-
137
-
Create a proxy configuration file `/tmp/openbao/proxy.hcl`:
138
-
139
-
```hcl
140
-
# OpenBao server connection
141
-
vault {
142
-
address = "http://localhost:8200"
143
-
}
144
-
145
-
# Auto-Auth using AppRole
146
-
auto_auth {
147
-
method "approle" {
148
-
mount_path = "auth/approle"
149
-
config = {
150
-
role_id_file_path = "/tmp/openbao/role-id"
151
-
secret_id_file_path = "/tmp/openbao/secret-id"
152
-
}
153
-
}
154
-
155
-
# Optional: write token to file for debugging
156
-
sink "file" {
157
-
config = {
158
-
path = "/tmp/openbao/token"
159
-
mode = 0640
160
-
}
161
-
}
162
-
}
163
-
164
-
# Proxy listener for Spindle
165
-
listener "tcp" {
166
-
address = "127.0.0.1:8201"
167
-
tls_disable = true
168
-
}
169
-
170
-
# Enable API proxy with auto-auth token
171
-
api_proxy {
172
-
use_auto_auth_token = true
173
-
}
174
-
175
-
# Enable response caching
176
-
cache {
177
-
use_auto_auth_token = true
178
-
}
179
-
180
-
# Logging
181
-
log_level = "info"
182
-
```
183
-
184
-
### start the proxy
185
-
186
-
Start OpenBao Proxy:
187
-
188
-
```bash
189
-
bao proxy -config=/tmp/openbao/proxy.hcl
190
-
```
191
-
192
-
The proxy will authenticate with OpenBao and start listening on
193
-
`127.0.0.1:8201`.
194
-
195
-
### configure spindle
196
-
197
-
Set these environment variables for Spindle:
198
-
199
-
```bash
200
-
export SPINDLE_SERVER_SECRETS_PROVIDER=openbao
201
-
export SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=http://127.0.0.1:8201
202
-
export SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=spindle
203
-
```
204
-
205
-
Start Spindle:
206
-
207
-
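For example, using the binary built as in the [hosting guide](/docs/spindle/hosting.md) (a sketch; your binary path may differ):

```bash
./cmd/spindle/spindle
```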
Spindle will now connect to the local proxy, which handles all
208
-
authentication automatically.
209
-
210
-
## production setup for proxy
211
-
212
-
For production, you'll want to run the proxy as a service:
213
-
214
-
Place your production configuration in `/etc/openbao/proxy.hcl` with
215
-
proper TLS settings for the vault connection.
216
-
217
-
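A sketch, assuming the proxy runs under a systemd unit named `openbao-proxy` (the troubleshooting section below uses the same name):

```bash
sudo systemctl enable --now openbao-proxy
journalctl -u openbao-proxy -f   # follow the proxy logs
```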
## verifying setup
218
-
219
-
Test the proxy directly:
220
-
221
-
```bash
222
-
# Check proxy health
223
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/sys/health
224
-
225
-
# Test token lookup through proxy
226
-
curl -H "X-Vault-Request: true" http://127.0.0.1:8201/v1/auth/token/lookup-self
227
-
```
228
-
229
-
Test OpenBao operations through the server:
230
-
231
-
```bash
232
-
# List all secrets
233
-
bao kv list spindle/
234
-
235
-
# Add a test secret via Spindle API, then check it exists
236
-
bao kv list spindle/repos/
237
-
238
-
# Get a specific secret
239
-
bao kv get spindle/repos/your_repo_path/SECRET_NAME
240
-
```
241
-
242
-
## how it works
243
-
244
-
- Spindle connects to OpenBao Proxy on localhost (typically port 8200 or 8201)
245
-
- The proxy authenticates with OpenBao using AppRole credentials
246
-
- All Spindle requests go through the proxy, which injects authentication tokens
247
-
- Secrets are stored at `spindle/repos/{sanitized_repo_path}/{secret_key}`
248
-
- Repository paths like `did:plc:alice/myrepo` become `did_plc_alice_myrepo`
249
-
- The proxy handles all token renewal automatically
250
-
- Spindle no longer manages tokens or authentication directly
251
-
252
-
## troubleshooting
253
-
254
-
**Connection refused**: Check that the OpenBao Proxy is running and
255
-
listening on the configured address.
256
-
257
-
**403 errors**: Verify the AppRole credentials are correct and the policy
258
-
has the necessary permissions.
259
-
260
-
**404 route errors**: The spindle KV mount probably doesn't exist - run
261
-
the mount creation step again.
262
-
263
-
**Proxy authentication failures**: Check the proxy logs and verify the
264
-
role-id and secret-id files are readable and contain valid credentials.
265
-
266
-
**Secret not found after writing**: This can indicate policy permission
267
-
issues. Verify the policy includes both `spindle/data/*` and
268
-
`spindle/metadata/*` paths with appropriate capabilities.
269
-
270
-
Check proxy logs:
271
-
272
-
```bash
273
-
# If running as systemd service
274
-
journalctl -u openbao-proxy -f
275
-
276
-
# If running directly, check the console output
277
-
```
278
-
279
-
Test AppRole authentication manually:
280
-
281
-
```bash
282
-
bao write auth/approle/login \
283
-
role_id="$(cat /tmp/openbao/role-id)" \
284
-
secret_id="$(cat /tmp/openbao/secret-id)"
285
-
```
-183
docs/spindle/pipeline.md
-183
docs/spindle/pipeline.md
···
1
-
# spindle pipelines
2
-
3
-
Spindle workflows allow you to write CI/CD pipelines in a simple format. They're located in the `.tangled/workflows` directory at the root of your repository, and are defined using YAML.
4
-
5
-
The fields are:
6
-
7
-
- [Trigger](#trigger): A **required** field that defines when a workflow should be triggered.
8
-
- [Engine](#engine): A **required** field that defines which engine a workflow should run on.
9
-
- [Clone options](#clone-options): An **optional** field that defines how the repository should be cloned.
10
-
- [Dependencies](#dependencies): An **optional** field that allows you to list dependencies you may need.
11
-
- [Environment](#environment): An **optional** field that allows you to define environment variables.
12
-
- [Steps](#steps): An **optional** field that allows you to define what steps should run in the workflow.
13
-
14
-
## Trigger
15
-
16
-
The first thing to add to a workflow is the trigger, which defines when a workflow runs. This is defined using a `when` field, which takes in a list of conditions. Each condition has the following fields:
17
-
18
-
- `event`: This is a **required** field that defines when your workflow should run. It's a list that can take one or more of the following values:
19
-
- `push`: The workflow should run every time a commit is pushed to the repository.
20
-
- `pull_request`: The workflow should run every time a pull request is made or updated.
21
-
- `manual`: The workflow can be triggered manually.
22
-
- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
23
-
- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
24
-
25
-
For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
26
-
27
-
```yaml
28
-
when:
29
-
- event: ["push", "manual"]
30
-
branch: ["main", "develop"]
31
-
- event: ["pull_request"]
32
-
branch: ["main"]
33
-
```
34
-
35
-
You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
36
-
37
-
```yaml
38
-
when:
39
-
- event: ["push"]
40
-
tag: ["v*"]
41
-
```
42
-
43
-
You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
44
-
45
-
```yaml
46
-
when:
47
-
- event: ["push"]
48
-
branch: ["main", "release-*"]
49
-
tag: ["v*", "stable"]
50
-
```
51
-
52
-
## Engine
53
-
54
-
Next is the engine on which the workflow should run, defined using the **required** `engine` field. The currently supported engines are:
55
-
56
-
- `nixery`: This uses an instance of [Nixery](https://nixery.dev) to run steps, which allows you to add [dependencies](#dependencies) from [Nixpkgs](https://github.com/NixOS/nixpkgs). You can search for packages on https://search.nixos.org, and there's a pretty good chance the package(s) you're looking for will be there.
57
-
58
-
Example:
59
-
60
-
```yaml
61
-
engine: "nixery"
62
-
```
63
-
64
-
## Clone options
65
-
66
-
When a workflow starts, the first step is to clone the repository. You can customize this behavior using the **optional** `clone` field. It has the following fields:
67
-
68
-
- `skip`: Setting this to `true` will skip cloning the repository. This can be useful if your workflow is doing something that doesn't require anything from the repository itself. This is `false` by default.
69
-
- `depth`: This sets the number of commits, or the "clone depth", to fetch from the repository. For example, if you set this to 2, the last 2 commits will be fetched. By default, the depth is set to 1, meaning only the most recent commit will be fetched, which is the commit that triggered the workflow.
70
-
- `submodules`: If you use [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules) in your repository, setting this field to `true` will recursively fetch all submodules. This is `false` by default.
71
-
72
-
The default settings are:
73
-
74
-
```yaml
75
-
clone:
76
-
skip: false
77
-
depth: 1
78
-
submodules: false
79
-
```
80
-
81
-
## Dependencies
82
-
83
-
Usually when you're running a workflow, you'll need additional dependencies. The `dependencies` field lets you define which dependencies to get, and from where. It's a key-value map, with the key being the registry to fetch dependencies from, and the value being the list of dependencies to fetch.
84
-
85
-
Say you want to fetch Node.js and Go from `nixpkgs`, and a package called `my_pkg` you've made from your own registry at your repository at `https://tangled.sh/@example.com/my_pkg`. You can define those dependencies like so:
86
-
87
-
```yaml
88
-
dependencies:
89
-
# nixpkgs
90
-
nixpkgs:
91
-
- nodejs
92
-
- go
93
-
# custom registry
94
-
git+https://tangled.org/@example.com/my_pkg:
95
-
- my_pkg
96
-
```
97
-
98
-
Now these dependencies are available to use in your workflow!
99
-
100
-
## Environment
101
-
102
-
The `environment` field allows you to define environment variables that will be available throughout the entire workflow. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
103
-
104
-
Example:
105
-
106
-
```yaml
107
-
environment:
108
-
GOOS: "linux"
109
-
GOARCH: "arm64"
110
-
NODE_ENV: "production"
111
-
MY_ENV_VAR: "MY_ENV_VALUE"
112
-
```
113
-
114
-
## Steps
115
-
116
-
The `steps` field allows you to define what steps should run in the workflow. It's a list of step objects, each with the following fields:
117
-
118
-
- `name`: This field allows you to give your step a name. This name is visible in your workflow runs, and is used to describe what the step is doing.
119
-
- `command`: This field allows you to define a command to run in that step. The step is run in a Bash shell, and the logs from the command will be visible in the pipelines page on the Tangled website. The [dependencies](#dependencies) you added will be available to use here.
120
-
- `environment`: Similar to the global [environment](#environment) config, this **optional** field is a key-value map that allows you to set environment variables for the step. **Do not put secrets here; these environment variables are visible to anyone viewing the repository. You can add secrets for pipelines in your repository's settings.**
121
-
122
-
Example:
123
-
124
-
```yaml
125
-
steps:
126
-
- name: "Build backend"
127
-
command: "go build"
128
-
environment:
129
-
GOOS: "darwin"
130
-
GOARCH: "arm64"
131
-
- name: "Build frontend"
132
-
command: "npm run build"
133
-
environment:
134
-
NODE_ENV: "production"
135
-
```
136
-
137
-
## Complete workflow
138
-
139
-
```yaml
140
-
# .tangled/workflows/build.yml
141
-
142
-
when:
143
-
- event: ["push", "manual"]
144
-
branch: ["main", "develop"]
145
-
- event: ["pull_request"]
146
-
branch: ["main"]
147
-
148
-
engine: "nixery"
149
-
150
-
# using the default values
151
-
clone:
152
-
skip: false
153
-
depth: 1
154
-
submodules: false
155
-
156
-
dependencies:
157
-
# nixpkgs
158
-
nixpkgs:
159
-
- nodejs
160
-
- go
161
-
# custom registry
162
-
git+https://tangled.org/@example.com/my_pkg:
163
-
- my_pkg
164
-
165
-
environment:
166
-
GOOS: "linux"
167
-
GOARCH: "arm64"
168
-
NODE_ENV: "production"
169
-
MY_ENV_VAR: "MY_ENV_VALUE"
170
-
171
-
steps:
172
-
- name: "Build backend"
173
-
command: "go build"
174
-
environment:
175
-
GOOS: "darwin"
176
-
GOARCH: "arm64"
177
-
- name: "Build frontend"
178
-
command: "npm run build"
179
-
environment:
180
-
NODE_ENV: "production"
181
-
```
182
-
183
-
If you want another example of a workflow, you can look at the one [Tangled uses to build the project](https://tangled.sh/@tangled.sh/core/blob/master/.tangled/workflows/build.yml).
+101
docs/styles.css
+101
docs/styles.css
···
1
+
svg {
2
+
width: 16px;
3
+
height: 16px;
4
+
}
5
+
6
+
:root {
7
+
--syntax-alert: #d20f39;
8
+
--syntax-annotation: #fe640b;
9
+
--syntax-attribute: #df8e1d;
10
+
--syntax-basen: #40a02b;
11
+
--syntax-builtin: #1e66f5;
12
+
--syntax-controlflow: #8839ef;
13
+
--syntax-char: #04a5e5;
14
+
--syntax-constant: #fe640b;
15
+
--syntax-comment: #9ca0b0;
16
+
--syntax-commentvar: #7c7f93;
17
+
--syntax-documentation: #9ca0b0;
18
+
--syntax-datatype: #df8e1d;
19
+
--syntax-decval: #40a02b;
20
+
--syntax-error: #d20f39;
21
+
--syntax-extension: #4c4f69;
22
+
--syntax-float: #40a02b;
23
+
--syntax-function: #1e66f5;
24
+
--syntax-import: #40a02b;
25
+
--syntax-information: #04a5e5;
26
+
--syntax-keyword: #8839ef;
27
+
--syntax-operator: #179299;
28
+
--syntax-other: #8839ef;
29
+
--syntax-preprocessor: #ea76cb;
30
+
--syntax-specialchar: #04a5e5;
31
+
--syntax-specialstring: #ea76cb;
32
+
--syntax-string: #40a02b;
33
+
--syntax-variable: #8839ef;
34
+
--syntax-verbatimstring: #40a02b;
35
+
--syntax-warning: #df8e1d;
36
+
}
37
+
38
+
@media (prefers-color-scheme: dark) {
39
+
:root {
40
+
--syntax-alert: #f38ba8;
41
+
--syntax-annotation: #fab387;
42
+
--syntax-attribute: #f9e2af;
43
+
--syntax-basen: #a6e3a1;
44
+
--syntax-builtin: #89b4fa;
45
+
--syntax-controlflow: #cba6f7;
46
+
--syntax-char: #89dceb;
47
+
--syntax-constant: #fab387;
48
+
--syntax-comment: #6c7086;
49
+
--syntax-commentvar: #585b70;
50
+
--syntax-documentation: #6c7086;
51
+
--syntax-datatype: #f9e2af;
52
+
--syntax-decval: #a6e3a1;
53
+
--syntax-error: #f38ba8;
54
+
--syntax-extension: #cdd6f4;
55
+
--syntax-float: #a6e3a1;
56
+
--syntax-function: #89b4fa;
57
+
--syntax-import: #a6e3a1;
58
+
--syntax-information: #89dceb;
59
+
--syntax-keyword: #cba6f7;
60
+
--syntax-operator: #94e2d5;
61
+
--syntax-other: #cba6f7;
62
+
--syntax-preprocessor: #f5c2e7;
63
+
--syntax-specialchar: #89dceb;
64
+
--syntax-specialstring: #f5c2e7;
65
+
--syntax-string: #a6e3a1;
66
+
--syntax-variable: #cba6f7;
67
+
--syntax-verbatimstring: #a6e3a1;
68
+
--syntax-warning: #f9e2af;
69
+
}
70
+
}
71
+
72
+
/* pandoc syntax highlighting classes */
73
+
code span.al { color: var(--syntax-alert); font-weight: bold; } /* alert */
74
+
code span.an { color: var(--syntax-annotation); font-weight: bold; font-style: italic; } /* annotation */
75
+
code span.at { color: var(--syntax-attribute); } /* attribute */
76
+
code span.bn { color: var(--syntax-basen); } /* basen */
77
+
code span.bu { color: var(--syntax-builtin); } /* builtin */
78
+
code span.cf { color: var(--syntax-controlflow); font-weight: bold; } /* controlflow */
79
+
code span.ch { color: var(--syntax-char); } /* char */
80
+
code span.cn { color: var(--syntax-constant); } /* constant */
81
+
code span.co { color: var(--syntax-comment); font-style: italic; } /* comment */
82
+
code span.cv { color: var(--syntax-commentvar); font-weight: bold; font-style: italic; } /* commentvar */
83
+
code span.do { color: var(--syntax-documentation); font-style: italic; } /* documentation */
84
+
code span.dt { color: var(--syntax-datatype); } /* datatype */
85
+
code span.dv { color: var(--syntax-decval); } /* decval */
86
+
code span.er { color: var(--syntax-error); font-weight: bold; } /* error */
87
+
code span.ex { color: var(--syntax-extension); } /* extension */
88
+
code span.fl { color: var(--syntax-float); } /* float */
89
+
code span.fu { color: var(--syntax-function); } /* function */
90
+
code span.im { color: var(--syntax-import); font-weight: bold; } /* import */
91
+
code span.in { color: var(--syntax-information); font-weight: bold; font-style: italic; } /* information */
92
+
code span.kw { color: var(--syntax-keyword); font-weight: bold; } /* keyword */
93
+
code span.op { color: var(--syntax-operator); } /* operator */
94
+
code span.ot { color: var(--syntax-other); } /* other */
95
+
code span.pp { color: var(--syntax-preprocessor); } /* preprocessor */
96
+
code span.sc { color: var(--syntax-specialchar); } /* specialchar */
97
+
code span.ss { color: var(--syntax-specialstring); } /* specialstring */
98
+
code span.st { color: var(--syntax-string); } /* string */
99
+
code span.va { color: var(--syntax-variable); } /* variable */
100
+
code span.vs { color: var(--syntax-verbatimstring); } /* verbatimstring */
101
+
code span.wa { color: var(--syntax-warning); font-weight: bold; font-style: italic; } /* warning */
+117
docs/template.html
+117
docs/template.html
···
1
+
<!DOCTYPE html>
2
+
<html xmlns="http://www.w3.org/1999/xhtml" lang="$lang$" xml:lang="$lang$"$if(dir)$ dir="$dir$"$endif$>
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<meta name="generator" content="pandoc" />
6
+
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
7
+
$for(author-meta)$
8
+
<meta name="author" content="$author-meta$" />
9
+
$endfor$
10
+
11
+
$if(date-meta)$
12
+
<meta name="dcterms.date" content="$date-meta$" />
13
+
$endif$
14
+
15
+
$if(keywords)$
16
+
<meta name="keywords" content="$for(keywords)$$keywords$$sep$, $endfor$" />
17
+
$endif$
18
+
19
+
$if(description-meta)$
20
+
<meta name="description" content="$description-meta$" />
21
+
$endif$
22
+
23
+
<title>$pagetitle$</title>
24
+
25
+
<style>
26
+
$styles.css()$
27
+
</style>
28
+
29
+
$for(css)$
30
+
<link rel="stylesheet" href="$css$" />
31
+
$endfor$
32
+
33
+
$for(header-includes)$
34
+
$header-includes$
35
+
$endfor$
36
+
37
+
<link rel="preload" href="/static/fonts/InterVariable.woff2" as="font" type="font/woff2" crossorigin />
38
+
39
+
</head>
40
+
<body class="bg-white dark:bg-gray-900 min-h-screen flex flex-col min-h-screen">
41
+
$for(include-before)$
42
+
$include-before$
43
+
$endfor$
44
+
45
+
$if(toc)$
46
+
<!-- mobile topbar toc -->
47
+
<details id="mobile-$idprefix$TOC" role="doc-toc" class="md:hidden bg-gray-50 dark:bg-gray-800 border-b border-gray-200 dark:border-gray-700 z-50 space-y-4 group px-6 py-4">
48
+
<summary class="cursor-pointer list-none text-sm font-semibold select-none flex gap-2 justify-between items-center dark:text-white">
49
+
$if(toc-title)$$toc-title$$else$Table of Contents$endif$
50
+
<span class="group-open:hidden inline">${ menu.svg() }</span>
51
+
<span class="hidden group-open:inline">${ x.svg() }</span>
52
+
</summary>
53
+
${ table-of-contents:toc.html() }
54
+
</details>
55
+
<!-- desktop sidebar toc -->
56
+
<nav id="$idprefix$TOC" role="doc-toc" class="hidden md:block fixed left-0 top-0 w-80 h-screen bg-gray-50 dark:bg-gray-800 border-r border-gray-200 dark:border-gray-700 overflow-y-auto p-4 z-50">
57
+
$if(toc-title)$
58
+
<h2 id="$idprefix$toc-title" class="text-lg font-semibold mb-4 text-gray-900">$toc-title$</h2>
59
+
$endif$
60
+
${ table-of-contents:toc.html() }
61
+
</nav>
62
+
$endif$
63
+
64
+
<div class="$if(toc)$md:ml-80$endif$ flex-1 flex flex-col">
65
+
<main class="max-w-4xl w-full mx-auto p-6 flex-1">
66
+
$if(top)$
67
+
$-- only print title block if this is NOT the top page
68
+
$else$
69
+
$if(title)$
70
+
<header id="title-block-header" class="mb-8 pb-8 border-b border-gray-200 dark:border-gray-700">
71
+
<h1 class="text-4xl font-bold mb-2 text-black dark:text-white">$title$</h1>
72
+
$if(subtitle)$
73
+
<p class="text-xl text-gray-500 dark:text-gray-400 mb-2">$subtitle$</p>
74
+
$endif$
75
+
$for(author)$
76
+
<p class="text-sm text-gray-500 dark:text-gray-400">$author$</p>
77
+
$endfor$
78
+
$if(date)$
79
+
<p class="text-sm text-gray-500 dark:text-gray-400">Updated on $date$</p>
80
+
$endif$
81
+
$if(abstract)$
82
+
<div class="mt-6 p-4 bg-gray-50 rounded-lg">
83
+
<div class="text-sm font-semibold text-gray-700 uppercase mb-2">$abstract-title$</div>
84
+
<div class="text-gray-700">$abstract$</div>
85
+
</div>
86
+
$endif$
87
+
$endif$
88
+
</header>
89
+
$endif$
90
+
<article class="prose dark:prose-invert max-w-none">
91
+
$body$
92
+
</article>
93
+
</main>
94
+
<nav id="sitenav" class="border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-800 ">
95
+
<div class="max-w-4xl mx-auto px-8 py-4">
96
+
<div class="flex justify-between gap-4">
97
+
<span class="flex-1">
98
+
$if(previous.url)$
99
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Previous</span>
100
+
<a href="$previous.url$" accesskey="p" rel="previous">$previous.title$</a>
101
+
$endif$
102
+
</span>
103
+
<span class="flex-1 text-right">
104
+
$if(next.url)$
105
+
<span class="text-xs text-gray-500 dark:text-gray-400 uppercase block mb-1">Next</span>
106
+
<a href="$next.url$" accesskey="n" rel="next">$next.title$</a>
107
+
$endif$
108
+
</span>
109
+
</div>
110
+
</div>
111
+
</nav>
112
+
</div>
113
+
$for(include-after)$
114
+
$include-after$
115
+
$endfor$
116
+
</body>
117
+
</html>
+4
docs/toc.html
+4
docs/toc.html
+9
-9
flake.lock
+9
-9
flake.lock
···
35
35
"systems": "systems"
36
36
},
37
37
"locked": {
38
-
"lastModified": 1694529238,
39
-
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
38
+
"lastModified": 1731533236,
39
+
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
40
40
"owner": "numtide",
41
41
"repo": "flake-utils",
42
-
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
42
+
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
43
43
"type": "github"
44
44
},
45
45
"original": {
···
56
56
]
57
57
},
58
58
"locked": {
59
-
"lastModified": 1754078208,
60
-
"narHash": "sha256-YVoIFDCDpYuU3riaDEJ3xiGdPOtsx4sR5eTzHTytPV8=",
59
+
"lastModified": 1763982521,
60
+
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
61
61
"owner": "nix-community",
62
62
"repo": "gomod2nix",
63
-
"rev": "7f963246a71626c7fc70b431a315c4388a0c95cf",
63
+
"rev": "02e63a239d6eabd595db56852535992c898eba72",
64
64
"type": "github"
65
65
},
66
66
"original": {
···
150
150
},
151
151
"nixpkgs": {
152
152
"locked": {
153
-
"lastModified": 1751984180,
154
-
"narHash": "sha256-LwWRsENAZJKUdD3SpLluwDmdXY9F45ZEgCb0X+xgOL0=",
153
+
"lastModified": 1766070988,
154
+
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
155
155
"owner": "nixos",
156
156
"repo": "nixpkgs",
157
-
"rev": "9807714d6944a957c2e036f84b0ff8caf9930bc0",
157
+
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
158
158
"type": "github"
159
159
},
160
160
"original": {
+34
-4
flake.nix
+34
-4
flake.nix
···
80
80
}).buildGoApplication;
81
81
modules = ./nix/gomod2nix.toml;
82
82
sqlite-lib = self.callPackage ./nix/pkgs/sqlite-lib.nix {
83
-
inherit (pkgs) gcc;
84
83
inherit sqlite-lib-src;
85
84
};
86
85
lexgen = self.callPackage ./nix/pkgs/lexgen.nix {inherit indigo;};
···
89
88
inherit htmx-src htmx-ws-src lucide-src inter-fonts-src ibm-plex-mono-src actor-typeahead-src;
90
89
};
91
90
appview = self.callPackage ./nix/pkgs/appview.nix {};
91
+
docs = self.callPackage ./nix/pkgs/docs.nix {
92
+
inherit inter-fonts-src ibm-plex-mono-src lucide-src;
93
+
};
92
94
spindle = self.callPackage ./nix/pkgs/spindle.nix {};
93
95
knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
94
96
knot = self.callPackage ./nix/pkgs/knot.nix {};
97
+
did-method-plc = self.callPackage ./nix/pkgs/did-method-plc.nix {};
98
+
bluesky-jetstream = self.callPackage ./nix/pkgs/bluesky-jetstream.nix {};
99
+
bluesky-relay = self.callPackage ./nix/pkgs/bluesky-relay.nix {};
100
+
tap = self.callPackage ./nix/pkgs/tap.nix {};
95
101
});
96
102
in {
97
103
overlays.default = final: prev: {
98
-
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview;
104
+
inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs did-method-plc bluesky-jetstream bluesky-relay tap;
99
105
};
100
106
101
107
packages = forAllSystems (system: let
···
104
110
staticPackages = mkPackageSet pkgs.pkgsStatic;
105
111
crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
106
112
in {
107
-
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib;
113
+
inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs did-method-plc bluesky-jetstream bluesky-relay tap;
108
114
109
115
pkgsStatic-appview = staticPackages.appview;
110
116
pkgsStatic-knot = staticPackages.knot;
···
156
162
nativeBuildInputs = [
157
163
pkgs.go
158
164
pkgs.air
159
-
pkgs.tilt
160
165
pkgs.gopls
161
166
pkgs.httpie
162
167
pkgs.litecli
···
304
309
imports = [./nix/modules/spindle.nix];
305
310
306
311
services.tangled.spindle.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.spindle;
312
+
services.tangled.spindle.tap-package = lib.mkDefault self.packages.${pkgs.system}.tap;
313
+
};
314
+
nixosModules.did-method-plc = {
315
+
lib,
316
+
pkgs,
317
+
...
318
+
}: {
319
+
imports = [./nix/modules/did-method-plc.nix];
320
+
services.did-method-plc.package = lib.mkDefault self.packages.${pkgs.system}.did-method-plc;
321
+
};
322
+
nixosModules.bluesky-relay = {
323
+
lib,
324
+
pkgs,
325
+
...
326
+
}: {
327
+
imports = [./nix/modules/bluesky-relay.nix];
328
+
services.bluesky-relay.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-relay;
329
+
};
330
+
nixosModules.bluesky-jetstream = {
331
+
lib,
332
+
pkgs,
333
+
...
334
+
}: {
335
+
imports = [./nix/modules/bluesky-jetstream.nix];
336
+
services.bluesky-jetstream.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-jetstream;
307
337
};
308
338
};
309
339
}
+4
-4
go.mod
+4
-4
go.mod
···
1
1
module tangled.org/core
2
2
3
-
go 1.24.4
3
+
go 1.25.0
4
4
5
5
require (
6
6
github.com/Blank-Xu/sql-adapter v1.1.1
···
29
29
github.com/gorilla/feeds v1.2.0
30
30
github.com/gorilla/sessions v1.4.0
31
31
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
32
+
github.com/hashicorp/go-version v1.8.0
32
33
github.com/hiddeco/sshsig v0.2.0
33
34
github.com/hpcloud/tail v1.0.0
34
35
github.com/ipfs/go-cid v0.5.0
···
44
45
github.com/stretchr/testify v1.10.0
45
46
github.com/urfave/cli/v3 v3.3.3
46
47
github.com/whyrusleeping/cbor-gen v0.3.1
47
-
github.com/wyatt915/goldmark-treeblood v0.0.1
48
48
github.com/yuin/goldmark v1.7.13
49
+
github.com/yuin/goldmark-emoji v1.0.6
49
50
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
50
51
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab
51
52
golang.org/x/crypto v0.40.0
52
53
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
53
54
golang.org/x/image v0.31.0
54
55
golang.org/x/net v0.42.0
55
-
golang.org/x/sync v0.17.0
56
56
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
57
57
gopkg.in/yaml.v3 v3.0.1
58
58
)
···
190
190
github.com/vmihailenco/go-tinylfu v0.2.2 // indirect
191
191
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
192
192
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
193
-
github.com/wyatt915/treeblood v0.1.16 // indirect
194
193
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
195
194
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b // indirect
196
195
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 // indirect
···
205
204
go.uber.org/atomic v1.11.0 // indirect
206
205
go.uber.org/multierr v1.11.0 // indirect
207
206
go.uber.org/zap v1.27.0 // indirect
207
+
golang.org/x/sync v0.17.0 // indirect
208
208
golang.org/x/sys v0.34.0 // indirect
209
209
golang.org/x/text v0.29.0 // indirect
210
210
golang.org/x/time v0.12.0 // indirect
+4
-4
go.sum
+4
-4
go.sum
···
264
264
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
265
265
github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
266
266
github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
267
+
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
268
+
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
267
269
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
268
270
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
269
271
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
···
495
497
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
496
498
github.com/whyrusleeping/cbor-gen v0.3.1 h1:82ioxmhEYut7LBVGhGq8xoRkXPLElVuh5mV67AFfdv0=
497
499
github.com/whyrusleeping/cbor-gen v0.3.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
498
-
github.com/wyatt915/goldmark-treeblood v0.0.1 h1:6vLJcjFrHgE4ASu2ga4hqIQmbvQLU37v53jlHZ3pqDs=
499
-
github.com/wyatt915/goldmark-treeblood v0.0.1/go.mod h1:SmcJp5EBaV17rroNlgNQFydYwy0+fv85CUr/ZaCz208=
500
-
github.com/wyatt915/treeblood v0.1.16 h1:byxNbWZhnPDxdTp7W5kQhCeaY8RBVmojTFz1tEHgg8Y=
501
-
github.com/wyatt915/treeblood v0.1.16/go.mod h1:i7+yhhmzdDP17/97pIsOSffw74EK/xk+qJ0029cSXUY=
502
500
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
503
501
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
504
502
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
···
509
507
github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
510
508
github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
511
509
github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
510
+
github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs=
511
+
github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA=
512
512
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
513
513
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
514
514
gitlab.com/staticnoise/goldmark-callout v0.0.0-20240609120641-6366b799e4ab h1:gK9tS6QJw5F0SIhYJnGG2P83kuabOdmWBbSmZhJkz2A=
+4
-4
hook/hook.go
+4
-4
hook/hook.go
···
48
48
},
49
49
Commands: []*cli.Command{
50
50
{
51
-
Name: "post-recieve",
52
-
Usage: "sends a post-recieve hook to the knot (waits for stdin)",
53
-
Action: postRecieve,
51
+
Name: "post-receive",
52
+
Usage: "sends a post-receive hook to the knot (waits for stdin)",
53
+
Action: postReceive,
54
54
},
55
55
},
56
56
}
57
57
}
58
58
59
-
func postRecieve(ctx context.Context, cmd *cli.Command) error {
59
+
func postReceive(ctx context.Context, cmd *cli.Command) error {
60
60
gitDir := cmd.String("git-dir")
61
61
userDid := cmd.String("user-did")
62
62
userHandle := cmd.String("user-handle")
+1
-1
hook/setup.go
+1
-1
hook/setup.go
···
138
138
option_var="GIT_PUSH_OPTION_$i"
139
139
push_options+=(-push-option "${!option_var}")
140
140
done
141
-
%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-recieve
141
+
%s hook -git-dir "$GIT_DIR" -user-did "$GIT_USER_DID" -user-handle "$GIT_USER_HANDLE" -internal-api "%s" "${push_options[@]}" post-receive
142
142
`, executablePath, config.internalApi)
143
143
144
144
return os.WriteFile(hookPath, []byte(hookContent), 0755)
+1
-1
input.css
+1
-1
input.css
+15
-4
jetstream/jetstream.go
+15
-4
jetstream/jetstream.go
···
72
72
// existing instances of the closure when j.WantedDids is mutated
73
73
return func(ctx context.Context, evt *models.Event) error {
74
74
75
+
j.mu.RLock()
75
76
// empty filter => all dids allowed
76
-
if len(j.wantedDids) == 0 {
77
-
return processFunc(ctx, evt)
77
+
matches := len(j.wantedDids) == 0
78
+
if !matches {
79
+
if _, ok := j.wantedDids[evt.Did]; ok {
80
+
matches = true
81
+
}
78
82
}
83
+
j.mu.RUnlock()
79
84
80
-
if _, ok := j.wantedDids[evt.Did]; ok {
85
+
if matches {
81
86
return processFunc(ctx, evt)
82
87
} else {
83
88
return nil
···
122
127
123
128
go func() {
124
129
if j.waitForDid {
125
-
for len(j.wantedDids) == 0 {
130
+
for {
131
+
j.mu.RLock()
132
+
hasDid := len(j.wantedDids) != 0
133
+
j.mu.RUnlock()
134
+
if hasDid {
135
+
break
136
+
}
126
137
time.Sleep(time.Second)
127
138
}
128
139
}
+81
knotserver/db/db.go
+81
knotserver/db/db.go
···
1
+
package db
2
+
3
+
import (
4
+
"context"
5
+
"database/sql"
6
+
"log/slog"
7
+
"strings"
8
+
9
+
_ "github.com/mattn/go-sqlite3"
10
+
"tangled.org/core/log"
11
+
)
12
+
13
+
type DB struct {
14
+
db *sql.DB
15
+
logger *slog.Logger
16
+
}
17
+
18
+
func Setup(ctx context.Context, dbPath string) (*DB, error) {
19
+
// https://github.com/mattn/go-sqlite3#connection-string
20
+
opts := []string{
21
+
"_foreign_keys=1",
22
+
"_journal_mode=WAL",
23
+
"_synchronous=NORMAL",
24
+
"_auto_vacuum=incremental",
25
+
}
26
+
27
+
logger := log.FromContext(ctx)
28
+
logger = log.SubLogger(logger, "db")
29
+
30
+
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
31
+
if err != nil {
32
+
return nil, err
33
+
}
34
+
35
+
conn, err := db.Conn(ctx)
36
+
if err != nil {
37
+
return nil, err
38
+
}
39
+
defer conn.Close()
40
+
41
+
_, err = conn.ExecContext(ctx, `
42
+
create table if not exists known_dids (
43
+
did text primary key
44
+
);
45
+
46
+
create table if not exists public_keys (
47
+
id integer primary key autoincrement,
48
+
did text not null,
49
+
key text not null,
50
+
created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
51
+
unique(did, key),
52
+
foreign key (did) references known_dids(did) on delete cascade
53
+
);
54
+
55
+
create table if not exists _jetstream (
56
+
id integer primary key autoincrement,
57
+
last_time_us integer not null
58
+
);
59
+
60
+
create table if not exists events (
61
+
rkey text not null,
62
+
nsid text not null,
63
+
event text not null, -- json
64
+
created integer not null default (strftime('%s', 'now')),
65
+
primary key (rkey, nsid)
66
+
);
67
+
68
+
create table if not exists migrations (
69
+
id integer primary key autoincrement,
70
+
name text unique
71
+
);
72
+
`)
73
+
if err != nil {
74
+
return nil, err
75
+
}
76
+
77
+
return &DB{
78
+
db: db,
79
+
logger: logger,
80
+
}, nil
81
+
}
-64
knotserver/db/init.go
-64
knotserver/db/init.go
···
1
-
package db
2
-
3
-
import (
4
-
"database/sql"
5
-
"strings"
6
-
7
-
_ "github.com/mattn/go-sqlite3"
8
-
)
9
-
10
-
type DB struct {
11
-
db *sql.DB
12
-
}
13
-
14
-
func Setup(dbPath string) (*DB, error) {
15
-
// https://github.com/mattn/go-sqlite3#connection-string
16
-
opts := []string{
17
-
"_foreign_keys=1",
18
-
"_journal_mode=WAL",
19
-
"_synchronous=NORMAL",
20
-
"_auto_vacuum=incremental",
21
-
}
22
-
23
-
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
24
-
if err != nil {
25
-
return nil, err
26
-
}
27
-
28
-
// NOTE: If any other migration is added here, you MUST
29
-
// copy the pattern in appview: use a single sql.Conn
30
-
// for every migration.
31
-
32
-
_, err = db.Exec(`
33
-
create table if not exists known_dids (
34
-
did text primary key
35
-
);
36
-
37
-
create table if not exists public_keys (
38
-
id integer primary key autoincrement,
39
-
did text not null,
40
-
key text not null,
41
-
created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
42
-
unique(did, key),
43
-
foreign key (did) references known_dids(did) on delete cascade
44
-
);
45
-
46
-
create table if not exists _jetstream (
47
-
id integer primary key autoincrement,
48
-
last_time_us integer not null
49
-
);
50
-
51
-
create table if not exists events (
52
-
rkey text not null,
53
-
nsid text not null,
54
-
event text not null, -- json
55
-
created integer not null default (strftime('%s', 'now')),
56
-
primary key (rkey, nsid)
57
-
);
58
-
`)
59
-
if err != nil {
60
-
return nil, err
61
-
}
62
-
63
-
return &DB{db: db}, nil
64
-
}
+1
-17
knotserver/git/diff.go
+1
-17
knotserver/git/diff.go
···
77
77
nd.Diff = append(nd.Diff, ndiff)
78
78
}
79
79
80
-
nd.Stat.FilesChanged = len(diffs)
81
-
nd.Commit.This = c.Hash.String()
82
-
nd.Commit.PGPSignature = c.PGPSignature
83
-
nd.Commit.Committer = c.Committer
84
-
nd.Commit.Tree = c.TreeHash.String()
85
-
86
-
if parent.Hash.IsZero() {
87
-
nd.Commit.Parent = ""
88
-
} else {
89
-
nd.Commit.Parent = parent.Hash.String()
90
-
}
91
-
nd.Commit.Author = c.Author
92
-
nd.Commit.Message = c.Message
93
-
94
-
if v, ok := c.ExtraHeaders["change-id"]; ok {
95
-
nd.Commit.ChangedId = string(v)
96
-
}
80
+
nd.Commit.FromGoGitCommit(c)
97
81
98
82
return &nd, nil
99
83
}
+38
-2
knotserver/git/fork.go
+38
-2
knotserver/git/fork.go
···
3
3
import (
4
4
"errors"
5
5
"fmt"
6
+
"log/slog"
7
+
"net/url"
6
8
"os/exec"
9
+
"path/filepath"
7
10
8
11
"github.com/go-git/go-git/v5"
9
12
"github.com/go-git/go-git/v5/config"
13
+
knotconfig "tangled.org/core/knotserver/config"
10
14
)
11
15
12
-
func Fork(repoPath, source string) error {
13
-
cloneCmd := exec.Command("git", "clone", "--bare", source, repoPath)
16
+
func Fork(repoPath, source string, cfg *knotconfig.Config) error {
17
+
u, err := url.Parse(source)
18
+
if err != nil {
19
+
return fmt.Errorf("failed to parse source URL: %w", err)
20
+
}
21
+
22
+
if o := optimizeClone(u, cfg); o != nil {
23
+
u = o
24
+
}
25
+
26
+
cloneCmd := exec.Command("git", "clone", "--bare", u.String(), repoPath)
14
27
if err := cloneCmd.Run(); err != nil {
15
28
return fmt.Errorf("failed to bare clone repository: %w", err)
16
29
}
···
21
34
}
22
35
23
36
return nil
37
+
}
38
+
39
+
func optimizeClone(u *url.URL, cfg *knotconfig.Config) *url.URL {
40
+
// only optimize if it's the same host
41
+
if u.Host != cfg.Server.Hostname {
42
+
return nil
43
+
}
44
+
45
+
local := filepath.Join(cfg.Repo.ScanPath, u.Path)
46
+
47
+
// sanity check: is there a git repo there?
48
+
if _, err := PlainOpen(local); err != nil {
49
+
return nil
50
+
}
51
+
52
+
// create optimized file:// URL
53
+
optimized := &url.URL{
54
+
Scheme: "file",
55
+
Path: local,
56
+
}
57
+
58
+
slog.Debug("performing local clone", "url", optimized.String())
59
+
return optimized
24
60
}
25
61
26
62
func (g *GitRepo) Sync() error {
knotserver/git/service/service.go  (+13 -1)
···
     return c.RunService(cmd)
 }

+func (c *ServiceCommand) UploadArchive() error {
+    cmd := exec.Command("git", []string{
+        "upload-archive",
+        ".",
+    }...)
+
+    cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+    cmd.Env = append(cmd.Env, fmt.Sprintf("GIT_PROTOCOL=%s", c.GitProtocol))
+    cmd.Dir = c.Dir
+
+    return c.RunService(cmd)
+}
+
 func (c *ServiceCommand) UploadPack() error {
     cmd := exec.Command("git", []string{
-        "-c", "uploadpack.allowFilter=true",
         "upload-pack",
         "--stateless-rpc",
         ".",
knotserver/git.go  (+47)
···
     }
 }

+func (h *Knot) UploadArchive(w http.ResponseWriter, r *http.Request) {
+    did := chi.URLParam(r, "did")
+    name := chi.URLParam(r, "name")
+    repo, err := securejoin.SecureJoin(h.c.Repo.ScanPath, filepath.Join(did, name))
+    if err != nil {
+        gitError(w, err.Error(), http.StatusInternalServerError)
+        h.l.Error("git: failed to secure join repo path", "handler", "UploadPack", "error", err)
+        return
+    }
+
+    const expectedContentType = "application/x-git-upload-archive-request"
+    contentType := r.Header.Get("Content-Type")
+    if contentType != expectedContentType {
+        gitError(w, fmt.Sprintf("Expected Content-Type: '%s', but received '%s'.", expectedContentType, contentType), http.StatusUnsupportedMediaType)
+    }
+
+    var bodyReader io.ReadCloser = r.Body
+    if r.Header.Get("Content-Encoding") == "gzip" {
+        gzipReader, err := gzip.NewReader(r.Body)
+        if err != nil {
+            gitError(w, err.Error(), http.StatusInternalServerError)
+            h.l.Error("git: failed to create gzip reader", "handler", "UploadArchive", "error", err)
+            return
+        }
+        defer gzipReader.Close()
+        bodyReader = gzipReader
+    }
+
+    w.Header().Set("Content-Type", "application/x-git-upload-archive-result")
+
+    h.l.Info("git: executing git-upload-archive", "handler", "UploadArchive", "repo", repo)
+
+    cmd := service.ServiceCommand{
+        GitProtocol: r.Header.Get("Git-Protocol"),
+        Dir:         repo,
+        Stdout:      w,
+        Stdin:       bodyReader,
+    }
+
+    w.WriteHeader(http.StatusOK)
+
+    if err := cmd.UploadArchive(); err != nil {
+        h.l.Error("git: failed to execute git-upload-pack", "handler", "UploadPack", "error", err)
+        return
+    }
+}
+
 func (h *Knot) UploadPack(w http.ResponseWriter, r *http.Request) {
     did := chi.URLParam(r, "did")
     name := chi.URLParam(r, "name")
knotserver/ingester.go  (-136)
···
     "io"
     "net/http"
     "net/url"
-    "path/filepath"
     "strings"

     comatproto "github.com/bluesky-social/indigo/api/atproto"
···
     securejoin "github.com/cyphar/filepath-securejoin"
     "tangled.org/core/api/tangled"
     "tangled.org/core/knotserver/db"
-    "tangled.org/core/knotserver/git"
     "tangled.org/core/log"
     "tangled.org/core/rbac"
-    "tangled.org/core/workflow"
 )

 func (h *Knot) processPublicKey(ctx context.Context, event *models.Event) error {
···
     return nil
 }

-func (h *Knot) processPull(ctx context.Context, event *models.Event) error {
-    raw := json.RawMessage(event.Commit.Record)
-    did := event.Did
-
-    var record tangled.RepoPull
-    if err := json.Unmarshal(raw, &record); err != nil {
-        return fmt.Errorf("failed to unmarshal record: %w", err)
-    }
-
-    l := log.FromContext(ctx)
-    l = l.With("handler", "processPull")
-    l = l.With("did", did)
-
-    if record.Target == nil {
-        return fmt.Errorf("ignoring pull record: target repo is nil")
-    }
-
-    l = l.With("target_repo", record.Target.Repo)
-    l = l.With("target_branch", record.Target.Branch)
-
-    if record.Source == nil {
-        return fmt.Errorf("ignoring pull record: not a branch-based pull request")
-    }
-
-    if record.Source.Repo != nil {
-        return fmt.Errorf("ignoring pull record: fork based pull")
-    }
-
-    repoAt, err := syntax.ParseATURI(record.Target.Repo)
-    if err != nil {
-        return fmt.Errorf("failed to parse ATURI: %w", err)
-    }
-
-    // resolve this aturi to extract the repo record
-    ident, err := h.resolver.ResolveIdent(ctx, repoAt.Authority().String())
-    if err != nil || ident.Handle.IsInvalidHandle() {
-        return fmt.Errorf("failed to resolve handle: %w", err)
-    }
-
-    xrpcc := xrpc.Client{
-        Host: ident.PDSEndpoint(),
-    }
-
-    resp, err := comatproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String())
-    if err != nil {
-        return fmt.Errorf("failed to resolver repo: %w", err)
-    }
-
-    repo := resp.Value.Val.(*tangled.Repo)
-
-    if repo.Knot != h.c.Server.Hostname {
-        return fmt.Errorf("rejected pull record: not this knot, %s != %s", repo.Knot, h.c.Server.Hostname)
-    }
-
-    didSlashRepo, err := securejoin.SecureJoin(ident.DID.String(), repo.Name)
-    if err != nil {
-        return fmt.Errorf("failed to construct relative repo path: %w", err)
-    }
-
-    repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, didSlashRepo)
-    if err != nil {
-        return fmt.Errorf("failed to construct absolute repo path: %w", err)
-    }
-
-    gr, err := git.Open(repoPath, record.Source.Sha)
-    if err != nil {
-        return fmt.Errorf("failed to open git repository: %w", err)
-    }
-
-    workflowDir, err := gr.FileTree(ctx, workflow.WorkflowDir)
-    if err != nil {
-        return fmt.Errorf("failed to open workflow directory: %w", err)
-    }
-
-    var pipeline workflow.RawPipeline
-    for _, e := range workflowDir {
-        if !e.IsFile() {
-            continue
-        }
-
-        fpath := filepath.Join(workflow.WorkflowDir, e.Name)
-        contents, err := gr.RawContent(fpath)
-        if err != nil {
-            continue
-        }
-
-        pipeline = append(pipeline, workflow.RawWorkflow{
-            Name:     e.Name,
-            Contents: contents,
-        })
-    }
-
-    trigger := tangled.Pipeline_PullRequestTriggerData{
-        Action:       "create",
-        SourceBranch: record.Source.Branch,
-        SourceSha:    record.Source.Sha,
-        TargetBranch: record.Target.Branch,
-    }
-
-    compiler := workflow.Compiler{
-        Trigger: tangled.Pipeline_TriggerMetadata{
-            Kind:        string(workflow.TriggerKindPullRequest),
-            PullRequest: &trigger,
-            Repo: &tangled.Pipeline_TriggerRepo{
-                Did:  ident.DID.String(),
-                Knot: repo.Knot,
-                Repo: repo.Name,
-            },
-        },
-    }
-
-    cp := compiler.Compile(compiler.Parse(pipeline))
-    eventJson, err := json.Marshal(cp)
-    if err != nil {
-        return fmt.Errorf("failed to marshal pipeline event: %w", err)
-    }
-
-    // do not run empty pipelines
-    if cp.Workflows == nil {
-        return nil
-    }
-
-    ev := db.Event{
-        Rkey:      TID(),
-        Nsid:      tangled.PipelineNSID,
-        EventJson: string(eventJson),
-    }
-
-    return h.db.InsertEvent(ev, h.n)
-}
-
 // duplicated from add collaborator
 func (h *Knot) processCollaborator(ctx context.Context, event *models.Event) error {
     raw := json.RawMessage(event.Commit.Record)
···
         err = h.processPublicKey(ctx, event)
     case tangled.KnotMemberNSID:
         err = h.processKnotMember(ctx, event)
-    case tangled.RepoPullNSID:
-        err = h.processPull(ctx, event)
     case tangled.RepoCollaboratorNSID:
         err = h.processCollaborator(ctx, event)
     }
knotserver/internal.go  (+1 -109)
···
     "tangled.org/core/log"
     "tangled.org/core/notifier"
     "tangled.org/core/rbac"
-    "tangled.org/core/workflow"
 )

 type InternalHandle struct {
···
     }

     for _, line := range lines {
+        // TODO: pass pushOptions to refUpdate
         err := h.insertRefUpdate(line, gitUserDid, repoDid, repoName)
         if err != nil {
             l.Error("failed to insert op", "err", err, "line", line, "did", gitUserDid, "repo", gitRelativeDir)
···
         err = h.emitCompareLink(&resp.Messages, line, repoDid, repoName)
         if err != nil {
             l.Error("failed to reply with compare link", "err", err, "line", line, "did", gitUserDid, "repo", gitRelativeDir)
-            // non-fatal
-        }
-
-        err = h.triggerPipeline(&resp.Messages, line, gitUserDid, repoDid, repoName, pushOptions)
-        if err != nil {
-            l.Error("failed to trigger pipeline", "err", err, "line", line, "did", gitUserDid, "repo", gitRelativeDir)
             // non-fatal
         }
     }
···
     }

     return errors.Join(errs, h.db.InsertEvent(event, h.n))
-}
-
-func (h *InternalHandle) triggerPipeline(
-    clientMsgs *[]string,
-    line git.PostReceiveLine,
-    gitUserDid string,
-    repoDid string,
-    repoName string,
-    pushOptions PushOptions,
-) error {
-    if pushOptions.skipCi {
-        return nil
-    }
-
-    didSlashRepo, err := securejoin.SecureJoin(repoDid, repoName)
-    if err != nil {
-        return err
-    }
-
-    repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, didSlashRepo)
-    if err != nil {
-        return err
-    }
-
-    gr, err := git.Open(repoPath, line.Ref)
-    if err != nil {
-        return err
-    }
-
-    workflowDir, err := gr.FileTree(context.Background(), workflow.WorkflowDir)
-    if err != nil {
-        return err
-    }
-
-    var pipeline workflow.RawPipeline
-    for _, e := range workflowDir {
-        if !e.IsFile() {
-            continue
-        }
-
-        fpath := filepath.Join(workflow.WorkflowDir, e.Name)
-        contents, err := gr.RawContent(fpath)
-        if err != nil {
-            continue
-        }
-
-        pipeline = append(pipeline, workflow.RawWorkflow{
-            Name:     e.Name,
-            Contents: contents,
-        })
-    }
-
-    trigger := tangled.Pipeline_PushTriggerData{
-        Ref:    line.Ref,
-        OldSha: line.OldSha.String(),
-        NewSha: line.NewSha.String(),
-    }
-
-    compiler := workflow.Compiler{
-        Trigger: tangled.Pipeline_TriggerMetadata{
-            Kind: string(workflow.TriggerKindPush),
-            Push: &trigger,
-            Repo: &tangled.Pipeline_TriggerRepo{
-                Did:  repoDid,
-                Knot: h.c.Server.Hostname,
-                Repo: repoName,
-            },
-        },
-    }
-
-    cp := compiler.Compile(compiler.Parse(pipeline))
-    eventJson, err := json.Marshal(cp)
-    if err != nil {
-        return err
-    }
-
-    for _, e := range compiler.Diagnostics.Errors {
-        *clientMsgs = append(*clientMsgs, e.String())
-    }
-
-    if pushOptions.verboseCi {
-        if compiler.Diagnostics.IsEmpty() {
-            *clientMsgs = append(*clientMsgs, "success: pipeline compiled with no diagnostics")
-        }
-
-        for _, w := range compiler.Diagnostics.Warnings {
-            *clientMsgs = append(*clientMsgs, w.String())
-        }
-    }
-
-    // do not run empty pipelines
-    if cp.Workflows == nil {
-        return nil
-    }
-
-    event := db.Event{
-        Rkey:      TID(),
-        Nsid:      tangled.PipelineNSID,
-        EventJson: string(eventJson),
-    }
-
-    return h.db.InsertEvent(event, h.n)
 }

 func (h *InternalHandle) emitCompareLink(
knotserver/router.go  (+1)
knotserver/server.go  (+1 -2)
···
         logger.Info("running in dev mode, signature verification is disabled")
     }

-    db, err := db.Setup(c.Server.DBPath)
+    db, err := db.Setup(ctx, c.Server.DBPath)
     if err != nil {
         return fmt.Errorf("failed to load db: %w", err)
     }
···
     jc, err := jetstream.NewJetstreamClient(c.Server.JetstreamEndpoint, "knotserver", []string{
         tangled.PublicKeyNSID,
         tangled.KnotMemberNSID,
-        tangled.RepoPullNSID,
         tangled.RepoCollaboratorNSID,
     }, nil, log.SubLogger(logger, "jetstream"), db, true, c.Server.LogDids)
     if err != nil {
knotserver/xrpc/create_repo.go  (+1 -1)
···
     repoPath, _ := securejoin.SecureJoin(h.Config.Repo.ScanPath, relativeRepoPath)

     if data.Source != nil && *data.Source != "" {
-        err = git.Fork(repoPath, *data.Source)
+        err = git.Fork(repoPath, *data.Source, h.Config)
         if err != nil {
             l.Error("forking repo", "error", err.Error())
             writeError(w, xrpcerr.GenericError(err), http.StatusInternalServerError)
knotserver/xrpc/repo_log.go  (+6 -1)
···
         return
     }

+    tcommits := make([]types.Commit, len(commits))
+    for i, c := range commits {
+        tcommits[i].FromGoGitCommit(c)
+    }
+
     // Create response using existing types.RepoLogResponse
     response := types.RepoLogResponse{
-        Commits: commits,
+        Commits: tcommits,
         Ref:     ref,
         Page:    (offset / limit) + 1,
         PerPage: limit,
lexicons/issue/comment.json  (+14)
···
       "replyTo": {
         "type": "string",
         "format": "at-uri"
+      },
+      "mentions": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "did"
+        }
+      },
+      "references": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "at-uri"
+        }
       }
     }
   }
lexicons/issue/issue.json  (+14)
···
       "createdAt": {
         "type": "string",
         "format": "datetime"
+      },
+      "mentions": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "did"
+        }
+      },
+      "references": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "at-uri"
+        }
       }
     }
   }
lexicons/pipeline/cancelPipeline.json  (+33, new file)
{
  "lexicon": 1,
  "id": "sh.tangled.pipeline.cancelPipeline",
  "defs": {
    "main": {
      "type": "procedure",
      "description": "Cancel a running pipeline",
      "input": {
        "encoding": "application/json",
        "schema": {
          "type": "object",
          "required": ["repo", "pipeline", "workflow"],
          "properties": {
            "repo": {
              "type": "string",
              "format": "at-uri",
              "description": "repo at-uri, spindle can't resolve repo from pipeline at-uri yet"
            },
            "pipeline": {
              "type": "string",
              "format": "at-uri",
              "description": "pipeline at-uri"
            },
            "workflow": {
              "type": "string",
              "description": "workflow name"
            }
          }
        }
      }
    }
  }
}
lexicons/pulls/comment.json  (+14)
···
       "createdAt": {
         "type": "string",
         "format": "datetime"
+      },
+      "mentions": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "did"
+        }
+      },
+      "references": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "at-uri"
+        }
       }
     }
   }
lexicons/pulls/pull.json  (+14)
···
       "createdAt": {
         "type": "string",
         "format": "datetime"
+      },
+      "mentions": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "did"
+        }
+      },
+      "references": {
+        "type": "array",
+        "items": {
+          "type": "string",
+          "format": "at-uri"
+        }
       }
     }
   }
nix/gomod2nix.toml  (+6 -30)
···
 [mod."github.com/davecgh/go-spew"]
   version = "v1.1.2-0.20180830191138-d8f796af33cc"
   hash = "sha256-fV9oI51xjHdOmEx6+dlq7Ku2Ag+m/bmbzPo6A4Y74qc="
-[mod."github.com/decred/dcrd/dcrec/secp256k1/v4"]
-  version = "v4.4.0"
-  hash = "sha256-qrhEIwhDll3cxoVpMbm1NQ9/HTI42S7ms8Buzlo5HCg="
 [mod."github.com/dgraph-io/ristretto"]
   version = "v0.2.0"
   hash = "sha256-bnpxX+oO/Qf7IJevA0gsbloVoqRx+5bh7RQ9d9eLNYw="
···
 [mod."github.com/hashicorp/go-sockaddr"]
   version = "v1.0.7"
   hash = "sha256-p6eDOrGzN1jMmT/F/f/VJMq0cKNFhUcEuVVwTE6vSrs="
+[mod."github.com/hashicorp/go-version"]
+  version = "v1.8.0"
+  hash = "sha256-KXtqERmYrWdpqPCViWcHbe6jnuH7k16bvBIcuJuevj8="
 [mod."github.com/hashicorp/golang-lru"]
   version = "v1.0.2"
   hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
···
 [mod."github.com/klauspost/cpuid/v2"]
   version = "v2.3.0"
   hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc="
-[mod."github.com/lestrrat-go/blackmagic"]
-  version = "v1.0.4"
-  hash = "sha256-HmWOpwoPDNMwLdOi7onNn3Sb+ZsAa3Ai3gVBbXmQ0e8="
-[mod."github.com/lestrrat-go/httpcc"]
-  version = "v1.0.1"
-  hash = "sha256-SMRSwJpqDIs/xL0l2e8vP0W65qtCHX2wigcOeqPJmos="
-[mod."github.com/lestrrat-go/httprc"]
-  version = "v1.0.6"
-  hash = "sha256-mfZzePEhrmyyu/avEBd2MsDXyto8dq5+fyu5lA8GUWM="
-[mod."github.com/lestrrat-go/iter"]
-  version = "v1.0.2"
-  hash = "sha256-30tErRf7Qu/NOAt1YURXY/XJSA6sCr6hYQfO8QqHrtw="
-[mod."github.com/lestrrat-go/jwx/v2"]
-  version = "v2.1.6"
-  hash = "sha256-0LszXRZIba+X8AOrs3T4uanAUafBdlVB8/MpUNEFpbc="
-[mod."github.com/lestrrat-go/option"]
-  version = "v1.0.1"
-  hash = "sha256-jVcIYYVsxElIS/l2akEw32vdEPR8+anR6oeT1FoYULI="
 [mod."github.com/lucasb-eyer/go-colorful"]
   version = "v1.2.0"
   hash = "sha256-Gg9dDJFCTaHrKHRR1SrJgZ8fWieJkybljybkI9x0gyE="
···
 [mod."github.com/ryanuber/go-glob"]
   version = "v1.0.0"
   hash = "sha256-YkMl1utwUhi3E0sHK23ISpAsPyj4+KeXyXKoFYGXGVY="
-[mod."github.com/segmentio/asm"]
-  version = "v1.2.0"
-  hash = "sha256-zbNuKxNrUDUc6IlmRQNuJQzVe5Ol/mqp7srDg9IMMqs="
 [mod."github.com/sergi/go-diff"]
   version = "v1.1.0"
   hash = "sha256-8NJMabldpf40uwQN20T6QXx5KORDibCBJL02KD661xY="
···
 [mod."github.com/whyrusleeping/cbor-gen"]
   version = "v0.3.1"
   hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
-[mod."github.com/wyatt915/goldmark-treeblood"]
-  version = "v0.0.1"
-  hash = "sha256-hAVFaktO02MiiqZFffr8ZlvFEfwxw4Y84OZ2t7e5G7g="
-[mod."github.com/wyatt915/treeblood"]
-  version = "v0.1.16"
-  hash = "sha256-T68sa+iVx0qY7dDjXEAJvRWQEGXYIpUsf9tcWwO1tIw="
 [mod."github.com/xo/terminfo"]
   version = "v0.0.0-20220910002029-abceb7e1c41e"
   hash = "sha256-GyCDxxMQhXA3Pi/TsWXpA8cX5akEoZV7CFx4RO3rARU="
 [mod."github.com/yuin/goldmark"]
   version = "v1.7.13"
   hash = "sha256-vBCxZrPYPc8x/nvAAv3Au59dCCyfS80Vw3/a9EXK7TE="
+[mod."github.com/yuin/goldmark-emoji"]
+  version = "v1.0.6"
+  hash = "sha256-+d6bZzOPE+JSFsZbQNZMCWE+n3jgcQnkPETVk47mxSY="
 [mod."github.com/yuin/goldmark-highlighting/v2"]
   version = "v2.0.0-20230729083705-37449abec8cc"
   hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
nix/modules/bluesky-jetstream.nix  (+64, new file)
{
  config,
  pkgs,
  lib,
  ...
}: let
  cfg = config.services.bluesky-jetstream;
in
  with lib; {
    options.services.bluesky-jetstream = {
      enable = mkEnableOption "jetstream server";
      package = mkPackageOption pkgs "bluesky-jetstream" {};

      # dataDir = mkOption {
      #   type = types.str;
      #   default = "/var/lib/jetstream";
      #   description = "directory to store data (pebbleDB)";
      # };
      livenessTtl = mkOption {
        type = types.int;
        default = 15;
        description = "time to restart when no event detected (seconds)";
      };
      websocketUrl = mkOption {
        type = types.str;
        default = "wss://bsky.network/xrpc/com.atproto.sync.subscribeRepos";
        description = "full websocket path to the ATProto SubscribeRepos XRPC endpoint";
      };
    };
    config = mkIf cfg.enable {
      systemd.services.bluesky-jetstream = {
        description = "bluesky jetstream";
        after = ["network.target" "pds.service"];
        wantedBy = ["multi-user.target"];

        serviceConfig = {
          User = "jetstream";
          Group = "jetstream";
          StateDirectory = "jetstream";
          StateDirectoryMode = "0755";
          # preStart = ''
          #   mkdir -p "${cfg.dataDir}"
          #   chown -R jetstream:jetstream "${cfg.dataDir}"
          # '';
          # WorkingDirectory = cfg.dataDir;
          Environment = [
            "JETSTREAM_DATA_DIR=/var/lib/jetstream/data"
            "JETSTREAM_LIVENESS_TTL=${toString cfg.livenessTtl}s"
            "JETSTREAM_WS_URL=${cfg.websocketUrl}"
          ];
          ExecStart = getExe cfg.package;
          Restart = "always";
          RestartSec = 5;
        };
      };
      users = {
        users.jetstream = {
          group = "jetstream";
          isSystemUser = true;
        };
        groups.jetstream = {};
      };
    };
  }
nix/modules/bluesky-relay.nix  (+48, new file)
{
  config,
  pkgs,
  lib,
  ...
}: let
  cfg = config.services.bluesky-relay;
in
  with lib; {
    options.services.bluesky-relay = {
      enable = mkEnableOption "relay server";
      package = mkPackageOption pkgs "bluesky-relay" {};
    };
    config = mkIf cfg.enable {
      systemd.services.bluesky-relay = {
        description = "bluesky relay";
        after = ["network.target" "pds.service"];
        wantedBy = ["multi-user.target"];

        serviceConfig = {
          User = "relay";
          Group = "relay";
          StateDirectory = "relay";
          StateDirectoryMode = "0755";
          Environment = [
            "RELAY_ADMIN_PASSWORD=password"
            "RELAY_PLC_HOST=https://plc.tngl.boltless.dev"
            "DATABASE_URL=sqlite:///var/lib/relay/relay.sqlite"
            "RELAY_IP_BIND=:2470"
            "RELAY_PERSIST_DIR=/var/lib/relay"
            "RELAY_DISABLE_REQUEST_CRAWL=0"
            "RELAY_INITIAL_SEQ_NUMBER=1"
            "RELAY_ALLOW_INSECURE_HOSTS=1"
          ];
          ExecStart = "${getExe cfg.package} serve";
          Restart = "always";
          RestartSec = 5;
        };
      };
      users = {
        users.relay = {
          group = "relay";
          isSystemUser = true;
        };
        groups.relay = {};
      };
    };
  }
nix/modules/did-method-plc.nix  (+76, new file)
{
  config,
  pkgs,
  lib,
  ...
}: let
  cfg = config.services.did-method-plc;
in
  with lib; {
    options.services.did-method-plc = {
      enable = mkEnableOption "did-method-plc server";
      package = mkPackageOption pkgs "did-method-plc" {};
    };
    config = mkIf cfg.enable {
      services.postgresql = {
        enable = true;
        package = pkgs.postgresql_14;
        ensureDatabases = ["plc"];
        ensureUsers = [
          {
            name = "pg";
            # ensurePermissions."DATABASE plc" = "ALL PRIVILEGES";
          }
        ];
        authentication = ''
          local all all trust
          host all all 127.0.0.1/32 trust
        '';
      };
      systemd.services.did-method-plc = {
        description = "did-method-plc";

        after = ["postgresql.service"];
        wants = ["postgresql.service"];
        wantedBy = ["multi-user.target"];

        environment = let
          db_creds_json = builtins.toJSON {
            username = "pg";
            password = "";
            host = "127.0.0.1";
            port = 5432;
          };
        in {
          # TODO: inherit from config
          DEBUG_MODE = "1";
          LOG_ENABLED = "true";
          LOG_LEVEL = "debug";
          LOG_DESTINATION = "1";
          ENABLE_MIGRATIONS = "true";
          DB_CREDS_JSON = db_creds_json;
          DB_MIGRATE_CREDS_JSON = db_creds_json;
          PLC_VERSION = "0.0.1";
          PORT = "8080";
        };

        serviceConfig = {
          ExecStart = getExe cfg.package;
          User = "plc";
          Group = "plc";
          StateDirectory = "plc";
          StateDirectoryMode = "0755";
          Restart = "always";

          # Hardening
        };
      };
      users = {
        users.plc = {
          group = "plc";
          isSystemUser = true;
        };
        groups.plc = {};
      };
    };
  }
nix/modules/knot.nix  (+2)
nix/modules/spindle.nix  (+46 -12)
···
 {
   config,
+  pkgs,
   lib,
   ...
 }: let
···
       type = types.package;
       description = "Package to use for the spindle";
     };
+    tap-package = mkOption {
+      type = types.package;
+      description = "Package to use for the spindle";
+    };
+
+    atpRelayUrl = mkOption {
+      type = types.str;
+      default = "https://relay1.us-east.bsky.network";
+      description = "atproto relay";
+    };

     server = {
       listenAddr = mkOption {
···
         description = "Address to listen on";
       };

-      dbPath = mkOption {
+      stateDir = mkOption {
         type = types.path;
-        default = "/var/lib/spindle/spindle.db";
-        description = "Path to the database file";
+        default = "/var/lib/spindle";
+        description = "Tangled spindle data directory";
       };

       hostname = mkOption {
···
         type = types.str;
         default = "https://plc.directory";
         description = "atproto PLC directory";
-      };
-
-      jetstreamEndpoint = mkOption {
-        type = types.str;
-        default = "wss://jetstream1.us-west.bsky.network/subscribe";
-        description = "Jetstream endpoint to subscribe to";
       };

       dev = mkOption {
···
   config = mkIf cfg.enable {
     virtualisation.docker.enable = true;

+    systemd.services.spindle-tap = {
+      description = "spindle tap service";
+      after = ["network.target" "docker.service"];
+      wantedBy = ["multi-user.target"];
+      serviceConfig = {
+        LogsDirectory = "spindle-tap";
+        StateDirectory = "spindle-tap";
+        Environment = [
+          "TAP_BIND=:2480"
+          "TAP_PLC_URL=${cfg.server.plcUrl}"
+          "TAP_RELAY_URL=${cfg.atpRelayUrl}"
+          "TAP_DATABASE_URL=sqlite:///var/lib/spindle-tap/tap.db"
+          "TAP_RETRY_TIMEOUT=3s"
+          "TAP_COLLECTION_FILTERS=${concatStringsSep "," [
+            "sh.tangled.repo"
+            "sh.tangled.repo.collaborator"
+            "sh.tangled.spindle.member"
+            "sh.tangled.repo.pull"
+          ]}"
+          # temporary hack to listen for repo.pull from non-tangled users
+          "TAP_SIGNAL_COLLECTION=sh.tangled.repo.pull"
+        ];
+        ExecStart = "${getExe cfg.tap-package} run";
+      };
+    };
+
     systemd.services.spindle = {
       description = "spindle service";
-      after = ["network.target" "docker.service"];
+      after = ["network.target" "docker.service" "spindle-tap.service"];
       wantedBy = ["multi-user.target"];
+      path = [
+        pkgs.git
+      ];
       serviceConfig = {
         LogsDirectory = "spindle";
         StateDirectory = "spindle";
         Environment = [
           "SPINDLE_SERVER_LISTEN_ADDR=${cfg.server.listenAddr}"
-          "SPINDLE_SERVER_DB_PATH=${cfg.server.dbPath}"
+          "SPINDLE_SERVER_DATA_DIR=${cfg.server.stateDir}"
           "SPINDLE_SERVER_HOSTNAME=${cfg.server.hostname}"
           "SPINDLE_SERVER_PLC_URL=${cfg.server.plcUrl}"
-          "SPINDLE_SERVER_JETSTREAM_ENDPOINT=${cfg.server.jetstreamEndpoint}"
           "SPINDLE_SERVER_DEV=${lib.boolToString cfg.server.dev}"
           "SPINDLE_SERVER_OWNER=${cfg.server.owner}"
           "SPINDLE_SERVER_MAX_JOB_COUNT=${toString cfg.server.maxJobCount}"
···
           "SPINDLE_SERVER_SECRETS_PROVIDER=${cfg.server.secrets.provider}"
           "SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=${cfg.server.secrets.openbao.proxyAddr}"
           "SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=${cfg.server.secrets.openbao.mount}"
+          "SPINDLE_SERVER_TAP_URL=http://localhost:2480"
           "SPINDLE_NIXERY_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
           "SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
         ];
nix/pkgs/bluesky-jetstream.nix  (+20, new file)
{
  buildGoModule,
  fetchFromGitHub,
}:
buildGoModule {
  pname = "bluesky-jetstream";
  version = "0.1.0";
  src = fetchFromGitHub {
    owner = "bluesky-social";
    repo = "jetstream";
    rev = "7d7efa58d7f14101a80ccc4f1085953948b7d5de";
    sha256 = "sha256-1e9SL/8gaDPMA4YZed51ffzgpkptbMd0VTbTTDbPTFw=";
  };
  subPackages = ["cmd/jetstream"];
  vendorHash = "sha256-/21XJQH6fo9uPzlABUAbdBwt1O90odmppH6gXu2wkiQ=";
  doCheck = false;
  meta = {
    mainProgram = "jetstream";
  };
}
nix/pkgs/bluesky-relay.nix  (+20, new file)
{
  buildGoModule,
  fetchFromGitHub,
}:
buildGoModule {
  pname = "bluesky-relay";
  version = "0.1.0";
  src = fetchFromGitHub {
    owner = "boltlessengineer";
    repo = "indigo";
    rev = "7fe70a304d795b998f354d2b7b2050b909709c99";
    sha256 = "sha256-+h34x67cqH5t30+8rua53/ucvbn3BanrmH0Og3moHok=";
  };
  subPackages = ["cmd/relay"];
  vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8=";
  doCheck = false;
  meta = {
    mainProgram = "relay";
  };
}
nix/pkgs/did-method-plc.nix  (+65, new file)
# inspired by https://github.com/NixOS/nixpkgs/blob/333bfb7c258fab089a834555ea1c435674c459b4/pkgs/by-name/ga/gatsby-cli/package.nix
{
  lib,
  stdenv,
  fetchFromGitHub,
  fetchYarnDeps,
  yarnConfigHook,
  yarnBuildHook,
  nodejs,
  makeBinaryWrapper,
}:
stdenv.mkDerivation (finalAttrs: {
  pname = "did-method-plc";
  version = "0.0.1";

  src = fetchFromGitHub {
    owner = "did-method-plc";
    repo = "did-method-plc";
    rev = "158ba5535ac3da4fd4309954bde41deab0b45972";
    sha256 = "sha256-O5smubbrnTDMCvL6iRyMXkddr5G7YHxkQRVMRULHanQ=";
  };
  postPatch = ''
    # remove dd-trace dependency
    sed -i '3d' packages/server/service/index.js
  '';

  yarnOfflineCache = fetchYarnDeps {
    yarnLock = finalAttrs.src + "/yarn.lock";
    hash = "sha256-g8GzaAbWSnWwbQjJMV2DL5/ZlWCCX0sRkjjvX3tqU4Y=";
  };

  nativeBuildInputs = [
    yarnConfigHook
    yarnBuildHook
    nodejs
    makeBinaryWrapper
  ];
  yarnBuildScript = "lerna";
  yarnBuildFlags = [
    "run"
    "build"
    "--scope"
    "@did-plc/server"
    "--include-dependencies"
  ];

  installPhase = ''
    runHook preInstall

    mkdir -p $out/lib/node_modules/
    mv packages/ $out/lib/packages/
    mv node_modules/* $out/lib/node_modules/

    makeWrapper ${lib.getExe nodejs} $out/bin/plc \
      --add-flags $out/lib/packages/server/service/index.js \
      --add-flags --enable-source-maps \
      --set NODE_PATH $out/lib/node_modules

    runHook postInstall
  '';

  meta = {
    mainProgram = "plc";
  };
})
nix/pkgs/docs.nix  (+41, new file)
{
  pandoc,
  tailwindcss,
  runCommandLocal,
  inter-fonts-src,
  ibm-plex-mono-src,
  lucide-src,
  src,
}:
runCommandLocal "docs" {} ''
  mkdir -p working

  # copy templates, themes, styles, filters to working directory
  cp ${src}/docs/*.html working/
  cp ${src}/docs/*.theme working/
  cp ${src}/docs/*.css working/

  # icons
  cp -rf ${lucide-src}/*.svg working/

  # content
  ${pandoc}/bin/pandoc ${src}/docs/DOCS.md \
    -o $out/ \
    -t chunkedhtml \
    --variable toc \
    --toc-depth=2 \
    --css=stylesheet.css \
    --chunk-template="%i.html" \
    --highlight-style=working/highlight.theme \
    --template=working/template.html

  # fonts
  mkdir -p $out/static/fonts
  cp -f ${inter-fonts-src}/web/InterVariable*.woff2 $out/static/fonts/
  cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 $out/static/fonts/
  cp -f ${inter-fonts-src}/InterVariable*.ttf $out/static/fonts/
  cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 $out/static/fonts/

  # styles
  cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/stylesheet.css
''
nix/pkgs/sqlite-lib.nix  (+7 -5)
···
 {
-  gcc,
   stdenv,
   sqlite-lib-src,
 }:
 stdenv.mkDerivation {
   name = "sqlite-lib";
   src = sqlite-lib-src;
-  nativeBuildInputs = [gcc];
+
   buildPhase = ''
-    gcc -c sqlite3.c
-    ar rcs libsqlite3.a sqlite3.o
-    ranlib libsqlite3.a
+    $CC -c sqlite3.c
+    $AR rcs libsqlite3.a sqlite3.o
+    $RANLIB libsqlite3.a
+  '';
+
+  installPhase = ''
     mkdir -p $out/include $out/lib
     cp *.h $out/include
     cp libsqlite3.a $out/lib
nix/pkgs/tap.nix  (+20, new file)
{
  buildGoModule,
  fetchFromGitHub,
}:
buildGoModule {
  pname = "tap";
  version = "0.1.0";
  src = fetchFromGitHub {
    owner = "bluesky-social";
    repo = "indigo";
    rev = "498ecb9693e8ae050f73234c86f340f51ad896a9";
    sha256 = "sha256-KASCdwkg/hlKBt7RTW3e3R5J3hqJkphoarFbaMgtN1k=";
  };
  subPackages = ["cmd/tap"];
  vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8=";
  doCheck = false;
  meta = {
    mainProgram = "tap";
  };
}
nix/vm.nix  (+13 -7)
···
     var = builtins.getEnv name;
   in
     if var == ""
-    then throw "\$${name} must be defined, see docs/hacking.md for more details"
+    then throw "\$${name} must be defined, see https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled for more details"
     else var;
   envVarOr = name: default: let
     var = builtins.getEnv name;
···

   plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
   jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
+  relayUrl = envVarOr "TANGLED_VM_RELAY_URL" "https://relay1.us-east.bsky.network";
 in
   nixpkgs.lib.nixosSystem {
     inherit system;
···
       # knot
       {
         from = "host";
-        host.port = 6000;
-        guest.port = 6000;
+        host.port = 6444;
+        guest.port = 6444;
       }
       # spindle
       {
···
         host.port = 6555;
         guest.port = 6555;
       }
+      {
+        from = "host";
+        host.port = 6556;
+        guest.port = 2480;
+      }
     ];
     sharedDirectories = {
       # We can't use the 9p mounts directly for most of these
···
       motd = "Welcome to the development knot!\n";
       server = {
         owner = envVar "TANGLED_VM_KNOT_OWNER";
-        hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6000";
+        hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6444";
         plcUrl = plcUrl;
         jetstreamEndpoint = jetstream;
-        listenAddr = "0.0.0.0:6000";
+        listenAddr = "0.0.0.0:6444";
       };
     };
     services.tangled.spindle = {
       enable = true;
+      atpRelayUrl = relayUrl;
       server = {
         owner = envVar "TANGLED_VM_SPINDLE_OWNER";
         hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
         plcUrl = plcUrl;
-        jetstreamEndpoint = jetstream;
         listenAddr = "0.0.0.0:6555";
         dev = true;
         queueSize = 100;
···
       };
     in {
       knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
-      spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
+      spindle = mkDataSyncScripts "/mnt/spindle-data" config.services.tangled.spindle.server.stateDir;
     };
   })
 ];
orm/orm.go  (+132, new file)
package orm

import (
    "context"
    "database/sql"
    "fmt"
    "log/slog"
    "reflect"
    "strings"
)

type migrationFn = func(*sql.Tx) error

func RunMigration(c *sql.Conn, logger *slog.Logger, name string, migrationFn migrationFn) error {
    logger = logger.With("migration", name)

    tx, err := c.BeginTx(context.Background(), nil)
    if err != nil {
        return err
    }
    defer tx.Rollback()

    _, err = tx.Exec(`
        create table if not exists migrations (
            id integer primary key autoincrement,
            name text unique
        );
    `)
    if err != nil {
        return fmt.Errorf("creating migrations table: %w", err)
    }

    var exists bool
    err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
    if err != nil {
        return err
    }

    if !exists {
        // run migration
        err = migrationFn(tx)
        if err != nil {
            logger.Error("failed to run migration", "err", err)
            return err
        }

        // mark migration as complete
        _, err = tx.Exec("insert into migrations (name) values (?)", name)
        if err != nil {
            logger.Error("failed to mark migration as complete", "err", err)
            return err
        }

        // commit the transaction
        if err := tx.Commit(); err != nil {
            return err
        }

        logger.Info("migration applied successfully")
    } else {
        logger.Warn("skipped migration, already applied")
    }

    return nil
}

type Filter struct {
    Key string
    arg any
    Cmp string
}

func newFilter(key, cmp string, arg any) Filter {
    return Filter{
        Key: key,
        arg: arg,
        Cmp: cmp,
    }
}

func FilterEq(key string, arg any) Filter      { return newFilter(key, "=", arg) }
func FilterNotEq(key string, arg any) Filter   { return newFilter(key, "<>", arg) }
func FilterGte(key string, arg any) Filter     { return newFilter(key, ">=", arg) }
func FilterLte(key string, arg any) Filter     { return newFilter(key, "<=", arg) }
func FilterIs(key string, arg any) Filter      { return newFilter(key, "is", arg) }
func FilterIsNot(key string, arg any) Filter   { return newFilter(key, "is not", arg) }
func FilterIn(key string, arg any) Filter      { return newFilter(key, "in", arg) }
func FilterLike(key string, arg any) Filter    { return newFilter(key, "like", arg) }
func FilterNotLike(key string, arg any) Filter { return newFilter(key, "not like", arg) }
func FilterContains(key string, arg any) Filter {
    return newFilter(key, "like", fmt.Sprintf("%%%v%%", arg))
}

func (f Filter) Condition() string {
    rv := reflect.ValueOf(f.arg)
    kind := rv.Kind()

    // if we have `FilterIn(k, [1, 2, 3])`, compile it down to `k in (?, ?, ?)`
    if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
        if rv.Len() == 0 {
            // always false
            return "1 = 0"
        }

        placeholders := make([]string, rv.Len())
        for i := range placeholders {
            placeholders[i] = "?"
        }

        return fmt.Sprintf("%s %s (%s)", f.Key, f.Cmp, strings.Join(placeholders, ", "))
    }

    return fmt.Sprintf("%s %s ?", f.Key, f.Cmp)
}

func (f Filter) Arg() []any {
    rv := reflect.ValueOf(f.arg)
    kind := rv.Kind()
    if (kind == reflect.Slice && rv.Type().Elem().Kind() != reflect.Uint8) || kind == reflect.Array {
        if rv.Len() == 0 {
            return nil
        }

        out := make([]any, rv.Len())
        for i := range rv.Len() {
            out[i] = rv.Index(i).Interface()
        }
        return out
    }

    return []any{f.arg}
}
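A quick sketch of how the Filter helpers above compile into SQL fragments and argument slices. The import path tangled.org/core/orm is assumed from the module's other packages, and the column names are made up for illustration:

    package main

    import (
        "fmt"

        "tangled.org/core/orm"
    )

    func main() {
        // a slice argument expands to one placeholder per element
        in := orm.FilterIn("nsid", []string{"sh.tangled.repo", "sh.tangled.repo.pull"})
        fmt.Println(in.Condition()) // nsid in (?, ?)
        fmt.Println(in.Arg())       // [sh.tangled.repo sh.tangled.repo.pull]

        // a scalar argument compiles to a single placeholder
        eq := orm.FilterEq("did", "did:plc:foo")
        fmt.Println(eq.Condition()) // did = ?
        fmt.Println(eq.Arg())       // [did:plc:foo]

        // an empty slice compiles to an always-false condition
        empty := orm.FilterIn("rkey", []string{})
        fmt.Println(empty.Condition()) // 1 = 0
    }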
patchutil/patchutil.go  (-1)
rbac/rbac.go  (+8)
···
     return e.E.Enforce(user, domain, repo, "repo:delete")
 }

+func (e *Enforcer) IsRepoOwner(user, domain, repo string) (bool, error) {
+    return e.E.Enforce(user, domain, repo, "repo:owner")
+}
+
+func (e *Enforcer) IsRepoCollaborator(user, domain, repo string) (bool, error) {
+    return e.E.Enforce(user, domain, repo, "repo:collaborator")
+}
+
 func (e *Enforcer) IsPushAllowed(user, domain, repo string) (bool, error) {
     return e.E.Enforce(user, domain, repo, "repo:push")
 }
rbac2/bytesadapter/adapter.go  (+52, new file)
package bytesadapter

import (
    "bufio"
    "bytes"
    "errors"
    "strings"

    "github.com/casbin/casbin/v2/model"
    "github.com/casbin/casbin/v2/persist"
)

var (
    errNotImplemented = errors.New("not implemented")
)

type Adapter struct {
    b []byte
}

var _ persist.Adapter = &Adapter{}

func NewAdapter(b []byte) *Adapter {
    return &Adapter{b}
}

func (a *Adapter) LoadPolicy(model model.Model) error {
    scanner := bufio.NewScanner(bytes.NewReader(a.b))
    for scanner.Scan() {
        line := strings.TrimSpace(scanner.Text())
        if err := persist.LoadPolicyLine(line, model); err != nil {
            return err
        }
    }
    return scanner.Err()
}

func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error {
    return errNotImplemented
}

func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error {
    return errNotImplemented
}

func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error {
    return errNotImplemented
}

func (a *Adapter) SavePolicy(model model.Model) error {
    return errNotImplemented
}
rbac2/rbac2.go  (+139, new file)
package rbac2

import (
    "database/sql"
    _ "embed"
    "fmt"

    adapter "github.com/Blank-Xu/sql-adapter"
    "github.com/bluesky-social/indigo/atproto/syntax"
    "github.com/casbin/casbin/v2"
    "github.com/casbin/casbin/v2/model"
    "github.com/casbin/casbin/v2/util"
    "tangled.org/core/rbac2/bytesadapter"
)

const (
    Model = `
[request_definition]
r = sub, dom, obj, act

[policy_definition]
p = sub, dom, obj, act

[role_definition]
g = _, _, _

[policy_effect]
e = some(where (p.eft == allow))

[matchers]
m = g(r.sub, p.sub, r.dom) && keyMatch4(r.dom, p.dom) && r.obj == p.obj && r.act == p.act
`
)

type Enforcer struct {
    e *casbin.Enforcer
}

//go:embed tangled_policy.csv
var tangledPolicy []byte

func NewEnforcer(path string) (*Enforcer, error) {
    db, err := sql.Open("sqlite3", path+"?_foreign_keys=1")
    if err != nil {
        return nil, err
    }
    return NewEnforcerWithDB(db)
}

func NewEnforcerWithDB(db *sql.DB) (*Enforcer, error) {
    m, err := model.NewModelFromString(Model)
    if err != nil {
        return nil, err
    }

    a, err := adapter.NewAdapter(db, "sqlite3", "acl")
    if err != nil {
        return nil, err
    }

    // // PATCH: create unique index to make `AddPoliciesEx` work
    // _, err = db.Exec(fmt.Sprintf(
    //     `create unique index if not exists uq_%[1]s on %[1]s (p_type,v0,v1,v2,v3,v4,v5);`,
    //     tableName,
    // ))
    // if err != nil {
    //     return nil, err
    // }

    e, _ := casbin.NewEnforcer() // NewEnforcer() without param won't return error
    // e.EnableLog(true)

    // NOTE: casbin clears the model on init, so we should intialize with temporary adapter first
    // and then override the adapter to sql-adapter.
    // `e.SetModel(m)` after init doesn't work for some reason
    if err := e.InitWithModelAndAdapter(m, bytesadapter.NewAdapter(tangledPolicy)); err != nil {
        return nil, err
    }

    // load dynamic policy from db
    e.EnableAutoSave(false)
    if err := a.LoadPolicy(e.GetModel()); err != nil {
        return nil, err
    }
    e.AddNamedDomainMatchingFunc("g", "keyMatch4", util.KeyMatch4)
    e.BuildRoleLinks()
    e.SetAdapter(a)
    e.EnableAutoSave(true)

    return &Enforcer{e}, nil
}

// CaptureModel returns copy of current model. Used for testing
func (e *Enforcer) CaptureModel() model.Model {
    return e.e.GetModel().Copy()
}

func (e *Enforcer) hasImplicitRoleForUser(name string, role string, domain ...string) (bool, error) {
    roles, err := e.e.GetImplicitRolesForUser(name, domain...)
    if err != nil {
        return false, err
    }
    for _, r := range roles {
        if r == role {
            return true, nil
        }
    }
    return false, nil
}

// setRoleForUser sets single user role for specified domain.
// All existing users with that role will be removed.
func (e *Enforcer) setRoleForUser(name string, role string, domain ...string) error {
    currentUsers, err := e.e.GetUsersForRole(role, domain...)
    if err != nil {
        return err
    }

    for _, oldUser := range currentUsers {
        _, err = e.e.DeleteRoleForUser(oldUser, role, domain...)
        if err != nil {
            return err
        }
    }

    _, err = e.e.AddRoleForUser(name, role, domain...)
    return err
}

// validateAtUri enforeces AT-URI to have valid did as authority and match collection NSID.
func validateAtUri(uri syntax.ATURI, expected string) error {
    if !uri.Authority().IsDID() {
        return fmt.Errorf("expected at-uri with did")
    }
    if expected != "" && uri.Collection().String() != expected {
        return fmt.Errorf("incorrect repo at-uri collection nsid '%s' (expected '%s')", uri.Collection(), expected)
    }
    return nil
}
rbac2/rbac2_test.go  (+150, new file)
package rbac2_test

import (
    "database/sql"
    "testing"

    "github.com/bluesky-social/indigo/atproto/syntax"
    _ "github.com/mattn/go-sqlite3"
    "github.com/stretchr/testify/assert"
    "tangled.org/core/rbac2"
)

func setup(t *testing.T) *rbac2.Enforcer {
    enforcer, err := rbac2.NewEnforcer(":memory:")
    assert.NoError(t, err)

    return enforcer
}

func TestNewEnforcer(t *testing.T) {
    db, err := sql.Open("sqlite3", "/tmp/test/test.db?_foreign_keys=1")
    assert.NoError(t, err)

    enforcer1, err := rbac2.NewEnforcerWithDB(db)
    assert.NoError(t, err)
    enforcer1.AddRepo(syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey"))
    model1 := enforcer1.CaptureModel()

    enforcer2, err := rbac2.NewEnforcerWithDB(db)
    assert.NoError(t, err)
    model2 := enforcer2.CaptureModel()

    // model1.GetLogger().EnableLog(true)
    // model1.PrintModel()
    // model1.PrintPolicy()
    // model1.GetLogger().EnableLog(false)

    model2.GetLogger().EnableLog(true)
    model2.PrintModel()
    model2.PrintPolicy()
    model2.GetLogger().EnableLog(false)

    assert.Equal(t, model1, model2)
}

func TestRepoOwnerPermissions(t *testing.T) {
    var (
        e       = setup(t)
        ok      bool
        err     error
        fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
        fooUser = syntax.DID("did:plc:foo")
    )

    assert.NoError(t, e.AddRepo(fooRepo))

    ok, err = e.IsRepoOwner(fooUser, fooRepo)
    assert.NoError(t, err)
    assert.True(t, ok, "repo author should be repo owner")

    ok, err = e.IsRepoWriteAllowed(fooUser, fooRepo)
    assert.NoError(t, err)
    assert.True(t, ok, "repo owner should be able to modify the repo itself")

    ok, err = e.IsRepoCollaborator(fooUser, fooRepo)
    assert.NoError(t, err)
    assert.True(t, ok, "repo owner should inherit role role:collaborator")

    ok, err = e.IsRepoSettingsWriteAllowed(fooUser, fooRepo)
    assert.NoError(t, err)
    assert.True(t, ok, "repo owner should inherit collaborator permissions")
}

func TestRepoCollaboratorPermissions(t *testing.T) {
    var (
        e       = setup(t)
        ok      bool
        err     error
        fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
        barUser = syntax.DID("did:plc:bar")
    )

    assert.NoError(t, e.AddRepo(fooRepo))
    assert.NoError(t, e.AddRepoCollaborator(barUser, fooRepo))

    ok, err = e.IsRepoCollaborator(barUser, fooRepo)
    assert.NoError(t, err)
    assert.True(t, ok, "should set repo collaborator")

    ok, err = e.IsRepoSettingsWriteAllowed(barUser, fooRepo)
    assert.NoError(t, err)
    assert.True(t, ok, "repo collaborator should be able to edit repo settings")

    ok, err = e.IsRepoWriteAllowed(barUser, fooRepo)
    assert.NoError(t, err)
    assert.False(t, ok, "repo collaborator shouldn't be able to modify the repo itself")
}

func TestGetByRole(t *testing.T) {
    var (
        e             = setup(t)
        err           error
        fooRepo       = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
        owner         = syntax.DID("did:plc:foo")
        collaborator1 = syntax.DID("did:plc:bar")
        collaborator2 = syntax.DID("did:plc:baz")
    )

    assert.NoError(t, e.AddRepo(fooRepo))
    assert.NoError(t, e.AddRepoCollaborator(collaborator1, fooRepo))
    assert.NoError(t, e.AddRepoCollaborator(collaborator2, fooRepo))

    collaborators, err := e.GetRepoCollaborators(fooRepo)
    assert.NoError(t, err)
    assert.ElementsMatch(t, []syntax.DID{
        owner,
        collaborator1,
        collaborator2,
    }, collaborators)
}

func TestSpindleOwnerPermissions(t *testing.T) {
    var (
        e       = setup(t)
        ok      bool
        err     error
        spindle = syntax.DID("did:web:spindle.example.com")
        owner   = syntax.DID("did:plc:foo")
        member  = syntax.DID("did:plc:bar")
    )

    assert.NoError(t, e.SetSpindleOwner(owner, spindle))
    assert.NoError(t, e.AddSpindleMember(member, spindle))

    ok, err = e.IsSpindleMember(owner, spindle)
    assert.NoError(t, err)
    assert.True(t, ok, "spindle owner is spindle member")

    ok, err = e.IsSpindleMember(member, spindle)
    assert.NoError(t, err)
    assert.True(t, ok, "spindle member is spindle member")

    ok, err = e.IsSpindleMemberInviteAllowed(owner, spindle)
    assert.NoError(t, err)
    assert.True(t, ok, "spindle owner can invite members")

    ok, err = e.IsSpindleMemberInviteAllowed(member, spindle)
    assert.NoError(t, err)
    assert.False(t, ok, "spindle member cannot invite members")
}
rbac2/repo.go  (+91, new file)
package rbac2

import (
    "slices"
    "strings"

    "github.com/bluesky-social/indigo/atproto/syntax"
    "tangled.org/core/api/tangled"
)

// AddRepo adds new repo with its owner to rbac enforcer
func (e *Enforcer) AddRepo(repo syntax.ATURI) error {
    if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
        return err
    }
    user := repo.Authority()

    return e.setRoleForUser(user.String(), "repo:owner", repo.String())
}

// DeleteRepo deletes all policies related to the repo
func (e *Enforcer) DeleteRepo(repo syntax.ATURI) error {
    if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
        return err
    }

    _, err := e.e.DeleteDomains(repo.String())
    return err
}

// AddRepoCollaborator adds new collaborator to the repo
func (e *Enforcer) AddRepoCollaborator(user syntax.DID, repo syntax.ATURI) error {
    if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
        return err
    }

    _, err := e.e.AddRoleForUser(user.String(), "repo:collaborator", repo.String())
    return err
}

// RemoveRepoCollaborator removes the collaborator from the repo.
// This won't remove inherited roles like repository owner.
func (e *Enforcer) RemoveRepoCollaborator(user syntax.DID, repo syntax.ATURI) error {
    if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
        return err
    }

    _, err := e.e.DeleteRoleForUser(user.String(), "repo:collaborator", repo.String())
    return err
}

func (e *Enforcer) GetRepoCollaborators(repo syntax.ATURI) ([]syntax.DID, error) {
    var collaborators []syntax.DID
    members, err := e.e.GetImplicitUsersForRole("repo:collaborator", repo.String())
    if err != nil {
        return nil, err
    }
    for _, m := range members {
        if !strings.HasPrefix(m, "did:") { // skip non-user subjects like 'repo:owner'
            continue
        }
        collaborators = append(collaborators, syntax.DID(m))
    }

    slices.Sort(collaborators)
    return slices.Compact(collaborators), nil
}

func (e *Enforcer) IsRepoOwner(user syntax.DID, repo syntax.ATURI) (bool, error) {
    return e.e.HasRoleForUser(user.String(), "repo:owner", repo.String())
}

func (e *Enforcer) IsRepoCollaborator(user syntax.DID, repo syntax.ATURI) (bool, error) {
    return e.hasImplicitRoleForUser(user.String(), "repo:collaborator", repo.String())
}

func (e *Enforcer) IsRepoWriteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
    return e.e.Enforce(user.String(), repo.String(), "/", "write")
}

func (e *Enforcer) IsRepoSettingsWriteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
    return e.e.Enforce(user.String(), repo.String(), "/settings", "write")
}

func (e *Enforcer) IsRepoCollaboratorInviteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
    return e.e.Enforce(user.String(), repo.String(), "/collaborator", "write")
}

func (e *Enforcer) IsRepoGitPushAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
    return e.e.Enforce(user.String(), repo.String(), "/git", "write")
}
rbac2/spindle.go  (+29, new file)
package rbac2

import "github.com/bluesky-social/indigo/atproto/syntax"

func (e *Enforcer) SetSpindleOwner(user syntax.DID, spindle syntax.DID) error {
    return e.setRoleForUser(user.String(), "server:owner", intoSpindle(spindle))
}

func (e *Enforcer) IsSpindleMember(user syntax.DID, spindle syntax.DID) (bool, error) {
    return e.hasImplicitRoleForUser(user.String(), "server:member", intoSpindle(spindle))
}

func (e *Enforcer) AddSpindleMember(user syntax.DID, spindle syntax.DID) error {
    _, err := e.e.AddRoleForUser(user.String(), "server:member", intoSpindle(spindle))
    return err
}

func (e *Enforcer) RemoveSpindleMember(user syntax.DID, spindle syntax.DID) error {
    _, err := e.e.DeleteRoleForUser(user.String(), "server:member", intoSpindle(spindle))
    return err
}

func (e *Enforcer) IsSpindleMemberInviteAllowed(user syntax.DID, spindle syntax.DID) (bool, error) {
    return e.e.Enforce(user.String(), intoSpindle(spindle), "/member", "write")
}

func intoSpindle(did syntax.DID) string {
    return "/spindle/" + did.String()
}
rbac2/tangled_policy.csv  (+19, new file)
#, policies
#, sub, dom, obj, act
p, repo:owner, at://{did}/sh.tangled.repo/{rkey}, /, write
p, repo:owner, at://{did}/sh.tangled.repo/{rkey}, /collaborator, write
p, repo:collaborator, at://{did}/sh.tangled.repo/{rkey}, /settings, write
p, repo:collaborator, at://{did}/sh.tangled.repo/{rkey}, /git, write

p, server:owner, /knot/{did}, /member, write
p, server:member, /knot/{did}, /git, write

p, server:owner, /spindle/{did}, /member, write


#, group policies
#, sub, role, dom
g, repo:owner, repo:collaborator, at://{did}/sh.tangled.repo/{rkey}

g, server:owner, server:member, /knot/{did}
g, server:owner, server:member, /spindle/{did}
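A minimal sketch of how the static policy above is exercised through the rbac2 API: keyMatch4 lets the at://{did}/sh.tangled.repo/{rkey} domain pattern match a concrete repo at-uri, and the g rules let repo:owner inherit repo:collaborator. The DID and rkey below are hypothetical, and the import path tangled.org/core/rbac2 is taken from the package's own tests:

    package main

    import (
        "fmt"

        "github.com/bluesky-social/indigo/atproto/syntax"
        _ "github.com/mattn/go-sqlite3"
        "tangled.org/core/rbac2"
    )

    func main() {
        // in-memory sqlite, as in the package's own tests
        e, err := rbac2.NewEnforcer(":memory:")
        if err != nil {
            panic(err)
        }

        repo := syntax.ATURI("at://did:plc:foo/sh.tangled.repo/3abc")
        owner := syntax.DID("did:plc:foo")

        // grants repo:owner on this repo's domain
        if err := e.AddRepo(repo); err != nil {
            panic(err)
        }

        ok, _ := e.IsRepoSettingsWriteAllowed(owner, repo)
        fmt.Println(ok) // true: repo:owner inherits repo:collaborator, which may write /settings
    }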
readme.md  (+3 -3)
···

 ## docs

-* [knot hosting guide](/docs/knot-hosting.md)
-* [contributing guide](/docs/contributing.md) **please read before opening a PR!**
-* [hacking on tangled](/docs/hacking.md)
+- [knot hosting guide](https://docs.tangled.org/knot-self-hosting-guide.html#knot-self-hosting-guide)
+- [contributing guide](https://docs.tangled.org/contribution-guide.html#contribution-guide) **please read before opening a PR!**
+- [hacking on tangled](https://docs.tangled.org/hacking-on-tangled.html#hacking-on-tangled)

 ## security

sets/gen.go  (+31, new file)
package sets

import (
    "math/rand"
    "reflect"
    "testing/quick"
)

func (_ Set[T]) Generate(rand *rand.Rand, size int) reflect.Value {
    s := New[T]()

    var zero T
    itemType := reflect.TypeOf(zero)

    for {
        if s.Len() >= size {
            break
        }

        item, ok := quick.Value(itemType, rand)
        if !ok {
            continue
        }

        if val, ok := item.Interface().(T); ok {
            s.Insert(val)
        }
    }

    return reflect.ValueOf(s)
}
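The Generate method above makes Set[T] usable as a testing/quick generator. A small sketch of a property test built on it; the idempotent-insert property and the test name are illustrative and not taken from the repository's own test file:

    package sets

    import (
        "testing"
        "testing/quick"
    )

    // TestInsertIdempotent checks that inserting an element a second time
    // leaves the set's size unchanged.
    func TestInsertIdempotent(t *testing.T) {
        prop := func(s Set[int], v int) bool {
            s.Insert(v)
            before := s.Len()
            s.Insert(v)
            return s.Len() == before
        }
        if err := quick.Check(prop, nil); err != nil {
            t.Error(err)
        }
    }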
sets/readme.txt  (+35, new file)
sets
----
set datastructure for go with generics and iterators. the
api is supposed to mimic rust's std::collections::HashSet api.

    s1 := sets.Collect(slices.Values([]int{1, 2, 3, 4}))
    s2 := sets.Collect(slices.Values([]int{1, 2, 3, 4, 5, 6}))

    union := sets.Collect(s1.Union(s2))
    intersect := sets.Collect(s1.Intersection(s2))
    diff := sets.Collect(s1.Difference(s2))
    symdiff := sets.Collect(s1.SymmetricDifference(s2))

    s1.Len()           // 4
    s1.Contains(1)     // true
    s1.IsEmpty()       // false
    s1.IsSubset(s2)    // true
    s1.IsSuperset(s2)  // false
    s1.IsDisjoint(s2)  // false

    if exists := s1.Insert(1); exists {
        // already existed in set
    }

    if existed := s1.Remove(1); existed {
        // existed in set, now removed
    }


testing
-------
includes property-based tests using the wonderful
testing/quick module!

    go test -v
sets/set.go  (+174, new file)
package sets

import (
    "iter"
    "maps"
)

type Set[T comparable] struct {
    data map[T]struct{}
}

func New[T comparable]() Set[T] {
    return Set[T]{
        data: make(map[T]struct{}),
    }
}

func (s *Set[T]) Insert(item T) bool {
    _, exists := s.data[item]
    s.data[item] = struct{}{}
    return !exists
}

func Singleton[T comparable](item T) Set[T] {
    n := New[T]()
    _ = n.Insert(item)
    return n
}

func (s *Set[T]) Remove(item T) bool {
    _, exists := s.data[item]
    if exists {
        delete(s.data, item)
    }
    return exists
}

func (s Set[T]) Contains(item T) bool {
    _, exists := s.data[item]
    return exists
}

func (s Set[T]) Len() int {
    return len(s.data)
}

func (s Set[T]) IsEmpty() bool {
    return len(s.data) == 0
}

func (s *Set[T]) Clear() {
    s.data = make(map[T]struct{})
}

func (s Set[T]) All() iter.Seq[T] {
    return func(yield func(T) bool) {
        for item := range s.data {
            if !yield(item) {
                return
            }
        }
    }
}

func (s Set[T]) Clone() Set[T] {
    return Set[T]{
        data: maps.Clone(s.data),
    }
}

func (s Set[T]) Union(other Set[T]) iter.Seq[T] {
    if s.Len() >= other.Len() {
        return chain(s.All(), other.Difference(s))
    } else {
        return chain(other.All(), s.Difference(other))
    }
}

func chain[T any](seqs ...iter.Seq[T]) iter.Seq[T] {
    return func(yield func(T) bool) {
        for _, seq := range seqs {
            for item := range seq {
                if !yield(item) {
                    return
                }
            }
        }
    }
}

func (s Set[T]) Intersection(other Set[T]) iter.Seq[T] {
    return func(yield func(T) bool) {
        for item := range s.data {
            if other.Contains(item) {
                if !yield(item) {
                    return
                }
            }
        }
    }
}

func (s Set[T]) Difference(other Set[T]) iter.Seq[T] {
    return func(yield func(T) bool) {
        for item := range s.data {
            if !other.Contains(item) {
                if !yield(item) {
                    return
                }
            }
        }
    }
}

func (s Set[T]) SymmetricDifference(other Set[T]) iter.Seq[T] {
    return func(yield func(T) bool) {
        for item := range s.data {
            if !other.Contains(item) {
                if !yield(item) {
                    return
                }
            }
        }
        for item := range other.data {
            if !s.Contains(item) {
                if !yield(item) {
                    return
                }
            }
        }
    }
}

func (s Set[T]) IsSubset(other Set[T]) bool {
    for item := range s.data {
        if !other.Contains(item) {
            return false
        }
    }
    return true
}

func (s Set[T]) IsSuperset(other Set[T]) bool {
    return other.IsSubset(s)
}

func (s Set[T]) IsDisjoint(other Set[T]) bool {
    for item := range s.data {
        if other.Contains(item) {
            return false
        }
    }
    return true
}

func (s Set[T]) Equal(other Set[T]) bool {
    if s.Len() != other.Len() {
        return false
    }
    for item := range s.data {
        if !other.Contains(item) {
            return false
        }
    }
    return true
}

func Collect[T comparable](seq iter.Seq[T]) Set[T] {
    result := New[T]()
    for item := range seq {
        result.Insert(item)
    }
    return result
}
+411
sets/set_test.go
···
1
+
package sets
2
+
3
+
import (
4
+
"slices"
5
+
"testing"
6
+
"testing/quick"
7
+
)
8
+
9
+
func TestNew(t *testing.T) {
10
+
s := New[int]()
11
+
if s.Len() != 0 {
12
+
t.Errorf("New set should be empty, got length %d", s.Len())
13
+
}
14
+
if !s.IsEmpty() {
15
+
t.Error("New set should be empty")
16
+
}
17
+
}
18
+
19
+
func TestFromSlice(t *testing.T) {
20
+
s := Collect(slices.Values([]int{1, 2, 3, 2, 1}))
21
+
if s.Len() != 3 {
22
+
t.Errorf("Expected length 3, got %d", s.Len())
23
+
}
24
+
if !s.Contains(1) || !s.Contains(2) || !s.Contains(3) {
25
+
t.Error("Set should contain all unique elements from slice")
26
+
}
27
+
}
28
+
29
+
func TestInsert(t *testing.T) {
30
+
s := New[string]()
31
+
32
+
if !s.Insert("hello") {
33
+
t.Error("First insert should return true")
34
+
}
35
+
if s.Insert("hello") {
36
+
t.Error("Duplicate insert should return false")
37
+
}
38
+
if s.Len() != 1 {
39
+
t.Errorf("Expected length 1, got %d", s.Len())
40
+
}
41
+
}
42
+
43
+
func TestRemove(t *testing.T) {
44
+
s := Collect(slices.Values([]int{1, 2, 3}))
45
+
46
+
if !s.Remove(2) {
47
+
t.Error("Remove existing element should return true")
48
+
}
49
+
if s.Remove(2) {
50
+
t.Error("Remove non-existing element should return false")
51
+
}
52
+
if s.Contains(2) {
53
+
t.Error("Element should be removed")
54
+
}
55
+
if s.Len() != 2 {
56
+
t.Errorf("Expected length 2, got %d", s.Len())
57
+
}
58
+
}
59
+
60
+
func TestContains(t *testing.T) {
61
+
s := Collect(slices.Values([]int{1, 2, 3}))
62
+
63
+
if !s.Contains(1) {
64
+
t.Error("Should contain 1")
65
+
}
66
+
if s.Contains(4) {
67
+
t.Error("Should not contain 4")
68
+
}
69
+
}
70
+
71
+
func TestClear(t *testing.T) {
72
+
s := Collect(slices.Values([]int{1, 2, 3}))
73
+
s.Clear()
74
+
75
+
if !s.IsEmpty() {
76
+
t.Error("Set should be empty after clear")
77
+
}
78
+
if s.Len() != 0 {
79
+
t.Errorf("Expected length 0, got %d", s.Len())
80
+
}
81
+
}
82
+
83
+
func TestIterator(t *testing.T) {
84
+
s := Collect(slices.Values([]int{1, 2, 3}))
85
+
var items []int
86
+
87
+
for item := range s.All() {
88
+
items = append(items, item)
89
+
}
90
+
91
+
slices.Sort(items)
92
+
expected := []int{1, 2, 3}
93
+
if !slices.Equal(items, expected) {
94
+
t.Errorf("Expected %v, got %v", expected, items)
95
+
}
96
+
}
97
+
98
+
func TestClone(t *testing.T) {
99
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
100
+
s2 := s1.Clone()
101
+
102
+
if !s1.Equal(s2) {
103
+
t.Error("Cloned set should be equal to original")
104
+
}
105
+
106
+
s2.Insert(4)
107
+
if s1.Contains(4) {
108
+
t.Error("Modifying clone should not affect original")
109
+
}
110
+
}
111
+
112
+
func TestUnion(t *testing.T) {
113
+
s1 := Collect(slices.Values([]int{1, 2}))
114
+
s2 := Collect(slices.Values([]int{2, 3}))
115
+
116
+
result := Collect(s1.Union(s2))
117
+
expected := Collect(slices.Values([]int{1, 2, 3}))
118
+
119
+
if !result.Equal(expected) {
120
+
t.Errorf("Expected %v, got %v", expected, result)
121
+
}
122
+
}
123
+
124
+
func TestIntersection(t *testing.T) {
125
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
126
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
127
+
128
+
expected := Collect(slices.Values([]int{2, 3}))
129
+
result := Collect(s1.Intersection(s2))
130
+
131
+
if !result.Equal(expected) {
132
+
t.Errorf("Expected %v, got %v", expected, result)
133
+
}
134
+
}
135
+
136
+
func TestDifference(t *testing.T) {
137
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
138
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
139
+
140
+
expected := Collect(slices.Values([]int{1}))
141
+
result := Collect(s1.Difference(s2))
142
+
143
+
if !result.Equal(expected) {
144
+
t.Errorf("Expected %v, got %v", expected, result)
145
+
}
146
+
}
147
+
148
+
func TestSymmetricDifference(t *testing.T) {
149
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
150
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
151
+
152
+
expected := Collect(slices.Values([]int{1, 4}))
153
+
result := Collect(s1.SymmetricDifference(s2))
154
+
155
+
if !result.Equal(expected) {
156
+
t.Errorf("Expected %v, got %v", expected, result)
157
+
}
158
+
}
159
+
160
+
func TestSymmetricDifferenceCommutativeProperty(t *testing.T) {
161
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
162
+
s2 := Collect(slices.Values([]int{2, 3, 4}))
163
+
164
+
result1 := Collect(s1.SymmetricDifference(s2))
165
+
result2 := Collect(s2.SymmetricDifference(s1))
166
+
167
+
if !result1.Equal(result2) {
168
+
t.Errorf("Expected %v, got %v", result1, result2)
169
+
}
170
+
}
171
+
172
+
func TestIsSubset(t *testing.T) {
173
+
s1 := Collect(slices.Values([]int{1, 2}))
174
+
s2 := Collect(slices.Values([]int{1, 2, 3}))
175
+
176
+
if !s1.IsSubset(s2) {
177
+
t.Error("s1 should be subset of s2")
178
+
}
179
+
if s2.IsSubset(s1) {
180
+
t.Error("s2 should not be subset of s1")
181
+
}
182
+
}
183
+
184
+
func TestIsSuperset(t *testing.T) {
185
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
186
+
s2 := Collect(slices.Values([]int{1, 2}))
187
+
188
+
if !s1.IsSuperset(s2) {
189
+
t.Error("s1 should be superset of s2")
190
+
}
191
+
if s2.IsSuperset(s1) {
192
+
t.Error("s2 should not be superset of s1")
193
+
}
194
+
}
195
+
196
+
func TestIsDisjoint(t *testing.T) {
197
+
s1 := Collect(slices.Values([]int{1, 2}))
198
+
s2 := Collect(slices.Values([]int{3, 4}))
199
+
s3 := Collect(slices.Values([]int{2, 3}))
200
+
201
+
if !s1.IsDisjoint(s2) {
202
+
t.Error("s1 and s2 should be disjoint")
203
+
}
204
+
if s1.IsDisjoint(s3) {
205
+
t.Error("s1 and s3 should not be disjoint")
206
+
}
207
+
}
208
+
209
+
func TestEqual(t *testing.T) {
210
+
s1 := Collect(slices.Values([]int{1, 2, 3}))
211
+
s2 := Collect(slices.Values([]int{3, 2, 1}))
212
+
s3 := Collect(slices.Values([]int{1, 2}))
213
+
214
+
if !s1.Equal(s2) {
215
+
t.Error("s1 and s2 should be equal")
216
+
}
217
+
if s1.Equal(s3) {
218
+
t.Error("s1 and s3 should not be equal")
219
+
}
220
+
}
221
+
222
+
func TestCollect(t *testing.T) {
223
+
s1 := Collect(slices.Values([]int{1, 2}))
224
+
s2 := Collect(slices.Values([]int{2, 3}))
225
+
226
+
unionSet := Collect(s1.Union(s2))
227
+
if unionSet.Len() != 3 {
228
+
t.Errorf("Expected union set length 3, got %d", unionSet.Len())
229
+
}
230
+
if !unionSet.Contains(1) || !unionSet.Contains(2) || !unionSet.Contains(3) {
231
+
t.Error("Union set should contain 1, 2, and 3")
232
+
}
233
+
234
+
diffSet := Collect(s1.Difference(s2))
235
+
if diffSet.Len() != 1 {
236
+
t.Errorf("Expected difference set length 1, got %d", diffSet.Len())
237
+
}
238
+
if !diffSet.Contains(1) {
239
+
t.Error("Difference set should contain 1")
240
+
}
241
+
}
242
+
243
+
func TestPropertySingletonLen(t *testing.T) {
244
+
f := func(item int) bool {
245
+
single := Singleton(item)
246
+
return single.Len() == 1
247
+
}
248
+
249
+
if err := quick.Check(f, nil); err != nil {
250
+
t.Error(err)
251
+
}
252
+
}
253
+
254
+
func TestPropertyInsertIdempotent(t *testing.T) {
255
+
f := func(s Set[int], item int) bool {
256
+
clone := s.Clone()
257
+
258
+
clone.Insert(item)
259
+
firstLen := clone.Len()
260
+
261
+
clone.Insert(item)
262
+
secondLen := clone.Len()
263
+
264
+
return firstLen == secondLen
265
+
}
266
+
267
+
if err := quick.Check(f, nil); err != nil {
268
+
t.Error(err)
269
+
}
270
+
}
271
+
272
+
func TestPropertyUnionCommutative(t *testing.T) {
273
+
f := func(s1 Set[int], s2 Set[int]) bool {
274
+
union1 := Collect(s1.Union(s2))
275
+
union2 := Collect(s2.Union(s1))
276
+
return union1.Equal(union2)
277
+
}
278
+
279
+
if err := quick.Check(f, nil); err != nil {
280
+
t.Error(err)
281
+
}
282
+
}
283
+
284
+
func TestPropertyIntersectionCommutative(t *testing.T) {
285
+
f := func(s1 Set[int], s2 Set[int]) bool {
286
+
inter1 := Collect(s1.Intersection(s2))
287
+
inter2 := Collect(s2.Intersection(s1))
288
+
return inter1.Equal(inter2)
289
+
}
290
+
291
+
if err := quick.Check(f, nil); err != nil {
292
+
t.Error(err)
293
+
}
294
+
}
295
+
296
+
func TestPropertyCloneEquals(t *testing.T) {
297
+
f := func(s Set[int]) bool {
298
+
clone := s.Clone()
299
+
return s.Equal(clone)
300
+
}
301
+
302
+
if err := quick.Check(f, nil); err != nil {
303
+
t.Error(err)
304
+
}
305
+
}
306
+
307
+
func TestPropertyIntersectionIsSubset(t *testing.T) {
308
+
f := func(s1 Set[int], s2 Set[int]) bool {
309
+
inter := Collect(s1.Intersection(s2))
310
+
return inter.IsSubset(s1) && inter.IsSubset(s2)
311
+
}
312
+
313
+
if err := quick.Check(f, nil); err != nil {
314
+
t.Error(err)
315
+
}
316
+
}
317
+
318
+
func TestPropertyUnionIsSuperset(t *testing.T) {
319
+
f := func(s1 Set[int], s2 Set[int]) bool {
320
+
union := Collect(s1.Union(s2))
321
+
return union.IsSuperset(s1) && union.IsSuperset(s2)
322
+
}
323
+
324
+
if err := quick.Check(f, nil); err != nil {
325
+
t.Error(err)
326
+
}
327
+
}
328
+
329
+
func TestPropertyDifferenceDisjoint(t *testing.T) {
330
+
f := func(s1 Set[int], s2 Set[int]) bool {
331
+
diff := Collect(s1.Difference(s2))
332
+
return diff.IsDisjoint(s2)
333
+
}
334
+
335
+
if err := quick.Check(f, nil); err != nil {
336
+
t.Error(err)
337
+
}
338
+
}
339
+
340
+
func TestPropertySymmetricDifferenceCommutative(t *testing.T) {
341
+
f := func(s1 Set[int], s2 Set[int]) bool {
342
+
symDiff1 := Collect(s1.SymmetricDifference(s2))
343
+
symDiff2 := Collect(s2.SymmetricDifference(s1))
344
+
return symDiff1.Equal(symDiff2)
345
+
}
346
+
347
+
if err := quick.Check(f, nil); err != nil {
348
+
t.Error(err)
349
+
}
350
+
}
351
+
352
+
func TestPropertyRemoveWorks(t *testing.T) {
353
+
f := func(s Set[int], item int) bool {
354
+
clone := s.Clone()
355
+
clone.Insert(item)
356
+
clone.Remove(item)
357
+
return !clone.Contains(item)
358
+
}
359
+
360
+
if err := quick.Check(f, nil); err != nil {
361
+
t.Error(err)
362
+
}
363
+
}
364
+
365
+
func TestPropertyClearEmpty(t *testing.T) {
366
+
f := func(s Set[int]) bool {
367
+
s.Clear()
368
+
return s.IsEmpty() && s.Len() == 0
369
+
}
370
+
371
+
if err := quick.Check(f, nil); err != nil {
372
+
t.Error(err)
373
+
}
374
+
}
375
+
376
+
func TestPropertyIsSubsetReflexive(t *testing.T) {
377
+
f := func(s Set[int]) bool {
378
+
return s.IsSubset(s)
379
+
}
380
+
381
+
if err := quick.Check(f, nil); err != nil {
382
+
t.Error(err)
383
+
}
384
+
}
385
+
386
+
func TestPropertyDeMorganUnion(t *testing.T) {
387
+
f := func(s1 Set[int], s2 Set[int], universe Set[int]) bool {
388
+
// create a universe that contains both sets
389
+
u := universe.Clone()
390
+
for item := range s1.All() {
391
+
u.Insert(item)
392
+
}
393
+
for item := range s2.All() {
394
+
u.Insert(item)
395
+
}
396
+
397
+
// (A u B)' = A' n B'
398
+
union := Collect(s1.Union(s2))
399
+
complementUnion := Collect(u.Difference(union))
400
+
401
+
complementS1 := Collect(u.Difference(s1))
402
+
complementS2 := Collect(u.Difference(s2))
403
+
intersectionComplements := Collect(complementS1.Intersection(complementS2))
404
+
405
+
return complementUnion.Equal(intersectionComplements)
406
+
}
407
+
408
+
if err := quick.Check(f, nil); err != nil {
409
+
t.Error(err)
410
+
}
411
+
}
+20
-11
spindle/config/config.go
···
3
3
import (
4
4
"context"
5
5
"fmt"
6
+
"path/filepath"
6
7
7
8
"github.com/bluesky-social/indigo/atproto/syntax"
8
9
"github.com/sethvargo/go-envconfig"
9
10
)
10
11
11
12
type Server struct {
12
-
ListenAddr string `env:"LISTEN_ADDR, default=0.0.0.0:6555"`
13
-
DBPath string `env:"DB_PATH, default=spindle.db"`
14
-
Hostname string `env:"HOSTNAME, required"`
15
-
JetstreamEndpoint string `env:"JETSTREAM_ENDPOINT, default=wss://jetstream1.us-west.bsky.network/subscribe"`
16
-
PlcUrl string `env:"PLC_URL, default=https://plc.directory"`
17
-
Dev bool `env:"DEV, default=false"`
18
-
Owner string `env:"OWNER, required"`
19
-
Secrets Secrets `env:",prefix=SECRETS_"`
20
-
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
21
-
QueueSize int `env:"QUEUE_SIZE, default=100"`
22
-
MaxJobCount int `env:"MAX_JOB_COUNT, default=2"` // max number of jobs that run at a time
13
+
ListenAddr string `env:"LISTEN_ADDR, default=0.0.0.0:6555"`
14
+
Hostname string `env:"HOSTNAME, required"`
15
+
TapUrl string `env:"TAP_URL, required"`
16
+
PlcUrl string `env:"PLC_URL, default=https://plc.directory"`
17
+
Dev bool `env:"DEV, default=false"`
18
+
Owner syntax.DID `env:"OWNER, required"`
19
+
Secrets Secrets `env:",prefix=SECRETS_"`
20
+
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
21
+
DataDir string `env:"DATA_DIR, default=/var/lib/spindle"`
22
+
QueueSize int `env:"QUEUE_SIZE, default=100"`
23
+
MaxJobCount int `env:"MAX_JOB_COUNT, default=2"` // max number of jobs that run at a time
23
24
}
24
25
25
26
func (s Server) Did() syntax.DID {
26
27
return syntax.DID(fmt.Sprintf("did:web:%s", s.Hostname))
28
+
}
29
+
30
+
func (s Server) RepoDir() string {
31
+
return filepath.Join(s.DataDir, "repos")
32
+
}
33
+
34
+
func (s Server) DBPath() string {
35
+
return filepath.Join(s.DataDir, "spindle.db")
27
36
}
28
37
29
38
type Secrets struct {
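These struct tags are consumed by sethvargo/go-envconfig. A minimal sketch of loading the Server section straight from the environment — the real spindle entrypoint is not part of this diff and may apply an env prefix, so treat this as illustrative:

    package main

    import (
    	"context"
    	"log"

    	"github.com/sethvargo/go-envconfig"
    	"tangled.org/core/spindle/config"
    )

    func main() {
    	ctx := context.Background()

    	var srv config.Server
    	// HOSTNAME, TAP_URL and OWNER are required and must be present in the environment
    	if err := envconfig.Process(ctx, &srv); err != nil {
    		log.Fatal(err)
    	}

    	// derived paths come from DATA_DIR (default /var/lib/spindle)
    	log.Println("db:", srv.DBPath(), "repos:", srv.RepoDir())
    }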
+73
-18
spindle/db/db.go
···
1
1
package db
2
2
3
3
import (
4
+
"context"
4
5
"database/sql"
5
6
"strings"
6
7
8
+
"github.com/bluesky-social/indigo/atproto/syntax"
7
9
_ "github.com/mattn/go-sqlite3"
10
+
"tangled.org/core/log"
11
+
"tangled.org/core/orm"
8
12
)
9
13
10
14
type DB struct {
11
15
*sql.DB
12
16
}
13
17
14
-
func Make(dbPath string) (*DB, error) {
18
+
func Make(ctx context.Context, dbPath string) (*DB, error) {
15
19
// https://github.com/mattn/go-sqlite3#connection-string
16
20
opts := []string{
17
21
"_foreign_keys=1",
···
20
24
"_auto_vacuum=incremental",
21
25
}
22
26
27
+
logger := log.FromContext(ctx)
28
+
logger = log.SubLogger(logger, "db")
29
+
23
30
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
24
31
if err != nil {
25
32
return nil, err
26
33
}
27
34
28
-
// NOTE: If any other migration is added here, you MUST
29
-
// copy the pattern in appview: use a single sql.Conn
30
-
// for every migration.
35
+
conn, err := db.Conn(ctx)
36
+
if err != nil {
37
+
return nil, err
38
+
}
39
+
defer conn.Close()
31
40
32
41
_, err = db.Exec(`
33
42
create table if not exists _jetstream (
···
49
58
unique(owner, name)
50
59
);
51
60
61
+
create table if not exists repo_collaborators (
62
+
-- identifiers
63
+
id integer primary key autoincrement,
64
+
did text not null,
65
+
rkey text not null,
66
+
at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.collaborator' || '/' || rkey) stored,
67
+
68
+
repo text not null,
69
+
subject text not null,
70
+
71
+
addedAt text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
72
+
unique(did, rkey)
73
+
);
74
+
52
75
create table if not exists spindle_members (
53
76
-- identifiers for the record
54
77
id integer primary key autoincrement,
···
76
99
return nil, err
77
100
}
78
101
102
+
// run migrations
103
+
104
+
// NOTE: this won't migrate existing records
105
+
// they will be fetched again with tap instead
106
+
orm.RunMigration(conn, logger, "add-rkey-to-repos", func(tx *sql.Tx) error {
107
+
// archive legacy repos (just in case)
108
+
_, err = tx.Exec(`alter table repos rename to repos_old`)
109
+
if err != nil {
110
+
return err
111
+
}
112
+
113
+
_, err := tx.Exec(`
114
+
create table repos (
115
+
-- identifiers
116
+
id integer primary key autoincrement,
117
+
did text not null,
118
+
rkey text not null,
119
+
at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo' || '/' || rkey) stored,
120
+
121
+
name text not null,
122
+
knot text not null,
123
+
124
+
addedAt text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
125
+
unique(did, rkey)
126
+
);
127
+
`)
128
+
if err != nil {
129
+
return err
130
+
}
131
+
132
+
return nil
133
+
})
134
+
79
135
return &DB{db}, nil
80
136
}
81
137
82
-
func (d *DB) SaveLastTimeUs(lastTimeUs int64) error {
83
-
_, err := d.Exec(`
84
-
insert into _jetstream (id, last_time_us)
85
-
values (1, ?)
86
-
on conflict(id) do update set last_time_us = excluded.last_time_us
87
-
`, lastTimeUs)
88
-
return err
89
-
}
90
-
91
-
func (d *DB) GetLastTimeUs() (int64, error) {
92
-
var lastTimeUs int64
93
-
row := d.QueryRow(`select last_time_us from _jetstream where id = 1;`)
94
-
err := row.Scan(&lastTimeUs)
95
-
return lastTimeUs, err
138
+
func (d *DB) IsKnownDid(did syntax.DID) (bool, error) {
139
+
// is spindle member / repo collaborator
140
+
var exists bool
141
+
err := d.QueryRow(
142
+
`select exists (
143
+
select 1 from repo_collaborators where subject = ?
144
+
union all
145
+
select 1 from spindle_members where did = ?
146
+
)`,
147
+
did,
148
+
did,
149
+
).Scan(&exists)
150
+
return exists, err
96
151
}
+10
-8
spindle/db/events.go
···
18
18
EventJson string `json:"event"`
19
19
}
20
20
21
-
func (d *DB) InsertEvent(event Event, notifier *notifier.Notifier) error {
21
+
func (d *DB) insertEvent(event Event, notifier *notifier.Notifier) error {
22
22
_, err := d.Exec(
23
23
`insert into events (rkey, nsid, event, created) values (?, ?, ?, ?)`,
24
24
event.Rkey,
···
70
70
return evts, nil
71
71
}
72
72
73
-
func (d *DB) CreateStatusEvent(rkey string, s tangled.PipelineStatus, n *notifier.Notifier) error {
74
-
eventJson, err := json.Marshal(s)
73
+
func (d *DB) CreatePipelineEvent(rkey string, pipeline tangled.Pipeline, n *notifier.Notifier) error {
74
+
eventJson, err := json.Marshal(pipeline)
75
75
if err != nil {
76
76
return err
77
77
}
78
-
79
78
event := Event{
80
79
Rkey: rkey,
81
-
Nsid: tangled.PipelineStatusNSID,
80
+
Nsid: tangled.PipelineNSID,
82
81
Created: time.Now().UnixNano(),
83
82
EventJson: string(eventJson),
84
83
}
85
-
86
-
return d.InsertEvent(event, n)
84
+
return d.insertEvent(event, n)
87
85
}
88
86
89
87
func (d *DB) createStatusEvent(
···
116
114
EventJson: string(eventJson),
117
115
}
118
116
119
-
return d.InsertEvent(event, n)
117
+
return d.insertEvent(event, n)
120
118
121
119
}
122
120
···
164
162
165
163
func (d *DB) StatusFailed(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
166
164
return d.createStatusEvent(workflowId, models.StatusKindFailed, &workflowError, &exitCode, n)
165
+
}
166
+
167
+
func (d *DB) StatusCancelled(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
168
+
return d.createStatusEvent(workflowId, models.StatusKindCancelled, &workflowError, &exitCode, n)
167
169
}
168
170
169
171
func (d *DB) StatusSuccess(workflowId models.WorkflowId, n *notifier.Notifier) error {
-44
spindle/db/known_dids.go
···
1
-
package db
2
-
3
-
func (d *DB) AddDid(did string) error {
4
-
_, err := d.Exec(`insert or ignore into known_dids (did) values (?)`, did)
5
-
return err
6
-
}
7
-
8
-
func (d *DB) RemoveDid(did string) error {
9
-
_, err := d.Exec(`delete from known_dids where did = ?`, did)
10
-
return err
11
-
}
12
-
13
-
func (d *DB) GetAllDids() ([]string, error) {
14
-
var dids []string
15
-
16
-
rows, err := d.Query(`select did from known_dids`)
17
-
if err != nil {
18
-
return nil, err
19
-
}
20
-
defer rows.Close()
21
-
22
-
for rows.Next() {
23
-
var did string
24
-
if err := rows.Scan(&did); err != nil {
25
-
return nil, err
26
-
}
27
-
dids = append(dids, did)
28
-
}
29
-
30
-
if err := rows.Err(); err != nil {
31
-
return nil, err
32
-
}
33
-
34
-
return dids, nil
35
-
}
36
-
37
-
func (d *DB) HasKnownDids() bool {
38
-
var count int
39
-
err := d.QueryRow(`select count(*) from known_dids`).Scan(&count)
40
-
if err != nil {
41
-
return false
42
-
}
43
-
return count > 0
44
-
}
+120
-11
spindle/db/repos.go
···
1
1
package db
2
2
3
+
import "github.com/bluesky-social/indigo/atproto/syntax"
4
+
3
5
type Repo struct {
4
-
Knot string
5
-
Owner string
6
-
Name string
6
+
Did syntax.DID
7
+
Rkey syntax.RecordKey
8
+
Name string
9
+
Knot string
10
+
}
11
+
12
+
type RepoCollaborator struct {
13
+
Did syntax.DID
14
+
Rkey syntax.RecordKey
15
+
Repo syntax.ATURI
16
+
Subject syntax.DID
7
17
}
8
18
9
-
func (d *DB) AddRepo(knot, owner, name string) error {
10
-
_, err := d.Exec(`insert or ignore into repos (knot, owner, name) values (?, ?, ?)`, knot, owner, name)
19
+
func (d *DB) PutRepo(repo *Repo) error {
20
+
_, err := d.Exec(
21
+
`insert or ignore into repos (did, rkey, name, knot)
22
+
values (?, ?, ?, ?)
23
+
on conflict(did, rkey) do update set
24
+
name = excluded.name,
25
+
knot = excluded.knot`,
26
+
repo.Did,
27
+
repo.Rkey,
28
+
repo.Name,
29
+
repo.Knot,
30
+
)
31
+
return err
32
+
}
33
+
34
+
func (d *DB) DeleteRepo(did syntax.DID, rkey syntax.RecordKey) error {
35
+
_, err := d.Exec(
36
+
`delete from repos where did = ? and rkey = ?`,
37
+
did,
38
+
rkey,
39
+
)
11
40
return err
12
41
}
13
42
···
16
45
if err != nil {
17
46
return nil, err
18
47
}
48
+
defer rows.Close()
19
49
20
50
var knots []string
21
51
for rows.Next() {
···
33
63
return knots, nil
34
64
}
35
65
36
-
func (d *DB) GetRepo(knot, owner, name string) (*Repo, error) {
66
+
func (d *DB) GetRepo(repoAt syntax.ATURI) (*Repo, error) {
37
67
var repo Repo
68
+
err := d.DB.QueryRow(
69
+
`select
70
+
did,
71
+
rkey,
72
+
name,
73
+
knot
74
+
from repos where at_uri = ?`,
75
+
repoAt,
76
+
).Scan(
77
+
&repo.Did,
78
+
&repo.Rkey,
79
+
&repo.Name,
80
+
&repo.Knot,
81
+
)
82
+
if err != nil {
83
+
return nil, err
84
+
}
85
+
return &repo, nil
86
+
}
38
87
39
-
query := "select knot, owner, name from repos where knot = ? and owner = ? and name = ?"
40
-
err := d.DB.QueryRow(query, knot, owner, name).
41
-
Scan(&repo.Knot, &repo.Owner, &repo.Name)
88
+
func (d *DB) GetRepoWithName(did syntax.DID, name string) (*Repo, error) {
89
+
var repo Repo
90
+
err := d.DB.QueryRow(
91
+
`select
92
+
did,
93
+
rkey,
94
+
name,
95
+
knot
96
+
from repos where did = ? and name = ?`,
97
+
did,
98
+
name,
99
+
).Scan(
100
+
&repo.Did,
101
+
&repo.Rkey,
102
+
&repo.Name,
103
+
&repo.Knot,
104
+
)
105
+
if err != nil {
106
+
return nil, err
107
+
}
108
+
return &repo, nil
109
+
}
110
+
111
+
func (d *DB) PutRepoCollaborator(collaborator *RepoCollaborator) error {
112
+
_, err := d.Exec(
113
+
`insert into repo_collaborators (did, rkey, repo, subject)
114
+
values (?, ?, ?, ?)
115
+
on conflict(did, rkey) do update set
116
+
repo = excluded.repo,
117
+
subject = excluded.subject`,
118
+
collaborator.Did,
119
+
collaborator.Rkey,
120
+
collaborator.Repo,
121
+
collaborator.Subject,
122
+
)
123
+
return err
124
+
}
125
+
126
+
func (d *DB) RemoveRepoCollaborator(did syntax.DID, rkey syntax.RecordKey) error {
127
+
_, err := d.Exec(
128
+
`delete from repo_collaborators where did = ? and rkey = ?`,
129
+
did,
130
+
rkey,
131
+
)
132
+
return err
133
+
}
42
134
135
+
func (d *DB) GetRepoCollaborator(did syntax.DID, rkey syntax.RecordKey) (*RepoCollaborator, error) {
136
+
var collaborator RepoCollaborator
137
+
err := d.DB.QueryRow(
138
+
`select
139
+
did,
140
+
rkey,
141
+
repo,
142
+
subject
143
+
from repo_collaborators
144
+
where did = ? and rkey = ?`,
145
+
did,
146
+
rkey,
147
+
).Scan(
148
+
&collaborator.Did,
149
+
&collaborator.Rkey,
150
+
&collaborator.Repo,
151
+
&collaborator.Subject,
152
+
)
43
153
if err != nil {
44
154
return nil, err
45
155
}
46
-
47
-
return &repo, nil
156
+
return &collaborator, nil
48
157
}
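Taken together, PutRepo's upsert and the generated at_uri column give a simple put-then-lookup flow. A hedged sketch — identifiers and the db path are placeholders, and it assumes db.Make can be called with a bare context:

    package main

    import (
    	"context"
    	"log"

    	"github.com/bluesky-social/indigo/atproto/syntax"
    	"tangled.org/core/spindle/db"
    )

    func main() {
    	ctx := context.Background()

    	// db path is illustrative; db.Make also applies the schema and migrations shown above
    	d, err := db.Make(ctx, "/tmp/spindle.db")
    	if err != nil {
    		log.Fatal(err)
    	}

    	repo := &db.Repo{
    		Did:  syntax.DID("did:plc:user123"), // placeholder identifiers
    		Rkey: syntax.RecordKey("self"),
    		Name: "my-repo",
    		Knot: "knot.example.com",
    	}
    	if err := d.PutRepo(repo); err != nil {
    		log.Fatal(err)
    	}

    	// the generated at_uri column allows lookups by AT-URI
    	got, err := d.GetRepo("at://did:plc:user123/sh.tangled.repo/self")
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Println(got.Name, "on", got.Knot)
    }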
+22
-21
spindle/engine/engine.go
···
3
3
import (
4
4
"context"
5
5
"errors"
6
-
"fmt"
7
6
"log/slog"
7
+
"sync"
8
8
9
9
securejoin "github.com/cyphar/filepath-securejoin"
10
-
"golang.org/x/sync/errgroup"
11
10
"tangled.org/core/notifier"
12
11
"tangled.org/core/spindle/config"
13
12
"tangled.org/core/spindle/db"
···
31
30
}
32
31
}
33
32
34
-
eg, ctx := errgroup.WithContext(ctx)
33
+
var wg sync.WaitGroup
35
34
for eng, wfs := range pipeline.Workflows {
36
35
workflowTimeout := eng.WorkflowTimeout()
37
36
l.Info("using workflow timeout", "timeout", workflowTimeout)
38
37
39
38
for _, w := range wfs {
40
-
eg.Go(func() error {
39
+
wg.Add(1)
40
+
go func() {
41
+
defer wg.Done()
42
+
41
43
wid := models.WorkflowId{
42
44
PipelineId: pipelineId,
43
45
Name: w.Name,
···
45
47
46
48
err := db.StatusRunning(wid, n)
47
49
if err != nil {
48
-
return err
50
+
l.Error("failed to set workflow status to running", "wid", wid, "err", err)
51
+
return
49
52
}
50
53
51
54
err = eng.SetupWorkflow(ctx, wid, &w)
···
61
64
62
65
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
63
66
if dbErr != nil {
64
-
return dbErr
67
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
65
68
}
66
-
return err
69
+
return
67
70
}
68
71
defer eng.DestroyWorkflow(ctx, wid)
69
72
70
-
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
73
+
secretValues := make([]string, len(allSecrets))
74
+
for i, s := range allSecrets {
75
+
secretValues[i] = s.Value
76
+
}
77
+
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid, secretValues)
71
78
if err != nil {
72
79
l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
73
80
wfLogger = nil
···
99
106
if errors.Is(err, ErrTimedOut) {
100
107
dbErr := db.StatusTimeout(wid, n)
101
108
if dbErr != nil {
102
-
return dbErr
109
+
l.Error("failed to set workflow status to timeout", "wid", wid, "err", dbErr)
103
110
}
104
111
} else {
105
112
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
106
113
if dbErr != nil {
107
-
return dbErr
114
+
l.Error("failed to set workflow status to failed", "wid", wid, "err", dbErr)
108
115
}
109
116
}
110
-
111
-
return fmt.Errorf("starting steps image: %w", err)
117
+
return
112
118
}
113
119
}
114
120
115
121
err = db.StatusSuccess(wid, n)
116
122
if err != nil {
117
-
return err
123
+
l.Error("failed to set workflow status to success", "wid", wid, "err", err)
118
124
}
119
-
120
-
return nil
121
-
})
125
+
}()
122
126
}
123
127
}
124
128
125
-
if err := eg.Wait(); err != nil {
126
-
l.Error("failed to run one or more workflows", "err", err)
127
-
} else {
128
-
l.Info("successfully ran full pipeline")
129
-
}
129
+
wg.Wait()
130
+
l.Info("all workflows completed")
130
131
}
+34
-22
spindle/engines/nixery/engine.go
···
73
73
type addlFields struct {
74
74
image string
75
75
container string
76
-
env map[string]string
77
76
}
78
77
79
78
func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
···
103
102
swf.Steps = append(swf.Steps, sstep)
104
103
}
105
104
swf.Name = twf.Name
106
-
addl.env = dwf.Environment
105
+
swf.Environment = dwf.Environment
107
106
addl.image = workflowImage(dwf.Dependencies, e.cfg.NixeryPipelines.Nixery)
108
107
109
108
setup := &setupSteps{}
110
109
111
110
setup.addStep(nixConfStep())
112
-
setup.addStep(cloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
111
+
setup.addStep(models.BuildCloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
113
112
// this step could be empty
114
113
if s := dependencyStep(dwf.Dependencies); s != nil {
115
114
setup.addStep(*s)
···
180
179
return err
181
180
}
182
181
e.registerCleanup(wid, func(ctx context.Context) error {
183
-
return e.docker.NetworkRemove(ctx, networkName(wid))
182
+
if err := e.docker.NetworkRemove(ctx, networkName(wid)); err != nil {
183
+
return fmt.Errorf("removing network: %w", err)
184
+
}
185
+
return nil
184
186
})
185
187
186
188
addl := wf.Data.(addlFields)
···
230
232
return fmt.Errorf("creating container: %w", err)
231
233
}
232
234
e.registerCleanup(wid, func(ctx context.Context) error {
233
-
err = e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
234
-
if err != nil {
235
-
return err
235
+
if err := e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{}); err != nil {
236
+
return fmt.Errorf("stopping container: %w", err)
236
237
}
237
238
238
-
return e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
239
+
err := e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
239
240
RemoveVolumes: true,
240
241
RemoveLinks: false,
241
242
Force: false,
242
243
})
244
+
if err != nil {
245
+
return fmt.Errorf("removing container: %w", err)
246
+
}
247
+
return nil
243
248
})
244
249
245
-
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
246
-
if err != nil {
250
+
if err := e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
247
251
return fmt.Errorf("starting container: %w", err)
248
252
}
249
253
···
288
292
289
293
func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
290
294
addl := w.Data.(addlFields)
291
-
workflowEnvs := ConstructEnvs(addl.env)
295
+
workflowEnvs := ConstructEnvs(w.Environment)
292
296
// TODO(winter): should SetupWorkflow also have secret access?
293
297
// IMO yes, but probably worth thinking on.
294
298
for _, s := range secrets {
295
299
workflowEnvs.AddEnv(s.Key, s.Value)
296
300
}
297
301
298
-
step := w.Steps[idx].(Step)
302
+
step := w.Steps[idx]
299
303
300
304
select {
301
305
case <-ctx.Done():
···
304
308
}
305
309
306
310
envs := append(EnvVars(nil), workflowEnvs...)
307
-
for k, v := range step.environment {
308
-
envs.AddEnv(k, v)
311
+
if nixStep, ok := step.(Step); ok {
312
+
for k, v := range nixStep.environment {
313
+
envs.AddEnv(k, v)
314
+
}
309
315
}
310
316
envs.AddEnv("HOME", homeDir)
311
317
312
318
mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{
313
-
Cmd: []string{"bash", "-c", step.command},
319
+
Cmd: []string{"bash", "-c", step.Command()},
314
320
AttachStdout: true,
315
321
AttachStderr: true,
316
322
Env: envs,
···
333
339
// Docker doesn't provide an API to kill an exec run
334
340
// (sure, we could grab the PID and kill it ourselves,
335
341
// but that's wasted effort)
336
-
e.l.Warn("step timed out", "step", step.Name)
342
+
e.l.Warn("step timed out", "step", step.Name())
337
343
338
344
<-tailDone
339
345
···
393
399
}
394
400
395
401
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
396
-
e.cleanupMu.Lock()
397
-
key := wid.String()
398
-
399
-
fns := e.cleanup[key]
400
-
delete(e.cleanup, key)
401
-
e.cleanupMu.Unlock()
402
+
fns := e.drainCleanups(wid)
402
403
403
404
for _, fn := range fns {
404
405
if err := fn(ctx); err != nil {
···
414
415
415
416
key := wid.String()
416
417
e.cleanup[key] = append(e.cleanup[key], fn)
418
+
}
419
+
420
+
func (e *Engine) drainCleanups(wid models.WorkflowId) []cleanupFunc {
421
+
e.cleanupMu.Lock()
422
+
key := wid.String()
423
+
424
+
fns := e.cleanup[key]
425
+
delete(e.cleanup, key)
426
+
e.cleanupMu.Unlock()
427
+
428
+
return fns
417
429
}
418
430
419
431
func networkName(wid models.WorkflowId) string {
-73
spindle/engines/nixery/setup_steps.go
···
2
2
3
3
import (
4
4
"fmt"
5
-
"path"
6
5
"strings"
7
-
8
-
"tangled.org/core/api/tangled"
9
-
"tangled.org/core/workflow"
10
6
)
11
7
12
8
func nixConfStep() Step {
···
17
13
command: setupCmd,
18
14
name: "Configure Nix",
19
15
}
20
-
}
21
-
22
-
// cloneOptsAsSteps processes clone options and adds corresponding steps
23
-
// to the beginning of the workflow's step list if cloning is not skipped.
24
-
//
25
-
// the steps to do here are:
26
-
// - git init
27
-
// - git remote add origin <url>
28
-
// - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha>
29
-
// - git checkout FETCH_HEAD
30
-
func cloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) Step {
31
-
if twf.Clone.Skip {
32
-
return Step{}
33
-
}
34
-
35
-
var commands []string
36
-
37
-
// initialize git repo in workspace
38
-
commands = append(commands, "git init")
39
-
40
-
// add repo as git remote
41
-
scheme := "https://"
42
-
if dev {
43
-
scheme = "http://"
44
-
tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal")
45
-
}
46
-
url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo)
47
-
commands = append(commands, fmt.Sprintf("git remote add origin %s", url))
48
-
49
-
// run git fetch
50
-
{
51
-
var fetchArgs []string
52
-
53
-
// default clone depth is 1
54
-
depth := 1
55
-
if twf.Clone.Depth > 1 {
56
-
depth = int(twf.Clone.Depth)
57
-
}
58
-
fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth))
59
-
60
-
// optionally recurse submodules
61
-
if twf.Clone.Submodules {
62
-
fetchArgs = append(fetchArgs, "--recurse-submodules=yes")
63
-
}
64
-
65
-
// set remote to fetch from
66
-
fetchArgs = append(fetchArgs, "origin")
67
-
68
-
// set revision to checkout
69
-
switch workflow.TriggerKind(tr.Kind) {
70
-
case workflow.TriggerKindManual:
71
-
// TODO: unimplemented
72
-
case workflow.TriggerKindPush:
73
-
fetchArgs = append(fetchArgs, tr.Push.NewSha)
74
-
case workflow.TriggerKindPullRequest:
75
-
fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha)
76
-
}
77
-
78
-
commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")))
79
-
}
80
-
81
-
// run git checkout
82
-
commands = append(commands, "git checkout FETCH_HEAD")
83
-
84
-
cloneStep := Step{
85
-
command: strings.Join(commands, "\n"),
86
-
name: "Clone repository into workspace",
87
-
}
88
-
return cloneStep
89
16
}
90
17
91
18
// dependencyStep processes dependencies defined in the workflow.
+73
spindle/git/git.go
···
1
+
package git
2
+
3
+
import (
4
+
"bytes"
5
+
"context"
6
+
"fmt"
7
+
"os"
8
+
"os/exec"
9
+
"strings"
10
+
11
+
"github.com/hashicorp/go-version"
12
+
)
13
+
14
+
func Version() (*version.Version, error) {
15
+
var buf bytes.Buffer
16
+
cmd := exec.Command("git", "version")
17
+
cmd.Stdout = &buf
18
+
cmd.Stderr = os.Stderr
19
+
err := cmd.Run()
20
+
if err != nil {
21
+
return nil, err
22
+
}
23
+
fields := strings.Fields(buf.String())
24
+
if len(fields) < 3 {
25
+
return nil, fmt.Errorf("invalid git version: %s", buf.String())
26
+
}
27
+
28
+
// version string is like: "git version 2.29.3" or "git version 2.29.3.windows.1"
29
+
versionString := fields[2]
30
+
if pos := strings.Index(versionString, "windows"); pos >= 1 {
31
+
versionString = versionString[:pos-1]
32
+
}
33
+
return version.NewVersion(versionString)
34
+
}
35
+
36
+
const WorkflowDir = `/.tangled/workflows`
37
+
38
+
func SparseSyncGitRepo(ctx context.Context, cloneUri, path, rev string) error {
39
+
exist, err := isDir(path)
40
+
if err != nil {
41
+
return err
42
+
}
43
+
if rev == "" {
44
+
rev = "HEAD"
45
+
}
46
+
if !exist {
47
+
if err := exec.Command("git", "clone", "--no-checkout", "--depth=1", "--filter=tree:0", "--revision="+rev, cloneUri, path).Run(); err != nil {
48
+
return fmt.Errorf("git clone: %w", err)
49
+
}
50
+
if err := exec.Command("git", "-C", path, "sparse-checkout", "set", "--no-cone", WorkflowDir).Run(); err != nil {
51
+
return fmt.Errorf("git sparse-checkout set: %w", err)
52
+
}
53
+
} else {
54
+
if err := exec.Command("git", "-C", path, "fetch", "--depth=1", "--filter=tree:0", "origin", rev).Run(); err != nil {
55
+
return fmt.Errorf("git pull: %w", err)
56
+
}
57
+
}
58
+
if err := exec.Command("git", "-C", path, "checkout", rev).Run(); err != nil {
59
+
return fmt.Errorf("git checkout: %w", err)
60
+
}
61
+
return nil
62
+
}
63
+
64
+
func isDir(path string) (bool, error) {
65
+
info, err := os.Stat(path)
66
+
if err == nil && info.IsDir() {
67
+
return true, nil
68
+
}
69
+
if os.IsNotExist(err) {
70
+
return false, nil
71
+
}
72
+
return false, err
73
+
}
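SparseSyncGitRepo checks out only /.tangled/workflows at a given revision, cloning on first use and fetching afterwards. A hedged usage sketch — the URL, destination and revision are placeholders, and the import path is inferred from the module layout:

    package main

    import (
    	"context"
    	"log"
    	"path/filepath"

    	"tangled.org/core/spindle/git"
    )

    func main() {
    	ctx := context.Background()

    	cloneURL := "https://knot.example.com/did:plc:user123/my-repo" // placeholder
    	dest := filepath.Join("/var/lib/spindle", "repos", "did:plc:user123", "my-repo")
    	rev := "abc123" // commit sha or ref; an empty string falls back to HEAD

    	// only git.WorkflowDir (/.tangled/workflows) ends up in the working tree
    	if err := git.SparseSyncGitRepo(ctx, cloneURL, dest, rev); err != nil {
    		log.Fatal(err)
    	}
    }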
-300
spindle/ingester.go
···
1
-
package spindle
2
-
3
-
import (
4
-
"context"
5
-
"encoding/json"
6
-
"errors"
7
-
"fmt"
8
-
"time"
9
-
10
-
"tangled.org/core/api/tangled"
11
-
"tangled.org/core/eventconsumer"
12
-
"tangled.org/core/rbac"
13
-
"tangled.org/core/spindle/db"
14
-
15
-
comatproto "github.com/bluesky-social/indigo/api/atproto"
16
-
"github.com/bluesky-social/indigo/atproto/identity"
17
-
"github.com/bluesky-social/indigo/atproto/syntax"
18
-
"github.com/bluesky-social/indigo/xrpc"
19
-
"github.com/bluesky-social/jetstream/pkg/models"
20
-
securejoin "github.com/cyphar/filepath-securejoin"
21
-
)
22
-
23
-
type Ingester func(ctx context.Context, e *models.Event) error
24
-
25
-
func (s *Spindle) ingest() Ingester {
26
-
return func(ctx context.Context, e *models.Event) error {
27
-
var err error
28
-
defer func() {
29
-
eventTime := e.TimeUS
30
-
lastTimeUs := eventTime + 1
31
-
if err := s.db.SaveLastTimeUs(lastTimeUs); err != nil {
32
-
err = fmt.Errorf("(deferred) failed to save last time us: %w", err)
33
-
}
34
-
}()
35
-
36
-
if e.Kind != models.EventKindCommit {
37
-
return nil
38
-
}
39
-
40
-
switch e.Commit.Collection {
41
-
case tangled.SpindleMemberNSID:
42
-
err = s.ingestMember(ctx, e)
43
-
case tangled.RepoNSID:
44
-
err = s.ingestRepo(ctx, e)
45
-
case tangled.RepoCollaboratorNSID:
46
-
err = s.ingestCollaborator(ctx, e)
47
-
}
48
-
49
-
if err != nil {
50
-
s.l.Debug("failed to process message", "nsid", e.Commit.Collection, "err", err)
51
-
}
52
-
53
-
return nil
54
-
}
55
-
}
56
-
57
-
func (s *Spindle) ingestMember(_ context.Context, e *models.Event) error {
58
-
var err error
59
-
did := e.Did
60
-
rkey := e.Commit.RKey
61
-
62
-
l := s.l.With("component", "ingester", "record", tangled.SpindleMemberNSID)
63
-
64
-
switch e.Commit.Operation {
65
-
case models.CommitOperationCreate, models.CommitOperationUpdate:
66
-
raw := e.Commit.Record
67
-
record := tangled.SpindleMember{}
68
-
err = json.Unmarshal(raw, &record)
69
-
if err != nil {
70
-
l.Error("invalid record", "error", err)
71
-
return err
72
-
}
73
-
74
-
domain := s.cfg.Server.Hostname
75
-
recordInstance := record.Instance
76
-
77
-
if recordInstance != domain {
78
-
l.Error("domain mismatch", "domain", recordInstance, "expected", domain)
79
-
return fmt.Errorf("domain mismatch: %s != %s", record.Instance, domain)
80
-
}
81
-
82
-
ok, err := s.e.IsSpindleInviteAllowed(did, rbacDomain)
83
-
if err != nil || !ok {
84
-
l.Error("failed to add member", "did", did, "error", err)
85
-
return fmt.Errorf("failed to enforce permissions: %w", err)
86
-
}
87
-
88
-
if err := db.AddSpindleMember(s.db, db.SpindleMember{
89
-
Did: syntax.DID(did),
90
-
Rkey: rkey,
91
-
Instance: recordInstance,
92
-
Subject: syntax.DID(record.Subject),
93
-
Created: time.Now(),
94
-
}); err != nil {
95
-
l.Error("failed to add member", "error", err)
96
-
return fmt.Errorf("failed to add member: %w", err)
97
-
}
98
-
99
-
if err := s.e.AddSpindleMember(rbacDomain, record.Subject); err != nil {
100
-
l.Error("failed to add member", "error", err)
101
-
return fmt.Errorf("failed to add member: %w", err)
102
-
}
103
-
l.Info("added member from firehose", "member", record.Subject)
104
-
105
-
if err := s.db.AddDid(record.Subject); err != nil {
106
-
l.Error("failed to add did", "error", err)
107
-
return fmt.Errorf("failed to add did: %w", err)
108
-
}
109
-
s.jc.AddDid(record.Subject)
110
-
111
-
return nil
112
-
113
-
case models.CommitOperationDelete:
114
-
record, err := db.GetSpindleMember(s.db, did, rkey)
115
-
if err != nil {
116
-
l.Error("failed to find member", "error", err)
117
-
return fmt.Errorf("failed to find member: %w", err)
118
-
}
119
-
120
-
if err := db.RemoveSpindleMember(s.db, did, rkey); err != nil {
121
-
l.Error("failed to remove member", "error", err)
122
-
return fmt.Errorf("failed to remove member: %w", err)
123
-
}
124
-
125
-
if err := s.e.RemoveSpindleMember(rbacDomain, record.Subject.String()); err != nil {
126
-
l.Error("failed to add member", "error", err)
127
-
return fmt.Errorf("failed to add member: %w", err)
128
-
}
129
-
l.Info("added member from firehose", "member", record.Subject)
130
-
131
-
if err := s.db.RemoveDid(record.Subject.String()); err != nil {
132
-
l.Error("failed to add did", "error", err)
133
-
return fmt.Errorf("failed to add did: %w", err)
134
-
}
135
-
s.jc.RemoveDid(record.Subject.String())
136
-
137
-
}
138
-
return nil
139
-
}
140
-
141
-
func (s *Spindle) ingestRepo(ctx context.Context, e *models.Event) error {
142
-
var err error
143
-
did := e.Did
144
-
145
-
l := s.l.With("component", "ingester", "record", tangled.RepoNSID)
146
-
147
-
l.Info("ingesting repo record", "did", did)
148
-
149
-
switch e.Commit.Operation {
150
-
case models.CommitOperationCreate, models.CommitOperationUpdate:
151
-
raw := e.Commit.Record
152
-
record := tangled.Repo{}
153
-
err = json.Unmarshal(raw, &record)
154
-
if err != nil {
155
-
l.Error("invalid record", "error", err)
156
-
return err
157
-
}
158
-
159
-
domain := s.cfg.Server.Hostname
160
-
161
-
// no spindle configured for this repo
162
-
if record.Spindle == nil {
163
-
l.Info("no spindle configured", "name", record.Name)
164
-
return nil
165
-
}
166
-
167
-
// this repo did not want this spindle
168
-
if *record.Spindle != domain {
169
-
l.Info("different spindle configured", "name", record.Name, "spindle", *record.Spindle, "domain", domain)
170
-
return nil
171
-
}
172
-
173
-
// add this repo to the watch list
174
-
if err := s.db.AddRepo(record.Knot, did, record.Name); err != nil {
175
-
l.Error("failed to add repo", "error", err)
176
-
return fmt.Errorf("failed to add repo: %w", err)
177
-
}
178
-
179
-
didSlashRepo, err := securejoin.SecureJoin(did, record.Name)
180
-
if err != nil {
181
-
return err
182
-
}
183
-
184
-
// add repo to rbac
185
-
if err := s.e.AddRepo(did, rbac.ThisServer, didSlashRepo); err != nil {
186
-
l.Error("failed to add repo to enforcer", "error", err)
187
-
return fmt.Errorf("failed to add repo: %w", err)
188
-
}
189
-
190
-
// add collaborators to rbac
191
-
owner, err := s.res.ResolveIdent(ctx, did)
192
-
if err != nil || owner.Handle.IsInvalidHandle() {
193
-
return err
194
-
}
195
-
if err := s.fetchAndAddCollaborators(ctx, owner, didSlashRepo); err != nil {
196
-
return err
197
-
}
198
-
199
-
// add this knot to the event consumer
200
-
src := eventconsumer.NewKnotSource(record.Knot)
201
-
s.ks.AddSource(context.Background(), src)
202
-
203
-
return nil
204
-
205
-
}
206
-
return nil
207
-
}
208
-
209
-
func (s *Spindle) ingestCollaborator(ctx context.Context, e *models.Event) error {
210
-
var err error
211
-
212
-
l := s.l.With("component", "ingester", "record", tangled.RepoCollaboratorNSID, "did", e.Did)
213
-
214
-
l.Info("ingesting collaborator record")
215
-
216
-
switch e.Commit.Operation {
217
-
case models.CommitOperationCreate, models.CommitOperationUpdate:
218
-
raw := e.Commit.Record
219
-
record := tangled.RepoCollaborator{}
220
-
err = json.Unmarshal(raw, &record)
221
-
if err != nil {
222
-
l.Error("invalid record", "error", err)
223
-
return err
224
-
}
225
-
226
-
subjectId, err := s.res.ResolveIdent(ctx, record.Subject)
227
-
if err != nil || subjectId.Handle.IsInvalidHandle() {
228
-
return err
229
-
}
230
-
231
-
repoAt, err := syntax.ParseATURI(record.Repo)
232
-
if err != nil {
233
-
l.Info("rejecting record, invalid repoAt", "repoAt", record.Repo)
234
-
return nil
235
-
}
236
-
237
-
// TODO: get rid of this entirely
238
-
// resolve this aturi to extract the repo record
239
-
owner, err := s.res.ResolveIdent(ctx, repoAt.Authority().String())
240
-
if err != nil || owner.Handle.IsInvalidHandle() {
241
-
return fmt.Errorf("failed to resolve handle: %w", err)
242
-
}
243
-
244
-
xrpcc := xrpc.Client{
245
-
Host: owner.PDSEndpoint(),
246
-
}
247
-
248
-
resp, err := comatproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String())
249
-
if err != nil {
250
-
return err
251
-
}
252
-
253
-
repo := resp.Value.Val.(*tangled.Repo)
254
-
didSlashRepo, _ := securejoin.SecureJoin(owner.DID.String(), repo.Name)
255
-
256
-
// check perms for this user
257
-
if ok, err := s.e.IsCollaboratorInviteAllowed(owner.DID.String(), rbac.ThisServer, didSlashRepo); !ok || err != nil {
258
-
return fmt.Errorf("insufficient permissions: %w", err)
259
-
}
260
-
261
-
// add collaborator to rbac
262
-
if err := s.e.AddCollaborator(record.Subject, rbac.ThisServer, didSlashRepo); err != nil {
263
-
l.Error("failed to add repo to enforcer", "error", err)
264
-
return fmt.Errorf("failed to add repo: %w", err)
265
-
}
266
-
267
-
return nil
268
-
}
269
-
return nil
270
-
}
271
-
272
-
func (s *Spindle) fetchAndAddCollaborators(ctx context.Context, owner *identity.Identity, didSlashRepo string) error {
273
-
l := s.l.With("component", "ingester", "handler", "fetchAndAddCollaborators")
274
-
275
-
l.Info("fetching and adding existing collaborators")
276
-
277
-
xrpcc := xrpc.Client{
278
-
Host: owner.PDSEndpoint(),
279
-
}
280
-
281
-
resp, err := comatproto.RepoListRecords(ctx, &xrpcc, tangled.RepoCollaboratorNSID, "", 50, owner.DID.String(), false)
282
-
if err != nil {
283
-
return err
284
-
}
285
-
286
-
var errs error
287
-
for _, r := range resp.Records {
288
-
if r == nil {
289
-
continue
290
-
}
291
-
record := r.Value.Val.(*tangled.RepoCollaborator)
292
-
293
-
if err := s.e.AddCollaborator(record.Subject, rbac.ThisServer, didSlashRepo); err != nil {
294
-
l.Error("failed to add repo to enforcer", "error", err)
295
-
errors.Join(errs, fmt.Errorf("failed to add repo: %w", err))
296
-
}
297
-
}
298
-
299
-
return errs
300
-
}
+150
spindle/models/clone.go
···
1
+
package models
2
+
3
+
import (
4
+
"fmt"
5
+
"strings"
6
+
7
+
"tangled.org/core/api/tangled"
8
+
"tangled.org/core/workflow"
9
+
)
10
+
11
+
type CloneStep struct {
12
+
name string
13
+
kind StepKind
14
+
commands []string
15
+
}
16
+
17
+
func (s CloneStep) Name() string {
18
+
return s.name
19
+
}
20
+
21
+
func (s CloneStep) Commands() []string {
22
+
return s.commands
23
+
}
24
+
25
+
func (s CloneStep) Command() string {
26
+
return strings.Join(s.commands, "\n")
27
+
}
28
+
29
+
func (s CloneStep) Kind() StepKind {
30
+
return s.kind
31
+
}
32
+
33
+
// BuildCloneStep generates git clone commands.
34
+
// The caller must ensure the current working directory is set to the desired
35
+
// workspace directory before executing these commands.
36
+
//
37
+
// The generated commands are:
38
+
// - git init
39
+
// - git remote add origin <url>
40
+
// - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha>
41
+
// - git checkout FETCH_HEAD
42
+
//
43
+
// Supports all trigger types (push, PR, manual) and clone options.
44
+
func BuildCloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) CloneStep {
45
+
if twf.Clone != nil && twf.Clone.Skip {
46
+
return CloneStep{}
47
+
}
48
+
49
+
commitSHA, err := extractCommitSHA(tr)
50
+
if err != nil {
51
+
return CloneStep{
52
+
kind: StepKindSystem,
53
+
name: "Clone repository into workspace (error)",
54
+
commands: []string{fmt.Sprintf("echo 'Failed to get clone info: %s' && exit 1", err.Error())},
55
+
}
56
+
}
57
+
58
+
repoURL := BuildRepoURL(tr.Repo, dev)
59
+
60
+
var cloneOpts tangled.Pipeline_CloneOpts
61
+
if twf.Clone != nil {
62
+
cloneOpts = *twf.Clone
63
+
}
64
+
fetchArgs := buildFetchArgs(cloneOpts, commitSHA)
65
+
66
+
return CloneStep{
67
+
kind: StepKindSystem,
68
+
name: "Clone repository into workspace",
69
+
commands: []string{
70
+
"git init",
71
+
fmt.Sprintf("git remote add origin %s", repoURL),
72
+
fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")),
73
+
"git checkout FETCH_HEAD",
74
+
},
75
+
}
76
+
}
77
+
78
+
// extractCommitSHA extracts the commit SHA from trigger metadata based on trigger type
79
+
func extractCommitSHA(tr tangled.Pipeline_TriggerMetadata) (string, error) {
80
+
switch workflow.TriggerKind(tr.Kind) {
81
+
case workflow.TriggerKindPush:
82
+
if tr.Push == nil {
83
+
return "", fmt.Errorf("push trigger metadata is nil")
84
+
}
85
+
return tr.Push.NewSha, nil
86
+
87
+
case workflow.TriggerKindPullRequest:
88
+
if tr.PullRequest == nil {
89
+
return "", fmt.Errorf("pull request trigger metadata is nil")
90
+
}
91
+
return tr.PullRequest.SourceSha, nil
92
+
93
+
case workflow.TriggerKindManual:
94
+
// Manual triggers don't have an explicit SHA in the metadata
95
+
// For now, return empty string - could be enhanced to fetch from default branch
96
+
// TODO: Implement manual trigger SHA resolution (fetch default branch HEAD)
97
+
return "", nil
98
+
99
+
default:
100
+
return "", fmt.Errorf("unknown trigger kind: %s", tr.Kind)
101
+
}
102
+
}
103
+
104
+
// BuildRepoURL constructs the repository URL from repo metadata.
105
+
func BuildRepoURL(repo *tangled.Pipeline_TriggerRepo, devMode bool) string {
106
+
if repo == nil {
107
+
return ""
108
+
}
109
+
110
+
scheme := "https://"
111
+
if devMode {
112
+
scheme = "http://"
113
+
}
114
+
115
+
// Get host from knot
116
+
host := repo.Knot
117
+
118
+
// In dev mode, replace localhost with host.docker.internal for Docker networking
119
+
if devMode && strings.Contains(host, "localhost") {
120
+
host = strings.ReplaceAll(host, "localhost", "host.docker.internal")
121
+
}
122
+
123
+
// Build URL: {scheme}{knot}/{did}/{repo}
124
+
return fmt.Sprintf("%s%s/%s/%s", scheme, host, repo.Did, repo.Repo)
125
+
}
126
+
127
+
// buildFetchArgs constructs the arguments for git fetch based on clone options
128
+
func buildFetchArgs(clone tangled.Pipeline_CloneOpts, sha string) []string {
129
+
args := []string{}
130
+
131
+
// Set fetch depth (default to 1 for shallow clone)
132
+
depth := clone.Depth
133
+
if depth == 0 {
134
+
depth = 1
135
+
}
136
+
args = append(args, fmt.Sprintf("--depth=%d", depth))
137
+
138
+
// Add submodules if requested
139
+
if clone.Submodules {
140
+
args = append(args, "--recurse-submodules=yes")
141
+
}
142
+
143
+
// Add remote and SHA
144
+
args = append(args, "origin")
145
+
if sha != "" {
146
+
args = append(args, sha)
147
+
}
148
+
149
+
return args
150
+
}
+371
spindle/models/clone_test.go
···
1
+
package models
2
+
3
+
import (
4
+
"strings"
5
+
"testing"
6
+
7
+
"tangled.org/core/api/tangled"
8
+
"tangled.org/core/workflow"
9
+
)
10
+
11
+
func TestBuildCloneStep_PushTrigger(t *testing.T) {
12
+
twf := tangled.Pipeline_Workflow{
13
+
Clone: &tangled.Pipeline_CloneOpts{
14
+
Depth: 1,
15
+
Submodules: false,
16
+
Skip: false,
17
+
},
18
+
}
19
+
tr := tangled.Pipeline_TriggerMetadata{
20
+
Kind: string(workflow.TriggerKindPush),
21
+
Push: &tangled.Pipeline_PushTriggerData{
22
+
NewSha: "abc123",
23
+
OldSha: "def456",
24
+
Ref: "refs/heads/main",
25
+
},
26
+
Repo: &tangled.Pipeline_TriggerRepo{
27
+
Knot: "example.com",
28
+
Did: "did:plc:user123",
29
+
Repo: "my-repo",
30
+
},
31
+
}
32
+
33
+
step := BuildCloneStep(twf, tr, false)
34
+
35
+
if step.Kind() != StepKindSystem {
36
+
t.Errorf("Expected StepKindSystem, got %v", step.Kind())
37
+
}
38
+
39
+
if step.Name() != "Clone repository into workspace" {
40
+
t.Errorf("Expected 'Clone repository into workspace', got '%s'", step.Name())
41
+
}
42
+
43
+
commands := step.Commands()
44
+
if len(commands) != 4 {
45
+
t.Errorf("Expected 4 commands, got %d", len(commands))
46
+
}
47
+
48
+
// Verify commands contain expected git operations
49
+
allCmds := strings.Join(commands, " ")
50
+
if !strings.Contains(allCmds, "git init") {
51
+
t.Error("Commands should contain 'git init'")
52
+
}
53
+
if !strings.Contains(allCmds, "git remote add origin") {
54
+
t.Error("Commands should contain 'git remote add origin'")
55
+
}
56
+
if !strings.Contains(allCmds, "git fetch") {
57
+
t.Error("Commands should contain 'git fetch'")
58
+
}
59
+
if !strings.Contains(allCmds, "abc123") {
60
+
t.Error("Commands should contain commit SHA")
61
+
}
62
+
if !strings.Contains(allCmds, "git checkout FETCH_HEAD") {
63
+
t.Error("Commands should contain 'git checkout FETCH_HEAD'")
64
+
}
65
+
if !strings.Contains(allCmds, "https://example.com/did:plc:user123/my-repo") {
66
+
t.Error("Commands should contain expected repo URL")
67
+
}
68
+
}
69
+
70
+
func TestBuildCloneStep_PullRequestTrigger(t *testing.T) {
71
+
twf := tangled.Pipeline_Workflow{
72
+
Clone: &tangled.Pipeline_CloneOpts{
73
+
Depth: 1,
74
+
Skip: false,
75
+
},
76
+
}
77
+
tr := tangled.Pipeline_TriggerMetadata{
78
+
Kind: string(workflow.TriggerKindPullRequest),
79
+
PullRequest: &tangled.Pipeline_PullRequestTriggerData{
80
+
SourceSha: "pr-sha-789",
81
+
SourceBranch: "feature-branch",
82
+
TargetBranch: "main",
83
+
Action: "opened",
84
+
},
85
+
Repo: &tangled.Pipeline_TriggerRepo{
86
+
Knot: "example.com",
87
+
Did: "did:plc:user123",
88
+
Repo: "my-repo",
89
+
},
90
+
}
91
+
92
+
step := BuildCloneStep(twf, tr, false)
93
+
94
+
allCmds := strings.Join(step.Commands(), " ")
95
+
if !strings.Contains(allCmds, "pr-sha-789") {
96
+
t.Error("Commands should contain PR commit SHA")
97
+
}
98
+
}
99
+
100
+
func TestBuildCloneStep_ManualTrigger(t *testing.T) {
101
+
twf := tangled.Pipeline_Workflow{
102
+
Clone: &tangled.Pipeline_CloneOpts{
103
+
Depth: 1,
104
+
Skip: false,
105
+
},
106
+
}
107
+
tr := tangled.Pipeline_TriggerMetadata{
108
+
Kind: string(workflow.TriggerKindManual),
109
+
Manual: &tangled.Pipeline_ManualTriggerData{
110
+
Inputs: nil,
111
+
},
112
+
Repo: &tangled.Pipeline_TriggerRepo{
113
+
Knot: "example.com",
114
+
Did: "did:plc:user123",
115
+
Repo: "my-repo",
116
+
},
117
+
}
118
+
119
+
step := BuildCloneStep(twf, tr, false)
120
+
121
+
// Manual triggers don't have a SHA yet (TODO), so git fetch won't include a SHA
122
+
allCmds := strings.Join(step.Commands(), " ")
123
+
// Should still have basic git commands
124
+
if !strings.Contains(allCmds, "git init") {
125
+
t.Error("Commands should contain 'git init'")
126
+
}
127
+
if !strings.Contains(allCmds, "git fetch") {
128
+
t.Error("Commands should contain 'git fetch'")
129
+
}
130
+
}
131
+
132
+
func TestBuildCloneStep_SkipFlag(t *testing.T) {
133
+
twf := tangled.Pipeline_Workflow{
134
+
Clone: &tangled.Pipeline_CloneOpts{
135
+
Skip: true,
136
+
},
137
+
}
138
+
tr := tangled.Pipeline_TriggerMetadata{
139
+
Kind: string(workflow.TriggerKindPush),
140
+
Push: &tangled.Pipeline_PushTriggerData{
141
+
NewSha: "abc123",
142
+
},
143
+
Repo: &tangled.Pipeline_TriggerRepo{
144
+
Knot: "example.com",
145
+
Did: "did:plc:user123",
146
+
Repo: "my-repo",
147
+
},
148
+
}
149
+
150
+
step := BuildCloneStep(twf, tr, false)
151
+
152
+
// Empty step when skip is true
153
+
if step.Name() != "" {
154
+
t.Error("Expected empty step name when Skip is true")
155
+
}
156
+
if len(step.Commands()) != 0 {
157
+
t.Errorf("Expected no commands when Skip is true, got %d commands", len(step.Commands()))
158
+
}
159
+
}
160
+
161
+
func TestBuildCloneStep_DevMode(t *testing.T) {
162
+
twf := tangled.Pipeline_Workflow{
163
+
Clone: &tangled.Pipeline_CloneOpts{
164
+
Depth: 1,
165
+
Skip: false,
166
+
},
167
+
}
168
+
tr := tangled.Pipeline_TriggerMetadata{
169
+
Kind: string(workflow.TriggerKindPush),
170
+
Push: &tangled.Pipeline_PushTriggerData{
171
+
NewSha: "abc123",
172
+
},
173
+
Repo: &tangled.Pipeline_TriggerRepo{
174
+
Knot: "localhost:3000",
175
+
Did: "did:plc:user123",
176
+
Repo: "my-repo",
177
+
},
178
+
}
179
+
180
+
step := BuildCloneStep(twf, tr, true)
181
+
182
+
// In dev mode, should use http:// and replace localhost with host.docker.internal
183
+
allCmds := strings.Join(step.Commands(), " ")
184
+
expectedURL := "http://host.docker.internal:3000/did:plc:user123/my-repo"
185
+
if !strings.Contains(allCmds, expectedURL) {
186
+
t.Errorf("Expected dev mode URL '%s' in commands", expectedURL)
187
+
}
188
+
}
189
+
190
+
func TestBuildCloneStep_DepthAndSubmodules(t *testing.T) {
191
+
twf := tangled.Pipeline_Workflow{
192
+
Clone: &tangled.Pipeline_CloneOpts{
193
+
Depth: 10,
194
+
Submodules: true,
195
+
Skip: false,
196
+
},
197
+
}
198
+
tr := tangled.Pipeline_TriggerMetadata{
199
+
Kind: string(workflow.TriggerKindPush),
200
+
Push: &tangled.Pipeline_PushTriggerData{
201
+
NewSha: "abc123",
202
+
},
203
+
Repo: &tangled.Pipeline_TriggerRepo{
204
+
Knot: "example.com",
205
+
Did: "did:plc:user123",
206
+
Repo: "my-repo",
207
+
},
208
+
}
209
+
210
+
step := BuildCloneStep(twf, tr, false)
211
+
212
+
allCmds := strings.Join(step.Commands(), " ")
213
+
if !strings.Contains(allCmds, "--depth=10") {
214
+
t.Error("Commands should contain '--depth=10'")
215
+
}
216
+
217
+
if !strings.Contains(allCmds, "--recurse-submodules=yes") {
218
+
t.Error("Commands should contain '--recurse-submodules=yes'")
219
+
}
220
+
}
221
+
222
+
func TestBuildCloneStep_DefaultDepth(t *testing.T) {
223
+
twf := tangled.Pipeline_Workflow{
224
+
Clone: &tangled.Pipeline_CloneOpts{
225
+
Depth: 0, // Default should be 1
226
+
Skip: false,
227
+
},
228
+
}
229
+
tr := tangled.Pipeline_TriggerMetadata{
230
+
Kind: string(workflow.TriggerKindPush),
231
+
Push: &tangled.Pipeline_PushTriggerData{
232
+
NewSha: "abc123",
233
+
},
234
+
Repo: &tangled.Pipeline_TriggerRepo{
235
+
Knot: "example.com",
236
+
Did: "did:plc:user123",
237
+
Repo: "my-repo",
238
+
},
239
+
}
240
+
241
+
step := BuildCloneStep(twf, tr, false)
242
+
243
+
allCmds := strings.Join(step.Commands(), " ")
244
+
if !strings.Contains(allCmds, "--depth=1") {
245
+
t.Error("Commands should default to '--depth=1'")
246
+
}
247
+
}
248
+
249
+
func TestBuildCloneStep_NilPushData(t *testing.T) {
250
+
twf := tangled.Pipeline_Workflow{
251
+
Clone: &tangled.Pipeline_CloneOpts{
252
+
Depth: 1,
253
+
Skip: false,
254
+
},
255
+
}
256
+
tr := tangled.Pipeline_TriggerMetadata{
257
+
Kind: string(workflow.TriggerKindPush),
258
+
Push: nil, // Nil push data should create error step
259
+
Repo: &tangled.Pipeline_TriggerRepo{
260
+
Knot: "example.com",
261
+
Did: "did:plc:user123",
262
+
Repo: "my-repo",
263
+
},
264
+
}
265
+
266
+
step := BuildCloneStep(twf, tr, false)
267
+
268
+
// Should return an error step
269
+
if !strings.Contains(step.Name(), "error") {
270
+
t.Error("Expected error in step name when push data is nil")
271
+
}
272
+
273
+
allCmds := strings.Join(step.Commands(), " ")
274
+
if !strings.Contains(allCmds, "Failed to get clone info") {
275
+
t.Error("Commands should contain error message")
276
+
}
277
+
if !strings.Contains(allCmds, "exit 1") {
278
+
t.Error("Commands should exit with error")
279
+
}
280
+
}
281
+
282
+
func TestBuildCloneStep_NilPRData(t *testing.T) {
283
+
twf := tangled.Pipeline_Workflow{
284
+
Clone: &tangled.Pipeline_CloneOpts{
285
+
Depth: 1,
286
+
Skip: false,
287
+
},
288
+
}
289
+
tr := tangled.Pipeline_TriggerMetadata{
290
+
Kind: string(workflow.TriggerKindPullRequest),
291
+
PullRequest: nil, // Nil PR data should create error step
292
+
Repo: &tangled.Pipeline_TriggerRepo{
293
+
Knot: "example.com",
294
+
Did: "did:plc:user123",
295
+
Repo: "my-repo",
296
+
},
297
+
}
298
+
299
+
step := BuildCloneStep(twf, tr, false)
300
+
301
+
// Should return an error step
302
+
if !strings.Contains(step.Name(), "error") {
303
+
t.Error("Expected error in step name when pull request data is nil")
304
+
}
305
+
306
+
allCmds := strings.Join(step.Commands(), " ")
307
+
if !strings.Contains(allCmds, "Failed to get clone info") {
308
+
t.Error("Commands should contain error message")
309
+
}
310
+
}
311
+
312
+
func TestBuildCloneStep_UnknownTriggerKind(t *testing.T) {
313
+
twf := tangled.Pipeline_Workflow{
314
+
Clone: &tangled.Pipeline_CloneOpts{
315
+
Depth: 1,
316
+
Skip: false,
317
+
},
318
+
}
319
+
tr := tangled.Pipeline_TriggerMetadata{
320
+
Kind: "unknown_trigger",
321
+
Repo: &tangled.Pipeline_TriggerRepo{
322
+
Knot: "example.com",
323
+
Did: "did:plc:user123",
324
+
Repo: "my-repo",
325
+
},
326
+
}
327
+
328
+
step := BuildCloneStep(twf, tr, false)
329
+
330
+
// Should return an error step
331
+
if !strings.Contains(step.Name(), "error") {
332
+
t.Error("Expected error in step name for unknown trigger kind")
333
+
}
334
+
335
+
allCmds := strings.Join(step.Commands(), " ")
336
+
if !strings.Contains(allCmds, "unknown trigger kind") {
337
+
t.Error("Commands should contain error message about unknown trigger kind")
338
+
}
339
+
}
340
+
341
+
func TestBuildCloneStep_NilCloneOpts(t *testing.T) {
342
+
twf := tangled.Pipeline_Workflow{
343
+
Clone: nil, // Nil clone options should use defaults
344
+
}
345
+
tr := tangled.Pipeline_TriggerMetadata{
346
+
Kind: string(workflow.TriggerKindPush),
347
+
Push: &tangled.Pipeline_PushTriggerData{
348
+
NewSha: "abc123",
349
+
},
350
+
Repo: &tangled.Pipeline_TriggerRepo{
351
+
Knot: "example.com",
352
+
Did: "did:plc:user123",
353
+
Repo: "my-repo",
354
+
},
355
+
}
356
+
357
+
step := BuildCloneStep(twf, tr, false)
358
+
359
+
// Should still work with default options
360
+
if step.Kind() != StepKindSystem {
361
+
t.Errorf("Expected StepKindSystem, got %v", step.Kind())
362
+
}
363
+
364
+
allCmds := strings.Join(step.Commands(), " ")
365
+
if !strings.Contains(allCmds, "--depth=1") {
366
+
t.Error("Commands should default to '--depth=1' when Clone is nil")
367
+
}
368
+
if !strings.Contains(allCmds, "git init") {
369
+
t.Error("Commands should contain 'git init'")
370
+
}
371
+
}
+6
-1
spindle/models/logger.go
···
12
12
type WorkflowLogger struct {
13
13
file *os.File
14
14
encoder *json.Encoder
15
+
mask *SecretMask
15
16
}
16
17
17
-
func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) {
18
+
func NewWorkflowLogger(baseDir string, wid WorkflowId, secretValues []string) (*WorkflowLogger, error) {
18
19
path := LogFilePath(baseDir, wid)
19
20
20
21
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
···
25
26
return &WorkflowLogger{
26
27
file: file,
27
28
encoder: json.NewEncoder(file),
29
+
mask: NewSecretMask(secretValues),
28
30
}, nil
29
31
}
30
32
···
62
64
63
65
func (w *dataWriter) Write(p []byte) (int, error) {
64
66
line := strings.TrimRight(string(p), "\r\n")
67
+
if w.logger.mask != nil {
68
+
line = w.logger.mask.Mask(line)
69
+
}
65
70
entry := NewDataLogLine(w.idx, line, w.stream)
66
71
if err := w.logger.encoder.Encode(entry); err != nil {
67
72
return 0, err
+4
-3
spindle/models/pipeline.go
+77
spindle/models/pipeline_env.go
···
1
+
package models
2
+
3
+
import (
4
+
"strings"
5
+
6
+
"github.com/go-git/go-git/v5/plumbing"
7
+
"tangled.org/core/api/tangled"
8
+
"tangled.org/core/workflow"
9
+
)
10
+
11
+
// PipelineEnvVars extracts environment variables from pipeline trigger metadata.
12
+
// These are framework-provided variables that are injected into workflow steps.
13
+
func PipelineEnvVars(tr *tangled.Pipeline_TriggerMetadata, pipelineId PipelineId, devMode bool) map[string]string {
14
+
if tr == nil {
15
+
return nil
16
+
}
17
+
18
+
env := make(map[string]string)
19
+
20
+
// Standard CI environment variable
21
+
env["CI"] = "true"
22
+
23
+
env["TANGLED_PIPELINE_ID"] = pipelineId.AtUri().String()
24
+
25
+
// Repo info
26
+
if tr.Repo != nil {
27
+
env["TANGLED_REPO_KNOT"] = tr.Repo.Knot
28
+
env["TANGLED_REPO_DID"] = tr.Repo.Did
29
+
env["TANGLED_REPO_NAME"] = tr.Repo.Repo
30
+
env["TANGLED_REPO_DEFAULT_BRANCH"] = tr.Repo.DefaultBranch
31
+
env["TANGLED_REPO_URL"] = BuildRepoURL(tr.Repo, devMode)
32
+
}
33
+
34
+
switch workflow.TriggerKind(tr.Kind) {
35
+
case workflow.TriggerKindPush:
36
+
if tr.Push != nil {
37
+
refName := plumbing.ReferenceName(tr.Push.Ref)
38
+
refType := "branch"
39
+
if refName.IsTag() {
40
+
refType = "tag"
41
+
}
42
+
43
+
env["TANGLED_REF"] = tr.Push.Ref
44
+
env["TANGLED_REF_NAME"] = refName.Short()
45
+
env["TANGLED_REF_TYPE"] = refType
46
+
env["TANGLED_SHA"] = tr.Push.NewSha
47
+
env["TANGLED_COMMIT_SHA"] = tr.Push.NewSha
48
+
}
49
+
50
+
case workflow.TriggerKindPullRequest:
51
+
if tr.PullRequest != nil {
52
+
// For PRs, the "ref" is the source branch
53
+
env["TANGLED_REF"] = "refs/heads/" + tr.PullRequest.SourceBranch
54
+
env["TANGLED_REF_NAME"] = tr.PullRequest.SourceBranch
55
+
env["TANGLED_REF_TYPE"] = "branch"
56
+
env["TANGLED_SHA"] = tr.PullRequest.SourceSha
57
+
env["TANGLED_COMMIT_SHA"] = tr.PullRequest.SourceSha
58
+
59
+
// PR-specific variables
60
+
env["TANGLED_PR_SOURCE_BRANCH"] = tr.PullRequest.SourceBranch
61
+
env["TANGLED_PR_TARGET_BRANCH"] = tr.PullRequest.TargetBranch
62
+
env["TANGLED_PR_SOURCE_SHA"] = tr.PullRequest.SourceSha
63
+
env["TANGLED_PR_ACTION"] = tr.PullRequest.Action
64
+
}
65
+
66
+
case workflow.TriggerKindManual:
67
+
// Manual triggers may not have ref/sha info
68
+
// Include any manual inputs if present
69
+
if tr.Manual != nil {
70
+
for _, pair := range tr.Manual.Inputs {
71
+
env["TANGLED_INPUT_"+strings.ToUpper(pair.Key)] = pair.Value
72
+
}
73
+
}
74
+
}
75
+
76
+
return env
77
+
}
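A minimal usage sketch of PipelineEnvVars (not part of the diff): the returned map is copied over each workflow's environment with maps.Copy, as spindle/server.go does further down, so the framework-provided TANGLED_*/CI values win over user-defined ones. The concrete values below are illustrative only.

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	// Stand-in for the map returned by PipelineEnvVars(tr, pipelineId, false).
	pipelineEnv := map[string]string{
		"CI":          "true",
		"TANGLED_REF": "refs/heads/main",
	}

	// Hypothetical user-defined workflow environment.
	wfEnv := map[string]string{
		"CI":       "false", // overridden below
		"MY_TOKEN": "user-value",
	}

	// Copying pipelineEnv last ensures the framework variables take precedence.
	maps.Copy(wfEnv, pipelineEnv)

	fmt.Println(wfEnv["CI"], wfEnv["TANGLED_REF"], wfEnv["MY_TOKEN"])
	// Output: true refs/heads/main user-value
}
```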
+260
spindle/models/pipeline_env_test.go
···
1
+
package models
2
+
3
+
import (
4
+
"testing"
5
+
6
+
"tangled.org/core/api/tangled"
7
+
"tangled.org/core/workflow"
8
+
)
9
+
10
+
func TestPipelineEnvVars_PushBranch(t *testing.T) {
11
+
tr := &tangled.Pipeline_TriggerMetadata{
12
+
Kind: string(workflow.TriggerKindPush),
13
+
Push: &tangled.Pipeline_PushTriggerData{
14
+
NewSha: "abc123def456",
15
+
OldSha: "000000000000",
16
+
Ref: "refs/heads/main",
17
+
},
18
+
Repo: &tangled.Pipeline_TriggerRepo{
19
+
Knot: "example.com",
20
+
Did: "did:plc:user123",
21
+
Repo: "my-repo",
22
+
DefaultBranch: "main",
23
+
},
24
+
}
25
+
id := PipelineId{
26
+
Knot: "example.com",
27
+
Rkey: "123123",
28
+
}
29
+
env := PipelineEnvVars(tr, id, false)
30
+
31
+
// Check standard CI variable
32
+
if env["CI"] != "true" {
33
+
t.Errorf("Expected CI='true', got '%s'", env["CI"])
34
+
}
35
+
36
+
// Check ref variables
37
+
if env["TANGLED_REF"] != "refs/heads/main" {
38
+
t.Errorf("Expected TANGLED_REF='refs/heads/main', got '%s'", env["TANGLED_REF"])
39
+
}
40
+
if env["TANGLED_REF_NAME"] != "main" {
41
+
t.Errorf("Expected TANGLED_REF_NAME='main', got '%s'", env["TANGLED_REF_NAME"])
42
+
}
43
+
if env["TANGLED_REF_TYPE"] != "branch" {
44
+
t.Errorf("Expected TANGLED_REF_TYPE='branch', got '%s'", env["TANGLED_REF_TYPE"])
45
+
}
46
+
47
+
// Check SHA variables
48
+
if env["TANGLED_SHA"] != "abc123def456" {
49
+
t.Errorf("Expected TANGLED_SHA='abc123def456', got '%s'", env["TANGLED_SHA"])
50
+
}
51
+
if env["TANGLED_COMMIT_SHA"] != "abc123def456" {
52
+
t.Errorf("Expected TANGLED_COMMIT_SHA='abc123def456', got '%s'", env["TANGLED_COMMIT_SHA"])
53
+
}
54
+
55
+
// Check repo variables
56
+
if env["TANGLED_REPO_KNOT"] != "example.com" {
57
+
t.Errorf("Expected TANGLED_REPO_KNOT='example.com', got '%s'", env["TANGLED_REPO_KNOT"])
58
+
}
59
+
if env["TANGLED_REPO_DID"] != "did:plc:user123" {
60
+
t.Errorf("Expected TANGLED_REPO_DID='did:plc:user123', got '%s'", env["TANGLED_REPO_DID"])
61
+
}
62
+
if env["TANGLED_REPO_NAME"] != "my-repo" {
63
+
t.Errorf("Expected TANGLED_REPO_NAME='my-repo', got '%s'", env["TANGLED_REPO_NAME"])
64
+
}
65
+
if env["TANGLED_REPO_DEFAULT_BRANCH"] != "main" {
66
+
t.Errorf("Expected TANGLED_REPO_DEFAULT_BRANCH='main', got '%s'", env["TANGLED_REPO_DEFAULT_BRANCH"])
67
+
}
68
+
if env["TANGLED_REPO_URL"] != "https://example.com/did:plc:user123/my-repo" {
69
+
t.Errorf("Expected TANGLED_REPO_URL='https://example.com/did:plc:user123/my-repo', got '%s'", env["TANGLED_REPO_URL"])
70
+
}
71
+
}
72
+
73
+
func TestPipelineEnvVars_PushTag(t *testing.T) {
74
+
tr := &tangled.Pipeline_TriggerMetadata{
75
+
Kind: string(workflow.TriggerKindPush),
76
+
Push: &tangled.Pipeline_PushTriggerData{
77
+
NewSha: "abc123def456",
78
+
OldSha: "000000000000",
79
+
Ref: "refs/tags/v1.2.3",
80
+
},
81
+
Repo: &tangled.Pipeline_TriggerRepo{
82
+
Knot: "example.com",
83
+
Did: "did:plc:user123",
84
+
Repo: "my-repo",
85
+
},
86
+
}
87
+
id := PipelineId{
88
+
Knot: "example.com",
89
+
Rkey: "123123",
90
+
}
91
+
env := PipelineEnvVars(tr, id, false)
92
+
93
+
if env["TANGLED_REF"] != "refs/tags/v1.2.3" {
94
+
t.Errorf("Expected TANGLED_REF='refs/tags/v1.2.3', got '%s'", env["TANGLED_REF"])
95
+
}
96
+
if env["TANGLED_REF_NAME"] != "v1.2.3" {
97
+
t.Errorf("Expected TANGLED_REF_NAME='v1.2.3', got '%s'", env["TANGLED_REF_NAME"])
98
+
}
99
+
if env["TANGLED_REF_TYPE"] != "tag" {
100
+
t.Errorf("Expected TANGLED_REF_TYPE='tag', got '%s'", env["TANGLED_REF_TYPE"])
101
+
}
102
+
}
103
+
104
+
func TestPipelineEnvVars_PullRequest(t *testing.T) {
105
+
tr := &tangled.Pipeline_TriggerMetadata{
106
+
Kind: string(workflow.TriggerKindPullRequest),
107
+
PullRequest: &tangled.Pipeline_PullRequestTriggerData{
108
+
SourceBranch: "feature-branch",
109
+
TargetBranch: "main",
110
+
SourceSha: "pr-sha-789",
111
+
Action: "opened",
112
+
},
113
+
Repo: &tangled.Pipeline_TriggerRepo{
114
+
Knot: "example.com",
115
+
Did: "did:plc:user123",
116
+
Repo: "my-repo",
117
+
},
118
+
}
119
+
id := PipelineId{
120
+
Knot: "example.com",
121
+
Rkey: "123123",
122
+
}
123
+
env := PipelineEnvVars(tr, id, false)
124
+
125
+
// Check ref variables for PR
126
+
if env["TANGLED_REF"] != "refs/heads/feature-branch" {
127
+
t.Errorf("Expected TANGLED_REF='refs/heads/feature-branch', got '%s'", env["TANGLED_REF"])
128
+
}
129
+
if env["TANGLED_REF_NAME"] != "feature-branch" {
130
+
t.Errorf("Expected TANGLED_REF_NAME='feature-branch', got '%s'", env["TANGLED_REF_NAME"])
131
+
}
132
+
if env["TANGLED_REF_TYPE"] != "branch" {
133
+
t.Errorf("Expected TANGLED_REF_TYPE='branch', got '%s'", env["TANGLED_REF_TYPE"])
134
+
}
135
+
136
+
// Check SHA variables
137
+
if env["TANGLED_SHA"] != "pr-sha-789" {
138
+
t.Errorf("Expected TANGLED_SHA='pr-sha-789', got '%s'", env["TANGLED_SHA"])
139
+
}
140
+
if env["TANGLED_COMMIT_SHA"] != "pr-sha-789" {
141
+
t.Errorf("Expected TANGLED_COMMIT_SHA='pr-sha-789', got '%s'", env["TANGLED_COMMIT_SHA"])
142
+
}
143
+
144
+
// Check PR-specific variables
145
+
if env["TANGLED_PR_SOURCE_BRANCH"] != "feature-branch" {
146
+
t.Errorf("Expected TANGLED_PR_SOURCE_BRANCH='feature-branch', got '%s'", env["TANGLED_PR_SOURCE_BRANCH"])
147
+
}
148
+
if env["TANGLED_PR_TARGET_BRANCH"] != "main" {
149
+
t.Errorf("Expected TANGLED_PR_TARGET_BRANCH='main', got '%s'", env["TANGLED_PR_TARGET_BRANCH"])
150
+
}
151
+
if env["TANGLED_PR_SOURCE_SHA"] != "pr-sha-789" {
152
+
t.Errorf("Expected TANGLED_PR_SOURCE_SHA='pr-sha-789', got '%s'", env["TANGLED_PR_SOURCE_SHA"])
153
+
}
154
+
if env["TANGLED_PR_ACTION"] != "opened" {
155
+
t.Errorf("Expected TANGLED_PR_ACTION='opened', got '%s'", env["TANGLED_PR_ACTION"])
156
+
}
157
+
}
158
+
159
+
func TestPipelineEnvVars_ManualWithInputs(t *testing.T) {
160
+
tr := &tangled.Pipeline_TriggerMetadata{
161
+
Kind: string(workflow.TriggerKindManual),
162
+
Manual: &tangled.Pipeline_ManualTriggerData{
163
+
Inputs: []*tangled.Pipeline_Pair{
164
+
{Key: "version", Value: "1.0.0"},
165
+
{Key: "environment", Value: "production"},
166
+
},
167
+
},
168
+
Repo: &tangled.Pipeline_TriggerRepo{
169
+
Knot: "example.com",
170
+
Did: "did:plc:user123",
171
+
Repo: "my-repo",
172
+
},
173
+
}
174
+
id := PipelineId{
175
+
Knot: "example.com",
176
+
Rkey: "123123",
177
+
}
178
+
env := PipelineEnvVars(tr, id, false)
179
+
180
+
// Check manual input variables
181
+
if env["TANGLED_INPUT_VERSION"] != "1.0.0" {
182
+
t.Errorf("Expected TANGLED_INPUT_VERSION='1.0.0', got '%s'", env["TANGLED_INPUT_VERSION"])
183
+
}
184
+
if env["TANGLED_INPUT_ENVIRONMENT"] != "production" {
185
+
t.Errorf("Expected TANGLED_INPUT_ENVIRONMENT='production', got '%s'", env["TANGLED_INPUT_ENVIRONMENT"])
186
+
}
187
+
188
+
// Manual triggers shouldn't have ref/sha variables
189
+
if _, ok := env["TANGLED_REF"]; ok {
190
+
t.Error("Manual trigger should not have TANGLED_REF")
191
+
}
192
+
if _, ok := env["TANGLED_SHA"]; ok {
193
+
t.Error("Manual trigger should not have TANGLED_SHA")
194
+
}
195
+
}
196
+
197
+
func TestPipelineEnvVars_DevMode(t *testing.T) {
198
+
tr := &tangled.Pipeline_TriggerMetadata{
199
+
Kind: string(workflow.TriggerKindPush),
200
+
Push: &tangled.Pipeline_PushTriggerData{
201
+
NewSha: "abc123",
202
+
Ref: "refs/heads/main",
203
+
},
204
+
Repo: &tangled.Pipeline_TriggerRepo{
205
+
Knot: "localhost:3000",
206
+
Did: "did:plc:user123",
207
+
Repo: "my-repo",
208
+
},
209
+
}
210
+
id := PipelineId{
211
+
Knot: "example.com",
212
+
Rkey: "123123",
213
+
}
214
+
env := PipelineEnvVars(tr, id, true)
215
+
216
+
// Dev mode should use http:// and replace localhost with host.docker.internal
217
+
expectedURL := "http://host.docker.internal:3000/did:plc:user123/my-repo"
218
+
if env["TANGLED_REPO_URL"] != expectedURL {
219
+
t.Errorf("Expected TANGLED_REPO_URL='%s', got '%s'", expectedURL, env["TANGLED_REPO_URL"])
220
+
}
221
+
}
222
+
223
+
func TestPipelineEnvVars_NilTrigger(t *testing.T) {
224
+
id := PipelineId{
225
+
Knot: "example.com",
226
+
Rkey: "123123",
227
+
}
228
+
env := PipelineEnvVars(nil, id, false)
229
+
230
+
if env != nil {
231
+
t.Error("Expected nil env for nil trigger")
232
+
}
233
+
}
234
+
235
+
func TestPipelineEnvVars_NilPushData(t *testing.T) {
236
+
tr := &tangled.Pipeline_TriggerMetadata{
237
+
Kind: string(workflow.TriggerKindPush),
238
+
Push: nil,
239
+
Repo: &tangled.Pipeline_TriggerRepo{
240
+
Knot: "example.com",
241
+
Did: "did:plc:user123",
242
+
Repo: "my-repo",
243
+
},
244
+
}
245
+
id := PipelineId{
246
+
Knot: "example.com",
247
+
Rkey: "123123",
248
+
}
249
+
env := PipelineEnvVars(tr, id, false)
250
+
251
+
// Should still have repo variables
252
+
if env["TANGLED_REPO_KNOT"] != "example.com" {
253
+
t.Errorf("Expected TANGLED_REPO_KNOT='example.com', got '%s'", env["TANGLED_REPO_KNOT"])
254
+
}
255
+
256
+
// Should not have ref/sha variables
257
+
if _, ok := env["TANGLED_REF"]; ok {
258
+
t.Error("Should not have TANGLED_REF when push data is nil")
259
+
}
260
+
}
+51
spindle/models/secret_mask.go
···
1
+
package models
2
+
3
+
import (
4
+
"encoding/base64"
5
+
"strings"
6
+
)
7
+
8
+
// SecretMask replaces secret values in strings with "***".
9
+
type SecretMask struct {
10
+
replacer *strings.Replacer
11
+
}
12
+
13
+
// NewSecretMask creates a mask for the given secret values.
14
+
// Also registers base64-encoded variants of each secret.
15
+
func NewSecretMask(values []string) *SecretMask {
16
+
var pairs []string
17
+
18
+
for _, value := range values {
19
+
if value == "" {
20
+
continue
21
+
}
22
+
23
+
pairs = append(pairs, value, "***")
24
+
25
+
b64 := base64.StdEncoding.EncodeToString([]byte(value))
26
+
if b64 != value {
27
+
pairs = append(pairs, b64, "***")
28
+
}
29
+
30
+
b64NoPad := strings.TrimRight(b64, "=")
31
+
if b64NoPad != b64 && b64NoPad != value {
32
+
pairs = append(pairs, b64NoPad, "***")
33
+
}
34
+
}
35
+
36
+
if len(pairs) == 0 {
37
+
return nil
38
+
}
39
+
40
+
return &SecretMask{
41
+
replacer: strings.NewReplacer(pairs...),
42
+
}
43
+
}
44
+
45
+
// Mask replaces all registered secret values with "***".
46
+
func (m *SecretMask) Mask(input string) string {
47
+
if m == nil || m.replacer == nil {
48
+
return input
49
+
}
50
+
return m.replacer.Replace(input)
51
+
}
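A short usage sketch of SecretMask (not part of the diff; it assumes the models package above is importable at this path). Each log line is passed through Mask before being encoded, which is what dataWriter.Write does in spindle/models/logger.go; the secret value here is made up.

```go
package main

import (
	"fmt"

	"tangled.org/core/spindle/models"
)

func main() {
	// Build a mask from the secret values injected into the workflow.
	m := models.NewSecretMask([]string{"hunter2"})

	// Plain occurrences and base64-encoded variants are both replaced with "***".
	line := "curl -H 'Authorization: Bearer hunter2'"
	fmt.Println(m.Mask(line)) // curl -H 'Authorization: Bearer ***'
}
```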
+135
spindle/models/secret_mask_test.go
···
1
+
package models
2
+
3
+
import (
4
+
"encoding/base64"
5
+
"testing"
6
+
)
7
+
8
+
func TestSecretMask_BasicMasking(t *testing.T) {
9
+
mask := NewSecretMask([]string{"mysecret123"})
10
+
11
+
input := "The password is mysecret123 in this log"
12
+
expected := "The password is *** in this log"
13
+
14
+
result := mask.Mask(input)
15
+
if result != expected {
16
+
t.Errorf("expected %q, got %q", expected, result)
17
+
}
18
+
}
19
+
20
+
func TestSecretMask_Base64Encoded(t *testing.T) {
21
+
secret := "mysecret123"
22
+
mask := NewSecretMask([]string{secret})
23
+
24
+
b64 := base64.StdEncoding.EncodeToString([]byte(secret))
25
+
input := "Encoded: " + b64
26
+
expected := "Encoded: ***"
27
+
28
+
result := mask.Mask(input)
29
+
if result != expected {
30
+
t.Errorf("expected %q, got %q", expected, result)
31
+
}
32
+
}
33
+
34
+
func TestSecretMask_Base64NoPadding(t *testing.T) {
35
+
// "test" encodes to "dGVzdA==" with padding
36
+
secret := "test"
37
+
mask := NewSecretMask([]string{secret})
38
+
39
+
b64NoPad := "dGVzdA" // base64 without padding
40
+
input := "Token: " + b64NoPad
41
+
expected := "Token: ***"
42
+
43
+
result := mask.Mask(input)
44
+
if result != expected {
45
+
t.Errorf("expected %q, got %q", expected, result)
46
+
}
47
+
}
48
+
49
+
func TestSecretMask_MultipleSecrets(t *testing.T) {
50
+
mask := NewSecretMask([]string{"password1", "apikey123"})
51
+
52
+
input := "Using password1 and apikey123 for auth"
53
+
expected := "Using *** and *** for auth"
54
+
55
+
result := mask.Mask(input)
56
+
if result != expected {
57
+
t.Errorf("expected %q, got %q", expected, result)
58
+
}
59
+
}
60
+
61
+
func TestSecretMask_MultipleOccurrences(t *testing.T) {
62
+
mask := NewSecretMask([]string{"secret"})
63
+
64
+
input := "secret appears twice: secret"
65
+
expected := "*** appears twice: ***"
66
+
67
+
result := mask.Mask(input)
68
+
if result != expected {
69
+
t.Errorf("expected %q, got %q", expected, result)
70
+
}
71
+
}
72
+
73
+
func TestSecretMask_ShortValues(t *testing.T) {
74
+
mask := NewSecretMask([]string{"abc", "xy", ""})
75
+
76
+
if mask == nil {
77
+
t.Fatal("expected non-nil mask")
78
+
}
79
+
80
+
input := "abc xy test"
81
+
expected := "*** *** test"
82
+
result := mask.Mask(input)
83
+
if result != expected {
84
+
t.Errorf("expected %q, got %q", expected, result)
85
+
}
86
+
}
87
+
88
+
func TestSecretMask_NilMask(t *testing.T) {
89
+
var mask *SecretMask
90
+
91
+
input := "some input text"
92
+
result := mask.Mask(input)
93
+
if result != input {
94
+
t.Errorf("expected %q, got %q", input, result)
95
+
}
96
+
}
97
+
98
+
func TestSecretMask_EmptyInput(t *testing.T) {
99
+
mask := NewSecretMask([]string{"secret"})
100
+
101
+
result := mask.Mask("")
102
+
if result != "" {
103
+
t.Errorf("expected empty string, got %q", result)
104
+
}
105
+
}
106
+
107
+
func TestSecretMask_NoMatch(t *testing.T) {
108
+
mask := NewSecretMask([]string{"secretvalue"})
109
+
110
+
input := "nothing to mask here"
111
+
result := mask.Mask(input)
112
+
if result != input {
113
+
t.Errorf("expected %q, got %q", input, result)
114
+
}
115
+
}
116
+
117
+
func TestSecretMask_EmptySecretsList(t *testing.T) {
118
+
mask := NewSecretMask([]string{})
119
+
120
+
if mask != nil {
121
+
t.Error("expected nil mask for empty secrets list")
122
+
}
123
+
}
124
+
125
+
func TestSecretMask_EmptySecretsFiltered(t *testing.T) {
126
+
mask := NewSecretMask([]string{"ab", "validpassword", "", "xyz"})
127
+
128
+
input := "Using validpassword here"
129
+
expected := "Using *** here"
130
+
131
+
result := mask.Mask(input)
132
+
if result != expected {
133
+
t.Errorf("expected %q, got %q", expected, result)
134
+
}
135
+
}
+1
-1
spindle/motd
+225
-141
spindle/server.go
···
4
4
"context"
5
5
_ "embed"
6
6
"encoding/json"
7
+
"errors"
7
8
"fmt"
8
9
"log/slog"
10
+
"maps"
9
11
"net/http"
12
+
"path/filepath"
10
13
14
+
"github.com/bluesky-social/indigo/atproto/syntax"
11
15
"github.com/go-chi/chi/v5"
16
+
"github.com/go-git/go-git/v5/plumbing/object"
17
+
"github.com/hashicorp/go-version"
12
18
"tangled.org/core/api/tangled"
13
19
"tangled.org/core/eventconsumer"
14
20
"tangled.org/core/eventconsumer/cursor"
15
21
"tangled.org/core/idresolver"
16
-
"tangled.org/core/jetstream"
22
+
kgit "tangled.org/core/knotserver/git"
17
23
"tangled.org/core/log"
18
24
"tangled.org/core/notifier"
19
-
"tangled.org/core/rbac"
25
+
"tangled.org/core/rbac2"
20
26
"tangled.org/core/spindle/config"
21
27
"tangled.org/core/spindle/db"
22
28
"tangled.org/core/spindle/engine"
23
29
"tangled.org/core/spindle/engines/nixery"
30
+
"tangled.org/core/spindle/git"
24
31
"tangled.org/core/spindle/models"
25
32
"tangled.org/core/spindle/queue"
26
33
"tangled.org/core/spindle/secrets"
27
34
"tangled.org/core/spindle/xrpc"
35
+
"tangled.org/core/tap"
36
+
"tangled.org/core/tid"
37
+
"tangled.org/core/workflow"
28
38
"tangled.org/core/xrpc/serviceauth"
29
39
)
30
40
31
41
//go:embed motd
32
42
var motd []byte
33
-
34
-
const (
35
-
rbacDomain = "thisserver"
36
-
)
37
43
38
44
type Spindle struct {
39
-
jc *jetstream.JetstreamClient
45
+
tap *tap.Client
40
46
db *db.DB
41
-
e *rbac.Enforcer
47
+
e *rbac2.Enforcer
42
48
l *slog.Logger
43
49
n *notifier.Notifier
44
50
engs map[string]models.Engine
···
53
59
func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
54
60
logger := log.FromContext(ctx)
55
61
56
-
d, err := db.Make(cfg.Server.DBPath)
62
+
if err := ensureGitVersion(); err != nil {
63
+
return nil, fmt.Errorf("ensuring git version: %w", err)
64
+
}
65
+
66
+
d, err := db.Make(ctx, cfg.Server.DBPath())
57
67
if err != nil {
58
68
return nil, fmt.Errorf("failed to setup db: %w", err)
59
69
}
60
70
61
-
e, err := rbac.NewEnforcer(cfg.Server.DBPath)
71
+
e, err := rbac2.NewEnforcer(cfg.Server.DBPath())
62
72
if err != nil {
63
73
return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
64
74
}
65
-
e.E.EnableAutoSave(true)
66
75
67
76
n := notifier.New()
68
77
···
82
91
}
83
92
logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
84
93
case "sqlite", "":
85
-
vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
94
+
vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath(), secrets.WithTableName("secrets"))
86
95
if err != nil {
87
96
return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
88
97
}
89
-
logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
98
+
logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath())
90
99
default:
91
100
return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
92
101
}
···
94
103
jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
95
104
logger.Info("initialized queue", "queueSize", cfg.Server.QueueSize, "numWorkers", cfg.Server.MaxJobCount)
96
105
97
-
collections := []string{
98
-
tangled.SpindleMemberNSID,
99
-
tangled.RepoNSID,
100
-
tangled.RepoCollaboratorNSID,
101
-
}
102
-
jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
103
-
if err != nil {
104
-
return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
105
-
}
106
-
jc.AddDid(cfg.Server.Owner)
107
-
108
-
// Check if the spindle knows about any Dids;
109
-
dids, err := d.GetAllDids()
110
-
if err != nil {
111
-
return nil, fmt.Errorf("failed to get all dids: %w", err)
112
-
}
113
-
for _, d := range dids {
114
-
jc.AddDid(d)
115
-
}
106
+
tap := tap.NewClient(cfg.Server.TapUrl, "")
116
107
117
108
resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
118
109
119
110
spindle := &Spindle{
120
-
jc: jc,
111
+
tap: &tap,
121
112
e: e,
122
113
db: d,
123
114
l: logger,
···
129
120
vault: vault,
130
121
}
131
122
132
-
err = e.AddSpindle(rbacDomain)
133
-
if err != nil {
134
-
return nil, fmt.Errorf("failed to set rbac domain: %w", err)
135
-
}
136
-
err = spindle.configureOwner()
123
+
err = e.SetSpindleOwner(spindle.cfg.Server.Owner, spindle.cfg.Server.Did())
137
124
if err != nil {
138
125
return nil, err
139
126
}
140
127
logger.Info("owner set", "did", cfg.Server.Owner)
141
128
142
-
cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
129
+
cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath())
143
130
if err != nil {
144
131
return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
145
132
}
146
133
147
-
err = jc.StartJetstream(ctx, spindle.ingest())
148
-
if err != nil {
149
-
return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
150
-
}
151
-
152
-
// for each incoming sh.tangled.pipeline, we execute
153
-
// spindle.processPipeline, which in turn enqueues the pipeline
154
-
// job in the above registered queue.
134
+
// spindle listens to the knot event stream for sh.tangled.git.refUpdate,
135
+
// which syncs the local workflow files in spindle and enqueues the
136
+
// pipeline job for on-push workflows
155
137
ccfg := eventconsumer.NewConsumerConfig()
156
138
ccfg.Logger = log.SubLogger(logger, "eventconsumer")
157
139
ccfg.Dev = cfg.Server.Dev
158
-
ccfg.ProcessFunc = spindle.processPipeline
140
+
ccfg.ProcessFunc = spindle.processKnotStream
159
141
ccfg.CursorStore = cursorStore
160
142
knownKnots, err := d.Knots()
161
143
if err != nil {
···
196
178
}
197
179
198
180
// Enforcer returns the RBAC enforcer instance.
199
-
func (s *Spindle) Enforcer() *rbac.Enforcer {
181
+
func (s *Spindle) Enforcer() *rbac2.Enforcer {
200
182
return s.e
201
183
}
202
184
···
214
196
go func() {
215
197
s.l.Info("starting knot event consumer")
216
198
s.ks.Start(ctx)
199
+
}()
200
+
201
+
// ensure server owner is tracked
202
+
if err := s.tap.AddRepos(ctx, []syntax.DID{s.cfg.Server.Owner}); err != nil {
203
+
return err
204
+
}
205
+
206
+
go func() {
207
+
s.l.Info("starting tap stream consumer")
208
+
s.tap.Connect(ctx, &tap.SimpleIndexer{
209
+
EventHandler: s.processEvent,
210
+
})
217
211
}()
218
212
219
213
s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
···
267
261
Config: s.cfg,
268
262
Resolver: s.res,
269
263
Vault: s.vault,
264
+
Notifier: s.Notifier(),
270
265
ServiceAuth: serviceAuth,
271
266
}
272
267
273
268
return x.Router()
274
269
}
275
270
276
-
func (s *Spindle) processPipeline(ctx context.Context, src eventconsumer.Source, msg eventconsumer.Message) error {
277
-
if msg.Nsid == tangled.PipelineNSID {
278
-
tpl := tangled.Pipeline{}
279
-
err := json.Unmarshal(msg.EventJson, &tpl)
280
-
if err != nil {
281
-
fmt.Println("error unmarshalling", err)
271
+
func (s *Spindle) processKnotStream(ctx context.Context, src eventconsumer.Source, msg eventconsumer.Message) error {
272
+
l := log.FromContext(ctx).With("handler", "processKnotStream")
273
+
l = l.With("src", src.Key(), "msg.Nsid", msg.Nsid, "msg.Rkey", msg.Rkey)
274
+
if msg.Nsid == tangled.GitRefUpdateNSID {
275
+
event := tangled.GitRefUpdate{}
276
+
if err := json.Unmarshal(msg.EventJson, &event); err != nil {
277
+
l.Error("error unmarshalling", "err", err)
282
278
return err
283
279
}
280
+
l = l.With("repoDid", event.RepoDid, "repoName", event.RepoName)
284
281
285
-
if tpl.TriggerMetadata == nil {
286
-
return fmt.Errorf("no trigger metadata found")
282
+
// resolve repo name to rkey
283
+
// TODO: git.refUpdate should respond with rkey instead of repo name
284
+
repo, err := s.db.GetRepoWithName(syntax.DID(event.RepoDid), event.RepoName)
285
+
if err != nil {
286
+
return fmt.Errorf("get repo with did and name (%s/%s): %w", event.RepoDid, event.RepoName, err)
287
287
}
288
288
289
-
if tpl.TriggerMetadata.Repo == nil {
290
-
return fmt.Errorf("no repo data found")
289
+
// NOTE: we are blindly trusting the knot to return only repos it owns
290
+
repoCloneUri := s.newRepoCloneUrl(src.Key(), event.RepoDid, event.RepoName)
291
+
repoPath := s.newRepoPath(repo.Did, repo.Rkey)
292
+
if err := git.SparseSyncGitRepo(ctx, repoCloneUri, repoPath, event.NewSha); err != nil {
293
+
return fmt.Errorf("sync git repo: %w", err)
291
294
}
295
+
l.Info("synced git repo")
292
296
293
-
if src.Key() != tpl.TriggerMetadata.Repo.Knot {
294
-
return fmt.Errorf("repo knot does not match event source: %s != %s", src.Key(), tpl.TriggerMetadata.Repo.Knot)
297
+
compiler := workflow.Compiler{
298
+
Trigger: tangled.Pipeline_TriggerMetadata{
299
+
Kind: string(workflow.TriggerKindPush),
300
+
Push: &tangled.Pipeline_PushTriggerData{
301
+
Ref: event.Ref,
302
+
OldSha: event.OldSha,
303
+
NewSha: event.NewSha,
304
+
},
305
+
Repo: &tangled.Pipeline_TriggerRepo{
306
+
Did: repo.Did.String(),
307
+
Knot: repo.Knot,
308
+
Repo: repo.Name,
309
+
},
310
+
},
295
311
}
296
312
297
-
// filter by repos
298
-
_, err = s.db.GetRepo(
299
-
tpl.TriggerMetadata.Repo.Knot,
300
-
tpl.TriggerMetadata.Repo.Did,
301
-
tpl.TriggerMetadata.Repo.Repo,
302
-
)
313
+
// load workflow definitions from rev (without spindle context)
314
+
rawPipeline, err := s.loadPipeline(ctx, repoCloneUri, repoPath, event.NewSha)
303
315
if err != nil {
304
-
return err
316
+
return fmt.Errorf("loading pipeline: %w", err)
317
+
}
318
+
if len(rawPipeline) == 0 {
319
+
l.Info("no workflow definition find for the repo. skipping the event")
320
+
return nil
321
+
}
322
+
tpl := compiler.Compile(compiler.Parse(rawPipeline))
323
+
// TODO: pass compile error to workflow log
324
+
for _, w := range compiler.Diagnostics.Errors {
325
+
l.Error(w.String())
326
+
}
327
+
for _, w := range compiler.Diagnostics.Warnings {
328
+
l.Warn(w.String())
305
329
}
306
330
307
331
pipelineId := models.PipelineId{
308
-
Knot: src.Key(),
309
-
Rkey: msg.Rkey,
332
+
Knot: tpl.TriggerMetadata.Repo.Knot,
333
+
Rkey: tid.TID(),
310
334
}
311
-
312
-
workflows := make(map[models.Engine][]models.Workflow)
313
-
314
-
for _, w := range tpl.Workflows {
315
-
if w != nil {
316
-
if _, ok := s.engs[w.Engine]; !ok {
317
-
err = s.db.StatusFailed(models.WorkflowId{
318
-
PipelineId: pipelineId,
319
-
Name: w.Name,
320
-
}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
321
-
if err != nil {
322
-
return err
323
-
}
324
-
325
-
continue
326
-
}
335
+
if err := s.db.CreatePipelineEvent(pipelineId.Rkey, tpl, s.n); err != nil {
336
+
l.Error("failed to create pipeline event", "err", err)
337
+
return nil
338
+
}
339
+
err = s.processPipeline(ctx, tpl, pipelineId)
340
+
if err != nil {
341
+
return err
342
+
}
343
+
}
327
344
328
-
eng := s.engs[w.Engine]
345
+
return nil
346
+
}
329
347
330
-
if _, ok := workflows[eng]; !ok {
331
-
workflows[eng] = []models.Workflow{}
332
-
}
348
+
func (s *Spindle) loadPipeline(ctx context.Context, repoUri, repoPath, rev string) (workflow.RawPipeline, error) {
349
+
if err := git.SparseSyncGitRepo(ctx, repoUri, repoPath, rev); err != nil {
350
+
return nil, fmt.Errorf("syncing git repo: %w", err)
351
+
}
352
+
gr, err := kgit.Open(repoPath, rev)
353
+
if err != nil {
354
+
return nil, fmt.Errorf("opening git repo: %w", err)
355
+
}
333
356
334
-
ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
335
-
if err != nil {
336
-
return err
337
-
}
357
+
workflowDir, err := gr.FileTree(ctx, workflow.WorkflowDir)
358
+
if errors.Is(err, object.ErrDirectoryNotFound) {
359
+
// return empty RawPipeline when directory doesn't exist
360
+
return nil, nil
361
+
} else if err != nil {
362
+
return nil, fmt.Errorf("loading file tree: %w", err)
363
+
}
338
364
339
-
workflows[eng] = append(workflows[eng], *ewf)
365
+
var rawPipeline workflow.RawPipeline
366
+
for _, e := range workflowDir {
367
+
if !e.IsFile() {
368
+
continue
369
+
}
340
370
341
-
err = s.db.StatusPending(models.WorkflowId{
342
-
PipelineId: pipelineId,
343
-
Name: w.Name,
344
-
}, s.n)
345
-
if err != nil {
346
-
return err
347
-
}
348
-
}
371
+
fpath := filepath.Join(workflow.WorkflowDir, e.Name)
372
+
contents, err := gr.RawContent(fpath)
373
+
if err != nil {
374
+
return nil, fmt.Errorf("reading raw content of '%s': %w", fpath, err)
349
375
}
350
376
351
-
ok := s.jq.Enqueue(queue.Job{
352
-
Run: func() error {
353
-
engine.StartWorkflows(log.SubLogger(s.l, "engine"), s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
354
-
RepoOwner: tpl.TriggerMetadata.Repo.Did,
355
-
RepoName: tpl.TriggerMetadata.Repo.Repo,
356
-
Workflows: workflows,
357
-
}, pipelineId)
358
-
return nil
359
-
},
360
-
OnFail: func(jobError error) {
361
-
s.l.Error("pipeline run failed", "error", jobError)
362
-
},
377
+
rawPipeline = append(rawPipeline, workflow.RawWorkflow{
378
+
Name: e.Name,
379
+
Contents: contents,
363
380
})
364
-
if ok {
365
-
s.l.Info("pipeline enqueued successfully", "id", msg.Rkey)
366
-
} else {
367
-
s.l.Error("failed to enqueue pipeline: queue is full")
368
-
}
369
381
}
370
382
371
-
return nil
383
+
return rawPipeline, nil
372
384
}
373
385
374
-
func (s *Spindle) configureOwner() error {
375
-
cfgOwner := s.cfg.Server.Owner
386
+
func (s *Spindle) processPipeline(ctx context.Context, tpl tangled.Pipeline, pipelineId models.PipelineId) error {
387
+
// Build pipeline environment variables once for all workflows
388
+
pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev)
389
+
390
+
// filter & init workflows
391
+
workflows := make(map[models.Engine][]models.Workflow)
392
+
for _, w := range tpl.Workflows {
393
+
if w == nil {
394
+
continue
395
+
}
396
+
if _, ok := s.engs[w.Engine]; !ok {
397
+
err := s.db.StatusFailed(models.WorkflowId{
398
+
PipelineId: pipelineId,
399
+
Name: w.Name,
400
+
}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
401
+
if err != nil {
402
+
return fmt.Errorf("db.StatusFailed: %w", err)
403
+
}
376
404
377
-
existing, err := s.e.GetSpindleUsersByRole("server:owner", rbacDomain)
378
-
if err != nil {
379
-
return err
380
-
}
405
+
continue
406
+
}
381
407
382
-
switch len(existing) {
383
-
case 0:
384
-
// no owner configured, continue
385
-
case 1:
386
-
// find existing owner
387
-
existingOwner := existing[0]
408
+
eng := s.engs[w.Engine]
388
409
389
-
// no ownership change, this is okay
390
-
if existingOwner == s.cfg.Server.Owner {
391
-
break
410
+
if _, ok := workflows[eng]; !ok {
411
+
workflows[eng] = []models.Workflow{}
392
412
}
393
413
394
-
// remove existing owner
395
-
err = s.e.RemoveSpindleOwner(rbacDomain, existingOwner)
414
+
ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
396
415
if err != nil {
416
+
return fmt.Errorf("init workflow: %w", err)
417
+
}
418
+
419
+
// inject TANGLED_* env vars after InitWorkflow
420
+
// This prevents user-defined env vars from overriding them
421
+
if ewf.Environment == nil {
422
+
ewf.Environment = make(map[string]string)
423
+
}
424
+
maps.Copy(ewf.Environment, pipelineEnv)
425
+
426
+
workflows[eng] = append(workflows[eng], *ewf)
427
+
}
428
+
429
+
// enqueue pipeline
430
+
ok := s.jq.Enqueue(queue.Job{
431
+
Run: func() error {
432
+
engine.StartWorkflows(log.SubLogger(s.l, "engine"), s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
433
+
RepoOwner: tpl.TriggerMetadata.Repo.Did,
434
+
RepoName: tpl.TriggerMetadata.Repo.Repo,
435
+
Workflows: workflows,
436
+
}, pipelineId)
397
437
return nil
438
+
},
439
+
OnFail: func(jobError error) {
440
+
s.l.Error("pipeline run failed", "error", jobError)
441
+
},
442
+
})
443
+
if !ok {
444
+
return fmt.Errorf("failed to enqueue pipeline: queue is full")
445
+
}
446
+
s.l.Info("pipeline enqueued successfully", "id", pipelineId)
447
+
448
+
// emit StatusPending for all workflows here (after successful enqueue)
449
+
for _, ewfs := range workflows {
450
+
for _, ewf := range ewfs {
451
+
err := s.db.StatusPending(models.WorkflowId{
452
+
PipelineId: pipelineId,
453
+
Name: ewf.Name,
454
+
}, s.n)
455
+
if err != nil {
456
+
return fmt.Errorf("db.StatusPending: %w", err)
457
+
}
398
458
}
399
-
default:
400
-
return fmt.Errorf("more than one owner in DB, try deleting %q and starting over", s.cfg.Server.DBPath)
459
+
}
460
+
return nil
461
+
}
462
+
463
+
// newRepoPath creates the path used to store a repository, keyed by its did and rkey.
464
+
// The path format would be: `/data/repos/did:plc:foo/sh.tangled.repo/repo-rkey`
465
+
func (s *Spindle) newRepoPath(did syntax.DID, rkey syntax.RecordKey) string {
466
+
return filepath.Join(s.cfg.Server.RepoDir(), did.String(), tangled.RepoNSID, rkey.String())
467
+
}
468
+
469
+
func (s *Spindle) newRepoCloneUrl(knot, did, name string) string {
470
+
scheme := "https://"
471
+
if s.cfg.Server.Dev {
472
+
scheme = "http://"
401
473
}
474
+
return fmt.Sprintf("%s%s/%s/%s", scheme, knot, did, name)
475
+
}
402
476
403
-
return s.e.AddSpindleOwner(rbacDomain, cfgOwner)
477
+
const RequiredVersion = "2.49.0"
478
+
479
+
func ensureGitVersion() error {
480
+
v, err := git.Version()
481
+
if err != nil {
482
+
return fmt.Errorf("fetching git version: %w", err)
483
+
}
484
+
if v.LessThan(version.Must(version.NewVersion(RequiredVersion))) {
485
+
return fmt.Errorf("installed git version %q is not supported, Spindle requires git version >= %q", v, RequiredVersion)
486
+
}
487
+
return nil
404
488
}
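For illustration (hypothetical DID, rkey, and repo dir; not taken from the diff), the newRepoPath and newRepoCloneUrl helpers above compose values roughly as follows:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	repoDir := "/data/repos" // assumed RepoDir() value
	did := "did:plc:user123"
	rkey := "3k2exampletid"
	knot := "example.com"
	name := "my-repo"

	// Mirrors newRepoPath: <RepoDir>/<did>/sh.tangled.repo/<rkey>
	repoPath := filepath.Join(repoDir, did, "sh.tangled.repo", rkey)

	// Mirrors newRepoCloneUrl outside dev mode: https://<knot>/<did>/<name>
	cloneURL := fmt.Sprintf("https://%s/%s/%s", knot, did, name)

	fmt.Println(repoPath) // /data/repos/did:plc:user123/sh.tangled.repo/3k2exampletid
	fmt.Println(cloneURL) // https://example.com/did:plc:user123/my-repo
}
```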
+391
spindle/tap.go
···
1
+
package spindle
2
+
3
+
import (
4
+
"context"
5
+
"encoding/json"
6
+
"fmt"
7
+
"time"
8
+
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
10
+
"tangled.org/core/api/tangled"
11
+
"tangled.org/core/eventconsumer"
12
+
"tangled.org/core/spindle/db"
13
+
"tangled.org/core/spindle/git"
14
+
"tangled.org/core/spindle/models"
15
+
"tangled.org/core/tap"
16
+
"tangled.org/core/tid"
17
+
"tangled.org/core/workflow"
18
+
)
19
+
20
+
func (s *Spindle) processEvent(ctx context.Context, evt tap.Event) error {
21
+
l := s.l.With("component", "tapIndexer")
22
+
23
+
var err error
24
+
switch evt.Type {
25
+
case tap.EvtRecord:
26
+
switch evt.Record.Collection.String() {
27
+
case tangled.SpindleMemberNSID:
28
+
err = s.processMember(ctx, evt)
29
+
case tangled.RepoNSID:
30
+
err = s.processRepo(ctx, evt)
31
+
case tangled.RepoCollaboratorNSID:
32
+
err = s.processCollaborator(ctx, evt)
33
+
case tangled.RepoPullNSID:
34
+
err = s.processPull(ctx, evt)
35
+
}
36
+
case tap.EvtIdentity:
37
+
// no-op
38
+
}
39
+
40
+
if err != nil {
41
+
l.Error("failed to process message. will retry later", "event.ID", evt.ID, "err", err)
42
+
return err
43
+
}
44
+
return nil
45
+
}
46
+
47
+
// NOTE: make sure to return nil if we don't need to retry (e.g. forbidden, unrelated)
48
+
49
+
func (s *Spindle) processMember(ctx context.Context, evt tap.Event) error {
50
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
51
+
52
+
l.Info("processing spindle.member record")
53
+
54
+
// only listen to members
55
+
if ok, err := s.e.IsSpindleMemberInviteAllowed(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
56
+
l.Warn("forbidden request: member invite not allowed", "did", evt.Record.Did, "error", err)
57
+
return nil
58
+
}
59
+
60
+
switch evt.Record.Action {
61
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
62
+
record := tangled.SpindleMember{}
63
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
64
+
return fmt.Errorf("parsing record: %w", err)
65
+
}
66
+
67
+
domain := s.cfg.Server.Hostname
68
+
if record.Instance != domain {
69
+
l.Info("domain mismatch", "domain", record.Instance, "expected", domain)
70
+
return nil
71
+
}
72
+
73
+
created, err := time.Parse(time.RFC3339, record.CreatedAt)
74
+
if err != nil {
75
+
created = time.Now()
76
+
}
77
+
if err := db.AddSpindleMember(s.db, db.SpindleMember{
78
+
Did: evt.Record.Did,
79
+
Rkey: evt.Record.Rkey.String(),
80
+
Instance: record.Instance,
81
+
Subject: syntax.DID(record.Subject),
82
+
Created: created,
83
+
}); err != nil {
84
+
l.Error("failed to add member", "error", err)
85
+
return fmt.Errorf("adding member to db: %w", err)
86
+
}
87
+
if err := s.e.AddSpindleMember(syntax.DID(record.Subject), s.cfg.Server.Did()); err != nil {
88
+
return fmt.Errorf("adding member to rbac: %w", err)
89
+
}
90
+
if err := s.tap.AddRepos(ctx, []syntax.DID{syntax.DID(record.Subject)}); err != nil {
91
+
return fmt.Errorf("adding did to tap", err)
92
+
}
93
+
94
+
l.Info("added member", "member", record.Subject)
95
+
return nil
96
+
97
+
case tap.RecordDeleteAction:
98
+
var (
99
+
did = evt.Record.Did.String()
100
+
rkey = evt.Record.Rkey.String()
101
+
)
102
+
member, err := db.GetSpindleMember(s.db, did, rkey)
103
+
if err != nil {
104
+
return fmt.Errorf("finding member: %w", err)
105
+
}
106
+
107
+
if err := db.RemoveSpindleMember(s.db, did, rkey); err != nil {
108
+
return fmt.Errorf("removing member from db: %w", err)
109
+
}
110
+
if err := s.e.RemoveSpindleMember(member.Subject, s.cfg.Server.Did()); err != nil {
111
+
return fmt.Errorf("removing member from rbac: %w", err)
112
+
}
113
+
if err := s.tapSafeRemoveDid(ctx, member.Subject); err != nil {
114
+
return fmt.Errorf("removing did from tap: %w", err)
115
+
}
116
+
117
+
l.Info("removed member", "member", member.Subject)
118
+
return nil
119
+
}
120
+
return nil
121
+
}
122
+
123
+
func (s *Spindle) processCollaborator(ctx context.Context, evt tap.Event) error {
124
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
125
+
126
+
l.Info("processing repo.collaborator record")
127
+
128
+
// only listen to members
129
+
if ok, err := s.e.IsSpindleMember(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
130
+
l.Warn("forbidden request: not spindle member", "did", evt.Record.Did, "err", err)
131
+
return nil
132
+
}
133
+
134
+
switch evt.Record.Action {
135
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
136
+
record := tangled.RepoCollaborator{}
137
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
138
+
l.Error("invalid record", "err", err)
139
+
return fmt.Errorf("parsing record: %w", err)
140
+
}
141
+
142
+
// retry later if target repo is not ingested yet
143
+
if _, err := s.db.GetRepo(syntax.ATURI(record.Repo)); err != nil {
144
+
l.Warn("target repo is not ingested yet", "repo", record.Repo, "err", err)
145
+
return fmt.Errorf("target repo is unknown")
146
+
}
147
+
148
+
// check perms for this user
149
+
if ok, err := s.e.IsRepoCollaboratorInviteAllowed(evt.Record.Did, syntax.ATURI(record.Repo)); !ok || err != nil {
150
+
l.Warn("forbidden request collaborator invite not allowed", "did", evt.Record.Did, "err", err)
151
+
return nil
152
+
}
153
+
154
+
if err := s.db.PutRepoCollaborator(&db.RepoCollaborator{
155
+
Did: evt.Record.Did,
156
+
Rkey: evt.Record.Rkey,
157
+
Repo: syntax.ATURI(record.Repo),
158
+
Subject: syntax.DID(record.Subject),
159
+
}); err != nil {
160
+
return fmt.Errorf("adding collaborator to db: %w", err)
161
+
}
162
+
if err := s.e.AddRepoCollaborator(syntax.DID(record.Subject), syntax.ATURI(record.Repo)); err != nil {
163
+
return fmt.Errorf("adding collaborator to rbac: %w", err)
164
+
}
165
+
if err := s.tap.AddRepos(ctx, []syntax.DID{syntax.DID(record.Subject)}); err != nil {
166
+
return fmt.Errorf("adding did to tap: %w", err)
167
+
}
168
+
169
+
l.Info("add repo collaborator", "subejct", record.Subject, "repo", record.Repo)
170
+
return nil
171
+
172
+
case tap.RecordDeleteAction:
173
+
// get existing collaborator
174
+
collaborator, err := s.db.GetRepoCollaborator(evt.Record.Did, evt.Record.Rkey)
175
+
if err != nil {
176
+
return fmt.Errorf("failed to get existing collaborator info: %w", err)
177
+
}
178
+
179
+
// check perms for this user
180
+
if ok, err := s.e.IsRepoCollaboratorInviteAllowed(evt.Record.Did, collaborator.Repo); !ok || err != nil {
181
+
l.Warn("forbidden request collaborator invite not allowed", "did", evt.Record.Did, "err", err)
182
+
return nil
183
+
}
184
+
185
+
if err := s.db.RemoveRepoCollaborator(collaborator.Subject, collaborator.Rkey); err != nil {
186
+
return fmt.Errorf("removing collaborator from db: %w", err)
187
+
}
188
+
if err := s.e.RemoveRepoCollaborator(collaborator.Subject, collaborator.Repo); err != nil {
189
+
return fmt.Errorf("removing collaborator from rbac: %w", err)
190
+
}
191
+
if err := s.tapSafeRemoveDid(ctx, collaborator.Subject); err != nil {
192
+
return fmt.Errorf("removing did from tap: %w", err)
193
+
}
194
+
195
+
l.Info("removed repo collaborator", "subejct", collaborator.Subject, "repo", collaborator.Repo)
196
+
return nil
197
+
}
198
+
return nil
199
+
}
200
+
201
+
func (s *Spindle) processRepo(ctx context.Context, evt tap.Event) error {
202
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
203
+
204
+
l.Info("processing repo record")
205
+
206
+
// only listen to members
207
+
if ok, err := s.e.IsSpindleMember(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
208
+
l.Warn("forbidden request: not spindle member", "did", evt.Record.Did, "err", err)
209
+
return nil
210
+
}
211
+
212
+
switch evt.Record.Action {
213
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
214
+
record := tangled.Repo{}
215
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
216
+
return fmt.Errorf("parsing record: %w", err)
217
+
}
218
+
219
+
domain := s.cfg.Server.Hostname
220
+
if record.Spindle == nil || *record.Spindle != domain {
221
+
if record.Spindle == nil {
222
+
l.Info("spindle isn't configured", "name", record.Name)
223
+
} else {
224
+
l.Info("different spindle configured", "name", record.Name, "spindle", *record.Spindle, "domain", domain)
225
+
}
226
+
if err := s.db.DeleteRepo(evt.Record.Did, evt.Record.Rkey); err != nil {
227
+
return fmt.Errorf("deleting repo from db: %w", err)
228
+
}
229
+
return nil
230
+
}
231
+
232
+
repo := &db.Repo{
233
+
Did: evt.Record.Did,
234
+
Rkey: evt.Record.Rkey,
235
+
Name: record.Name,
236
+
Knot: record.Knot,
237
+
}
238
+
239
+
if err := s.db.PutRepo(repo); err != nil {
240
+
return fmt.Errorf("adding repo to db: %w", err)
241
+
}
242
+
243
+
if err := s.e.AddRepo(evt.Record.AtUri()); err != nil {
244
+
return fmt.Errorf("adding repo to rbac")
245
+
}
246
+
247
+
// add this knot to the event consumer
248
+
src := eventconsumer.NewKnotSource(record.Knot)
249
+
s.ks.AddSource(context.Background(), src)
250
+
251
+
// setup sparse sync
252
+
repoCloneUri := s.newRepoCloneUrl(repo.Knot, repo.Did.String(), repo.Name)
253
+
repoPath := s.newRepoPath(repo.Did, repo.Rkey)
254
+
if err := git.SparseSyncGitRepo(ctx, repoCloneUri, repoPath, ""); err != nil {
255
+
return fmt.Errorf("setting up sparse-clone git repo: %w", err)
256
+
}
257
+
258
+
l.Info("added repo", "repo", evt.Record.AtUri())
259
+
return nil
260
+
261
+
case tap.RecordDeleteAction:
262
+
// check perms for this user
263
+
if ok, err := s.e.IsRepoOwner(evt.Record.Did, evt.Record.AtUri()); !ok || err != nil {
264
+
l.Warn("forbidden request: not repo owner", "did", evt.Record.Did, "err", err)
265
+
return nil
266
+
}
267
+
268
+
if err := s.db.DeleteRepo(evt.Record.Did, evt.Record.Rkey); err != nil {
269
+
return fmt.Errorf("deleting repo from db: %w", err)
270
+
}
271
+
272
+
if err := s.e.DeleteRepo(evt.Record.AtUri()); err != nil {
273
+
return fmt.Errorf("deleting repo from rbac: %w", err)
274
+
}
275
+
276
+
l.Info("deleted repo", "repo", evt.Record.AtUri())
277
+
return nil
278
+
}
279
+
return nil
280
+
}
281
+
282
+
func (s *Spindle) processPull(ctx context.Context, evt tap.Event) error {
283
+
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
284
+
285
+
l.Info("processing pull record")
286
+
287
+
// only listen to live events
288
+
if !evt.Record.Live {
289
+
l.Info("skipping backfill event", "event", evt.Record.AtUri())
290
+
return nil
291
+
}
292
+
293
+
switch evt.Record.Action {
294
+
case tap.RecordCreateAction, tap.RecordUpdateAction:
295
+
record := tangled.RepoPull{}
296
+
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
297
+
l.Error("invalid record", "err", err)
298
+
return fmt.Errorf("parsing record: %w", err)
299
+
}
300
+
301
+
// ignore legacy records
302
+
if record.Target == nil {
303
+
l.Info("ignoring pull record: target repo is nil")
304
+
return nil
305
+
}
306
+
307
+
// ignore patch-based and fork-based PRs
308
+
if record.Source == nil || record.Source.Repo != nil {
309
+
l.Info("ignoring pull record: not a branch-based pull request")
310
+
return nil
311
+
}
312
+
313
+
// skip if target repo is unknown
314
+
repo, err := s.db.GetRepo(syntax.ATURI(record.Target.Repo))
315
+
if err != nil {
316
+
l.Warn("target repo is not ingested yet", "repo", record.Target.Repo, "err", err)
317
+
return fmt.Errorf("target repo is unknown")
318
+
}
319
+
320
+
compiler := workflow.Compiler{
321
+
Trigger: tangled.Pipeline_TriggerMetadata{
322
+
Kind: string(workflow.TriggerKindPullRequest),
323
+
PullRequest: &tangled.Pipeline_PullRequestTriggerData{
324
+
Action: "create",
325
+
SourceBranch: record.Source.Branch,
326
+
SourceSha: record.Source.Sha,
327
+
TargetBranch: record.Target.Branch,
328
+
},
329
+
Repo: &tangled.Pipeline_TriggerRepo{
330
+
Did: repo.Did.String(),
331
+
Knot: repo.Knot,
332
+
Repo: repo.Name,
333
+
},
334
+
},
335
+
}
336
+
337
+
repoUri := s.newRepoCloneUrl(repo.Knot, repo.Did.String(), repo.Name)
338
+
repoPath := s.newRepoPath(repo.Did, repo.Rkey)
339
+
340
+
// load workflow definitions from rev (without spindle context)
341
+
rawPipeline, err := s.loadPipeline(ctx, repoUri, repoPath, record.Source.Sha)
342
+
if err != nil {
343
+
// don't retry
344
+
l.Error("failed loading pipeline", "err", err)
345
+
return nil
346
+
}
347
+
if len(rawPipeline) == 0 {
348
+
l.Info("no workflow definition find for the repo. skipping the event")
349
+
return nil
350
+
}
351
+
tpl := compiler.Compile(compiler.Parse(rawPipeline))
352
+
// TODO: pass compile error to workflow log
353
+
for _, w := range compiler.Diagnostics.Errors {
354
+
l.Error(w.String())
355
+
}
356
+
for _, w := range compiler.Diagnostics.Warnings {
357
+
l.Warn(w.String())
358
+
}
359
+
360
+
pipelineId := models.PipelineId{
361
+
Knot: tpl.TriggerMetadata.Repo.Knot,
362
+
Rkey: tid.TID(),
363
+
}
364
+
if err := s.db.CreatePipelineEvent(pipelineId.Rkey, tpl, s.n); err != nil {
365
+
l.Error("failed to create pipeline event", "err", err)
366
+
return nil
367
+
}
368
+
err = s.processPipeline(ctx, tpl, pipelineId)
369
+
if err != nil {
370
+
// don't retry
371
+
l.Error("failed processing pipeline", "err", err)
372
+
return nil
373
+
}
374
+
case tap.RecordDeleteAction:
375
+
// no-op
376
+
}
377
+
return nil
378
+
}
379
+
380
+
func (s *Spindle) tapSafeRemoveDid(ctx context.Context, did syntax.DID) error {
381
+
known, err := s.db.IsKnownDid(syntax.DID(did))
382
+
if err != nil {
383
+
return fmt.Errorf("ensuring did known state: %w", err)
384
+
}
385
+
if !known {
386
+
if err := s.tap.RemoveRepos(ctx, []syntax.DID{did}); err != nil {
387
+
return fmt.Errorf("removing did from tap: %w", err)
388
+
}
389
+
}
390
+
return nil
391
+
}
+1
-2
spindle/xrpc/add_secret.go
···
11
11
"github.com/bluesky-social/indigo/xrpc"
12
12
securejoin "github.com/cyphar/filepath-securejoin"
13
13
"tangled.org/core/api/tangled"
14
-
"tangled.org/core/rbac"
15
14
"tangled.org/core/spindle/secrets"
16
15
xrpcerr "tangled.org/core/xrpc/errors"
17
16
)
···
68
67
return
69
68
}
70
69
71
-
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
70
+
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
72
71
l.Error("insufficent permissions", "did", actorDid.String())
73
72
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
74
73
return
+1
-2
spindle/xrpc/list_secrets.go
···
11
11
"github.com/bluesky-social/indigo/xrpc"
12
12
securejoin "github.com/cyphar/filepath-securejoin"
13
13
"tangled.org/core/api/tangled"
14
-
"tangled.org/core/rbac"
15
14
"tangled.org/core/spindle/secrets"
16
15
xrpcerr "tangled.org/core/xrpc/errors"
17
16
)
···
63
62
return
64
63
}
65
64
66
-
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
65
+
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
67
66
l.Error("insufficent permissions", "did", actorDid.String())
68
67
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
69
68
return
+1
-1
spindle/xrpc/owner.go
+72
spindle/xrpc/pipeline_cancelPipeline.go
···
1
+
package xrpc
2
+
3
+
import (
4
+
"encoding/json"
5
+
"fmt"
6
+
"net/http"
7
+
"strings"
8
+
9
+
"github.com/bluesky-social/indigo/atproto/syntax"
10
+
"tangled.org/core/api/tangled"
11
+
"tangled.org/core/spindle/models"
12
+
xrpcerr "tangled.org/core/xrpc/errors"
13
+
)
14
+
15
+
func (x *Xrpc) CancelPipeline(w http.ResponseWriter, r *http.Request) {
16
+
l := x.Logger
17
+
fail := func(e xrpcerr.XrpcError) {
18
+
l.Error("failed", "kind", e.Tag, "error", e.Message)
19
+
writeError(w, e, http.StatusBadRequest)
20
+
}
21
+
l.Debug("cancel pipeline")
22
+
23
+
actorDid, ok := r.Context().Value(ActorDid).(syntax.DID)
24
+
if !ok {
25
+
fail(xrpcerr.MissingActorDidError)
26
+
return
27
+
}
28
+
29
+
var input tangled.PipelineCancelPipeline_Input
30
+
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
31
+
fail(xrpcerr.GenericError(err))
32
+
return
33
+
}
34
+
35
+
aturi := syntax.ATURI(input.Pipeline)
36
+
wid := models.WorkflowId{
37
+
PipelineId: models.PipelineId{
38
+
Knot: strings.TrimPrefix(aturi.Authority().String(), "did:web:"),
39
+
Rkey: aturi.RecordKey().String(),
40
+
},
41
+
Name: input.Workflow,
42
+
}
43
+
l.Debug("cancel pipeline", "wid", wid)
44
+
45
+
// unfortunately we have to resolve repo-at here
46
+
repoAt, err := syntax.ParseATURI(input.Repo)
47
+
if err != nil {
48
+
fail(xrpcerr.InvalidRepoError(input.Repo))
49
+
return
50
+
}
51
+
52
+
isRepoOwner, err := x.Enforcer.IsRepoOwner(actorDid, repoAt)
53
+
if err != nil || !isRepoOwner {
54
+
fail(xrpcerr.AccessControlError(actorDid.String()))
55
+
return
56
+
}
57
+
for _, engine := range x.Engines {
58
+
l.Debug("destorying workflow", "wid", wid)
59
+
err = engine.DestroyWorkflow(r.Context(), wid)
60
+
if err != nil {
61
+
fail(xrpcerr.GenericError(fmt.Errorf("dailed to destroy workflow: %w", err)))
62
+
return
63
+
}
64
+
err = x.Db.StatusCancelled(wid, "User canceled the workflow", -1, x.Notifier)
65
+
if err != nil {
66
+
fail(xrpcerr.GenericError(fmt.Errorf("dailed to emit status failed: %w", err)))
67
+
return
68
+
}
69
+
}
70
+
71
+
w.WriteHeader(http.StatusOK)
72
+
}
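
A minimal sketch of the AT-URI decomposition used by CancelPipeline above: the authority of the pipeline AT-URI (assumed here to be the spindle's did:web identity) becomes the Knot, and the record key becomes the Rkey. The concrete URI, collection name, and rkey below are invented for illustration.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/bluesky-social/indigo/atproto/syntax"
)

func main() {
	// Hypothetical pipeline AT-URI; the collection and rkey are made up.
	aturi := syntax.ATURI("at://did:web:spindle.example.org/sh.tangled.pipeline/3kexamplerkey")

	// Same derivation as the handler: strip the did:web prefix to get the
	// knot hostname, and take the record key as the pipeline rkey.
	knot := strings.TrimPrefix(aturi.Authority().String(), "did:web:")
	rkey := aturi.RecordKey().String()

	fmt.Println(knot) // spindle.example.org
	fmt.Println(rkey) // 3kexamplerkey
}
```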
+1
-2
spindle/xrpc/remove_secret.go
···
10
10
"github.com/bluesky-social/indigo/xrpc"
11
11
securejoin "github.com/cyphar/filepath-securejoin"
12
12
"tangled.org/core/api/tangled"
13
-
"tangled.org/core/rbac"
14
13
"tangled.org/core/spindle/secrets"
15
14
xrpcerr "tangled.org/core/xrpc/errors"
16
15
)
···
62
61
return
63
62
}
64
63
65
-
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
64
+
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
66
65
l.Error("insufficent permissions", "did", actorDid.String())
67
66
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
68
67
return
+5
-2
spindle/xrpc/xrpc.go
···
10
10
11
11
"tangled.org/core/api/tangled"
12
12
"tangled.org/core/idresolver"
13
-
"tangled.org/core/rbac"
13
+
"tangled.org/core/notifier"
14
+
"tangled.org/core/rbac2"
14
15
"tangled.org/core/spindle/config"
15
16
"tangled.org/core/spindle/db"
16
17
"tangled.org/core/spindle/models"
···
24
25
type Xrpc struct {
25
26
Logger *slog.Logger
26
27
Db *db.DB
27
-
Enforcer *rbac.Enforcer
28
+
Enforcer *rbac2.Enforcer
28
29
Engines map[string]models.Engine
29
30
Config *config.Config
30
31
Resolver *idresolver.Resolver
31
32
Vault secrets.Manager
33
+
Notifier *notifier.Notifier
32
34
ServiceAuth *serviceauth.ServiceAuth
33
35
}
34
36
···
41
43
r.Post("/"+tangled.RepoAddSecretNSID, x.AddSecret)
42
44
r.Post("/"+tangled.RepoRemoveSecretNSID, x.RemoveSecret)
43
45
r.Get("/"+tangled.RepoListSecretsNSID, x.ListSecrets)
46
+
r.Post("/"+tangled.PipelineCancelPipelineNSID, x.CancelPipeline)
44
47
})
45
48
46
49
// service query endpoints (no auth required)
+1
-1
tailwind.config.js
···
2
2
const colors = require("tailwindcss/colors");
3
3
4
4
module.exports = {
5
-
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go"],
5
+
content: ["./appview/pages/templates/**/*.html", "./appview/pages/chroma.go", "./docs/*.html"],
6
6
darkMode: "media",
7
7
theme: {
8
8
container: {
+24
tap/simpleIndexer.go
···
1
+
package tap
2
+
3
+
import "context"
4
+
5
+
type SimpleIndexer struct {
6
+
EventHandler func(ctx context.Context, evt Event) error
7
+
ErrorHandler func(ctx context.Context, err error)
8
+
}
9
+
10
+
var _ Handler = (*SimpleIndexer)(nil)
11
+
12
+
func (i *SimpleIndexer) OnEvent(ctx context.Context, evt Event) error {
13
+
if i.EventHandler == nil {
14
+
return nil
15
+
}
16
+
return i.EventHandler(ctx, evt)
17
+
}
18
+
19
+
func (i *SimpleIndexer) OnError(ctx context.Context, err error) {
20
+
if i.ErrorHandler == nil {
21
+
return
22
+
}
23
+
i.ErrorHandler(ctx, err)
24
+
}
+169
tap/tap.go
···
1
+
/// heavily inspired by <https://github.com/bluesky-social/atproto/blob/c7f5a868837d3e9b3289f988fee2267789327b06/packages/tap/README.md>
2
+
3
+
package tap
4
+
5
+
import (
6
+
"bytes"
7
+
"context"
8
+
"encoding/json"
9
+
"fmt"
10
+
"net/http"
11
+
"net/url"
12
+
13
+
"github.com/bluesky-social/indigo/atproto/syntax"
14
+
"github.com/gorilla/websocket"
15
+
"tangled.org/core/log"
16
+
)
17
+
18
+
// type WebsocketOptions struct {
19
+
// maxReconnectSeconds int
20
+
// heartbeatIntervalMs int
21
+
// // onReconnectError
22
+
// }
23
+
24
+
type Handler interface {
25
+
OnEvent(ctx context.Context, evt Event) error
26
+
OnError(ctx context.Context, err error)
27
+
}
28
+
29
+
type Client struct {
30
+
Url string
31
+
AdminPassword string
32
+
HTTPClient *http.Client
33
+
}
34
+
35
+
func NewClient(url, adminPassword string) Client {
36
+
return Client{
37
+
Url: url,
38
+
AdminPassword: adminPassword,
39
+
HTTPClient: &http.Client{},
40
+
}
41
+
}
42
+
43
+
func (c *Client) AddRepos(ctx context.Context, dids []syntax.DID) error {
44
+
body, err := json.Marshal(map[string][]syntax.DID{"dids": dids})
45
+
if err != nil {
46
+
return err
47
+
}
48
+
req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/add", bytes.NewReader(body))
49
+
if err != nil {
50
+
return err
51
+
}
52
+
req.SetBasicAuth("admin", c.AdminPassword)
53
+
req.Header.Set("Content-Type", "application/json")
54
+
55
+
resp, err := c.HTTPClient.Do(req)
56
+
if err != nil {
57
+
return err
58
+
}
59
+
defer resp.Body.Close()
60
+
if resp.StatusCode != http.StatusOK {
61
+
return fmt.Errorf("tap: /repos/add failed with status %d", resp.StatusCode)
62
+
}
63
+
return nil
64
+
}
65
+
66
+
func (c *Client) RemoveRepos(ctx context.Context, dids []syntax.DID) error {
67
+
body, err := json.Marshal(map[string][]syntax.DID{"dids": dids})
68
+
if err != nil {
69
+
return err
70
+
}
71
+
req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/remove", bytes.NewReader(body))
72
+
if err != nil {
73
+
return err
74
+
}
75
+
req.SetBasicAuth("admin", c.AdminPassword)
76
+
req.Header.Set("Content-Type", "application/json")
77
+
78
+
resp, err := c.HTTPClient.Do(req)
79
+
if err != nil {
80
+
return err
81
+
}
82
+
defer resp.Body.Close()
83
+
if resp.StatusCode != http.StatusOK {
84
+
return fmt.Errorf("tap: /repos/remove failed with status %d", resp.StatusCode)
85
+
}
86
+
return nil
87
+
}
88
+
89
+
func (c *Client) Connect(ctx context.Context, handler Handler) error {
90
+
l := log.FromContext(ctx)
91
+
92
+
u, err := url.Parse(c.Url)
93
+
if err != nil {
94
+
return err
95
+
}
96
+
if u.Scheme == "https" {
97
+
u.Scheme = "wss"
98
+
} else {
99
+
u.Scheme = "ws"
100
+
}
101
+
u.Path = "/channel"
102
+
103
+
// TODO: set auth on dial
104
+
105
+
url := u.String()
106
+
107
+
// var backoff int
108
+
// for {
109
+
// select {
110
+
// case <-ctx.Done():
111
+
// return ctx.Err()
112
+
// default:
113
+
// }
114
+
//
115
+
// header := http.Header{
116
+
// "Authorization": []string{""},
117
+
// }
118
+
// conn, res, err := websocket.DefaultDialer.DialContext(ctx, url, header)
119
+
// if err != nil {
120
+
// l.Warn("dialing failed", "url", url, "err", err, "backoff", backoff)
121
+
// time.Sleep(time.Duration(5+backoff) * time.Second)
122
+
// backoff++
123
+
//
124
+
// continue
125
+
// } else {
126
+
// backoff = 0
127
+
// }
128
+
//
129
+
// l.Info("event subscription response", "code", res.StatusCode)
130
+
// }
131
+
132
+
// TODO: keep websocket connection alive
133
+
conn, _, err := websocket.DefaultDialer.DialContext(ctx, url, nil)
134
+
if err != nil {
135
+
return err
136
+
}
137
+
defer conn.Close()
138
+
139
+
for {
140
+
select {
141
+
case <-ctx.Done():
142
+
return ctx.Err()
143
+
default:
144
+
}
145
+
_, message, err := conn.ReadMessage()
146
+
if err != nil {
147
+
return err
148
+
}
149
+
150
+
var ev Event
151
+
if err := json.Unmarshal(message, &ev); err != nil {
152
+
handler.OnError(ctx, fmt.Errorf("failed to parse message: %w", err))
153
+
continue
154
+
}
155
+
if err := handler.OnEvent(ctx, ev); err != nil {
156
+
handler.OnError(ctx, fmt.Errorf("failed to process event %d: %w", ev.ID, err))
157
+
continue
158
+
}
159
+
160
+
ack := map[string]any{
161
+
"type": "ack",
162
+
"id": ev.ID,
163
+
}
164
+
if err := conn.WriteJSON(ack); err != nil {
165
+
l.Warn("failed to send ack", "err", err)
166
+
continue
167
+
}
168
+
}
169
+
}
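
A sketch of how a consumer might wire the pieces above together: subscribe with a SimpleIndexer whose EventHandler logs record events and let Connect ack each event after the handler returns. The endpoint URL and admin password are placeholders, and the import path assumes the package lives at tangled.org/core/tap.

```go
package main

import (
	"context"
	"log"

	"tangled.org/core/tap"
)

func main() {
	ctx := context.Background()
	client := tap.NewClient("https://tap.example.org", "hunter2")

	handler := &tap.SimpleIndexer{
		EventHandler: func(ctx context.Context, evt tap.Event) error {
			// only record events carry a RecordEventData payload
			if evt.Type == tap.EvtRecord && evt.Record != nil {
				log.Printf("record %s %s", evt.Record.Action, evt.Record.AtUri())
			}
			return nil
		},
		ErrorHandler: func(ctx context.Context, err error) {
			log.Printf("tap error: %v", err)
		},
	}

	// Connect blocks, reading events off the websocket and acking each one
	// once the handler returns without error.
	if err := client.Connect(ctx, handler); err != nil {
		log.Fatal(err)
	}
}
```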
+62
tap/types.go
···
1
+
package tap
2
+
3
+
import (
4
+
"encoding/json"
5
+
"fmt"
6
+
7
+
"github.com/bluesky-social/indigo/atproto/syntax"
8
+
)
9
+
10
+
type EventType string
11
+
12
+
const (
13
+
EvtRecord EventType = "record"
14
+
EvtIdentity EventType = "identity"
15
+
)
16
+
17
+
type Event struct {
18
+
ID int64 `json:"id"`
19
+
Type EventType `json:"type"`
20
+
Record *RecordEventData `json:"record,omitempty"`
21
+
Identity *IdentityEventData `json:"identity,omitempty"`
22
+
}
23
+
24
+
type RecordEventData struct {
25
+
Live bool `json:"live"`
26
+
Did syntax.DID `json:"did"`
27
+
Rev string `json:"rev"`
28
+
Collection syntax.NSID `json:"collection"`
29
+
Rkey syntax.RecordKey `json:"rkey"`
30
+
Action RecordAction `json:"action"`
31
+
Record json.RawMessage `json:"record,omitempty"`
32
+
CID *syntax.CID `json:"cid,omitempty"`
33
+
}
34
+
35
+
func (r *RecordEventData) AtUri() syntax.ATURI {
36
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, r.Collection, r.Rkey))
37
+
}
38
+
39
+
type RecordAction string
40
+
41
+
const (
42
+
RecordCreateAction RecordAction = "create"
43
+
RecordUpdateAction RecordAction = "update"
44
+
RecordDeleteAction RecordAction = "delete"
45
+
)
46
+
47
+
type IdentityEventData struct {
48
+
DID syntax.DID `json:"did"`
49
+
Handle string `json:"handle"`
50
+
IsActive bool `json:"is_active"`
51
+
Status RepoStatus `json:"status"`
52
+
}
53
+
54
+
type RepoStatus string
55
+
56
+
const (
57
+
RepoStatusActive RepoStatus = "active"
58
+
RepoStatusTakendown RepoStatus = "takendown"
59
+
RepoStatusSuspended RepoStatus = "suspended"
60
+
RepoStatusDeactivated RepoStatus = "deactivated"
61
+
RepoStatusDeleted RepoStatus = "deleted"
62
+
)
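
For reference, a sketch of the wire shape these types decode. Every concrete value below (DID, rev, collection, rkey) is invented for illustration, and the import path assumes the package lives at tangled.org/core/tap.

```go
package main

import (
	"encoding/json"
	"fmt"

	"tangled.org/core/tap"
)

func main() {
	// A made-up "record" event as it might arrive over the channel.
	raw := []byte(`{
		"id": 42,
		"type": "record",
		"record": {
			"live": true,
			"did": "did:plc:abcdefghijklmnopqrstuvwx",
			"rev": "3kexamplerev",
			"collection": "sh.tangled.repo.pull",
			"rkey": "3kexamplerkey",
			"action": "create"
		}
	}`)

	var ev tap.Event
	if err := json.Unmarshal(raw, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.Type, ev.Record.Action, ev.Record.AtUri())
	// record create at://did:plc:abcdefghijklmnopqrstuvwx/sh.tangled.repo.pull/3kexamplerkey
}
```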
+199
types/commit.go
···
1
+
package types
2
+
3
+
import (
4
+
"bytes"
5
+
"encoding/json"
6
+
"fmt"
7
+
"maps"
8
+
"regexp"
9
+
"strings"
10
+
11
+
"github.com/go-git/go-git/v5/plumbing"
12
+
"github.com/go-git/go-git/v5/plumbing/object"
13
+
)
14
+
15
+
type Commit struct {
16
+
// hash of the commit object.
17
+
Hash plumbing.Hash `json:"hash,omitempty"`
18
+
19
+
// author is the original author of the commit.
20
+
Author object.Signature `json:"author"`
21
+
22
+
// committer is the one performing the commit, might be different from author.
23
+
Committer object.Signature `json:"committer"`
24
+
25
+
// message is the commit message, contains arbitrary text.
26
+
Message string `json:"message"`
27
+
28
+
// treehash is the hash of the root tree of the commit.
29
+
Tree string `json:"tree"`
30
+
31
+
// parents are the hashes of the parent commits of the commit.
32
+
ParentHashes []plumbing.Hash `json:"parent_hashes,omitempty"`
33
+
34
+
// pgpsignature is the pgp signature of the commit.
35
+
PGPSignature string `json:"pgp_signature,omitempty"`
36
+
37
+
// mergetag is the embedded tag object when a merge commit is created by
38
+
// merging a signed tag.
39
+
MergeTag string `json:"merge_tag,omitempty"`
40
+
41
+
// changeid is a unique identifier for the change (e.g., gerrit change-id).
42
+
ChangeId string `json:"change_id,omitempty"`
43
+
44
+
// extraheaders contains additional headers not captured by other fields.
45
+
ExtraHeaders map[string][]byte `json:"extra_headers,omitempty"`
46
+
47
+
// deprecated: kept for backwards compatibility with old json format.
48
+
This string `json:"this,omitempty"`
49
+
50
+
// deprecated: kept for backwards compatibility with old json format.
51
+
Parent string `json:"parent,omitempty"`
52
+
}
53
+
54
+
// types.Commit unifies two commit structs:
55
+
// - object.Commit from go-git
56
+
// - types.NiceDiff.commit
57
+
//
58
+
// to do this in a backwards-compatible fashion, we define the base struct
59
+
// to use the same fields as NiceDiff.Commit, and then we also unmarshal
60
+
// the struct fields from go-git structs; this custom unmarshal makes sense
61
+
// of both representations and unifies them to have maximal data in either
62
+
// form.
63
+
func (c *Commit) UnmarshalJSON(data []byte) error {
64
+
type Alias Commit
65
+
66
+
aux := &struct {
67
+
*object.Commit
68
+
*Alias
69
+
}{
70
+
Alias: (*Alias)(c),
71
+
}
72
+
73
+
if err := json.Unmarshal(data, aux); err != nil {
74
+
return err
75
+
}
76
+
77
+
c.FromGoGitCommit(aux.Commit)
78
+
79
+
return nil
80
+
}
81
+
82
+
// fill in as much of Commit as possible from the given go-git commit
83
+
func (c *Commit) FromGoGitCommit(gc *object.Commit) {
84
+
if gc == nil {
85
+
return
86
+
}
87
+
88
+
if c.Hash.IsZero() {
89
+
c.Hash = gc.Hash
90
+
}
91
+
if c.This == "" {
92
+
c.This = gc.Hash.String()
93
+
}
94
+
if isEmptySignature(c.Author) {
95
+
c.Author = gc.Author
96
+
}
97
+
if isEmptySignature(c.Committer) {
98
+
c.Committer = gc.Committer
99
+
}
100
+
if c.Message == "" {
101
+
c.Message = gc.Message
102
+
}
103
+
if c.Tree == "" {
104
+
c.Tree = gc.TreeHash.String()
105
+
}
106
+
if c.PGPSignature == "" {
107
+
c.PGPSignature = gc.PGPSignature
108
+
}
109
+
if c.MergeTag == "" {
110
+
c.MergeTag = gc.MergeTag
111
+
}
112
+
113
+
if len(c.ParentHashes) == 0 {
114
+
c.ParentHashes = gc.ParentHashes
115
+
}
116
+
if c.Parent == "" && len(gc.ParentHashes) > 0 {
117
+
c.Parent = gc.ParentHashes[0].String()
118
+
}
119
+
120
+
if len(c.ExtraHeaders) == 0 {
121
+
c.ExtraHeaders = make(map[string][]byte)
122
+
maps.Copy(c.ExtraHeaders, gc.ExtraHeaders)
123
+
}
124
+
125
+
if c.ChangeId == "" {
126
+
if v, ok := gc.ExtraHeaders["change-id"]; ok {
127
+
c.ChangeId = string(v)
128
+
}
129
+
}
130
+
}
131
+
132
+
func isEmptySignature(s object.Signature) bool {
133
+
return s.Email == "" && s.Name == "" && s.When.IsZero()
134
+
}
135
+
136
+
// produce a verifiable payload from this commit's metadata
137
+
func (c *Commit) Payload() string {
138
+
author := bytes.NewBuffer([]byte{})
139
+
c.Author.Encode(author)
140
+
141
+
committer := bytes.NewBuffer([]byte{})
142
+
c.Committer.Encode(committer)
143
+
144
+
payload := strings.Builder{}
145
+
146
+
fmt.Fprintf(&payload, "tree %s\n", c.Tree)
147
+
148
+
if len(c.ParentHashes) > 0 {
149
+
for _, p := range c.ParentHashes {
150
+
fmt.Fprintf(&payload, "parent %s\n", p.String())
151
+
}
152
+
} else {
153
+
// present for backwards compatibility
154
+
fmt.Fprintf(&payload, "parent %s\n", c.Parent)
155
+
}
156
+
157
+
fmt.Fprintf(&payload, "author %s\n", author.String())
158
+
fmt.Fprintf(&payload, "committer %s\n", committer.String())
159
+
160
+
if c.ChangeId != "" {
161
+
fmt.Fprintf(&payload, "change-id %s\n", c.ChangeId)
162
+
} else if v, ok := c.ExtraHeaders["change-id"]; ok {
163
+
fmt.Fprintf(&payload, "change-id %s\n", string(v))
164
+
}
165
+
166
+
fmt.Fprintf(&payload, "\n%s", c.Message)
167
+
168
+
return payload.String()
169
+
}
170
+
171
+
var (
172
+
coAuthorRegex = regexp.MustCompile(`(?im)^Co-authored-by:\s*(.+?)\s*<([^>]+)>`)
173
+
)
174
+
175
+
func (commit Commit) CoAuthors() []object.Signature {
176
+
var coAuthors []object.Signature
177
+
seen := make(map[string]bool)
178
+
matches := coAuthorRegex.FindAllStringSubmatch(commit.Message, -1)
179
+
180
+
for _, match := range matches {
181
+
if len(match) >= 3 {
182
+
name := strings.TrimSpace(match[1])
183
+
email := strings.TrimSpace(match[2])
184
+
185
+
if seen[email] {
186
+
continue
187
+
}
188
+
seen[email] = true
189
+
190
+
coAuthors = append(coAuthors, object.Signature{
191
+
Name: name,
192
+
Email: email,
193
+
When: commit.Committer.When,
194
+
})
195
+
}
196
+
}
197
+
198
+
return coAuthors
199
+
}
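
A small sketch of the trailer parsing above: CoAuthors extracts deduplicated Co-authored-by signatures from the commit message. The names and addresses are invented, and the import path assumes the package lives at tangled.org/core/types.

```go
package main

import (
	"fmt"

	"tangled.org/core/types"
)

func main() {
	c := types.Commit{
		// duplicate Alice trailer is deduplicated by email
		Message: "fix parser\n\n" +
			"Co-authored-by: Alice <alice@example.org>\n" +
			"Co-authored-by: Bob <bob@example.org>\n" +
			"Co-authored-by: Alice <alice@example.org>\n",
	}

	for _, sig := range c.CoAuthors() {
		fmt.Printf("%s <%s>\n", sig.Name, sig.Email)
	}
	// Alice <alice@example.org>
	// Bob <bob@example.org>
}
```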
+2
-12
types/diff.go
···
2
2
3
3
import (
4
4
"github.com/bluekeyes/go-gitdiff/gitdiff"
5
-
"github.com/go-git/go-git/v5/plumbing/object"
6
5
)
7
6
8
7
type DiffOpts struct {
···
43
42
44
43
// A nicer git diff representation.
45
44
type NiceDiff struct {
46
-
Commit struct {
47
-
Message string `json:"message"`
48
-
Author object.Signature `json:"author"`
49
-
This string `json:"this"`
50
-
Parent string `json:"parent"`
51
-
PGPSignature string `json:"pgp_signature"`
52
-
Committer object.Signature `json:"committer"`
53
-
Tree string `json:"tree"`
54
-
ChangedId string `json:"change_id"`
55
-
} `json:"commit"`
56
-
Stat struct {
45
+
Commit Commit `json:"commit"`
46
+
Stat struct {
57
47
FilesChanged int `json:"files_changed"`
58
48
Insertions int `json:"insertions"`
59
49
Deletions int `json:"deletions"`
+17
-17
types/repo.go
···
8
8
)
9
9
10
10
type RepoIndexResponse struct {
11
-
IsEmpty bool `json:"is_empty"`
12
-
Ref string `json:"ref,omitempty"`
13
-
Readme string `json:"readme,omitempty"`
14
-
ReadmeFileName string `json:"readme_file_name,omitempty"`
15
-
Commits []*object.Commit `json:"commits,omitempty"`
16
-
Description string `json:"description,omitempty"`
17
-
Files []NiceTree `json:"files,omitempty"`
18
-
Branches []Branch `json:"branches,omitempty"`
19
-
Tags []*TagReference `json:"tags,omitempty"`
20
-
TotalCommits int `json:"total_commits,omitempty"`
11
+
IsEmpty bool `json:"is_empty"`
12
+
Ref string `json:"ref,omitempty"`
13
+
Readme string `json:"readme,omitempty"`
14
+
ReadmeFileName string `json:"readme_file_name,omitempty"`
15
+
Commits []Commit `json:"commits,omitempty"`
16
+
Description string `json:"description,omitempty"`
17
+
Files []NiceTree `json:"files,omitempty"`
18
+
Branches []Branch `json:"branches,omitempty"`
19
+
Tags []*TagReference `json:"tags,omitempty"`
20
+
TotalCommits int `json:"total_commits,omitempty"`
21
21
}
22
22
23
23
type RepoLogResponse struct {
24
-
Commits []*object.Commit `json:"commits,omitempty"`
25
-
Ref string `json:"ref,omitempty"`
26
-
Description string `json:"description,omitempty"`
27
-
Log bool `json:"log,omitempty"`
28
-
Total int `json:"total,omitempty"`
29
-
Page int `json:"page,omitempty"`
30
-
PerPage int `json:"per_page,omitempty"`
24
+
Commits []Commit `json:"commits,omitempty"`
25
+
Ref string `json:"ref,omitempty"`
26
+
Description string `json:"description,omitempty"`
27
+
Log bool `json:"log,omitempty"`
28
+
Total int `json:"total,omitempty"`
29
+
Page int `json:"page,omitempty"`
30
+
PerPage int `json:"per_page,omitempty"`
31
31
}
32
32
33
33
type RepoCommitResponse struct {