+2
.tangled/workflows/build.yml
+2
.tangled/workflows/build.yml
+2
.tangled/workflows/fmt.yml
+2
.tangled/workflows/fmt.yml
+2
.tangled/workflows/test.yml
+2
.tangled/workflows/test.yml
+55
-722
api/tangled/cbor_gen.go
+55
-722
api/tangled/cbor_gen.go
···
2728
2728
2729
2729
return nil
2730
2730
}
2731
-
func (t *Pipeline_Dependency) MarshalCBOR(w io.Writer) error {
2732
-
if t == nil {
2733
-
_, err := w.Write(cbg.CborNull)
2734
-
return err
2735
-
}
2736
-
2737
-
cw := cbg.NewCborWriter(w)
2738
-
2739
-
if _, err := cw.Write([]byte{162}); err != nil {
2740
-
return err
2741
-
}
2742
-
2743
-
// t.Packages ([]string) (slice)
2744
-
if len("packages") > 1000000 {
2745
-
return xerrors.Errorf("Value in field \"packages\" was too long")
2746
-
}
2747
-
2748
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("packages"))); err != nil {
2749
-
return err
2750
-
}
2751
-
if _, err := cw.WriteString(string("packages")); err != nil {
2752
-
return err
2753
-
}
2754
-
2755
-
if len(t.Packages) > 8192 {
2756
-
return xerrors.Errorf("Slice value in field t.Packages was too long")
2757
-
}
2758
-
2759
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Packages))); err != nil {
2760
-
return err
2761
-
}
2762
-
for _, v := range t.Packages {
2763
-
if len(v) > 1000000 {
2764
-
return xerrors.Errorf("Value in field v was too long")
2765
-
}
2766
-
2767
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
2768
-
return err
2769
-
}
2770
-
if _, err := cw.WriteString(string(v)); err != nil {
2771
-
return err
2772
-
}
2773
-
2774
-
}
2775
-
2776
-
// t.Registry (string) (string)
2777
-
if len("registry") > 1000000 {
2778
-
return xerrors.Errorf("Value in field \"registry\" was too long")
2779
-
}
2780
-
2781
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("registry"))); err != nil {
2782
-
return err
2783
-
}
2784
-
if _, err := cw.WriteString(string("registry")); err != nil {
2785
-
return err
2786
-
}
2787
-
2788
-
if len(t.Registry) > 1000000 {
2789
-
return xerrors.Errorf("Value in field t.Registry was too long")
2790
-
}
2791
-
2792
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Registry))); err != nil {
2793
-
return err
2794
-
}
2795
-
if _, err := cw.WriteString(string(t.Registry)); err != nil {
2796
-
return err
2797
-
}
2798
-
return nil
2799
-
}
2800
-
2801
-
func (t *Pipeline_Dependency) UnmarshalCBOR(r io.Reader) (err error) {
2802
-
*t = Pipeline_Dependency{}
2803
-
2804
-
cr := cbg.NewCborReader(r)
2805
-
2806
-
maj, extra, err := cr.ReadHeader()
2807
-
if err != nil {
2808
-
return err
2809
-
}
2810
-
defer func() {
2811
-
if err == io.EOF {
2812
-
err = io.ErrUnexpectedEOF
2813
-
}
2814
-
}()
2815
-
2816
-
if maj != cbg.MajMap {
2817
-
return fmt.Errorf("cbor input should be of type map")
2818
-
}
2819
-
2820
-
if extra > cbg.MaxLength {
2821
-
return fmt.Errorf("Pipeline_Dependency: map struct too large (%d)", extra)
2822
-
}
2823
-
2824
-
n := extra
2825
-
2826
-
nameBuf := make([]byte, 8)
2827
-
for i := uint64(0); i < n; i++ {
2828
-
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
2829
-
if err != nil {
2830
-
return err
2831
-
}
2832
-
2833
-
if !ok {
2834
-
// Field doesn't exist on this type, so ignore it
2835
-
if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
2836
-
return err
2837
-
}
2838
-
continue
2839
-
}
2840
-
2841
-
switch string(nameBuf[:nameLen]) {
2842
-
// t.Packages ([]string) (slice)
2843
-
case "packages":
2844
-
2845
-
maj, extra, err = cr.ReadHeader()
2846
-
if err != nil {
2847
-
return err
2848
-
}
2849
-
2850
-
if extra > 8192 {
2851
-
return fmt.Errorf("t.Packages: array too large (%d)", extra)
2852
-
}
2853
-
2854
-
if maj != cbg.MajArray {
2855
-
return fmt.Errorf("expected cbor array")
2856
-
}
2857
-
2858
-
if extra > 0 {
2859
-
t.Packages = make([]string, extra)
2860
-
}
2861
-
2862
-
for i := 0; i < int(extra); i++ {
2863
-
{
2864
-
var maj byte
2865
-
var extra uint64
2866
-
var err error
2867
-
_ = maj
2868
-
_ = extra
2869
-
_ = err
2870
-
2871
-
{
2872
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
2873
-
if err != nil {
2874
-
return err
2875
-
}
2876
-
2877
-
t.Packages[i] = string(sval)
2878
-
}
2879
-
2880
-
}
2881
-
}
2882
-
// t.Registry (string) (string)
2883
-
case "registry":
2884
-
2885
-
{
2886
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
2887
-
if err != nil {
2888
-
return err
2889
-
}
2890
-
2891
-
t.Registry = string(sval)
2892
-
}
2893
-
2894
-
default:
2895
-
// Field doesn't exist on this type, so ignore it
2896
-
if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
2897
-
return err
2898
-
}
2899
-
}
2900
-
}
2901
-
2902
-
return nil
2903
-
}
2904
2731
func (t *Pipeline_ManualTriggerData) MarshalCBOR(w io.Writer) error {
2905
2732
if t == nil {
2906
2733
_, err := w.Write(cbg.CborNull)
···
3916
3743
3917
3744
return nil
3918
3745
}
3919
-
func (t *Pipeline_Step) MarshalCBOR(w io.Writer) error {
3920
-
if t == nil {
3921
-
_, err := w.Write(cbg.CborNull)
3922
-
return err
3923
-
}
3924
-
3925
-
cw := cbg.NewCborWriter(w)
3926
-
fieldCount := 3
3927
-
3928
-
if t.Environment == nil {
3929
-
fieldCount--
3930
-
}
3931
-
3932
-
if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
3933
-
return err
3934
-
}
3935
-
3936
-
// t.Name (string) (string)
3937
-
if len("name") > 1000000 {
3938
-
return xerrors.Errorf("Value in field \"name\" was too long")
3939
-
}
3940
-
3941
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("name"))); err != nil {
3942
-
return err
3943
-
}
3944
-
if _, err := cw.WriteString(string("name")); err != nil {
3945
-
return err
3946
-
}
3947
-
3948
-
if len(t.Name) > 1000000 {
3949
-
return xerrors.Errorf("Value in field t.Name was too long")
3950
-
}
3951
-
3952
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
3953
-
return err
3954
-
}
3955
-
if _, err := cw.WriteString(string(t.Name)); err != nil {
3956
-
return err
3957
-
}
3958
-
3959
-
// t.Command (string) (string)
3960
-
if len("command") > 1000000 {
3961
-
return xerrors.Errorf("Value in field \"command\" was too long")
3962
-
}
3963
-
3964
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("command"))); err != nil {
3965
-
return err
3966
-
}
3967
-
if _, err := cw.WriteString(string("command")); err != nil {
3968
-
return err
3969
-
}
3970
-
3971
-
if len(t.Command) > 1000000 {
3972
-
return xerrors.Errorf("Value in field t.Command was too long")
3973
-
}
3974
-
3975
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Command))); err != nil {
3976
-
return err
3977
-
}
3978
-
if _, err := cw.WriteString(string(t.Command)); err != nil {
3979
-
return err
3980
-
}
3981
-
3982
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
3983
-
if t.Environment != nil {
3984
-
3985
-
if len("environment") > 1000000 {
3986
-
return xerrors.Errorf("Value in field \"environment\" was too long")
3987
-
}
3988
-
3989
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
3990
-
return err
3991
-
}
3992
-
if _, err := cw.WriteString(string("environment")); err != nil {
3993
-
return err
3994
-
}
3995
-
3996
-
if len(t.Environment) > 8192 {
3997
-
return xerrors.Errorf("Slice value in field t.Environment was too long")
3998
-
}
3999
-
4000
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
4001
-
return err
4002
-
}
4003
-
for _, v := range t.Environment {
4004
-
if err := v.MarshalCBOR(cw); err != nil {
4005
-
return err
4006
-
}
4007
-
4008
-
}
4009
-
}
4010
-
return nil
4011
-
}
4012
-
4013
-
func (t *Pipeline_Step) UnmarshalCBOR(r io.Reader) (err error) {
4014
-
*t = Pipeline_Step{}
4015
-
4016
-
cr := cbg.NewCborReader(r)
4017
-
4018
-
maj, extra, err := cr.ReadHeader()
4019
-
if err != nil {
4020
-
return err
4021
-
}
4022
-
defer func() {
4023
-
if err == io.EOF {
4024
-
err = io.ErrUnexpectedEOF
4025
-
}
4026
-
}()
4027
-
4028
-
if maj != cbg.MajMap {
4029
-
return fmt.Errorf("cbor input should be of type map")
4030
-
}
4031
-
4032
-
if extra > cbg.MaxLength {
4033
-
return fmt.Errorf("Pipeline_Step: map struct too large (%d)", extra)
4034
-
}
4035
-
4036
-
n := extra
4037
-
4038
-
nameBuf := make([]byte, 11)
4039
-
for i := uint64(0); i < n; i++ {
4040
-
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4041
-
if err != nil {
4042
-
return err
4043
-
}
4044
-
4045
-
if !ok {
4046
-
// Field doesn't exist on this type, so ignore it
4047
-
if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
4048
-
return err
4049
-
}
4050
-
continue
4051
-
}
4052
-
4053
-
switch string(nameBuf[:nameLen]) {
4054
-
// t.Name (string) (string)
4055
-
case "name":
4056
-
4057
-
{
4058
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4059
-
if err != nil {
4060
-
return err
4061
-
}
4062
-
4063
-
t.Name = string(sval)
4064
-
}
4065
-
// t.Command (string) (string)
4066
-
case "command":
4067
-
4068
-
{
4069
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4070
-
if err != nil {
4071
-
return err
4072
-
}
4073
-
4074
-
t.Command = string(sval)
4075
-
}
4076
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
4077
-
case "environment":
4078
-
4079
-
maj, extra, err = cr.ReadHeader()
4080
-
if err != nil {
4081
-
return err
4082
-
}
4083
-
4084
-
if extra > 8192 {
4085
-
return fmt.Errorf("t.Environment: array too large (%d)", extra)
4086
-
}
4087
-
4088
-
if maj != cbg.MajArray {
4089
-
return fmt.Errorf("expected cbor array")
4090
-
}
4091
-
4092
-
if extra > 0 {
4093
-
t.Environment = make([]*Pipeline_Pair, extra)
4094
-
}
4095
-
4096
-
for i := 0; i < int(extra); i++ {
4097
-
{
4098
-
var maj byte
4099
-
var extra uint64
4100
-
var err error
4101
-
_ = maj
4102
-
_ = extra
4103
-
_ = err
4104
-
4105
-
{
4106
-
4107
-
b, err := cr.ReadByte()
4108
-
if err != nil {
4109
-
return err
4110
-
}
4111
-
if b != cbg.CborNull[0] {
4112
-
if err := cr.UnreadByte(); err != nil {
4113
-
return err
4114
-
}
4115
-
t.Environment[i] = new(Pipeline_Pair)
4116
-
if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
4117
-
return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
4118
-
}
4119
-
}
4120
-
4121
-
}
4122
-
4123
-
}
4124
-
}
4125
-
4126
-
default:
4127
-
// Field doesn't exist on this type, so ignore it
4128
-
if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
4129
-
return err
4130
-
}
4131
-
}
4132
-
}
4133
-
4134
-
return nil
4135
-
}
4136
3746
func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
4137
3747
if t == nil {
4138
3748
_, err := w.Write(cbg.CborNull)
···
4609
4219
4610
4220
cw := cbg.NewCborWriter(w)
4611
4221
4612
-
if _, err := cw.Write([]byte{165}); err != nil {
4222
+
if _, err := cw.Write([]byte{164}); err != nil {
4223
+
return err
4224
+
}
4225
+
4226
+
// t.Raw (string) (string)
4227
+
if len("raw") > 1000000 {
4228
+
return xerrors.Errorf("Value in field \"raw\" was too long")
4229
+
}
4230
+
4231
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("raw"))); err != nil {
4232
+
return err
4233
+
}
4234
+
if _, err := cw.WriteString(string("raw")); err != nil {
4235
+
return err
4236
+
}
4237
+
4238
+
if len(t.Raw) > 1000000 {
4239
+
return xerrors.Errorf("Value in field t.Raw was too long")
4240
+
}
4241
+
4242
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Raw))); err != nil {
4243
+
return err
4244
+
}
4245
+
if _, err := cw.WriteString(string(t.Raw)); err != nil {
4613
4246
return err
4614
4247
}
4615
4248
···
4652
4285
return err
4653
4286
}
4654
4287
4655
-
// t.Steps ([]*tangled.Pipeline_Step) (slice)
4656
-
if len("steps") > 1000000 {
4657
-
return xerrors.Errorf("Value in field \"steps\" was too long")
4288
+
// t.Engine (string) (string)
4289
+
if len("engine") > 1000000 {
4290
+
return xerrors.Errorf("Value in field \"engine\" was too long")
4658
4291
}
4659
4292
4660
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("steps"))); err != nil {
4293
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("engine"))); err != nil {
4661
4294
return err
4662
4295
}
4663
-
if _, err := cw.WriteString(string("steps")); err != nil {
4296
+
if _, err := cw.WriteString(string("engine")); err != nil {
4664
4297
return err
4665
4298
}
4666
4299
4667
-
if len(t.Steps) > 8192 {
4668
-
return xerrors.Errorf("Slice value in field t.Steps was too long")
4300
+
if len(t.Engine) > 1000000 {
4301
+
return xerrors.Errorf("Value in field t.Engine was too long")
4669
4302
}
4670
4303
4671
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Steps))); err != nil {
4304
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Engine))); err != nil {
4672
4305
return err
4673
4306
}
4674
-
for _, v := range t.Steps {
4675
-
if err := v.MarshalCBOR(cw); err != nil {
4676
-
return err
4677
-
}
4678
-
4679
-
}
4680
-
4681
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
4682
-
if len("environment") > 1000000 {
4683
-
return xerrors.Errorf("Value in field \"environment\" was too long")
4684
-
}
4685
-
4686
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
4307
+
if _, err := cw.WriteString(string(t.Engine)); err != nil {
4687
4308
return err
4688
4309
}
4689
-
if _, err := cw.WriteString(string("environment")); err != nil {
4690
-
return err
4691
-
}
4692
-
4693
-
if len(t.Environment) > 8192 {
4694
-
return xerrors.Errorf("Slice value in field t.Environment was too long")
4695
-
}
4696
-
4697
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
4698
-
return err
4699
-
}
4700
-
for _, v := range t.Environment {
4701
-
if err := v.MarshalCBOR(cw); err != nil {
4702
-
return err
4703
-
}
4704
-
4705
-
}
4706
-
4707
-
// t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
4708
-
if len("dependencies") > 1000000 {
4709
-
return xerrors.Errorf("Value in field \"dependencies\" was too long")
4710
-
}
4711
-
4712
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependencies"))); err != nil {
4713
-
return err
4714
-
}
4715
-
if _, err := cw.WriteString(string("dependencies")); err != nil {
4716
-
return err
4717
-
}
4718
-
4719
-
if len(t.Dependencies) > 8192 {
4720
-
return xerrors.Errorf("Slice value in field t.Dependencies was too long")
4721
-
}
4722
-
4723
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Dependencies))); err != nil {
4724
-
return err
4725
-
}
4726
-
for _, v := range t.Dependencies {
4727
-
if err := v.MarshalCBOR(cw); err != nil {
4728
-
return err
4729
-
}
4730
-
4731
-
}
4732
4310
return nil
4733
4311
}
4734
4312
···
4757
4335
4758
4336
n := extra
4759
4337
4760
-
nameBuf := make([]byte, 12)
4338
+
nameBuf := make([]byte, 6)
4761
4339
for i := uint64(0); i < n; i++ {
4762
4340
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4763
4341
if err != nil {
···
4773
4351
}
4774
4352
4775
4353
switch string(nameBuf[:nameLen]) {
4776
-
// t.Name (string) (string)
4354
+
// t.Raw (string) (string)
4355
+
case "raw":
4356
+
4357
+
{
4358
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4359
+
if err != nil {
4360
+
return err
4361
+
}
4362
+
4363
+
t.Raw = string(sval)
4364
+
}
4365
+
// t.Name (string) (string)
4777
4366
case "name":
4778
4367
4779
4368
{
···
4804
4393
}
4805
4394
4806
4395
}
4807
-
// t.Steps ([]*tangled.Pipeline_Step) (slice)
4808
-
case "steps":
4809
-
4810
-
maj, extra, err = cr.ReadHeader()
4811
-
if err != nil {
4812
-
return err
4813
-
}
4814
-
4815
-
if extra > 8192 {
4816
-
return fmt.Errorf("t.Steps: array too large (%d)", extra)
4817
-
}
4818
-
4819
-
if maj != cbg.MajArray {
4820
-
return fmt.Errorf("expected cbor array")
4821
-
}
4822
-
4823
-
if extra > 0 {
4824
-
t.Steps = make([]*Pipeline_Step, extra)
4825
-
}
4826
-
4827
-
for i := 0; i < int(extra); i++ {
4828
-
{
4829
-
var maj byte
4830
-
var extra uint64
4831
-
var err error
4832
-
_ = maj
4833
-
_ = extra
4834
-
_ = err
4835
-
4836
-
{
4837
-
4838
-
b, err := cr.ReadByte()
4839
-
if err != nil {
4840
-
return err
4841
-
}
4842
-
if b != cbg.CborNull[0] {
4843
-
if err := cr.UnreadByte(); err != nil {
4844
-
return err
4845
-
}
4846
-
t.Steps[i] = new(Pipeline_Step)
4847
-
if err := t.Steps[i].UnmarshalCBOR(cr); err != nil {
4848
-
return xerrors.Errorf("unmarshaling t.Steps[i] pointer: %w", err)
4849
-
}
4850
-
}
4851
-
4852
-
}
4853
-
4854
-
}
4855
-
}
4856
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
4857
-
case "environment":
4858
-
4859
-
maj, extra, err = cr.ReadHeader()
4860
-
if err != nil {
4861
-
return err
4862
-
}
4863
-
4864
-
if extra > 8192 {
4865
-
return fmt.Errorf("t.Environment: array too large (%d)", extra)
4866
-
}
4867
-
4868
-
if maj != cbg.MajArray {
4869
-
return fmt.Errorf("expected cbor array")
4870
-
}
4871
-
4872
-
if extra > 0 {
4873
-
t.Environment = make([]*Pipeline_Pair, extra)
4874
-
}
4875
-
4876
-
for i := 0; i < int(extra); i++ {
4877
-
{
4878
-
var maj byte
4879
-
var extra uint64
4880
-
var err error
4881
-
_ = maj
4882
-
_ = extra
4883
-
_ = err
4884
-
4885
-
{
4886
-
4887
-
b, err := cr.ReadByte()
4888
-
if err != nil {
4889
-
return err
4890
-
}
4891
-
if b != cbg.CborNull[0] {
4892
-
if err := cr.UnreadByte(); err != nil {
4893
-
return err
4894
-
}
4895
-
t.Environment[i] = new(Pipeline_Pair)
4896
-
if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
4897
-
return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
4898
-
}
4899
-
}
4396
+
// t.Engine (string) (string)
4397
+
case "engine":
4900
4398
4901
-
}
4902
-
4399
+
{
4400
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4401
+
if err != nil {
4402
+
return err
4903
4403
}
4904
-
}
4905
-
// t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
4906
-
case "dependencies":
4907
4404
4908
-
maj, extra, err = cr.ReadHeader()
4909
-
if err != nil {
4910
-
return err
4911
-
}
4912
-
4913
-
if extra > 8192 {
4914
-
return fmt.Errorf("t.Dependencies: array too large (%d)", extra)
4915
-
}
4916
-
4917
-
if maj != cbg.MajArray {
4918
-
return fmt.Errorf("expected cbor array")
4919
-
}
4920
-
4921
-
if extra > 0 {
4922
-
t.Dependencies = make([]*Pipeline_Dependency, extra)
4923
-
}
4924
-
4925
-
for i := 0; i < int(extra); i++ {
4926
-
{
4927
-
var maj byte
4928
-
var extra uint64
4929
-
var err error
4930
-
_ = maj
4931
-
_ = extra
4932
-
_ = err
4933
-
4934
-
{
4935
-
4936
-
b, err := cr.ReadByte()
4937
-
if err != nil {
4938
-
return err
4939
-
}
4940
-
if b != cbg.CborNull[0] {
4941
-
if err := cr.UnreadByte(); err != nil {
4942
-
return err
4943
-
}
4944
-
t.Dependencies[i] = new(Pipeline_Dependency)
4945
-
if err := t.Dependencies[i].UnmarshalCBOR(cr); err != nil {
4946
-
return xerrors.Errorf("unmarshaling t.Dependencies[i] pointer: %w", err)
4947
-
}
4948
-
}
4949
-
4950
-
}
4951
-
4952
-
}
4405
+
t.Engine = string(sval)
4953
4406
}
4954
4407
4955
4408
default:
···
6059
5512
}
6060
5513
6061
5514
cw := cbg.NewCborWriter(w)
6062
-
fieldCount := 7
5515
+
fieldCount := 6
6063
5516
6064
5517
if t.Body == nil {
6065
5518
fieldCount--
···
6189
5642
return err
6190
5643
}
6191
5644
6192
-
// t.IssueId (int64) (int64)
6193
-
if len("issueId") > 1000000 {
6194
-
return xerrors.Errorf("Value in field \"issueId\" was too long")
6195
-
}
6196
-
6197
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("issueId"))); err != nil {
6198
-
return err
6199
-
}
6200
-
if _, err := cw.WriteString(string("issueId")); err != nil {
6201
-
return err
6202
-
}
6203
-
6204
-
if t.IssueId >= 0 {
6205
-
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.IssueId)); err != nil {
6206
-
return err
6207
-
}
6208
-
} else {
6209
-
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.IssueId-1)); err != nil {
6210
-
return err
6211
-
}
6212
-
}
6213
-
6214
5645
// t.CreatedAt (string) (string)
6215
5646
if len("createdAt") > 1000000 {
6216
5647
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
6342
5773
6343
5774
t.Title = string(sval)
6344
5775
}
6345
-
// t.IssueId (int64) (int64)
6346
-
case "issueId":
6347
-
{
6348
-
maj, extra, err := cr.ReadHeader()
6349
-
if err != nil {
6350
-
return err
6351
-
}
6352
-
var extraI int64
6353
-
switch maj {
6354
-
case cbg.MajUnsignedInt:
6355
-
extraI = int64(extra)
6356
-
if extraI < 0 {
6357
-
return fmt.Errorf("int64 positive overflow")
6358
-
}
6359
-
case cbg.MajNegativeInt:
6360
-
extraI = int64(extra)
6361
-
if extraI < 0 {
6362
-
return fmt.Errorf("int64 negative overflow")
6363
-
}
6364
-
extraI = -1 - extraI
6365
-
default:
6366
-
return fmt.Errorf("wrong type for int64 field: %d", maj)
6367
-
}
6368
-
6369
-
t.IssueId = int64(extraI)
6370
-
}
6371
5776
// t.CreatedAt (string) (string)
6372
5777
case "createdAt":
6373
5778
···
6397
5802
}
6398
5803
6399
5804
cw := cbg.NewCborWriter(w)
6400
-
fieldCount := 7
6401
-
6402
-
if t.CommentId == nil {
6403
-
fieldCount--
6404
-
}
5805
+
fieldCount := 6
6405
5806
6406
5807
if t.Owner == nil {
6407
5808
fieldCount--
···
6544
5945
}
6545
5946
}
6546
5947
6547
-
// t.CommentId (int64) (int64)
6548
-
if t.CommentId != nil {
6549
-
6550
-
if len("commentId") > 1000000 {
6551
-
return xerrors.Errorf("Value in field \"commentId\" was too long")
6552
-
}
6553
-
6554
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
6555
-
return err
6556
-
}
6557
-
if _, err := cw.WriteString(string("commentId")); err != nil {
6558
-
return err
6559
-
}
6560
-
6561
-
if t.CommentId == nil {
6562
-
if _, err := cw.Write(cbg.CborNull); err != nil {
6563
-
return err
6564
-
}
6565
-
} else {
6566
-
if *t.CommentId >= 0 {
6567
-
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
6568
-
return err
6569
-
}
6570
-
} else {
6571
-
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
6572
-
return err
6573
-
}
6574
-
}
6575
-
}
6576
-
6577
-
}
6578
-
6579
5948
// t.CreatedAt (string) (string)
6580
5949
if len("createdAt") > 1000000 {
6581
5950
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
6715
6084
}
6716
6085
6717
6086
t.Owner = (*string)(&sval)
6718
-
}
6719
-
}
6720
-
// t.CommentId (int64) (int64)
6721
-
case "commentId":
6722
-
{
6723
-
6724
-
b, err := cr.ReadByte()
6725
-
if err != nil {
6726
-
return err
6727
-
}
6728
-
if b != cbg.CborNull[0] {
6729
-
if err := cr.UnreadByte(); err != nil {
6730
-
return err
6731
-
}
6732
-
maj, extra, err := cr.ReadHeader()
6733
-
if err != nil {
6734
-
return err
6735
-
}
6736
-
var extraI int64
6737
-
switch maj {
6738
-
case cbg.MajUnsignedInt:
6739
-
extraI = int64(extra)
6740
-
if extraI < 0 {
6741
-
return fmt.Errorf("int64 positive overflow")
6742
-
}
6743
-
case cbg.MajNegativeInt:
6744
-
extraI = int64(extra)
6745
-
if extraI < 0 {
6746
-
return fmt.Errorf("int64 negative overflow")
6747
-
}
6748
-
extraI = -1 - extraI
6749
-
default:
6750
-
return fmt.Errorf("wrong type for int64 field: %d", maj)
6751
-
}
6752
-
6753
-
t.CommentId = (*int64)(&extraI)
6754
6087
}
6755
6088
}
6756
6089
// t.CreatedAt (string) (string)
-1
api/tangled/issuecomment.go
-1
api/tangled/issuecomment.go
···
19
19
type RepoIssueComment struct {
20
20
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
21
21
Body string `json:"body" cborgen:"body"`
22
-
CommentId *int64 `json:"commentId,omitempty" cborgen:"commentId,omitempty"`
23
22
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
24
23
Issue string `json:"issue" cborgen:"issue"`
25
24
Owner *string `json:"owner,omitempty" cborgen:"owner,omitempty"`
-1
api/tangled/repoissue.go
-1
api/tangled/repoissue.go
···
20
20
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
21
21
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22
22
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
-
IssueId int64 `json:"issueId" cborgen:"issueId"`
24
23
Owner string `json:"owner" cborgen:"owner"`
25
24
Repo string `json:"repo" cborgen:"repo"`
26
25
Title string `json:"title" cborgen:"title"`
+4
-18
api/tangled/tangledpipeline.go
+4
-18
api/tangled/tangledpipeline.go
···
29
29
Submodules bool `json:"submodules" cborgen:"submodules"`
30
30
}
31
31
32
-
// Pipeline_Dependency is a "dependency" in the sh.tangled.pipeline schema.
33
-
type Pipeline_Dependency struct {
34
-
Packages []string `json:"packages" cborgen:"packages"`
35
-
Registry string `json:"registry" cborgen:"registry"`
36
-
}
37
-
38
32
// Pipeline_ManualTriggerData is a "manualTriggerData" in the sh.tangled.pipeline schema.
39
33
type Pipeline_ManualTriggerData struct {
40
34
Inputs []*Pipeline_Pair `json:"inputs,omitempty" cborgen:"inputs,omitempty"`
···
61
55
Ref string `json:"ref" cborgen:"ref"`
62
56
}
63
57
64
-
// Pipeline_Step is a "step" in the sh.tangled.pipeline schema.
65
-
type Pipeline_Step struct {
66
-
Command string `json:"command" cborgen:"command"`
67
-
Environment []*Pipeline_Pair `json:"environment,omitempty" cborgen:"environment,omitempty"`
68
-
Name string `json:"name" cborgen:"name"`
69
-
}
70
-
71
58
// Pipeline_TriggerMetadata is a "triggerMetadata" in the sh.tangled.pipeline schema.
72
59
type Pipeline_TriggerMetadata struct {
73
60
Kind string `json:"kind" cborgen:"kind"`
···
87
74
88
75
// Pipeline_Workflow is a "workflow" in the sh.tangled.pipeline schema.
89
76
type Pipeline_Workflow struct {
90
-
Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
91
-
Dependencies []*Pipeline_Dependency `json:"dependencies" cborgen:"dependencies"`
92
-
Environment []*Pipeline_Pair `json:"environment" cborgen:"environment"`
93
-
Name string `json:"name" cborgen:"name"`
94
-
Steps []*Pipeline_Step `json:"steps" cborgen:"steps"`
77
+
Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
78
+
Engine string `json:"engine" cborgen:"engine"`
79
+
Name string `json:"name" cborgen:"name"`
80
+
Raw string `json:"raw" cborgen:"raw"`
95
81
}
+15
appview/db/db.go
+15
appview/db/db.go
···
470
470
id integer primary key autoincrement,
471
471
name text unique
472
472
);
473
+
474
+
-- indexes for better star query performance
475
+
create index if not exists idx_stars_created on stars(created);
476
+
create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
473
477
`)
474
478
if err != nil {
475
479
return nil, err
···
660
664
661
665
// rename new table
662
666
_, err = tx.Exec(`alter table collaborators_new rename to collaborators`)
667
+
return err
668
+
})
669
+
670
+
runMigration(conn, "add-rkey-to-issues", func(tx *sql.Tx) error {
671
+
_, err := tx.Exec(`
672
+
alter table issues add column rkey text not null default '';
673
+
674
+
-- get last url section from issue_at and save to rkey column
675
+
update issues
676
+
set rkey = replace(issue_at, rtrim(issue_at, replace(issue_at, '/', '')), '');
677
+
`)
663
678
return err
664
679
})
665
680
+208
-17
appview/db/issues.go
+208
-17
appview/db/issues.go
···
2
2
3
3
import (
4
4
"database/sql"
5
+
"fmt"
6
+
mathrand "math/rand/v2"
7
+
"strings"
5
8
"time"
6
9
7
10
"github.com/bluesky-social/indigo/atproto/syntax"
11
+
"tangled.sh/tangled.sh/core/api/tangled"
8
12
"tangled.sh/tangled.sh/core/appview/pagination"
9
13
)
10
14
···
13
17
RepoAt syntax.ATURI
14
18
OwnerDid string
15
19
IssueId int
16
-
IssueAt string
20
+
Rkey string
17
21
Created time.Time
18
22
Title string
19
23
Body string
···
42
46
Edited *time.Time
43
47
}
44
48
49
+
func (i *Issue) AtUri() syntax.ATURI {
50
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.OwnerDid, tangled.RepoIssueNSID, i.Rkey))
51
+
}
52
+
53
+
func IssueFromRecord(did, rkey string, record tangled.RepoIssue) Issue {
54
+
created, err := time.Parse(time.RFC3339, record.CreatedAt)
55
+
if err != nil {
56
+
created = time.Now()
57
+
}
58
+
59
+
body := ""
60
+
if record.Body != nil {
61
+
body = *record.Body
62
+
}
63
+
64
+
return Issue{
65
+
RepoAt: syntax.ATURI(record.Repo),
66
+
OwnerDid: record.Owner,
67
+
Rkey: rkey,
68
+
Created: created,
69
+
Title: record.Title,
70
+
Body: body,
71
+
Open: true, // new issues are open by default
72
+
}
73
+
}
74
+
75
+
func ResolveIssueFromAtUri(e Execer, issueUri syntax.ATURI) (syntax.ATURI, int, error) {
76
+
ownerDid := issueUri.Authority().String()
77
+
issueRkey := issueUri.RecordKey().String()
78
+
79
+
var repoAt string
80
+
var issueId int
81
+
82
+
query := `select repo_at, issue_id from issues where owner_did = ? and rkey = ?`
83
+
err := e.QueryRow(query, ownerDid, issueRkey).Scan(&repoAt, &issueId)
84
+
if err != nil {
85
+
return "", 0, err
86
+
}
87
+
88
+
return syntax.ATURI(repoAt), issueId, nil
89
+
}
90
+
91
+
func IssueCommentFromRecord(e Execer, did, rkey string, record tangled.RepoIssueComment) (Comment, error) {
92
+
created, err := time.Parse(time.RFC3339, record.CreatedAt)
93
+
if err != nil {
94
+
created = time.Now()
95
+
}
96
+
97
+
ownerDid := did
98
+
if record.Owner != nil {
99
+
ownerDid = *record.Owner
100
+
}
101
+
102
+
issueUri, err := syntax.ParseATURI(record.Issue)
103
+
if err != nil {
104
+
return Comment{}, err
105
+
}
106
+
107
+
repoAt, issueId, err := ResolveIssueFromAtUri(e, issueUri)
108
+
if err != nil {
109
+
return Comment{}, err
110
+
}
111
+
112
+
comment := Comment{
113
+
OwnerDid: ownerDid,
114
+
RepoAt: repoAt,
115
+
Rkey: rkey,
116
+
Body: record.Body,
117
+
Issue: issueId,
118
+
CommentId: mathrand.IntN(1000000),
119
+
Created: &created,
120
+
}
121
+
122
+
return comment, nil
123
+
}
124
+
45
125
func NewIssue(tx *sql.Tx, issue *Issue) error {
46
126
defer tx.Rollback()
47
127
···
67
147
issue.IssueId = nextId
68
148
69
149
res, err := tx.Exec(`
70
-
insert into issues (repo_at, owner_did, issue_id, title, body)
71
-
values (?, ?, ?, ?, ?)
72
-
`, issue.RepoAt, issue.OwnerDid, issue.IssueId, issue.Title, issue.Body)
150
+
insert into issues (repo_at, owner_did, rkey, issue_at, issue_id, title, body)
151
+
values (?, ?, ?, ?, ?, ?, ?)
152
+
`, issue.RepoAt, issue.OwnerDid, issue.Rkey, issue.AtUri(), issue.IssueId, issue.Title, issue.Body)
73
153
if err != nil {
74
154
return err
75
155
}
···
87
167
return nil
88
168
}
89
169
90
-
func SetIssueAt(e Execer, repoAt syntax.ATURI, issueId int, issueAt string) error {
91
-
_, err := e.Exec(`update issues set issue_at = ? where repo_at = ? and issue_id = ?`, issueAt, repoAt, issueId)
92
-
return err
93
-
}
94
-
95
170
func GetIssueAt(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
96
171
var issueAt string
97
172
err := e.QueryRow(`select issue_at from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&issueAt)
···
104
179
return ownerDid, err
105
180
}
106
181
107
-
func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
182
+
func GetIssuesPaginated(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
108
183
var issues []Issue
109
184
openValue := 0
110
185
if isOpen {
···
117
192
select
118
193
i.id,
119
194
i.owner_did,
195
+
i.rkey,
120
196
i.issue_id,
121
197
i.created,
122
198
i.title,
···
136
212
select
137
213
id,
138
214
owner_did,
215
+
rkey,
139
216
issue_id,
140
217
created,
141
218
title,
142
219
body,
143
220
open,
144
221
comment_count
145
-
from
222
+
from
146
223
numbered_issue
147
-
where
224
+
where
148
225
row_num between ? and ?`,
149
226
repoAt, openValue, page.Offset+1, page.Offset+page.Limit)
150
227
if err != nil {
···
156
233
var issue Issue
157
234
var createdAt string
158
235
var metadata IssueMetadata
159
-
err := rows.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
236
+
err := rows.Scan(&issue.ID, &issue.OwnerDid, &issue.Rkey, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
160
237
if err != nil {
161
238
return nil, err
162
239
}
···
178
255
return issues, nil
179
256
}
180
257
258
+
func GetIssuesWithLimit(e Execer, limit int, filters ...filter) ([]Issue, error) {
259
+
issues := make([]Issue, 0, limit)
260
+
261
+
var conditions []string
262
+
var args []any
263
+
for _, filter := range filters {
264
+
conditions = append(conditions, filter.Condition())
265
+
args = append(args, filter.Arg()...)
266
+
}
267
+
268
+
whereClause := ""
269
+
if conditions != nil {
270
+
whereClause = " where " + strings.Join(conditions, " and ")
271
+
}
272
+
limitClause := ""
273
+
if limit != 0 {
274
+
limitClause = fmt.Sprintf(" limit %d ", limit)
275
+
}
276
+
277
+
query := fmt.Sprintf(
278
+
`select
279
+
i.id,
280
+
i.owner_did,
281
+
i.repo_at,
282
+
i.issue_id,
283
+
i.created,
284
+
i.title,
285
+
i.body,
286
+
i.open
287
+
from
288
+
issues i
289
+
%s
290
+
order by
291
+
i.created desc
292
+
%s`,
293
+
whereClause, limitClause)
294
+
295
+
rows, err := e.Query(query, args...)
296
+
if err != nil {
297
+
return nil, err
298
+
}
299
+
defer rows.Close()
300
+
301
+
for rows.Next() {
302
+
var issue Issue
303
+
var issueCreatedAt string
304
+
err := rows.Scan(
305
+
&issue.ID,
306
+
&issue.OwnerDid,
307
+
&issue.RepoAt,
308
+
&issue.IssueId,
309
+
&issueCreatedAt,
310
+
&issue.Title,
311
+
&issue.Body,
312
+
&issue.Open,
313
+
)
314
+
if err != nil {
315
+
return nil, err
316
+
}
317
+
318
+
issueCreatedTime, err := time.Parse(time.RFC3339, issueCreatedAt)
319
+
if err != nil {
320
+
return nil, err
321
+
}
322
+
issue.Created = issueCreatedTime
323
+
324
+
issues = append(issues, issue)
325
+
}
326
+
327
+
if err := rows.Err(); err != nil {
328
+
return nil, err
329
+
}
330
+
331
+
return issues, nil
332
+
}
333
+
334
+
func GetIssues(e Execer, filters ...filter) ([]Issue, error) {
335
+
return GetIssuesWithLimit(e, 0, filters...)
336
+
}
337
+
181
338
// timeframe here is directly passed into the sql query filter, and any
182
339
// timeframe in the past should be negative; e.g.: "-3 months"
183
340
func GetIssuesByOwnerDid(e Execer, ownerDid string, timeframe string) ([]Issue, error) {
···
187
344
`select
188
345
i.id,
189
346
i.owner_did,
347
+
i.rkey,
190
348
i.repo_at,
191
349
i.issue_id,
192
350
i.created,
···
219
377
err := rows.Scan(
220
378
&issue.ID,
221
379
&issue.OwnerDid,
380
+
&issue.Rkey,
222
381
&issue.RepoAt,
223
382
&issue.IssueId,
224
383
&issueCreatedAt,
···
262
421
}
263
422
264
423
func GetIssue(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, error) {
265
-
query := `select id, owner_did, created, title, body, open from issues where repo_at = ? and issue_id = ?`
424
+
query := `select id, owner_did, rkey, created, title, body, open from issues where repo_at = ? and issue_id = ?`
266
425
row := e.QueryRow(query, repoAt, issueId)
267
426
268
427
var issue Issue
269
428
var createdAt string
270
-
err := row.Scan(&issue.ID, &issue.OwnerDid, &createdAt, &issue.Title, &issue.Body, &issue.Open)
429
+
err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.Rkey, &createdAt, &issue.Title, &issue.Body, &issue.Open)
271
430
if err != nil {
272
431
return nil, err
273
432
}
···
282
441
}
283
442
284
443
func GetIssueWithComments(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, []Comment, error) {
285
-
query := `select id, owner_did, issue_id, created, title, body, open, issue_at from issues where repo_at = ? and issue_id = ?`
444
+
query := `select id, owner_did, rkey, issue_id, created, title, body, open from issues where repo_at = ? and issue_id = ?`
286
445
row := e.QueryRow(query, repoAt, issueId)
287
446
288
447
var issue Issue
289
448
var createdAt string
290
-
err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &issue.IssueAt)
449
+
err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.Rkey, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open)
291
450
if err != nil {
292
451
return nil, nil, err
293
452
}
···
464
623
deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
465
624
where repo_at = ? and issue_id = ? and comment_id = ?
466
625
`, repoAt, issueId, commentId)
626
+
return err
627
+
}
628
+
629
+
func UpdateCommentByRkey(e Execer, ownerDid, rkey, newBody string) error {
630
+
_, err := e.Exec(
631
+
`
632
+
update comments
633
+
set body = ?,
634
+
edited = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
635
+
where owner_did = ? and rkey = ?
636
+
`, newBody, ownerDid, rkey)
637
+
return err
638
+
}
639
+
640
+
func DeleteCommentByRkey(e Execer, ownerDid, rkey string) error {
641
+
_, err := e.Exec(
642
+
`
643
+
update comments
644
+
set body = "",
645
+
deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
646
+
where owner_did = ? and rkey = ?
647
+
`, ownerDid, rkey)
648
+
return err
649
+
}
650
+
651
+
func UpdateIssueByRkey(e Execer, ownerDid, rkey, title, body string) error {
652
+
_, err := e.Exec(`update issues set title = ?, body = ? where owner_did = ? and rkey = ?`, title, body, ownerDid, rkey)
653
+
return err
654
+
}
655
+
656
+
func DeleteIssueByRkey(e Execer, ownerDid, rkey string) error {
657
+
_, err := e.Exec(`delete from issues where owner_did = ? and rkey = ?`, ownerDid, rkey)
467
658
return err
468
659
}
469
660
+22
-3
appview/db/pulls.go
+22
-3
appview/db/pulls.go
···
310
310
return pullId - 1, err
311
311
}
312
312
313
-
func GetPulls(e Execer, filters ...filter) ([]*Pull, error) {
313
+
func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*Pull, error) {
314
314
pulls := make(map[int]*Pull)
315
315
316
316
var conditions []string
···
323
323
whereClause := ""
324
324
if conditions != nil {
325
325
whereClause = " where " + strings.Join(conditions, " and ")
326
+
}
327
+
limitClause := ""
328
+
if limit != 0 {
329
+
limitClause = fmt.Sprintf(" limit %d ", limit)
326
330
}
327
331
328
332
query := fmt.Sprintf(`
···
344
348
from
345
349
pulls
346
350
%s
347
-
`, whereClause)
351
+
order by
352
+
created desc
353
+
%s
354
+
`, whereClause, limitClause)
348
355
349
356
rows, err := e.Query(query, args...)
350
357
if err != nil {
···
412
419
inClause := strings.TrimSuffix(strings.Repeat("?, ", len(pulls)), ", ")
413
420
submissionsQuery := fmt.Sprintf(`
414
421
select
415
-
id, pull_id, round_number, patch, source_rev
422
+
id, pull_id, round_number, patch, created, source_rev
416
423
from
417
424
pull_submissions
418
425
where
···
438
445
for submissionsRows.Next() {
439
446
var s PullSubmission
440
447
var sourceRev sql.NullString
448
+
var createdAt string
441
449
err := submissionsRows.Scan(
442
450
&s.ID,
443
451
&s.PullId,
444
452
&s.RoundNumber,
445
453
&s.Patch,
454
+
&createdAt,
446
455
&sourceRev,
447
456
)
448
457
if err != nil {
449
458
return nil, err
450
459
}
460
+
461
+
createdTime, err := time.Parse(time.RFC3339, createdAt)
462
+
if err != nil {
463
+
return nil, err
464
+
}
465
+
s.Created = createdTime
451
466
452
467
if sourceRev.Valid {
453
468
s.SourceRev = sourceRev.String
···
511
526
})
512
527
513
528
return orderedByPullId, nil
529
+
}
530
+
531
+
func GetPulls(e Execer, filters ...filter) ([]*Pull, error) {
532
+
return GetPullsWithLimit(e, 0, filters...)
514
533
}
515
534
516
535
func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*Pull, error) {
+9
-10
appview/db/repos.go
+9
-10
appview/db/repos.go
···
19
19
Knot string
20
20
Rkey string
21
21
Created time.Time
22
-
AtUri string
23
22
Description string
24
23
Spindle string
25
24
···
391
390
var description, spindle sql.NullString
392
391
393
392
row := e.QueryRow(`
394
-
select did, name, knot, created, at_uri, description, spindle
393
+
select did, name, knot, created, description, spindle, rkey
395
394
from repos
396
395
where did = ? and name = ?
397
396
`,
···
400
399
)
401
400
402
401
var createdAt string
403
-
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &repo.AtUri, &description, &spindle); err != nil {
402
+
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &description, &spindle, &repo.Rkey); err != nil {
404
403
return nil, err
405
404
}
406
405
createdAtTime, _ := time.Parse(time.RFC3339, createdAt)
···
421
420
var repo Repo
422
421
var nullableDescription sql.NullString
423
422
424
-
row := e.QueryRow(`select did, name, knot, created, at_uri, description from repos where at_uri = ?`, atUri)
423
+
row := e.QueryRow(`select did, name, knot, created, rkey, description from repos where at_uri = ?`, atUri)
425
424
426
425
var createdAt string
427
-
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &repo.AtUri, &nullableDescription); err != nil {
426
+
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &repo.Rkey, &nullableDescription); err != nil {
428
427
return nil, err
429
428
}
430
429
createdAtTime, _ := time.Parse(time.RFC3339, createdAt)
···
444
443
`insert into repos
445
444
(did, name, knot, rkey, at_uri, description, source)
446
445
values (?, ?, ?, ?, ?, ?, ?)`,
447
-
repo.Did, repo.Name, repo.Knot, repo.Rkey, repo.AtUri, repo.Description, repo.Source,
446
+
repo.Did, repo.Name, repo.Knot, repo.Rkey, repo.RepoAt().String(), repo.Description, repo.Source,
448
447
)
449
448
return err
450
449
}
···
467
466
var repos []Repo
468
467
469
468
rows, err := e.Query(
470
-
`select did, name, knot, rkey, description, created, at_uri, source
469
+
`select did, name, knot, rkey, description, created, source
471
470
from repos
472
471
where did = ? and source is not null and source != ''
473
472
order by created desc`,
···
484
483
var nullableDescription sql.NullString
485
484
var nullableSource sql.NullString
486
485
487
-
err := rows.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &repo.AtUri, &nullableSource)
486
+
err := rows.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &nullableSource)
488
487
if err != nil {
489
488
return nil, err
490
489
}
···
521
520
var nullableSource sql.NullString
522
521
523
522
row := e.QueryRow(
524
-
`select did, name, knot, rkey, description, created, at_uri, source
523
+
`select did, name, knot, rkey, description, created, source
525
524
from repos
526
525
where did = ? and name = ? and source is not null and source != ''`,
527
526
did, name,
528
527
)
529
528
530
-
err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &repo.AtUri, &nullableSource)
529
+
err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &nullableSource)
531
530
if err != nil {
532
531
return nil, err
533
532
}
+73
-6
appview/db/star.go
+73
-6
appview/db/star.go
···
47
47
// Get a star record
48
48
func GetStar(e Execer, starredByDid string, repoAt syntax.ATURI) (*Star, error) {
49
49
query := `
50
-
select starred_by_did, repo_at, created, rkey
50
+
select starred_by_did, repo_at, created, rkey
51
51
from stars
52
52
where starred_by_did = ? and repo_at = ?`
53
53
row := e.QueryRow(query, starredByDid, repoAt)
···
119
119
}
120
120
121
121
repoQuery := fmt.Sprintf(
122
-
`select starred_by_did, repo_at, created, rkey
122
+
`select starred_by_did, repo_at, created, rkey
123
123
from stars
124
124
%s
125
125
order by created desc
···
187
187
var stars []Star
188
188
189
189
rows, err := e.Query(`
190
-
select
190
+
select
191
191
s.starred_by_did,
192
192
s.repo_at,
193
193
s.rkey,
···
196
196
r.name,
197
197
r.knot,
198
198
r.rkey,
199
-
r.created,
200
-
r.at_uri
199
+
r.created
201
200
from stars s
202
201
join repos r on s.repo_at = r.at_uri
203
202
`)
···
222
221
&repo.Knot,
223
222
&repo.Rkey,
224
223
&repoCreatedAt,
225
-
&repo.AtUri,
226
224
); err != nil {
227
225
return nil, err
228
226
}
···
246
244
247
245
return stars, nil
248
246
}
247
+
248
+
// GetTopStarredReposLastWeek returns the top 8 most starred repositories from the last week
249
+
func GetTopStarredReposLastWeek(e Execer) ([]Repo, error) {
250
+
// first, get the top repo URIs by star count from the last week
251
+
query := `
252
+
with recent_starred_repos as (
253
+
select distinct repo_at
254
+
from stars
255
+
where created >= datetime('now', '-7 days')
256
+
),
257
+
repo_star_counts as (
258
+
select
259
+
s.repo_at,
260
+
count(*) as star_count
261
+
from stars s
262
+
join recent_starred_repos rsr on s.repo_at = rsr.repo_at
263
+
group by s.repo_at
264
+
)
265
+
select rsc.repo_at
266
+
from repo_star_counts rsc
267
+
order by rsc.star_count desc
268
+
limit 8
269
+
`
270
+
271
+
rows, err := e.Query(query)
272
+
if err != nil {
273
+
return nil, err
274
+
}
275
+
defer rows.Close()
276
+
277
+
var repoUris []string
278
+
for rows.Next() {
279
+
var repoUri string
280
+
err := rows.Scan(&repoUri)
281
+
if err != nil {
282
+
return nil, err
283
+
}
284
+
repoUris = append(repoUris, repoUri)
285
+
}
286
+
287
+
if err := rows.Err(); err != nil {
288
+
return nil, err
289
+
}
290
+
291
+
if len(repoUris) == 0 {
292
+
return []Repo{}, nil
293
+
}
294
+
295
+
// get full repo data
296
+
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris))
297
+
if err != nil {
298
+
return nil, err
299
+
}
300
+
301
+
// sort repos by the original trending order
302
+
repoMap := make(map[string]Repo)
303
+
for _, repo := range repos {
304
+
repoMap[repo.RepoAt().String()] = repo
305
+
}
306
+
307
+
orderedRepos := make([]Repo, 0, len(repoUris))
308
+
for _, uri := range repoUris {
309
+
if repo, exists := repoMap[uri]; exists {
310
+
orderedRepos = append(orderedRepos, repo)
311
+
}
312
+
}
313
+
314
+
return orderedRepos, nil
315
+
}
+12
-11
appview/db/strings.go
+12
-11
appview/db/strings.go
···
50
50
func (s String) Validate() error {
51
51
var err error
52
52
53
-
if !strings.Contains(s.Filename, ".") {
54
-
err = errors.Join(err, fmt.Errorf("missing filename extension"))
55
-
}
56
-
57
-
if strings.HasSuffix(s.Filename, ".") {
58
-
err = errors.Join(err, fmt.Errorf("filename ends with `.`"))
59
-
}
60
-
61
53
if utf8.RuneCountInString(s.Filename) > 140 {
62
54
err = errors.Join(err, fmt.Errorf("filename too long"))
63
55
}
···
113
105
filename = excluded.filename,
114
106
description = excluded.description,
115
107
content = excluded.content,
116
-
edited = case
108
+
edited = case
117
109
when
118
110
strings.content != excluded.content
119
111
or strings.filename != excluded.filename
···
131
123
return err
132
124
}
133
125
134
-
func GetStrings(e Execer, filters ...filter) ([]String, error) {
126
+
func GetStrings(e Execer, limit int, filters ...filter) ([]String, error) {
135
127
var all []String
136
128
137
129
var conditions []string
···
146
138
whereClause = " where " + strings.Join(conditions, " and ")
147
139
}
148
140
141
+
limitClause := ""
142
+
if limit != 0 {
143
+
limitClause = fmt.Sprintf(" limit %d ", limit)
144
+
}
145
+
149
146
query := fmt.Sprintf(`select
150
147
did,
151
148
rkey,
···
154
151
content,
155
152
created,
156
153
edited
157
-
from strings %s`,
154
+
from strings
155
+
%s
156
+
order by created desc
157
+
%s`,
158
158
whereClause,
159
+
limitClause,
159
160
)
160
161
161
162
rows, err := e.Query(query, args...)
+179
-6
appview/ingester.go
+179
-6
appview/ingester.go
···
5
5
"encoding/json"
6
6
"fmt"
7
7
"log/slog"
8
+
"strings"
8
9
"time"
9
10
10
11
"github.com/bluesky-social/indigo/atproto/syntax"
···
14
15
"tangled.sh/tangled.sh/core/api/tangled"
15
16
"tangled.sh/tangled.sh/core/appview/config"
16
17
"tangled.sh/tangled.sh/core/appview/db"
18
+
"tangled.sh/tangled.sh/core/appview/pages/markup"
17
19
"tangled.sh/tangled.sh/core/appview/spindleverify"
18
20
"tangled.sh/tangled.sh/core/idresolver"
19
21
"tangled.sh/tangled.sh/core/rbac"
···
61
63
case tangled.ActorProfileNSID:
62
64
err = i.ingestProfile(e)
63
65
case tangled.SpindleMemberNSID:
64
-
err = i.ingestSpindleMember(e)
66
+
err = i.ingestSpindleMember(ctx, e)
65
67
case tangled.SpindleNSID:
66
-
err = i.ingestSpindle(e)
68
+
err = i.ingestSpindle(ctx, e)
67
69
case tangled.StringNSID:
68
70
err = i.ingestString(e)
71
+
case tangled.RepoIssueNSID:
72
+
err = i.ingestIssue(ctx, e)
73
+
case tangled.RepoIssueCommentNSID:
74
+
err = i.ingestIssueComment(e)
69
75
}
70
76
l = i.Logger.With("nsid", e.Commit.Collection)
71
77
}
···
336
342
return nil
337
343
}
338
344
339
-
func (i *Ingester) ingestSpindleMember(e *models.Event) error {
345
+
func (i *Ingester) ingestSpindleMember(ctx context.Context, e *models.Event) error {
340
346
did := e.Did
341
347
var err error
342
348
···
359
365
return fmt.Errorf("failed to enforce permissions: %w", err)
360
366
}
361
367
362
-
memberId, err := i.IdResolver.ResolveIdent(context.Background(), record.Subject)
368
+
memberId, err := i.IdResolver.ResolveIdent(ctx, record.Subject)
363
369
if err != nil {
364
370
return err
365
371
}
···
442
448
return nil
443
449
}
444
450
445
-
func (i *Ingester) ingestSpindle(e *models.Event) error {
451
+
func (i *Ingester) ingestSpindle(ctx context.Context, e *models.Event) error {
446
452
did := e.Did
447
453
var err error
448
454
···
475
481
return err
476
482
}
477
483
478
-
err = spindleverify.RunVerification(context.Background(), instance, did, i.Config.Core.Dev)
484
+
err = spindleverify.RunVerification(ctx, instance, did, i.Config.Core.Dev)
479
485
if err != nil {
480
486
l.Error("failed to add spindle to db", "err", err, "instance", instance)
481
487
return err
···
609
615
610
616
return nil
611
617
}
618
+
619
+
func (i *Ingester) ingestIssue(ctx context.Context, e *models.Event) error {
620
+
did := e.Did
621
+
rkey := e.Commit.RKey
622
+
623
+
var err error
624
+
625
+
l := i.Logger.With("handler", "ingestIssue", "nsid", e.Commit.Collection, "did", did, "rkey", rkey)
626
+
l.Info("ingesting record")
627
+
628
+
ddb, ok := i.Db.Execer.(*db.DB)
629
+
if !ok {
630
+
return fmt.Errorf("failed to index issue record, invalid db cast")
631
+
}
632
+
633
+
switch e.Commit.Operation {
634
+
case models.CommitOperationCreate:
635
+
raw := json.RawMessage(e.Commit.Record)
636
+
record := tangled.RepoIssue{}
637
+
err = json.Unmarshal(raw, &record)
638
+
if err != nil {
639
+
l.Error("invalid record", "err", err)
640
+
return err
641
+
}
642
+
643
+
issue := db.IssueFromRecord(did, rkey, record)
644
+
645
+
sanitizer := markup.NewSanitizer()
646
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(issue.Title)); st == "" {
647
+
return fmt.Errorf("title is empty after HTML sanitization")
648
+
}
649
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(issue.Body)); sb == "" {
650
+
return fmt.Errorf("body is empty after HTML sanitization")
651
+
}
652
+
653
+
tx, err := ddb.BeginTx(ctx, nil)
654
+
if err != nil {
655
+
l.Error("failed to begin transaction", "err", err)
656
+
return err
657
+
}
658
+
659
+
err = db.NewIssue(tx, &issue)
660
+
if err != nil {
661
+
l.Error("failed to create issue", "err", err)
662
+
return err
663
+
}
664
+
665
+
return nil
666
+
667
+
case models.CommitOperationUpdate:
668
+
raw := json.RawMessage(e.Commit.Record)
669
+
record := tangled.RepoIssue{}
670
+
err = json.Unmarshal(raw, &record)
671
+
if err != nil {
672
+
l.Error("invalid record", "err", err)
673
+
return err
674
+
}
675
+
676
+
body := ""
677
+
if record.Body != nil {
678
+
body = *record.Body
679
+
}
680
+
681
+
sanitizer := markup.NewSanitizer()
682
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(record.Title)); st == "" {
683
+
return fmt.Errorf("title is empty after HTML sanitization")
684
+
}
685
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(body)); sb == "" {
686
+
return fmt.Errorf("body is empty after HTML sanitization")
687
+
}
688
+
689
+
err = db.UpdateIssueByRkey(ddb, did, rkey, record.Title, body)
690
+
if err != nil {
691
+
l.Error("failed to update issue", "err", err)
692
+
return err
693
+
}
694
+
695
+
return nil
696
+
697
+
case models.CommitOperationDelete:
698
+
if err := db.DeleteIssueByRkey(ddb, did, rkey); err != nil {
699
+
l.Error("failed to delete", "err", err)
700
+
return fmt.Errorf("failed to delete issue record: %w", err)
701
+
}
702
+
703
+
return nil
704
+
}
705
+
706
+
return fmt.Errorf("unknown operation: %s", e.Commit.Operation)
707
+
}
708
+
709
+
func (i *Ingester) ingestIssueComment(e *models.Event) error {
710
+
did := e.Did
711
+
rkey := e.Commit.RKey
712
+
713
+
var err error
714
+
715
+
l := i.Logger.With("handler", "ingestIssueComment", "nsid", e.Commit.Collection, "did", did, "rkey", rkey)
716
+
l.Info("ingesting record")
717
+
718
+
ddb, ok := i.Db.Execer.(*db.DB)
719
+
if !ok {
720
+
return fmt.Errorf("failed to index issue comment record, invalid db cast")
721
+
}
722
+
723
+
switch e.Commit.Operation {
724
+
case models.CommitOperationCreate:
725
+
raw := json.RawMessage(e.Commit.Record)
726
+
record := tangled.RepoIssueComment{}
727
+
err = json.Unmarshal(raw, &record)
728
+
if err != nil {
729
+
l.Error("invalid record", "err", err)
730
+
return err
731
+
}
732
+
733
+
comment, err := db.IssueCommentFromRecord(ddb, did, rkey, record)
734
+
if err != nil {
735
+
l.Error("failed to parse comment from record", "err", err)
736
+
return err
737
+
}
738
+
739
+
sanitizer := markup.NewSanitizer()
740
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(comment.Body)); sb == "" {
741
+
return fmt.Errorf("body is empty after HTML sanitization")
742
+
}
743
+
744
+
err = db.NewIssueComment(ddb, &comment)
745
+
if err != nil {
746
+
l.Error("failed to create issue comment", "err", err)
747
+
return err
748
+
}
749
+
750
+
return nil
751
+
752
+
case models.CommitOperationUpdate:
753
+
raw := json.RawMessage(e.Commit.Record)
754
+
record := tangled.RepoIssueComment{}
755
+
err = json.Unmarshal(raw, &record)
756
+
if err != nil {
757
+
l.Error("invalid record", "err", err)
758
+
return err
759
+
}
760
+
761
+
sanitizer := markup.NewSanitizer()
762
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(record.Body)); sb == "" {
763
+
return fmt.Errorf("body is empty after HTML sanitization")
764
+
}
765
+
766
+
err = db.UpdateCommentByRkey(ddb, did, rkey, record.Body)
767
+
if err != nil {
768
+
l.Error("failed to update issue comment", "err", err)
769
+
return err
770
+
}
771
+
772
+
return nil
773
+
774
+
case models.CommitOperationDelete:
775
+
if err := db.DeleteCommentByRkey(ddb, did, rkey); err != nil {
776
+
l.Error("failed to delete", "err", err)
777
+
return fmt.Errorf("failed to delete issue comment record: %w", err)
778
+
}
779
+
780
+
return nil
781
+
}
782
+
783
+
return fmt.Errorf("unknown operation: %s", e.Commit.Operation)
784
+
}
+41
-41
appview/issues/issues.go
+41
-41
appview/issues/issues.go
···
7
7
"net/http"
8
8
"slices"
9
9
"strconv"
10
+
"strings"
10
11
"time"
11
12
12
13
comatproto "github.com/bluesky-social/indigo/api/atproto"
13
14
"github.com/bluesky-social/indigo/atproto/data"
14
-
"github.com/bluesky-social/indigo/atproto/syntax"
15
15
lexutil "github.com/bluesky-social/indigo/lex/util"
16
16
"github.com/go-chi/chi/v5"
17
17
···
21
21
"tangled.sh/tangled.sh/core/appview/notify"
22
22
"tangled.sh/tangled.sh/core/appview/oauth"
23
23
"tangled.sh/tangled.sh/core/appview/pages"
24
+
"tangled.sh/tangled.sh/core/appview/pages/markup"
24
25
"tangled.sh/tangled.sh/core/appview/pagination"
25
26
"tangled.sh/tangled.sh/core/appview/reporesolver"
26
27
"tangled.sh/tangled.sh/core/idresolver"
···
73
74
return
74
75
}
75
76
76
-
issue, comments, err := db.GetIssueWithComments(rp.db, f.RepoAt, issueIdInt)
77
+
issue, comments, err := db.GetIssueWithComments(rp.db, f.RepoAt(), issueIdInt)
77
78
if err != nil {
78
79
log.Println("failed to get issue and comments", err)
79
80
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
80
81
return
81
82
}
82
83
83
-
reactionCountMap, err := db.GetReactionCountMap(rp.db, syntax.ATURI(issue.IssueAt))
84
+
reactionCountMap, err := db.GetReactionCountMap(rp.db, issue.AtUri())
84
85
if err != nil {
85
86
log.Println("failed to get issue reactions")
86
87
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
···
88
89
89
90
userReactions := map[db.ReactionKind]bool{}
90
91
if user != nil {
91
-
userReactions = db.GetReactionStatusMap(rp.db, user.Did, syntax.ATURI(issue.IssueAt))
92
+
userReactions = db.GetReactionStatusMap(rp.db, user.Did, issue.AtUri())
92
93
}
93
94
94
95
issueOwnerIdent, err := rp.idResolver.ResolveIdent(r.Context(), issue.OwnerDid)
···
99
100
rp.pages.RepoSingleIssue(w, pages.RepoSingleIssueParams{
100
101
LoggedInUser: user,
101
102
RepoInfo: f.RepoInfo(user),
102
-
Issue: *issue,
103
+
Issue: issue,
103
104
Comments: comments,
104
105
105
106
IssueOwnerHandle: issueOwnerIdent.Handle.String(),
···
127
128
return
128
129
}
129
130
130
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
131
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
131
132
if err != nil {
132
133
log.Println("failed to get issue", err)
133
134
rp.pages.Notice(w, "issue-action", "Failed to close issue. Try again later.")
···
159
160
Rkey: tid.TID(),
160
161
Record: &lexutil.LexiconTypeDecoder{
161
162
Val: &tangled.RepoIssueState{
162
-
Issue: issue.IssueAt,
163
+
Issue: issue.AtUri().String(),
163
164
State: closed,
164
165
},
165
166
},
···
171
172
return
172
173
}
173
174
174
-
err = db.CloseIssue(rp.db, f.RepoAt, issueIdInt)
175
+
err = db.CloseIssue(rp.db, f.RepoAt(), issueIdInt)
175
176
if err != nil {
176
177
log.Println("failed to close issue", err)
177
178
rp.pages.Notice(w, "issue-action", "Failed to close issue. Try again later.")
···
203
204
return
204
205
}
205
206
206
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
207
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
207
208
if err != nil {
208
209
log.Println("failed to get issue", err)
209
210
rp.pages.Notice(w, "issue-action", "Failed to close issue. Try again later.")
···
220
221
isIssueOwner := user.Did == issue.OwnerDid
221
222
222
223
if isCollaborator || isIssueOwner {
223
-
err := db.ReopenIssue(rp.db, f.RepoAt, issueIdInt)
224
+
err := db.ReopenIssue(rp.db, f.RepoAt(), issueIdInt)
224
225
if err != nil {
225
226
log.Println("failed to reopen issue", err)
226
227
rp.pages.Notice(w, "issue-action", "Failed to reopen issue. Try again later.")
···
264
265
265
266
err := db.NewIssueComment(rp.db, &db.Comment{
266
267
OwnerDid: user.Did,
267
-
RepoAt: f.RepoAt,
268
+
RepoAt: f.RepoAt(),
268
269
Issue: issueIdInt,
269
270
CommentId: commentId,
270
271
Body: body,
···
277
278
}
278
279
279
280
createdAt := time.Now().Format(time.RFC3339)
280
-
commentIdInt64 := int64(commentId)
281
281
ownerDid := user.Did
282
-
issueAt, err := db.GetIssueAt(rp.db, f.RepoAt, issueIdInt)
282
+
issueAt, err := db.GetIssueAt(rp.db, f.RepoAt(), issueIdInt)
283
283
if err != nil {
284
284
log.Println("failed to get issue at", err)
285
285
rp.pages.Notice(w, "issue-comment", "Failed to create comment.")
286
286
return
287
287
}
288
288
289
-
atUri := f.RepoAt.String()
289
+
atUri := f.RepoAt().String()
290
290
client, err := rp.oauth.AuthorizedClient(r)
291
291
if err != nil {
292
292
log.Println("failed to get authorized client", err)
···
301
301
Val: &tangled.RepoIssueComment{
302
302
Repo: &atUri,
303
303
Issue: issueAt,
304
-
CommentId: &commentIdInt64,
305
304
Owner: &ownerDid,
306
305
Body: body,
307
306
CreatedAt: createdAt,
···
343
342
return
344
343
}
345
344
346
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
345
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
347
346
if err != nil {
348
347
log.Println("failed to get issue", err)
349
348
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
350
349
return
351
350
}
352
351
353
-
comment, err := db.GetComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
352
+
comment, err := db.GetComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
354
353
if err != nil {
355
354
http.Error(w, "bad comment id", http.StatusBadRequest)
356
355
return
···
388
387
return
389
388
}
390
389
391
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
390
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
392
391
if err != nil {
393
392
log.Println("failed to get issue", err)
394
393
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
395
394
return
396
395
}
397
396
398
-
comment, err := db.GetComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
397
+
comment, err := db.GetComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
399
398
if err != nil {
400
399
http.Error(w, "bad comment id", http.StatusBadRequest)
401
400
return
···
450
449
repoAt := record["repo"].(string)
451
450
issueAt := record["issue"].(string)
452
451
createdAt := record["createdAt"].(string)
453
-
commentIdInt64 := int64(commentIdInt)
454
452
455
453
_, err = client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
456
454
Collection: tangled.RepoIssueCommentNSID,
···
461
459
Val: &tangled.RepoIssueComment{
462
460
Repo: &repoAt,
463
461
Issue: issueAt,
464
-
CommentId: &commentIdInt64,
465
462
Owner: &comment.OwnerDid,
466
463
Body: newBody,
467
464
CreatedAt: createdAt,
···
506
503
return
507
504
}
508
505
509
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
506
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
510
507
if err != nil {
511
508
log.Println("failed to get issue", err)
512
509
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
···
521
518
return
522
519
}
523
520
524
-
comment, err := db.GetComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
521
+
comment, err := db.GetComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
525
522
if err != nil {
526
523
http.Error(w, "bad comment id", http.StatusBadRequest)
527
524
return
···
539
536
540
537
// optimistic deletion
541
538
deleted := time.Now()
542
-
err = db.DeleteComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
539
+
err = db.DeleteComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
543
540
if err != nil {
544
541
log.Println("failed to delete comment")
545
542
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment")
···
603
600
return
604
601
}
605
602
606
-
issues, err := db.GetIssues(rp.db, f.RepoAt, isOpen, page)
603
+
issues, err := db.GetIssuesPaginated(rp.db, f.RepoAt(), isOpen, page)
607
604
if err != nil {
608
605
log.Println("failed to get issues", err)
609
606
rp.pages.Notice(w, "issues", "Failed to load issues. Try again later.")
···
643
640
return
644
641
}
645
642
643
+
sanitizer := markup.NewSanitizer()
644
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(title)); st == "" {
645
+
rp.pages.Notice(w, "issues", "Title is empty after HTML sanitization")
646
+
return
647
+
}
648
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(body)); sb == "" {
649
+
rp.pages.Notice(w, "issues", "Body is empty after HTML sanitization")
650
+
return
651
+
}
652
+
646
653
tx, err := rp.db.BeginTx(r.Context(), nil)
647
654
if err != nil {
648
655
rp.pages.Notice(w, "issues", "Failed to create issue, try again later")
···
650
657
}
651
658
652
659
issue := &db.Issue{
653
-
RepoAt: f.RepoAt,
660
+
RepoAt: f.RepoAt(),
661
+
Rkey: tid.TID(),
654
662
Title: title,
655
663
Body: body,
656
664
OwnerDid: user.Did,
···
668
676
rp.pages.Notice(w, "issues", "Failed to create issue.")
669
677
return
670
678
}
671
-
atUri := f.RepoAt.String()
672
-
resp, err := client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
679
+
atUri := f.RepoAt().String()
680
+
_, err = client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
673
681
Collection: tangled.RepoIssueNSID,
674
682
Repo: user.Did,
675
-
Rkey: tid.TID(),
683
+
Rkey: issue.Rkey,
676
684
Record: &lexutil.LexiconTypeDecoder{
677
685
Val: &tangled.RepoIssue{
678
-
Repo: atUri,
679
-
Title: title,
680
-
Body: &body,
681
-
Owner: user.Did,
682
-
IssueId: int64(issue.IssueId),
686
+
Repo: atUri,
687
+
Title: title,
688
+
Body: &body,
689
+
Owner: user.Did,
683
690
},
684
691
},
685
692
})
686
693
if err != nil {
687
694
log.Println("failed to create issue", err)
688
-
rp.pages.Notice(w, "issues", "Failed to create issue.")
689
-
return
690
-
}
691
-
692
-
err = db.SetIssueAt(rp.db, f.RepoAt, issue.IssueId, resp.Uri)
693
-
if err != nil {
694
-
log.Println("failed to set issue at", err)
695
695
rp.pages.Notice(w, "issues", "Failed to create issue.")
696
696
return
697
697
}
+3
-8
appview/middleware/middleware.go
+3
-8
appview/middleware/middleware.go
···
9
9
"slices"
10
10
"strconv"
11
11
"strings"
12
-
"time"
13
12
14
13
"github.com/bluesky-social/indigo/atproto/identity"
15
14
"github.com/go-chi/chi/v5"
···
222
221
return
223
222
}
224
223
225
-
ctx := context.WithValue(req.Context(), "knot", repo.Knot)
226
-
ctx = context.WithValue(ctx, "repoAt", repo.AtUri)
227
-
ctx = context.WithValue(ctx, "repoDescription", repo.Description)
228
-
ctx = context.WithValue(ctx, "repoSpindle", repo.Spindle)
229
-
ctx = context.WithValue(ctx, "repoAddedAt", repo.Created.Format(time.RFC3339))
224
+
ctx := context.WithValue(req.Context(), "repo", repo)
230
225
next.ServeHTTP(w, req.WithContext(ctx))
231
226
})
232
227
}
···
251
246
return
252
247
}
253
248
254
-
pr, err := db.GetPull(mw.db, f.RepoAt, prIdInt)
249
+
pr, err := db.GetPull(mw.db, f.RepoAt(), prIdInt)
255
250
if err != nil {
256
251
log.Println("failed to get pull and comments", err)
257
252
return
···
292
287
return
293
288
}
294
289
295
-
fullName := f.OwnerHandle() + "/" + f.RepoName
290
+
fullName := f.OwnerHandle() + "/" + f.Name
296
291
297
292
if r.Header.Get("User-Agent") == "Go-http-client/1.1" {
298
293
if r.URL.Query().Get("go-get") == "1" {
+10
-4
appview/pages/funcmap.go
+10
-4
appview/pages/funcmap.go
···
19
19
20
20
"github.com/dustin/go-humanize"
21
21
"github.com/go-enry/go-enry/v2"
22
-
"github.com/microcosm-cc/bluemonday"
23
22
"tangled.sh/tangled.sh/core/appview/filetree"
24
23
"tangled.sh/tangled.sh/core/appview/pages/markup"
25
24
)
···
207
206
}
208
207
return v.Slice(0, min(n, v.Len())).Interface()
209
208
},
210
-
211
209
"markdown": func(text string) template.HTML {
212
-
rctx := &markup.RenderContext{RendererType: markup.RendererTypeDefault}
213
-
return template.HTML(bluemonday.UGCPolicy().Sanitize(rctx.RenderMarkdown(text)))
210
+
p.rctx.RendererType = markup.RendererTypeDefault
211
+
htmlString := p.rctx.RenderMarkdown(text)
212
+
sanitized := p.rctx.SanitizeDefault(htmlString)
213
+
return template.HTML(sanitized)
214
+
},
215
+
"description": func(text string) template.HTML {
216
+
p.rctx.RendererType = markup.RendererTypeDefault
217
+
htmlString := p.rctx.RenderMarkdown(text)
218
+
sanitized := p.rctx.SanitizeDescription(htmlString)
219
+
return template.HTML(sanitized)
214
220
},
215
221
"isNil": func(t any) bool {
216
222
// returns false for other "zero" values
+5
-1
appview/pages/markup/markdown.go
+5
-1
appview/pages/markup/markdown.go
···
161
161
}
162
162
163
163
func (rctx *RenderContext) SanitizeDefault(html string) string {
164
-
return rctx.Sanitizer.defaultPolicy.Sanitize(html)
164
+
return rctx.Sanitizer.SanitizeDefault(html)
165
+
}
166
+
167
+
func (rctx *RenderContext) SanitizeDescription(html string) string {
168
+
return rctx.Sanitizer.SanitizeDescription(html)
165
169
}
166
170
167
171
type MarkdownTransformer struct {
+27
-2
appview/pages/markup/sanitizer.go
+27
-2
appview/pages/markup/sanitizer.go
···
11
11
)
12
12
13
13
type Sanitizer struct {
14
-
defaultPolicy *bluemonday.Policy
14
+
defaultPolicy *bluemonday.Policy
15
+
descriptionPolicy *bluemonday.Policy
15
16
}
16
17
17
18
func NewSanitizer() Sanitizer {
18
19
return Sanitizer{
19
-
defaultPolicy: defaultPolicy(),
20
+
defaultPolicy: defaultPolicy(),
21
+
descriptionPolicy: descriptionPolicy(),
20
22
}
23
+
}
24
+
25
+
func (s *Sanitizer) SanitizeDefault(html string) string {
26
+
return s.defaultPolicy.Sanitize(html)
27
+
}
28
+
func (s *Sanitizer) SanitizeDescription(html string) string {
29
+
return s.descriptionPolicy.Sanitize(html)
21
30
}
22
31
23
32
func defaultPolicy() *bluemonday.Policy {
···
90
99
91
100
return policy
92
101
}
102
+
103
+
func descriptionPolicy() *bluemonday.Policy {
104
+
policy := bluemonday.NewPolicy()
105
+
policy.AllowStandardURLs()
106
+
107
+
// allow italics and bold.
108
+
policy.AllowElements("i", "b", "em", "strong")
109
+
110
+
// allow code.
111
+
policy.AllowElements("code")
112
+
113
+
// allow links
114
+
policy.AllowAttrs("href", "target", "rel").OnElements("a")
115
+
116
+
return policy
117
+
}
+34
-26
appview/pages/pages.go
+34
-26
appview/pages/pages.go
···
299
299
type TimelineParams struct {
300
300
LoggedInUser *oauth.User
301
301
Timeline []db.TimelineEvent
302
+
Repos []db.Repo
302
303
}
303
304
304
305
func (p *Pages) Timeline(w io.Writer, params TimelineParams) error {
305
-
return p.execute("timeline", w, params)
306
+
return p.execute("timeline/timeline", w, params)
306
307
}
307
308
308
309
type SettingsParams struct {
···
520
521
}
521
522
522
523
p.rctx.RepoInfo = params.RepoInfo
524
+
p.rctx.RepoInfo.Ref = params.Ref
523
525
p.rctx.RendererType = markup.RendererTypeRepoMarkdown
524
526
525
527
if params.ReadmeFileName != "" {
···
673
675
}
674
676
}
675
677
676
-
if params.Lines < 5000 {
677
-
c := params.Contents
678
-
formatter := chromahtml.New(
679
-
chromahtml.InlineCode(false),
680
-
chromahtml.WithLineNumbers(true),
681
-
chromahtml.WithLinkableLineNumbers(true, "L"),
682
-
chromahtml.Standalone(false),
683
-
chromahtml.WithClasses(true),
684
-
)
685
-
686
-
lexer := lexers.Get(filepath.Base(params.Path))
687
-
if lexer == nil {
688
-
lexer = lexers.Fallback
689
-
}
678
+
c := params.Contents
679
+
formatter := chromahtml.New(
680
+
chromahtml.InlineCode(false),
681
+
chromahtml.WithLineNumbers(true),
682
+
chromahtml.WithLinkableLineNumbers(true, "L"),
683
+
chromahtml.Standalone(false),
684
+
chromahtml.WithClasses(true),
685
+
)
690
686
691
-
iterator, err := lexer.Tokenise(nil, c)
692
-
if err != nil {
693
-
return fmt.Errorf("chroma tokenize: %w", err)
694
-
}
687
+
lexer := lexers.Get(filepath.Base(params.Path))
688
+
if lexer == nil {
689
+
lexer = lexers.Fallback
690
+
}
695
691
696
-
var code bytes.Buffer
697
-
err = formatter.Format(&code, style, iterator)
698
-
if err != nil {
699
-
return fmt.Errorf("chroma format: %w", err)
700
-
}
692
+
iterator, err := lexer.Tokenise(nil, c)
693
+
if err != nil {
694
+
return fmt.Errorf("chroma tokenize: %w", err)
695
+
}
701
696
702
-
params.Contents = code.String()
697
+
var code bytes.Buffer
698
+
err = formatter.Format(&code, style, iterator)
699
+
if err != nil {
700
+
return fmt.Errorf("chroma format: %w", err)
703
701
}
704
702
703
+
params.Contents = code.String()
705
704
params.Active = "overview"
706
705
return p.executeRepo("repo/blob", w, params)
707
706
}
···
793
792
LoggedInUser *oauth.User
794
793
RepoInfo repoinfo.RepoInfo
795
794
Active string
796
-
Issue db.Issue
795
+
Issue *db.Issue
797
796
Comments []db.Comment
798
797
IssueOwnerHandle string
799
798
···
1158
1157
1159
1158
func (p *Pages) StringsDashboard(w io.Writer, params StringsDashboardParams) error {
1160
1159
return p.execute("strings/dashboard", w, params)
1160
+
}
1161
+
1162
+
type StringTimelineParams struct {
1163
+
LoggedInUser *oauth.User
1164
+
Strings []db.String
1165
+
}
1166
+
1167
+
func (p *Pages) StringsTimeline(w io.Writer, params StringTimelineParams) error {
1168
+
return p.execute("strings/timeline", w, params)
1161
1169
}
1162
1170
1163
1171
type SingleStringParams struct {
-12
appview/pages/templates/layouts/base.html
-12
appview/pages/templates/layouts/base.html
···
24
24
{{ block "mainLayout" . }}
25
25
<div class="px-1 col-span-1 md:col-start-3 md:col-span-8 flex flex-col gap-4">
26
26
{{ block "contentLayout" . }}
27
-
<div class="col-span-1 md:col-span-2">
28
-
{{ block "contentLeft" . }} {{ end }}
29
-
</div>
30
27
<main class="col-span-1 md:col-span-8">
31
28
{{ block "content" . }}{{ end }}
32
29
</main>
33
-
<div class="col-span-1 md:col-span-2">
34
-
{{ block "contentRight" . }} {{ end }}
35
-
</div>
36
30
{{ end }}
37
31
38
32
{{ block "contentAfterLayout" . }}
39
-
<div class="col-span-1 md:col-span-2">
40
-
{{ block "contentAfterLeft" . }} {{ end }}
41
-
</div>
42
33
<main class="col-span-1 md:col-span-8">
43
34
{{ block "contentAfter" . }}{{ end }}
44
35
</main>
45
-
<div class="col-span-1 md:col-span-2">
46
-
{{ block "contentAfterRight" . }} {{ end }}
47
-
</div>
48
36
{{ end }}
49
37
</div>
50
38
{{ end }}
+15
-20
appview/pages/templates/layouts/repobase.html
+15
-20
appview/pages/templates/layouts/repobase.html
···
20
20
</div>
21
21
22
22
<div class="flex items-center gap-2 z-auto">
23
+
<a
24
+
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
25
+
href="/{{ .RepoInfo.FullName }}/feed.atom"
26
+
>
27
+
{{ i "rss" "size-4" }}
28
+
</a>
23
29
{{ template "repo/fragments/repoStar" .RepoInfo }}
24
-
{{ if .RepoInfo.DisableFork }}
25
-
<button
26
-
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 disabled:opacity-50 disabled:cursor-not-allowed"
27
-
disabled
28
-
title="Empty repositories cannot be forked"
29
-
>
30
-
{{ i "git-fork" "w-4 h-4" }}
31
-
fork
32
-
</button>
33
-
{{ else }}
34
-
<a
35
-
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
36
-
hx-boost="true"
37
-
href="/{{ .RepoInfo.FullName }}/fork"
38
-
>
39
-
{{ i "git-fork" "w-4 h-4" }}
40
-
fork
41
-
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
42
-
</a>
43
-
{{ end }}
30
+
<a
31
+
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
32
+
hx-boost="true"
33
+
href="/{{ .RepoInfo.FullName }}/fork"
34
+
>
35
+
{{ i "git-fork" "w-4 h-4" }}
36
+
fork
37
+
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
38
+
</a>
44
39
</div>
45
40
</div>
46
41
{{ template "repo/fragments/repoDescription" . }}
+1
-1
appview/pages/templates/repo/commit.html
+1
-1
appview/pages/templates/repo/commit.html
···
118
118
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
119
119
{{ template "repo/fragments/diffOpts" .DiffOpts }}
120
120
</div>
121
-
<div class="sticky top-0 flex-grow max-h-screen">
121
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
122
122
{{ template "repo/fragments/diffChangedFiles" .Diff }}
123
123
</div>
124
124
{{end}}
+1
-1
appview/pages/templates/repo/compare/compare.html
+1
-1
appview/pages/templates/repo/compare/compare.html
···
49
49
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
50
50
{{ template "repo/fragments/diffOpts" .DiffOpts }}
51
51
</div>
52
-
<div class="sticky top-0 flex-grow max-h-screen">
52
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
53
53
{{ template "repo/fragments/diffChangedFiles" .Diff }}
54
54
</div>
55
55
{{end}}
-4
appview/pages/templates/repo/empty.html
-4
appview/pages/templates/repo/empty.html
+3
-3
appview/pages/templates/repo/fragments/cloneDropdown.html
+3
-3
appview/pages/templates/repo/fragments/cloneDropdown.html
···
7
7
<details id="clone-dropdown" class="relative inline-block text-left group">
8
8
<summary class="btn-create cursor-pointer list-none flex items-center gap-2">
9
9
{{ i "download" "w-4 h-4" }}
10
-
<span>code</span>
10
+
<span class="hidden md:inline">code</span>
11
11
<span class="group-open:hidden">
12
12
{{ i "chevron-down" "w-4 h-4" }}
13
13
</span>
···
16
16
</span>
17
17
</summary>
18
18
19
-
<div class="absolute right-0 mt-2 w-96 bg-white dark:bg-gray-800 rounded border border-gray-200 dark:border-gray-700 drop-shadow dark:text-white z-[9999]">
19
+
<div class="absolute right-0 mt-2 w-96 bg-white dark:bg-gray-800 rounded border border-gray-200 dark:border-gray-700 drop-shadow-sm dark:text-white z-[9999]">
20
20
<div class="p-4">
21
21
<div class="mb-3">
22
22
<h3 class="text-sm font-semibold text-gray-900 dark:text-white mb-2">Clone this repository</h3>
···
84
84
function copyToClipboard(button, text) {
85
85
navigator.clipboard.writeText(text).then(() => {
86
86
const originalContent = button.innerHTML;
87
-
button.innerHTML = `{{ i "copy-check" "w-4 h-4" }}`;
87
+
button.innerHTML = `{{ i "check" "w-4 h-4" }}`;
88
88
setTimeout(() => {
89
89
button.innerHTML = originalContent;
90
90
}, 2000);
+1
-1
appview/pages/templates/repo/fragments/interdiffFiles.html
+1
-1
appview/pages/templates/repo/fragments/interdiffFiles.html
···
1
1
{{ define "repo/fragments/interdiffFiles" }}
2
2
{{ $fileTree := fileTree .AffectedFiles }}
3
-
<section class="mt-4 px-6 py-2 border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm min-h-full text-sm">
3
+
<section class="px-6 py-2 border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm min-h-full text-sm">
4
4
<div class="diff-stat">
5
5
<div class="flex gap-2 items-center">
6
6
<strong class="text-sm uppercase dark:text-gray-200">files</strong>
+1
-1
appview/pages/templates/repo/fragments/repoDescription.html
+1
-1
appview/pages/templates/repo/fragments/repoDescription.html
···
1
1
{{ define "repo/fragments/repoDescription" }}
2
2
<span id="repo-description" class="flex flex-wrap items-center gap-2 text-sm" hx-target="this" hx-swap="outerHTML">
3
3
{{ if .RepoInfo.Description }}
4
-
{{ .RepoInfo.Description }}
4
+
{{ .RepoInfo.Description | description }}
5
5
{{ else }}
6
6
<span class="italic">this repo has no description</span>
7
7
{{ end }}
+10
-9
appview/pages/templates/repo/index.html
+10
-9
appview/pages/templates/repo/index.html
···
14
14
{{ end }}
15
15
<div class="flex items-center justify-between pb-5">
16
16
{{ block "branchSelector" . }}{{ end }}
17
-
<div class="flex md:hidden items-center gap-4">
18
-
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1">
17
+
<div class="flex md:hidden items-center gap-2">
18
+
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold">
19
19
{{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }}
20
20
</a>
21
-
<a href="/{{ .RepoInfo.FullName }}/branches" class="inline-flex items-center text-sm gap-1">
21
+
<a href="/{{ .RepoInfo.FullName }}/branches" class="inline-flex items-center text-sm gap-1 font-bold">
22
22
{{ i "git-branch" "w-4" "h-4" }} {{ len .Branches }}
23
23
</a>
24
-
<a href="/{{ .RepoInfo.FullName }}/tags" class="inline-flex items-center text-sm gap-1">
24
+
<a href="/{{ .RepoInfo.FullName }}/tags" class="inline-flex items-center text-sm gap-1 font-bold">
25
25
{{ i "tags" "w-4" "h-4" }} {{ len .Tags }}
26
26
</a>
27
+
{{ template "repo/fragments/cloneDropdown" . }}
27
28
</div>
28
29
</div>
29
30
<div class="grid grid-cols-1 md:grid-cols-2 gap-2">
···
125
126
</a>
126
127
</div>
127
128
</div>
128
-
129
+
129
130
<!-- Clone dropdown in top right -->
130
-
<div class="flex items-center">
131
+
<div class="hidden md:flex items-center ">
131
132
{{ template "repo/fragments/cloneDropdown" . }}
132
133
</div>
133
134
</div>
···
138
139
{{ $linkstyle := "no-underline hover:underline dark:text-white" }}
139
140
140
141
{{ range .Files }}
141
-
<div class="grid grid-cols-2 gap-4 items-center py-1">
142
-
<div class="col-span-1">
142
+
<div class="grid grid-cols-3 gap-4 items-center py-1">
143
+
<div class="col-span-2">
143
144
{{ $link := printf "/%s/%s/%s/%s" $.RepoInfo.FullName "tree" (urlquery $.Ref) .Name }}
144
145
{{ $icon := "folder" }}
145
146
{{ $iconStyle := "size-4 fill-current" }}
···
157
158
</a>
158
159
</div>
159
160
160
-
<div class="text-xs col-span-1 text-right">
161
+
<div class="text-sm col-span-1 text-right">
161
162
{{ with .LastCommit }}
162
163
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Hash }}" class="text-gray-500 dark:text-gray-400">{{ template "repo/fragments/time" .When }}</a>
163
164
{{ end }}
+2
-2
appview/pages/templates/repo/issues/issue.html
+2
-2
appview/pages/templates/repo/issues/issue.html
···
11
11
{{ define "repoContent" }}
12
12
<header class="pb-4">
13
13
<h1 class="text-2xl">
14
-
{{ .Issue.Title }}
14
+
{{ .Issue.Title | description }}
15
15
<span class="text-gray-500 dark:text-gray-400">#{{ .Issue.IssueId }}</span>
16
16
</h1>
17
17
</header>
···
54
54
"Kind" $kind
55
55
"Count" (index $.Reactions $kind)
56
56
"IsReacted" (index $.UserReacted $kind)
57
-
"ThreadAt" $.Issue.IssueAt)
57
+
"ThreadAt" $.Issue.AtUri)
58
58
}}
59
59
{{ end }}
60
60
</div>
+1
-1
appview/pages/templates/repo/issues/issues.html
+1
-1
appview/pages/templates/repo/issues/issues.html
+1
-1
appview/pages/templates/repo/pulls/fragments/pullHeader.html
+1
-1
appview/pages/templates/repo/pulls/fragments/pullHeader.html
+1
-1
appview/pages/templates/repo/pulls/fragments/summarizedPullHeader.html
+1
-1
appview/pages/templates/repo/pulls/fragments/summarizedPullHeader.html
+1
-1
appview/pages/templates/repo/pulls/interdiff.html
+1
-1
appview/pages/templates/repo/pulls/interdiff.html
···
68
68
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
69
69
{{ template "repo/fragments/diffOpts" .DiffOpts }}
70
70
</div>
71
-
<div class="sticky top-0 flex-grow max-h-screen">
71
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
72
72
{{ template "repo/fragments/interdiffFiles" .Interdiff }}
73
73
</div>
74
74
{{end}}
+1
-1
appview/pages/templates/repo/pulls/patch.html
+1
-1
appview/pages/templates/repo/pulls/patch.html
···
73
73
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
74
74
{{ template "repo/fragments/diffOpts" .DiffOpts }}
75
75
</div>
76
-
<div class="sticky top-0 flex-grow max-h-screen">
76
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
77
77
{{ template "repo/fragments/diffChangedFiles" .Diff }}
78
78
</div>
79
79
{{end}}
+1
-1
appview/pages/templates/repo/pulls/pull.html
+1
-1
appview/pages/templates/repo/pulls/pull.html
···
122
122
{{ end }}
123
123
</div>
124
124
<div class="flex items-center">
125
-
<span>{{ .Title }}</span>
125
+
<span>{{ .Title | description }}</span>
126
126
{{ if gt (len .Body) 0 }}
127
127
<button
128
128
class="py-1/2 px-1 mx-2 bg-gray-200 hover:bg-gray-400 rounded dark:bg-gray-700 dark:hover:bg-gray-600"
+1
-1
appview/pages/templates/repo/pulls/pulls.html
+1
-1
appview/pages/templates/repo/pulls/pulls.html
+2
-2
appview/pages/templates/repo/tree.html
+2
-2
appview/pages/templates/repo/tree.html
···
54
54
55
55
{{ range .Files }}
56
56
<div class="grid grid-cols-12 gap-4 items-center py-1">
57
-
<div class="col-span-6 md:col-span-4">
57
+
<div class="col-span-8 md:col-span-4">
58
58
{{ $link := printf "/%s/%s/%s/%s/%s" $.RepoInfo.FullName "tree" (urlquery $.Ref) $.TreePath .Name }}
59
59
{{ $icon := "folder" }}
60
60
{{ $iconStyle := "size-4 fill-current" }}
···
77
77
{{ end }}
78
78
</div>
79
79
80
-
<div class="col-span-6 md:col-span-2 text-right">
80
+
<div class="col-span-4 md:col-span-2 text-sm text-right">
81
81
{{ with .LastCommit }}
82
82
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Hash }}" class="text-gray-500 dark:text-gray-400">{{ template "repo/fragments/time" .When }}</a>
83
83
{{ end }}
+3
-2
appview/pages/templates/strings/fragments/form.html
+3
-2
appview/pages/templates/strings/fragments/form.html
···
13
13
type="text"
14
14
id="filename"
15
15
name="filename"
16
-
placeholder="Filename with extension"
16
+
placeholder="Filename"
17
17
required
18
18
value="{{ .String.Filename }}"
19
19
class="md:max-w-64 dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded"
···
31
31
name="content"
32
32
id="content-textarea"
33
33
wrap="off"
34
-
class="w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400"
34
+
class="w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 font-mono"
35
35
rows="20"
36
+
spellcheck="false"
36
37
placeholder="Paste your string here!"
37
38
required>{{ .String.Contents }}</textarea>
38
39
<div class="flex justify-between items-center">
+2
-2
appview/pages/templates/strings/string.html
+2
-2
appview/pages/templates/strings/string.html
···
35
35
title="Delete string"
36
36
hx-delete="/strings/{{ .String.Did }}/{{ .String.Rkey }}/"
37
37
hx-swap="none"
38
-
hx-confirm="Are you sure you want to delete the gist `{{ .String.Filename }}`?"
38
+
hx-confirm="Are you sure you want to delete the string `{{ .String.Filename }}`?"
39
39
>
40
40
{{ i "trash-2" "size-4" }}
41
41
<span class="hidden md:inline">delete</span>
···
77
77
{{ end }}
78
78
</div>
79
79
</div>
80
-
<div class="overflow-auto relative">
80
+
<div class="overflow-x-auto overflow-y-hidden relative">
81
81
{{ if .ShowRendered }}
82
82
<div id="blob-contents" class="prose dark:prose-invert">{{ .RenderedContents }}</div>
83
83
{{ else }}
+65
appview/pages/templates/strings/timeline.html
+65
appview/pages/templates/strings/timeline.html
···
1
+
{{ define "title" }} all strings {{ end }}
2
+
3
+
{{ define "topbar" }}
4
+
{{ template "layouts/topbar" $ }}
5
+
{{ end }}
6
+
7
+
{{ define "content" }}
8
+
{{ block "timeline" $ }}{{ end }}
9
+
{{ end }}
10
+
11
+
{{ define "timeline" }}
12
+
<div>
13
+
<div class="p-6">
14
+
<p class="text-xl font-bold dark:text-white">All strings</p>
15
+
</div>
16
+
17
+
<div class="flex flex-col gap-4">
18
+
{{ range $i, $s := .Strings }}
19
+
<div class="relative">
20
+
{{ if ne $i 0 }}
21
+
<div class="absolute left-8 -top-4 w-px h-4 bg-gray-300 dark:bg-gray-600"></div>
22
+
{{ end }}
23
+
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
24
+
{{ template "stringCard" $s }}
25
+
</div>
26
+
</div>
27
+
{{ end }}
28
+
</div>
29
+
</div>
30
+
{{ end }}
31
+
32
+
{{ define "stringCard" }}
33
+
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800">
34
+
<div class="font-medium dark:text-white flex gap-2 items-center">
35
+
<a href="/strings/{{ resolve .Did.String }}/{{ .Rkey }}">{{ .Filename }}</a>
36
+
</div>
37
+
{{ with .Description }}
38
+
<div class="text-gray-600 dark:text-gray-300 text-sm">
39
+
{{ . }}
40
+
</div>
41
+
{{ end }}
42
+
43
+
{{ template "stringCardInfo" . }}
44
+
</div>
45
+
{{ end }}
46
+
47
+
{{ define "stringCardInfo" }}
48
+
{{ $stat := .Stats }}
49
+
{{ $resolved := resolve .Did.String }}
50
+
<div class="text-gray-400 pt-4 text-sm font-mono inline-flex items-center gap-2 mt-auto">
51
+
<a href="/strings/{{ $resolved }}" class="flex items-center">
52
+
{{ template "user/fragments/picHandle" $resolved }}
53
+
</a>
54
+
<span class="select-none [&:before]:content-['ยท']"></span>
55
+
<span>{{ $stat.LineCount }} line{{if ne $stat.LineCount 1}}s{{end}}</span>
56
+
<span class="select-none [&:before]:content-['ยท']"></span>
57
+
{{ with .Edited }}
58
+
<span>edited {{ template "repo/fragments/shortTimeAgo" . }}</span>
59
+
{{ else }}
60
+
{{ template "repo/fragments/shortTimeAgo" .Created }}
61
+
{{ end }}
62
+
</div>
63
+
{{ end }}
64
+
65
+
+183
appview/pages/templates/timeline/timeline.html
+183
appview/pages/templates/timeline/timeline.html
···
1
+
{{ define "title" }}timeline{{ end }}
2
+
3
+
{{ define "extrameta" }}
4
+
<meta property="og:title" content="timeline ยท tangled" />
5
+
<meta property="og:type" content="object" />
6
+
<meta property="og:url" content="https://tangled.sh" />
7
+
<meta property="og:description" content="tightly-knit social coding" />
8
+
{{ end }}
9
+
10
+
{{ define "content" }}
11
+
{{ if .LoggedInUser }}
12
+
{{ else }}
13
+
{{ block "hero" $ }}{{ end }}
14
+
{{ end }}
15
+
16
+
{{ block "trending" $ }}{{ end }}
17
+
{{ block "timeline" $ }}{{ end }}
18
+
{{ end }}
19
+
20
+
{{ define "hero" }}
21
+
<div class="flex flex-col text-black dark:text-white p-6 gap-6 max-w-xl">
22
+
<div class="font-bold text-4xl">tightly-knit<br>social coding.</div>
23
+
24
+
<p class="text-lg">
25
+
tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
26
+
</p>
27
+
<p class="text-lg">
28
+
we envision a place where developers have complete ownership of their
29
+
code, open source communities can freely self-govern and most
30
+
importantly, coding can be social and fun again.
31
+
</p>
32
+
33
+
<div class="flex gap-6 items-center">
34
+
<a href="/signup" class="no-underline hover:no-underline ">
35
+
<button class="btn-create flex gap-2 px-4 items-center">
36
+
join now {{ i "arrow-right" "size-4" }}
37
+
</button>
38
+
</a>
39
+
</div>
40
+
</div>
41
+
{{ end }}
42
+
43
+
{{ define "trending" }}
44
+
<div class="w-full md:mx-0 py-4">
45
+
<div class="px-6 pb-4">
46
+
<h3 class="text-xl font-bold dark:text-white flex items-center gap-2">
47
+
Trending
48
+
{{ i "trending-up" "size-4 flex-shrink-0" }}
49
+
</h3>
50
+
</div>
51
+
<div class="flex gap-4 overflow-x-auto scrollbar-hide items-stretch">
52
+
{{ range $index, $repo := .Repos }}
53
+
<div class="flex-none h-full border border-gray-200 dark:border-gray-700 rounded-sm w-96">
54
+
{{ template "user/fragments/repoCard" (list $ $repo true) }}
55
+
</div>
56
+
{{ else }}
57
+
<div class="py-8 px-6 bg-white dark:bg-gray-800 border border-gray-200 dark:border-gray-700 rounded-sm">
58
+
<div class="text-sm text-gray-500 dark:text-gray-400 text-center">
59
+
No trending repositories this week
60
+
</div>
61
+
</div>
62
+
{{ end }}
63
+
</div>
64
+
</div>
65
+
{{ end }}
66
+
67
+
{{ define "timeline" }}
68
+
<div class="py-4">
69
+
<div class="px-6 pb-4">
70
+
<p class="text-xl font-bold dark:text-white">Timeline</p>
71
+
</div>
72
+
73
+
<div class="flex flex-col gap-4">
74
+
{{ range $i, $e := .Timeline }}
75
+
<div class="relative">
76
+
{{ if ne $i 0 }}
77
+
<div class="absolute left-8 -top-4 w-px h-4 bg-gray-300 dark:bg-gray-600"></div>
78
+
{{ end }}
79
+
{{ with $e }}
80
+
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
81
+
{{ if .Repo }}
82
+
{{ block "repoEvent" (list $ .Repo .Source) }} {{ end }}
83
+
{{ else if .Star }}
84
+
{{ block "starEvent" (list $ .Star) }} {{ end }}
85
+
{{ else if .Follow }}
86
+
{{ block "followEvent" (list $ .Follow .Profile .FollowStats) }} {{ end }}
87
+
{{ end }}
88
+
</div>
89
+
{{ end }}
90
+
</div>
91
+
{{ end }}
92
+
</div>
93
+
</div>
94
+
{{ end }}
95
+
96
+
{{ define "repoEvent" }}
97
+
{{ $root := index . 0 }}
98
+
{{ $repo := index . 1 }}
99
+
{{ $source := index . 2 }}
100
+
{{ $userHandle := resolve $repo.Did }}
101
+
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
102
+
{{ template "user/fragments/picHandleLink" $repo.Did }}
103
+
{{ with $source }}
104
+
{{ $sourceDid := resolve .Did }}
105
+
forked
106
+
<a href="/{{ $sourceDid }}/{{ .Name }}"class="no-underline hover:underline">
107
+
{{ $sourceDid }}/{{ .Name }}
108
+
</a>
109
+
to
110
+
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">{{ $repo.Name }}</a>
111
+
{{ else }}
112
+
created
113
+
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">
114
+
{{ $repo.Name }}
115
+
</a>
116
+
{{ end }}
117
+
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $repo.Created }}</span>
118
+
</div>
119
+
{{ with $repo }}
120
+
{{ template "user/fragments/repoCard" (list $root . true) }}
121
+
{{ end }}
122
+
{{ end }}
123
+
124
+
{{ define "starEvent" }}
125
+
{{ $root := index . 0 }}
126
+
{{ $star := index . 1 }}
127
+
{{ with $star }}
128
+
{{ $starrerHandle := resolve .StarredByDid }}
129
+
{{ $repoOwnerHandle := resolve .Repo.Did }}
130
+
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
131
+
{{ template "user/fragments/picHandleLink" $starrerHandle }}
132
+
starred
133
+
<a href="/{{ $repoOwnerHandle }}/{{ .Repo.Name }}" class="no-underline hover:underline">
134
+
{{ $repoOwnerHandle | truncateAt30 }}/{{ .Repo.Name }}
135
+
</a>
136
+
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" .Created }}</span>
137
+
</div>
138
+
{{ with .Repo }}
139
+
{{ template "user/fragments/repoCard" (list $root . true) }}
140
+
{{ end }}
141
+
{{ end }}
142
+
{{ end }}
143
+
144
+
145
+
{{ define "followEvent" }}
146
+
{{ $root := index . 0 }}
147
+
{{ $follow := index . 1 }}
148
+
{{ $profile := index . 2 }}
149
+
{{ $stat := index . 3 }}
150
+
151
+
{{ $userHandle := resolve $follow.UserDid }}
152
+
{{ $subjectHandle := resolve $follow.SubjectDid }}
153
+
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
154
+
{{ template "user/fragments/picHandleLink" $userHandle }}
155
+
followed
156
+
{{ template "user/fragments/picHandleLink" $subjectHandle }}
157
+
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $follow.FollowedAt }}</span>
158
+
</div>
159
+
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800 flex items-center gap-4">
160
+
<div class="flex-shrink-0 max-h-full w-24 h-24">
161
+
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $subjectHandle }}" />
162
+
</div>
163
+
164
+
<div class="flex-1 min-h-0 justify-around flex flex-col">
165
+
<a href="/{{ $subjectHandle }}">
166
+
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $subjectHandle | truncateAt30 }}</span>
167
+
</a>
168
+
{{ with $profile }}
169
+
{{ with .Description }}
170
+
<p class="text-sm pb-2 md:pb-2">{{.}}</p>
171
+
{{ end }}
172
+
{{ end }}
173
+
{{ with $stat }}
174
+
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full text-sm">
175
+
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
176
+
<span id="followers">{{ .Followers }} followers</span>
177
+
<span class="select-none after:content-['ยท']"></span>
178
+
<span id="following">{{ .Following }} following</span>
179
+
</div>
180
+
{{ end }}
181
+
</div>
182
+
</div>
183
+
{{ end }}
-162
appview/pages/templates/timeline.html
-162
appview/pages/templates/timeline.html
···
1
-
{{ define "title" }}timeline{{ end }}
2
-
3
-
{{ define "extrameta" }}
4
-
<meta property="og:title" content="timeline ยท tangled" />
5
-
<meta property="og:type" content="object" />
6
-
<meta property="og:url" content="https://tangled.sh" />
7
-
<meta property="og:description" content="see what's tangling" />
8
-
{{ end }}
9
-
10
-
{{ define "topbar" }}
11
-
{{ template "layouts/topbar" $ }}
12
-
{{ end }}
13
-
14
-
{{ define "content" }}
15
-
{{ with .LoggedInUser }}
16
-
{{ block "timeline" $ }}{{ end }}
17
-
{{ else }}
18
-
{{ block "hero" $ }}{{ end }}
19
-
{{ block "timeline" $ }}{{ end }}
20
-
{{ end }}
21
-
{{ end }}
22
-
23
-
{{ define "hero" }}
24
-
<div class="flex flex-col text-black dark:text-white p-6 gap-6 max-w-xl">
25
-
<div class="font-bold text-4xl">tightly-knit<br>social coding.</div>
26
-
27
-
<p class="text-lg">
28
-
tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
29
-
</p>
30
-
<p class="text-lg">
31
-
we envision a place where developers have complete ownership of their
32
-
code, open source communities can freely self-govern and most
33
-
importantly, coding can be social and fun again.
34
-
</p>
35
-
36
-
<div class="flex gap-6 items-center">
37
-
<a href="/signup" class="no-underline hover:no-underline ">
38
-
<button class="btn-create flex gap-2 px-4 items-center">
39
-
join now {{ i "arrow-right" "size-4" }}
40
-
</button>
41
-
</a>
42
-
</div>
43
-
</div>
44
-
{{ end }}
45
-
46
-
{{ define "timeline" }}
47
-
<div>
48
-
<div class="p-6">
49
-
<p class="text-xl font-bold dark:text-white">Timeline</p>
50
-
</div>
51
-
52
-
<div class="flex flex-col gap-4">
53
-
{{ range $i, $e := .Timeline }}
54
-
<div class="relative">
55
-
{{ if ne $i 0 }}
56
-
<div class="absolute left-8 -top-4 w-px h-4 bg-gray-300 dark:bg-gray-600"></div>
57
-
{{ end }}
58
-
{{ with $e }}
59
-
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
60
-
{{ if .Repo }}
61
-
{{ block "repoEvent" (list $ .Repo .Source) }} {{ end }}
62
-
{{ else if .Star }}
63
-
{{ block "starEvent" (list $ .Star) }} {{ end }}
64
-
{{ else if .Follow }}
65
-
{{ block "followEvent" (list $ .Follow .Profile .FollowStats) }} {{ end }}
66
-
{{ end }}
67
-
</div>
68
-
{{ end }}
69
-
</div>
70
-
{{ end }}
71
-
</div>
72
-
</div>
73
-
{{ end }}
74
-
75
-
{{ define "repoEvent" }}
76
-
{{ $root := index . 0 }}
77
-
{{ $repo := index . 1 }}
78
-
{{ $source := index . 2 }}
79
-
{{ $userHandle := resolve $repo.Did }}
80
-
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
81
-
{{ template "user/fragments/picHandleLink" $repo.Did }}
82
-
{{ with $source }}
83
-
{{ $sourceDid := resolve .Did }}
84
-
forked
85
-
<a href="/{{ $sourceDid }}/{{ .Name }}"class="no-underline hover:underline">
86
-
{{ $sourceDid }}/{{ .Name }}
87
-
</a>
88
-
to
89
-
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">{{ $repo.Name }}</a>
90
-
{{ else }}
91
-
created
92
-
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">
93
-
{{ $repo.Name }}
94
-
</a>
95
-
{{ end }}
96
-
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $repo.Created }}</span>
97
-
</div>
98
-
{{ with $repo }}
99
-
{{ template "user/fragments/repoCard" (list $root . true) }}
100
-
{{ end }}
101
-
{{ end }}
102
-
103
-
{{ define "starEvent" }}
104
-
{{ $root := index . 0 }}
105
-
{{ $star := index . 1 }}
106
-
{{ with $star }}
107
-
{{ $starrerHandle := resolve .StarredByDid }}
108
-
{{ $repoOwnerHandle := resolve .Repo.Did }}
109
-
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
110
-
{{ template "user/fragments/picHandleLink" $starrerHandle }}
111
-
starred
112
-
<a href="/{{ $repoOwnerHandle }}/{{ .Repo.Name }}" class="no-underline hover:underline">
113
-
{{ $repoOwnerHandle | truncateAt30 }}/{{ .Repo.Name }}
114
-
</a>
115
-
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" .Created }}</span>
116
-
</div>
117
-
{{ with .Repo }}
118
-
{{ template "user/fragments/repoCard" (list $root . true) }}
119
-
{{ end }}
120
-
{{ end }}
121
-
{{ end }}
122
-
123
-
124
-
{{ define "followEvent" }}
125
-
{{ $root := index . 0 }}
126
-
{{ $follow := index . 1 }}
127
-
{{ $profile := index . 2 }}
128
-
{{ $stat := index . 3 }}
129
-
130
-
{{ $userHandle := resolve $follow.UserDid }}
131
-
{{ $subjectHandle := resolve $follow.SubjectDid }}
132
-
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
133
-
{{ template "user/fragments/picHandleLink" $userHandle }}
134
-
followed
135
-
{{ template "user/fragments/picHandleLink" $subjectHandle }}
136
-
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $follow.FollowedAt }}</span>
137
-
</div>
138
-
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800 flex items-center gap-4">
139
-
<div class="flex-shrink-0 max-h-full w-24 h-24">
140
-
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $subjectHandle }}" />
141
-
</div>
142
-
143
-
<div class="flex-1 min-h-0 justify-around flex flex-col">
144
-
<a href="/{{ $subjectHandle }}">
145
-
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $subjectHandle | truncateAt30 }}</span>
146
-
</a>
147
-
{{ with $profile }}
148
-
{{ with .Description }}
149
-
<p class="text-sm pb-2 md:pb-2">{{.}}</p>
150
-
{{ end }}
151
-
{{ end }}
152
-
{{ with $stat }}
153
-
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full text-sm">
154
-
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
155
-
<span id="followers">{{ .Followers }} followers</span>
156
-
<span class="select-none after:content-['ยท']"></span>
157
-
<span id="following">{{ .Following }} following</span>
158
-
</div>
159
-
{{ end }}
160
-
</div>
161
-
</div>
162
-
{{ end }}
+5
-5
appview/pages/templates/user/fragments/repoCard.html
+5
-5
appview/pages/templates/user/fragments/repoCard.html
···
4
4
{{ $fullName := index . 2 }}
5
5
6
6
{{ with $repo }}
7
-
<div class="py-4 px-6 gap-2 flex flex-col drop-shadow-sm rounded bg-white dark:bg-gray-800">
7
+
<div class="py-4 px-6 gap-1 flex flex-col drop-shadow-sm rounded bg-white dark:bg-gray-800 min-h-32">
8
8
<div class="font-medium dark:text-white flex items-center">
9
9
{{ if .Source }}
10
10
{{ i "git-fork" "w-4 h-4 mr-1.5 shrink-0" }}
···
14
14
15
15
{{ $repoOwner := resolve .Did }}
16
16
{{- if $fullName -}}
17
-
<a href="/{{ $repoOwner }}/{{ .Name }}">{{ $repoOwner }}/{{ .Name }}</a>
17
+
<a href="/{{ $repoOwner }}/{{ .Name }}" class="truncate">{{ $repoOwner }}/{{ .Name }}</a>
18
18
{{- else -}}
19
-
<a href="/{{ $repoOwner }}/{{ .Name }}">{{ .Name }}</a>
19
+
<a href="/{{ $repoOwner }}/{{ .Name }}" class="truncate">{{ .Name }}</a>
20
20
{{- end -}}
21
21
</div>
22
22
{{ with .Description }}
23
-
<div class="text-gray-600 dark:text-gray-300 text-sm">
24
-
{{ . }}
23
+
<div class="text-gray-600 dark:text-gray-300 text-sm line-clamp-2">
24
+
{{ . | description }}
25
25
</div>
26
26
{{ end }}
27
27
+30
-29
appview/pulls/pulls.go
+30
-29
appview/pulls/pulls.go
···
19
19
"tangled.sh/tangled.sh/core/appview/notify"
20
20
"tangled.sh/tangled.sh/core/appview/oauth"
21
21
"tangled.sh/tangled.sh/core/appview/pages"
22
+
"tangled.sh/tangled.sh/core/appview/pages/markup"
22
23
"tangled.sh/tangled.sh/core/appview/reporesolver"
23
24
"tangled.sh/tangled.sh/core/idresolver"
24
25
"tangled.sh/tangled.sh/core/knotclient"
···
28
29
29
30
"github.com/bluekeyes/go-gitdiff/gitdiff"
30
31
comatproto "github.com/bluesky-social/indigo/api/atproto"
31
-
"github.com/bluesky-social/indigo/atproto/syntax"
32
32
lexutil "github.com/bluesky-social/indigo/lex/util"
33
33
"github.com/go-chi/chi/v5"
34
34
"github.com/google/uuid"
···
246
246
patch = mergeable.CombinedPatch()
247
247
}
248
248
249
-
resp, err := ksClient.MergeCheck([]byte(patch), f.OwnerDid(), f.RepoName, pull.TargetBranch)
249
+
resp, err := ksClient.MergeCheck([]byte(patch), f.OwnerDid(), f.Name, pull.TargetBranch)
250
250
if err != nil {
251
251
log.Println("failed to check for mergeability:", err)
252
252
return types.MergeCheckResponse{
···
307
307
// pulls within the same repo
308
308
knot = f.Knot
309
309
ownerDid = f.OwnerDid()
310
-
repoName = f.RepoName
310
+
repoName = f.Name
311
311
}
312
312
313
313
us, err := knotclient.NewUnsignedClient(knot, s.config.Core.Dev)
···
483
483
484
484
pulls, err := db.GetPulls(
485
485
s.db,
486
-
db.FilterEq("repo_at", f.RepoAt),
486
+
db.FilterEq("repo_at", f.RepoAt()),
487
487
db.FilterEq("state", state),
488
488
)
489
489
if err != nil {
···
610
610
createdAt := time.Now().Format(time.RFC3339)
611
611
ownerDid := user.Did
612
612
613
-
pullAt, err := db.GetPullAt(s.db, f.RepoAt, pull.PullId)
613
+
pullAt, err := db.GetPullAt(s.db, f.RepoAt(), pull.PullId)
614
614
if err != nil {
615
615
log.Println("failed to get pull at", err)
616
616
s.pages.Notice(w, "pull-comment", "Failed to create comment.")
617
617
return
618
618
}
619
619
620
-
atUri := f.RepoAt.String()
620
+
atUri := f.RepoAt().String()
621
621
client, err := s.oauth.AuthorizedClient(r)
622
622
if err != nil {
623
623
log.Println("failed to get authorized client", err)
···
646
646
647
647
comment := &db.PullComment{
648
648
OwnerDid: user.Did,
649
-
RepoAt: f.RepoAt.String(),
649
+
RepoAt: f.RepoAt().String(),
650
650
PullId: pull.PullId,
651
651
Body: body,
652
652
CommentAt: atResp.Uri,
···
692
692
return
693
693
}
694
694
695
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
695
+
result, err := us.Branches(f.OwnerDid(), f.Name)
696
696
if err != nil {
697
697
log.Println("failed to fetch branches", err)
698
698
return
···
738
738
if isPatchBased && !patchutil.IsFormatPatch(patch) {
739
739
if title == "" {
740
740
s.pages.Notice(w, "pull", "Title is required for git-diff patches.")
741
+
return
742
+
}
743
+
sanitizer := markup.NewSanitizer()
744
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(title)); (st) == "" {
745
+
s.pages.Notice(w, "pull", "Title is empty after HTML sanitization")
741
746
return
742
747
}
743
748
}
···
816
821
return
817
822
}
818
823
819
-
comparison, err := ksClient.Compare(f.OwnerDid(), f.RepoName, targetBranch, sourceBranch)
824
+
comparison, err := ksClient.Compare(f.OwnerDid(), f.Name, targetBranch, sourceBranch)
820
825
if err != nil {
821
826
log.Println("failed to compare", err)
822
827
s.pages.Notice(w, "pull", err.Error())
···
918
923
return
919
924
}
920
925
921
-
forkAtUri, err := syntax.ParseATURI(fork.AtUri)
922
-
if err != nil {
923
-
log.Println("failed to parse fork AT URI", err)
924
-
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
925
-
return
926
-
}
926
+
forkAtUri := fork.RepoAt()
927
+
forkAtUriStr := forkAtUri.String()
927
928
928
929
pullSource := &db.PullSource{
929
930
Branch: sourceBranch,
···
931
932
}
932
933
recordPullSource := &tangled.RepoPull_Source{
933
934
Branch: sourceBranch,
934
-
Repo: &fork.AtUri,
935
+
Repo: &forkAtUriStr,
935
936
Sha: sourceRev,
936
937
}
937
938
···
1007
1008
Body: body,
1008
1009
TargetBranch: targetBranch,
1009
1010
OwnerDid: user.Did,
1010
-
RepoAt: f.RepoAt,
1011
+
RepoAt: f.RepoAt(),
1011
1012
Rkey: rkey,
1012
1013
Submissions: []*db.PullSubmission{
1013
1014
&initialSubmission,
···
1020
1021
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1021
1022
return
1022
1023
}
1023
-
pullId, err := db.NextPullId(tx, f.RepoAt)
1024
+
pullId, err := db.NextPullId(tx, f.RepoAt())
1024
1025
if err != nil {
1025
1026
log.Println("failed to get pull id", err)
1026
1027
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
···
1035
1036
Val: &tangled.RepoPull{
1036
1037
Title: title,
1037
1038
PullId: int64(pullId),
1038
-
TargetRepo: string(f.RepoAt),
1039
+
TargetRepo: string(f.RepoAt()),
1039
1040
TargetBranch: targetBranch,
1040
1041
Patch: patch,
1041
1042
Source: recordPullSource,
···
1213
1214
return
1214
1215
}
1215
1216
1216
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
1217
+
result, err := us.Branches(f.OwnerDid(), f.Name)
1217
1218
if err != nil {
1218
1219
log.Println("failed to reach knotserver", err)
1219
1220
return
···
1297
1298
return
1298
1299
}
1299
1300
1300
-
targetResult, err := targetBranchesClient.Branches(f.OwnerDid(), f.RepoName)
1301
+
targetResult, err := targetBranchesClient.Branches(f.OwnerDid(), f.Name)
1301
1302
if err != nil {
1302
1303
log.Println("failed to reach knotserver for target branches", err)
1303
1304
return
···
1413
1414
return
1414
1415
}
1415
1416
1416
-
comparison, err := ksClient.Compare(f.OwnerDid(), f.RepoName, pull.TargetBranch, pull.PullSource.Branch)
1417
+
comparison, err := ksClient.Compare(f.OwnerDid(), f.Name, pull.TargetBranch, pull.PullSource.Branch)
1417
1418
if err != nil {
1418
1419
log.Printf("compare request failed: %s", err)
1419
1420
s.pages.Notice(w, "resubmit-error", err.Error())
···
1597
1598
Val: &tangled.RepoPull{
1598
1599
Title: pull.Title,
1599
1600
PullId: int64(pull.PullId),
1600
-
TargetRepo: string(f.RepoAt),
1601
+
TargetRepo: string(f.RepoAt()),
1601
1602
TargetBranch: pull.TargetBranch,
1602
1603
Patch: patch, // new patch
1603
1604
Source: recordPullSource,
···
1934
1935
}
1935
1936
1936
1937
// Merge the pull request
1937
-
resp, err := ksClient.Merge([]byte(patch), f.OwnerDid(), f.RepoName, pull.TargetBranch, pull.Title, pull.Body, ident.Handle.String(), email.Address)
1938
+
resp, err := ksClient.Merge([]byte(patch), f.OwnerDid(), f.Name, pull.TargetBranch, pull.Title, pull.Body, ident.Handle.String(), email.Address)
1938
1939
if err != nil {
1939
1940
log.Printf("failed to merge pull request: %s", err)
1940
1941
s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···
1956
1957
defer tx.Rollback()
1957
1958
1958
1959
for _, p := range pullsToMerge {
1959
-
err := db.MergePull(tx, f.RepoAt, p.PullId)
1960
+
err := db.MergePull(tx, f.RepoAt(), p.PullId)
1960
1961
if err != nil {
1961
1962
log.Printf("failed to update pull request status in database: %s", err)
1962
1963
s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···
1972
1973
return
1973
1974
}
1974
1975
1975
-
s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.RepoName, pull.PullId))
1976
+
s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId))
1976
1977
}
1977
1978
1978
1979
func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) {
···
2024
2025
2025
2026
for _, p := range pullsToClose {
2026
2027
// Close the pull in the database
2027
-
err = db.ClosePull(tx, f.RepoAt, p.PullId)
2028
+
err = db.ClosePull(tx, f.RepoAt(), p.PullId)
2028
2029
if err != nil {
2029
2030
log.Println("failed to close pull", err)
2030
2031
s.pages.Notice(w, "pull-close", "Failed to close pull.")
···
2092
2093
2093
2094
for _, p := range pullsToReopen {
2094
2095
// Close the pull in the database
2095
-
err = db.ReopenPull(tx, f.RepoAt, p.PullId)
2096
+
err = db.ReopenPull(tx, f.RepoAt(), p.PullId)
2096
2097
if err != nil {
2097
2098
log.Println("failed to close pull", err)
2098
2099
s.pages.Notice(w, "pull-close", "Failed to close pull.")
···
2144
2145
Body: body,
2145
2146
TargetBranch: targetBranch,
2146
2147
OwnerDid: user.Did,
2147
-
RepoAt: f.RepoAt,
2148
+
RepoAt: f.RepoAt(),
2148
2149
Rkey: rkey,
2149
2150
Submissions: []*db.PullSubmission{
2150
2151
&initialSubmission,
+6
-6
appview/repo/artifact.go
+6
-6
appview/repo/artifact.go
···
76
76
Artifact: uploadBlobResp.Blob,
77
77
CreatedAt: createdAt.Format(time.RFC3339),
78
78
Name: handler.Filename,
79
-
Repo: f.RepoAt.String(),
79
+
Repo: f.RepoAt().String(),
80
80
Tag: tag.Tag.Hash[:],
81
81
},
82
82
},
···
100
100
artifact := db.Artifact{
101
101
Did: user.Did,
102
102
Rkey: rkey,
103
-
RepoAt: f.RepoAt,
103
+
RepoAt: f.RepoAt(),
104
104
Tag: tag.Tag.Hash,
105
105
CreatedAt: createdAt,
106
106
BlobCid: cid.Cid(uploadBlobResp.Blob.Ref),
···
155
155
156
156
artifacts, err := db.GetArtifact(
157
157
rp.db,
158
-
db.FilterEq("repo_at", f.RepoAt),
158
+
db.FilterEq("repo_at", f.RepoAt()),
159
159
db.FilterEq("tag", tag.Tag.Hash[:]),
160
160
db.FilterEq("name", filename),
161
161
)
···
197
197
198
198
artifacts, err := db.GetArtifact(
199
199
rp.db,
200
-
db.FilterEq("repo_at", f.RepoAt),
200
+
db.FilterEq("repo_at", f.RepoAt()),
201
201
db.FilterEq("tag", tag[:]),
202
202
db.FilterEq("name", filename),
203
203
)
···
239
239
defer tx.Rollback()
240
240
241
241
err = db.DeleteArtifact(tx,
242
-
db.FilterEq("repo_at", f.RepoAt),
242
+
db.FilterEq("repo_at", f.RepoAt()),
243
243
db.FilterEq("tag", artifact.Tag[:]),
244
244
db.FilterEq("name", filename),
245
245
)
···
270
270
return nil, err
271
271
}
272
272
273
-
result, err := us.Tags(f.OwnerDid(), f.RepoName)
273
+
result, err := us.Tags(f.OwnerDid(), f.Name)
274
274
if err != nil {
275
275
log.Println("failed to reach knotserver", err)
276
276
return nil, err
+165
appview/repo/feed.go
+165
appview/repo/feed.go
···
1
+
package repo
2
+
3
+
import (
4
+
"context"
5
+
"fmt"
6
+
"log"
7
+
"net/http"
8
+
"slices"
9
+
"time"
10
+
11
+
"tangled.sh/tangled.sh/core/appview/db"
12
+
"tangled.sh/tangled.sh/core/appview/reporesolver"
13
+
14
+
"github.com/bluesky-social/indigo/atproto/syntax"
15
+
"github.com/gorilla/feeds"
16
+
)
17
+
18
+
func (rp *Repo) getRepoFeed(ctx context.Context, f *reporesolver.ResolvedRepo) (*feeds.Feed, error) {
19
+
const feedLimitPerType = 100
20
+
21
+
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt()))
22
+
if err != nil {
23
+
return nil, err
24
+
}
25
+
26
+
issues, err := db.GetIssuesWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt()))
27
+
if err != nil {
28
+
return nil, err
29
+
}
30
+
31
+
feed := &feeds.Feed{
32
+
Title: fmt.Sprintf("activity feed for %s", f.OwnerSlashRepo()),
33
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, f.OwnerSlashRepo()), Type: "text/html", Rel: "alternate"},
34
+
Items: make([]*feeds.Item, 0),
35
+
Updated: time.UnixMilli(0),
36
+
}
37
+
38
+
for _, pull := range pulls {
39
+
items, err := rp.createPullItems(ctx, pull, f)
40
+
if err != nil {
41
+
return nil, err
42
+
}
43
+
feed.Items = append(feed.Items, items...)
44
+
}
45
+
46
+
for _, issue := range issues {
47
+
item, err := rp.createIssueItem(ctx, issue, f)
48
+
if err != nil {
49
+
return nil, err
50
+
}
51
+
feed.Items = append(feed.Items, item)
52
+
}
53
+
54
+
slices.SortFunc(feed.Items, func(a, b *feeds.Item) int {
55
+
if a.Created.After(b.Created) {
56
+
return -1
57
+
}
58
+
return 1
59
+
})
60
+
61
+
if len(feed.Items) > 0 {
62
+
feed.Updated = feed.Items[0].Created
63
+
}
64
+
65
+
return feed, nil
66
+
}
67
+
68
+
func (rp *Repo) createPullItems(ctx context.Context, pull *db.Pull, f *reporesolver.ResolvedRepo) ([]*feeds.Item, error) {
69
+
owner, err := rp.idResolver.ResolveIdent(ctx, pull.OwnerDid)
70
+
if err != nil {
71
+
return nil, err
72
+
}
73
+
74
+
var items []*feeds.Item
75
+
76
+
state := rp.getPullState(pull)
77
+
description := rp.buildPullDescription(owner.Handle, state, pull, f.OwnerSlashRepo())
78
+
79
+
mainItem := &feeds.Item{
80
+
Title: fmt.Sprintf("[PR #%d] %s", pull.PullId, pull.Title),
81
+
Description: description,
82
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId)},
83
+
Created: pull.Created,
84
+
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
85
+
}
86
+
items = append(items, mainItem)
87
+
88
+
for _, round := range pull.Submissions {
89
+
if round == nil || round.RoundNumber == 0 {
90
+
continue
91
+
}
92
+
93
+
roundItem := &feeds.Item{
94
+
Title: fmt.Sprintf("[PR #%d] %s (round #%d)", pull.PullId, pull.Title, round.RoundNumber),
95
+
Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in %s", owner.Handle, round.RoundNumber, pull.PullId, f.OwnerSlashRepo()),
96
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId, round.RoundNumber)},
97
+
Created: round.Created,
98
+
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
99
+
}
100
+
items = append(items, roundItem)
101
+
}
102
+
103
+
return items, nil
104
+
}
105
+
106
+
func (rp *Repo) createIssueItem(ctx context.Context, issue db.Issue, f *reporesolver.ResolvedRepo) (*feeds.Item, error) {
107
+
owner, err := rp.idResolver.ResolveIdent(ctx, issue.OwnerDid)
108
+
if err != nil {
109
+
return nil, err
110
+
}
111
+
112
+
state := "closed"
113
+
if issue.Open {
114
+
state = "opened"
115
+
}
116
+
117
+
return &feeds.Item{
118
+
Title: fmt.Sprintf("[Issue #%d] %s", issue.IssueId, issue.Title),
119
+
Description: fmt.Sprintf("@%s %s issue #%d in %s", owner.Handle, state, issue.IssueId, f.OwnerSlashRepo()),
120
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), issue.IssueId)},
121
+
Created: issue.Created,
122
+
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
123
+
}, nil
124
+
}
125
+
126
+
func (rp *Repo) getPullState(pull *db.Pull) string {
127
+
if pull.State == db.PullOpen {
128
+
return "opened"
129
+
}
130
+
return pull.State.String()
131
+
}
132
+
133
+
func (rp *Repo) buildPullDescription(handle syntax.Handle, state string, pull *db.Pull, repoName string) string {
134
+
base := fmt.Sprintf("@%s %s pull request #%d", handle, state, pull.PullId)
135
+
136
+
if pull.State == db.PullMerged {
137
+
return fmt.Sprintf("%s (on round #%d) in %s", base, pull.LastRoundNumber(), repoName)
138
+
}
139
+
140
+
return fmt.Sprintf("%s in %s", base, repoName)
141
+
}
142
+
143
+
func (rp *Repo) RepoAtomFeed(w http.ResponseWriter, r *http.Request) {
144
+
f, err := rp.repoResolver.Resolve(r)
145
+
if err != nil {
146
+
log.Println("failed to fully resolve repo:", err)
147
+
return
148
+
}
149
+
150
+
feed, err := rp.getRepoFeed(r.Context(), f)
151
+
if err != nil {
152
+
log.Println("failed to get repo feed:", err)
153
+
rp.pages.Error500(w)
154
+
return
155
+
}
156
+
157
+
atom, err := feed.ToAtom()
158
+
if err != nil {
159
+
rp.pages.Error500(w)
160
+
return
161
+
}
162
+
163
+
w.Header().Set("content-type", "application/atom+xml")
164
+
w.Write([]byte(atom))
165
+
}
+15
-12
appview/repo/index.go
+15
-12
appview/repo/index.go
···
24
24
25
25
func (rp *Repo) RepoIndex(w http.ResponseWriter, r *http.Request) {
26
26
ref := chi.URLParam(r, "ref")
27
+
27
28
f, err := rp.repoResolver.Resolve(r)
28
29
if err != nil {
29
30
log.Println("failed to fully resolve repo", err)
···
37
38
return
38
39
}
39
40
40
-
result, err := us.Index(f.OwnerDid(), f.RepoName, ref)
41
+
result, err := us.Index(f.OwnerDid(), f.Name, ref)
41
42
if err != nil {
42
43
rp.pages.Error503(w)
43
44
log.Println("failed to reach knotserver", err)
···
118
119
119
120
var forkInfo *types.ForkInfo
120
121
if user != nil && (repoInfo.Roles.IsOwner() || repoInfo.Roles.IsCollaborator()) {
121
-
forkInfo, err = getForkInfo(repoInfo, rp, f, user, signedClient)
122
+
forkInfo, err = getForkInfo(repoInfo, rp, f, result.Ref, user, signedClient)
122
123
if err != nil {
123
124
log.Printf("Failed to fetch fork information: %v", err)
124
125
return
···
126
127
}
127
128
128
129
// TODO: a bit dirty
129
-
languageInfo, err := rp.getLanguageInfo(f, signedClient, chi.URLParam(r, "ref") == "")
130
+
languageInfo, err := rp.getLanguageInfo(f, signedClient, result.Ref, ref == "")
130
131
if err != nil {
131
132
log.Printf("failed to compute language percentages: %s", err)
132
133
// non-fatal
···
161
162
func (rp *Repo) getLanguageInfo(
162
163
f *reporesolver.ResolvedRepo,
163
164
signedClient *knotclient.SignedClient,
165
+
currentRef string,
164
166
isDefaultRef bool,
165
167
) ([]types.RepoLanguageDetails, error) {
166
168
// first attempt to fetch from db
167
169
langs, err := db.GetRepoLanguages(
168
170
rp.db,
169
-
db.FilterEq("repo_at", f.RepoAt),
170
-
db.FilterEq("ref", f.Ref),
171
+
db.FilterEq("repo_at", f.RepoAt()),
172
+
db.FilterEq("ref", currentRef),
171
173
)
172
174
173
175
if err != nil || langs == nil {
174
176
// non-fatal, fetch langs from ks
175
-
ls, err := signedClient.RepoLanguages(f.OwnerDid(), f.RepoName, f.Ref)
177
+
ls, err := signedClient.RepoLanguages(f.OwnerDid(), f.Name, currentRef)
176
178
if err != nil {
177
179
return nil, err
178
180
}
···
182
184
183
185
for l, s := range ls.Languages {
184
186
langs = append(langs, db.RepoLanguage{
185
-
RepoAt: f.RepoAt,
186
-
Ref: f.Ref,
187
+
RepoAt: f.RepoAt(),
188
+
Ref: currentRef,
187
189
IsDefaultRef: isDefaultRef,
188
190
Language: l,
189
191
Bytes: s,
···
234
236
repoInfo repoinfo.RepoInfo,
235
237
rp *Repo,
236
238
f *reporesolver.ResolvedRepo,
239
+
currentRef string,
237
240
user *oauth.User,
238
241
signedClient *knotclient.SignedClient,
239
242
) (*types.ForkInfo, error) {
···
264
267
}
265
268
266
269
if !slices.ContainsFunc(result.Branches, func(branch types.Branch) bool {
267
-
return branch.Name == f.Ref
270
+
return branch.Name == currentRef
268
271
}) {
269
272
forkInfo.Status = types.MissingBranch
270
273
return &forkInfo, nil
271
274
}
272
275
273
-
newHiddenRefResp, err := signedClient.NewHiddenRef(user.Did, repoInfo.Name, f.Ref, f.Ref)
276
+
newHiddenRefResp, err := signedClient.NewHiddenRef(user.Did, repoInfo.Name, currentRef, currentRef)
274
277
if err != nil || newHiddenRefResp.StatusCode != http.StatusNoContent {
275
278
log.Printf("failed to update tracking branch: %s", err)
276
279
return nil, err
277
280
}
278
281
279
-
hiddenRef := fmt.Sprintf("hidden/%s/%s", f.Ref, f.Ref)
282
+
hiddenRef := fmt.Sprintf("hidden/%s/%s", currentRef, currentRef)
280
283
281
284
var status types.AncestorCheckResponse
282
-
forkSyncableResp, err := signedClient.RepoForkAheadBehind(user.Did, string(f.RepoAt), repoInfo.Name, f.Ref, hiddenRef)
285
+
forkSyncableResp, err := signedClient.RepoForkAheadBehind(user.Did, string(f.RepoAt()), repoInfo.Name, currentRef, hiddenRef)
283
286
if err != nil {
284
287
log.Printf("failed to check if fork is ahead/behind: %s", err)
285
288
return nil, err
+70
-50
appview/repo/repo.go
+70
-50
appview/repo/repo.go
···
95
95
} else {
96
96
uri = "https"
97
97
}
98
-
url := fmt.Sprintf("%s://%s/%s/%s/archive/%s.tar.gz", uri, f.Knot, f.OwnerDid(), f.RepoName, url.PathEscape(refParam))
98
+
url := fmt.Sprintf("%s://%s/%s/%s/archive/%s.tar.gz", uri, f.Knot, f.OwnerDid(), f.Name, url.PathEscape(refParam))
99
99
100
100
http.Redirect(w, r, url, http.StatusFound)
101
101
}
···
123
123
return
124
124
}
125
125
126
-
repolog, err := us.Log(f.OwnerDid(), f.RepoName, ref, page)
126
+
repolog, err := us.Log(f.OwnerDid(), f.Name, ref, page)
127
127
if err != nil {
128
128
log.Println("failed to reach knotserver", err)
129
129
return
130
130
}
131
131
132
-
tagResult, err := us.Tags(f.OwnerDid(), f.RepoName)
132
+
tagResult, err := us.Tags(f.OwnerDid(), f.Name)
133
133
if err != nil {
134
134
log.Println("failed to reach knotserver", err)
135
135
return
···
144
144
tagMap[hash] = append(tagMap[hash], tag.Name)
145
145
}
146
146
147
-
branchResult, err := us.Branches(f.OwnerDid(), f.RepoName)
147
+
branchResult, err := us.Branches(f.OwnerDid(), f.Name)
148
148
if err != nil {
149
149
log.Println("failed to reach knotserver", err)
150
150
return
···
212
212
return
213
213
}
214
214
215
-
repoAt := f.RepoAt
215
+
repoAt := f.RepoAt()
216
216
rkey := repoAt.RecordKey().String()
217
217
if rkey == "" {
218
218
log.Println("invalid aturi for repo", err)
···
262
262
Record: &lexutil.LexiconTypeDecoder{
263
263
Val: &tangled.Repo{
264
264
Knot: f.Knot,
265
-
Name: f.RepoName,
265
+
Name: f.Name,
266
266
Owner: user.Did,
267
-
CreatedAt: f.CreatedAt,
267
+
CreatedAt: f.Created.Format(time.RFC3339),
268
268
Description: &newDescription,
269
269
Spindle: &f.Spindle,
270
270
},
···
310
310
return
311
311
}
312
312
313
-
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/commit/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref))
313
+
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/commit/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref))
314
314
if err != nil {
315
315
log.Println("failed to reach knotserver", err)
316
316
return
···
375
375
if !rp.config.Core.Dev {
376
376
protocol = "https"
377
377
}
378
-
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/tree/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, treePath))
378
+
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/tree/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref, treePath))
379
379
if err != nil {
380
380
log.Println("failed to reach knotserver", err)
381
381
return
···
405
405
user := rp.oauth.GetUser(r)
406
406
407
407
var breadcrumbs [][]string
408
-
breadcrumbs = append(breadcrumbs, []string{f.RepoName, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
408
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
409
409
if treePath != "" {
410
410
for idx, elem := range strings.Split(treePath, "/") {
411
411
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], elem)})
···
436
436
return
437
437
}
438
438
439
-
result, err := us.Tags(f.OwnerDid(), f.RepoName)
439
+
result, err := us.Tags(f.OwnerDid(), f.Name)
440
440
if err != nil {
441
441
log.Println("failed to reach knotserver", err)
442
442
return
443
443
}
444
444
445
-
artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt))
445
+
artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt()))
446
446
if err != nil {
447
447
log.Println("failed grab artifacts", err)
448
448
return
···
493
493
return
494
494
}
495
495
496
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
496
+
result, err := us.Branches(f.OwnerDid(), f.Name)
497
497
if err != nil {
498
498
log.Println("failed to reach knotserver", err)
499
499
return
···
522
522
if !rp.config.Core.Dev {
523
523
protocol = "https"
524
524
}
525
-
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/blob/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, filePath))
525
+
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/blob/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref, filePath))
526
526
if err != nil {
527
527
log.Println("failed to reach knotserver", err)
528
528
return
···
542
542
}
543
543
544
544
var breadcrumbs [][]string
545
-
breadcrumbs = append(breadcrumbs, []string{f.RepoName, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
545
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
546
546
if filePath != "" {
547
547
for idx, elem := range strings.Split(filePath, "/") {
548
548
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], elem)})
···
575
575
576
576
// fetch the actual binary content like in RepoBlobRaw
577
577
578
-
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, filePath)
578
+
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Name, ref, filePath)
579
579
contentSrc = blobURL
580
580
if !rp.config.Core.Dev {
581
581
contentSrc = markup.GenerateCamoURL(rp.config.Camo.Host, rp.config.Camo.SharedSecret, blobURL)
···
612
612
if !rp.config.Core.Dev {
613
613
protocol = "https"
614
614
}
615
-
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, filePath)
616
-
resp, err := http.Get(blobURL)
615
+
616
+
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref, filePath)
617
+
618
+
req, err := http.NewRequest("GET", blobURL, nil)
617
619
if err != nil {
618
-
log.Println("failed to reach knotserver:", err)
620
+
log.Println("failed to create request", err)
621
+
return
622
+
}
623
+
624
+
// forward the If-None-Match header
625
+
if clientETag := r.Header.Get("If-None-Match"); clientETag != "" {
626
+
req.Header.Set("If-None-Match", clientETag)
627
+
}
628
+
629
+
client := &http.Client{}
630
+
resp, err := client.Do(req)
631
+
if err != nil {
632
+
log.Println("failed to reach knotserver", err)
619
633
rp.pages.Error503(w)
620
634
return
621
635
}
622
636
defer resp.Body.Close()
637
+
638
+
// forward 304 not modified
639
+
if resp.StatusCode == http.StatusNotModified {
640
+
w.WriteHeader(http.StatusNotModified)
641
+
return
642
+
}
623
643
624
644
if resp.StatusCode != http.StatusOK {
625
645
log.Printf("knotserver returned non-OK status for raw blob %s: %d", blobURL, resp.StatusCode)
···
668
688
return
669
689
}
670
690
671
-
repoAt := f.RepoAt
691
+
repoAt := f.RepoAt()
672
692
rkey := repoAt.RecordKey().String()
673
693
if rkey == "" {
674
694
fail("Failed to resolve repo. Try again later", err)
···
722
742
Record: &lexutil.LexiconTypeDecoder{
723
743
Val: &tangled.Repo{
724
744
Knot: f.Knot,
725
-
Name: f.RepoName,
745
+
Name: f.Name,
726
746
Owner: user.Did,
727
-
CreatedAt: f.CreatedAt,
747
+
CreatedAt: f.Created.Format(time.RFC3339),
728
748
Description: &f.Description,
729
749
Spindle: spindlePtr,
730
750
},
···
805
825
Record: &lexutil.LexiconTypeDecoder{
806
826
Val: &tangled.RepoCollaborator{
807
827
Subject: collaboratorIdent.DID.String(),
808
-
Repo: string(f.RepoAt),
828
+
Repo: string(f.RepoAt()),
809
829
CreatedAt: createdAt.Format(time.RFC3339),
810
830
}},
811
831
})
···
830
850
return
831
851
}
832
852
833
-
ksResp, err := ksClient.AddCollaborator(f.OwnerDid(), f.RepoName, collaboratorIdent.DID.String())
853
+
ksResp, err := ksClient.AddCollaborator(f.OwnerDid(), f.Name, collaboratorIdent.DID.String())
834
854
if err != nil {
835
855
fail("Knot was unreachable.", err)
836
856
return
···
864
884
Did: syntax.DID(currentUser.Did),
865
885
Rkey: rkey,
866
886
SubjectDid: collaboratorIdent.DID,
867
-
RepoAt: f.RepoAt,
887
+
RepoAt: f.RepoAt(),
868
888
Created: createdAt,
869
889
})
870
890
if err != nil {
···
902
922
log.Println("failed to get authorized client", err)
903
923
return
904
924
}
905
-
repoRkey := f.RepoAt.RecordKey().String()
906
925
_, err = xrpcClient.RepoDeleteRecord(r.Context(), &comatproto.RepoDeleteRecord_Input{
907
926
Collection: tangled.RepoNSID,
908
927
Repo: user.Did,
909
-
Rkey: repoRkey,
928
+
Rkey: f.Rkey,
910
929
})
911
930
if err != nil {
912
931
log.Printf("failed to delete record: %s", err)
913
932
rp.pages.Notice(w, "settings-delete", "Failed to delete repository from PDS.")
914
933
return
915
934
}
916
-
log.Println("removed repo record ", f.RepoAt.String())
935
+
log.Println("removed repo record ", f.RepoAt().String())
917
936
918
937
secret, err := db.GetRegistrationKey(rp.db, f.Knot)
919
938
if err != nil {
···
927
946
return
928
947
}
929
948
930
-
ksResp, err := ksClient.RemoveRepo(f.OwnerDid(), f.RepoName)
949
+
ksResp, err := ksClient.RemoveRepo(f.OwnerDid(), f.Name)
931
950
if err != nil {
932
951
log.Printf("failed to make request to %s: %s", f.Knot, err)
933
952
return
···
973
992
}
974
993
975
994
// remove repo from db
976
-
err = db.RemoveRepo(tx, f.OwnerDid(), f.RepoName)
995
+
err = db.RemoveRepo(tx, f.OwnerDid(), f.Name)
977
996
if err != nil {
978
997
rp.pages.Notice(w, "settings-delete", "Failed to update appview")
979
998
return
···
1022
1041
return
1023
1042
}
1024
1043
1025
-
ksResp, err := ksClient.SetDefaultBranch(f.OwnerDid(), f.RepoName, branch)
1044
+
ksResp, err := ksClient.SetDefaultBranch(f.OwnerDid(), f.Name, branch)
1026
1045
if err != nil {
1027
1046
log.Printf("failed to make request to %s: %s", f.Knot, err)
1028
1047
return
···
1090
1109
r.Context(),
1091
1110
spindleClient,
1092
1111
&tangled.RepoAddSecret_Input{
1093
-
Repo: f.RepoAt.String(),
1112
+
Repo: f.RepoAt().String(),
1094
1113
Key: key,
1095
1114
Value: value,
1096
1115
},
···
1108
1127
r.Context(),
1109
1128
spindleClient,
1110
1129
&tangled.RepoRemoveSecret_Input{
1111
-
Repo: f.RepoAt.String(),
1130
+
Repo: f.RepoAt().String(),
1112
1131
Key: key,
1113
1132
},
1114
1133
)
···
1170
1189
// return
1171
1190
// }
1172
1191
1173
-
// result, err := us.Branches(f.OwnerDid(), f.RepoName)
1192
+
// result, err := us.Branches(f.OwnerDid(), f.Name)
1174
1193
// if err != nil {
1175
1194
// log.Println("failed to reach knotserver", err)
1176
1195
// return
···
1192
1211
// oauth.WithDev(rp.config.Core.Dev),
1193
1212
// ); err != nil {
1194
1213
// log.Println("failed to create spindle client", err)
1195
-
// } else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt.String()); err != nil {
1214
+
// } else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt().String()); err != nil {
1196
1215
// log.Println("failed to fetch secrets", err)
1197
1216
// } else {
1198
1217
// secrets = resp.Secrets
···
1221
1240
return
1222
1241
}
1223
1242
1224
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
1243
+
result, err := us.Branches(f.OwnerDid(), f.Name)
1225
1244
if err != nil {
1226
1245
log.Println("failed to reach knotserver", err)
1227
1246
return
···
1275
1294
oauth.WithDev(rp.config.Core.Dev),
1276
1295
); err != nil {
1277
1296
log.Println("failed to create spindle client", err)
1278
-
} else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt.String()); err != nil {
1297
+
} else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt().String()); err != nil {
1279
1298
log.Println("failed to fetch secrets", err)
1280
1299
} else {
1281
1300
secrets = resp.Secrets
···
1316
1335
}
1317
1336
1318
1337
func (rp *Repo) SyncRepoFork(w http.ResponseWriter, r *http.Request) {
1338
+
ref := chi.URLParam(r, "ref")
1339
+
1319
1340
user := rp.oauth.GetUser(r)
1320
1341
f, err := rp.repoResolver.Resolve(r)
1321
1342
if err != nil {
···
1343
1364
} else {
1344
1365
uri = "https"
1345
1366
}
1346
-
forkName := fmt.Sprintf("%s", f.RepoName)
1347
-
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.RepoName)
1367
+
forkName := fmt.Sprintf("%s", f.Name)
1368
+
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name)
1348
1369
1349
-
_, err = client.SyncRepoFork(user.Did, forkSourceUrl, forkName, f.Ref)
1370
+
_, err = client.SyncRepoFork(user.Did, forkSourceUrl, forkName, ref)
1350
1371
if err != nil {
1351
1372
rp.pages.Notice(w, "repo", "Failed to sync repository fork.")
1352
1373
return
···
1394
1415
return
1395
1416
}
1396
1417
1397
-
forkName := fmt.Sprintf("%s", f.RepoName)
1418
+
forkName := fmt.Sprintf("%s", f.Name)
1398
1419
1399
1420
// this check is *only* to see if the forked repo name already exists
1400
1421
// in the user's account.
1401
-
existingRepo, err := db.GetRepo(rp.db, user.Did, f.RepoName)
1422
+
existingRepo, err := db.GetRepo(rp.db, user.Did, f.Name)
1402
1423
if err != nil {
1403
1424
if errors.Is(err, sql.ErrNoRows) {
1404
1425
// no existing repo with this name found, we can use the name as is
···
1429
1450
} else {
1430
1451
uri = "https"
1431
1452
}
1432
-
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.RepoName)
1433
-
sourceAt := f.RepoAt.String()
1453
+
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name)
1454
+
sourceAt := f.RepoAt().String()
1434
1455
1435
1456
rkey := tid.TID()
1436
1457
repo := &db.Repo{
···
1499
1520
}
1500
1521
log.Println("created repo record: ", atresp.Uri)
1501
1522
1502
-
repo.AtUri = atresp.Uri
1503
1523
err = db.AddRepo(tx, repo)
1504
1524
if err != nil {
1505
1525
log.Println(err)
···
1550
1570
return
1551
1571
}
1552
1572
1553
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
1573
+
result, err := us.Branches(f.OwnerDid(), f.Name)
1554
1574
if err != nil {
1555
1575
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1556
1576
log.Println("failed to reach knotserver", err)
···
1580
1600
head = queryHead
1581
1601
}
1582
1602
1583
-
tags, err := us.Tags(f.OwnerDid(), f.RepoName)
1603
+
tags, err := us.Tags(f.OwnerDid(), f.Name)
1584
1604
if err != nil {
1585
1605
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1586
1606
log.Println("failed to reach knotserver", err)
···
1642
1662
return
1643
1663
}
1644
1664
1645
-
branches, err := us.Branches(f.OwnerDid(), f.RepoName)
1665
+
branches, err := us.Branches(f.OwnerDid(), f.Name)
1646
1666
if err != nil {
1647
1667
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1648
1668
log.Println("failed to reach knotserver", err)
1649
1669
return
1650
1670
}
1651
1671
1652
-
tags, err := us.Tags(f.OwnerDid(), f.RepoName)
1672
+
tags, err := us.Tags(f.OwnerDid(), f.Name)
1653
1673
if err != nil {
1654
1674
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1655
1675
log.Println("failed to reach knotserver", err)
1656
1676
return
1657
1677
}
1658
1678
1659
-
formatPatch, err := us.Compare(f.OwnerDid(), f.RepoName, base, head)
1679
+
formatPatch, err := us.Compare(f.OwnerDid(), f.Name, base, head)
1660
1680
if err != nil {
1661
1681
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1662
1682
log.Println("failed to compare", err)
+1
appview/repo/router.go
+1
appview/repo/router.go
+37
-104
appview/reporesolver/resolver.go
+37
-104
appview/reporesolver/resolver.go
···
7
7
"fmt"
8
8
"log"
9
9
"net/http"
10
-
"net/url"
11
10
"path"
11
+
"regexp"
12
12
"strings"
13
13
14
14
"github.com/bluesky-social/indigo/atproto/identity"
15
-
"github.com/bluesky-social/indigo/atproto/syntax"
16
15
securejoin "github.com/cyphar/filepath-securejoin"
17
16
"github.com/go-chi/chi/v5"
18
17
"tangled.sh/tangled.sh/core/appview/config"
···
21
20
"tangled.sh/tangled.sh/core/appview/pages"
22
21
"tangled.sh/tangled.sh/core/appview/pages/repoinfo"
23
22
"tangled.sh/tangled.sh/core/idresolver"
24
-
"tangled.sh/tangled.sh/core/knotclient"
25
23
"tangled.sh/tangled.sh/core/rbac"
26
24
)
27
25
28
26
type ResolvedRepo struct {
29
-
Knot string
30
-
OwnerId identity.Identity
31
-
RepoName string
32
-
RepoAt syntax.ATURI
33
-
Description string
34
-
Spindle string
35
-
CreatedAt string
36
-
Ref string
37
-
CurrentDir string
27
+
db.Repo
28
+
OwnerId identity.Identity
29
+
CurrentDir string
30
+
Ref string
38
31
39
32
rr *RepoResolver
40
33
}
···
51
44
}
52
45
53
46
func (rr *RepoResolver) Resolve(r *http.Request) (*ResolvedRepo, error) {
54
-
repoName := chi.URLParam(r, "repo")
55
-
knot, ok := r.Context().Value("knot").(string)
47
+
repo, ok := r.Context().Value("repo").(*db.Repo)
56
48
if !ok {
57
-
log.Println("malformed middleware")
49
+
log.Println("malformed middleware: `repo` not exist in context")
58
50
return nil, fmt.Errorf("malformed middleware")
59
51
}
60
52
id, ok := r.Context().Value("resolvedId").(identity.Identity)
···
63
55
return nil, fmt.Errorf("malformed middleware")
64
56
}
65
57
66
-
repoAt, ok := r.Context().Value("repoAt").(string)
67
-
if !ok {
68
-
log.Println("malformed middleware")
69
-
return nil, fmt.Errorf("malformed middleware")
70
-
}
71
-
72
-
parsedRepoAt, err := syntax.ParseATURI(repoAt)
73
-
if err != nil {
74
-
log.Println("malformed repo at-uri")
75
-
return nil, fmt.Errorf("malformed middleware")
76
-
}
77
-
58
+
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
78
59
ref := chi.URLParam(r, "ref")
79
60
80
-
if ref == "" {
81
-
us, err := knotclient.NewUnsignedClient(knot, rr.config.Core.Dev)
82
-
if err != nil {
83
-
return nil, err
84
-
}
85
-
86
-
defaultBranch, err := us.DefaultBranch(id.DID.String(), repoName)
87
-
if err != nil {
88
-
return nil, err
89
-
}
90
-
91
-
ref = defaultBranch.Branch
92
-
}
93
-
94
-
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath(), ref))
95
-
96
-
// pass through values from the middleware
97
-
description, ok := r.Context().Value("repoDescription").(string)
98
-
addedAt, ok := r.Context().Value("repoAddedAt").(string)
99
-
spindle, ok := r.Context().Value("repoSpindle").(string)
100
-
101
61
return &ResolvedRepo{
102
-
Knot: knot,
103
-
OwnerId: id,
104
-
RepoName: repoName,
105
-
RepoAt: parsedRepoAt,
106
-
Description: description,
107
-
CreatedAt: addedAt,
108
-
Ref: ref,
109
-
CurrentDir: currentDir,
110
-
Spindle: spindle,
62
+
Repo: *repo,
63
+
OwnerId: id,
64
+
CurrentDir: currentDir,
65
+
Ref: ref,
111
66
112
67
rr: rr,
113
68
}, nil
···
126
81
127
82
var p string
128
83
if handle != "" && !handle.IsInvalidHandle() {
129
-
p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.RepoName)
84
+
p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.Name)
130
85
} else {
131
-
p, _ = securejoin.SecureJoin(f.OwnerDid(), f.RepoName)
86
+
p, _ = securejoin.SecureJoin(f.OwnerDid(), f.Name)
132
87
}
133
88
134
-
return p
135
-
}
136
-
137
-
func (f *ResolvedRepo) DidSlashRepo() string {
138
-
p, _ := securejoin.SecureJoin(f.OwnerDid(), f.RepoName)
139
89
return p
140
90
}
141
91
···
187
137
// this function is a bit weird since it now returns RepoInfo from an entirely different
188
138
// package. we should refactor this or get rid of RepoInfo entirely.
189
139
func (f *ResolvedRepo) RepoInfo(user *oauth.User) repoinfo.RepoInfo {
140
+
repoAt := f.RepoAt()
190
141
isStarred := false
191
142
if user != nil {
192
-
isStarred = db.GetStarStatus(f.rr.execer, user.Did, syntax.ATURI(f.RepoAt))
143
+
isStarred = db.GetStarStatus(f.rr.execer, user.Did, repoAt)
193
144
}
194
145
195
-
starCount, err := db.GetStarCount(f.rr.execer, f.RepoAt)
146
+
starCount, err := db.GetStarCount(f.rr.execer, repoAt)
196
147
if err != nil {
197
-
log.Println("failed to get star count for ", f.RepoAt)
148
+
log.Println("failed to get star count for ", repoAt)
198
149
}
199
-
issueCount, err := db.GetIssueCount(f.rr.execer, f.RepoAt)
150
+
issueCount, err := db.GetIssueCount(f.rr.execer, repoAt)
200
151
if err != nil {
201
-
log.Println("failed to get issue count for ", f.RepoAt)
152
+
log.Println("failed to get issue count for ", repoAt)
202
153
}
203
-
pullCount, err := db.GetPullCount(f.rr.execer, f.RepoAt)
154
+
pullCount, err := db.GetPullCount(f.rr.execer, repoAt)
204
155
if err != nil {
205
-
log.Println("failed to get issue count for ", f.RepoAt)
156
+
log.Println("failed to get issue count for ", repoAt)
206
157
}
207
-
source, err := db.GetRepoSource(f.rr.execer, f.RepoAt)
158
+
source, err := db.GetRepoSource(f.rr.execer, repoAt)
208
159
if errors.Is(err, sql.ErrNoRows) {
209
160
source = ""
210
161
} else if err != nil {
211
-
log.Println("failed to get repo source for ", f.RepoAt, err)
162
+
log.Println("failed to get repo source for ", repoAt, err)
212
163
}
213
164
214
165
var sourceRepo *db.Repo
···
228
179
}
229
180
230
181
knot := f.Knot
231
-
var disableFork bool
232
-
us, err := knotclient.NewUnsignedClient(knot, f.rr.config.Core.Dev)
233
-
if err != nil {
234
-
log.Printf("failed to create unsigned client for %s: %v", knot, err)
235
-
} else {
236
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
237
-
if err != nil {
238
-
log.Printf("failed to get branches for %s/%s: %v", f.OwnerDid(), f.RepoName, err)
239
-
}
240
-
241
-
if len(result.Branches) == 0 {
242
-
disableFork = true
243
-
}
244
-
}
245
182
246
183
repoInfo := repoinfo.RepoInfo{
247
184
OwnerDid: f.OwnerDid(),
248
185
OwnerHandle: f.OwnerHandle(),
249
-
Name: f.RepoName,
250
-
RepoAt: f.RepoAt,
186
+
Name: f.Name,
187
+
RepoAt: repoAt,
251
188
Description: f.Description,
252
-
Ref: f.Ref,
253
189
IsStarred: isStarred,
254
190
Knot: knot,
255
191
Spindle: f.Spindle,
···
259
195
IssueCount: issueCount,
260
196
PullCount: pullCount,
261
197
},
262
-
DisableFork: disableFork,
263
-
CurrentDir: f.CurrentDir,
198
+
CurrentDir: f.CurrentDir,
199
+
Ref: f.Ref,
264
200
}
265
201
266
202
if sourceRepo != nil {
···
284
220
// after the ref. for example:
285
221
//
286
222
// /@icyphox.sh/foorepo/blob/main/abc/xyz/ => abc/xyz/
287
-
func extractPathAfterRef(fullPath, ref string) string {
223
+
func extractPathAfterRef(fullPath string) string {
288
224
fullPath = strings.TrimPrefix(fullPath, "/")
289
225
290
-
ref = url.PathEscape(ref)
226
+
// match blob/, tree/, or raw/ followed by any ref and then a slash
227
+
//
228
+
// captures everything after the final slash
229
+
pattern := `(?:blob|tree|raw)/[^/]+/(.*)$`
291
230
292
-
prefixes := []string{
293
-
fmt.Sprintf("blob/%s/", ref),
294
-
fmt.Sprintf("tree/%s/", ref),
295
-
fmt.Sprintf("raw/%s/", ref),
296
-
}
231
+
re := regexp.MustCompile(pattern)
232
+
matches := re.FindStringSubmatch(fullPath)
297
233
298
-
for _, prefix := range prefixes {
299
-
idx := strings.Index(fullPath, prefix)
300
-
if idx != -1 {
301
-
return fullPath[idx+len(prefix):]
302
-
}
234
+
if len(matches) > 1 {
235
+
return matches[1]
303
236
}
304
237
305
238
return ""
+9
-12
appview/state/git_http.go
+9
-12
appview/state/git_http.go
···
3
3
import (
4
4
"fmt"
5
5
"io"
6
+
"maps"
6
7
"net/http"
7
8
8
9
"github.com/bluesky-social/indigo/atproto/identity"
9
10
"github.com/go-chi/chi/v5"
11
+
"tangled.sh/tangled.sh/core/appview/db"
10
12
)
11
13
12
14
func (s *State) InfoRefs(w http.ResponseWriter, r *http.Request) {
13
15
user := r.Context().Value("resolvedId").(identity.Identity)
14
-
knot := r.Context().Value("knot").(string)
15
-
repo := chi.URLParam(r, "repo")
16
+
repo := r.Context().Value("repo").(*db.Repo)
16
17
17
18
scheme := "https"
18
19
if s.config.Core.Dev {
19
20
scheme = "http"
20
21
}
21
22
22
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/info/refs?%s", scheme, knot, user.DID, repo, r.URL.RawQuery)
23
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/info/refs?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
23
24
s.proxyRequest(w, r, targetURL)
24
25
25
26
}
···
30
31
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
31
32
return
32
33
}
33
-
knot := r.Context().Value("knot").(string)
34
-
repo := chi.URLParam(r, "repo")
34
+
repo := r.Context().Value("repo").(*db.Repo)
35
35
36
36
scheme := "https"
37
37
if s.config.Core.Dev {
38
38
scheme = "http"
39
39
}
40
40
41
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-pack?%s", scheme, knot, user.DID, repo, r.URL.RawQuery)
41
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42
42
s.proxyRequest(w, r, targetURL)
43
43
}
44
44
···
48
48
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
49
49
return
50
50
}
51
-
knot := r.Context().Value("knot").(string)
52
-
repo := chi.URLParam(r, "repo")
51
+
repo := r.Context().Value("repo").(*db.Repo)
53
52
54
53
scheme := "https"
55
54
if s.config.Core.Dev {
56
55
scheme = "http"
57
56
}
58
57
59
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-receive-pack?%s", scheme, knot, user.DID, repo, r.URL.RawQuery)
58
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-receive-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
60
59
s.proxyRequest(w, r, targetURL)
61
60
}
62
61
···
85
84
defer resp.Body.Close()
86
85
87
86
// Copy response headers
88
-
for k, v := range resp.Header {
89
-
w.Header()[k] = v
90
-
}
87
+
maps.Copy(w.Header(), resp.Header)
91
88
92
89
// Set response status code
93
90
w.WriteHeader(resp.StatusCode)
+95
-82
appview/state/profile.go
+95
-82
appview/state/profile.go
···
89
89
log.Printf("failed to create profile timeline for %s: %s", ident.DID.String(), err)
90
90
}
91
91
92
-
var didsToResolve []string
93
-
for _, r := range collaboratingRepos {
94
-
didsToResolve = append(didsToResolve, r.Did)
95
-
}
96
-
for _, byMonth := range timeline.ByMonth {
97
-
for _, pe := range byMonth.PullEvents.Items {
98
-
didsToResolve = append(didsToResolve, pe.Repo.Did)
99
-
}
100
-
for _, ie := range byMonth.IssueEvents.Items {
101
-
didsToResolve = append(didsToResolve, ie.Metadata.Repo.Did)
102
-
}
103
-
for _, re := range byMonth.RepoEvents {
104
-
didsToResolve = append(didsToResolve, re.Repo.Did)
105
-
if re.Source != nil {
106
-
didsToResolve = append(didsToResolve, re.Source.Did)
107
-
}
108
-
}
109
-
}
110
-
111
92
followers, following, err := db.GetFollowerFollowingCount(s.db, ident.DID.String())
112
93
if err != nil {
113
94
log.Printf("getting follow stats repos for %s: %s", ident.DID.String(), err)
···
194
175
})
195
176
}
196
177
197
-
func (s *State) feedFromRequest(w http.ResponseWriter, r *http.Request) *feeds.Feed {
178
+
func (s *State) AtomFeedPage(w http.ResponseWriter, r *http.Request) {
198
179
ident, ok := r.Context().Value("resolvedId").(identity.Identity)
199
180
if !ok {
200
181
s.pages.Error404(w)
201
-
return nil
182
+
return
202
183
}
203
184
204
-
feed, err := s.GetProfileFeed(r.Context(), ident.Handle.String(), ident.DID.String())
185
+
feed, err := s.getProfileFeed(r.Context(), &ident)
205
186
if err != nil {
206
187
s.pages.Error500(w)
207
-
return nil
188
+
return
208
189
}
209
190
210
-
return feed
211
-
}
212
-
213
-
func (s *State) AtomFeedPage(w http.ResponseWriter, r *http.Request) {
214
-
feed := s.feedFromRequest(w, r)
215
191
if feed == nil {
216
192
return
217
193
}
···
226
202
w.Write([]byte(atom))
227
203
}
228
204
229
-
func (s *State) GetProfileFeed(ctx context.Context, handle string, did string) (*feeds.Feed, error) {
230
-
timeline, err := db.MakeProfileTimeline(s.db, did)
205
+
func (s *State) getProfileFeed(ctx context.Context, id *identity.Identity) (*feeds.Feed, error) {
206
+
timeline, err := db.MakeProfileTimeline(s.db, id.DID.String())
231
207
if err != nil {
232
208
return nil, err
233
209
}
234
210
235
211
author := &feeds.Author{
236
-
Name: fmt.Sprintf("@%s", handle),
212
+
Name: fmt.Sprintf("@%s", id.Handle),
237
213
}
238
-
feed := &feeds.Feed{
239
-
Title: fmt.Sprintf("timeline feed for %s", author.Name),
240
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s", s.config.Core.AppviewHost, handle), Type: "text/html", Rel: "alternate"},
214
+
215
+
feed := feeds.Feed{
216
+
Title: fmt.Sprintf("%s's timeline", author.Name),
217
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s", s.config.Core.AppviewHost, id.Handle), Type: "text/html", Rel: "alternate"},
241
218
Items: make([]*feeds.Item, 0),
242
219
Updated: time.UnixMilli(0),
243
220
Author: author,
244
221
}
222
+
245
223
for _, byMonth := range timeline.ByMonth {
246
-
for _, pull := range byMonth.PullEvents.Items {
247
-
owner, err := s.idResolver.ResolveIdent(ctx, pull.Repo.Did)
248
-
if err != nil {
249
-
return nil, err
250
-
}
251
-
feed.Items = append(feed.Items, &feeds.Item{
252
-
Title: fmt.Sprintf("%s created pull request '%s' in @%s/%s", author.Name, pull.Title, owner.Handle, pull.Repo.Name),
253
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/pulls/%d", s.config.Core.AppviewHost, owner.Handle, pull.Repo.Name, pull.PullId), Type: "text/html", Rel: "alternate"},
254
-
Created: pull.Created,
255
-
Author: author,
256
-
})
257
-
for _, submission := range pull.Submissions {
258
-
feed.Items = append(feed.Items, &feeds.Item{
259
-
Title: fmt.Sprintf("%s submitted pull request '%s' (round #%d) in @%s/%s", author.Name, pull.Title, submission.RoundNumber, owner.Handle, pull.Repo.Name),
260
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/pulls/%d", s.config.Core.AppviewHost, owner.Handle, pull.Repo.Name, pull.PullId), Type: "text/html", Rel: "alternate"},
261
-
Created: submission.Created,
262
-
Author: author,
263
-
})
264
-
}
224
+
if err := s.addPullRequestItems(ctx, &feed, byMonth.PullEvents.Items, author); err != nil {
225
+
return nil, err
265
226
}
266
-
for _, issue := range byMonth.IssueEvents.Items {
267
-
owner, err := s.idResolver.ResolveIdent(ctx, issue.Metadata.Repo.Did)
268
-
if err != nil {
269
-
return nil, err
270
-
}
271
-
feed.Items = append(feed.Items, &feeds.Item{
272
-
Title: fmt.Sprintf("%s created issue '%s' in @%s/%s", author.Name, issue.Title, owner.Handle, issue.Metadata.Repo.Name),
273
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/issues/%d", s.config.Core.AppviewHost, owner.Handle, issue.Metadata.Repo.Name, issue.IssueId), Type: "text/html", Rel: "alternate"},
274
-
Created: issue.Created,
275
-
Author: author,
276
-
})
227
+
if err := s.addIssueItems(ctx, &feed, byMonth.IssueEvents.Items, author); err != nil {
228
+
return nil, err
277
229
}
278
-
for _, repo := range byMonth.RepoEvents {
279
-
var title string
280
-
if repo.Source != nil {
281
-
id, err := s.idResolver.ResolveIdent(ctx, repo.Source.Did)
282
-
if err != nil {
283
-
return nil, err
284
-
}
285
-
title = fmt.Sprintf("%s forked repository @%s/%s to '%s'", author.Name, id.Handle, repo.Source.Name, repo.Repo.Name)
286
-
} else {
287
-
title = fmt.Sprintf("%s created repository '%s'", author.Name, repo.Repo.Name)
288
-
}
289
-
feed.Items = append(feed.Items, &feeds.Item{
290
-
Title: title,
291
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s", s.config.Core.AppviewHost, handle, repo.Repo.Name), Type: "text/html", Rel: "alternate"},
292
-
Created: repo.Repo.Created,
293
-
Author: author,
294
-
})
230
+
if err := s.addRepoItems(ctx, &feed, byMonth.RepoEvents, author); err != nil {
231
+
return nil, err
295
232
}
296
233
}
234
+
297
235
slices.SortFunc(feed.Items, func(a *feeds.Item, b *feeds.Item) int {
298
236
return int(b.Created.UnixMilli()) - int(a.Created.UnixMilli())
299
237
})
238
+
300
239
if len(feed.Items) > 0 {
301
240
feed.Updated = feed.Items[0].Created
302
241
}
303
242
304
-
return feed, nil
243
+
return &feed, nil
244
+
}
245
+
246
+
func (s *State) addPullRequestItems(ctx context.Context, feed *feeds.Feed, pulls []*db.Pull, author *feeds.Author) error {
247
+
for _, pull := range pulls {
248
+
owner, err := s.idResolver.ResolveIdent(ctx, pull.Repo.Did)
249
+
if err != nil {
250
+
return err
251
+
}
252
+
253
+
// Add pull request creation item
254
+
feed.Items = append(feed.Items, s.createPullRequestItem(pull, owner, author))
255
+
}
256
+
return nil
257
+
}
258
+
259
+
func (s *State) addIssueItems(ctx context.Context, feed *feeds.Feed, issues []*db.Issue, author *feeds.Author) error {
260
+
for _, issue := range issues {
261
+
owner, err := s.idResolver.ResolveIdent(ctx, issue.Metadata.Repo.Did)
262
+
if err != nil {
263
+
return err
264
+
}
265
+
266
+
feed.Items = append(feed.Items, s.createIssueItem(issue, owner, author))
267
+
}
268
+
return nil
269
+
}
270
+
271
+
func (s *State) addRepoItems(ctx context.Context, feed *feeds.Feed, repos []db.RepoEvent, author *feeds.Author) error {
272
+
for _, repo := range repos {
273
+
item, err := s.createRepoItem(ctx, repo, author)
274
+
if err != nil {
275
+
return err
276
+
}
277
+
feed.Items = append(feed.Items, item)
278
+
}
279
+
return nil
280
+
}
281
+
282
+
func (s *State) createPullRequestItem(pull *db.Pull, owner *identity.Identity, author *feeds.Author) *feeds.Item {
283
+
return &feeds.Item{
284
+
Title: fmt.Sprintf("%s created pull request '%s' in @%s/%s", author.Name, pull.Title, owner.Handle, pull.Repo.Name),
285
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/pulls/%d", s.config.Core.AppviewHost, owner.Handle, pull.Repo.Name, pull.PullId), Type: "text/html", Rel: "alternate"},
286
+
Created: pull.Created,
287
+
Author: author,
288
+
}
289
+
}
290
+
291
+
func (s *State) createIssueItem(issue *db.Issue, owner *identity.Identity, author *feeds.Author) *feeds.Item {
292
+
return &feeds.Item{
293
+
Title: fmt.Sprintf("%s created issue '%s' in @%s/%s", author.Name, issue.Title, owner.Handle, issue.Metadata.Repo.Name),
294
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/issues/%d", s.config.Core.AppviewHost, owner.Handle, issue.Metadata.Repo.Name, issue.IssueId), Type: "text/html", Rel: "alternate"},
295
+
Created: issue.Created,
296
+
Author: author,
297
+
}
298
+
}
299
+
300
+
func (s *State) createRepoItem(ctx context.Context, repo db.RepoEvent, author *feeds.Author) (*feeds.Item, error) {
301
+
var title string
302
+
if repo.Source != nil {
303
+
sourceOwner, err := s.idResolver.ResolveIdent(ctx, repo.Source.Did)
304
+
if err != nil {
305
+
return nil, err
306
+
}
307
+
title = fmt.Sprintf("%s forked repository @%s/%s to '%s'", author.Name, sourceOwner.Handle, repo.Source.Name, repo.Repo.Name)
308
+
} else {
309
+
title = fmt.Sprintf("%s created repository '%s'", author.Name, repo.Repo.Name)
310
+
}
311
+
312
+
return &feeds.Item{
313
+
Title: title,
314
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s", s.config.Core.AppviewHost, author.Name[1:], repo.Repo.Name), Type: "text/html", Rel: "alternate"}, // Remove @ prefix
315
+
Created: repo.Repo.Created,
316
+
Author: author,
317
+
}, nil
305
318
}
306
319
307
320
func (s *State) UpdateProfileBio(w http.ResponseWriter, r *http.Request) {
+11
-3
appview/state/router.go
+11
-3
appview/state/router.go
···
35
35
router.Get("/favicon.svg", s.Favicon)
36
36
router.Get("/favicon.ico", s.Favicon)
37
37
38
+
userRouter := s.UserRouter(&middleware)
39
+
standardRouter := s.StandardRouter(&middleware)
40
+
38
41
router.HandleFunc("/*", func(w http.ResponseWriter, r *http.Request) {
39
42
pat := chi.URLParam(r, "*")
40
43
if strings.HasPrefix(pat, "did:") || strings.HasPrefix(pat, "@") {
41
-
s.UserRouter(&middleware).ServeHTTP(w, r)
44
+
userRouter.ServeHTTP(w, r)
42
45
} else {
43
46
// Check if the first path element is a valid handle without '@' or a flattened DID
44
47
pathParts := strings.SplitN(pat, "/", 2)
···
61
64
return
62
65
}
63
66
}
64
-
s.StandardRouter(&middleware).ServeHTTP(w, r)
67
+
standardRouter.ServeHTTP(w, r)
65
68
}
66
69
})
67
70
···
75
78
r.Get("/", s.Profile)
76
79
r.Get("/feed.atom", s.AtomFeedPage)
77
80
81
+
// redirect /@handle/repo.git -> /@handle/repo
82
+
r.Get("/{repo}.git", func(w http.ResponseWriter, r *http.Request) {
83
+
nonDotGitPath := strings.TrimSuffix(r.URL.Path, ".git")
84
+
http.Redirect(w, r, nonDotGitPath, http.StatusMovedPermanently)
85
+
})
86
+
78
87
r.With(mw.ResolveRepo()).Route("/{repo}", func(r chi.Router) {
79
88
r.Use(mw.GoImport())
80
-
81
89
r.Mount("/", s.RepoRouter(mw))
82
90
r.Mount("/issues", s.IssuesRouter(mw))
83
91
r.Mount("/pulls", s.PullsRouter(mw))
+16
-1
appview/state/state.go
+16
-1
appview/state/state.go
···
94
94
tangled.SpindleMemberNSID,
95
95
tangled.SpindleNSID,
96
96
tangled.StringNSID,
97
+
tangled.RepoIssueNSID,
98
+
tangled.RepoIssueCommentNSID,
97
99
},
98
100
nil,
99
101
slog.Default(),
···
191
193
if err != nil {
192
194
log.Println(err)
193
195
s.pages.Notice(w, "timeline", "Uh oh! Failed to load timeline.")
196
+
}
197
+
198
+
repos, err := db.GetTopStarredReposLastWeek(s.db)
199
+
if err != nil {
200
+
log.Println(err)
201
+
s.pages.Notice(w, "topstarredrepos", "Unable to load.")
202
+
return
194
203
}
195
204
196
205
s.pages.Timeline(w, pages.TimelineParams{
197
206
LoggedInUser: user,
198
207
Timeline: timeline,
208
+
Repos: repos,
199
209
})
200
210
}
201
211
···
263
273
return nil
264
274
}
265
275
276
+
func stripGitExt(name string) string {
277
+
return strings.TrimSuffix(name, ".git")
278
+
}
279
+
266
280
func (s *State) NewRepo(w http.ResponseWriter, r *http.Request) {
267
281
switch r.Method {
268
282
case http.MethodGet:
···
297
311
s.pages.Notice(w, "repo", err.Error())
298
312
return
299
313
}
314
+
315
+
repoName = stripGitExt(repoName)
300
316
301
317
defaultBranch := r.FormValue("branch")
302
318
if defaultBranch == "" {
···
394
410
// continue
395
411
}
396
412
397
-
repo.AtUri = atresp.Uri
398
413
err = db.AddRepo(tx, repo)
399
414
if err != nil {
400
415
log.Println(err)
+23
-12
appview/strings/strings.go
+23
-12
appview/strings/strings.go
···
7
7
"path"
8
8
"slices"
9
9
"strconv"
10
-
"strings"
11
10
"time"
12
11
13
12
"tangled.sh/tangled.sh/core/api/tangled"
···
44
43
r := chi.NewRouter()
45
44
46
45
r.
46
+
Get("/", s.timeline)
47
+
48
+
r.
47
49
With(mw.ResolveIdent()).
48
50
Route("/{user}", func(r chi.Router) {
49
51
r.Get("/", s.dashboard)
···
70
72
return r
71
73
}
72
74
75
+
func (s *Strings) timeline(w http.ResponseWriter, r *http.Request) {
76
+
l := s.Logger.With("handler", "timeline")
77
+
78
+
strings, err := db.GetStrings(s.Db, 50)
79
+
if err != nil {
80
+
l.Error("failed to fetch string", "err", err)
81
+
w.WriteHeader(http.StatusInternalServerError)
82
+
return
83
+
}
84
+
85
+
s.Pages.StringsTimeline(w, pages.StringTimelineParams{
86
+
LoggedInUser: s.OAuth.GetUser(r),
87
+
Strings: strings,
88
+
})
89
+
}
90
+
73
91
func (s *Strings) contents(w http.ResponseWriter, r *http.Request) {
74
92
l := s.Logger.With("handler", "contents")
75
93
···
91
109
92
110
strings, err := db.GetStrings(
93
111
s.Db,
112
+
0,
94
113
db.FilterEq("did", id.DID),
95
114
db.FilterEq("rkey", rkey),
96
115
)
···
154
173
155
174
all, err := db.GetStrings(
156
175
s.Db,
176
+
0,
157
177
db.FilterEq("did", id.DID),
158
178
)
159
179
if err != nil {
···
225
245
// get the string currently being edited
226
246
all, err := db.GetStrings(
227
247
s.Db,
248
+
0,
228
249
db.FilterEq("did", id.DID),
229
250
db.FilterEq("rkey", rkey),
230
251
)
···
266
287
fail("Empty filename.", nil)
267
288
return
268
289
}
269
-
if !strings.Contains(filename, ".") {
270
-
// TODO: make this a htmx form validation
271
-
fail("No extension provided for filename.", nil)
272
-
return
273
-
}
274
290
275
291
content := r.FormValue("content")
276
292
if content == "" {
···
353
369
fail("Empty filename.", nil)
354
370
return
355
371
}
356
-
if !strings.Contains(filename, ".") {
357
-
// TODO: make this a htmx form validation
358
-
fail("No extension provided for filename.", nil)
359
-
return
360
-
}
361
372
362
373
content := r.FormValue("content")
363
374
if content == "" {
···
434
445
}
435
446
436
447
if user.Did != id.DID.String() {
437
-
fail("You cannot delete this gist", fmt.Errorf("unauthorized deletion, %s != %s", user.Did, id.DID.String()))
448
+
fail("You cannot delete this string", fmt.Errorf("unauthorized deletion, %s != %s", user.Did, id.DID.String()))
438
449
return
439
450
}
440
451
-2
cmd/gen.go
-2
cmd/gen.go
···
27
27
tangled.KnotMember{},
28
28
tangled.Pipeline{},
29
29
tangled.Pipeline_CloneOpts{},
30
-
tangled.Pipeline_Dependency{},
31
30
tangled.Pipeline_ManualTriggerData{},
32
31
tangled.Pipeline_Pair{},
33
32
tangled.Pipeline_PullRequestTriggerData{},
34
33
tangled.Pipeline_PushTriggerData{},
35
34
tangled.PipelineStatus{},
36
-
tangled.Pipeline_Step{},
37
35
tangled.Pipeline_TriggerMetadata{},
38
36
tangled.Pipeline_TriggerRepo{},
39
37
tangled.Pipeline_Workflow{},
+26
-3
docs/spindle/pipeline.md
+26
-3
docs/spindle/pipeline.md
···
4
4
repo. Generally:
5
5
6
6
* Pipelines are defined in YAML.
7
-
* Dependencies can be specified from
8
-
[Nixpkgs](https://search.nixos.org) or custom registries.
9
-
* Environment variables can be set globally or per-step.
7
+
* Workflows can run using different *engines*.
8
+
9
+
The most barebones workflow looks like this:
10
+
11
+
```yaml
12
+
when:
13
+
- event: ["push"]
14
+
branch: ["main"]
15
+
16
+
engine: "nixery"
17
+
18
+
# optional
19
+
clone:
20
+
skip: false
21
+
depth: 50
22
+
submodules: true
23
+
```
24
+
25
+
The `when` and `engine` fields are required, while every other aspect
26
+
of how the definition is parsed is up to the engine. Currently, a spindle
27
+
provides at least one of these built-in engines:
28
+
29
+
## `nixery`
30
+
31
+
The Nixery engine uses an instance of [Nixery](https://nixery.dev) to run
32
+
steps that use dependencies from [Nixpkgs](https://github.com/NixOS/nixpkgs).
10
33
11
34
Here's an example that uses all fields:
12
35
+1
-2
input.css
+1
-2
input.css
···
78
78
@supports (font-variation-settings: normal) {
79
79
html {
80
80
font-feature-settings:
81
-
"ss01" 1,
82
81
"kern" 1,
83
82
"liga" 1,
84
83
"cv05" 1,
···
104
103
}
105
104
106
105
code {
107
-
@apply font-mono rounded bg-gray-100 dark:bg-gray-700;
106
+
@apply font-mono rounded bg-gray-100 dark:bg-gray-700 text-black dark:text-white;
108
107
}
109
108
}
110
109
+8
-10
knotserver/git/fork.go
+8
-10
knotserver/git/fork.go
···
10
10
)
11
11
12
12
func Fork(repoPath, source string) error {
13
-
_, err := git.PlainClone(repoPath, true, &git.CloneOptions{
14
-
URL: source,
15
-
SingleBranch: false,
16
-
})
17
-
18
-
if err != nil {
13
+
cloneCmd := exec.Command("git", "clone", "--bare", source, repoPath)
14
+
if err := cloneCmd.Run(); err != nil {
19
15
return fmt.Errorf("failed to bare clone repository: %w", err)
20
16
}
21
17
22
-
err = exec.Command("git", "-C", repoPath, "config", "receive.hideRefs", "refs/hidden").Run()
23
-
if err != nil {
18
+
configureCmd := exec.Command("git", "-C", repoPath, "config", "receive.hideRefs", "refs/hidden")
19
+
if err := configureCmd.Run(); err != nil {
24
20
return fmt.Errorf("failed to configure hidden refs: %w", err)
25
21
}
26
22
27
23
return nil
28
24
}
29
25
30
-
func (g *GitRepo) Sync(branch string) error {
26
+
func (g *GitRepo) Sync() error {
27
+
branch := g.h.String()
28
+
31
29
fetchOpts := &git.FetchOptions{
32
30
RefSpecs: []config.RefSpec{
33
-
config.RefSpec(fmt.Sprintf("+refs/heads/%s:refs/heads/%s", branch, branch)),
31
+
config.RefSpec("+" + branch + ":" + branch), // +refs/heads/master:refs/heads/master
34
32
},
35
33
}
36
34
+1
knotserver/git.go
+1
knotserver/git.go
···
129
129
// If the appview gave us the repository owner's handle we can attempt to
130
130
// construct the correct ssh url.
131
131
ownerHandle := r.Header.Get("x-tangled-repo-owner-handle")
132
+
ownerHandle = strings.TrimPrefix(ownerHandle, "@")
132
133
if ownerHandle != "" && !strings.ContainsAny(ownerHandle, ":") {
133
134
hostname := d.c.Server.Hostname
134
135
if strings.Contains(hostname, ":") {
+2
-2
knotserver/handler.go
+2
-2
knotserver/handler.go
···
142
142
r.Delete("/", h.RemoveRepo)
143
143
r.Route("/fork", func(r chi.Router) {
144
144
r.Post("/", h.RepoFork)
145
-
r.Post("/sync/{branch}", h.RepoForkSync)
146
-
r.Get("/sync/{branch}", h.RepoForkAheadBehind)
145
+
r.Post("/sync/*", h.RepoForkSync)
146
+
r.Get("/sync/*", h.RepoForkAheadBehind)
147
147
})
148
148
})
149
149
+4
-4
knotserver/internal.go
+4
-4
knotserver/internal.go
···
242
242
return err
243
243
}
244
244
245
+
for _, e := range compiler.Diagnostics.Errors {
246
+
*clientMsgs = append(*clientMsgs, e.String())
247
+
}
248
+
245
249
if pushOptions.verboseCi {
246
250
if compiler.Diagnostics.IsEmpty() {
247
251
*clientMsgs = append(*clientMsgs, "success: pipeline compiled with no diagnostics")
248
-
}
249
-
250
-
for _, e := range compiler.Diagnostics.Errors {
251
-
*clientMsgs = append(*clientMsgs, e.String())
252
252
}
253
253
254
254
for _, w := range compiler.Diagnostics.Warnings {
+16
-11
knotserver/routes.go
+16
-11
knotserver/routes.go
···
286
286
mimeType = "image/svg+xml"
287
287
}
288
288
289
+
contentHash := sha256.Sum256(contents)
290
+
eTag := fmt.Sprintf("\"%x\"", contentHash)
291
+
289
292
// allow image, video, and text/plain files to be served directly
290
293
switch {
291
-
case strings.HasPrefix(mimeType, "image/"):
292
-
// allowed
293
-
case strings.HasPrefix(mimeType, "video/"):
294
-
// allowed
294
+
case strings.HasPrefix(mimeType, "image/"), strings.HasPrefix(mimeType, "video/"):
295
+
if clientETag := r.Header.Get("If-None-Match"); clientETag == eTag {
296
+
w.WriteHeader(http.StatusNotModified)
297
+
return
298
+
}
299
+
w.Header().Set("ETag", eTag)
300
+
295
301
case strings.HasPrefix(mimeType, "text/plain"):
296
-
// allowed
302
+
w.Header().Set("Cache-Control", "public, no-cache")
303
+
297
304
default:
298
305
l.Error("attempted to serve disallowed file type", "mimetype", mimeType)
299
306
writeError(w, "only image, video, and text files can be accessed directly", http.StatusForbidden)
300
307
return
301
308
}
302
309
303
-
w.Header().Set("Cache-Control", "public, max-age=86400") // cache for 24 hours
304
-
w.Header().Set("ETag", fmt.Sprintf("%x", sha256.Sum256(contents)))
305
310
w.Header().Set("Content-Type", mimeType)
306
311
w.Write(contents)
307
312
}
···
710
715
}
711
716
712
717
func (h *Handle) RepoForkAheadBehind(w http.ResponseWriter, r *http.Request) {
713
-
l := h.l.With("handler", "RepoForkSync")
718
+
l := h.l.With("handler", "RepoForkAheadBehind")
714
719
715
720
data := struct {
716
721
Did string `json:"did"`
···
845
850
name = filepath.Base(source)
846
851
}
847
852
848
-
branch := chi.URLParam(r, "branch")
853
+
branch := chi.URLParam(r, "*")
849
854
branch, _ = url.PathUnescape(branch)
850
855
851
856
relativeRepoPath := filepath.Join(did, name)
852
857
repoPath, _ := securejoin.SecureJoin(h.c.Repo.ScanPath, relativeRepoPath)
853
858
854
-
gr, err := git.PlainOpen(repoPath)
859
+
gr, err := git.Open(repoPath, branch)
855
860
if err != nil {
856
861
log.Println(err)
857
862
notFound(w)
858
863
return
859
864
}
860
865
861
-
err = gr.Sync(branch)
866
+
err = gr.Sync()
862
867
if err != nil {
863
868
l.Error("error syncing repo fork", "error", err.Error())
864
869
writeError(w, err.Error(), http.StatusInternalServerError)
+1
-8
lexicons/issue/comment.json
+1
-8
lexicons/issue/comment.json
···
9
9
"key": "tid",
10
10
"record": {
11
11
"type": "object",
12
-
"required": [
13
-
"issue",
14
-
"body",
15
-
"createdAt"
16
-
],
12
+
"required": ["issue", "body", "createdAt"],
17
13
"properties": {
18
14
"issue": {
19
15
"type": "string",
···
22
18
"repo": {
23
19
"type": "string",
24
20
"format": "at-uri"
25
-
},
26
-
"commentId": {
27
-
"type": "integer"
28
21
},
29
22
"owner": {
30
23
"type": "string",
+1
-10
lexicons/issue/issue.json
+1
-10
lexicons/issue/issue.json
···
9
9
"key": "tid",
10
10
"record": {
11
11
"type": "object",
12
-
"required": [
13
-
"repo",
14
-
"issueId",
15
-
"owner",
16
-
"title",
17
-
"createdAt"
18
-
],
12
+
"required": ["repo", "owner", "title", "createdAt"],
19
13
"properties": {
20
14
"repo": {
21
15
"type": "string",
22
16
"format": "at-uri"
23
-
},
24
-
"issueId": {
25
-
"type": "integer"
26
17
},
27
18
"owner": {
28
19
"type": "string",
+7
-63
lexicons/pipeline/pipeline.json
+7
-63
lexicons/pipeline/pipeline.json
···
149
149
"type": "object",
150
150
"required": [
151
151
"name",
152
-
"dependencies",
153
-
"steps",
154
-
"environment",
155
-
"clone"
152
+
"engine",
153
+
"clone",
154
+
"raw"
156
155
],
157
156
"properties": {
158
157
"name": {
159
158
"type": "string"
160
159
},
161
-
"dependencies": {
162
-
"type": "array",
163
-
"items": {
164
-
"type": "ref",
165
-
"ref": "#dependency"
166
-
}
167
-
},
168
-
"steps": {
169
-
"type": "array",
170
-
"items": {
171
-
"type": "ref",
172
-
"ref": "#step"
173
-
}
174
-
},
175
-
"environment": {
176
-
"type": "array",
177
-
"items": {
178
-
"type": "ref",
179
-
"ref": "#pair"
180
-
}
160
+
"engine": {
161
+
"type": "string"
181
162
},
182
163
"clone": {
183
164
"type": "ref",
184
165
"ref": "#cloneOpts"
185
-
}
186
-
}
187
-
},
188
-
"dependency": {
189
-
"type": "object",
190
-
"required": [
191
-
"registry",
192
-
"packages"
193
-
],
194
-
"properties": {
195
-
"registry": {
166
+
},
167
+
"raw": {
196
168
"type": "string"
197
-
},
198
-
"packages": {
199
-
"type": "array",
200
-
"items": {
201
-
"type": "string"
202
-
}
203
169
}
204
170
}
205
171
},
···
219
185
},
220
186
"submodules": {
221
187
"type": "boolean"
222
-
}
223
-
}
224
-
},
225
-
"step": {
226
-
"type": "object",
227
-
"required": [
228
-
"name",
229
-
"command"
230
-
],
231
-
"properties": {
232
-
"name": {
233
-
"type": "string"
234
-
},
235
-
"command": {
236
-
"type": "string"
237
-
},
238
-
"environment": {
239
-
"type": "array",
240
-
"items": {
241
-
"type": "ref",
242
-
"ref": "#pair"
243
-
}
244
188
}
245
189
}
246
190
},
+2
-2
nix/modules/spindle.nix
+2
-2
nix/modules/spindle.nix
···
111
111
"SPINDLE_SERVER_SECRETS_PROVIDER=${cfg.server.secrets.provider}"
112
112
"SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=${cfg.server.secrets.openbao.proxyAddr}"
113
113
"SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=${cfg.server.secrets.openbao.mount}"
114
-
"SPINDLE_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
115
-
"SPINDLE_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
114
+
"SPINDLE_NIXERY_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
115
+
"SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
116
116
];
117
117
ExecStart = "${cfg.package}/bin/spindle";
118
118
Restart = "always";
+4
-4
spindle/config/config.go
+4
-4
spindle/config/config.go
···
16
16
Dev bool `env:"DEV, default=false"`
17
17
Owner string `env:"OWNER, required"`
18
18
Secrets Secrets `env:",prefix=SECRETS_"`
19
+
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
19
20
}
20
21
21
22
func (s Server) Did() syntax.DID {
···
32
33
Mount string `env:"MOUNT, default=spindle"`
33
34
}
34
35
35
-
type Pipelines struct {
36
+
type NixeryPipelines struct {
36
37
Nixery string `env:"NIXERY, default=nixery.tangled.sh"`
37
38
WorkflowTimeout string `env:"WORKFLOW_TIMEOUT, default=5m"`
38
-
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
39
39
}
40
40
41
41
type Config struct {
42
-
Server Server `env:",prefix=SPINDLE_SERVER_"`
43
-
Pipelines Pipelines `env:",prefix=SPINDLE_PIPELINES_"`
42
+
Server Server `env:",prefix=SPINDLE_SERVER_"`
43
+
NixeryPipelines NixeryPipelines `env:",prefix=SPINDLE_NIXERY_PIPELINES_"`
44
44
}
45
45
46
46
func Load(ctx context.Context) (*Config, error) {
-21
spindle/engine/ansi_stripper.go
-21
spindle/engine/ansi_stripper.go
···
1
-
package engine
2
-
3
-
import (
4
-
"io"
5
-
6
-
"regexp"
7
-
)
8
-
9
-
// regex to match ANSI escape codes (e.g., color codes, cursor moves)
10
-
const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"
11
-
12
-
var re = regexp.MustCompile(ansi)
13
-
14
-
type ansiStrippingWriter struct {
15
-
underlying io.Writer
16
-
}
17
-
18
-
func (w *ansiStrippingWriter) Write(p []byte) (int, error) {
19
-
clean := re.ReplaceAll(p, []byte{})
20
-
return w.underlying.Write(clean)
21
-
}
+68
-415
spindle/engine/engine.go
+68
-415
spindle/engine/engine.go
···
4
4
"context"
5
5
"errors"
6
6
"fmt"
7
-
"io"
8
7
"log/slog"
9
-
"os"
10
-
"strings"
11
-
"sync"
12
-
"time"
13
8
14
9
securejoin "github.com/cyphar/filepath-securejoin"
15
-
"github.com/docker/docker/api/types/container"
16
-
"github.com/docker/docker/api/types/image"
17
-
"github.com/docker/docker/api/types/mount"
18
-
"github.com/docker/docker/api/types/network"
19
-
"github.com/docker/docker/api/types/volume"
20
-
"github.com/docker/docker/client"
21
-
"github.com/docker/docker/pkg/stdcopy"
22
10
"golang.org/x/sync/errgroup"
23
-
"tangled.sh/tangled.sh/core/log"
24
11
"tangled.sh/tangled.sh/core/notifier"
25
12
"tangled.sh/tangled.sh/core/spindle/config"
26
13
"tangled.sh/tangled.sh/core/spindle/db"
···
28
15
"tangled.sh/tangled.sh/core/spindle/secrets"
29
16
)
30
17
31
-
const (
32
-
workspaceDir = "/tangled/workspace"
18
+
var (
19
+
ErrTimedOut = errors.New("timed out")
20
+
ErrWorkflowFailed = errors.New("workflow failed")
33
21
)
34
22
35
-
type cleanupFunc func(context.Context) error
36
-
37
-
type Engine struct {
38
-
docker client.APIClient
39
-
l *slog.Logger
40
-
db *db.DB
41
-
n *notifier.Notifier
42
-
cfg *config.Config
43
-
vault secrets.Manager
44
-
45
-
cleanupMu sync.Mutex
46
-
cleanup map[string][]cleanupFunc
47
-
}
48
-
49
-
func New(ctx context.Context, cfg *config.Config, db *db.DB, n *notifier.Notifier, vault secrets.Manager) (*Engine, error) {
50
-
dcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
51
-
if err != nil {
52
-
return nil, err
53
-
}
54
-
55
-
l := log.FromContext(ctx).With("component", "spindle")
56
-
57
-
e := &Engine{
58
-
docker: dcli,
59
-
l: l,
60
-
db: db,
61
-
n: n,
62
-
cfg: cfg,
63
-
vault: vault,
64
-
}
65
-
66
-
e.cleanup = make(map[string][]cleanupFunc)
67
-
68
-
return e, nil
69
-
}
70
-
71
-
func (e *Engine) StartWorkflows(ctx context.Context, pipeline *models.Pipeline, pipelineId models.PipelineId) {
72
-
e.l.Info("starting all workflows in parallel", "pipeline", pipelineId)
23
+
func StartWorkflows(l *slog.Logger, vault secrets.Manager, cfg *config.Config, db *db.DB, n *notifier.Notifier, ctx context.Context, pipeline *models.Pipeline, pipelineId models.PipelineId) {
24
+
l.Info("starting all workflows in parallel", "pipeline", pipelineId)
73
25
74
26
// extract secrets
75
27
var allSecrets []secrets.UnlockedSecret
76
28
if didSlashRepo, err := securejoin.SecureJoin(pipeline.RepoOwner, pipeline.RepoName); err == nil {
77
-
if res, err := e.vault.GetSecretsUnlocked(ctx, secrets.DidSlashRepo(didSlashRepo)); err == nil {
29
+
if res, err := vault.GetSecretsUnlocked(ctx, secrets.DidSlashRepo(didSlashRepo)); err == nil {
78
30
allSecrets = res
79
31
}
80
32
}
81
33
82
-
workflowTimeoutStr := e.cfg.Pipelines.WorkflowTimeout
83
-
workflowTimeout, err := time.ParseDuration(workflowTimeoutStr)
84
-
if err != nil {
85
-
e.l.Error("failed to parse workflow timeout", "error", err, "timeout", workflowTimeoutStr)
86
-
workflowTimeout = 5 * time.Minute
87
-
}
88
-
e.l.Info("using workflow timeout", "timeout", workflowTimeout)
89
-
90
34
eg, ctx := errgroup.WithContext(ctx)
91
-
for _, w := range pipeline.Workflows {
92
-
eg.Go(func() error {
93
-
wid := models.WorkflowId{
94
-
PipelineId: pipelineId,
95
-
Name: w.Name,
96
-
}
97
-
98
-
err := e.db.StatusRunning(wid, e.n)
99
-
if err != nil {
100
-
return err
101
-
}
35
+
for eng, wfs := range pipeline.Workflows {
36
+
workflowTimeout := eng.WorkflowTimeout()
37
+
l.Info("using workflow timeout", "timeout", workflowTimeout)
102
38
103
-
err = e.SetupWorkflow(ctx, wid)
104
-
if err != nil {
105
-
e.l.Error("setting up worklow", "wid", wid, "err", err)
106
-
return err
107
-
}
108
-
defer e.DestroyWorkflow(ctx, wid)
109
-
110
-
reader, err := e.docker.ImagePull(ctx, w.Image, image.PullOptions{})
111
-
if err != nil {
112
-
e.l.Error("pipeline image pull failed!", "image", w.Image, "workflowId", wid, "error", err.Error())
39
+
for _, w := range wfs {
40
+
eg.Go(func() error {
41
+
wid := models.WorkflowId{
42
+
PipelineId: pipelineId,
43
+
Name: w.Name,
44
+
}
113
45
114
-
err := e.db.StatusFailed(wid, err.Error(), -1, e.n)
46
+
err := db.StatusRunning(wid, n)
115
47
if err != nil {
116
48
return err
117
49
}
118
50
119
-
return fmt.Errorf("pulling image: %w", err)
120
-
}
121
-
defer reader.Close()
122
-
io.Copy(os.Stdout, reader)
123
-
124
-
ctx, cancel := context.WithTimeout(ctx, workflowTimeout)
125
-
defer cancel()
51
+
err = eng.SetupWorkflow(ctx, wid, &w)
52
+
if err != nil {
53
+
// TODO(winter): Should this always set StatusFailed?
54
+
// In the original, we only do in a subset of cases.
55
+
l.Error("setting up worklow", "wid", wid, "err", err)
126
56
127
-
err = e.StartSteps(ctx, wid, w, allSecrets)
128
-
if err != nil {
129
-
if errors.Is(err, ErrTimedOut) {
130
-
dbErr := e.db.StatusTimeout(wid, e.n)
131
-
if dbErr != nil {
132
-
return dbErr
57
+
destroyErr := eng.DestroyWorkflow(ctx, wid)
58
+
if destroyErr != nil {
59
+
l.Error("failed to destroy workflow after setup failure", "error", destroyErr)
133
60
}
134
-
} else {
135
-
dbErr := e.db.StatusFailed(wid, err.Error(), -1, e.n)
61
+
62
+
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
136
63
if dbErr != nil {
137
64
return dbErr
138
65
}
66
+
return err
139
67
}
68
+
defer eng.DestroyWorkflow(ctx, wid)
140
69
141
-
return fmt.Errorf("starting steps image: %w", err)
142
-
}
70
+
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
71
+
if err != nil {
72
+
l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
73
+
wfLogger = nil
74
+
} else {
75
+
defer wfLogger.Close()
76
+
}
143
77
144
-
err = e.db.StatusSuccess(wid, e.n)
145
-
if err != nil {
146
-
return err
147
-
}
78
+
ctx, cancel := context.WithTimeout(ctx, workflowTimeout)
79
+
defer cancel()
148
80
149
-
return nil
150
-
})
151
-
}
81
+
for stepIdx, step := range w.Steps {
82
+
if wfLogger != nil {
83
+
ctl := wfLogger.ControlWriter(stepIdx, step)
84
+
ctl.Write([]byte(step.Name()))
85
+
}
152
86
153
-
if err = eg.Wait(); err != nil {
154
-
e.l.Error("failed to run one or more workflows", "err", err)
155
-
} else {
156
-
e.l.Error("successfully ran full pipeline")
157
-
}
158
-
}
87
+
err = eng.RunStep(ctx, wid, &w, stepIdx, allSecrets, wfLogger)
88
+
if err != nil {
89
+
if errors.Is(err, ErrTimedOut) {
90
+
dbErr := db.StatusTimeout(wid, n)
91
+
if dbErr != nil {
92
+
return dbErr
93
+
}
94
+
} else {
95
+
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
96
+
if dbErr != nil {
97
+
return dbErr
98
+
}
99
+
}
159
100
160
-
// SetupWorkflow sets up a new network for the workflow and volumes for
161
-
// the workspace and Nix store. These are persisted across steps and are
162
-
// destroyed at the end of the workflow.
163
-
func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId) error {
164
-
e.l.Info("setting up workflow", "workflow", wid)
101
+
return fmt.Errorf("starting steps image: %w", err)
102
+
}
103
+
}
165
104
166
-
_, err := e.docker.VolumeCreate(ctx, volume.CreateOptions{
167
-
Name: workspaceVolume(wid),
168
-
Driver: "local",
169
-
})
170
-
if err != nil {
171
-
return err
172
-
}
173
-
e.registerCleanup(wid, func(ctx context.Context) error {
174
-
return e.docker.VolumeRemove(ctx, workspaceVolume(wid), true)
175
-
})
176
-
177
-
_, err = e.docker.VolumeCreate(ctx, volume.CreateOptions{
178
-
Name: nixVolume(wid),
179
-
Driver: "local",
180
-
})
181
-
if err != nil {
182
-
return err
183
-
}
184
-
e.registerCleanup(wid, func(ctx context.Context) error {
185
-
return e.docker.VolumeRemove(ctx, nixVolume(wid), true)
186
-
})
187
-
188
-
_, err = e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
189
-
Driver: "bridge",
190
-
})
191
-
if err != nil {
192
-
return err
193
-
}
194
-
e.registerCleanup(wid, func(ctx context.Context) error {
195
-
return e.docker.NetworkRemove(ctx, networkName(wid))
196
-
})
105
+
err = db.StatusSuccess(wid, n)
106
+
if err != nil {
107
+
return err
108
+
}
197
109
198
-
return nil
199
-
}
200
-
201
-
// StartSteps starts all steps sequentially with the same base image.
202
-
// ONLY marks pipeline as failed if container's exit code is non-zero.
203
-
// All other errors are bubbled up.
204
-
// Fixed version of the step execution logic
205
-
func (e *Engine) StartSteps(ctx context.Context, wid models.WorkflowId, w models.Workflow, secrets []secrets.UnlockedSecret) error {
206
-
workflowEnvs := ConstructEnvs(w.Environment)
207
-
for _, s := range secrets {
208
-
workflowEnvs.AddEnv(s.Key, s.Value)
209
-
}
210
-
211
-
for stepIdx, step := range w.Steps {
212
-
select {
213
-
case <-ctx.Done():
214
-
return ctx.Err()
215
-
default:
216
-
}
217
-
218
-
envs := append(EnvVars(nil), workflowEnvs...)
219
-
for k, v := range step.Environment {
220
-
envs.AddEnv(k, v)
221
-
}
222
-
envs.AddEnv("HOME", workspaceDir)
223
-
e.l.Debug("envs for step", "step", step.Name, "envs", envs.Slice())
224
-
225
-
hostConfig := hostConfig(wid)
226
-
resp, err := e.docker.ContainerCreate(ctx, &container.Config{
227
-
Image: w.Image,
228
-
Cmd: []string{"bash", "-c", step.Command},
229
-
WorkingDir: workspaceDir,
230
-
Tty: false,
231
-
Hostname: "spindle",
232
-
Env: envs.Slice(),
233
-
}, hostConfig, nil, nil, "")
234
-
defer e.DestroyStep(ctx, resp.ID)
235
-
if err != nil {
236
-
return fmt.Errorf("creating container: %w", err)
237
-
}
238
-
239
-
err = e.docker.NetworkConnect(ctx, networkName(wid), resp.ID, nil)
240
-
if err != nil {
241
-
return fmt.Errorf("connecting network: %w", err)
242
-
}
243
-
244
-
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
245
-
if err != nil {
246
-
return err
247
-
}
248
-
e.l.Info("started container", "name", resp.ID, "step", step.Name)
249
-
250
-
// start tailing logs in background
251
-
tailDone := make(chan error, 1)
252
-
go func() {
253
-
tailDone <- e.TailStep(ctx, resp.ID, wid, stepIdx, step)
254
-
}()
255
-
256
-
// wait for container completion or timeout
257
-
waitDone := make(chan struct{})
258
-
var state *container.State
259
-
var waitErr error
260
-
261
-
go func() {
262
-
defer close(waitDone)
263
-
state, waitErr = e.WaitStep(ctx, resp.ID)
264
-
}()
265
-
266
-
select {
267
-
case <-waitDone:
268
-
269
-
// wait for tailing to complete
270
-
<-tailDone
271
-
272
-
case <-ctx.Done():
273
-
e.l.Warn("step timed out; killing container", "container", resp.ID, "step", step.Name)
274
-
err = e.DestroyStep(context.Background(), resp.ID)
275
-
if err != nil {
276
-
e.l.Error("failed to destroy step", "container", resp.ID, "error", err)
277
-
}
278
-
279
-
// wait for both goroutines to finish
280
-
<-waitDone
281
-
<-tailDone
282
-
283
-
return ErrTimedOut
284
-
}
285
-
286
-
select {
287
-
case <-ctx.Done():
288
-
return ctx.Err()
289
-
default:
290
-
}
291
-
292
-
if waitErr != nil {
293
-
return waitErr
294
-
}
295
-
296
-
err = e.DestroyStep(ctx, resp.ID)
297
-
if err != nil {
298
-
return err
299
-
}
300
-
301
-
if state.ExitCode != 0 {
302
-
e.l.Error("workflow failed!", "workflow_id", wid.String(), "error", state.Error, "exit_code", state.ExitCode, "oom_killed", state.OOMKilled)
303
-
if state.OOMKilled {
304
-
return ErrOOMKilled
305
-
}
306
-
return ErrWorkflowFailed
110
+
return nil
111
+
})
307
112
}
308
113
}
309
114
310
-
return nil
311
-
}
312
-
313
-
func (e *Engine) WaitStep(ctx context.Context, containerID string) (*container.State, error) {
314
-
wait, errCh := e.docker.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
315
-
select {
316
-
case err := <-errCh:
317
-
if err != nil {
318
-
return nil, err
319
-
}
320
-
case <-wait:
321
-
}
322
-
323
-
e.l.Info("waited for container", "name", containerID)
324
-
325
-
info, err := e.docker.ContainerInspect(ctx, containerID)
326
-
if err != nil {
327
-
return nil, err
328
-
}
329
-
330
-
return info.State, nil
331
-
}
332
-
333
-
func (e *Engine) TailStep(ctx context.Context, containerID string, wid models.WorkflowId, stepIdx int, step models.Step) error {
334
-
wfLogger, err := NewWorkflowLogger(e.cfg.Pipelines.LogDir, wid)
335
-
if err != nil {
336
-
e.l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
337
-
return err
115
+
if err := eg.Wait(); err != nil {
116
+
l.Error("failed to run one or more workflows", "err", err)
117
+
} else {
118
+
l.Error("successfully ran full pipeline")
338
119
}
339
-
defer wfLogger.Close()
340
-
341
-
ctl := wfLogger.ControlWriter(stepIdx, step)
342
-
ctl.Write([]byte(step.Name))
343
-
344
-
logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
345
-
Follow: true,
346
-
ShowStdout: true,
347
-
ShowStderr: true,
348
-
Details: false,
349
-
Timestamps: false,
350
-
})
351
-
if err != nil {
352
-
return err
353
-
}
354
-
355
-
_, err = stdcopy.StdCopy(
356
-
wfLogger.DataWriter("stdout"),
357
-
wfLogger.DataWriter("stderr"),
358
-
logs,
359
-
)
360
-
if err != nil && err != io.EOF && !errors.Is(err, context.DeadlineExceeded) {
361
-
return fmt.Errorf("failed to copy logs: %w", err)
362
-
}
363
-
364
-
return nil
365
-
}
366
-
367
-
func (e *Engine) DestroyStep(ctx context.Context, containerID string) error {
368
-
err := e.docker.ContainerKill(ctx, containerID, "9") // SIGKILL
369
-
if err != nil && !isErrContainerNotFoundOrNotRunning(err) {
370
-
return err
371
-
}
372
-
373
-
if err := e.docker.ContainerRemove(ctx, containerID, container.RemoveOptions{
374
-
RemoveVolumes: true,
375
-
RemoveLinks: false,
376
-
Force: false,
377
-
}); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
378
-
return err
379
-
}
380
-
381
-
return nil
382
-
}
383
-
384
-
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
385
-
e.cleanupMu.Lock()
386
-
key := wid.String()
387
-
388
-
fns := e.cleanup[key]
389
-
delete(e.cleanup, key)
390
-
e.cleanupMu.Unlock()
391
-
392
-
for _, fn := range fns {
393
-
if err := fn(ctx); err != nil {
394
-
e.l.Error("failed to cleanup workflow resource", "workflowId", wid, "error", err)
395
-
}
396
-
}
397
-
return nil
398
-
}
399
-
400
-
func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
401
-
e.cleanupMu.Lock()
402
-
defer e.cleanupMu.Unlock()
403
-
404
-
key := wid.String()
405
-
e.cleanup[key] = append(e.cleanup[key], fn)
406
-
}
407
-
408
-
func workspaceVolume(wid models.WorkflowId) string {
409
-
return fmt.Sprintf("workspace-%s", wid)
410
-
}
411
-
412
-
func nixVolume(wid models.WorkflowId) string {
413
-
return fmt.Sprintf("nix-%s", wid)
414
-
}
415
-
416
-
func networkName(wid models.WorkflowId) string {
417
-
return fmt.Sprintf("workflow-network-%s", wid)
418
-
}
419
-
420
-
func hostConfig(wid models.WorkflowId) *container.HostConfig {
421
-
hostConfig := &container.HostConfig{
422
-
Mounts: []mount.Mount{
423
-
{
424
-
Type: mount.TypeVolume,
425
-
Source: workspaceVolume(wid),
426
-
Target: workspaceDir,
427
-
},
428
-
{
429
-
Type: mount.TypeVolume,
430
-
Source: nixVolume(wid),
431
-
Target: "/nix",
432
-
},
433
-
{
434
-
Type: mount.TypeTmpfs,
435
-
Target: "/tmp",
436
-
ReadOnly: false,
437
-
TmpfsOptions: &mount.TmpfsOptions{
438
-
Mode: 0o1777, // world-writeable sticky bit
439
-
Options: [][]string{
440
-
{"exec"},
441
-
},
442
-
},
443
-
},
444
-
{
445
-
Type: mount.TypeVolume,
446
-
Source: "etc-nix-" + wid.String(),
447
-
Target: "/etc/nix",
448
-
},
449
-
},
450
-
ReadonlyRootfs: false,
451
-
CapDrop: []string{"ALL"},
452
-
CapAdd: []string{"CAP_DAC_OVERRIDE"},
453
-
SecurityOpt: []string{"no-new-privileges"},
454
-
ExtraHosts: []string{"host.docker.internal:host-gateway"},
455
-
}
456
-
457
-
return hostConfig
458
-
}
459
-
460
-
// thanks woodpecker
461
-
func isErrContainerNotFoundOrNotRunning(err error) bool {
462
-
// Error response from daemon: Cannot kill container: ...: No such container: ...
463
-
// Error response from daemon: Cannot kill container: ...: Container ... is not running"
464
-
// Error response from podman daemon: can only kill running containers. ... is in state exited
465
-
// Error: No such container: ...
466
-
return err != nil && (strings.Contains(err.Error(), "No such container") || strings.Contains(err.Error(), "is not running") || strings.Contains(err.Error(), "can only kill running containers"))
467
120
}
-28
spindle/engine/envs.go
-28
spindle/engine/envs.go
···
1
-
package engine
2
-
3
-
import (
4
-
"fmt"
5
-
)
6
-
7
-
type EnvVars []string
8
-
9
-
// ConstructEnvs converts a tangled.Pipeline_Step_Environment_Elem.{Key,Value}
10
-
// representation into a docker-friendly []string{"KEY=value", ...} slice.
11
-
func ConstructEnvs(envs map[string]string) EnvVars {
12
-
var dockerEnvs EnvVars
13
-
for k, v := range envs {
14
-
ev := fmt.Sprintf("%s=%s", k, v)
15
-
dockerEnvs = append(dockerEnvs, ev)
16
-
}
17
-
return dockerEnvs
18
-
}
19
-
20
-
// Slice returns the EnvVar as a []string slice.
21
-
func (ev EnvVars) Slice() []string {
22
-
return ev
23
-
}
24
-
25
-
// AddEnv adds a key=value string to the EnvVar.
26
-
func (ev *EnvVars) AddEnv(key, value string) {
27
-
*ev = append(*ev, fmt.Sprintf("%s=%s", key, value))
28
-
}
-48
spindle/engine/envs_test.go
-48
spindle/engine/envs_test.go
···
1
-
package engine
2
-
3
-
import (
4
-
"testing"
5
-
6
-
"github.com/stretchr/testify/assert"
7
-
)
8
-
9
-
func TestConstructEnvs(t *testing.T) {
10
-
tests := []struct {
11
-
name string
12
-
in map[string]string
13
-
want EnvVars
14
-
}{
15
-
{
16
-
name: "empty input",
17
-
in: make(map[string]string),
18
-
want: EnvVars{},
19
-
},
20
-
{
21
-
name: "single env var",
22
-
in: map[string]string{"FOO": "bar"},
23
-
want: EnvVars{"FOO=bar"},
24
-
},
25
-
{
26
-
name: "multiple env vars",
27
-
in: map[string]string{"FOO": "bar", "BAZ": "qux"},
28
-
want: EnvVars{"FOO=bar", "BAZ=qux"},
29
-
},
30
-
}
31
-
for _, tt := range tests {
32
-
t.Run(tt.name, func(t *testing.T) {
33
-
got := ConstructEnvs(tt.in)
34
-
if got == nil {
35
-
got = EnvVars{}
36
-
}
37
-
assert.ElementsMatch(t, tt.want, got)
38
-
})
39
-
}
40
-
}
41
-
42
-
func TestAddEnv(t *testing.T) {
43
-
ev := EnvVars{}
44
-
ev.AddEnv("FOO", "bar")
45
-
ev.AddEnv("BAZ", "qux")
46
-
want := EnvVars{"FOO=bar", "BAZ=qux"}
47
-
assert.ElementsMatch(t, want, ev)
48
-
}
-9
spindle/engine/errors.go
-9
spindle/engine/errors.go
-84
spindle/engine/logger.go
-84
spindle/engine/logger.go
···
1
-
package engine
2
-
3
-
import (
4
-
"encoding/json"
5
-
"fmt"
6
-
"io"
7
-
"os"
8
-
"path/filepath"
9
-
"strings"
10
-
11
-
"tangled.sh/tangled.sh/core/spindle/models"
12
-
)
13
-
14
-
type WorkflowLogger struct {
15
-
file *os.File
16
-
encoder *json.Encoder
17
-
}
18
-
19
-
func NewWorkflowLogger(baseDir string, wid models.WorkflowId) (*WorkflowLogger, error) {
20
-
path := LogFilePath(baseDir, wid)
21
-
22
-
file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
23
-
if err != nil {
24
-
return nil, fmt.Errorf("creating log file: %w", err)
25
-
}
26
-
27
-
return &WorkflowLogger{
28
-
file: file,
29
-
encoder: json.NewEncoder(file),
30
-
}, nil
31
-
}
32
-
33
-
func LogFilePath(baseDir string, workflowID models.WorkflowId) string {
34
-
logFilePath := filepath.Join(baseDir, fmt.Sprintf("%s.log", workflowID.String()))
35
-
return logFilePath
36
-
}
37
-
38
-
func (l *WorkflowLogger) Close() error {
39
-
return l.file.Close()
40
-
}
41
-
42
-
func (l *WorkflowLogger) DataWriter(stream string) io.Writer {
43
-
// TODO: emit stream
44
-
return &dataWriter{
45
-
logger: l,
46
-
stream: stream,
47
-
}
48
-
}
49
-
50
-
func (l *WorkflowLogger) ControlWriter(idx int, step models.Step) io.Writer {
51
-
return &controlWriter{
52
-
logger: l,
53
-
idx: idx,
54
-
step: step,
55
-
}
56
-
}
57
-
58
-
type dataWriter struct {
59
-
logger *WorkflowLogger
60
-
stream string
61
-
}
62
-
63
-
func (w *dataWriter) Write(p []byte) (int, error) {
64
-
line := strings.TrimRight(string(p), "\r\n")
65
-
entry := models.NewDataLogLine(line, w.stream)
66
-
if err := w.logger.encoder.Encode(entry); err != nil {
67
-
return 0, err
68
-
}
69
-
return len(p), nil
70
-
}
71
-
72
-
type controlWriter struct {
73
-
logger *WorkflowLogger
74
-
idx int
75
-
step models.Step
76
-
}
77
-
78
-
func (w *controlWriter) Write(_ []byte) (int, error) {
79
-
entry := models.NewControlLogLine(w.idx, w.step)
80
-
if err := w.logger.encoder.Encode(entry); err != nil {
81
-
return 0, err
82
-
}
83
-
return len(w.step.Name), nil
84
-
}
+21
spindle/engines/nixery/ansi_stripper.go
+21
spindle/engines/nixery/ansi_stripper.go
···
1
+
package nixery
2
+
3
+
import (
4
+
"io"
5
+
6
+
"regexp"
7
+
)
8
+
9
+
// regex to match ANSI escape codes (e.g., color codes, cursor moves)
const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"

// re is compiled once at package scope; Write may be called on hot paths.
var re = regexp.MustCompile(ansi)

// ansiStrippingWriter wraps another io.Writer and removes ANSI escape
// sequences from everything written through it.
type ansiStrippingWriter struct {
	underlying io.Writer
}

// Write strips ANSI escape codes from p and forwards the cleaned bytes to
// the underlying writer.
//
// It reports len(p) on success: per the io.Writer contract the return
// value is the number of bytes of p consumed, and returning the length of
// the *stripped* output (as the previous implementation did) makes
// callers such as io.Copy fail with ErrShortWrite whenever any escape
// sequence was removed.
func (w *ansiStrippingWriter) Write(p []byte) (int, error) {
	clean := re.ReplaceAll(p, []byte{})
	if _, err := w.underlying.Write(clean); err != nil {
		return 0, err
	}
	return len(p), nil
}
+418
spindle/engines/nixery/engine.go
+418
spindle/engines/nixery/engine.go
···
1
+
package nixery
2
+
3
+
import (
4
+
"context"
5
+
"errors"
6
+
"fmt"
7
+
"io"
8
+
"log/slog"
9
+
"os"
10
+
"path"
11
+
"runtime"
12
+
"sync"
13
+
"time"
14
+
15
+
"github.com/docker/docker/api/types/container"
16
+
"github.com/docker/docker/api/types/image"
17
+
"github.com/docker/docker/api/types/mount"
18
+
"github.com/docker/docker/api/types/network"
19
+
"github.com/docker/docker/client"
20
+
"github.com/docker/docker/pkg/stdcopy"
21
+
"gopkg.in/yaml.v3"
22
+
"tangled.sh/tangled.sh/core/api/tangled"
23
+
"tangled.sh/tangled.sh/core/log"
24
+
"tangled.sh/tangled.sh/core/spindle/config"
25
+
"tangled.sh/tangled.sh/core/spindle/engine"
26
+
"tangled.sh/tangled.sh/core/spindle/models"
27
+
"tangled.sh/tangled.sh/core/spindle/secrets"
28
+
)
29
+
30
+
const (
	// workspaceDir is the working directory inside the workflow container;
	// the repository is checked out here and every exec runs from it.
	workspaceDir = "/tangled/workspace"
	// homeDir is created alongside the workspace and exported as HOME for
	// user steps (see RunStep).
	homeDir = "/tangled/home"
)

// cleanupFunc tears down one resource created during workflow setup.
type cleanupFunc func(context.Context) error

// Engine runs workflows in docker containers whose images are assembled
// on the fly by a Nixery instance.
type Engine struct {
	docker client.APIClient
	l      *slog.Logger
	cfg    *config.Config

	// cleanupMu guards cleanup, which maps WorkflowId.String() to the
	// teardown funcs registered while that workflow was being set up.
	cleanupMu sync.Mutex
	cleanup   map[string][]cleanupFunc
}

// Step is this engine's concrete implementation of models.Step.
type Step struct {
	name        string
	kind        models.StepKind
	command     string
	environment map[string]string
}

// Name returns the human-readable step title.
func (s Step) Name() string {
	return s.name
}

// Command returns the shell command this step executes.
func (s Step) Command() string {
	return s.command
}

// Kind reports the step's models.StepKind (InitWorkflow tags steps parsed
// from the workflow file as StepKindUser).
func (s Step) Kind() models.StepKind {
	return s.kind
}

// setupSteps get added to start of Steps
type setupSteps []models.Step

// addStep adds a step to the beginning of the workflow's steps.
func (ss *setupSteps) addStep(step models.Step) {
	*ss = append(*ss, step)
}

// addlFields is the engine-private payload carried on models.Workflow.Data
// between InitWorkflow, SetupWorkflow and RunStep.
type addlFields struct {
	image     string            // Nixery image reference for the run
	container string            // container ID, filled in by SetupWorkflow
	env       map[string]string // workflow-level environment variables
}
78
+
79
+
// InitWorkflow parses the workflow's raw YAML definition (steps,
// dependencies, environment) into a models.Workflow: each user-defined
// step becomes a StepKindUser Step, the engine's setup steps (nix config,
// clone, optional dependency install) are prepended, and engine-private
// data (image reference, workflow env) is stashed in Workflow.Data.
func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
	swf := &models.Workflow{}
	addl := addlFields{}

	// anonymous schema for the YAML payload carried in twf.Raw
	dwf := &struct {
		Steps []struct {
			Command     string            `yaml:"command"`
			Name        string            `yaml:"name"`
			Environment map[string]string `yaml:"environment"`
		} `yaml:"steps"`
		Dependencies map[string][]string `yaml:"dependencies"`
		Environment  map[string]string   `yaml:"environment"`
	}{}
	err := yaml.Unmarshal([]byte(twf.Raw), &dwf)
	if err != nil {
		return nil, err
	}

	// user-defined steps, kept in file order
	for _, dstep := range dwf.Steps {
		sstep := Step{}
		sstep.environment = dstep.Environment
		sstep.command = dstep.Command
		sstep.name = dstep.Name
		sstep.kind = models.StepKindUser
		swf.Steps = append(swf.Steps, sstep)
	}
	swf.Name = twf.Name
	addl.env = dwf.Environment
	// the container image is derived from the nixpkgs dependencies
	addl.image = workflowImage(dwf.Dependencies, e.cfg.NixeryPipelines.Nixery)

	setup := &setupSteps{}

	setup.addStep(nixConfStep())
	setup.addStep(cloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
	// this step could be empty
	if s := dependencyStep(dwf.Dependencies); s != nil {
		setup.addStep(*s)
	}

	// append setup steps in order to the start of workflow steps
	swf.Steps = append(*setup, swf.Steps...)
	swf.Data = addl

	return swf, nil
}
124
+
125
+
func (e *Engine) WorkflowTimeout() time.Duration {
126
+
workflowTimeoutStr := e.cfg.NixeryPipelines.WorkflowTimeout
127
+
workflowTimeout, err := time.ParseDuration(workflowTimeoutStr)
128
+
if err != nil {
129
+
e.l.Error("failed to parse workflow timeout", "error", err, "timeout", workflowTimeoutStr)
130
+
workflowTimeout = 5 * time.Minute
131
+
}
132
+
133
+
return workflowTimeout
134
+
}
135
+
136
+
// workflowImage builds the Nixery image reference for a workflow: the
// packages requested from the "nixpkgs" registry plus a default toolchain
// (bash, git, coreutils, nix), prefixed with "arm64" when running on an
// arm64 host, all joined beneath the configured nixery host.
func workflowImage(deps map[string][]string, nixery string) string {
	// only nixpkgs dependencies are baked into the image; other registries
	// are installed at runtime by dependencyStep
	pkgs := path.Join(deps["nixpkgs"]...)

	// load defaults from somewhere else
	pkgs = path.Join(pkgs, "bash", "git", "coreutils", "nix")

	if runtime.GOARCH == "arm64" {
		pkgs = path.Join("arm64", pkgs)
	}

	return path.Join(nixery, pkgs)
}
153
+
154
+
// New constructs the nixery Engine: a docker client configured from the
// environment (DOCKER_HOST etc., with API version negotiation), a
// component-scoped logger taken from ctx, and an empty cleanup registry.
func New(ctx context.Context, cfg *config.Config) (*Engine, error) {
	dcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, err
	}

	l := log.FromContext(ctx).With("component", "spindle")

	e := &Engine{
		docker: dcli,
		l:      l,
		cfg:    cfg,
	}

	e.cleanup = make(map[string][]cleanupFunc)

	return e, nil
}
172
+
173
+
// SetupWorkflow provisions everything a workflow run needs before any
// step executes: a bridge network, the pulled Nixery image, and a
// long-lived container (kept alive by `cat` on an open stdin) with the
// workspace and home directories created. Each created resource registers
// a cleanup func that DestroyWorkflow later runs. On success the
// container ID is recorded in wf.Data.
func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow) error {
	e.l.Info("setting up workflow", "workflow", wid)

	_, err := e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
		Driver: "bridge",
	})
	if err != nil {
		return err
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		return e.docker.NetworkRemove(ctx, networkName(wid))
	})

	addl := wf.Data.(addlFields)

	reader, err := e.docker.ImagePull(ctx, addl.image, image.PullOptions{})
	if err != nil {
		e.l.Error("pipeline image pull failed!", "image", addl.image, "workflowId", wid, "error", err.Error())

		return fmt.Errorf("pulling image: %w", err)
	}
	defer reader.Close()
	// draining the reader is what drives the pull to completion;
	// NOTE(review): the copy error is ignored and progress goes to the
	// daemon's stdout rather than the workflow log — confirm intent.
	io.Copy(os.Stdout, reader)

	resp, err := e.docker.ContainerCreate(ctx, &container.Config{
		Image:      addl.image,
		Cmd:        []string{"cat"},
		OpenStdin:  true, // so cat stays alive :3
		Tty:        false,
		Hostname:   "spindle",
		WorkingDir: workspaceDir,
		// TODO(winter): investigate whether environment variables passed here
		// get propagated to ContainerExec processes
	}, &container.HostConfig{
		Mounts: []mount.Mount{
			{
				Type:     mount.TypeTmpfs,
				Target:   "/tmp",
				ReadOnly: false,
				TmpfsOptions: &mount.TmpfsOptions{
					Mode: 0o1777, // world-writeable sticky bit
					Options: [][]string{
						{"exec"},
					},
				},
			},
		},
		ReadonlyRootfs: false,
		CapDrop:        []string{"ALL"},
		CapAdd:         []string{"CAP_DAC_OVERRIDE"},
		SecurityOpt:    []string{"no-new-privileges"},
		ExtraHosts:     []string{"host.docker.internal:host-gateway"},
	}, nil, nil, "")
	if err != nil {
		return fmt.Errorf("creating container: %w", err)
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		// NOTE(review): this assigns the captured outer `err` instead of
		// declaring a local (err := ...) — works, but mutates state of the
		// enclosing call long after it returned; confirm it is intentional.
		err = e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
		if err != nil {
			return err
		}

		return e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
			RemoveVolumes: true,
			RemoveLinks:   false,
			Force:         false,
		})
	})

	err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
	if err != nil {
		return fmt.Errorf("starting container: %w", err)
	}

	// create the workspace and home directories inside the container
	mkExecResp, err := e.docker.ContainerExecCreate(ctx, resp.ID, container.ExecOptions{
		Cmd:          []string{"mkdir", "-p", workspaceDir, homeDir},
		AttachStdout: true, // NOTE(winter): pretty sure this will make it so that when stdout read is done below, mkdir is done. maybe??
		AttachStderr: true, // for good measure, backed up by docker/cli ("If -d is not set, attach to everything by default")
	})
	if err != nil {
		return err
	}

	// This actually *starts* the command. Thanks, Docker!
	execResp, err := e.docker.ContainerExecAttach(ctx, mkExecResp.ID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer execResp.Close()

	// This is apparently best way to wait for the command to complete.
	_, err = io.ReadAll(execResp.Reader)
	if err != nil {
		return err
	}

	execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID)
	if err != nil {
		return err
	}

	if execInspectResp.ExitCode != 0 {
		return fmt.Errorf("mkdir exited with exit code %d", execInspectResp.ExitCode)
	} else if execInspectResp.Running {
		return errors.New("mkdir is somehow still running??")
	}

	// hand the container ID to RunStep via the workflow's engine data
	addl.container = resp.ID
	wf.Data = addl

	return nil
}
285
+
286
+
func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
287
+
addl := w.Data.(addlFields)
288
+
workflowEnvs := ConstructEnvs(addl.env)
289
+
// TODO(winter): should SetupWorkflow also have secret access?
290
+
// IMO yes, but probably worth thinking on.
291
+
for _, s := range secrets {
292
+
workflowEnvs.AddEnv(s.Key, s.Value)
293
+
}
294
+
295
+
step := w.Steps[idx].(Step)
296
+
297
+
select {
298
+
case <-ctx.Done():
299
+
return ctx.Err()
300
+
default:
301
+
}
302
+
303
+
envs := append(EnvVars(nil), workflowEnvs...)
304
+
for k, v := range step.environment {
305
+
envs.AddEnv(k, v)
306
+
}
307
+
envs.AddEnv("HOME", homeDir)
308
+
309
+
mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{
310
+
Cmd: []string{"bash", "-c", step.command},
311
+
AttachStdout: true,
312
+
AttachStderr: true,
313
+
Env: envs,
314
+
})
315
+
if err != nil {
316
+
return fmt.Errorf("creating exec: %w", err)
317
+
}
318
+
319
+
// start tailing logs in background
320
+
tailDone := make(chan error, 1)
321
+
go func() {
322
+
tailDone <- e.tailStep(ctx, wfLogger, mkExecResp.ID, wid, idx, step)
323
+
}()
324
+
325
+
select {
326
+
case <-tailDone:
327
+
328
+
case <-ctx.Done():
329
+
// cleanup will be handled by DestroyWorkflow, since
330
+
// Docker doesn't provide an API to kill an exec run
331
+
// (sure, we could grab the PID and kill it ourselves,
332
+
// but that's wasted effort)
333
+
e.l.Warn("step timed out", "step", step.Name)
334
+
335
+
<-tailDone
336
+
337
+
return engine.ErrTimedOut
338
+
}
339
+
340
+
select {
341
+
case <-ctx.Done():
342
+
return ctx.Err()
343
+
default:
344
+
}
345
+
346
+
execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID)
347
+
if err != nil {
348
+
return err
349
+
}
350
+
351
+
if execInspectResp.ExitCode != 0 {
352
+
inspectResp, err := e.docker.ContainerInspect(ctx, addl.container)
353
+
if err != nil {
354
+
return err
355
+
}
356
+
357
+
e.l.Error("workflow failed!", "workflow_id", wid.String(), "exit_code", execInspectResp.ExitCode, "oom_killed", inspectResp.State.OOMKilled)
358
+
359
+
if inspectResp.State.OOMKilled {
360
+
return ErrOOMKilled
361
+
}
362
+
return engine.ErrWorkflowFailed
363
+
}
364
+
365
+
return nil
366
+
}
367
+
368
+
// tailStep attaches to the exec identified by execID and copies its
// demultiplexed stdout/stderr into the workflow logger until the stream
// ends. A nil wfLogger makes this a no-op (the exec is then never
// attached, so it is not started here).
// NOTE(review): wid, stepIdx and step are currently unused — presumably
// reserved for per-step log framing; confirm before removing.
func (e *Engine) tailStep(ctx context.Context, wfLogger *models.WorkflowLogger, execID string, wid models.WorkflowId, stepIdx int, step models.Step) error {
	if wfLogger == nil {
		return nil
	}

	// This actually *starts* the command. Thanks, Docker!
	logs, err := e.docker.ContainerExecAttach(ctx, execID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer logs.Close()

	// stdcopy splits docker's multiplexed stream back into the two
	// per-stream writers
	_, err = stdcopy.StdCopy(
		wfLogger.DataWriter("stdout"),
		wfLogger.DataWriter("stderr"),
		logs.Reader,
	)
	// EOF and deadline-exceeded are the normal ways the stream ends
	if err != nil && err != io.EOF && !errors.Is(err, context.DeadlineExceeded) {
		return fmt.Errorf("failed to copy logs: %w", err)
	}

	return nil
}
391
+
392
+
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
393
+
e.cleanupMu.Lock()
394
+
key := wid.String()
395
+
396
+
fns := e.cleanup[key]
397
+
delete(e.cleanup, key)
398
+
e.cleanupMu.Unlock()
399
+
400
+
for _, fn := range fns {
401
+
if err := fn(ctx); err != nil {
402
+
e.l.Error("failed to cleanup workflow resource", "workflowId", wid, "error", err)
403
+
}
404
+
}
405
+
return nil
406
+
}
407
+
408
+
func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
409
+
e.cleanupMu.Lock()
410
+
defer e.cleanupMu.Unlock()
411
+
412
+
key := wid.String()
413
+
e.cleanup[key] = append(e.cleanup[key], fn)
414
+
}
415
+
416
+
func networkName(wid models.WorkflowId) string {
417
+
return fmt.Sprintf("workflow-network-%s", wid)
418
+
}
+28
spindle/engines/nixery/envs.go
+28
spindle/engines/nixery/envs.go
···
1
+
package nixery
2
+
3
+
import (
4
+
"fmt"
5
+
)
6
+
7
+
// EnvVars is a docker-friendly environment list of "KEY=value" entries.
type EnvVars []string

// ConstructEnvs converts a tangled.Pipeline_Step_Environment_Elem.{Key,Value}
// representation into a docker-friendly []string{"KEY=value", ...} slice.
// A nil or empty map yields a nil slice.
func ConstructEnvs(envs map[string]string) EnvVars {
	var out EnvVars
	for key, value := range envs {
		out = append(out, fmt.Sprintf("%s=%s", key, value))
	}
	return out
}

// Slice exposes the EnvVars as a plain []string.
func (ev EnvVars) Slice() []string {
	return ev
}

// AddEnv appends a single "key=value" entry in place.
func (ev *EnvVars) AddEnv(key, value string) {
	entry := fmt.Sprintf("%s=%s", key, value)
	*ev = append(*ev, entry)
}
+48
spindle/engines/nixery/envs_test.go
+48
spindle/engines/nixery/envs_test.go
···
1
+
package nixery
2
+
3
+
import (
4
+
"testing"
5
+
6
+
"github.com/stretchr/testify/assert"
7
+
)
8
+
9
+
// TestConstructEnvs checks the map -> "KEY=value" conversion. ElementsMatch
// is used because map iteration order is random, so the slice order is
// not deterministic.
func TestConstructEnvs(t *testing.T) {
	tests := []struct {
		name string
		in   map[string]string
		want EnvVars
	}{
		{
			name: "empty input",
			in:   make(map[string]string),
			want: EnvVars{},
		},
		{
			name: "single env var",
			in:   map[string]string{"FOO": "bar"},
			want: EnvVars{"FOO=bar"},
		},
		{
			name: "multiple env vars",
			in:   map[string]string{"FOO": "bar", "BAZ": "qux"},
			want: EnvVars{"FOO=bar", "BAZ=qux"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ConstructEnvs(tt.in)
			// ConstructEnvs returns nil for empty input; normalize so the
			// comparison with the non-nil want slice is order-insensitive only
			if got == nil {
				got = EnvVars{}
			}
			assert.ElementsMatch(t, tt.want, got)
		})
	}
}

// TestAddEnv checks that AddEnv appends formatted entries in place.
func TestAddEnv(t *testing.T) {
	ev := EnvVars{}
	ev.AddEnv("FOO", "bar")
	ev.AddEnv("BAZ", "qux")
	want := EnvVars{"FOO=bar", "BAZ=qux"}
	assert.ElementsMatch(t, want, ev)
}
+7
spindle/engines/nixery/errors.go
+7
spindle/engines/nixery/errors.go
+126
spindle/engines/nixery/setup_steps.go
+126
spindle/engines/nixery/setup_steps.go
···
1
+
package nixery
2
+
3
+
import (
4
+
"fmt"
5
+
"path"
6
+
"strings"
7
+
8
+
"tangled.sh/tangled.sh/core/api/tangled"
9
+
"tangled.sh/tangled.sh/core/workflow"
10
+
)
11
+
12
+
// nixConfStep returns a setup step that creates /etc/nix and appends a
// nix.conf enabling the nix-command and flakes experimental features and
// setting an empty build-users-group (NOTE(review): presumably to allow
// builds without the nixbld group inside the container — confirm).
func nixConfStep() Step {
	setupCmd := `mkdir -p /etc/nix
echo 'extra-experimental-features = nix-command flakes' >> /etc/nix/nix.conf
echo 'build-users-group = ' >> /etc/nix/nix.conf`
	return Step{
		command: setupCmd,
		name:    "Configure Nix",
	}
}
21
+
22
+
// cloneOptsAsSteps processes clone options and adds corresponding steps
// to the beginning of the workflow's step list if cloning is not skipped.
//
// the steps to do here are:
// - git init
// - git remote add origin <url>
// - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha>
// - git checkout FETCH_HEAD
func cloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) Step {
	// NOTE(review): on Clone.Skip a zero Step (empty name/command) is
	// returned and the caller still appends it — confirm an empty step is
	// harmless at execution time.
	if twf.Clone.Skip {
		return Step{}
	}

	var commands []string

	// initialize git repo in workspace
	commands = append(commands, "git init")

	// add repo as git remote
	scheme := "https://"
	if dev {
		// dev mode: plain http, and reach the host's knot through docker's
		// host gateway alias
		scheme = "http://"
		tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal")
	}
	url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo)
	commands = append(commands, fmt.Sprintf("git remote add origin %s", url))

	// run git fetch
	{
		var fetchArgs []string

		// default clone depth is 1
		depth := 1
		if twf.Clone.Depth > 1 {
			depth = int(twf.Clone.Depth)
		}
		fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth))

		// optionally recurse submodules
		if twf.Clone.Submodules {
			fetchArgs = append(fetchArgs, "--recurse-submodules=yes")
		}

		// set remote to fetch from
		fetchArgs = append(fetchArgs, "origin")

		// set revision to checkout
		switch workflow.TriggerKind(tr.Kind) {
		case workflow.TriggerKindManual:
			// TODO: unimplemented
		case workflow.TriggerKindPush:
			fetchArgs = append(fetchArgs, tr.Push.NewSha)
		case workflow.TriggerKindPullRequest:
			fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha)
		}

		commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")))
	}

	// run git checkout
	commands = append(commands, "git checkout FETCH_HEAD")

	cloneStep := Step{
		command: strings.Join(commands, "\n"),
		name:    "Clone repository into workspace",
	}
	return cloneStep
}
90
+
91
+
// dependencyStep processes dependencies defined in the workflow.
92
+
// For dependencies using a custom registry (i.e. not nixpkgs), it collects
93
+
// all packages and adds a single 'nix profile install' step to the
94
+
// beginning of the workflow's step list.
95
+
func dependencyStep(deps map[string][]string) *Step {
96
+
var customPackages []string
97
+
98
+
for registry, packages := range deps {
99
+
if registry == "nixpkgs" {
100
+
continue
101
+
}
102
+
103
+
if len(packages) == 0 {
104
+
customPackages = append(customPackages, registry)
105
+
}
106
+
// collect packages from custom registries
107
+
for _, pkg := range packages {
108
+
customPackages = append(customPackages, fmt.Sprintf("'%s#%s'", registry, pkg))
109
+
}
110
+
}
111
+
112
+
if len(customPackages) > 0 {
113
+
installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile install"
114
+
cmd := fmt.Sprintf("%s %s", installCmd, strings.Join(customPackages, " "))
115
+
installStep := Step{
116
+
command: cmd,
117
+
name: "Install custom dependencies",
118
+
environment: map[string]string{
119
+
"NIX_NO_COLOR": "1",
120
+
"NIX_SHOW_DOWNLOAD_PROGRESS": "0",
121
+
},
122
+
}
123
+
return &installStep
124
+
}
125
+
return nil
126
+
}
+17
spindle/models/engine.go
+17
spindle/models/engine.go
···
1
+
package models
2
+
3
+
import (
4
+
"context"
5
+
"time"
6
+
7
+
"tangled.sh/tangled.sh/core/api/tangled"
8
+
"tangled.sh/tangled.sh/core/spindle/secrets"
9
+
)
10
+
11
+
// Engine abstracts a workflow execution backend (e.g. the nixery/docker
// engine). The server drives implementations through this lifecycle:
// InitWorkflow, then SetupWorkflow, then RunStep for each step, and
// finally DestroyWorkflow.
type Engine interface {
	// InitWorkflow translates the raw workflow definition into a Workflow,
	// including any engine-injected setup steps.
	InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*Workflow, error)
	// SetupWorkflow provisions the resources a run needs before its first
	// step executes.
	SetupWorkflow(ctx context.Context, wid WorkflowId, wf *Workflow) error
	// WorkflowTimeout reports how long a whole workflow may run.
	WorkflowTimeout() time.Duration
	// DestroyWorkflow releases everything SetupWorkflow created.
	DestroyWorkflow(ctx context.Context, wid WorkflowId) error
	// RunStep executes step idx of w, streaming output to wfLogger and
	// exposing the given unlocked secrets to the step.
	RunStep(ctx context.Context, wid WorkflowId, w *Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *WorkflowLogger) error
}
+82
spindle/models/logger.go
+82
spindle/models/logger.go
···
1
+
package models
2
+
3
+
import (
4
+
"encoding/json"
5
+
"fmt"
6
+
"io"
7
+
"os"
8
+
"path/filepath"
9
+
"strings"
10
+
)
11
+
12
+
// WorkflowLogger persists a workflow's log stream as JSON lines, one
// encoded entry per line, in a per-workflow file.
type WorkflowLogger struct {
	file    *os.File
	encoder *json.Encoder
}

// NewWorkflowLogger opens (creating or appending to) the log file for wid
// under baseDir. The caller owns the logger and must Close it.
func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) {
	path := LogFilePath(baseDir, wid)

	file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, fmt.Errorf("creating log file: %w", err)
	}

	return &WorkflowLogger{
		file:    file,
		encoder: json.NewEncoder(file),
	}, nil
}

// LogFilePath returns baseDir/<workflow-id>.log for the given workflow.
func LogFilePath(baseDir string, workflowID WorkflowId) string {
	logFilePath := filepath.Join(baseDir, fmt.Sprintf("%s.log", workflowID.String()))
	return logFilePath
}

// Close closes the underlying log file.
func (l *WorkflowLogger) Close() error {
	return l.file.Close()
}

// DataWriter returns an io.Writer that records step output under the
// given stream label (e.g. "stdout" or "stderr") as data log lines.
func (l *WorkflowLogger) DataWriter(stream string) io.Writer {
	// TODO: emit stream
	return &dataWriter{
		logger: l,
		stream: stream,
	}
}

// ControlWriter returns an io.Writer that emits a control log line for
// step idx whenever it is written to.
func (l *WorkflowLogger) ControlWriter(idx int, step Step) io.Writer {
	return &controlWriter{
		logger: l,
		idx:    idx,
		step:   step,
	}
}

// dataWriter adapts the logger's JSON encoder to io.Writer for step output.
type dataWriter struct {
	logger *WorkflowLogger
	stream string
}

// Write records p as a single data log line with trailing CR/LF stripped.
// NOTE(review): a Write carrying several newline-separated lines becomes
// one entry — assumes the producer writes line-at-a-time; confirm callers.
func (w *dataWriter) Write(p []byte) (int, error) {
	line := strings.TrimRight(string(p), "\r\n")
	entry := NewDataLogLine(line, w.stream)
	if err := w.logger.encoder.Encode(entry); err != nil {
		return 0, err
	}
	return len(p), nil
}
69
+
70
+
type controlWriter struct {
71
+
logger *WorkflowLogger
72
+
idx int
73
+
step Step
74
+
}
75
+
76
+
func (w *controlWriter) Write(_ []byte) (int, error) {
77
+
entry := NewControlLogLine(w.idx, w.step)
78
+
if err := w.logger.encoder.Encode(entry); err != nil {
79
+
return 0, err
80
+
}
81
+
return len(w.step.Name()), nil
82
+
}
+3
-3
spindle/models/models.go
+3
-3
spindle/models/models.go
···
104
104
// NewControlLogLine builds the control-kind LogLine describing step idx,
// capturing the step's name, kind and command through the Step accessors.
func NewControlLogLine(idx int, step Step) LogLine {
	return LogLine{
		Kind:        LogKindControl,
		Content:     step.Name(),
		StepId:      idx,
		StepKind:    step.Kind(),
		StepCommand: step.Command(),
	}
}
+8
-103
spindle/models/pipeline.go
+8
-103
spindle/models/pipeline.go
···
1
1
package models
2
2
3
-
import (
4
-
"path"
5
-
6
-
"tangled.sh/tangled.sh/core/api/tangled"
7
-
"tangled.sh/tangled.sh/core/spindle/config"
8
-
)
9
-
10
3
// Pipeline is the engine-agnostic model of one triggered pipeline: the
// repo it belongs to plus its workflows, bucketed by the Engine that
// should execute them.
type Pipeline struct {
	RepoOwner string
	RepoName  string
	Workflows map[Engine][]Workflow
}
15
8
16
-
type Step struct {
17
-
Command string
18
-
Name string
19
-
Environment map[string]string
20
-
Kind StepKind
9
+
// Step is a read-only view of one unit of work in a Workflow; engines
// provide their own concrete step types behind this interface.
type Step interface {
	// Name is the human-readable step title.
	Name() string
	// Command is the shell command the step runs.
	Command() string
	// Kind categorizes the step (e.g. StepKindUser for user-defined steps).
	Kind() StepKind
}
22
14
23
15
type StepKind int
···
30
22
)
31
23
32
24
type Workflow struct {
33
-
Steps []Step
34
-
Environment map[string]string
35
-
Name string
36
-
Image string
37
-
}
38
-
39
-
// setupSteps get added to start of Steps
40
-
type setupSteps []Step
41
-
42
-
// addStep adds a step to the beginning of the workflow's steps.
43
-
func (ss *setupSteps) addStep(step Step) {
44
-
*ss = append(*ss, step)
45
-
}
46
-
47
-
// ToPipeline converts a tangled.Pipeline into a model.Pipeline.
48
-
// In the process, dependencies are resolved: nixpkgs deps
49
-
// are constructed atop nixery and set as the Workflow.Image,
50
-
// and ones from custom registries
51
-
func ToPipeline(pl tangled.Pipeline, cfg config.Config) *Pipeline {
52
-
workflows := []Workflow{}
53
-
54
-
for _, twf := range pl.Workflows {
55
-
swf := &Workflow{}
56
-
for _, tstep := range twf.Steps {
57
-
sstep := Step{}
58
-
sstep.Environment = stepEnvToMap(tstep.Environment)
59
-
sstep.Command = tstep.Command
60
-
sstep.Name = tstep.Name
61
-
sstep.Kind = StepKindUser
62
-
swf.Steps = append(swf.Steps, sstep)
63
-
}
64
-
swf.Name = twf.Name
65
-
swf.Environment = workflowEnvToMap(twf.Environment)
66
-
swf.Image = workflowImage(twf.Dependencies, cfg.Pipelines.Nixery)
67
-
68
-
setup := &setupSteps{}
69
-
70
-
setup.addStep(nixConfStep())
71
-
setup.addStep(cloneStep(*twf, *pl.TriggerMetadata, cfg.Server.Dev))
72
-
// this step could be empty
73
-
if s := dependencyStep(*twf); s != nil {
74
-
setup.addStep(*s)
75
-
}
76
-
77
-
// append setup steps in order to the start of workflow steps
78
-
swf.Steps = append(*setup, swf.Steps...)
79
-
80
-
workflows = append(workflows, *swf)
81
-
}
82
-
repoOwner := pl.TriggerMetadata.Repo.Did
83
-
repoName := pl.TriggerMetadata.Repo.Repo
84
-
return &Pipeline{
85
-
RepoOwner: repoOwner,
86
-
RepoName: repoName,
87
-
Workflows: workflows,
88
-
}
89
-
}
90
-
91
-
func workflowEnvToMap(envs []*tangled.Pipeline_Pair) map[string]string {
92
-
envMap := map[string]string{}
93
-
for _, env := range envs {
94
-
if env != nil {
95
-
envMap[env.Key] = env.Value
96
-
}
97
-
}
98
-
return envMap
99
-
}
100
-
101
-
func stepEnvToMap(envs []*tangled.Pipeline_Pair) map[string]string {
102
-
envMap := map[string]string{}
103
-
for _, env := range envs {
104
-
if env != nil {
105
-
envMap[env.Key] = env.Value
106
-
}
107
-
}
108
-
return envMap
109
-
}
110
-
111
-
func workflowImage(deps []*tangled.Pipeline_Dependency, nixery string) string {
112
-
var dependencies string
113
-
for _, d := range deps {
114
-
if d.Registry == "nixpkgs" {
115
-
dependencies = path.Join(d.Packages...)
116
-
}
117
-
}
118
-
119
-
// load defaults from somewhere else
120
-
dependencies = path.Join(dependencies, "bash", "git", "coreutils", "nix")
121
-
122
-
return path.Join(nixery, dependencies)
25
+
Steps []Step
26
+
Name string
27
+
Data any
123
28
}
-128
spindle/models/setup_steps.go
-128
spindle/models/setup_steps.go
···
1
-
package models
2
-
3
-
import (
4
-
"fmt"
5
-
"path"
6
-
"strings"
7
-
8
-
"tangled.sh/tangled.sh/core/api/tangled"
9
-
"tangled.sh/tangled.sh/core/workflow"
10
-
)
11
-
12
-
func nixConfStep() Step {
13
-
setupCmd := `echo 'extra-experimental-features = nix-command flakes' >> /etc/nix/nix.conf
14
-
echo 'build-users-group = ' >> /etc/nix/nix.conf`
15
-
return Step{
16
-
Command: setupCmd,
17
-
Name: "Configure Nix",
18
-
}
19
-
}
20
-
21
-
// cloneOptsAsSteps processes clone options and adds corresponding steps
22
-
// to the beginning of the workflow's step list if cloning is not skipped.
23
-
//
24
-
// the steps to do here are:
25
-
// - git init
26
-
// - git remote add origin <url>
27
-
// - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha>
28
-
// - git checkout FETCH_HEAD
29
-
func cloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) Step {
30
-
if twf.Clone.Skip {
31
-
return Step{}
32
-
}
33
-
34
-
var commands []string
35
-
36
-
// initialize git repo in workspace
37
-
commands = append(commands, "git init")
38
-
39
-
// add repo as git remote
40
-
scheme := "https://"
41
-
if dev {
42
-
scheme = "http://"
43
-
tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal")
44
-
}
45
-
url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo)
46
-
commands = append(commands, fmt.Sprintf("git remote add origin %s", url))
47
-
48
-
// run git fetch
49
-
{
50
-
var fetchArgs []string
51
-
52
-
// default clone depth is 1
53
-
depth := 1
54
-
if twf.Clone.Depth > 1 {
55
-
depth = int(twf.Clone.Depth)
56
-
}
57
-
fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth))
58
-
59
-
// optionally recurse submodules
60
-
if twf.Clone.Submodules {
61
-
fetchArgs = append(fetchArgs, "--recurse-submodules=yes")
62
-
}
63
-
64
-
// set remote to fetch from
65
-
fetchArgs = append(fetchArgs, "origin")
66
-
67
-
// set revision to checkout
68
-
switch workflow.TriggerKind(tr.Kind) {
69
-
case workflow.TriggerKindManual:
70
-
// TODO: unimplemented
71
-
case workflow.TriggerKindPush:
72
-
fetchArgs = append(fetchArgs, tr.Push.NewSha)
73
-
case workflow.TriggerKindPullRequest:
74
-
fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha)
75
-
}
76
-
77
-
commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")))
78
-
}
79
-
80
-
// run git checkout
81
-
commands = append(commands, "git checkout FETCH_HEAD")
82
-
83
-
cloneStep := Step{
84
-
Command: strings.Join(commands, "\n"),
85
-
Name: "Clone repository into workspace",
86
-
}
87
-
return cloneStep
88
-
}
89
-
90
-
// dependencyStep processes dependencies defined in the workflow.
91
-
// For dependencies using a custom registry (i.e. not nixpkgs), it collects
92
-
// all packages and adds a single 'nix profile install' step to the
93
-
// beginning of the workflow's step list.
94
-
func dependencyStep(twf tangled.Pipeline_Workflow) *Step {
95
-
var customPackages []string
96
-
97
-
for _, d := range twf.Dependencies {
98
-
registry := d.Registry
99
-
packages := d.Packages
100
-
101
-
if registry == "nixpkgs" {
102
-
continue
103
-
}
104
-
105
-
if len(packages) == 0 {
106
-
customPackages = append(customPackages, registry)
107
-
}
108
-
// collect packages from custom registries
109
-
for _, pkg := range packages {
110
-
customPackages = append(customPackages, fmt.Sprintf("'%s#%s'", registry, pkg))
111
-
}
112
-
}
113
-
114
-
if len(customPackages) > 0 {
115
-
installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile install"
116
-
cmd := fmt.Sprintf("%s %s", installCmd, strings.Join(customPackages, " "))
117
-
installStep := Step{
118
-
Command: cmd,
119
-
Name: "Install custom dependencies",
120
-
Environment: map[string]string{
121
-
"NIX_NO_COLOR": "1",
122
-
"NIX_SHOW_DOWNLOAD_PROGRESS": "0",
123
-
},
124
-
}
125
-
return &installStep
126
-
}
127
-
return nil
128
-
}
+38
-8
spindle/server.go
+38
-8
spindle/server.go
···
20
20
"tangled.sh/tangled.sh/core/spindle/config"
21
21
"tangled.sh/tangled.sh/core/spindle/db"
22
22
"tangled.sh/tangled.sh/core/spindle/engine"
23
+
"tangled.sh/tangled.sh/core/spindle/engines/nixery"
23
24
"tangled.sh/tangled.sh/core/spindle/models"
24
25
"tangled.sh/tangled.sh/core/spindle/queue"
25
26
"tangled.sh/tangled.sh/core/spindle/secrets"
···
39
40
e *rbac.Enforcer
40
41
l *slog.Logger
41
42
n *notifier.Notifier
42
-
eng *engine.Engine
43
+
engs map[string]models.Engine
43
44
jq *queue.Queue
44
45
cfg *config.Config
45
46
ks *eventconsumer.Consumer
···
93
94
return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
94
95
}
95
96
96
-
eng, err := engine.New(ctx, cfg, d, &n, vault)
97
+
nixeryEng, err := nixery.New(ctx, cfg)
97
98
if err != nil {
98
99
return err
99
100
}
···
128
129
db: d,
129
130
l: logger,
130
131
n: &n,
131
-
eng: eng,
132
+
engs: map[string]models.Engine{"nixery": nixeryEng},
132
133
jq: jq,
133
134
cfg: cfg,
134
135
res: resolver,
···
216
217
Logger: logger,
217
218
Db: s.db,
218
219
Enforcer: s.e,
219
-
Engine: s.eng,
220
+
Engines: s.engs,
220
221
Config: s.cfg,
221
222
Resolver: s.res,
222
223
Vault: s.vault,
···
261
262
Rkey: msg.Rkey,
262
263
}
263
264
265
+
workflows := make(map[models.Engine][]models.Workflow)
266
+
264
267
for _, w := range tpl.Workflows {
265
268
if w != nil {
266
-
err := s.db.StatusPending(models.WorkflowId{
269
+
if _, ok := s.engs[w.Engine]; !ok {
270
+
err = s.db.StatusFailed(models.WorkflowId{
271
+
PipelineId: pipelineId,
272
+
Name: w.Name,
273
+
}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
274
+
if err != nil {
275
+
return err
276
+
}
277
+
278
+
continue
279
+
}
280
+
281
+
eng := s.engs[w.Engine]
282
+
283
+
if _, ok := workflows[eng]; !ok {
284
+
workflows[eng] = []models.Workflow{}
285
+
}
286
+
287
+
ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
288
+
if err != nil {
289
+
return err
290
+
}
291
+
292
+
workflows[eng] = append(workflows[eng], *ewf)
293
+
294
+
err = s.db.StatusPending(models.WorkflowId{
267
295
PipelineId: pipelineId,
268
296
Name: w.Name,
269
297
}, s.n)
···
273
301
}
274
302
}
275
303
276
-
spl := models.ToPipeline(tpl, *s.cfg)
277
-
278
304
ok := s.jq.Enqueue(queue.Job{
279
305
Run: func() error {
280
-
s.eng.StartWorkflows(ctx, spl, pipelineId)
306
+
engine.StartWorkflows(s.l, s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
307
+
RepoOwner: tpl.TriggerMetadata.Repo.Did,
308
+
RepoName: tpl.TriggerMetadata.Repo.Repo,
309
+
Workflows: workflows,
310
+
}, pipelineId)
281
311
return nil
282
312
},
283
313
OnFail: func(jobError error) {
+32
-2
spindle/stream.go
+32
-2
spindle/stream.go
···
6
6
"fmt"
7
7
"io"
8
8
"net/http"
9
+
"os"
9
10
"strconv"
10
11
"time"
11
12
12
-
"tangled.sh/tangled.sh/core/spindle/engine"
13
13
"tangled.sh/tangled.sh/core/spindle/models"
14
14
15
15
"github.com/go-chi/chi/v5"
···
143
143
}
144
144
isFinished := models.StatusKind(status.Status).IsFinish()
145
145
146
-
filePath := engine.LogFilePath(s.cfg.Pipelines.LogDir, wid)
146
+
filePath := models.LogFilePath(s.cfg.Server.LogDir, wid)
147
+
148
+
if status.Status == models.StatusKindFailed.String() && status.Error != nil {
149
+
if _, err := os.Stat(filePath); os.IsNotExist(err) {
150
+
msgs := []models.LogLine{
151
+
{
152
+
Kind: models.LogKindControl,
153
+
Content: "",
154
+
StepId: 0,
155
+
StepKind: models.StepKindUser,
156
+
},
157
+
{
158
+
Kind: models.LogKindData,
159
+
Content: *status.Error,
160
+
},
161
+
}
162
+
163
+
for _, msg := range msgs {
164
+
b, err := json.Marshal(msg)
165
+
if err != nil {
166
+
return err
167
+
}
168
+
169
+
if err := conn.WriteMessage(websocket.TextMessage, b); err != nil {
170
+
return fmt.Errorf("failed to write to websocket: %w", err)
171
+
}
172
+
}
173
+
174
+
return nil
175
+
}
176
+
}
147
177
148
178
config := tail.Config{
149
179
Follow: !isFinished,
+2
-2
spindle/xrpc/xrpc.go
+2
-2
spindle/xrpc/xrpc.go
···
17
17
"tangled.sh/tangled.sh/core/rbac"
18
18
"tangled.sh/tangled.sh/core/spindle/config"
19
19
"tangled.sh/tangled.sh/core/spindle/db"
20
-
"tangled.sh/tangled.sh/core/spindle/engine"
20
+
"tangled.sh/tangled.sh/core/spindle/models"
21
21
"tangled.sh/tangled.sh/core/spindle/secrets"
22
22
)
23
23
···
27
27
Logger *slog.Logger
28
28
Db *db.DB
29
29
Enforcer *rbac.Enforcer
30
-
Engine *engine.Engine
30
+
Engines map[string]models.Engine
31
31
Config *config.Config
32
32
Resolver *idresolver.Resolver
33
33
Vault secrets.Manager
+17
-36
workflow/compile.go
+17
-36
workflow/compile.go
···
1
1
package workflow
2
2
3
3
import (
4
+
"errors"
4
5
"fmt"
5
6
6
7
"tangled.sh/tangled.sh/core/api/tangled"
···
63
64
return fmt.Sprintf("warning: %s: %s: %s", w.Path, w.Type, w.Reason)
64
65
}
65
66
67
+
var (
68
+
MissingEngine error = errors.New("missing engine")
69
+
)
70
+
66
71
type WarningKind string
67
72
68
73
var (
···
95
100
for _, wf := range p {
96
101
cw := compiler.compileWorkflow(wf)
97
102
98
-
// empty workflows are not added to the pipeline
99
-
if len(cw.Steps) == 0 {
103
+
if cw == nil {
100
104
continue
101
105
}
102
106
103
-
cp.Workflows = append(cp.Workflows, &cw)
107
+
cp.Workflows = append(cp.Workflows, cw)
104
108
}
105
109
106
110
return cp
107
111
}
108
112
109
-
func (compiler *Compiler) compileWorkflow(w Workflow) tangled.Pipeline_Workflow {
110
-
cw := tangled.Pipeline_Workflow{}
113
+
func (compiler *Compiler) compileWorkflow(w Workflow) *tangled.Pipeline_Workflow {
114
+
cw := &tangled.Pipeline_Workflow{}
111
115
112
116
if !w.Match(compiler.Trigger) {
113
117
compiler.Diagnostics.AddWarning(
···
115
119
WorkflowSkipped,
116
120
fmt.Sprintf("did not match trigger %s", compiler.Trigger.Kind),
117
121
)
118
-
return cw
119
-
}
120
-
121
-
if len(w.Steps) == 0 {
122
-
compiler.Diagnostics.AddWarning(
123
-
w.Name,
124
-
WorkflowSkipped,
125
-
"empty workflow",
126
-
)
127
-
return cw
122
+
return nil
128
123
}
129
124
130
125
// validate clone options
131
126
compiler.analyzeCloneOptions(w)
132
127
133
128
cw.Name = w.Name
134
-
cw.Dependencies = w.Dependencies.AsRecord()
135
-
for _, s := range w.Steps {
136
-
step := tangled.Pipeline_Step{
137
-
Command: s.Command,
138
-
Name: s.Name,
139
-
}
140
-
for k, v := range s.Environment {
141
-
e := &tangled.Pipeline_Pair{
142
-
Key: k,
143
-
Value: v,
144
-
}
145
-
step.Environment = append(step.Environment, e)
146
-
}
147
-
cw.Steps = append(cw.Steps, &step)
129
+
130
+
if w.Engine == "" {
131
+
compiler.Diagnostics.AddError(w.Name, MissingEngine)
132
+
return nil
148
133
}
149
-
for k, v := range w.Environment {
150
-
e := &tangled.Pipeline_Pair{
151
-
Key: k,
152
-
Value: v,
153
-
}
154
-
cw.Environment = append(cw.Environment, e)
155
-
}
134
+
135
+
cw.Engine = w.Engine
136
+
cw.Raw = w.Raw
156
137
157
138
o := w.CloneOpts.AsRecord()
158
139
cw.Clone = &o
+23
-29
workflow/compile_test.go
+23
-29
workflow/compile_test.go
···
26
26
27
27
func TestCompileWorkflow_MatchingWorkflowWithSteps(t *testing.T) {
28
28
wf := Workflow{
29
-
Name: ".tangled/workflows/test.yml",
30
-
When: when,
31
-
Steps: []Step{
32
-
{Name: "Test", Command: "go test ./..."},
33
-
},
29
+
Name: ".tangled/workflows/test.yml",
30
+
Engine: "nixery",
31
+
When: when,
34
32
CloneOpts: CloneOpts{}, // default true
35
33
}
36
34
···
43
41
assert.False(t, c.Diagnostics.IsErr())
44
42
}
45
43
46
-
func TestCompileWorkflow_EmptySteps(t *testing.T) {
47
-
wf := Workflow{
48
-
Name: ".tangled/workflows/empty.yml",
49
-
When: when,
50
-
Steps: []Step{}, // no steps
51
-
}
52
-
53
-
c := Compiler{Trigger: trigger}
54
-
cp := c.Compile([]Workflow{wf})
55
-
56
-
assert.Len(t, cp.Workflows, 0)
57
-
assert.Len(t, c.Diagnostics.Warnings, 1)
58
-
assert.Equal(t, WorkflowSkipped, c.Diagnostics.Warnings[0].Type)
59
-
}
60
-
61
44
func TestCompileWorkflow_TriggerMismatch(t *testing.T) {
62
45
wf := Workflow{
63
-
Name: ".tangled/workflows/mismatch.yml",
46
+
Name: ".tangled/workflows/mismatch.yml",
47
+
Engine: "nixery",
64
48
When: []Constraint{
65
49
{
66
50
Event: []string{"push"},
67
51
Branch: []string{"master"}, // different branch
68
52
},
69
53
},
70
-
Steps: []Step{
71
-
{Name: "Lint", Command: "golint ./..."},
72
-
},
73
54
}
74
55
75
56
c := Compiler{Trigger: trigger}
···
82
63
83
64
func TestCompileWorkflow_CloneFalseWithShallowTrue(t *testing.T) {
84
65
wf := Workflow{
85
-
Name: ".tangled/workflows/clone_skip.yml",
86
-
When: when,
87
-
Steps: []Step{
88
-
{Name: "Skip", Command: "echo skip"},
89
-
},
66
+
Name: ".tangled/workflows/clone_skip.yml",
67
+
Engine: "nixery",
68
+
When: when,
90
69
CloneOpts: CloneOpts{
91
70
Skip: true,
92
71
Depth: 1,
···
101
80
assert.Len(t, c.Diagnostics.Warnings, 1)
102
81
assert.Equal(t, InvalidConfiguration, c.Diagnostics.Warnings[0].Type)
103
82
}
83
+
84
+
func TestCompileWorkflow_MissingEngine(t *testing.T) {
85
+
wf := Workflow{
86
+
Name: ".tangled/workflows/missing_engine.yml",
87
+
When: when,
88
+
Engine: "",
89
+
}
90
+
91
+
c := Compiler{Trigger: trigger}
92
+
cp := c.Compile([]Workflow{wf})
93
+
94
+
assert.Len(t, cp.Workflows, 0)
95
+
assert.Len(t, c.Diagnostics.Errors, 1)
96
+
assert.Equal(t, MissingEngine, c.Diagnostics.Errors[0].Error)
97
+
}
+6
-33
workflow/def.go
+6
-33
workflow/def.go
···
24
24
25
25
// this is simply a structural representation of the workflow file
26
26
Workflow struct {
27
-
Name string `yaml:"-"` // name of the workflow file
28
-
When []Constraint `yaml:"when"`
29
-
Dependencies Dependencies `yaml:"dependencies"`
30
-
Steps []Step `yaml:"steps"`
31
-
Environment map[string]string `yaml:"environment"`
32
-
CloneOpts CloneOpts `yaml:"clone"`
27
+
Name string `yaml:"-"` // name of the workflow file
28
+
Engine string `yaml:"engine"`
29
+
When []Constraint `yaml:"when"`
30
+
CloneOpts CloneOpts `yaml:"clone"`
31
+
Raw string `yaml:"-"`
33
32
}
34
33
35
34
Constraint struct {
36
35
Event StringList `yaml:"event"`
37
36
Branch StringList `yaml:"branch"` // this is optional, and only applied on "push" events
38
37
}
39
-
40
-
Dependencies map[string][]string
41
38
42
39
CloneOpts struct {
43
40
Skip bool `yaml:"skip"`
44
41
Depth int `yaml:"depth"`
45
42
IncludeSubmodules bool `yaml:"submodules"`
46
-
}
47
-
48
-
Step struct {
49
-
Name string `yaml:"name"`
50
-
Command string `yaml:"command"`
51
-
Environment map[string]string `yaml:"environment"`
52
43
}
53
44
54
45
StringList []string
···
77
68
}
78
69
79
70
wf.Name = name
71
+
wf.Raw = string(contents)
80
72
81
73
return wf, nil
82
74
}
···
173
165
}
174
166
175
167
return errors.New("failed to unmarshal StringOrSlice")
176
-
}
177
-
178
-
// conversion utilities to atproto records
179
-
func (d Dependencies) AsRecord() []*tangled.Pipeline_Dependency {
180
-
var deps []*tangled.Pipeline_Dependency
181
-
for registry, packages := range d {
182
-
deps = append(deps, &tangled.Pipeline_Dependency{
183
-
Registry: registry,
184
-
Packages: packages,
185
-
})
186
-
}
187
-
return deps
188
-
}
189
-
190
-
func (s Step) AsRecord() tangled.Pipeline_Step {
191
-
return tangled.Pipeline_Step{
192
-
Command: s.Command,
193
-
Name: s.Name,
194
-
}
195
168
}
196
169
197
170
func (c CloneOpts) AsRecord() tangled.Pipeline_CloneOpts {
+1
-86
workflow/def_test.go
+1
-86
workflow/def_test.go
···
10
10
yamlData := `
11
11
when:
12
12
- event: ["push", "pull_request"]
13
-
branch: ["main", "develop"]
14
-
15
-
dependencies:
16
-
nixpkgs:
17
-
- go
18
-
- git
19
-
- curl
20
-
21
-
steps:
22
-
- name: "Test"
23
-
command: |
24
-
go test ./...`
13
+
branch: ["main", "develop"]`
25
14
26
15
wf, err := FromFile("test.yml", []byte(yamlData))
27
16
assert.NoError(t, err, "YAML should unmarshal without error")
···
30
19
assert.ElementsMatch(t, []string{"main", "develop"}, wf.When[0].Branch)
31
20
assert.ElementsMatch(t, []string{"push", "pull_request"}, wf.When[0].Event)
32
21
33
-
assert.Len(t, wf.Steps, 1)
34
-
assert.Equal(t, "Test", wf.Steps[0].Name)
35
-
assert.Equal(t, "go test ./...", wf.Steps[0].Command)
36
-
37
-
pkgs, ok := wf.Dependencies["nixpkgs"]
38
-
assert.True(t, ok, "`nixpkgs` should be present in dependencies")
39
-
assert.ElementsMatch(t, []string{"go", "git", "curl"}, pkgs)
40
-
41
22
assert.False(t, wf.CloneOpts.Skip, "Skip should default to false")
42
23
}
43
24
44
-
func TestUnmarshalCustomRegistry(t *testing.T) {
45
-
yamlData := `
46
-
when:
47
-
- event: push
48
-
branch: main
49
-
50
-
dependencies:
51
-
git+https://tangled.sh/@oppi.li/tbsp:
52
-
- tbsp
53
-
git+https://git.peppe.rs/languages/statix:
54
-
- statix
55
-
56
-
steps:
57
-
- name: "Check"
58
-
command: |
59
-
statix check`
60
-
61
-
wf, err := FromFile("test.yml", []byte(yamlData))
62
-
assert.NoError(t, err, "YAML should unmarshal without error")
63
-
64
-
assert.ElementsMatch(t, []string{"push"}, wf.When[0].Event)
65
-
assert.ElementsMatch(t, []string{"main"}, wf.When[0].Branch)
66
-
67
-
assert.ElementsMatch(t, []string{"tbsp"}, wf.Dependencies["git+https://tangled.sh/@oppi.li/tbsp"])
68
-
assert.ElementsMatch(t, []string{"statix"}, wf.Dependencies["git+https://git.peppe.rs/languages/statix"])
69
-
}
70
-
71
25
func TestUnmarshalCloneFalse(t *testing.T) {
72
26
yamlData := `
73
27
when:
···
75
29
76
30
clone:
77
31
skip: true
78
-
79
-
dependencies:
80
-
nixpkgs:
81
-
- python3
82
-
83
-
steps:
84
-
- name: Notify
85
-
command: |
86
-
python3 ./notify.py
87
32
`
88
33
89
34
wf, err := FromFile("test.yml", []byte(yamlData))
···
93
38
94
39
assert.True(t, wf.CloneOpts.Skip, "Skip should be false")
95
40
}
96
-
97
-
func TestUnmarshalEnv(t *testing.T) {
98
-
yamlData := `
99
-
when:
100
-
- event: ["pull_request_close"]
101
-
102
-
clone:
103
-
skip: false
104
-
105
-
environment:
106
-
HOME: /home/foo bar/baz
107
-
CGO_ENABLED: 1
108
-
109
-
steps:
110
-
- name: Something
111
-
command: echo "hello"
112
-
environment:
113
-
FOO: bar
114
-
BAZ: qux
115
-
`
116
-
117
-
wf, err := FromFile("test.yml", []byte(yamlData))
118
-
assert.NoError(t, err)
119
-
120
-
assert.Len(t, wf.Environment, 2)
121
-
assert.Equal(t, "/home/foo bar/baz", wf.Environment["HOME"])
122
-
assert.Equal(t, "1", wf.Environment["CGO_ENABLED"])
123
-
assert.Equal(t, "bar", wf.Steps[0].Environment["FOO"])
124
-
assert.Equal(t, "qux", wf.Steps[0].Environment["BAZ"])
125
-
}