+1
-1
.air/appview.toml
+2
-1
.gitignore
+12
.prettierrc.json
+2
.tangled/workflows/build.yml
+3
-12
.tangled/workflows/fmt.yml
···
2
2
- event: ["push", "pull_request"]
3
3
branch: ["master"]
4
4
5
-
dependencies:
6
-
nixpkgs:
7
-
- go
8
-
- alejandra
5
+
engine: nixery
9
6
10
7
steps:
11
-
- name: "nix fmt"
8
+
- name: "Check formatting"
12
9
command: |
13
-
alejandra -c nix/**/*.nix flake.nix
14
-
15
-
- name: "go fmt"
16
-
command: |
17
-
unformatted=$(gofmt -l .)
18
-
test -z "$unformatted" || (echo "$unformatted" && exit 1)
19
-
10
+
nix run .#fmt -- --ci
+2
.tangled/workflows/test.yml
-16
.zed/settings.json
···
1
-
// Folder-specific settings
2
-
//
3
-
// For a full list of overridable settings, and general information on folder-specific settings,
4
-
// see the documentation: https://zed.dev/docs/configuring-zed#settings-files
5
-
{
6
-
"languages": {
7
-
"HTML": {
8
-
"prettier": {
9
-
"format_on_save": false,
10
-
"allowed": true,
11
-
"parser": "go-template",
12
-
"plugins": ["prettier-plugin-go-template"]
13
-
}
14
-
}
15
-
}
16
-
}
+55
-722
api/tangled/cbor_gen.go
···
2728
2728
2729
2729
return nil
2730
2730
}
2731
-
func (t *Pipeline_Dependency) MarshalCBOR(w io.Writer) error {
2732
-
if t == nil {
2733
-
_, err := w.Write(cbg.CborNull)
2734
-
return err
2735
-
}
2736
-
2737
-
cw := cbg.NewCborWriter(w)
2738
-
2739
-
if _, err := cw.Write([]byte{162}); err != nil {
2740
-
return err
2741
-
}
2742
-
2743
-
// t.Packages ([]string) (slice)
2744
-
if len("packages") > 1000000 {
2745
-
return xerrors.Errorf("Value in field \"packages\" was too long")
2746
-
}
2747
-
2748
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("packages"))); err != nil {
2749
-
return err
2750
-
}
2751
-
if _, err := cw.WriteString(string("packages")); err != nil {
2752
-
return err
2753
-
}
2754
-
2755
-
if len(t.Packages) > 8192 {
2756
-
return xerrors.Errorf("Slice value in field t.Packages was too long")
2757
-
}
2758
-
2759
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Packages))); err != nil {
2760
-
return err
2761
-
}
2762
-
for _, v := range t.Packages {
2763
-
if len(v) > 1000000 {
2764
-
return xerrors.Errorf("Value in field v was too long")
2765
-
}
2766
-
2767
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
2768
-
return err
2769
-
}
2770
-
if _, err := cw.WriteString(string(v)); err != nil {
2771
-
return err
2772
-
}
2773
-
2774
-
}
2775
-
2776
-
// t.Registry (string) (string)
2777
-
if len("registry") > 1000000 {
2778
-
return xerrors.Errorf("Value in field \"registry\" was too long")
2779
-
}
2780
-
2781
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("registry"))); err != nil {
2782
-
return err
2783
-
}
2784
-
if _, err := cw.WriteString(string("registry")); err != nil {
2785
-
return err
2786
-
}
2787
-
2788
-
if len(t.Registry) > 1000000 {
2789
-
return xerrors.Errorf("Value in field t.Registry was too long")
2790
-
}
2791
-
2792
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Registry))); err != nil {
2793
-
return err
2794
-
}
2795
-
if _, err := cw.WriteString(string(t.Registry)); err != nil {
2796
-
return err
2797
-
}
2798
-
return nil
2799
-
}
2800
-
2801
-
func (t *Pipeline_Dependency) UnmarshalCBOR(r io.Reader) (err error) {
2802
-
*t = Pipeline_Dependency{}
2803
-
2804
-
cr := cbg.NewCborReader(r)
2805
-
2806
-
maj, extra, err := cr.ReadHeader()
2807
-
if err != nil {
2808
-
return err
2809
-
}
2810
-
defer func() {
2811
-
if err == io.EOF {
2812
-
err = io.ErrUnexpectedEOF
2813
-
}
2814
-
}()
2815
-
2816
-
if maj != cbg.MajMap {
2817
-
return fmt.Errorf("cbor input should be of type map")
2818
-
}
2819
-
2820
-
if extra > cbg.MaxLength {
2821
-
return fmt.Errorf("Pipeline_Dependency: map struct too large (%d)", extra)
2822
-
}
2823
-
2824
-
n := extra
2825
-
2826
-
nameBuf := make([]byte, 8)
2827
-
for i := uint64(0); i < n; i++ {
2828
-
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
2829
-
if err != nil {
2830
-
return err
2831
-
}
2832
-
2833
-
if !ok {
2834
-
// Field doesn't exist on this type, so ignore it
2835
-
if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
2836
-
return err
2837
-
}
2838
-
continue
2839
-
}
2840
-
2841
-
switch string(nameBuf[:nameLen]) {
2842
-
// t.Packages ([]string) (slice)
2843
-
case "packages":
2844
-
2845
-
maj, extra, err = cr.ReadHeader()
2846
-
if err != nil {
2847
-
return err
2848
-
}
2849
-
2850
-
if extra > 8192 {
2851
-
return fmt.Errorf("t.Packages: array too large (%d)", extra)
2852
-
}
2853
-
2854
-
if maj != cbg.MajArray {
2855
-
return fmt.Errorf("expected cbor array")
2856
-
}
2857
-
2858
-
if extra > 0 {
2859
-
t.Packages = make([]string, extra)
2860
-
}
2861
-
2862
-
for i := 0; i < int(extra); i++ {
2863
-
{
2864
-
var maj byte
2865
-
var extra uint64
2866
-
var err error
2867
-
_ = maj
2868
-
_ = extra
2869
-
_ = err
2870
-
2871
-
{
2872
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
2873
-
if err != nil {
2874
-
return err
2875
-
}
2876
-
2877
-
t.Packages[i] = string(sval)
2878
-
}
2879
-
2880
-
}
2881
-
}
2882
-
// t.Registry (string) (string)
2883
-
case "registry":
2884
-
2885
-
{
2886
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
2887
-
if err != nil {
2888
-
return err
2889
-
}
2890
-
2891
-
t.Registry = string(sval)
2892
-
}
2893
-
2894
-
default:
2895
-
// Field doesn't exist on this type, so ignore it
2896
-
if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
2897
-
return err
2898
-
}
2899
-
}
2900
-
}
2901
-
2902
-
return nil
2903
-
}
2904
2731
func (t *Pipeline_ManualTriggerData) MarshalCBOR(w io.Writer) error {
2905
2732
if t == nil {
2906
2733
_, err := w.Write(cbg.CborNull)
···
3916
3743
3917
3744
return nil
3918
3745
}
3919
-
func (t *Pipeline_Step) MarshalCBOR(w io.Writer) error {
3920
-
if t == nil {
3921
-
_, err := w.Write(cbg.CborNull)
3922
-
return err
3923
-
}
3924
-
3925
-
cw := cbg.NewCborWriter(w)
3926
-
fieldCount := 3
3927
-
3928
-
if t.Environment == nil {
3929
-
fieldCount--
3930
-
}
3931
-
3932
-
if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
3933
-
return err
3934
-
}
3935
-
3936
-
// t.Name (string) (string)
3937
-
if len("name") > 1000000 {
3938
-
return xerrors.Errorf("Value in field \"name\" was too long")
3939
-
}
3940
-
3941
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("name"))); err != nil {
3942
-
return err
3943
-
}
3944
-
if _, err := cw.WriteString(string("name")); err != nil {
3945
-
return err
3946
-
}
3947
-
3948
-
if len(t.Name) > 1000000 {
3949
-
return xerrors.Errorf("Value in field t.Name was too long")
3950
-
}
3951
-
3952
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
3953
-
return err
3954
-
}
3955
-
if _, err := cw.WriteString(string(t.Name)); err != nil {
3956
-
return err
3957
-
}
3958
-
3959
-
// t.Command (string) (string)
3960
-
if len("command") > 1000000 {
3961
-
return xerrors.Errorf("Value in field \"command\" was too long")
3962
-
}
3963
-
3964
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("command"))); err != nil {
3965
-
return err
3966
-
}
3967
-
if _, err := cw.WriteString(string("command")); err != nil {
3968
-
return err
3969
-
}
3970
-
3971
-
if len(t.Command) > 1000000 {
3972
-
return xerrors.Errorf("Value in field t.Command was too long")
3973
-
}
3974
-
3975
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Command))); err != nil {
3976
-
return err
3977
-
}
3978
-
if _, err := cw.WriteString(string(t.Command)); err != nil {
3979
-
return err
3980
-
}
3981
-
3982
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
3983
-
if t.Environment != nil {
3984
-
3985
-
if len("environment") > 1000000 {
3986
-
return xerrors.Errorf("Value in field \"environment\" was too long")
3987
-
}
3988
-
3989
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
3990
-
return err
3991
-
}
3992
-
if _, err := cw.WriteString(string("environment")); err != nil {
3993
-
return err
3994
-
}
3995
-
3996
-
if len(t.Environment) > 8192 {
3997
-
return xerrors.Errorf("Slice value in field t.Environment was too long")
3998
-
}
3999
-
4000
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
4001
-
return err
4002
-
}
4003
-
for _, v := range t.Environment {
4004
-
if err := v.MarshalCBOR(cw); err != nil {
4005
-
return err
4006
-
}
4007
-
4008
-
}
4009
-
}
4010
-
return nil
4011
-
}
4012
-
4013
-
func (t *Pipeline_Step) UnmarshalCBOR(r io.Reader) (err error) {
4014
-
*t = Pipeline_Step{}
4015
-
4016
-
cr := cbg.NewCborReader(r)
4017
-
4018
-
maj, extra, err := cr.ReadHeader()
4019
-
if err != nil {
4020
-
return err
4021
-
}
4022
-
defer func() {
4023
-
if err == io.EOF {
4024
-
err = io.ErrUnexpectedEOF
4025
-
}
4026
-
}()
4027
-
4028
-
if maj != cbg.MajMap {
4029
-
return fmt.Errorf("cbor input should be of type map")
4030
-
}
4031
-
4032
-
if extra > cbg.MaxLength {
4033
-
return fmt.Errorf("Pipeline_Step: map struct too large (%d)", extra)
4034
-
}
4035
-
4036
-
n := extra
4037
-
4038
-
nameBuf := make([]byte, 11)
4039
-
for i := uint64(0); i < n; i++ {
4040
-
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4041
-
if err != nil {
4042
-
return err
4043
-
}
4044
-
4045
-
if !ok {
4046
-
// Field doesn't exist on this type, so ignore it
4047
-
if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil {
4048
-
return err
4049
-
}
4050
-
continue
4051
-
}
4052
-
4053
-
switch string(nameBuf[:nameLen]) {
4054
-
// t.Name (string) (string)
4055
-
case "name":
4056
-
4057
-
{
4058
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4059
-
if err != nil {
4060
-
return err
4061
-
}
4062
-
4063
-
t.Name = string(sval)
4064
-
}
4065
-
// t.Command (string) (string)
4066
-
case "command":
4067
-
4068
-
{
4069
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4070
-
if err != nil {
4071
-
return err
4072
-
}
4073
-
4074
-
t.Command = string(sval)
4075
-
}
4076
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
4077
-
case "environment":
4078
-
4079
-
maj, extra, err = cr.ReadHeader()
4080
-
if err != nil {
4081
-
return err
4082
-
}
4083
-
4084
-
if extra > 8192 {
4085
-
return fmt.Errorf("t.Environment: array too large (%d)", extra)
4086
-
}
4087
-
4088
-
if maj != cbg.MajArray {
4089
-
return fmt.Errorf("expected cbor array")
4090
-
}
4091
-
4092
-
if extra > 0 {
4093
-
t.Environment = make([]*Pipeline_Pair, extra)
4094
-
}
4095
-
4096
-
for i := 0; i < int(extra); i++ {
4097
-
{
4098
-
var maj byte
4099
-
var extra uint64
4100
-
var err error
4101
-
_ = maj
4102
-
_ = extra
4103
-
_ = err
4104
-
4105
-
{
4106
-
4107
-
b, err := cr.ReadByte()
4108
-
if err != nil {
4109
-
return err
4110
-
}
4111
-
if b != cbg.CborNull[0] {
4112
-
if err := cr.UnreadByte(); err != nil {
4113
-
return err
4114
-
}
4115
-
t.Environment[i] = new(Pipeline_Pair)
4116
-
if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
4117
-
return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
4118
-
}
4119
-
}
4120
-
4121
-
}
4122
-
4123
-
}
4124
-
}
4125
-
4126
-
default:
4127
-
// Field doesn't exist on this type, so ignore it
4128
-
if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil {
4129
-
return err
4130
-
}
4131
-
}
4132
-
}
4133
-
4134
-
return nil
4135
-
}
4136
3746
func (t *Pipeline_TriggerMetadata) MarshalCBOR(w io.Writer) error {
4137
3747
if t == nil {
4138
3748
_, err := w.Write(cbg.CborNull)
···
4609
4219
4610
4220
cw := cbg.NewCborWriter(w)
4611
4221
4612
-
if _, err := cw.Write([]byte{165}); err != nil {
4222
+
if _, err := cw.Write([]byte{164}); err != nil {
4223
+
return err
4224
+
}
4225
+
4226
+
// t.Raw (string) (string)
4227
+
if len("raw") > 1000000 {
4228
+
return xerrors.Errorf("Value in field \"raw\" was too long")
4229
+
}
4230
+
4231
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("raw"))); err != nil {
4232
+
return err
4233
+
}
4234
+
if _, err := cw.WriteString(string("raw")); err != nil {
4235
+
return err
4236
+
}
4237
+
4238
+
if len(t.Raw) > 1000000 {
4239
+
return xerrors.Errorf("Value in field t.Raw was too long")
4240
+
}
4241
+
4242
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Raw))); err != nil {
4243
+
return err
4244
+
}
4245
+
if _, err := cw.WriteString(string(t.Raw)); err != nil {
4613
4246
return err
4614
4247
}
4615
4248
···
4652
4285
return err
4653
4286
}
4654
4287
4655
-
// t.Steps ([]*tangled.Pipeline_Step) (slice)
4656
-
if len("steps") > 1000000 {
4657
-
return xerrors.Errorf("Value in field \"steps\" was too long")
4288
+
// t.Engine (string) (string)
4289
+
if len("engine") > 1000000 {
4290
+
return xerrors.Errorf("Value in field \"engine\" was too long")
4658
4291
}
4659
4292
4660
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("steps"))); err != nil {
4293
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("engine"))); err != nil {
4661
4294
return err
4662
4295
}
4663
-
if _, err := cw.WriteString(string("steps")); err != nil {
4296
+
if _, err := cw.WriteString(string("engine")); err != nil {
4664
4297
return err
4665
4298
}
4666
4299
4667
-
if len(t.Steps) > 8192 {
4668
-
return xerrors.Errorf("Slice value in field t.Steps was too long")
4300
+
if len(t.Engine) > 1000000 {
4301
+
return xerrors.Errorf("Value in field t.Engine was too long")
4669
4302
}
4670
4303
4671
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Steps))); err != nil {
4304
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Engine))); err != nil {
4672
4305
return err
4673
4306
}
4674
-
for _, v := range t.Steps {
4675
-
if err := v.MarshalCBOR(cw); err != nil {
4676
-
return err
4677
-
}
4678
-
4679
-
}
4680
-
4681
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
4682
-
if len("environment") > 1000000 {
4683
-
return xerrors.Errorf("Value in field \"environment\" was too long")
4684
-
}
4685
-
4686
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("environment"))); err != nil {
4307
+
if _, err := cw.WriteString(string(t.Engine)); err != nil {
4687
4308
return err
4688
4309
}
4689
-
if _, err := cw.WriteString(string("environment")); err != nil {
4690
-
return err
4691
-
}
4692
-
4693
-
if len(t.Environment) > 8192 {
4694
-
return xerrors.Errorf("Slice value in field t.Environment was too long")
4695
-
}
4696
-
4697
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Environment))); err != nil {
4698
-
return err
4699
-
}
4700
-
for _, v := range t.Environment {
4701
-
if err := v.MarshalCBOR(cw); err != nil {
4702
-
return err
4703
-
}
4704
-
4705
-
}
4706
-
4707
-
// t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
4708
-
if len("dependencies") > 1000000 {
4709
-
return xerrors.Errorf("Value in field \"dependencies\" was too long")
4710
-
}
4711
-
4712
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("dependencies"))); err != nil {
4713
-
return err
4714
-
}
4715
-
if _, err := cw.WriteString(string("dependencies")); err != nil {
4716
-
return err
4717
-
}
4718
-
4719
-
if len(t.Dependencies) > 8192 {
4720
-
return xerrors.Errorf("Slice value in field t.Dependencies was too long")
4721
-
}
4722
-
4723
-
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Dependencies))); err != nil {
4724
-
return err
4725
-
}
4726
-
for _, v := range t.Dependencies {
4727
-
if err := v.MarshalCBOR(cw); err != nil {
4728
-
return err
4729
-
}
4730
-
4731
-
}
4732
4310
return nil
4733
4311
}
4734
4312
···
4757
4335
4758
4336
n := extra
4759
4337
4760
-
nameBuf := make([]byte, 12)
4338
+
nameBuf := make([]byte, 6)
4761
4339
for i := uint64(0); i < n; i++ {
4762
4340
nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
4763
4341
if err != nil {
···
4773
4351
}
4774
4352
4775
4353
switch string(nameBuf[:nameLen]) {
4776
-
// t.Name (string) (string)
4354
+
// t.Raw (string) (string)
4355
+
case "raw":
4356
+
4357
+
{
4358
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4359
+
if err != nil {
4360
+
return err
4361
+
}
4362
+
4363
+
t.Raw = string(sval)
4364
+
}
4365
+
// t.Name (string) (string)
4777
4366
case "name":
4778
4367
4779
4368
{
···
4804
4393
}
4805
4394
4806
4395
}
4807
-
// t.Steps ([]*tangled.Pipeline_Step) (slice)
4808
-
case "steps":
4809
-
4810
-
maj, extra, err = cr.ReadHeader()
4811
-
if err != nil {
4812
-
return err
4813
-
}
4814
-
4815
-
if extra > 8192 {
4816
-
return fmt.Errorf("t.Steps: array too large (%d)", extra)
4817
-
}
4818
-
4819
-
if maj != cbg.MajArray {
4820
-
return fmt.Errorf("expected cbor array")
4821
-
}
4822
-
4823
-
if extra > 0 {
4824
-
t.Steps = make([]*Pipeline_Step, extra)
4825
-
}
4826
-
4827
-
for i := 0; i < int(extra); i++ {
4828
-
{
4829
-
var maj byte
4830
-
var extra uint64
4831
-
var err error
4832
-
_ = maj
4833
-
_ = extra
4834
-
_ = err
4835
-
4836
-
{
4837
-
4838
-
b, err := cr.ReadByte()
4839
-
if err != nil {
4840
-
return err
4841
-
}
4842
-
if b != cbg.CborNull[0] {
4843
-
if err := cr.UnreadByte(); err != nil {
4844
-
return err
4845
-
}
4846
-
t.Steps[i] = new(Pipeline_Step)
4847
-
if err := t.Steps[i].UnmarshalCBOR(cr); err != nil {
4848
-
return xerrors.Errorf("unmarshaling t.Steps[i] pointer: %w", err)
4849
-
}
4850
-
}
4851
-
4852
-
}
4853
-
4854
-
}
4855
-
}
4856
-
// t.Environment ([]*tangled.Pipeline_Pair) (slice)
4857
-
case "environment":
4858
-
4859
-
maj, extra, err = cr.ReadHeader()
4860
-
if err != nil {
4861
-
return err
4862
-
}
4863
-
4864
-
if extra > 8192 {
4865
-
return fmt.Errorf("t.Environment: array too large (%d)", extra)
4866
-
}
4867
-
4868
-
if maj != cbg.MajArray {
4869
-
return fmt.Errorf("expected cbor array")
4870
-
}
4871
-
4872
-
if extra > 0 {
4873
-
t.Environment = make([]*Pipeline_Pair, extra)
4874
-
}
4875
-
4876
-
for i := 0; i < int(extra); i++ {
4877
-
{
4878
-
var maj byte
4879
-
var extra uint64
4880
-
var err error
4881
-
_ = maj
4882
-
_ = extra
4883
-
_ = err
4884
-
4885
-
{
4886
-
4887
-
b, err := cr.ReadByte()
4888
-
if err != nil {
4889
-
return err
4890
-
}
4891
-
if b != cbg.CborNull[0] {
4892
-
if err := cr.UnreadByte(); err != nil {
4893
-
return err
4894
-
}
4895
-
t.Environment[i] = new(Pipeline_Pair)
4896
-
if err := t.Environment[i].UnmarshalCBOR(cr); err != nil {
4897
-
return xerrors.Errorf("unmarshaling t.Environment[i] pointer: %w", err)
4898
-
}
4899
-
}
4396
+
// t.Engine (string) (string)
4397
+
case "engine":
4900
4398
4901
-
}
4902
-
4399
+
{
4400
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
4401
+
if err != nil {
4402
+
return err
4903
4403
}
4904
-
}
4905
-
// t.Dependencies ([]*tangled.Pipeline_Dependency) (slice)
4906
-
case "dependencies":
4907
4404
4908
-
maj, extra, err = cr.ReadHeader()
4909
-
if err != nil {
4910
-
return err
4911
-
}
4912
-
4913
-
if extra > 8192 {
4914
-
return fmt.Errorf("t.Dependencies: array too large (%d)", extra)
4915
-
}
4916
-
4917
-
if maj != cbg.MajArray {
4918
-
return fmt.Errorf("expected cbor array")
4919
-
}
4920
-
4921
-
if extra > 0 {
4922
-
t.Dependencies = make([]*Pipeline_Dependency, extra)
4923
-
}
4924
-
4925
-
for i := 0; i < int(extra); i++ {
4926
-
{
4927
-
var maj byte
4928
-
var extra uint64
4929
-
var err error
4930
-
_ = maj
4931
-
_ = extra
4932
-
_ = err
4933
-
4934
-
{
4935
-
4936
-
b, err := cr.ReadByte()
4937
-
if err != nil {
4938
-
return err
4939
-
}
4940
-
if b != cbg.CborNull[0] {
4941
-
if err := cr.UnreadByte(); err != nil {
4942
-
return err
4943
-
}
4944
-
t.Dependencies[i] = new(Pipeline_Dependency)
4945
-
if err := t.Dependencies[i].UnmarshalCBOR(cr); err != nil {
4946
-
return xerrors.Errorf("unmarshaling t.Dependencies[i] pointer: %w", err)
4947
-
}
4948
-
}
4949
-
4950
-
}
4951
-
4952
-
}
4405
+
t.Engine = string(sval)
4953
4406
}
4954
4407
4955
4408
default:
···
6059
5512
}
6060
5513
6061
5514
cw := cbg.NewCborWriter(w)
6062
-
fieldCount := 7
5515
+
fieldCount := 6
6063
5516
6064
5517
if t.Body == nil {
6065
5518
fieldCount--
···
6189
5642
return err
6190
5643
}
6191
5644
6192
-
// t.IssueId (int64) (int64)
6193
-
if len("issueId") > 1000000 {
6194
-
return xerrors.Errorf("Value in field \"issueId\" was too long")
6195
-
}
6196
-
6197
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("issueId"))); err != nil {
6198
-
return err
6199
-
}
6200
-
if _, err := cw.WriteString(string("issueId")); err != nil {
6201
-
return err
6202
-
}
6203
-
6204
-
if t.IssueId >= 0 {
6205
-
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.IssueId)); err != nil {
6206
-
return err
6207
-
}
6208
-
} else {
6209
-
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.IssueId-1)); err != nil {
6210
-
return err
6211
-
}
6212
-
}
6213
-
6214
5645
// t.CreatedAt (string) (string)
6215
5646
if len("createdAt") > 1000000 {
6216
5647
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
6342
5773
6343
5774
t.Title = string(sval)
6344
5775
}
6345
-
// t.IssueId (int64) (int64)
6346
-
case "issueId":
6347
-
{
6348
-
maj, extra, err := cr.ReadHeader()
6349
-
if err != nil {
6350
-
return err
6351
-
}
6352
-
var extraI int64
6353
-
switch maj {
6354
-
case cbg.MajUnsignedInt:
6355
-
extraI = int64(extra)
6356
-
if extraI < 0 {
6357
-
return fmt.Errorf("int64 positive overflow")
6358
-
}
6359
-
case cbg.MajNegativeInt:
6360
-
extraI = int64(extra)
6361
-
if extraI < 0 {
6362
-
return fmt.Errorf("int64 negative overflow")
6363
-
}
6364
-
extraI = -1 - extraI
6365
-
default:
6366
-
return fmt.Errorf("wrong type for int64 field: %d", maj)
6367
-
}
6368
-
6369
-
t.IssueId = int64(extraI)
6370
-
}
6371
5776
// t.CreatedAt (string) (string)
6372
5777
case "createdAt":
6373
5778
···
6397
5802
}
6398
5803
6399
5804
cw := cbg.NewCborWriter(w)
6400
-
fieldCount := 7
6401
-
6402
-
if t.CommentId == nil {
6403
-
fieldCount--
6404
-
}
5805
+
fieldCount := 6
6405
5806
6406
5807
if t.Owner == nil {
6407
5808
fieldCount--
···
6544
5945
}
6545
5946
}
6546
5947
6547
-
// t.CommentId (int64) (int64)
6548
-
if t.CommentId != nil {
6549
-
6550
-
if len("commentId") > 1000000 {
6551
-
return xerrors.Errorf("Value in field \"commentId\" was too long")
6552
-
}
6553
-
6554
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("commentId"))); err != nil {
6555
-
return err
6556
-
}
6557
-
if _, err := cw.WriteString(string("commentId")); err != nil {
6558
-
return err
6559
-
}
6560
-
6561
-
if t.CommentId == nil {
6562
-
if _, err := cw.Write(cbg.CborNull); err != nil {
6563
-
return err
6564
-
}
6565
-
} else {
6566
-
if *t.CommentId >= 0 {
6567
-
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(*t.CommentId)); err != nil {
6568
-
return err
6569
-
}
6570
-
} else {
6571
-
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-*t.CommentId-1)); err != nil {
6572
-
return err
6573
-
}
6574
-
}
6575
-
}
6576
-
6577
-
}
6578
-
6579
5948
// t.CreatedAt (string) (string)
6580
5949
if len("createdAt") > 1000000 {
6581
5950
return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
6715
6084
}
6716
6085
6717
6086
t.Owner = (*string)(&sval)
6718
-
}
6719
-
}
6720
-
// t.CommentId (int64) (int64)
6721
-
case "commentId":
6722
-
{
6723
-
6724
-
b, err := cr.ReadByte()
6725
-
if err != nil {
6726
-
return err
6727
-
}
6728
-
if b != cbg.CborNull[0] {
6729
-
if err := cr.UnreadByte(); err != nil {
6730
-
return err
6731
-
}
6732
-
maj, extra, err := cr.ReadHeader()
6733
-
if err != nil {
6734
-
return err
6735
-
}
6736
-
var extraI int64
6737
-
switch maj {
6738
-
case cbg.MajUnsignedInt:
6739
-
extraI = int64(extra)
6740
-
if extraI < 0 {
6741
-
return fmt.Errorf("int64 positive overflow")
6742
-
}
6743
-
case cbg.MajNegativeInt:
6744
-
extraI = int64(extra)
6745
-
if extraI < 0 {
6746
-
return fmt.Errorf("int64 negative overflow")
6747
-
}
6748
-
extraI = -1 - extraI
6749
-
default:
6750
-
return fmt.Errorf("wrong type for int64 field: %d", maj)
6751
-
}
6752
-
6753
-
t.CommentId = (*int64)(&extraI)
6754
6087
}
6755
6088
}
6756
6089
// t.CreatedAt (string) (string)
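A note on the magic bytes in this generated code: cbor-gen writes a one-byte CBOR map header whose low bits carry the field count, which is why the removed Pipeline_Dependency marshaller wrote []byte{162} (two fields) and why Pipeline_Workflow's header drops from 165 (five fields) to 164 (four fields) earlier in this file's diff. A minimal standalone check of that encoding, separate from the generated file:

package main

import "fmt"

func main() {
	// For maps with fewer than 24 entries, CBOR packs the entry count
	// into the header byte: 0xa0 (major type 5) | count.
	for _, fields := range []int{2, 4, 5} {
		header := 0xa0 | fields
		fmt.Printf("%d-field struct -> header byte %d (0x%x)\n", fields, header, header)
	}
}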
-1
api/tangled/issuecomment.go
···
19
19
type RepoIssueComment struct {
20
20
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue.comment" cborgen:"$type,const=sh.tangled.repo.issue.comment"`
21
21
Body string `json:"body" cborgen:"body"`
22
-
CommentId *int64 `json:"commentId,omitempty" cborgen:"commentId,omitempty"`
23
22
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
24
23
Issue string `json:"issue" cborgen:"issue"`
25
24
Owner *string `json:"owner,omitempty" cborgen:"owner,omitempty"`
-1
api/tangled/repoissue.go
···
20
20
LexiconTypeID string `json:"$type,const=sh.tangled.repo.issue" cborgen:"$type,const=sh.tangled.repo.issue"`
21
21
Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
22
22
CreatedAt string `json:"createdAt" cborgen:"createdAt"`
23
-
IssueId int64 `json:"issueId" cborgen:"issueId"`
24
23
Owner string `json:"owner" cborgen:"owner"`
25
24
Repo string `json:"repo" cborgen:"repo"`
26
25
Title string `json:"title" cborgen:"title"`
+4
-18
api/tangled/tangledpipeline.go
···
29
29
Submodules bool `json:"submodules" cborgen:"submodules"`
30
30
}
31
31
32
-
// Pipeline_Dependency is a "dependency" in the sh.tangled.pipeline schema.
33
-
type Pipeline_Dependency struct {
34
-
Packages []string `json:"packages" cborgen:"packages"`
35
-
Registry string `json:"registry" cborgen:"registry"`
36
-
}
37
-
38
32
// Pipeline_ManualTriggerData is a "manualTriggerData" in the sh.tangled.pipeline schema.
39
33
type Pipeline_ManualTriggerData struct {
40
34
Inputs []*Pipeline_Pair `json:"inputs,omitempty" cborgen:"inputs,omitempty"`
···
61
55
Ref string `json:"ref" cborgen:"ref"`
62
56
}
63
57
64
-
// Pipeline_Step is a "step" in the sh.tangled.pipeline schema.
65
-
type Pipeline_Step struct {
66
-
Command string `json:"command" cborgen:"command"`
67
-
Environment []*Pipeline_Pair `json:"environment,omitempty" cborgen:"environment,omitempty"`
68
-
Name string `json:"name" cborgen:"name"`
69
-
}
70
-
71
58
// Pipeline_TriggerMetadata is a "triggerMetadata" in the sh.tangled.pipeline schema.
72
59
type Pipeline_TriggerMetadata struct {
73
60
Kind string `json:"kind" cborgen:"kind"`
···
87
74
88
75
// Pipeline_Workflow is a "workflow" in the sh.tangled.pipeline schema.
89
76
type Pipeline_Workflow struct {
90
-
Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
91
-
Dependencies []*Pipeline_Dependency `json:"dependencies" cborgen:"dependencies"`
92
-
Environment []*Pipeline_Pair `json:"environment" cborgen:"environment"`
93
-
Name string `json:"name" cborgen:"name"`
94
-
Steps []*Pipeline_Step `json:"steps" cborgen:"steps"`
77
+
Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
78
+
Engine string `json:"engine" cborgen:"engine"`
79
+
Name string `json:"name" cborgen:"name"`
80
+
Raw string `json:"raw" cborgen:"raw"`
95
81
}
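The net effect of the lexicon change above: a workflow record no longer carries structured steps, dependencies, or environment; it stores the unparsed workflow file plus the engine that should interpret it. A minimal sketch of populating the reworked struct (illustrative values only; the call sites that actually build these records are not part of this diff):

package main

import (
	"fmt"

	"tangled.sh/tangled.sh/core/api/tangled"
)

func main() {
	// rawYAML stands in for the unparsed contents of a workflow file.
	rawYAML := "when:\n  - event: [\"push\"]\nengine: nixery\nsteps: []\n"

	wf := tangled.Pipeline_Workflow{
		Name:   "fmt.yml", // hypothetical workflow name
		Engine: "nixery",  // engine named in the workflow file
		Raw:    rawYAML,   // full file contents, left for the engine to parse
	}
	fmt.Println(wf.Name, wf.Engine, len(wf.Raw))
}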
+1
appview/cache/session/store.go
+46
-23
appview/db/db.go
···
27
27
}
28
28
29
29
func Make(dbPath string) (*DB, error) {
30
-
db, err := sql.Open("sqlite3", dbPath)
30
+
// https://github.com/mattn/go-sqlite3#connection-string
31
+
opts := []string{
32
+
"_foreign_keys=1",
33
+
"_journal_mode=WAL",
34
+
"_synchronous=NORMAL",
35
+
"_auto_vacuum=incremental",
36
+
}
37
+
38
+
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
31
39
if err != nil {
32
40
return nil, err
33
41
}
34
-
_, err = db.Exec(`
35
-
pragma journal_mode = WAL;
36
-
pragma synchronous = normal;
37
-
pragma foreign_keys = on;
38
-
pragma temp_store = memory;
39
-
pragma mmap_size = 30000000000;
40
-
pragma page_size = 32768;
41
-
pragma auto_vacuum = incremental;
42
-
pragma busy_timeout = 5000;
42
+
43
+
ctx := context.Background()
43
44
45
+
conn, err := db.Conn(ctx)
46
+
if err != nil {
47
+
return nil, err
48
+
}
49
+
defer conn.Close()
50
+
51
+
_, err = conn.ExecContext(ctx, `
44
52
create table if not exists registrations (
45
53
id integer primary key autoincrement,
46
54
domain text not null unique,
···
462
470
id integer primary key autoincrement,
463
471
name text unique
464
472
);
473
+
474
+
-- indexes for better star query performance
475
+
create index if not exists idx_stars_created on stars(created);
476
+
create index if not exists idx_stars_repo_at_created on stars(repo_at, created);
465
477
`)
466
478
if err != nil {
467
479
return nil, err
468
480
}
469
481
470
482
// run migrations
471
-
runMigration(db, "add-description-to-repos", func(tx *sql.Tx) error {
483
+
runMigration(conn, "add-description-to-repos", func(tx *sql.Tx) error {
472
484
tx.Exec(`
473
485
alter table repos add column description text check (length(description) <= 200);
474
486
`)
475
487
return nil
476
488
})
477
489
478
-
runMigration(db, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
490
+
runMigration(conn, "add-rkey-to-pubkeys", func(tx *sql.Tx) error {
479
491
// add unconstrained column
480
492
_, err := tx.Exec(`
481
493
alter table public_keys
···
498
510
return nil
499
511
})
500
512
501
-
runMigration(db, "add-rkey-to-comments", func(tx *sql.Tx) error {
513
+
runMigration(conn, "add-rkey-to-comments", func(tx *sql.Tx) error {
502
514
_, err := tx.Exec(`
503
515
alter table comments drop column comment_at;
504
516
alter table comments add column rkey text;
···
506
518
return err
507
519
})
508
520
509
-
runMigration(db, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
521
+
runMigration(conn, "add-deleted-and-edited-to-issue-comments", func(tx *sql.Tx) error {
510
522
_, err := tx.Exec(`
511
523
alter table comments add column deleted text; -- timestamp
512
524
alter table comments add column edited text; -- timestamp
···
514
526
return err
515
527
})
516
528
517
-
runMigration(db, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
529
+
runMigration(conn, "add-source-info-to-pulls-and-submissions", func(tx *sql.Tx) error {
518
530
_, err := tx.Exec(`
519
531
alter table pulls add column source_branch text;
520
532
alter table pulls add column source_repo_at text;
···
523
535
return err
524
536
})
525
537
526
-
runMigration(db, "add-source-to-repos", func(tx *sql.Tx) error {
538
+
runMigration(conn, "add-source-to-repos", func(tx *sql.Tx) error {
527
539
_, err := tx.Exec(`
528
540
alter table repos add column source text;
529
541
`)
···
534
546
// NOTE: this cannot be done in a transaction, so it is run outside [0]
535
547
//
536
548
// [0]: https://sqlite.org/pragma.html#pragma_foreign_keys
537
-
db.Exec("pragma foreign_keys = off;")
538
-
runMigration(db, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
549
+
conn.ExecContext(ctx, "pragma foreign_keys = off;")
550
+
runMigration(conn, "recreate-pulls-column-for-stacking-support", func(tx *sql.Tx) error {
539
551
_, err := tx.Exec(`
540
552
create table pulls_new (
541
553
-- identifiers
···
590
602
`)
591
603
return err
592
604
})
593
-
db.Exec("pragma foreign_keys = on;")
605
+
conn.ExecContext(ctx, "pragma foreign_keys = on;")
594
606
595
607
// run migrations
596
-
runMigration(db, "add-spindle-to-repos", func(tx *sql.Tx) error {
608
+
runMigration(conn, "add-spindle-to-repos", func(tx *sql.Tx) error {
597
609
tx.Exec(`
598
610
alter table repos add column spindle text;
599
611
`)
···
601
613
})
602
614
603
615
// recreate and add rkey + created columns with default constraint
604
-
runMigration(db, "rework-collaborators-table", func(tx *sql.Tx) error {
616
+
runMigration(conn, "rework-collaborators-table", func(tx *sql.Tx) error {
605
617
// create new table
606
618
// - repo_at instead of repo integer
607
619
// - rkey field
···
655
667
return err
656
668
})
657
669
670
+
runMigration(conn, "add-rkey-to-issues", func(tx *sql.Tx) error {
671
+
_, err := tx.Exec(`
672
+
alter table issues add column rkey text not null default '';
673
+
674
+
-- get last url section from issue_at and save to rkey column
675
+
update issues
676
+
set rkey = replace(issue_at, rtrim(issue_at, replace(issue_at, '/', '')), '');
677
+
`)
678
+
return err
679
+
})
680
+
658
681
return &DB{db}, nil
659
682
}
660
683
661
684
type migrationFn = func(*sql.Tx) error
662
685
663
-
func runMigration(d *sql.DB, name string, migrationFn migrationFn) error {
664
-
tx, err := d.Begin()
686
+
func runMigration(c *sql.Conn, name string, migrationFn migrationFn) error {
687
+
tx, err := c.BeginTx(context.Background(), nil)
665
688
if err != nil {
666
689
return err
667
690
}
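The db.Make changes above move the pragmas into the go-sqlite3 connection string and pin a single *sql.Conn for schema setup and migrations. The reason: database/sql manages a pool, so a one-off db.Exec("pragma ...") only configures whichever pooled connection happened to serve it, while DSN options apply to every connection the pool opens, and connection-scoped statements such as "pragma foreign_keys = off" only stick if the surrounding work runs on that same connection. A minimal standalone sketch of the pattern, assuming the mattn/go-sqlite3 driver referenced in the diff:

package main

import (
	"context"
	"database/sql"
	"log"
	"strings"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	// DSN options configure every connection the pool opens.
	opts := []string{"_foreign_keys=1", "_journal_mode=WAL"}
	db, err := sql.Open("sqlite3", "app.db?"+strings.Join(opts, "&"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pinning one connection keeps schema setup and connection-scoped
	// pragmas on the same underlying SQLite connection.
	ctx := context.Background()
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.ExecContext(ctx, `create table if not exists t (id integer primary key)`); err != nil {
		log.Fatal(err)
	}
}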
+1
-1
appview/db/follow.go
+208
-17
appview/db/issues.go
···
2
2
3
3
import (
4
4
"database/sql"
5
+
"fmt"
6
+
mathrand "math/rand/v2"
7
+
"strings"
5
8
"time"
6
9
7
10
"github.com/bluesky-social/indigo/atproto/syntax"
11
+
"tangled.sh/tangled.sh/core/api/tangled"
8
12
"tangled.sh/tangled.sh/core/appview/pagination"
9
13
)
10
14
···
13
17
RepoAt syntax.ATURI
14
18
OwnerDid string
15
19
IssueId int
16
-
IssueAt string
20
+
Rkey string
17
21
Created time.Time
18
22
Title string
19
23
Body string
···
42
46
Edited *time.Time
43
47
}
44
48
49
+
func (i *Issue) AtUri() syntax.ATURI {
50
+
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", i.OwnerDid, tangled.RepoIssueNSID, i.Rkey))
51
+
}
52
+
53
+
func IssueFromRecord(did, rkey string, record tangled.RepoIssue) Issue {
54
+
created, err := time.Parse(time.RFC3339, record.CreatedAt)
55
+
if err != nil {
56
+
created = time.Now()
57
+
}
58
+
59
+
body := ""
60
+
if record.Body != nil {
61
+
body = *record.Body
62
+
}
63
+
64
+
return Issue{
65
+
RepoAt: syntax.ATURI(record.Repo),
66
+
OwnerDid: record.Owner,
67
+
Rkey: rkey,
68
+
Created: created,
69
+
Title: record.Title,
70
+
Body: body,
71
+
Open: true, // new issues are open by default
72
+
}
73
+
}
74
+
75
+
func ResolveIssueFromAtUri(e Execer, issueUri syntax.ATURI) (syntax.ATURI, int, error) {
76
+
ownerDid := issueUri.Authority().String()
77
+
issueRkey := issueUri.RecordKey().String()
78
+
79
+
var repoAt string
80
+
var issueId int
81
+
82
+
query := `select repo_at, issue_id from issues where owner_did = ? and rkey = ?`
83
+
err := e.QueryRow(query, ownerDid, issueRkey).Scan(&repoAt, &issueId)
84
+
if err != nil {
85
+
return "", 0, err
86
+
}
87
+
88
+
return syntax.ATURI(repoAt), issueId, nil
89
+
}
90
+
91
+
func IssueCommentFromRecord(e Execer, did, rkey string, record tangled.RepoIssueComment) (Comment, error) {
92
+
created, err := time.Parse(time.RFC3339, record.CreatedAt)
93
+
if err != nil {
94
+
created = time.Now()
95
+
}
96
+
97
+
ownerDid := did
98
+
if record.Owner != nil {
99
+
ownerDid = *record.Owner
100
+
}
101
+
102
+
issueUri, err := syntax.ParseATURI(record.Issue)
103
+
if err != nil {
104
+
return Comment{}, err
105
+
}
106
+
107
+
repoAt, issueId, err := ResolveIssueFromAtUri(e, issueUri)
108
+
if err != nil {
109
+
return Comment{}, err
110
+
}
111
+
112
+
comment := Comment{
113
+
OwnerDid: ownerDid,
114
+
RepoAt: repoAt,
115
+
Rkey: rkey,
116
+
Body: record.Body,
117
+
Issue: issueId,
118
+
CommentId: mathrand.IntN(1000000),
119
+
Created: &created,
120
+
}
121
+
122
+
return comment, nil
123
+
}
124
+
45
125
func NewIssue(tx *sql.Tx, issue *Issue) error {
46
126
defer tx.Rollback()
47
127
···
67
147
issue.IssueId = nextId
68
148
69
149
res, err := tx.Exec(`
70
-
insert into issues (repo_at, owner_did, issue_id, title, body)
71
-
values (?, ?, ?, ?, ?)
72
-
`, issue.RepoAt, issue.OwnerDid, issue.IssueId, issue.Title, issue.Body)
150
+
insert into issues (repo_at, owner_did, rkey, issue_at, issue_id, title, body)
151
+
values (?, ?, ?, ?, ?, ?, ?)
152
+
`, issue.RepoAt, issue.OwnerDid, issue.Rkey, issue.AtUri(), issue.IssueId, issue.Title, issue.Body)
73
153
if err != nil {
74
154
return err
75
155
}
···
87
167
return nil
88
168
}
89
169
90
-
func SetIssueAt(e Execer, repoAt syntax.ATURI, issueId int, issueAt string) error {
91
-
_, err := e.Exec(`update issues set issue_at = ? where repo_at = ? and issue_id = ?`, issueAt, repoAt, issueId)
92
-
return err
93
-
}
94
-
95
170
func GetIssueAt(e Execer, repoAt syntax.ATURI, issueId int) (string, error) {
96
171
var issueAt string
97
172
err := e.QueryRow(`select issue_at from issues where repo_at = ? and issue_id = ?`, repoAt, issueId).Scan(&issueAt)
···
104
179
return ownerDid, err
105
180
}
106
181
107
-
func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
182
+
func GetIssuesPaginated(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
108
183
var issues []Issue
109
184
openValue := 0
110
185
if isOpen {
···
117
192
select
118
193
i.id,
119
194
i.owner_did,
195
+
i.rkey,
120
196
i.issue_id,
121
197
i.created,
122
198
i.title,
···
136
212
select
137
213
id,
138
214
owner_did,
215
+
rkey,
139
216
issue_id,
140
217
created,
141
218
title,
142
219
body,
143
220
open,
144
221
comment_count
145
-
from
222
+
from
146
223
numbered_issue
147
-
where
224
+
where
148
225
row_num between ? and ?`,
149
226
repoAt, openValue, page.Offset+1, page.Offset+page.Limit)
150
227
if err != nil {
···
156
233
var issue Issue
157
234
var createdAt string
158
235
var metadata IssueMetadata
159
-
err := rows.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
236
+
err := rows.Scan(&issue.ID, &issue.OwnerDid, &issue.Rkey, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &metadata.CommentCount)
160
237
if err != nil {
161
238
return nil, err
162
239
}
···
178
255
return issues, nil
179
256
}
180
257
258
+
func GetIssuesWithLimit(e Execer, limit int, filters ...filter) ([]Issue, error) {
259
+
issues := make([]Issue, 0, limit)
260
+
261
+
var conditions []string
262
+
var args []any
263
+
for _, filter := range filters {
264
+
conditions = append(conditions, filter.Condition())
265
+
args = append(args, filter.Arg()...)
266
+
}
267
+
268
+
whereClause := ""
269
+
if conditions != nil {
270
+
whereClause = " where " + strings.Join(conditions, " and ")
271
+
}
272
+
limitClause := ""
273
+
if limit != 0 {
274
+
limitClause = fmt.Sprintf(" limit %d ", limit)
275
+
}
276
+
277
+
query := fmt.Sprintf(
278
+
`select
279
+
i.id,
280
+
i.owner_did,
281
+
i.repo_at,
282
+
i.issue_id,
283
+
i.created,
284
+
i.title,
285
+
i.body,
286
+
i.open
287
+
from
288
+
issues i
289
+
%s
290
+
order by
291
+
i.created desc
292
+
%s`,
293
+
whereClause, limitClause)
294
+
295
+
rows, err := e.Query(query, args...)
296
+
if err != nil {
297
+
return nil, err
298
+
}
299
+
defer rows.Close()
300
+
301
+
for rows.Next() {
302
+
var issue Issue
303
+
var issueCreatedAt string
304
+
err := rows.Scan(
305
+
&issue.ID,
306
+
&issue.OwnerDid,
307
+
&issue.RepoAt,
308
+
&issue.IssueId,
309
+
&issueCreatedAt,
310
+
&issue.Title,
311
+
&issue.Body,
312
+
&issue.Open,
313
+
)
314
+
if err != nil {
315
+
return nil, err
316
+
}
317
+
318
+
issueCreatedTime, err := time.Parse(time.RFC3339, issueCreatedAt)
319
+
if err != nil {
320
+
return nil, err
321
+
}
322
+
issue.Created = issueCreatedTime
323
+
324
+
issues = append(issues, issue)
325
+
}
326
+
327
+
if err := rows.Err(); err != nil {
328
+
return nil, err
329
+
}
330
+
331
+
return issues, nil
332
+
}
333
+
334
+
func GetIssues(e Execer, filters ...filter) ([]Issue, error) {
335
+
return GetIssuesWithLimit(e, 0, filters...)
336
+
}
337
+
181
338
// timeframe here is directly passed into the sql query filter, and any
182
339
// timeframe in the past should be negative; e.g.: "-3 months"
183
340
func GetIssuesByOwnerDid(e Execer, ownerDid string, timeframe string) ([]Issue, error) {
···
187
344
`select
188
345
i.id,
189
346
i.owner_did,
347
+
i.rkey,
190
348
i.repo_at,
191
349
i.issue_id,
192
350
i.created,
···
219
377
err := rows.Scan(
220
378
&issue.ID,
221
379
&issue.OwnerDid,
380
+
&issue.Rkey,
222
381
&issue.RepoAt,
223
382
&issue.IssueId,
224
383
&issueCreatedAt,
···
262
421
}
263
422
264
423
func GetIssue(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, error) {
265
-
query := `select id, owner_did, created, title, body, open from issues where repo_at = ? and issue_id = ?`
424
+
query := `select id, owner_did, rkey, created, title, body, open from issues where repo_at = ? and issue_id = ?`
266
425
row := e.QueryRow(query, repoAt, issueId)
267
426
268
427
var issue Issue
269
428
var createdAt string
270
-
err := row.Scan(&issue.ID, &issue.OwnerDid, &createdAt, &issue.Title, &issue.Body, &issue.Open)
429
+
err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.Rkey, &createdAt, &issue.Title, &issue.Body, &issue.Open)
271
430
if err != nil {
272
431
return nil, err
273
432
}
···
282
441
}
283
442
284
443
func GetIssueWithComments(e Execer, repoAt syntax.ATURI, issueId int) (*Issue, []Comment, error) {
285
-
query := `select id, owner_did, issue_id, created, title, body, open, issue_at from issues where repo_at = ? and issue_id = ?`
444
+
query := `select id, owner_did, rkey, issue_id, created, title, body, open from issues where repo_at = ? and issue_id = ?`
286
445
row := e.QueryRow(query, repoAt, issueId)
287
446
288
447
var issue Issue
289
448
var createdAt string
290
-
err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open, &issue.IssueAt)
449
+
err := row.Scan(&issue.ID, &issue.OwnerDid, &issue.Rkey, &issue.IssueId, &createdAt, &issue.Title, &issue.Body, &issue.Open)
291
450
if err != nil {
292
451
return nil, nil, err
293
452
}
···
464
623
deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
465
624
where repo_at = ? and issue_id = ? and comment_id = ?
466
625
`, repoAt, issueId, commentId)
626
+
return err
627
+
}
628
+
629
+
func UpdateCommentByRkey(e Execer, ownerDid, rkey, newBody string) error {
630
+
_, err := e.Exec(
631
+
`
632
+
update comments
633
+
set body = ?,
634
+
edited = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
635
+
where owner_did = ? and rkey = ?
636
+
`, newBody, ownerDid, rkey)
637
+
return err
638
+
}
639
+
640
+
func DeleteCommentByRkey(e Execer, ownerDid, rkey string) error {
641
+
_, err := e.Exec(
642
+
`
643
+
update comments
644
+
set body = "",
645
+
deleted = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
646
+
where owner_did = ? and rkey = ?
647
+
`, ownerDid, rkey)
648
+
return err
649
+
}
650
+
651
+
func UpdateIssueByRkey(e Execer, ownerDid, rkey, title, body string) error {
652
+
_, err := e.Exec(`update issues set title = ?, body = ? where owner_did = ? and rkey = ?`, title, body, ownerDid, rkey)
653
+
return err
654
+
}
655
+
656
+
func DeleteIssueByRkey(e Execer, ownerDid, rkey string) error {
657
+
_, err := e.Exec(`delete from issues where owner_did = ? and rkey = ?`, ownerDid, rkey)
467
658
return err
468
659
}
469
660
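With the stored issue_at gone from the model, an issue's identity is the (owner DID, rkey) pair: AtUri() rebuilds the at-uri from it, and ResolveIssueFromAtUri parses one back into the DID and rkey used for lookup. A small standalone round trip of that parsing, using the same indigo syntax package; the values are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/bluesky-social/indigo/atproto/syntax"
)

func main() {
	did := "did:plc:example"
	rkey := "3kabc123xyz"

	// Forward: mirror Issue.AtUri(); sh.tangled.repo.issue is the issue NSID.
	uri := syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", did, "sh.tangled.repo.issue", rkey))

	// Reverse: mirror ResolveIssueFromAtUri's parsing step.
	parsed, err := syntax.ParseATURI(string(uri))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Authority().String()) // did:plc:example
	fmt.Println(parsed.RecordKey().String()) // 3kabc123xyz
}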
+22
-3
appview/db/pulls.go
···
310
310
return pullId - 1, err
311
311
}
312
312
313
-
func GetPulls(e Execer, filters ...filter) ([]*Pull, error) {
313
+
func GetPullsWithLimit(e Execer, limit int, filters ...filter) ([]*Pull, error) {
314
314
pulls := make(map[int]*Pull)
315
315
316
316
var conditions []string
···
323
323
whereClause := ""
324
324
if conditions != nil {
325
325
whereClause = " where " + strings.Join(conditions, " and ")
326
+
}
327
+
limitClause := ""
328
+
if limit != 0 {
329
+
limitClause = fmt.Sprintf(" limit %d ", limit)
326
330
}
327
331
328
332
query := fmt.Sprintf(`
···
344
348
from
345
349
pulls
346
350
%s
347
-
`, whereClause)
351
+
order by
352
+
created desc
353
+
%s
354
+
`, whereClause, limitClause)
348
355
349
356
rows, err := e.Query(query, args...)
350
357
if err != nil {
···
412
419
inClause := strings.TrimSuffix(strings.Repeat("?, ", len(pulls)), ", ")
413
420
submissionsQuery := fmt.Sprintf(`
414
421
select
415
-
id, pull_id, round_number, patch, source_rev
422
+
id, pull_id, round_number, patch, created, source_rev
416
423
from
417
424
pull_submissions
418
425
where
···
438
445
for submissionsRows.Next() {
439
446
var s PullSubmission
440
447
var sourceRev sql.NullString
448
+
var createdAt string
441
449
err := submissionsRows.Scan(
442
450
&s.ID,
443
451
&s.PullId,
444
452
&s.RoundNumber,
445
453
&s.Patch,
454
+
&createdAt,
446
455
&sourceRev,
447
456
)
448
457
if err != nil {
449
458
return nil, err
450
459
}
460
+
461
+
createdTime, err := time.Parse(time.RFC3339, createdAt)
462
+
if err != nil {
463
+
return nil, err
464
+
}
465
+
s.Created = createdTime
451
466
452
467
if sourceRev.Valid {
453
468
s.SourceRev = sourceRev.String
···
511
526
})
512
527
513
528
return orderedByPullId, nil
529
+
}
530
+
531
+
func GetPulls(e Execer, filters ...filter) ([]*Pull, error) {
532
+
return GetPullsWithLimit(e, 0, filters...)
514
533
}
515
534
516
535
func GetPull(e Execer, repoAt syntax.ATURI, pullId int) (*Pull, error) {
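GetPullsWithLimit treats limit == 0 as "no LIMIT clause", which is what lets GetPulls stay a thin wrapper; the same convention appears in GetIssuesWithLimit and GetStrings elsewhere in this diff. A tiny usage sketch with a hypothetical helper, assuming it sits in the same db package:

package db

// latestPulls illustrates the limit convention introduced above: a positive
// n caps the result set, while 0 falls through to the unlimited behaviour.
func latestPulls(e Execer, n int) ([]*Pull, error) {
	if n <= 0 {
		return GetPulls(e) // limit 0 means no LIMIT clause
	}
	return GetPullsWithLimit(e, n)
}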
+7
-7
appview/db/reaction.go
···
11
11
12
12
const (
13
13
Like ReactionKind = "👍"
14
-
Unlike = "👎"
15
-
Laugh = "😄"
16
-
Celebration = "🎉"
17
-
Confused = "🫤"
18
-
Heart = "❤️"
19
-
Rocket = "🚀"
20
-
Eyes = "👀"
14
+
Unlike ReactionKind = "👎"
15
+
Laugh ReactionKind = "😄"
16
+
Celebration ReactionKind = "🎉"
17
+
Confused ReactionKind = "🫤"
18
+
Heart ReactionKind = "❤️"
19
+
Rocket ReactionKind = "🚀"
20
+
Eyes ReactionKind = "👀"
21
21
)
22
22
23
23
func (rk ReactionKind) String() string {
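The reaction constants now each carry an explicit ReactionKind type. In the old block only Like was typed; the later constants, declared with their own values but no type, defaulted to plain string, so anything depending on the constant's concrete type (method calls such as .String(), %T formatting, type switches) saw string rather than ReactionKind. A standalone illustration of the difference, using placeholder values rather than the real emoji:

package main

import "fmt"

type ReactionKind string

func (rk ReactionKind) String() string { return string(rk) }

const (
	// Old style: only the first constant is explicitly typed.
	oldLike   ReactionKind = "like"
	oldUnlike              = "unlike" // untyped constant; default type is string

	// New style: every constant carries the ReactionKind type.
	newUnlike ReactionKind = "unlike"
)

func main() {
	fmt.Printf("%T\n", oldLike)   // main.ReactionKind
	fmt.Printf("%T\n", oldUnlike) // string
	fmt.Printf("%T\n", newUnlike) // main.ReactionKind

	// oldUnlike.String() would not compile: the constant's default type
	// (string) does not carry ReactionKind's methods.
	fmt.Println(newUnlike.String())
}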
+9
-10
appview/db/repos.go
···
19
19
Knot string
20
20
Rkey string
21
21
Created time.Time
22
-
AtUri string
23
22
Description string
24
23
Spindle string
25
24
···
391
390
var description, spindle sql.NullString
392
391
393
392
row := e.QueryRow(`
394
-
select did, name, knot, created, at_uri, description, spindle
393
+
select did, name, knot, created, description, spindle, rkey
395
394
from repos
396
395
where did = ? and name = ?
397
396
`,
···
400
399
)
401
400
402
401
var createdAt string
403
-
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &repo.AtUri, &description, &spindle); err != nil {
402
+
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &description, &spindle, &repo.Rkey); err != nil {
404
403
return nil, err
405
404
}
406
405
createdAtTime, _ := time.Parse(time.RFC3339, createdAt)
···
421
420
var repo Repo
422
421
var nullableDescription sql.NullString
423
422
424
-
row := e.QueryRow(`select did, name, knot, created, at_uri, description from repos where at_uri = ?`, atUri)
423
+
row := e.QueryRow(`select did, name, knot, created, rkey, description from repos where at_uri = ?`, atUri)
425
424
426
425
var createdAt string
427
-
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &repo.AtUri, &nullableDescription); err != nil {
426
+
if err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &createdAt, &repo.Rkey, &nullableDescription); err != nil {
428
427
return nil, err
429
428
}
430
429
createdAtTime, _ := time.Parse(time.RFC3339, createdAt)
···
444
443
`insert into repos
445
444
(did, name, knot, rkey, at_uri, description, source)
446
445
values (?, ?, ?, ?, ?, ?, ?)`,
447
-
repo.Did, repo.Name, repo.Knot, repo.Rkey, repo.AtUri, repo.Description, repo.Source,
446
+
repo.Did, repo.Name, repo.Knot, repo.Rkey, repo.RepoAt().String(), repo.Description, repo.Source,
448
447
)
449
448
return err
450
449
}
···
467
466
var repos []Repo
468
467
469
468
rows, err := e.Query(
470
-
`select did, name, knot, rkey, description, created, at_uri, source
469
+
`select did, name, knot, rkey, description, created, source
471
470
from repos
472
471
where did = ? and source is not null and source != ''
473
472
order by created desc`,
···
484
483
var nullableDescription sql.NullString
485
484
var nullableSource sql.NullString
486
485
487
-
err := rows.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &repo.AtUri, &nullableSource)
486
+
err := rows.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &nullableSource)
488
487
if err != nil {
489
488
return nil, err
490
489
}
···
521
520
var nullableSource sql.NullString
522
521
523
522
row := e.QueryRow(
524
-
`select did, name, knot, rkey, description, created, at_uri, source
523
+
`select did, name, knot, rkey, description, created, source
525
524
from repos
526
525
where did = ? and name = ? and source is not null and source != ''`,
527
526
did, name,
528
527
)
529
528
530
-
err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &repo.AtUri, &nullableSource)
529
+
err := row.Scan(&repo.Did, &repo.Name, &repo.Knot, &repo.Rkey, &nullableDescription, &createdAt, &nullableSource)
531
530
if err != nil {
532
531
return nil, err
533
532
}
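The at_uri column is no longer read back into the Repo model; AddRepo derives the stored value from repo.RepoAt(), and callers reconstruct the uri the same way. RepoAt() itself is defined outside this diff; assuming repos follow the same at://<did>/<collection>/<rkey> shape as issues, it amounts to roughly the sketch below ("sh.tangled.repo" is an assumed NSID for illustration, not taken from this changeset):

package main

import (
	"fmt"

	"github.com/bluesky-social/indigo/atproto/syntax"
)

// repo carries just the fields needed for the sketch; the real Repo type
// lives in appview/db.
type repo struct {
	Did  string
	Rkey string
}

// repoAt derives the at-uri instead of storing it, mirroring the
// repo.RepoAt().String() call used by AddRepo above.
func (r repo) repoAt() syntax.ATURI {
	return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, "sh.tangled.repo", r.Rkey))
}

func main() {
	r := repo{Did: "did:plc:example", Rkey: "3kdef456uvw"}
	fmt.Println(r.repoAt()) // at://did:plc:example/sh.tangled.repo/3kdef456uvw
}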
+73
-6
appview/db/star.go
···
47
47
// Get a star record
48
48
func GetStar(e Execer, starredByDid string, repoAt syntax.ATURI) (*Star, error) {
49
49
query := `
50
-
select starred_by_did, repo_at, created, rkey
50
+
select starred_by_did, repo_at, created, rkey
51
51
from stars
52
52
where starred_by_did = ? and repo_at = ?`
53
53
row := e.QueryRow(query, starredByDid, repoAt)
···
119
119
}
120
120
121
121
repoQuery := fmt.Sprintf(
122
-
`select starred_by_did, repo_at, created, rkey
122
+
`select starred_by_did, repo_at, created, rkey
123
123
from stars
124
124
%s
125
125
order by created desc
···
187
187
var stars []Star
188
188
189
189
rows, err := e.Query(`
190
-
select
190
+
select
191
191
s.starred_by_did,
192
192
s.repo_at,
193
193
s.rkey,
···
196
196
r.name,
197
197
r.knot,
198
198
r.rkey,
199
-
r.created,
200
-
r.at_uri
199
+
r.created
201
200
from stars s
202
201
join repos r on s.repo_at = r.at_uri
203
202
`)
···
222
221
&repo.Knot,
223
222
&repo.Rkey,
224
223
&repoCreatedAt,
225
-
&repo.AtUri,
226
224
); err != nil {
227
225
return nil, err
228
226
}
···
246
244
247
245
return stars, nil
248
246
}
247
+
248
+
// GetTopStarredReposLastWeek returns the top 8 most starred repositories from the last week
249
+
func GetTopStarredReposLastWeek(e Execer) ([]Repo, error) {
250
+
// first, get the top repo URIs by star count from the last week
251
+
query := `
252
+
with recent_starred_repos as (
253
+
select distinct repo_at
254
+
from stars
255
+
where created >= datetime('now', '-7 days')
256
+
),
257
+
repo_star_counts as (
258
+
select
259
+
s.repo_at,
260
+
count(*) as star_count
261
+
from stars s
262
+
join recent_starred_repos rsr on s.repo_at = rsr.repo_at
263
+
group by s.repo_at
264
+
)
265
+
select rsc.repo_at
266
+
from repo_star_counts rsc
267
+
order by rsc.star_count desc
268
+
limit 8
269
+
`
270
+
271
+
rows, err := e.Query(query)
272
+
if err != nil {
273
+
return nil, err
274
+
}
275
+
defer rows.Close()
276
+
277
+
var repoUris []string
278
+
for rows.Next() {
279
+
var repoUri string
280
+
err := rows.Scan(&repoUri)
281
+
if err != nil {
282
+
return nil, err
283
+
}
284
+
repoUris = append(repoUris, repoUri)
285
+
}
286
+
287
+
if err := rows.Err(); err != nil {
288
+
return nil, err
289
+
}
290
+
291
+
if len(repoUris) == 0 {
292
+
return []Repo{}, nil
293
+
}
294
+
295
+
// get full repo data
296
+
repos, err := GetRepos(e, 0, FilterIn("at_uri", repoUris))
297
+
if err != nil {
298
+
return nil, err
299
+
}
300
+
301
+
// sort repos by the original trending order
302
+
repoMap := make(map[string]Repo)
303
+
for _, repo := range repos {
304
+
repoMap[repo.RepoAt().String()] = repo
305
+
}
306
+
307
+
orderedRepos := make([]Repo, 0, len(repoUris))
308
+
for _, uri := range repoUris {
309
+
if repo, exists := repoMap[uri]; exists {
310
+
orderedRepos = append(orderedRepos, repo)
311
+
}
312
+
}
313
+
314
+
return orderedRepos, nil
315
+
}
+12
-11
appview/db/strings.go
···
50
50
func (s String) Validate() error {
51
51
var err error
52
52
53
-
if !strings.Contains(s.Filename, ".") {
54
-
err = errors.Join(err, fmt.Errorf("missing filename extension"))
55
-
}
56
-
57
-
if strings.HasSuffix(s.Filename, ".") {
58
-
err = errors.Join(err, fmt.Errorf("filename ends with `.`"))
59
-
}
60
-
61
53
if utf8.RuneCountInString(s.Filename) > 140 {
62
54
err = errors.Join(err, fmt.Errorf("filename too long"))
63
55
}
···
113
105
filename = excluded.filename,
114
106
description = excluded.description,
115
107
content = excluded.content,
116
-
edited = case
108
+
edited = case
117
109
when
118
110
strings.content != excluded.content
119
111
or strings.filename != excluded.filename
···
131
123
return err
132
124
}
133
125
134
-
func GetStrings(e Execer, filters ...filter) ([]String, error) {
126
+
func GetStrings(e Execer, limit int, filters ...filter) ([]String, error) {
135
127
var all []String
136
128
137
129
var conditions []string
···
146
138
whereClause = " where " + strings.Join(conditions, " and ")
147
139
}
148
140
141
+
limitClause := ""
142
+
if limit != 0 {
143
+
limitClause = fmt.Sprintf(" limit %d ", limit)
144
+
}
145
+
149
146
query := fmt.Sprintf(`select
150
147
did,
151
148
rkey,
···
154
151
content,
155
152
created,
156
153
edited
157
-
from strings %s`,
154
+
from strings
155
+
%s
156
+
order by created desc
157
+
%s`,
158
158
whereClause,
159
+
limitClause,
159
160
)
160
161
161
162
rows, err := e.Query(query, args...)
+1
-1
appview/db/timeline.go
+181
-8
appview/ingester.go
···
5
5
"encoding/json"
6
6
"fmt"
7
7
"log/slog"
8
+
"strings"
8
9
"time"
9
10
10
11
"github.com/bluesky-social/indigo/atproto/syntax"
···
14
15
"tangled.sh/tangled.sh/core/api/tangled"
15
16
"tangled.sh/tangled.sh/core/appview/config"
16
17
"tangled.sh/tangled.sh/core/appview/db"
18
+
"tangled.sh/tangled.sh/core/appview/pages/markup"
17
19
"tangled.sh/tangled.sh/core/appview/spindleverify"
18
20
"tangled.sh/tangled.sh/core/idresolver"
19
21
"tangled.sh/tangled.sh/core/rbac"
···
61
63
case tangled.ActorProfileNSID:
62
64
err = i.ingestProfile(e)
63
65
case tangled.SpindleMemberNSID:
64
-
err = i.ingestSpindleMember(e)
66
+
err = i.ingestSpindleMember(ctx, e)
65
67
case tangled.SpindleNSID:
66
-
err = i.ingestSpindle(e)
68
+
err = i.ingestSpindle(ctx, e)
67
69
case tangled.StringNSID:
68
70
err = i.ingestString(e)
71
+
case tangled.RepoIssueNSID:
72
+
err = i.ingestIssue(ctx, e)
73
+
case tangled.RepoIssueCommentNSID:
74
+
err = i.ingestIssueComment(e)
69
75
}
70
76
l = i.Logger.With("nsid", e.Commit.Collection)
71
77
}
72
78
73
79
if err != nil {
74
-
l.Error("error ingesting record", "err", err)
80
+
l.Debug("error ingesting record", "err", err)
75
81
}
76
82
77
-
return err
83
+
return nil
78
84
}
79
85
}
80
86
···
336
342
return nil
337
343
}
338
344
339
-
func (i *Ingester) ingestSpindleMember(e *models.Event) error {
345
+
func (i *Ingester) ingestSpindleMember(ctx context.Context, e *models.Event) error {
340
346
did := e.Did
341
347
var err error
342
348
···
359
365
return fmt.Errorf("failed to enforce permissions: %w", err)
360
366
}
361
367
362
-
memberId, err := i.IdResolver.ResolveIdent(context.Background(), record.Subject)
368
+
memberId, err := i.IdResolver.ResolveIdent(ctx, record.Subject)
363
369
if err != nil {
364
370
return err
365
371
}
···
442
448
return nil
443
449
}
444
450
445
-
func (i *Ingester) ingestSpindle(e *models.Event) error {
451
+
func (i *Ingester) ingestSpindle(ctx context.Context, e *models.Event) error {
446
452
did := e.Did
447
453
var err error
448
454
···
475
481
return err
476
482
}
477
483
478
-
err = spindleverify.RunVerification(context.Background(), instance, did, i.Config.Core.Dev)
484
+
err = spindleverify.RunVerification(ctx, instance, did, i.Config.Core.Dev)
479
485
if err != nil {
480
486
l.Error("failed to add spindle to db", "err", err, "instance", instance)
481
487
return err
···
609
615
610
616
return nil
611
617
}
618
+
619
+
func (i *Ingester) ingestIssue(ctx context.Context, e *models.Event) error {
620
+
did := e.Did
621
+
rkey := e.Commit.RKey
622
+
623
+
var err error
624
+
625
+
l := i.Logger.With("handler", "ingestIssue", "nsid", e.Commit.Collection, "did", did, "rkey", rkey)
626
+
l.Info("ingesting record")
627
+
628
+
ddb, ok := i.Db.Execer.(*db.DB)
629
+
if !ok {
630
+
return fmt.Errorf("failed to index issue record, invalid db cast")
631
+
}
632
+
633
+
switch e.Commit.Operation {
634
+
case models.CommitOperationCreate:
635
+
raw := json.RawMessage(e.Commit.Record)
636
+
record := tangled.RepoIssue{}
637
+
err = json.Unmarshal(raw, &record)
638
+
if err != nil {
639
+
l.Error("invalid record", "err", err)
640
+
return err
641
+
}
642
+
643
+
issue := db.IssueFromRecord(did, rkey, record)
644
+
645
+
sanitizer := markup.NewSanitizer()
646
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(issue.Title)); st == "" {
647
+
return fmt.Errorf("title is empty after HTML sanitization")
648
+
}
649
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(issue.Body)); sb == "" {
650
+
return fmt.Errorf("body is empty after HTML sanitization")
651
+
}
652
+
653
+
tx, err := ddb.BeginTx(ctx, nil)
654
+
if err != nil {
655
+
l.Error("failed to begin transaction", "err", err)
656
+
return err
657
+
}
658
+
659
+
err = db.NewIssue(tx, &issue)
660
+
if err != nil {
661
+
l.Error("failed to create issue", "err", err)
662
+
return err
663
+
}
664
+
665
+
return nil
666
+
667
+
case models.CommitOperationUpdate:
668
+
raw := json.RawMessage(e.Commit.Record)
669
+
record := tangled.RepoIssue{}
670
+
err = json.Unmarshal(raw, &record)
671
+
if err != nil {
672
+
l.Error("invalid record", "err", err)
673
+
return err
674
+
}
675
+
676
+
body := ""
677
+
if record.Body != nil {
678
+
body = *record.Body
679
+
}
680
+
681
+
sanitizer := markup.NewSanitizer()
682
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(record.Title)); st == "" {
683
+
return fmt.Errorf("title is empty after HTML sanitization")
684
+
}
685
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(body)); sb == "" {
686
+
return fmt.Errorf("body is empty after HTML sanitization")
687
+
}
688
+
689
+
err = db.UpdateIssueByRkey(ddb, did, rkey, record.Title, body)
690
+
if err != nil {
691
+
l.Error("failed to update issue", "err", err)
692
+
return err
693
+
}
694
+
695
+
return nil
696
+
697
+
case models.CommitOperationDelete:
698
+
if err := db.DeleteIssueByRkey(ddb, did, rkey); err != nil {
699
+
l.Error("failed to delete", "err", err)
700
+
return fmt.Errorf("failed to delete issue record: %w", err)
701
+
}
702
+
703
+
return nil
704
+
}
705
+
706
+
return fmt.Errorf("unknown operation: %s", e.Commit.Operation)
707
+
}
708
+
709
+
func (i *Ingester) ingestIssueComment(e *models.Event) error {
710
+
did := e.Did
711
+
rkey := e.Commit.RKey
712
+
713
+
var err error
714
+
715
+
l := i.Logger.With("handler", "ingestIssueComment", "nsid", e.Commit.Collection, "did", did, "rkey", rkey)
716
+
l.Info("ingesting record")
717
+
718
+
ddb, ok := i.Db.Execer.(*db.DB)
719
+
if !ok {
720
+
return fmt.Errorf("failed to index issue comment record, invalid db cast")
721
+
}
722
+
723
+
switch e.Commit.Operation {
724
+
case models.CommitOperationCreate:
725
+
raw := json.RawMessage(e.Commit.Record)
726
+
record := tangled.RepoIssueComment{}
727
+
err = json.Unmarshal(raw, &record)
728
+
if err != nil {
729
+
l.Error("invalid record", "err", err)
730
+
return err
731
+
}
732
+
733
+
comment, err := db.IssueCommentFromRecord(ddb, did, rkey, record)
734
+
if err != nil {
735
+
l.Error("failed to parse comment from record", "err", err)
736
+
return err
737
+
}
738
+
739
+
sanitizer := markup.NewSanitizer()
740
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(comment.Body)); sb == "" {
741
+
return fmt.Errorf("body is empty after HTML sanitization")
742
+
}
743
+
744
+
err = db.NewIssueComment(ddb, &comment)
745
+
if err != nil {
746
+
l.Error("failed to create issue comment", "err", err)
747
+
return err
748
+
}
749
+
750
+
return nil
751
+
752
+
case models.CommitOperationUpdate:
753
+
raw := json.RawMessage(e.Commit.Record)
754
+
record := tangled.RepoIssueComment{}
755
+
err = json.Unmarshal(raw, &record)
756
+
if err != nil {
757
+
l.Error("invalid record", "err", err)
758
+
return err
759
+
}
760
+
761
+
sanitizer := markup.NewSanitizer()
762
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(record.Body)); sb == "" {
763
+
return fmt.Errorf("body is empty after HTML sanitization")
764
+
}
765
+
766
+
err = db.UpdateCommentByRkey(ddb, did, rkey, record.Body)
767
+
if err != nil {
768
+
l.Error("failed to update issue comment", "err", err)
769
+
return err
770
+
}
771
+
772
+
return nil
773
+
774
+
case models.CommitOperationDelete:
775
+
if err := db.DeleteCommentByRkey(ddb, did, rkey); err != nil {
776
+
l.Error("failed to delete", "err", err)
777
+
return fmt.Errorf("failed to delete issue comment record: %w", err)
778
+
}
779
+
780
+
return nil
781
+
}
782
+
783
+
return fmt.Errorf("unknown operation: %s", e.Commit.Operation)
784
+
}
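A minimal, self-contained sketch of the validation step the new ingestIssue and ingestIssueComment handlers apply before indexing a record: the same HTML sanitization used for rendering is run over the title and body, and the record is rejected if either collapses to nothing. The policy below (bluemonday.UGCPolicy) is an assumption for illustration; the appview routes this through markup.NewSanitizer().

package main

import (
	"errors"
	"strings"

	"github.com/microcosm-cc/bluemonday"
)

// validateIssueText is a hypothetical helper mirroring the checks in ingestIssue.
func validateIssueText(title, body string) error {
	policy := bluemonday.UGCPolicy()
	if strings.TrimSpace(policy.Sanitize(title)) == "" {
		return errors.New("title is empty after HTML sanitization")
	}
	if strings.TrimSpace(policy.Sanitize(body)) == "" {
		return errors.New("body is empty after HTML sanitization")
	}
	return nil
}

func main() {
	// A title that is only a script tag sanitizes to an empty string and is rejected.
	if err := validateIssueText("<script>alert(1)</script>", "hello"); err != nil {
		println(err.Error())
	}
}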
+41
-95
appview/issues/issues.go
···
7
7
"net/http"
8
8
"slices"
9
9
"strconv"
10
+
"strings"
10
11
"time"
11
12
12
13
comatproto "github.com/bluesky-social/indigo/api/atproto"
13
14
"github.com/bluesky-social/indigo/atproto/data"
14
-
"github.com/bluesky-social/indigo/atproto/syntax"
15
15
lexutil "github.com/bluesky-social/indigo/lex/util"
16
16
"github.com/go-chi/chi/v5"
17
17
···
21
21
"tangled.sh/tangled.sh/core/appview/notify"
22
22
"tangled.sh/tangled.sh/core/appview/oauth"
23
23
"tangled.sh/tangled.sh/core/appview/pages"
24
+
"tangled.sh/tangled.sh/core/appview/pages/markup"
24
25
"tangled.sh/tangled.sh/core/appview/pagination"
25
26
"tangled.sh/tangled.sh/core/appview/reporesolver"
26
27
"tangled.sh/tangled.sh/core/idresolver"
···
73
74
return
74
75
}
75
76
76
-
issue, comments, err := db.GetIssueWithComments(rp.db, f.RepoAt, issueIdInt)
77
+
issue, comments, err := db.GetIssueWithComments(rp.db, f.RepoAt(), issueIdInt)
77
78
if err != nil {
78
79
log.Println("failed to get issue and comments", err)
79
80
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
80
81
return
81
82
}
82
83
83
-
reactionCountMap, err := db.GetReactionCountMap(rp.db, syntax.ATURI(issue.IssueAt))
84
+
reactionCountMap, err := db.GetReactionCountMap(rp.db, issue.AtUri())
84
85
if err != nil {
85
86
log.Println("failed to get issue reactions")
86
87
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
···
88
89
89
90
userReactions := map[db.ReactionKind]bool{}
90
91
if user != nil {
91
-
userReactions = db.GetReactionStatusMap(rp.db, user.Did, syntax.ATURI(issue.IssueAt))
92
+
userReactions = db.GetReactionStatusMap(rp.db, user.Did, issue.AtUri())
92
93
}
93
94
94
95
issueOwnerIdent, err := rp.idResolver.ResolveIdent(r.Context(), issue.OwnerDid)
···
96
97
log.Println("failed to resolve issue owner", err)
97
98
}
98
99
99
-
identsToResolve := make([]string, len(comments))
100
-
for i, comment := range comments {
101
-
identsToResolve[i] = comment.OwnerDid
102
-
}
103
-
resolvedIds := rp.idResolver.ResolveIdents(r.Context(), identsToResolve)
104
-
didHandleMap := make(map[string]string)
105
-
for _, identity := range resolvedIds {
106
-
if !identity.Handle.IsInvalidHandle() {
107
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
108
-
} else {
109
-
didHandleMap[identity.DID.String()] = identity.DID.String()
110
-
}
111
-
}
112
-
113
100
rp.pages.RepoSingleIssue(w, pages.RepoSingleIssueParams{
114
101
LoggedInUser: user,
115
102
RepoInfo: f.RepoInfo(user),
116
-
Issue: *issue,
103
+
Issue: issue,
117
104
Comments: comments,
118
105
119
106
IssueOwnerHandle: issueOwnerIdent.Handle.String(),
120
-
DidHandleMap: didHandleMap,
121
107
122
108
OrderedReactionKinds: db.OrderedReactionKinds,
123
109
Reactions: reactionCountMap,
···
142
128
return
143
129
}
144
130
145
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
131
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
146
132
if err != nil {
147
133
log.Println("failed to get issue", err)
148
134
rp.pages.Notice(w, "issue-action", "Failed to close issue. Try again later.")
···
174
160
Rkey: tid.TID(),
175
161
Record: &lexutil.LexiconTypeDecoder{
176
162
Val: &tangled.RepoIssueState{
177
-
Issue: issue.IssueAt,
163
+
Issue: issue.AtUri().String(),
178
164
State: closed,
179
165
},
180
166
},
···
186
172
return
187
173
}
188
174
189
-
err = db.CloseIssue(rp.db, f.RepoAt, issueIdInt)
175
+
err = db.CloseIssue(rp.db, f.RepoAt(), issueIdInt)
190
176
if err != nil {
191
177
log.Println("failed to close issue", err)
192
178
rp.pages.Notice(w, "issue-action", "Failed to close issue. Try again later.")
···
218
204
return
219
205
}
220
206
221
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
207
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
222
208
if err != nil {
223
209
log.Println("failed to get issue", err)
224
210
rp.pages.Notice(w, "issue-action", "Failed to close issue. Try again later.")
···
235
221
isIssueOwner := user.Did == issue.OwnerDid
236
222
237
223
if isCollaborator || isIssueOwner {
238
-
err := db.ReopenIssue(rp.db, f.RepoAt, issueIdInt)
224
+
err := db.ReopenIssue(rp.db, f.RepoAt(), issueIdInt)
239
225
if err != nil {
240
226
log.Println("failed to reopen issue", err)
241
227
rp.pages.Notice(w, "issue-action", "Failed to reopen issue. Try again later.")
···
279
265
280
266
err := db.NewIssueComment(rp.db, &db.Comment{
281
267
OwnerDid: user.Did,
282
-
RepoAt: f.RepoAt,
268
+
RepoAt: f.RepoAt(),
283
269
Issue: issueIdInt,
284
270
CommentId: commentId,
285
271
Body: body,
···
292
278
}
293
279
294
280
createdAt := time.Now().Format(time.RFC3339)
295
-
commentIdInt64 := int64(commentId)
296
281
ownerDid := user.Did
297
-
issueAt, err := db.GetIssueAt(rp.db, f.RepoAt, issueIdInt)
282
+
issueAt, err := db.GetIssueAt(rp.db, f.RepoAt(), issueIdInt)
298
283
if err != nil {
299
284
log.Println("failed to get issue at", err)
300
285
rp.pages.Notice(w, "issue-comment", "Failed to create comment.")
301
286
return
302
287
}
303
288
304
-
atUri := f.RepoAt.String()
289
+
atUri := f.RepoAt().String()
305
290
client, err := rp.oauth.AuthorizedClient(r)
306
291
if err != nil {
307
292
log.Println("failed to get authorized client", err)
···
316
301
Val: &tangled.RepoIssueComment{
317
302
Repo: &atUri,
318
303
Issue: issueAt,
319
-
CommentId: &commentIdInt64,
320
304
Owner: &ownerDid,
321
305
Body: body,
322
306
CreatedAt: createdAt,
···
358
342
return
359
343
}
360
344
361
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
345
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
362
346
if err != nil {
363
347
log.Println("failed to get issue", err)
364
348
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
365
349
return
366
350
}
367
351
368
-
comment, err := db.GetComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
352
+
comment, err := db.GetComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
369
353
if err != nil {
370
354
http.Error(w, "bad comment id", http.StatusBadRequest)
371
355
return
372
356
}
373
357
374
-
identity, err := rp.idResolver.ResolveIdent(r.Context(), comment.OwnerDid)
375
-
if err != nil {
376
-
log.Println("failed to resolve did")
377
-
return
378
-
}
379
-
380
-
didHandleMap := make(map[string]string)
381
-
if !identity.Handle.IsInvalidHandle() {
382
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
383
-
} else {
384
-
didHandleMap[identity.DID.String()] = identity.DID.String()
385
-
}
386
-
387
358
rp.pages.SingleIssueCommentFragment(w, pages.SingleIssueCommentParams{
388
359
LoggedInUser: user,
389
360
RepoInfo: f.RepoInfo(user),
390
-
DidHandleMap: didHandleMap,
391
361
Issue: issue,
392
362
Comment: comment,
393
363
})
···
417
387
return
418
388
}
419
389
420
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
390
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
421
391
if err != nil {
422
392
log.Println("failed to get issue", err)
423
393
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
424
394
return
425
395
}
426
396
427
-
comment, err := db.GetComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
397
+
comment, err := db.GetComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
428
398
if err != nil {
429
399
http.Error(w, "bad comment id", http.StatusBadRequest)
430
400
return
···
479
449
repoAt := record["repo"].(string)
480
450
issueAt := record["issue"].(string)
481
451
createdAt := record["createdAt"].(string)
482
-
commentIdInt64 := int64(commentIdInt)
483
452
484
453
_, err = client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
485
454
Collection: tangled.RepoIssueCommentNSID,
···
490
459
Val: &tangled.RepoIssueComment{
491
460
Repo: &repoAt,
492
461
Issue: issueAt,
493
-
CommentId: &commentIdInt64,
494
462
Owner: &comment.OwnerDid,
495
463
Body: newBody,
496
464
CreatedAt: createdAt,
···
503
471
}
504
472
505
473
// optimistic update for htmx
506
-
didHandleMap := map[string]string{
507
-
user.Did: user.Handle,
508
-
}
509
474
comment.Body = newBody
510
475
comment.Edited = &edited
511
476
···
513
478
rp.pages.SingleIssueCommentFragment(w, pages.SingleIssueCommentParams{
514
479
LoggedInUser: user,
515
480
RepoInfo: f.RepoInfo(user),
516
-
DidHandleMap: didHandleMap,
517
481
Issue: issue,
518
482
Comment: comment,
519
483
})
···
539
503
return
540
504
}
541
505
542
-
issue, err := db.GetIssue(rp.db, f.RepoAt, issueIdInt)
506
+
issue, err := db.GetIssue(rp.db, f.RepoAt(), issueIdInt)
543
507
if err != nil {
544
508
log.Println("failed to get issue", err)
545
509
rp.pages.Notice(w, "issues", "Failed to load issue. Try again later.")
···
554
518
return
555
519
}
556
520
557
-
comment, err := db.GetComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
521
+
comment, err := db.GetComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
558
522
if err != nil {
559
523
http.Error(w, "bad comment id", http.StatusBadRequest)
560
524
return
···
572
536
573
537
// optimistic deletion
574
538
deleted := time.Now()
575
-
err = db.DeleteComment(rp.db, f.RepoAt, issueIdInt, commentIdInt)
539
+
err = db.DeleteComment(rp.db, f.RepoAt(), issueIdInt, commentIdInt)
576
540
if err != nil {
577
541
log.Println("failed to delete comment")
578
542
rp.pages.Notice(w, fmt.Sprintf("comment-%s-status", commentId), "failed to delete comment")
···
598
562
}
599
563
600
564
// optimistic update for htmx
601
-
didHandleMap := map[string]string{
602
-
user.Did: user.Handle,
603
-
}
604
565
comment.Body = ""
605
566
comment.Deleted = &deleted
606
567
···
608
569
rp.pages.SingleIssueCommentFragment(w, pages.SingleIssueCommentParams{
609
570
LoggedInUser: user,
610
571
RepoInfo: f.RepoInfo(user),
611
-
DidHandleMap: didHandleMap,
612
572
Issue: issue,
613
573
Comment: comment,
614
574
})
615
-
return
616
575
}
617
576
618
577
func (rp *Issues) RepoIssues(w http.ResponseWriter, r *http.Request) {
···
641
600
return
642
601
}
643
602
644
-
issues, err := db.GetIssues(rp.db, f.RepoAt, isOpen, page)
603
+
issues, err := db.GetIssuesPaginated(rp.db, f.RepoAt(), isOpen, page)
645
604
if err != nil {
646
605
log.Println("failed to get issues", err)
647
606
rp.pages.Notice(w, "issues", "Failed to load issues. Try again later.")
648
607
return
649
608
}
650
609
651
-
identsToResolve := make([]string, len(issues))
652
-
for i, issue := range issues {
653
-
identsToResolve[i] = issue.OwnerDid
654
-
}
655
-
resolvedIds := rp.idResolver.ResolveIdents(r.Context(), identsToResolve)
656
-
didHandleMap := make(map[string]string)
657
-
for _, identity := range resolvedIds {
658
-
if !identity.Handle.IsInvalidHandle() {
659
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
660
-
} else {
661
-
didHandleMap[identity.DID.String()] = identity.DID.String()
662
-
}
663
-
}
664
-
665
610
rp.pages.RepoIssues(w, pages.RepoIssuesParams{
666
611
LoggedInUser: rp.oauth.GetUser(r),
667
612
RepoInfo: f.RepoInfo(user),
668
613
Issues: issues,
669
-
DidHandleMap: didHandleMap,
670
614
FilteringByOpen: isOpen,
671
615
Page: page,
672
616
})
673
-
return
674
617
}
675
618
676
619
func (rp *Issues) NewIssue(w http.ResponseWriter, r *http.Request) {
···
697
640
return
698
641
}
699
642
643
+
sanitizer := markup.NewSanitizer()
644
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(title)); st == "" {
645
+
rp.pages.Notice(w, "issues", "Title is empty after HTML sanitization")
646
+
return
647
+
}
648
+
if sb := strings.TrimSpace(sanitizer.SanitizeDefault(body)); sb == "" {
649
+
rp.pages.Notice(w, "issues", "Body is empty after HTML sanitization")
650
+
return
651
+
}
652
+
700
653
tx, err := rp.db.BeginTx(r.Context(), nil)
701
654
if err != nil {
702
655
rp.pages.Notice(w, "issues", "Failed to create issue, try again later")
···
704
657
}
705
658
706
659
issue := &db.Issue{
707
-
RepoAt: f.RepoAt,
660
+
RepoAt: f.RepoAt(),
661
+
Rkey: tid.TID(),
708
662
Title: title,
709
663
Body: body,
710
664
OwnerDid: user.Did,
···
722
676
rp.pages.Notice(w, "issues", "Failed to create issue.")
723
677
return
724
678
}
725
-
atUri := f.RepoAt.String()
726
-
resp, err := client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
679
+
atUri := f.RepoAt().String()
680
+
_, err = client.RepoPutRecord(r.Context(), &comatproto.RepoPutRecord_Input{
727
681
Collection: tangled.RepoIssueNSID,
728
682
Repo: user.Did,
729
-
Rkey: tid.TID(),
683
+
Rkey: issue.Rkey,
730
684
Record: &lexutil.LexiconTypeDecoder{
731
685
Val: &tangled.RepoIssue{
732
-
Repo: atUri,
733
-
Title: title,
734
-
Body: &body,
735
-
Owner: user.Did,
736
-
IssueId: int64(issue.IssueId),
686
+
Repo: atUri,
687
+
Title: title,
688
+
Body: &body,
689
+
Owner: user.Did,
737
690
},
738
691
},
739
692
})
740
693
if err != nil {
741
694
log.Println("failed to create issue", err)
742
-
rp.pages.Notice(w, "issues", "Failed to create issue.")
743
-
return
744
-
}
745
-
746
-
err = db.SetIssueAt(rp.db, f.RepoAt, issue.IssueId, resp.Uri)
747
-
if err != nil {
748
-
log.Println("failed to set issue at", err)
749
695
rp.pages.Notice(w, "issues", "Failed to create issue.")
750
696
return
751
697
}
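Most of the churn in this file follows two model changes: issues are addressed by their record key rather than a stored issueAt URI, and the repo at-uri now comes from an accessor (f.RepoAt()) instead of a field. A sketch of what an AtUri() accessor plausibly computes; the NSID string and the method shape are assumptions based on the diff.

package main

import (
	"fmt"

	"github.com/bluesky-social/indigo/atproto/syntax"
)

// Issue is a trimmed stand-in for db.Issue after the schema change:
// the at-uri is derived from (OwnerDid, Rkey) instead of being stored.
type Issue struct {
	OwnerDid string
	Rkey     string
}

// AtUri rebuilds the record URI; "sh.tangled.repo.issue" is assumed to be
// the value behind tangled.RepoIssueNSID.
func (i Issue) AtUri() syntax.ATURI {
	return syntax.ATURI(fmt.Sprintf("at://%s/sh.tangled.repo.issue/%s", i.OwnerDid, i.Rkey))
}

func main() {
	i := Issue{OwnerDid: "did:plc:example", Rkey: "3kabc123xyz2a"}
	fmt.Println(i.AtUri())
}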
-16
appview/knots/knots.go
···
334
334
repoByMember[r.Did] = append(repoByMember[r.Did], r)
335
335
}
336
336
337
-
var didsToResolve []string
338
-
for _, m := range members {
339
-
didsToResolve = append(didsToResolve, m)
340
-
}
341
-
didsToResolve = append(didsToResolve, reg.ByDid)
342
-
resolvedIds := k.IdResolver.ResolveIdents(r.Context(), didsToResolve)
343
-
didHandleMap := make(map[string]string)
344
-
for _, identity := range resolvedIds {
345
-
if !identity.Handle.IsInvalidHandle() {
346
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
347
-
} else {
348
-
didHandleMap[identity.DID.String()] = identity.DID.String()
349
-
}
350
-
}
351
-
352
337
k.Pages.Knot(w, pages.KnotParams{
353
338
LoggedInUser: user,
354
-
DidHandleMap: didHandleMap,
355
339
Registration: reg,
356
340
Members: members,
357
341
Repos: repoByMember,
+13
-10
appview/middleware/middleware.go
···
5
5
"fmt"
6
6
"log"
7
7
"net/http"
8
+
"net/url"
8
9
"slices"
9
10
"strconv"
10
11
"strings"
11
-
"time"
12
12
13
13
"github.com/bluesky-social/indigo/atproto/identity"
14
14
"github.com/go-chi/chi/v5"
···
46
46
func AuthMiddleware(a *oauth.OAuth) middlewareFunc {
47
47
return func(next http.Handler) http.Handler {
48
48
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
49
+
returnURL := "/"
50
+
if u, err := url.Parse(r.Header.Get("Referer")); err == nil {
51
+
returnURL = u.RequestURI()
52
+
}
53
+
54
+
loginURL := fmt.Sprintf("/login?return_url=%s", url.QueryEscape(returnURL))
55
+
49
56
redirectFunc := func(w http.ResponseWriter, r *http.Request) {
50
-
http.Redirect(w, r, "/login", http.StatusTemporaryRedirect)
57
+
http.Redirect(w, r, loginURL, http.StatusTemporaryRedirect)
51
58
}
52
59
if r.Header.Get("HX-Request") == "true" {
53
60
redirectFunc = func(w http.ResponseWriter, _ *http.Request) {
54
-
w.Header().Set("HX-Redirect", "/login")
61
+
w.Header().Set("HX-Redirect", loginURL)
55
62
w.WriteHeader(http.StatusOK)
56
63
}
57
64
}
···
214
221
return
215
222
}
216
223
217
-
ctx := context.WithValue(req.Context(), "knot", repo.Knot)
218
-
ctx = context.WithValue(ctx, "repoAt", repo.AtUri)
219
-
ctx = context.WithValue(ctx, "repoDescription", repo.Description)
220
-
ctx = context.WithValue(ctx, "repoSpindle", repo.Spindle)
221
-
ctx = context.WithValue(ctx, "repoAddedAt", repo.Created.Format(time.RFC3339))
224
+
ctx := context.WithValue(req.Context(), "repo", repo)
222
225
next.ServeHTTP(w, req.WithContext(ctx))
223
226
})
224
227
}
···
243
246
return
244
247
}
245
248
246
-
pr, err := db.GetPull(mw.db, f.RepoAt, prIdInt)
249
+
pr, err := db.GetPull(mw.db, f.RepoAt(), prIdInt)
247
250
if err != nil {
248
251
log.Println("failed to get pull and comments", err)
249
252
return
···
284
287
return
285
288
}
286
289
287
-
fullName := f.OwnerHandle() + "/" + f.RepoName
290
+
fullName := f.OwnerHandle() + "/" + f.Name
288
291
289
292
if r.Header.Get("User-Agent") == "Go-http-client/1.1" {
290
293
if r.URL.Query().Get("go-get") == "1" {
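A standalone sketch of the redirect AuthMiddleware now builds: the Referer is parsed into a relative return_url so the user lands back where they were after logging in. The helper name is hypothetical; only the URL handling mirrors the change.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// loginRedirectURL extracts the middleware's logic into a testable helper.
func loginRedirectURL(r *http.Request) string {
	returnURL := "/"
	// url.Parse of an empty Referer yields "/" from RequestURI, so the
	// fallback also covers a missing header.
	if u, err := url.Parse(r.Header.Get("Referer")); err == nil {
		returnURL = u.RequestURI()
	}
	return fmt.Sprintf("/login?return_url=%s", url.QueryEscape(returnURL))
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "https://tangled.sh/protected", nil)
	r.Header.Set("Referer", "https://tangled.sh/some/repo/issues?page=2")
	fmt.Println(loginRedirectURL(r))
	// /login?return_url=%2Fsome%2Frepo%2Fissues%3Fpage%3D2
}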
+17
-2
appview/oauth/handler/handler.go
···
109
109
func (o *OAuthHandler) login(w http.ResponseWriter, r *http.Request) {
110
110
switch r.Method {
111
111
case http.MethodGet:
112
-
o.pages.Login(w, pages.LoginParams{})
112
+
returnURL := r.URL.Query().Get("return_url")
113
+
o.pages.Login(w, pages.LoginParams{
114
+
ReturnUrl: returnURL,
115
+
})
113
116
case http.MethodPost:
114
117
handle := r.FormValue("handle")
115
118
···
194
197
DpopAuthserverNonce: parResp.DpopAuthserverNonce,
195
198
DpopPrivateJwk: string(dpopKeyJson),
196
199
State: parResp.State,
200
+
ReturnUrl: r.FormValue("return_url"),
197
201
})
198
202
if err != nil {
199
203
log.Println("failed to save oauth request:", err)
···
249
253
return
250
254
}
251
255
256
+
if iss != oauthRequest.AuthserverIss {
257
+
log.Println("mismatched iss:", iss, "!=", oauthRequest.AuthserverIss, "for state:", state)
258
+
o.pages.Notice(w, "login-msg", "Failed to authenticate. Try again later.")
259
+
return
260
+
}
261
+
252
262
self := o.oauth.ClientMetadata()
253
263
254
264
oauthClient, err := client.NewClient(
···
311
321
}
312
322
}
313
323
314
-
http.Redirect(w, r, "/", http.StatusFound)
324
+
returnUrl := oauthRequest.ReturnUrl
325
+
if returnUrl == "" {
326
+
returnUrl = "/"
327
+
}
328
+
329
+
http.Redirect(w, r, returnUrl, http.StatusFound)
315
330
}
316
331
317
332
func (o *OAuthHandler) logout(w http.ResponseWriter, r *http.Request) {
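A condensed sketch of the two callback changes: the iss returned by the authorization server must match the issuer recorded when the flow started, and the post-login redirect prefers the stored return_url over "/". The struct is a simplified stand-in for the persisted OAuth request row.

package main

import (
	"errors"
	"fmt"
)

// oauthRequest stands in for the stored request; only the fields the checks need are shown.
type oauthRequest struct {
	AuthserverIss string
	ReturnUrl     string
}

func finishLogin(iss string, req oauthRequest) (string, error) {
	if iss != req.AuthserverIss {
		return "", errors.New("mismatched iss")
	}
	if req.ReturnUrl == "" {
		return "/", nil
	}
	return req.ReturnUrl, nil
}

func main() {
	dest, err := finishLogin("https://bsky.social",
		oauthRequest{AuthserverIss: "https://bsky.social", ReturnUrl: "/some/repo/issues"})
	fmt.Println(dest, err) // /some/repo/issues <nil>
}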
+13
-3
appview/oauth/oauth.go
···
103
103
if err != nil {
104
104
return nil, false, fmt.Errorf("error parsing expiry time: %w", err)
105
105
}
106
-
if expiry.Sub(time.Now()) <= 5*time.Minute {
106
+
if time.Until(expiry) <= 5*time.Minute {
107
107
privateJwk, err := helpers.ParseJWKFromBytes([]byte(session.DpopPrivateJwk))
108
108
if err != nil {
109
109
return nil, false, err
···
224
224
s.service = service
225
225
}
226
226
}
227
+
228
+
// Specify the duration in seconds for the expiry of this token
229
+
//
230
+
// The time of expiry is calculated as time.Now().Unix() + exp
227
231
func WithExp(exp int64) ServiceClientOpt {
228
232
return func(s *ServiceClientOpts) {
229
-
s.exp = exp
233
+
s.exp = time.Now().Unix() + exp
230
234
}
231
235
}
232
236
···
266
270
return nil, err
267
271
}
268
272
273
+
// force expiry to at least 60 seconds in the future
274
+
sixty := time.Now().Unix() + 60
275
+
if opts.exp < sixty {
276
+
opts.exp = sixty
277
+
}
278
+
269
279
resp, err := authorizedClient.ServerGetServiceAuth(r.Context(), opts.Audience(), opts.exp, opts.lxm)
270
280
if err != nil {
271
281
return nil, err
···
305
315
redirectURIs := makeRedirectURIs(clientURI)
306
316
307
317
if o.config.Core.Dev {
308
-
clientURI = fmt.Sprintf("http://127.0.0.1:3000")
318
+
clientURI = "http://127.0.0.1:3000"
309
319
redirectURIs = makeRedirectURIs(clientURI)
310
320
311
321
query := url.Values{}
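A self-contained sketch of the new WithExp semantics: callers pass a duration in seconds, the option converts it to an absolute Unix expiry, and the client later clamps it to at least 60 seconds in the future. The option struct here is a simplified stand-in for ServiceClientOpts.

package main

import (
	"fmt"
	"time"
)

type serviceOpts struct{ exp int64 }

type serviceOpt func(*serviceOpts)

// WithExp takes a duration in seconds and stores an absolute Unix expiry.
func WithExp(seconds int64) serviceOpt {
	return func(o *serviceOpts) { o.exp = time.Now().Unix() + seconds }
}

// clampExpiry enforces the 60-second floor applied before requesting a service auth token.
func clampExpiry(o *serviceOpts) {
	if sixty := time.Now().Unix() + 60; o.exp < sixty {
		o.exp = sixty
	}
}

func main() {
	o := &serviceOpts{}
	WithExp(5)(o) // only five seconds requested...
	clampExpiry(o)
	fmt.Println(o.exp-time.Now().Unix() >= 59) // ...but at least a minute is enforced: true
}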
+26
-6
appview/pages/funcmap.go
···
1
1
package pages
2
2
3
3
import (
4
+
"context"
4
5
"crypto/hmac"
5
6
"crypto/sha256"
6
7
"encoding/hex"
···
18
19
19
20
"github.com/dustin/go-humanize"
20
21
"github.com/go-enry/go-enry/v2"
21
-
"github.com/microcosm-cc/bluemonday"
22
22
"tangled.sh/tangled.sh/core/appview/filetree"
23
23
"tangled.sh/tangled.sh/core/appview/pages/markup"
24
24
)
···
27
27
return template.FuncMap{
28
28
"split": func(s string) []string {
29
29
return strings.Split(s, "\n")
30
+
},
31
+
"resolve": func(s string) string {
32
+
identity, err := p.resolver.ResolveIdent(context.Background(), s)
33
+
34
+
if err != nil {
35
+
return s
36
+
}
37
+
38
+
if identity.Handle.IsInvalidHandle() {
39
+
return "handle.invalid"
40
+
}
41
+
42
+
return "@" + identity.Handle.String()
30
43
},
31
44
"truncateAt30": func(s string) string {
32
45
if len(s) <= 30 {
···
74
87
"negf64": func(a float64) float64 {
75
88
return -a
76
89
},
77
-
"cond": func(cond interface{}, a, b string) string {
90
+
"cond": func(cond any, a, b string) string {
78
91
if cond == nil {
79
92
return b
80
93
}
···
167
180
return html.UnescapeString(s)
168
181
},
169
182
"nl2br": func(text string) template.HTML {
170
-
return template.HTML(strings.Replace(template.HTMLEscapeString(text), "\n", "<br>", -1))
183
+
return template.HTML(strings.ReplaceAll(template.HTMLEscapeString(text), "\n", "<br>"))
171
184
},
172
185
"unwrapText": func(text string) string {
173
186
paragraphs := strings.Split(text, "\n\n")
···
193
206
}
194
207
return v.Slice(0, min(n, v.Len())).Interface()
195
208
},
196
-
197
209
"markdown": func(text string) template.HTML {
198
-
rctx := &markup.RenderContext{RendererType: markup.RendererTypeDefault}
199
-
return template.HTML(bluemonday.UGCPolicy().Sanitize(rctx.RenderMarkdown(text)))
210
+
p.rctx.RendererType = markup.RendererTypeDefault
211
+
htmlString := p.rctx.RenderMarkdown(text)
212
+
sanitized := p.rctx.SanitizeDefault(htmlString)
213
+
return template.HTML(sanitized)
214
+
},
215
+
"description": func(text string) template.HTML {
216
+
p.rctx.RendererType = markup.RendererTypeDefault
217
+
htmlString := p.rctx.RenderMarkdown(text)
218
+
sanitized := p.rctx.SanitizeDescription(htmlString)
219
+
return template.HTML(sanitized)
200
220
},
201
221
"isNil": func(t any) bool {
202
222
// returns false for other "zero" values
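A sketch of how the new resolve template function is consumed: templates hand a DID straight to the func map instead of receiving a precomputed DidHandleMap, which is why that field disappears from so many params structs later in this change. The lookup below is a stub; the real implementation calls the ID resolver and falls back to the raw DID (or "handle.invalid") on failure.

package main

import (
	"html/template"
	"os"
)

func main() {
	// Hypothetical lookup table standing in for ResolveIdent.
	handles := map[string]string{"did:plc:alice": "@alice.tangled.sh"}

	funcs := template.FuncMap{
		"resolve": func(did string) string {
			if h, ok := handles[did]; ok {
				return h
			}
			return did // fall back to the raw DID, as the appview does on errors
		},
	}

	t := template.Must(template.New("repo").Funcs(funcs).Parse(
		`<a href="/{{ resolve .Did }}/{{ .Name }}">{{ .Name }}</a>`))
	_ = t.Execute(os.Stdout, struct{ Did, Name string }{"did:plc:alice", "core"})
	// prints roughly: <a href="/@alice.tangled.sh/core">core</a>
}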
+61
-31
appview/pages/markup/markdown.go
···
9
9
"path"
10
10
"strings"
11
11
12
-
"github.com/microcosm-cc/bluemonday"
12
+
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
13
+
"github.com/alecthomas/chroma/v2/styles"
13
14
"github.com/yuin/goldmark"
15
+
highlighting "github.com/yuin/goldmark-highlighting/v2"
14
16
"github.com/yuin/goldmark/ast"
15
17
"github.com/yuin/goldmark/extension"
16
18
"github.com/yuin/goldmark/parser"
···
40
42
repoinfo.RepoInfo
41
43
IsDev bool
42
44
RendererType RendererType
45
+
Sanitizer Sanitizer
43
46
}
44
47
45
48
func (rctx *RenderContext) RenderMarkdown(source string) string {
46
49
md := goldmark.New(
47
-
goldmark.WithExtensions(extension.GFM),
50
+
goldmark.WithExtensions(
51
+
extension.GFM,
52
+
highlighting.NewHighlighting(
53
+
highlighting.WithFormatOptions(
54
+
chromahtml.Standalone(false),
55
+
chromahtml.WithClasses(true),
56
+
),
57
+
highlighting.WithCustomStyle(styles.Get("catppuccin-latte")),
58
+
),
59
+
extension.NewFootnote(
60
+
extension.WithFootnoteIDPrefix([]byte("footnote")),
61
+
),
62
+
),
48
63
goldmark.WithParserOptions(
49
64
parser.WithAutoHeadingID(),
50
65
),
···
145
160
}
146
161
}
147
162
148
-
func (rctx *RenderContext) Sanitize(html string) string {
149
-
policy := bluemonday.UGCPolicy()
150
-
151
-
// video
152
-
policy.AllowElements("video")
153
-
policy.AllowAttrs("controls").OnElements("video")
154
-
policy.AllowElements("source")
155
-
policy.AllowAttrs("src", "type").OnElements("source")
156
-
157
-
// centering content
158
-
policy.AllowElements("center")
163
+
func (rctx *RenderContext) SanitizeDefault(html string) string {
164
+
return rctx.Sanitizer.SanitizeDefault(html)
165
+
}
159
166
160
-
policy.AllowAttrs("align", "style", "width", "height").Globally()
161
-
policy.AllowStyles(
162
-
"margin",
163
-
"padding",
164
-
"text-align",
165
-
"font-weight",
166
-
"text-decoration",
167
-
"padding-left",
168
-
"padding-right",
169
-
"padding-top",
170
-
"padding-bottom",
171
-
"margin-left",
172
-
"margin-right",
173
-
"margin-top",
174
-
"margin-bottom",
175
-
)
176
-
return policy.Sanitize(html)
167
+
func (rctx *RenderContext) SanitizeDescription(html string) string {
168
+
return rctx.Sanitizer.SanitizeDescription(html)
177
169
}
178
170
179
171
type MarkdownTransformer struct {
···
189
181
switch a.rctx.RendererType {
190
182
case RendererTypeRepoMarkdown:
191
183
switch n := n.(type) {
184
+
case *ast.Heading:
185
+
a.rctx.anchorHeadingTransformer(n)
192
186
case *ast.Link:
193
187
a.rctx.relativeLinkTransformer(n)
194
188
case *ast.Image:
···
197
191
}
198
192
case RendererTypeDefault:
199
193
switch n := n.(type) {
194
+
case *ast.Heading:
195
+
a.rctx.anchorHeadingTransformer(n)
200
196
case *ast.Image:
201
197
a.rctx.imageFromKnotAstTransformer(n)
202
198
a.rctx.camoImageLinkAstTransformer(n)
···
211
207
212
208
dst := string(link.Destination)
213
209
214
-
if isAbsoluteUrl(dst) {
210
+
if isAbsoluteUrl(dst) || isFragment(dst) || isMail(dst) {
215
211
return
216
212
}
217
213
···
252
248
img.Destination = []byte(rctx.imageFromKnotTransformer(dst))
253
249
}
254
250
251
+
func (rctx *RenderContext) anchorHeadingTransformer(h *ast.Heading) {
252
+
idGeneric, exists := h.AttributeString("id")
253
+
if !exists {
254
+
return // no id, nothing to do
255
+
}
256
+
id, ok := idGeneric.([]byte)
257
+
if !ok {
258
+
return
259
+
}
260
+
261
+
// create anchor link
262
+
anchor := ast.NewLink()
263
+
anchor.Destination = fmt.Appendf(nil, "#%s", string(id))
264
+
anchor.SetAttribute([]byte("class"), []byte("anchor"))
265
+
266
+
// create icon text
267
+
iconText := ast.NewString([]byte("#"))
268
+
anchor.AppendChild(anchor, iconText)
269
+
270
+
// set class on heading
271
+
h.SetAttribute([]byte("class"), []byte("heading"))
272
+
273
+
// append anchor to heading
274
+
h.AppendChild(h, anchor)
275
+
}
276
+
255
277
// actualPath decides when to join the file path with the
256
278
// current repository directory (essentially only when the link
257
279
// destination is relative. if it's absolute then we assume the
···
271
293
}
272
294
return parsed.IsAbs()
273
295
}
296
+
297
+
func isFragment(link string) bool {
298
+
return strings.HasPrefix(link, "#")
299
+
}
300
+
301
+
func isMail(link string) bool {
302
+
return strings.HasPrefix(link, "mailto:")
303
+
}
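A runnable distillation of the upgraded markdown pipeline: GFM plus chroma-backed highlighting that emits CSS classes rather than inline styles, and footnote IDs prefixed with "footnote". The heading anchor transformer is left out to keep the sketch short.

package main

import (
	"bytes"
	"fmt"

	chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
	"github.com/alecthomas/chroma/v2/styles"
	"github.com/yuin/goldmark"
	highlighting "github.com/yuin/goldmark-highlighting/v2"
	"github.com/yuin/goldmark/extension"
	"github.com/yuin/goldmark/parser"
)

func main() {
	md := goldmark.New(
		goldmark.WithExtensions(
			extension.GFM,
			highlighting.NewHighlighting(
				highlighting.WithFormatOptions(
					chromahtml.Standalone(false),
					chromahtml.WithClasses(true), // classes, so the sanitizer can allowlist them
				),
				highlighting.WithCustomStyle(styles.Get("catppuccin-latte")),
			),
			extension.NewFootnote(
				extension.WithFootnoteIDPrefix([]byte("footnote")),
			),
		),
		goldmark.WithParserOptions(parser.WithAutoHeadingID()),
	)

	var buf bytes.Buffer
	src := "# Title\n\n```go\nfmt.Println(\"hi\")\n```\n"
	if err := md.Convert([]byte(src), &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // the heading gets an id; the code block is wrapped in chroma spans
}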
+117
appview/pages/markup/sanitizer.go
···
1
+
package markup
2
+
3
+
import (
4
+
"maps"
5
+
"regexp"
6
+
"slices"
7
+
"strings"
8
+
9
+
"github.com/alecthomas/chroma/v2"
10
+
"github.com/microcosm-cc/bluemonday"
11
+
)
12
+
13
+
type Sanitizer struct {
14
+
defaultPolicy *bluemonday.Policy
15
+
descriptionPolicy *bluemonday.Policy
16
+
}
17
+
18
+
func NewSanitizer() Sanitizer {
19
+
return Sanitizer{
20
+
defaultPolicy: defaultPolicy(),
21
+
descriptionPolicy: descriptionPolicy(),
22
+
}
23
+
}
24
+
25
+
func (s *Sanitizer) SanitizeDefault(html string) string {
26
+
return s.defaultPolicy.Sanitize(html)
27
+
}
28
+
func (s *Sanitizer) SanitizeDescription(html string) string {
29
+
return s.descriptionPolicy.Sanitize(html)
30
+
}
31
+
32
+
func defaultPolicy() *bluemonday.Policy {
33
+
policy := bluemonday.UGCPolicy()
34
+
35
+
// Allow generally safe attributes
36
+
generalSafeAttrs := []string{
37
+
"abbr", "accept", "accept-charset",
38
+
"accesskey", "action", "align", "alt",
39
+
"aria-describedby", "aria-hidden", "aria-label", "aria-labelledby",
40
+
"axis", "border", "cellpadding", "cellspacing", "char",
41
+
"charoff", "charset", "checked",
42
+
"clear", "cols", "colspan", "color",
43
+
"compact", "coords", "datetime", "dir",
44
+
"disabled", "enctype", "for", "frame",
45
+
"headers", "height", "hreflang",
46
+
"hspace", "ismap", "label", "lang",
47
+
"maxlength", "media", "method",
48
+
"multiple", "name", "nohref", "noshade",
49
+
"nowrap", "open", "prompt", "readonly", "rel", "rev",
50
+
"rows", "rowspan", "rules", "scope",
51
+
"selected", "shape", "size", "span",
52
+
"start", "summary", "tabindex", "target",
53
+
"title", "type", "usemap", "valign", "value",
54
+
"vspace", "width", "itemprop",
55
+
}
56
+
57
+
generalSafeElements := []string{
58
+
"h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "br", "b", "i", "strong", "em", "a", "pre", "code", "img", "tt",
59
+
"div", "ins", "del", "sup", "sub", "p", "ol", "ul", "table", "thead", "tbody", "tfoot", "blockquote", "label",
60
+
"dl", "dt", "dd", "kbd", "q", "samp", "var", "hr", "ruby", "rt", "rp", "li", "tr", "td", "th", "s", "strike", "summary",
61
+
"details", "caption", "figure", "figcaption",
62
+
"abbr", "bdo", "cite", "dfn", "mark", "small", "span", "time", "video", "wbr",
63
+
}
64
+
65
+
policy.AllowAttrs(generalSafeAttrs...).OnElements(generalSafeElements...)
66
+
67
+
// video
68
+
policy.AllowAttrs("src", "autoplay", "controls").OnElements("video")
69
+
70
+
// checkboxes
71
+
policy.AllowAttrs("type").Matching(regexp.MustCompile(`^checkbox$`)).OnElements("input")
72
+
policy.AllowAttrs("checked", "disabled", "data-source-position").OnElements("input")
73
+
74
+
// for code blocks
75
+
policy.AllowAttrs("class").Matching(regexp.MustCompile(`chroma`)).OnElements("pre")
76
+
policy.AllowAttrs("class").Matching(regexp.MustCompile(`anchor|footnote-ref|footnote-backref`)).OnElements("a")
77
+
policy.AllowAttrs("class").Matching(regexp.MustCompile(`heading`)).OnElements("h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8")
78
+
policy.AllowAttrs("class").Matching(regexp.MustCompile(strings.Join(slices.Collect(maps.Values(chroma.StandardTypes)), "|"))).OnElements("span")
79
+
80
+
// centering content
81
+
policy.AllowElements("center")
82
+
83
+
policy.AllowAttrs("align", "style", "width", "height").Globally()
84
+
policy.AllowStyles(
85
+
"margin",
86
+
"padding",
87
+
"text-align",
88
+
"font-weight",
89
+
"text-decoration",
90
+
"padding-left",
91
+
"padding-right",
92
+
"padding-top",
93
+
"padding-bottom",
94
+
"margin-left",
95
+
"margin-right",
96
+
"margin-top",
97
+
"margin-bottom",
98
+
)
99
+
100
+
return policy
101
+
}
102
+
103
+
func descriptionPolicy() *bluemonday.Policy {
104
+
policy := bluemonday.NewPolicy()
105
+
policy.AllowStandardURLs()
106
+
107
+
// allow italics and bold.
108
+
policy.AllowElements("i", "b", "em", "strong")
109
+
110
+
// allow code.
111
+
policy.AllowElements("code")
112
+
113
+
// allow links
114
+
policy.AllowAttrs("href", "target", "rel").OnElements("a")
115
+
116
+
return policy
117
+
}
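A small demonstration of the intent behind the split: the default policy keeps rich rendered markdown (code blocks, tables, videos, chroma spans), while the description policy reduces input to inline emphasis, code and links. Shown with bluemonday directly so it runs on its own; the appview reaches these policies through markup.NewSanitizer().

package main

import (
	"fmt"

	"github.com/microcosm-cc/bluemonday"
)

func main() {
	// Description-style policy: links, emphasis and inline code only.
	desc := bluemonday.NewPolicy()
	desc.AllowStandardURLs()
	desc.AllowElements("i", "b", "em", "strong", "code")
	desc.AllowAttrs("href", "target", "rel").OnElements("a")

	in := `<h1>big</h1> a <em>tangled</em> <a href="https://tangled.sh">repo</a> <script>alert(1)</script>`
	fmt.Println(desc.Sanitize(in))
	// the heading tags and the script are dropped; the emphasis and link survive
}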
+52
-49
appview/pages/pages.go
···
24
24
"tangled.sh/tangled.sh/core/appview/pages/markup"
25
25
"tangled.sh/tangled.sh/core/appview/pages/repoinfo"
26
26
"tangled.sh/tangled.sh/core/appview/pagination"
27
+
"tangled.sh/tangled.sh/core/idresolver"
27
28
"tangled.sh/tangled.sh/core/patchutil"
28
29
"tangled.sh/tangled.sh/core/types"
29
30
···
45
46
t map[string]*template.Template
46
47
47
48
avatar config.AvatarConfig
49
+
resolver *idresolver.Resolver
48
50
dev bool
49
51
embedFS embed.FS
50
52
templateDir string // Path to templates on disk for dev mode
51
53
rctx *markup.RenderContext
52
54
}
53
55
54
-
func NewPages(config *config.Config) *Pages {
56
+
func NewPages(config *config.Config, res *idresolver.Resolver) *Pages {
55
57
// initialized with safe defaults, can be overriden per use
56
58
rctx := &markup.RenderContext{
57
59
IsDev: config.Core.Dev,
58
60
CamoUrl: config.Camo.Host,
59
61
CamoSecret: config.Camo.SharedSecret,
62
+
Sanitizer: markup.NewSanitizer(),
60
63
}
61
64
62
65
p := &Pages{
···
66
69
avatar: config.Avatar,
67
70
embedFS: Files,
68
71
rctx: rctx,
72
+
resolver: res,
69
73
templateDir: "appview/pages",
70
74
}
71
75
···
256
260
return p.executeOrReload(name, w, "layouts/repobase", params)
257
261
}
258
262
263
+
func (p *Pages) Favicon(w io.Writer) error {
264
+
return p.executePlain("favicon", w, nil)
265
+
}
266
+
259
267
type LoginParams struct {
268
+
ReturnUrl string
260
269
}
261
270
262
271
func (p *Pages) Login(w io.Writer, params LoginParams) error {
···
290
299
type TimelineParams struct {
291
300
LoggedInUser *oauth.User
292
301
Timeline []db.TimelineEvent
293
-
DidHandleMap map[string]string
302
+
Repos []db.Repo
294
303
}
295
304
296
305
func (p *Pages) Timeline(w io.Writer, params TimelineParams) error {
297
-
return p.execute("timeline", w, params)
306
+
return p.execute("timeline/timeline", w, params)
298
307
}
299
308
300
309
type SettingsParams struct {
···
318
327
319
328
type KnotParams struct {
320
329
LoggedInUser *oauth.User
321
-
DidHandleMap map[string]string
322
330
Registration *db.Registration
323
331
Members []string
324
332
Repos map[string][]db.Repo
···
375
383
Spindle db.Spindle
376
384
Members []string
377
385
Repos map[string][]db.Repo
378
-
DidHandleMap map[string]string
379
386
}
380
387
381
388
func (p *Pages) SpindleDashboard(w io.Writer, params SpindleDashboardParams) error {
···
408
415
ProfileTimeline *db.ProfileTimeline
409
416
Card ProfileCard
410
417
Punchcard db.Punchcard
411
-
412
-
DidHandleMap map[string]string
413
418
}
414
419
415
420
type ProfileCard struct {
···
430
435
LoggedInUser *oauth.User
431
436
Repos []db.Repo
432
437
Card ProfileCard
433
-
434
-
DidHandleMap map[string]string
435
438
}
436
439
437
440
func (p *Pages) ReposPage(w io.Writer, params ReposPageParams) error {
···
460
463
LoggedInUser *oauth.User
461
464
Profile *db.Profile
462
465
AllRepos []PinnedRepo
463
-
DidHandleMap map[string]string
464
466
}
465
467
466
468
type PinnedRepo struct {
···
519
521
}
520
522
521
523
p.rctx.RepoInfo = params.RepoInfo
524
+
p.rctx.RepoInfo.Ref = params.Ref
522
525
p.rctx.RendererType = markup.RendererTypeRepoMarkdown
523
526
524
527
if params.ReadmeFileName != "" {
525
-
var htmlString string
526
528
ext := filepath.Ext(params.ReadmeFileName)
527
529
switch ext {
528
530
case ".md", ".markdown", ".mdown", ".mkdn", ".mkd":
529
-
htmlString = p.rctx.Sanitize(htmlString)
530
-
htmlString = p.rctx.RenderMarkdown(params.Readme)
531
531
params.Raw = false
532
-
params.HTMLReadme = template.HTML(htmlString)
532
+
htmlString := p.rctx.RenderMarkdown(params.Readme)
533
+
sanitized := p.rctx.SanitizeDefault(htmlString)
534
+
params.HTMLReadme = template.HTML(sanitized)
533
535
default:
534
536
params.Raw = true
535
537
}
···
668
670
p.rctx.RepoInfo = params.RepoInfo
669
671
p.rctx.RendererType = markup.RendererTypeRepoMarkdown
670
672
htmlString := p.rctx.RenderMarkdown(params.Contents)
671
-
params.RenderedContents = template.HTML(p.rctx.Sanitize(htmlString))
673
+
sanitized := p.rctx.SanitizeDefault(htmlString)
674
+
params.RenderedContents = template.HTML(sanitized)
672
675
}
673
676
}
674
677
675
-
if params.Lines < 5000 {
676
-
c := params.Contents
677
-
formatter := chromahtml.New(
678
-
chromahtml.InlineCode(false),
679
-
chromahtml.WithLineNumbers(true),
680
-
chromahtml.WithLinkableLineNumbers(true, "L"),
681
-
chromahtml.Standalone(false),
682
-
chromahtml.WithClasses(true),
683
-
)
678
+
c := params.Contents
679
+
formatter := chromahtml.New(
680
+
chromahtml.InlineCode(false),
681
+
chromahtml.WithLineNumbers(true),
682
+
chromahtml.WithLinkableLineNumbers(true, "L"),
683
+
chromahtml.Standalone(false),
684
+
chromahtml.WithClasses(true),
685
+
)
684
686
685
-
lexer := lexers.Get(filepath.Base(params.Path))
686
-
if lexer == nil {
687
-
lexer = lexers.Fallback
688
-
}
687
+
lexer := lexers.Get(filepath.Base(params.Path))
688
+
if lexer == nil {
689
+
lexer = lexers.Fallback
690
+
}
689
691
690
-
iterator, err := lexer.Tokenise(nil, c)
691
-
if err != nil {
692
-
return fmt.Errorf("chroma tokenize: %w", err)
693
-
}
692
+
iterator, err := lexer.Tokenise(nil, c)
693
+
if err != nil {
694
+
return fmt.Errorf("chroma tokenize: %w", err)
695
+
}
694
696
695
-
var code bytes.Buffer
696
-
err = formatter.Format(&code, style, iterator)
697
-
if err != nil {
698
-
return fmt.Errorf("chroma format: %w", err)
699
-
}
700
-
701
-
params.Contents = code.String()
697
+
var code bytes.Buffer
698
+
err = formatter.Format(&code, style, iterator)
699
+
if err != nil {
700
+
return fmt.Errorf("chroma format: %w", err)
702
701
}
703
702
703
+
params.Contents = code.String()
704
704
params.Active = "overview"
705
705
return p.executeRepo("repo/blob", w, params)
706
706
}
···
779
779
RepoInfo repoinfo.RepoInfo
780
780
Active string
781
781
Issues []db.Issue
782
-
DidHandleMap map[string]string
783
782
Page pagination.Page
784
783
FilteringByOpen bool
785
784
}
···
793
792
LoggedInUser *oauth.User
794
793
RepoInfo repoinfo.RepoInfo
795
794
Active string
796
-
Issue db.Issue
795
+
Issue *db.Issue
797
796
Comments []db.Comment
798
797
IssueOwnerHandle string
799
-
DidHandleMap map[string]string
800
798
801
799
OrderedReactionKinds []db.ReactionKind
802
800
Reactions map[db.ReactionKind]int
···
850
848
851
849
type SingleIssueCommentParams struct {
852
850
LoggedInUser *oauth.User
853
-
DidHandleMap map[string]string
854
851
RepoInfo repoinfo.RepoInfo
855
852
Issue *db.Issue
856
853
Comment *db.Comment
···
882
879
RepoInfo repoinfo.RepoInfo
883
880
Pulls []*db.Pull
884
881
Active string
885
-
DidHandleMap map[string]string
886
882
FilteringBy db.PullState
887
883
Stacks map[string]db.Stack
888
884
Pipelines map[string]db.Pipeline
···
915
911
LoggedInUser *oauth.User
916
912
RepoInfo repoinfo.RepoInfo
917
913
Active string
918
-
DidHandleMap map[string]string
919
914
Pull *db.Pull
920
915
Stack db.Stack
921
916
AbandonedPulls []*db.Pull
···
935
930
936
931
type RepoPullPatchParams struct {
937
932
LoggedInUser *oauth.User
938
-
DidHandleMap map[string]string
939
933
RepoInfo repoinfo.RepoInfo
940
934
Pull *db.Pull
941
935
Stack db.Stack
···
953
947
954
948
type RepoPullInterdiffParams struct {
955
949
LoggedInUser *oauth.User
956
-
DidHandleMap map[string]string
957
950
RepoInfo repoinfo.RepoInfo
958
951
Pull *db.Pull
959
952
Round int
···
1166
1159
return p.execute("strings/dashboard", w, params)
1167
1160
}
1168
1161
1162
+
type StringTimelineParams struct {
1163
+
LoggedInUser *oauth.User
1164
+
Strings []db.String
1165
+
}
1166
+
1167
+
func (p *Pages) StringsTimeline(w io.Writer, params StringTimelineParams) error {
1168
+
return p.execute("strings/timeline", w, params)
1169
+
}
1170
+
1169
1171
type SingleStringParams struct {
1170
1172
LoggedInUser *oauth.User
1171
1173
ShowRendered bool
···
1182
1184
if params.ShowRendered {
1183
1185
switch markup.GetFormat(params.String.Filename) {
1184
1186
case markup.FormatMarkdown:
1185
-
p.rctx.RendererType = markup.RendererTypeDefault
1187
+
p.rctx.RendererType = markup.RendererTypeRepoMarkdown
1186
1188
htmlString := p.rctx.RenderMarkdown(params.String.Contents)
1187
-
params.RenderedContents = template.HTML(p.rctx.Sanitize(htmlString))
1189
+
sanitized := p.rctx.SanitizeDefault(htmlString)
1190
+
params.RenderedContents = template.HTML(sanitized)
1188
1191
}
1189
1192
}
1190
1193
+26
appview/pages/templates/favicon.html
···
1
+
{{ define "favicon" }}
2
+
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32">
3
+
<style>
4
+
.favicon-text {
5
+
fill: #000000;
6
+
stroke: none;
7
+
}
8
+
9
+
@media (prefers-color-scheme: dark) {
10
+
.favicon-text {
11
+
fill: #ffffff;
12
+
stroke: none;
13
+
}
14
+
}
15
+
</style>
16
+
17
+
<g style="display:inline">
18
+
<path d="M0-2.117h62.177v25.135H0z" style="display:inline;fill:none;fill-opacity:1;stroke-width:.396875" transform="translate(11.01 6.9)"/>
19
+
<path d="M3.64 22.787c-1.697 0-2.943-.45-3.74-1.35-.77-.9-1.156-2.094-1.156-3.585 0-.36.013-.72.038-1.08.052-.385.129-.873.232-1.464L.44 6.826h-5.089l.733-4.394h3.2c.822 0 1.439-.168 1.85-.502.437-.334.72-.938.848-1.812l.771-4.703h5.243L6.84 2.432h7.787l-.733 4.394H6.107L4.257 17.93l.77.27 6.015-4.742 2.775 3.161-2.313 2.005c-.822.694-1.568 1.31-2.236 1.85-.668.515-1.31.952-1.927 1.311a7.406 7.406 0 0 1-1.774.733c-.59.18-1.233.27-1.927.27z"
20
+
aria-label="tangled.sh"
21
+
class="favicon-text"
22
+
style="font-size:16.2278px;font-family:'IBM Plex Mono';-inkscape-font-specification:'IBM Plex Mono, Normal';display:inline;fill-opacity:1"
23
+
transform="translate(11.01 6.9)"/>
24
+
</g>
25
+
</svg>
26
+
{{ end }}
+3
-4
appview/pages/templates/knots/dashboard.html
···
38
38
<div>
39
39
<div class="flex justify-between items-center">
40
40
<div class="flex items-center gap-2">
41
-
{{ i "user" "size-4" }}
42
-
{{ $user := index $.DidHandleMap . }}
43
-
<a href="/{{ $user }}">{{ $user }} <span class="ml-2 font-mono text-gray-500">{{.}}</span></a>
41
+
{{ template "user/fragments/picHandleLink" . }}
42
+
<span class="ml-2 font-mono text-gray-500">{{.}}</span>
44
43
</div>
45
44
</div>
46
45
<div class="ml-2 pl-2 pt-2 border-l border-gray-200 dark:border-gray-700">
···
48
47
{{ range $repos }}
49
48
<div class="flex gap-2 items-center">
50
49
{{ i "book-marked" "size-4" }}
51
-
<a href="/{{ .Did }}/{{ .Name }}">
50
+
<a href="/{{ resolve .Did }}/{{ .Name }}">
52
51
{{ .Name }}
53
52
</a>
54
53
</div>
-12
appview/pages/templates/layouts/base.html
···
24
24
{{ block "mainLayout" . }}
25
25
<div class="px-1 col-span-1 md:col-start-3 md:col-span-8 flex flex-col gap-4">
26
26
{{ block "contentLayout" . }}
27
-
<div class="col-span-1 md:col-span-2">
28
-
{{ block "contentLeft" . }} {{ end }}
29
-
</div>
30
27
<main class="col-span-1 md:col-span-8">
31
28
{{ block "content" . }}{{ end }}
32
29
</main>
33
-
<div class="col-span-1 md:col-span-2">
34
-
{{ block "contentRight" . }} {{ end }}
35
-
</div>
36
30
{{ end }}
37
31
38
32
{{ block "contentAfterLayout" . }}
39
-
<div class="col-span-1 md:col-span-2">
40
-
{{ block "contentAfterLeft" . }} {{ end }}
41
-
</div>
42
33
<main class="col-span-1 md:col-span-8">
43
34
{{ block "contentAfter" . }}{{ end }}
44
35
</main>
45
-
<div class="col-span-1 md:col-span-2">
46
-
{{ block "contentAfterRight" . }} {{ end }}
47
-
</div>
48
36
{{ end }}
49
37
</div>
50
38
{{ end }}
+16
-21
appview/pages/templates/layouts/repobase.html
···
5
5
{{ if .RepoInfo.Source }}
6
6
<p class="text-sm">
7
7
<div class="flex items-center">
8
-
{{ i "git-fork" "w-3 h-3 mr-1"}}
8
+
{{ i "git-fork" "w-3 h-3 mr-1 shrink-0" }}
9
9
forked from
10
10
{{ $sourceOwner := didOrHandle .RepoInfo.Source.Did .RepoInfo.SourceHandle }}
11
11
<a class="ml-1 underline" href="/{{ $sourceOwner }}/{{ .RepoInfo.Source.Name }}">{{ $sourceOwner }}/{{ .RepoInfo.Source.Name }}</a>
···
20
20
</div>
21
21
22
22
<div class="flex items-center gap-2 z-auto">
23
+
<a
24
+
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
25
+
href="/{{ .RepoInfo.FullName }}/feed.atom"
26
+
>
27
+
{{ i "rss" "size-4" }}
28
+
</a>
23
29
{{ template "repo/fragments/repoStar" .RepoInfo }}
24
-
{{ if .RepoInfo.DisableFork }}
25
-
<button
26
-
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 disabled:opacity-50 disabled:cursor-not-allowed"
27
-
disabled
28
-
title="Empty repositories cannot be forked"
29
-
>
30
-
{{ i "git-fork" "w-4 h-4" }}
31
-
fork
32
-
</button>
33
-
{{ else }}
34
-
<a
35
-
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
36
-
hx-boost="true"
37
-
href="/{{ .RepoInfo.FullName }}/fork"
38
-
>
39
-
{{ i "git-fork" "w-4 h-4" }}
40
-
fork
41
-
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
42
-
</a>
43
-
{{ end }}
30
+
<a
31
+
class="btn text-sm no-underline hover:no-underline flex items-center gap-2 group"
32
+
hx-boost="true"
33
+
href="/{{ .RepoInfo.FullName }}/fork"
34
+
>
35
+
{{ i "git-fork" "w-4 h-4" }}
36
+
fork
37
+
{{ i "loader-circle" "w-4 h-4 animate-spin hidden group-[.htmx-request]:inline" }}
38
+
</a>
44
39
</div>
45
40
</div>
46
41
{{ template "repo/fragments/repoDescription" . }}
+1
-1
appview/pages/templates/layouts/topbar.html
···
2
2
<nav class="space-x-4 px-6 py-2 rounded bg-white dark:bg-gray-800 dark:text-white drop-shadow-sm">
3
3
<div class="flex justify-between p-0 items-center">
4
4
<div id="left-items">
5
-
<a href="/" hx-boost="true" class="flex gap-2 font-semibold italic">
5
+
<a href="/" hx-boost="true" class="flex gap-2 font-bold italic">
6
6
tangled<sub>alpha</sub>
7
7
</a>
8
8
</div>
+1
-1
appview/pages/templates/repo/commit.html
···
118
118
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
119
119
{{ template "repo/fragments/diffOpts" .DiffOpts }}
120
120
</div>
121
-
<div class="sticky top-0 flex-grow max-h-screen">
121
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
122
122
{{ template "repo/fragments/diffChangedFiles" .Diff }}
123
123
</div>
124
124
{{end}}
+1
-1
appview/pages/templates/repo/compare/compare.html
···
49
49
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
50
50
{{ template "repo/fragments/diffOpts" .DiffOpts }}
51
51
</div>
52
-
<div class="sticky top-0 flex-grow max-h-screen">
52
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
53
53
{{ template "repo/fragments/diffChangedFiles" .Diff }}
54
54
</div>
55
55
{{end}}
-4
appview/pages/templates/repo/empty.html
+104
appview/pages/templates/repo/fragments/cloneDropdown.html
···
1
+
{{ define "repo/fragments/cloneDropdown" }}
2
+
{{ $knot := .RepoInfo.Knot }}
3
+
{{ if eq $knot "knot1.tangled.sh" }}
4
+
{{ $knot = "tangled.sh" }}
5
+
{{ end }}
6
+
7
+
<details id="clone-dropdown" class="relative inline-block text-left group">
8
+
<summary class="btn-create cursor-pointer list-none flex items-center gap-2">
9
+
{{ i "download" "w-4 h-4" }}
10
+
<span class="hidden md:inline">code</span>
11
+
<span class="group-open:hidden">
12
+
{{ i "chevron-down" "w-4 h-4" }}
13
+
</span>
14
+
<span class="hidden group-open:flex">
15
+
{{ i "chevron-up" "w-4 h-4" }}
16
+
</span>
17
+
</summary>
18
+
19
+
<div class="absolute right-0 mt-2 w-96 bg-white dark:bg-gray-800 rounded border border-gray-200 dark:border-gray-700 drop-shadow-sm dark:text-white z-[9999]">
20
+
<div class="p-4">
21
+
<div class="mb-3">
22
+
<h3 class="text-sm font-semibold text-gray-900 dark:text-white mb-2">Clone this repository</h3>
23
+
</div>
24
+
25
+
<!-- HTTPS Clone -->
26
+
<div class="mb-3">
27
+
<label class="block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1">HTTPS</label>
28
+
<div class="flex items-center border border-gray-300 dark:border-gray-600 rounded">
29
+
<code
30
+
class="flex-1 px-3 py-2 text-sm bg-gray-50 dark:bg-gray-700 text-gray-900 dark:text-gray-100 rounded-l select-all cursor-pointer whitespace-nowrap overflow-x-auto"
31
+
onclick="window.getSelection().selectAllChildren(this)"
32
+
data-url="https://tangled.sh/{{ .RepoInfo.OwnerWithAt }}/{{ .RepoInfo.Name }}"
33
+
>https://tangled.sh/{{ .RepoInfo.OwnerWithAt }}/{{ .RepoInfo.Name }}</code>
34
+
<button
35
+
onclick="copyToClipboard(this, this.previousElementSibling.getAttribute('data-url'))"
36
+
class="px-3 py-2 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200 border-l border-gray-300 dark:border-gray-600"
37
+
title="Copy to clipboard"
38
+
>
39
+
{{ i "copy" "w-4 h-4" }}
40
+
</button>
41
+
</div>
42
+
</div>
43
+
44
+
<!-- SSH Clone -->
45
+
<div class="mb-3">
46
+
<label class="block text-xs font-medium text-gray-700 dark:text-gray-300 mb-1">SSH</label>
47
+
<div class="flex items-center border border-gray-300 dark:border-gray-600 rounded">
48
+
<code
49
+
class="flex-1 px-3 py-2 text-sm bg-gray-50 dark:bg-gray-700 text-gray-900 dark:text-gray-100 rounded-l select-all cursor-pointer whitespace-nowrap overflow-x-auto"
50
+
onclick="window.getSelection().selectAllChildren(this)"
51
+
data-url="git@{{ $knot }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}"
52
+
>git@{{ $knot }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code>
53
+
<button
54
+
onclick="copyToClipboard(this, this.previousElementSibling.getAttribute('data-url'))"
55
+
class="px-3 py-2 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200 border-l border-gray-300 dark:border-gray-600"
56
+
title="Copy to clipboard"
57
+
>
58
+
{{ i "copy" "w-4 h-4" }}
59
+
</button>
60
+
</div>
61
+
</div>
62
+
63
+
<!-- Note for self-hosted -->
64
+
<p class="text-xs text-gray-500 dark:text-gray-400">
65
+
For self-hosted knots, clone URLs may differ based on your setup.
66
+
</p>
67
+
68
+
<!-- Download Archive -->
69
+
<div class="pt-2 mt-2 border-t border-gray-200 dark:border-gray-700">
70
+
<a
71
+
href="/{{ .RepoInfo.FullName }}/archive/{{ .Ref | urlquery }}"
72
+
class="flex items-center gap-2 px-3 py-2 text-sm"
73
+
>
74
+
{{ i "download" "w-4 h-4" }}
75
+
Download tar.gz
76
+
</a>
77
+
</div>
78
+
79
+
</div>
80
+
</div>
81
+
</details>
82
+
83
+
<script>
84
+
function copyToClipboard(button, text) {
85
+
navigator.clipboard.writeText(text).then(() => {
86
+
const originalContent = button.innerHTML;
87
+
button.innerHTML = `{{ i "check" "w-4 h-4" }}`;
88
+
setTimeout(() => {
89
+
button.innerHTML = originalContent;
90
+
}, 2000);
91
+
});
92
+
}
93
+
94
+
// Close clone dropdown when clicking outside
95
+
document.addEventListener('click', function(event) {
96
+
const cloneDropdown = document.getElementById('clone-dropdown');
97
+
if (cloneDropdown && cloneDropdown.hasAttribute('open')) {
98
+
if (!cloneDropdown.contains(event.target)) {
99
+
cloneDropdown.removeAttribute('open');
100
+
}
101
+
}
102
+
});
103
+
</script>
104
+
{{ end }}
-55
appview/pages/templates/repo/fragments/cloneInstructions.html
···
1
-
{{ define "repo/fragments/cloneInstructions" }}
2
-
{{ $knot := .RepoInfo.Knot }}
3
-
{{ if eq $knot "knot1.tangled.sh" }}
4
-
{{ $knot = "tangled.sh" }}
5
-
{{ end }}
6
-
<section
7
-
class="mt-4 p-6 rounded drop-shadow-sm bg-white dark:bg-gray-800 dark:text-white w-full mx-auto overflow-auto flex flex-col gap-4"
8
-
>
9
-
<div class="flex flex-col gap-2">
10
-
<strong>push</strong>
11
-
<div class="md:pl-4 overflow-x-auto whitespace-nowrap">
12
-
<code class="dark:text-gray-100"
13
-
>git remote add origin
14
-
git@{{ $knot }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code
15
-
>
16
-
</div>
17
-
</div>
18
-
19
-
<div class="flex flex-col gap-2">
20
-
<strong>clone</strong>
21
-
<div class="md:pl-4 flex flex-col gap-2">
22
-
<div class="flex items-center gap-3">
23
-
<span
24
-
class="bg-gray-100 dark:bg-gray-700 p-1 mr-1 font-mono text-sm rounded select-none dark:text-white"
25
-
>HTTP</span
26
-
>
27
-
<div class="overflow-x-auto whitespace-nowrap flex-1">
28
-
<code class="dark:text-gray-100"
29
-
>git clone
30
-
https://tangled.sh/{{ .RepoInfo.OwnerWithAt }}/{{ .RepoInfo.Name }}</code
31
-
>
32
-
</div>
33
-
</div>
34
-
35
-
<div class="flex items-center gap-3">
36
-
<span
37
-
class="bg-gray-100 dark:bg-gray-700 p-1 mr-1 font-mono text-sm rounded select-none dark:text-white"
38
-
>SSH</span
39
-
>
40
-
<div class="overflow-x-auto whitespace-nowrap flex-1">
41
-
<code class="dark:text-gray-100"
42
-
>git clone
43
-
git@{{ $knot }}:{{ .RepoInfo.OwnerHandle }}/{{ .RepoInfo.Name }}</code
44
-
>
45
-
</div>
46
-
</div>
47
-
</div>
48
-
</div>
49
-
50
-
<p class="py-2 text-gray-500 dark:text-gray-400">
51
-
Note that for self-hosted knots, clone URLs may be different based
52
-
on your setup.
53
-
</p>
54
-
</section>
55
-
{{ end }}
+4
-4
appview/pages/templates/repo/fragments/fileTree.html
···
3
3
<details open>
4
4
<summary class="cursor-pointer list-none pt-1">
5
5
<span class="tree-directory inline-flex items-center gap-2 ">
6
-
{{ i "folder" "size-4 fill-current" }}
7
-
<span class="filename text-black dark:text-white">{{ .Name }}</span>
6
+
{{ i "folder" "flex-shrink-0 size-4 fill-current" }}
7
+
<span class="filename truncate text-black dark:text-white">{{ .Name }}</span>
8
8
</span>
9
9
</summary>
10
10
<div class="ml-1 pl-2 border-l border-gray-200 dark:border-gray-700">
···
15
15
</details>
16
16
{{ else if .Name }}
17
17
<div class="tree-file flex items-center gap-2 pt-1">
18
-
{{ i "file" "size-4" }}
19
-
<a href="#file-{{ .Path }}" class="filename text-black dark:text-white no-underline hover:underline">{{ .Name }}</a>
18
+
{{ i "file" "flex-shrink-0 size-4" }}
19
+
<a href="#file-{{ .Path }}" class="filename truncate text-black dark:text-white no-underline hover:underline">{{ .Name }}</a>
20
20
</div>
21
21
{{ else }}
22
22
{{ range $child := .Children }}
+1
-1
appview/pages/templates/repo/fragments/interdiffFiles.html
···
1
1
{{ define "repo/fragments/interdiffFiles" }}
2
2
{{ $fileTree := fileTree .AffectedFiles }}
3
-
<section class="mt-4 px-6 py-2 border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm min-h-full text-sm">
3
+
<section class="px-6 py-2 border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm min-h-full text-sm">
4
4
<div class="diff-stat">
5
5
<div class="flex gap-2 items-center">
6
6
<strong class="text-sm uppercase dark:text-gray-200">files</strong>
+1
-1
appview/pages/templates/repo/fragments/repoDescription.html
···
1
1
{{ define "repo/fragments/repoDescription" }}
2
2
<span id="repo-description" class="flex flex-wrap items-center gap-2 text-sm" hx-target="this" hx-swap="outerHTML">
3
3
{{ if .RepoInfo.Description }}
4
-
{{ .RepoInfo.Description }}
4
+
{{ .RepoInfo.Description | description }}
5
5
{{ else }}
6
6
<span class="italic">this repo has no description</span>
7
7
{{ end }}
+70
-63
appview/pages/templates/repo/index.html
···
14
14
{{ end }}
15
15
<div class="flex items-center justify-between pb-5">
16
16
{{ block "branchSelector" . }}{{ end }}
17
-
<div class="flex md:hidden items-center gap-4">
18
-
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1">
17
+
<div class="flex md:hidden items-center gap-2">
18
+
<a href="/{{ .RepoInfo.FullName }}/commits/{{ .Ref | urlquery }}" class="inline-flex items-center text-sm gap-1 font-bold">
19
19
{{ i "git-commit-horizontal" "w-4" "h-4" }} {{ .TotalCommits }}
20
20
</a>
21
-
<a href="/{{ .RepoInfo.FullName }}/branches" class="inline-flex items-center text-sm gap-1">
21
+
<a href="/{{ .RepoInfo.FullName }}/branches" class="inline-flex items-center text-sm gap-1 font-bold">
22
22
{{ i "git-branch" "w-4" "h-4" }} {{ len .Branches }}
23
23
</a>
24
-
<a href="/{{ .RepoInfo.FullName }}/tags" class="inline-flex items-center text-sm gap-1">
24
+
<a href="/{{ .RepoInfo.FullName }}/tags" class="inline-flex items-center text-sm gap-1 font-bold">
25
25
{{ i "tags" "w-4" "h-4" }} {{ len .Tags }}
26
26
</a>
27
+
{{ template "repo/fragments/cloneDropdown" . }}
27
28
</div>
28
29
</div>
29
30
<div class="grid grid-cols-1 md:grid-cols-2 gap-2">
···
47
48
48
49
49
50
{{ define "branchSelector" }}
50
-
<div class="flex gap-2 items-center items-stretch justify-center">
51
-
<select
52
-
onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)"
53
-
class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700"
54
-
>
55
-
<optgroup label="branches ({{len .Branches}})" class="bold text-sm">
56
-
{{ range .Branches }}
57
-
<option
58
-
value="{{ .Reference.Name }}"
59
-
class="py-1"
60
-
{{ if eq .Reference.Name $.Ref }}
61
-
selected
62
-
{{ end }}
63
-
>
64
-
{{ .Reference.Name }}
65
-
</option>
66
-
{{ end }}
67
-
</optgroup>
68
-
<optgroup label="tags ({{len .Tags}})" class="bold text-sm">
69
-
{{ range .Tags }}
70
-
<option
71
-
value="{{ .Reference.Name }}"
72
-
class="py-1"
73
-
{{ if eq .Reference.Name $.Ref }}
74
-
selected
75
-
{{ end }}
76
-
>
77
-
{{ .Reference.Name }}
78
-
</option>
79
-
{{ else }}
80
-
<option class="py-1" disabled>no tags found</option>
81
-
{{ end }}
82
-
</optgroup>
83
-
</select>
84
-
<div class="flex items-center gap-2">
51
+
<div class="flex gap-2 items-center justify-between w-full">
52
+
<div class="flex gap-2 items-center">
53
+
<select
54
+
onchange="window.location.href = '/{{ .RepoInfo.FullName }}/tree/' + encodeURIComponent(this.value)"
55
+
class="p-1 border max-w-32 border-gray-200 bg-white dark:bg-gray-800 dark:text-white dark:border-gray-700"
56
+
>
57
+
<optgroup label="branches ({{len .Branches}})" class="bold text-sm">
58
+
{{ range .Branches }}
59
+
<option
60
+
value="{{ .Reference.Name }}"
61
+
class="py-1"
62
+
{{ if eq .Reference.Name $.Ref }}
63
+
selected
64
+
{{ end }}
65
+
>
66
+
{{ .Reference.Name }}
67
+
</option>
68
+
{{ end }}
69
+
</optgroup>
70
+
<optgroup label="tags ({{len .Tags}})" class="bold text-sm">
71
+
{{ range .Tags }}
72
+
<option
73
+
value="{{ .Reference.Name }}"
74
+
class="py-1"
75
+
{{ if eq .Reference.Name $.Ref }}
76
+
selected
77
+
{{ end }}
78
+
>
79
+
{{ .Reference.Name }}
80
+
</option>
81
+
{{ else }}
82
+
<option class="py-1" disabled>no tags found</option>
83
+
{{ end }}
84
+
</optgroup>
85
+
</select>
86
+
<div class="flex items-center gap-2">
85
87
{{ $isOwner := and .LoggedInUser .RepoInfo.Roles.IsOwner }}
86
88
{{ $isCollaborator := and .LoggedInUser .RepoInfo.Roles.IsCollaborator }}
87
89
{{ if and (or $isOwner $isCollaborator) .ForkInfo .ForkInfo.IsFork }}
···
115
117
<span>sync</span>
116
118
</button>
117
119
{{ end }}
118
-
<a
119
-
href="/{{ .RepoInfo.FullName }}/compare?base={{ $.Ref | urlquery }}"
120
-
class="btn flex items-center gap-2 no-underline hover:no-underline"
121
-
title="Compare branches or tags"
122
-
>
123
-
{{ i "git-compare" "w-4 h-4" }}
124
-
</a>
120
+
<a
121
+
href="/{{ .RepoInfo.FullName }}/compare?base={{ $.Ref | urlquery }}"
122
+
class="btn flex items-center gap-2 no-underline hover:no-underline"
123
+
title="Compare branches or tags"
124
+
>
125
+
{{ i "git-compare" "w-4 h-4" }}
126
+
</a>
127
+
</div>
125
128
</div>
126
-
</div>
129
+
130
+
<!-- Clone dropdown in top right -->
131
+
<div class="hidden md:flex items-center ">
132
+
{{ template "repo/fragments/cloneDropdown" . }}
133
+
</div>
134
+
</div>
127
135
{{ end }}
128
136
129
137
{{ define "fileTree" }}
···
131
139
{{ $linkstyle := "no-underline hover:underline dark:text-white" }}
132
140
133
141
{{ range .Files }}
134
-
<div class="grid grid-cols-2 gap-4 items-center py-1">
135
-
<div class="col-span-1">
142
+
<div class="grid grid-cols-3 gap-4 items-center py-1">
143
+
<div class="col-span-2">
136
144
{{ $link := printf "/%s/%s/%s/%s" $.RepoInfo.FullName "tree" (urlquery $.Ref) .Name }}
137
145
{{ $icon := "folder" }}
138
146
{{ $iconStyle := "size-4 fill-current" }}
···
144
152
{{ end }}
145
153
<a href="{{ $link }}" class="{{ $linkstyle }}">
146
154
<div class="flex items-center gap-2">
147
-
{{ i $icon $iconStyle }}{{ .Name }}
155
+
{{ i $icon $iconStyle "flex-shrink-0" }}
156
+
<span class="truncate">{{ .Name }}</span>
148
157
</div>
149
158
</a>
150
159
</div>
151
160
152
-
<div class="text-xs col-span-1 text-right">
161
+
<div class="text-sm col-span-1 text-right">
153
162
{{ with .LastCommit }}
154
163
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Hash }}" class="text-gray-500 dark:text-gray-400">{{ template "repo/fragments/time" .When }}</a>
155
164
{{ end }}
···
210
219
</div>
211
220
212
221
<!-- commit info bar -->
213
-
<div class="text-xs mt-2 text-gray-500 dark:text-gray-400 flex items-center">
222
+
<div class="text-xs mt-2 text-gray-500 dark:text-gray-400 flex items-center flex-wrap">
214
223
{{ $verified := $.VerifiedCommits.IsVerified .Hash.String }}
215
224
{{ $hashStyle := "text-gray-700 dark:text-gray-300 bg-gray-100 dark:bg-gray-900" }}
216
225
{{ if $verified }}
···
280
289
</a>
281
290
<div class="flex flex-col gap-1">
282
291
{{ range .BranchesTrunc }}
283
-
<div class="text-base flex items-center justify-between">
284
-
<div class="flex items-center gap-2">
292
+
<div class="text-base flex items-center justify-between overflow-hidden">
293
+
<div class="flex items-center gap-2 min-w-0 flex-1">
285
294
<a href="/{{ $.RepoInfo.FullName }}/tree/{{ .Reference.Name | urlquery }}"
286
-
class="inline no-underline hover:underline dark:text-white">
295
+
class="inline-block truncate no-underline hover:underline dark:text-white">
287
296
{{ .Reference.Name }}
288
297
</a>
289
298
{{ if .Commit }}
290
-
<span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท']"></span>
291
-
<span class="text-xs text-gray-500 dark:text-gray-400">{{ template "repo/fragments/time" .Commit.Committer.When }}</span>
299
+
<span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท'] shrink-0"></span>
300
+
<span class="whitespace-nowrap text-xs text-gray-500 dark:text-gray-400 shrink-0">{{ template "repo/fragments/time" .Commit.Committer.When }}</span>
292
301
{{ end }}
293
302
{{ if .IsDefault }}
294
-
<span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท']"></span>
295
-
<span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 text-xs font-mono">default</span>
303
+
<span class="px-1 text-gray-500 dark:text-gray-400 select-none after:content-['ยท'] shrink-0"></span>
304
+
<span class="bg-gray-200 dark:bg-gray-700 rounded py-1/2 px-1 text-xs font-mono shrink-0">default</span>
296
305
{{ end }}
297
306
</div>
298
307
{{ if ne $.Ref .Reference.Name }}
299
308
<a href="/{{ $.RepoInfo.FullName }}/compare/{{ $.Ref | urlquery }}...{{ .Reference.Name | urlquery }}"
300
-
class="text-xs flex gap-2 items-center"
309
+
class="text-xs flex gap-2 items-center shrink-0 ml-2"
301
310
title="Compare branches or tags">
302
311
{{ i "git-compare" "w-3 h-3" }} compare
303
312
</a>
304
-
{{end}}
313
+
{{ end }}
305
314
</div>
306
315
{{ end }}
307
316
</div>
···
362
371
{{- end -}}</article>
363
372
</section>
364
373
{{- end -}}
365
-
366
-
{{ template "repo/fragments/cloneInstructions" . }}
367
374
{{ end }}
+1
-2
appview/pages/templates/repo/issues/fragments/issueComment.html
···
2
2
{{ with .Comment }}
3
3
<div id="comment-container-{{.CommentId}}">
4
4
<div class="flex items-center gap-2 mb-2 text-gray-500 dark:text-gray-400 text-sm flex-wrap">
5
-
{{ $owner := index $.DidHandleMap .OwnerDid }}
6
-
{{ template "user/fragments/picHandleLink" $owner }}
5
+
{{ template "user/fragments/picHandleLink" .OwnerDid }}
7
6
8
7
<!-- show user "hats" -->
9
8
{{ $isIssueAuthor := eq .OwnerDid $.Issue.OwnerDid }}
+3
-3
appview/pages/templates/repo/issues/issue.html
···
11
11
{{ define "repoContent" }}
12
12
<header class="pb-4">
13
13
<h1 class="text-2xl">
14
-
{{ .Issue.Title }}
14
+
{{ .Issue.Title | description }}
15
15
<span class="text-gray-500 dark:text-gray-400">#{{ .Issue.IssueId }}</span>
16
16
</h1>
17
17
</header>
···
54
54
"Kind" $kind
55
55
"Count" (index $.Reactions $kind)
56
56
"IsReacted" (index $.UserReacted $kind)
57
-
"ThreadAt" $.Issue.IssueAt)
57
+
"ThreadAt" $.Issue.AtUri)
58
58
}}
59
59
{{ end }}
60
60
</div>
···
70
70
{{ if gt $index 0 }}
71
71
<div class="absolute left-8 -top-2 w-px h-2 bg-gray-300 dark:bg-gray-600"></div>
72
72
{{ end }}
73
-
{{ template "repo/issues/fragments/issueComment" (dict "RepoInfo" $.RepoInfo "LoggedInUser" $.LoggedInUser "DidHandleMap" $.DidHandleMap "Issue" $.Issue "Comment" .)}}
73
+
{{ template "repo/issues/fragments/issueComment" (dict "RepoInfo" $.RepoInfo "LoggedInUser" $.LoggedInUser "Issue" $.Issue "Comment" .)}}
74
74
</div>
75
75
{{ end }}
76
76
</section>
+2
-3
appview/pages/templates/repo/issues/issues.html
···
45
45
href="/{{ $.RepoInfo.FullName }}/issues/{{ .IssueId }}"
46
46
class="no-underline hover:underline"
47
47
>
48
-
{{ .Title }}
48
+
{{ .Title | description }}
49
49
<span class="text-gray-500">#{{ .IssueId }}</span>
50
50
</a>
51
51
</div>
···
65
65
</span>
66
66
67
67
<span class="ml-1">
68
-
{{ $owner := index $.DidHandleMap .OwnerDid }}
69
-
{{ template "user/fragments/picHandleLink" $owner }}
68
+
{{ template "user/fragments/picHandleLink" .OwnerDid }}
70
69
</span>
71
70
72
71
<span class="before:content-['ยท']">
+2
-2
appview/pages/templates/repo/pipelines/fragments/pipelineSymbol.html
···
23
23
</div>
24
24
{{ else if $allFail }}
25
25
<div class="flex gap-1 items-center">
26
-
{{ i "x" "size-4 text-red-600" }}
26
+
{{ i "x" "size-4 text-red-500" }}
27
27
<span>0/{{ $total }}</span>
28
28
</div>
29
29
{{ else if $allTimeout }}
30
30
<div class="flex gap-1 items-center">
31
-
{{ i "clock-alert" "size-4 text-orange-400" }}
31
+
{{ i "clock-alert" "size-4 text-orange-500" }}
32
32
<span>0/{{ $total }}</span>
33
33
</div>
34
34
{{ else }}
+1
-1
appview/pages/templates/repo/pipelines/fragments/workflowSymbol.html
···
19
19
{{ $color = "text-gray-600 dark:text-gray-500" }}
20
20
{{ else if eq $kind "timeout" }}
21
21
{{ $icon = "clock-alert" }}
22
-
{{ $color = "text-orange-400 dark:text-orange-300" }}
22
+
{{ $color = "text-orange-400 dark:text-orange-500" }}
23
23
{{ else }}
24
24
{{ $icon = "x" }}
25
25
{{ $color = "text-red-600 dark:text-red-500" }}
+3
-3
appview/pages/templates/repo/pulls/fragments/pullHeader.html
···
1
1
{{ define "repo/pulls/fragments/pullHeader" }}
2
2
<header class="pb-4">
3
3
<h1 class="text-2xl dark:text-white">
4
-
{{ .Pull.Title }}
4
+
{{ .Pull.Title | description }}
5
5
<span class="text-gray-500 dark:text-gray-400">#{{ .Pull.PullId }}</span>
6
6
</h1>
7
7
</header>
···
17
17
{{ $icon = "git-merge" }}
18
18
{{ end }}
19
19
20
+
{{ $owner := resolve .Pull.OwnerDid }}
20
21
<section class="mt-2">
21
22
<div class="flex items-center gap-2">
22
23
<div
···
28
29
</div>
29
30
<span class="text-gray-500 dark:text-gray-400 text-sm flex flex-wrap items-center gap-1">
30
31
opened by
31
-
{{ $owner := index $.DidHandleMap .Pull.OwnerDid }}
32
-
{{ template "user/fragments/picHandleLink" $owner }}
32
+
{{ template "user/fragments/picHandleLink" .Pull.OwnerDid }}
33
33
<span class="select-none before:content-['\00B7']"></span>
34
34
{{ template "repo/fragments/time" .Pull.Created }}
35
35
+1
-1
appview/pages/templates/repo/pulls/fragments/summarizedPullHeader.html
+1
-1
appview/pages/templates/repo/pulls/interdiff.html
···
68
68
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
69
69
{{ template "repo/fragments/diffOpts" .DiffOpts }}
70
70
</div>
71
-
<div class="sticky top-0 flex-grow max-h-screen">
71
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
72
72
{{ template "repo/fragments/interdiffFiles" .Interdiff }}
73
73
</div>
74
74
{{end}}
+1
-1
appview/pages/templates/repo/pulls/patch.html
···
73
73
<div class="flex flex-col gap-4 col-span-1 md:col-span-2">
74
74
{{ template "repo/fragments/diffOpts" .DiffOpts }}
75
75
</div>
76
-
<div class="sticky top-0 flex-grow max-h-screen">
76
+
<div class="sticky top-0 flex-grow max-h-screen overflow-y-auto">
77
77
{{ template "repo/fragments/diffChangedFiles" .Diff }}
78
78
</div>
79
79
{{end}}
+4
-5
appview/pages/templates/repo/pulls/pull.html
···
47
47
<!-- round summary -->
48
48
<div class="rounded drop-shadow-sm bg-white dark:bg-gray-800 p-2 text-gray-500 dark:text-gray-400">
49
49
<span class="gap-1 flex items-center">
50
-
{{ $owner := index $.DidHandleMap $.Pull.OwnerDid }}
50
+
{{ $owner := resolve $.Pull.OwnerDid }}
51
51
{{ $re := "re" }}
52
52
{{ if eq .RoundNumber 0 }}
53
53
{{ $re = "" }}
54
54
{{ end }}
55
55
<span class="hidden md:inline">{{$re}}submitted</span>
56
-
by {{ template "user/fragments/picHandleLink" $owner }}
56
+
by {{ template "user/fragments/picHandleLink" $.Pull.OwnerDid }}
57
57
<span class="select-none before:content-['\00B7']"></span>
58
58
<a class="text-gray-500 dark:text-gray-400 hover:text-gray-500" href="#round-#{{ .RoundNumber }}">{{ template "repo/fragments/shortTime" .Created }}</a>
59
59
<span class="select-none before:content-['ยท']"></span>
···
122
122
{{ end }}
123
123
</div>
124
124
<div class="flex items-center">
125
-
<span>{{ .Title }}</span>
125
+
<span>{{ .Title | description }}</span>
126
126
{{ if gt (len .Body) 0 }}
127
127
<button
128
128
class="py-1/2 px-1 mx-2 bg-gray-200 hover:bg-gray-400 rounded dark:bg-gray-700 dark:hover:bg-gray-600"
···
151
151
<div class="absolute left-8 -top-2 w-px h-2 bg-gray-300 dark:bg-gray-600"></div>
152
152
{{ end }}
153
153
<div class="text-sm text-gray-500 dark:text-gray-400 flex items-center gap-1">
154
-
{{ $owner := index $.DidHandleMap $c.OwnerDid }}
155
-
{{ template "user/fragments/picHandleLink" $owner }}
154
+
{{ template "user/fragments/picHandleLink" $c.OwnerDid }}
156
155
<span class="before:content-['ยท']"></span>
157
156
<a class="text-gray-500 dark:text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" href="#comment-{{.ID}}">{{ template "repo/fragments/time" $c.Created }}</a>
158
157
</div>
+2
-3
appview/pages/templates/repo/pulls/pulls.html
···
50
50
<div class="px-6 py-4 z-5">
51
51
<div class="pb-2">
52
52
<a href="/{{ $.RepoInfo.FullName }}/pulls/{{ .PullId }}" class="dark:text-white">
53
-
{{ .Title }}
53
+
{{ .Title | description }}
54
54
<span class="text-gray-500 dark:text-gray-400">#{{ .PullId }}</span>
55
55
</a>
56
56
</div>
57
57
<div class="text-sm text-gray-500 dark:text-gray-400 flex flex-wrap items-center gap-1">
58
-
{{ $owner := index $.DidHandleMap .OwnerDid }}
59
58
{{ $bgColor := "bg-gray-800 dark:bg-gray-700" }}
60
59
{{ $icon := "ban" }}
61
60
···
76
75
</span>
77
76
78
77
<span class="ml-1">
79
-
{{ template "user/fragments/picHandleLink" $owner }}
78
+
{{ template "user/fragments/picHandleLink" .OwnerDid }}
80
79
</span>
81
80
82
81
<span class="before:content-['ยท']">
+5
-5
appview/pages/templates/repo/tree.html
···
54
54
55
55
{{ range .Files }}
56
56
<div class="grid grid-cols-12 gap-4 items-center py-1">
57
-
<div class="col-span-6 md:col-span-3">
57
+
<div class="col-span-8 md:col-span-4">
58
58
{{ $link := printf "/%s/%s/%s/%s/%s" $.RepoInfo.FullName "tree" (urlquery $.Ref) $.TreePath .Name }}
59
59
{{ $icon := "folder" }}
60
60
{{ $iconStyle := "size-4 fill-current" }}
61
61
62
62
{{ if .IsFile }}
63
63
{{ $icon = "file" }}
64
-
{{ $iconStyle = "flex-shrink-0 size-4" }}
64
+
{{ $iconStyle = "size-4" }}
65
65
{{ end }}
66
66
<a href="{{ $link }}" class="{{ $linkstyle }}">
67
67
<div class="flex items-center gap-2">
68
-
{{ i $icon $iconStyle }}
68
+
{{ i $icon $iconStyle "flex-shrink-0" }}
69
69
<span class="truncate">{{ .Name }}</span>
70
70
</div>
71
71
</a>
72
72
</div>
73
73
74
-
<div class="col-span-0 md:col-span-7 hidden md:block overflow-hidden">
74
+
<div class="col-span-0 md:col-span-6 hidden md:block overflow-hidden">
75
75
{{ with .LastCommit }}
76
76
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Hash }}" class="text-gray-500 dark:text-gray-400 block truncate">{{ .Message }}</a>
77
77
{{ end }}
78
78
</div>
79
79
80
-
<div class="col-span-6 md:col-span-2 text-right">
80
+
<div class="col-span-4 md:col-span-2 text-sm text-right">
81
81
{{ with .LastCommit }}
82
82
<a href="/{{ $.RepoInfo.FullName }}/commit/{{ .Hash }}" class="text-gray-500 dark:text-gray-400">{{ template "repo/fragments/time" .When }}</a>
83
83
{{ end }}
+2
-4
appview/pages/templates/spindles/dashboard.html
···
42
42
<div>
43
43
<div class="flex justify-between items-center">
44
44
<div class="flex items-center gap-2">
45
-
{{ i "user" "size-4" }}
46
-
{{ $user := index $.DidHandleMap . }}
47
-
<a href="/{{ $user }}">{{ $user }}</a>
45
+
{{ template "user/fragments/picHandleLink" . }}
48
46
</div>
49
47
{{ if ne $.LoggedInUser.Did . }}
50
48
{{ block "removeMemberButton" (list $ . ) }} {{ end }}
···
109
107
hx-post="/spindles/{{ $root.Spindle.Instance }}/remove"
110
108
hx-swap="none"
111
109
hx-vals='{"member": "{{$member}}" }'
112
-
hx-confirm="Are you sure you want to remove {{ index $root.DidHandleMap $member }} from this instance?"
110
+
hx-confirm="Are you sure you want to remove {{ resolve $member }} from this instance?"
113
111
>
114
112
{{ i "user-minus" "w-4 h-4" }}
115
113
remove
+3
-2
appview/pages/templates/strings/fragments/form.html
···
13
13
type="text"
14
14
id="filename"
15
15
name="filename"
16
-
placeholder="Filename with extension"
16
+
placeholder="Filename"
17
17
required
18
18
value="{{ .String.Filename }}"
19
19
class="md:max-w-64 dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 px-3 py-2 border rounded"
···
31
31
name="content"
32
32
id="content-textarea"
33
33
wrap="off"
34
-
class="w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400"
34
+
class="w-full dark:bg-gray-700 dark:text-white dark:border-gray-600 dark:placeholder-gray-400 font-mono"
35
35
rows="20"
36
+
spellcheck="false"
36
37
placeholder="Paste your string here!"
37
38
required>{{ .String.Contents }}</textarea>
38
39
<div class="flex justify-between items-center">
+2
-2
appview/pages/templates/strings/string.html
···
35
35
title="Delete string"
36
36
hx-delete="/strings/{{ .String.Did }}/{{ .String.Rkey }}/"
37
37
hx-swap="none"
38
-
hx-confirm="Are you sure you want to delete the gist `{{ .String.Filename }}`?"
38
+
hx-confirm="Are you sure you want to delete the string `{{ .String.Filename }}`?"
39
39
>
40
40
{{ i "trash-2" "size-4" }}
41
41
<span class="hidden md:inline">delete</span>
···
77
77
{{ end }}
78
78
</div>
79
79
</div>
80
-
<div class="overflow-auto relative">
80
+
<div class="overflow-x-auto overflow-y-hidden relative">
81
81
{{ if .ShowRendered }}
82
82
<div id="blob-contents" class="prose dark:prose-invert">{{ .RenderedContents }}</div>
83
83
{{ else }}
+65
appview/pages/templates/strings/timeline.html
···
1
+
{{ define "title" }} all strings {{ end }}
2
+
3
+
{{ define "topbar" }}
4
+
{{ template "layouts/topbar" $ }}
5
+
{{ end }}
6
+
7
+
{{ define "content" }}
8
+
{{ block "timeline" $ }}{{ end }}
9
+
{{ end }}
10
+
11
+
{{ define "timeline" }}
12
+
<div>
13
+
<div class="p-6">
14
+
<p class="text-xl font-bold dark:text-white">All strings</p>
15
+
</div>
16
+
17
+
<div class="flex flex-col gap-4">
18
+
{{ range $i, $s := .Strings }}
19
+
<div class="relative">
20
+
{{ if ne $i 0 }}
21
+
<div class="absolute left-8 -top-4 w-px h-4 bg-gray-300 dark:bg-gray-600"></div>
22
+
{{ end }}
23
+
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
24
+
{{ template "stringCard" $s }}
25
+
</div>
26
+
</div>
27
+
{{ end }}
28
+
</div>
29
+
</div>
30
+
{{ end }}
31
+
32
+
{{ define "stringCard" }}
33
+
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800">
34
+
<div class="font-medium dark:text-white flex gap-2 items-center">
35
+
<a href="/strings/{{ resolve .Did.String }}/{{ .Rkey }}">{{ .Filename }}</a>
36
+
</div>
37
+
{{ with .Description }}
38
+
<div class="text-gray-600 dark:text-gray-300 text-sm">
39
+
{{ . }}
40
+
</div>
41
+
{{ end }}
42
+
43
+
{{ template "stringCardInfo" . }}
44
+
</div>
45
+
{{ end }}
46
+
47
+
{{ define "stringCardInfo" }}
48
+
{{ $stat := .Stats }}
49
+
{{ $resolved := resolve .Did.String }}
50
+
<div class="text-gray-400 pt-4 text-sm font-mono inline-flex items-center gap-2 mt-auto">
51
+
<a href="/strings/{{ $resolved }}" class="flex items-center">
52
+
{{ template "user/fragments/picHandle" $resolved }}
53
+
</a>
54
+
<span class="select-none [&:before]:content-['ยท']"></span>
55
+
<span>{{ $stat.LineCount }} line{{if ne $stat.LineCount 1}}s{{end}}</span>
56
+
<span class="select-none [&:before]:content-['ยท']"></span>
57
+
{{ with .Edited }}
58
+
<span>edited {{ template "repo/fragments/shortTimeAgo" . }}</span>
59
+
{{ else }}
60
+
{{ template "repo/fragments/shortTimeAgo" .Created }}
61
+
{{ end }}
62
+
</div>
63
+
{{ end }}
64
+
65
+
+183
appview/pages/templates/timeline/timeline.html
···
1
+
{{ define "title" }}timeline{{ end }}
2
+
3
+
{{ define "extrameta" }}
4
+
<meta property="og:title" content="timeline ยท tangled" />
5
+
<meta property="og:type" content="object" />
6
+
<meta property="og:url" content="https://tangled.sh" />
7
+
<meta property="og:description" content="tightly-knit social coding" />
8
+
{{ end }}
9
+
10
+
{{ define "content" }}
11
+
{{ if .LoggedInUser }}
12
+
{{ else }}
13
+
{{ block "hero" $ }}{{ end }}
14
+
{{ end }}
15
+
16
+
{{ block "trending" $ }}{{ end }}
17
+
{{ block "timeline" $ }}{{ end }}
18
+
{{ end }}
19
+
20
+
{{ define "hero" }}
21
+
<div class="flex flex-col text-black dark:text-white p-6 gap-6 max-w-xl">
22
+
<div class="font-bold text-4xl">tightly-knit<br>social coding.</div>
23
+
24
+
<p class="text-lg">
25
+
tangled is a new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
26
+
</p>
27
+
<p class="text-lg">
28
+
we envision a place where developers have complete ownership of their
29
+
code, open source communities can freely self-govern and most
30
+
importantly, coding can be social and fun again.
31
+
</p>
32
+
33
+
<div class="flex gap-6 items-center">
34
+
<a href="/signup" class="no-underline hover:no-underline ">
35
+
<button class="btn-create flex gap-2 px-4 items-center">
36
+
join now {{ i "arrow-right" "size-4" }}
37
+
</button>
38
+
</a>
39
+
</div>
40
+
</div>
41
+
{{ end }}
42
+
43
+
{{ define "trending" }}
44
+
<div class="w-full md:mx-0 py-4">
45
+
<div class="px-6 pb-4">
46
+
<h3 class="text-xl font-bold dark:text-white flex items-center gap-2">
47
+
Trending
48
+
{{ i "trending-up" "size-4 flex-shrink-0" }}
49
+
</h3>
50
+
</div>
51
+
<div class="flex gap-4 overflow-x-auto scrollbar-hide items-stretch">
52
+
{{ range $index, $repo := .Repos }}
53
+
<div class="flex-none h-full border border-gray-200 dark:border-gray-700 rounded-sm w-96">
54
+
{{ template "user/fragments/repoCard" (list $ $repo true) }}
55
+
</div>
56
+
{{ else }}
57
+
<div class="py-8 px-6 bg-white dark:bg-gray-800 border border-gray-200 dark:border-gray-700 rounded-sm">
58
+
<div class="text-sm text-gray-500 dark:text-gray-400 text-center">
59
+
No trending repositories this week
60
+
</div>
61
+
</div>
62
+
{{ end }}
63
+
</div>
64
+
</div>
65
+
{{ end }}
66
+
67
+
{{ define "timeline" }}
68
+
<div class="py-4">
69
+
<div class="px-6 pb-4">
70
+
<p class="text-xl font-bold dark:text-white">Timeline</p>
71
+
</div>
72
+
73
+
<div class="flex flex-col gap-4">
74
+
{{ range $i, $e := .Timeline }}
75
+
<div class="relative">
76
+
{{ if ne $i 0 }}
77
+
<div class="absolute left-8 -top-4 w-px h-4 bg-gray-300 dark:bg-gray-600"></div>
78
+
{{ end }}
79
+
{{ with $e }}
80
+
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
81
+
{{ if .Repo }}
82
+
{{ block "repoEvent" (list $ .Repo .Source) }} {{ end }}
83
+
{{ else if .Star }}
84
+
{{ block "starEvent" (list $ .Star) }} {{ end }}
85
+
{{ else if .Follow }}
86
+
{{ block "followEvent" (list $ .Follow .Profile .FollowStats) }} {{ end }}
87
+
{{ end }}
88
+
</div>
89
+
{{ end }}
90
+
</div>
91
+
{{ end }}
92
+
</div>
93
+
</div>
94
+
{{ end }}
95
+
96
+
{{ define "repoEvent" }}
97
+
{{ $root := index . 0 }}
98
+
{{ $repo := index . 1 }}
99
+
{{ $source := index . 2 }}
100
+
{{ $userHandle := resolve $repo.Did }}
101
+
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
102
+
{{ template "user/fragments/picHandleLink" $repo.Did }}
103
+
{{ with $source }}
104
+
{{ $sourceDid := resolve .Did }}
105
+
forked
106
+
<a href="/{{ $sourceDid }}/{{ .Name }}"class="no-underline hover:underline">
107
+
{{ $sourceDid }}/{{ .Name }}
108
+
</a>
109
+
to
110
+
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">{{ $repo.Name }}</a>
111
+
{{ else }}
112
+
created
113
+
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">
114
+
{{ $repo.Name }}
115
+
</a>
116
+
{{ end }}
117
+
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $repo.Created }}</span>
118
+
</div>
119
+
{{ with $repo }}
120
+
{{ template "user/fragments/repoCard" (list $root . true) }}
121
+
{{ end }}
122
+
{{ end }}
123
+
124
+
{{ define "starEvent" }}
125
+
{{ $root := index . 0 }}
126
+
{{ $star := index . 1 }}
127
+
{{ with $star }}
128
+
{{ $starrerHandle := resolve .StarredByDid }}
129
+
{{ $repoOwnerHandle := resolve .Repo.Did }}
130
+
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
131
+
{{ template "user/fragments/picHandleLink" $starrerHandle }}
132
+
starred
133
+
<a href="/{{ $repoOwnerHandle }}/{{ .Repo.Name }}" class="no-underline hover:underline">
134
+
{{ $repoOwnerHandle | truncateAt30 }}/{{ .Repo.Name }}
135
+
</a>
136
+
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" .Created }}</span>
137
+
</div>
138
+
{{ with .Repo }}
139
+
{{ template "user/fragments/repoCard" (list $root . true) }}
140
+
{{ end }}
141
+
{{ end }}
142
+
{{ end }}
143
+
144
+
145
+
{{ define "followEvent" }}
146
+
{{ $root := index . 0 }}
147
+
{{ $follow := index . 1 }}
148
+
{{ $profile := index . 2 }}
149
+
{{ $stat := index . 3 }}
150
+
151
+
{{ $userHandle := resolve $follow.UserDid }}
152
+
{{ $subjectHandle := resolve $follow.SubjectDid }}
153
+
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
154
+
{{ template "user/fragments/picHandleLink" $userHandle }}
155
+
followed
156
+
{{ template "user/fragments/picHandleLink" $subjectHandle }}
157
+
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $follow.FollowedAt }}</span>
158
+
</div>
159
+
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800 flex items-center gap-4">
160
+
<div class="flex-shrink-0 max-h-full w-24 h-24">
161
+
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $subjectHandle }}" />
162
+
</div>
163
+
164
+
<div class="flex-1 min-h-0 justify-around flex flex-col">
165
+
<a href="/{{ $subjectHandle }}">
166
+
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $subjectHandle | truncateAt30 }}</span>
167
+
</a>
168
+
{{ with $profile }}
169
+
{{ with .Description }}
170
+
<p class="text-sm pb-2 md:pb-2">{{.}}</p>
171
+
{{ end }}
172
+
{{ end }}
173
+
{{ with $stat }}
174
+
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full text-sm">
175
+
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
176
+
<span id="followers">{{ .Followers }} followers</span>
177
+
<span class="select-none after:content-['ยท']"></span>
178
+
<span id="following">{{ .Following }} following</span>
179
+
</div>
180
+
{{ end }}
181
+
</div>
182
+
</div>
183
+
{{ end }}
-161
appview/pages/templates/timeline.html
···
1
-
{{ define "title" }}timeline{{ end }}
2
-
3
-
{{ define "extrameta" }}
4
-
<meta property="og:title" content="timeline ยท tangled" />
5
-
<meta property="og:type" content="object" />
6
-
<meta property="og:url" content="https://tangled.sh" />
7
-
<meta property="og:description" content="see what's tangling" />
8
-
{{ end }}
9
-
10
-
{{ define "topbar" }}
11
-
{{ template "layouts/topbar" $ }}
12
-
{{ end }}
13
-
14
-
{{ define "content" }}
15
-
{{ with .LoggedInUser }}
16
-
{{ block "timeline" $ }}{{ end }}
17
-
{{ else }}
18
-
{{ block "hero" $ }}{{ end }}
19
-
{{ block "timeline" $ }}{{ end }}
20
-
{{ end }}
21
-
{{ end }}
22
-
23
-
{{ define "hero" }}
24
-
<div class="flex flex-col text-black dark:text-white p-6 gap-6 max-w-xl">
25
-
<div class="font-bold text-4xl">tightly-knit<br>social coding.</div>
26
-
27
-
<p class="text-lg">
28
-
tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
29
-
</p>
30
-
<p class="text-lg">
31
-
we envision a place where developers have complete ownership of their
32
-
code, open source communities can freely self-govern and most
33
-
importantly, coding can be social and fun again.
34
-
</p>
35
-
36
-
<div class="flex gap-6 items-center">
37
-
<a href="/signup" class="no-underline hover:no-underline ">
38
-
<button class="btn-create flex gap-2 px-4 items-center">
39
-
join now {{ i "arrow-right" "size-4" }}
40
-
</button>
41
-
</a>
42
-
</div>
43
-
</div>
44
-
{{ end }}
45
-
46
-
{{ define "timeline" }}
47
-
<div>
48
-
<div class="p-6">
49
-
<p class="text-xl font-bold dark:text-white">Timeline</p>
50
-
</div>
51
-
52
-
<div class="flex flex-col gap-4">
53
-
{{ range $i, $e := .Timeline }}
54
-
<div class="relative">
55
-
{{ if ne $i 0 }}
56
-
<div class="absolute left-8 -top-4 w-px h-4 bg-gray-300 dark:bg-gray-600"></div>
57
-
{{ end }}
58
-
{{ with $e }}
59
-
<div class="flex flex-col divide-y divide-gray-200 dark:divide-gray-700 border border-gray-200 dark:border-gray-700 rounded-sm">
60
-
{{ if .Repo }}
61
-
{{ block "repoEvent" (list $ .Repo .Source) }} {{ end }}
62
-
{{ else if .Star }}
63
-
{{ block "starEvent" (list $ .Star) }} {{ end }}
64
-
{{ else if .Follow }}
65
-
{{ block "followEvent" (list $ .Follow .Profile .FollowStats) }} {{ end }}
66
-
{{ end }}
67
-
</div>
68
-
{{ end }}
69
-
</div>
70
-
{{ end }}
71
-
</div>
72
-
</div>
73
-
{{ end }}
74
-
75
-
{{ define "repoEvent" }}
76
-
{{ $root := index . 0 }}
77
-
{{ $repo := index . 1 }}
78
-
{{ $source := index . 2 }}
79
-
{{ $userHandle := index $root.DidHandleMap $repo.Did }}
80
-
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
81
-
{{ template "user/fragments/picHandleLink" $userHandle }}
82
-
{{ with $source }}
83
-
forked
84
-
<a href="/{{ index $root.DidHandleMap .Did }}/{{ .Name }}"class="no-underline hover:underline">
85
-
{{ index $root.DidHandleMap .Did }}/{{ .Name }}
86
-
</a>
87
-
to
88
-
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">{{ $repo.Name }}</a>
89
-
{{ else }}
90
-
created
91
-
<a href="/{{ $userHandle }}/{{ $repo.Name }}" class="no-underline hover:underline">
92
-
{{ $repo.Name }}
93
-
</a>
94
-
{{ end }}
95
-
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $repo.Created }}</span>
96
-
</div>
97
-
{{ with $repo }}
98
-
{{ template "user/fragments/repoCard" (list $root . true) }}
99
-
{{ end }}
100
-
{{ end }}
101
-
102
-
{{ define "starEvent" }}
103
-
{{ $root := index . 0 }}
104
-
{{ $star := index . 1 }}
105
-
{{ with $star }}
106
-
{{ $starrerHandle := index $root.DidHandleMap .StarredByDid }}
107
-
{{ $repoOwnerHandle := index $root.DidHandleMap .Repo.Did }}
108
-
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
109
-
{{ template "user/fragments/picHandleLink" $starrerHandle }}
110
-
starred
111
-
<a href="/{{ $repoOwnerHandle }}/{{ .Repo.Name }}" class="no-underline hover:underline">
112
-
{{ $repoOwnerHandle | truncateAt30 }}/{{ .Repo.Name }}
113
-
</a>
114
-
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" .Created }}</span>
115
-
</div>
116
-
{{ with .Repo }}
117
-
{{ template "user/fragments/repoCard" (list $root . true) }}
118
-
{{ end }}
119
-
{{ end }}
120
-
{{ end }}
121
-
122
-
123
-
{{ define "followEvent" }}
124
-
{{ $root := index . 0 }}
125
-
{{ $follow := index . 1 }}
126
-
{{ $profile := index . 2 }}
127
-
{{ $stat := index . 3 }}
128
-
129
-
{{ $userHandle := index $root.DidHandleMap $follow.UserDid }}
130
-
{{ $subjectHandle := index $root.DidHandleMap $follow.SubjectDid }}
131
-
<div class="pl-6 py-2 bg-white dark:bg-gray-800 text-gray-600 dark:text-gray-300 flex flex-wrap items-center gap-2 text-sm">
132
-
{{ template "user/fragments/picHandleLink" $userHandle }}
133
-
followed
134
-
{{ template "user/fragments/picHandleLink" $subjectHandle }}
135
-
<span class="text-gray-700 dark:text-gray-400 text-xs">{{ template "repo/fragments/time" $follow.FollowedAt }}</span>
136
-
</div>
137
-
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800 flex items-center gap-4">
138
-
<div class="flex-shrink-0 max-h-full w-24 h-24">
139
-
<img class="object-cover rounded-full p-2" src="{{ fullAvatar $subjectHandle }}" />
140
-
</div>
141
-
142
-
<div class="flex-1 min-h-0 justify-around flex flex-col">
143
-
<a href="/{{ $subjectHandle }}">
144
-
<span class="font-bold dark:text-white overflow-hidden text-ellipsis whitespace-nowrap max-w-full">{{ $subjectHandle | truncateAt30 }}</span>
145
-
</a>
146
-
{{ with $profile }}
147
-
{{ with .Description }}
148
-
<p class="text-sm pb-2 md:pb-2">{{.}}</p>
149
-
{{ end }}
150
-
{{ end }}
151
-
{{ with $stat }}
152
-
<div class="text-sm flex items-center gap-2 my-2 overflow-hidden text-ellipsis whitespace-nowrap max-w-full text-sm">
153
-
<span class="flex-shrink-0">{{ i "users" "size-4" }}</span>
154
-
<span id="followers">{{ .Followers }} followers</span>
155
-
<span class="select-none after:content-['ยท']"></span>
156
-
<span id="following">{{ .Following }} following</span>
157
-
</div>
158
-
{{ end }}
159
-
</div>
160
-
</div>
161
-
{{ end }}
+1
-1
appview/pages/templates/user/fragments/editPins.html
···
27
27
<input type="checkbox" id="repo-{{$idx}}" name="pinnedRepo{{$idx}}" value="{{.RepoAt}}" {{if .IsPinned}}checked{{end}}>
28
28
<label for="repo-{{$idx}}" class="my-0 py-0 normal-case font-normal w-full">
29
29
<div class="flex justify-between items-center w-full">
30
-
<span class="flex-shrink-0 overflow-hidden text-ellipsis ">{{ index $.DidHandleMap .Did }}/{{.Name}}</span>
30
+
<span class="flex-shrink-0 overflow-hidden text-ellipsis ">{{ resolve .Did }}/{{.Name}}</span>
31
31
<div class="flex gap-1 items-center">
32
32
{{ i "star" "size-4 fill-current" }}
33
33
<span>{{ .RepoStats.StarCount }}</span>
+3
-2
appview/pages/templates/user/fragments/picHandleLink.html
···
1
1
{{ define "user/fragments/picHandleLink" }}
2
-
<a href="/{{ . }}" class="flex items-center">
3
-
{{ template "user/fragments/picHandle" . }}
2
+
{{ $resolved := resolve . }}
3
+
<a href="/{{ $resolved }}" class="flex items-center">
4
+
{{ template "user/fragments/picHandle" $resolved }}
4
5
</a>
5
6
{{ end }}
+36
-31
appview/pages/templates/user/fragments/repoCard.html
···
4
4
{{ $fullName := index . 2 }}
5
5
6
6
{{ with $repo }}
7
-
<div class="py-4 px-6 drop-shadow-sm rounded bg-white dark:bg-gray-800">
8
-
<div class="font-medium dark:text-white flex gap-2 items-center">
9
-
{{- if $fullName -}}
10
-
<a href="/{{ index $root.DidHandleMap .Did }}/{{ .Name }}">{{ index $root.DidHandleMap .Did }}/{{ .Name }}</a>
11
-
{{- else -}}
12
-
<a href="/{{ index $root.DidHandleMap .Did }}/{{ .Name }}">{{ .Name }}</a>
13
-
{{- end -}}
14
-
</div>
15
-
{{ with .Description }}
16
-
<div class="text-gray-600 dark:text-gray-300 text-sm">
17
-
{{ . }}
18
-
</div>
7
+
<div class="py-4 px-6 gap-1 flex flex-col drop-shadow-sm rounded bg-white dark:bg-gray-800 min-h-32">
8
+
<div class="font-medium dark:text-white flex items-center">
9
+
{{ if .Source }}
10
+
{{ i "git-fork" "w-4 h-4 mr-1.5 shrink-0" }}
11
+
{{ else }}
12
+
{{ i "book-marked" "w-4 h-4 mr-1.5 shrink-0" }}
19
13
{{ end }}
20
14
21
-
{{ if .RepoStats }}
22
-
{{ block "repoStats" .RepoStats }} {{ end }}
23
-
{{ end }}
15
+
{{ $repoOwner := resolve .Did }}
16
+
{{- if $fullName -}}
17
+
<a href="/{{ $repoOwner }}/{{ .Name }}" class="truncate">{{ $repoOwner }}/{{ .Name }}</a>
18
+
{{- else -}}
19
+
<a href="/{{ $repoOwner }}/{{ .Name }}" class="truncate">{{ .Name }}</a>
20
+
{{- end -}}
21
+
</div>
22
+
{{ with .Description }}
23
+
<div class="text-gray-600 dark:text-gray-300 text-sm line-clamp-2">
24
+
{{ . | description }}
25
+
</div>
26
+
{{ end }}
27
+
28
+
{{ if .RepoStats }}
29
+
{{ block "repoStats" .RepoStats }}{{ end }}
30
+
{{ end }}
24
31
</div>
25
32
{{ end }}
26
33
{{ end }}
27
34
28
35
{{ define "repoStats" }}
29
-
<div class="text-gray-400 pt-4 text-sm font-mono inline-flex gap-4 mt-auto">
36
+
<div class="text-gray-400 text-sm font-mono inline-flex gap-4 mt-auto">
30
37
{{ with .Language }}
31
38
<div class="flex gap-2 items-center text-sm">
32
-
<div class="size-2 rounded-full"
39
+
<div class="size-2 rounded-full"
33
40
style="background: radial-gradient(circle at 35% 35%, color-mix(in srgb, {{ langColor . }} 70%, white), {{ langColor . }} 30%, color-mix(in srgb, {{ langColor . }} 85%, black));"></div>
34
41
<span>{{ . }}</span>
35
42
</div>
36
43
{{ end }}
37
44
{{ with .StarCount }}
38
-
<div class="flex gap-1 items-center text-sm">
39
-
{{ i "star" "w-3 h-3 fill-current" }}
40
-
<span>{{ . }}</span>
41
-
</div>
45
+
<div class="flex gap-1 items-center text-sm">
46
+
{{ i "star" "w-3 h-3 fill-current" }}
47
+
<span>{{ . }}</span>
48
+
</div>
42
49
{{ end }}
43
50
{{ with .IssueCount.Open }}
44
-
<div class="flex gap-1 items-center text-sm">
45
-
{{ i "circle-dot" "w-3 h-3" }}
46
-
<span>{{ . }}</span>
47
-
</div>
51
+
<div class="flex gap-1 items-center text-sm">
52
+
{{ i "circle-dot" "w-3 h-3" }}
53
+
<span>{{ . }}</span>
54
+
</div>
48
55
{{ end }}
49
56
{{ with .PullCount.Open }}
50
-
<div class="flex gap-1 items-center text-sm">
51
-
{{ i "git-pull-request" "w-3 h-3" }}
52
-
<span>{{ . }}</span>
53
-
</div>
57
+
<div class="flex gap-1 items-center text-sm">
58
+
{{ i "git-pull-request" "w-3 h-3" }}
59
+
<span>{{ . }}</span>
60
+
</div>
54
61
{{ end }}
55
62
</div>
56
63
{{ end }}
57
-
58
-
+1
appview/pages/templates/user/login.html
+13
-20
appview/pages/templates/user/profile.html
···
50
50
</div>
51
51
{{ else }}
52
52
<div class="flex flex-col gap-1">
53
-
{{ block "repoEvents" (list .RepoEvents $.DidHandleMap) }} {{ end }}
54
-
{{ block "issueEvents" (list .IssueEvents $.DidHandleMap) }} {{ end }}
55
-
{{ block "pullEvents" (list .PullEvents $.DidHandleMap) }} {{ end }}
53
+
{{ block "repoEvents" .RepoEvents }} {{ end }}
54
+
{{ block "issueEvents" .IssueEvents }} {{ end }}
55
+
{{ block "pullEvents" .PullEvents }} {{ end }}
56
56
</div>
57
57
{{ end }}
58
58
</div>
···
66
66
{{ end }}
67
67
68
68
{{ define "repoEvents" }}
69
-
{{ $items := index . 0 }}
70
-
{{ $handleMap := index . 1 }}
71
-
72
-
{{ if gt (len $items) 0 }}
69
+
{{ if gt (len .) 0 }}
73
70
<details>
74
71
<summary class="list-none cursor-pointer hover:text-gray-500 hover:dark:text-gray-400">
75
72
<div class="flex flex-wrap items-center gap-2">
76
73
{{ i "book-plus" "w-4 h-4" }}
77
-
created {{ len $items }} {{if eq (len $items) 1 }}repository{{else}}repositories{{end}}
74
+
created {{ len . }} {{if eq (len .) 1 }}repository{{else}}repositories{{end}}
78
75
</div>
79
76
</summary>
80
77
<div class="py-2 text-sm flex flex-col gap-3 mb-2">
81
-
{{ range $items }}
78
+
{{ range . }}
82
79
<div class="flex flex-wrap items-center gap-2">
83
80
<span class="text-gray-500 dark:text-gray-400">
84
81
{{ if .Source }}
···
87
84
{{ i "book-plus" "w-4 h-4" }}
88
85
{{ end }}
89
86
</span>
90
-
<a href="/{{ index $handleMap .Repo.Did }}/{{ .Repo.Name }}" class="no-underline hover:underline">
87
+
<a href="/{{ resolve .Repo.Did }}/{{ .Repo.Name }}" class="no-underline hover:underline">
91
88
{{- .Repo.Name -}}
92
89
</a>
93
90
</div>
···
98
95
{{ end }}
99
96
100
97
{{ define "issueEvents" }}
101
-
{{ $i := index . 0 }}
102
-
{{ $items := $i.Items }}
103
-
{{ $stats := $i.Stats }}
104
-
{{ $handleMap := index . 1 }}
98
+
{{ $items := .Items }}
99
+
{{ $stats := .Stats }}
105
100
106
101
{{ if gt (len $items) 0 }}
107
102
<details>
···
129
124
</summary>
130
125
<div class="py-2 text-sm flex flex-col gap-3 mb-2">
131
126
{{ range $items }}
132
-
{{ $repoOwner := index $handleMap .Metadata.Repo.Did }}
127
+
{{ $repoOwner := resolve .Metadata.Repo.Did }}
133
128
{{ $repoName := .Metadata.Repo.Name }}
134
129
{{ $repoUrl := printf "%s/%s" $repoOwner $repoName }}
135
130
···
163
158
{{ end }}
164
159
165
160
{{ define "pullEvents" }}
166
-
{{ $i := index . 0 }}
167
-
{{ $items := $i.Items }}
168
-
{{ $stats := $i.Stats }}
169
-
{{ $handleMap := index . 1 }}
161
+
{{ $items := .Items }}
162
+
{{ $stats := .Stats }}
170
163
{{ if gt (len $items) 0 }}
171
164
<details>
172
165
<summary class="list-none cursor-pointer hover:text-gray-500 hover:dark:text-gray-400">
···
200
193
</summary>
201
194
<div class="py-2 text-sm flex flex-col gap-3 mb-2">
202
195
{{ range $items }}
203
-
{{ $repoOwner := index $handleMap .Repo.Did }}
196
+
{{ $repoOwner := resolve .Repo.Did }}
204
197
{{ $repoName := .Repo.Name }}
205
198
{{ $repoUrl := printf "%s/%s" $repoOwner $repoName }}
206
199
+40
-90
appview/pulls/pulls.go
···
19
19
"tangled.sh/tangled.sh/core/appview/notify"
20
20
"tangled.sh/tangled.sh/core/appview/oauth"
21
21
"tangled.sh/tangled.sh/core/appview/pages"
22
+
"tangled.sh/tangled.sh/core/appview/pages/markup"
22
23
"tangled.sh/tangled.sh/core/appview/reporesolver"
23
24
"tangled.sh/tangled.sh/core/idresolver"
24
25
"tangled.sh/tangled.sh/core/knotclient"
···
28
29
29
30
"github.com/bluekeyes/go-gitdiff/gitdiff"
30
31
comatproto "github.com/bluesky-social/indigo/api/atproto"
31
-
"github.com/bluesky-social/indigo/atproto/syntax"
32
32
lexutil "github.com/bluesky-social/indigo/lex/util"
33
33
"github.com/go-chi/chi/v5"
34
34
"github.com/google/uuid"
···
151
151
}
152
152
}
153
153
154
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
155
-
didHandleMap := make(map[string]string)
156
-
for _, identity := range resolvedIds {
157
-
if !identity.Handle.IsInvalidHandle() {
158
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
159
-
} else {
160
-
didHandleMap[identity.DID.String()] = identity.DID.String()
161
-
}
162
-
}
163
-
164
154
mergeCheckResponse := s.mergeCheck(f, pull, stack)
165
155
resubmitResult := pages.Unknown
166
156
if user != nil && user.Did == pull.OwnerDid {
···
212
202
s.pages.RepoSinglePull(w, pages.RepoSinglePullParams{
213
203
LoggedInUser: user,
214
204
RepoInfo: repoInfo,
215
-
DidHandleMap: didHandleMap,
216
205
Pull: pull,
217
206
Stack: stack,
218
207
AbandonedPulls: abandonedPulls,
···
257
246
patch = mergeable.CombinedPatch()
258
247
}
259
248
260
-
resp, err := ksClient.MergeCheck([]byte(patch), f.OwnerDid(), f.RepoName, pull.TargetBranch)
249
+
resp, err := ksClient.MergeCheck([]byte(patch), f.OwnerDid(), f.Name, pull.TargetBranch)
261
250
if err != nil {
262
251
log.Println("failed to check for mergeability:", err)
263
252
return types.MergeCheckResponse{
···
318
307
// pulls within the same repo
319
308
knot = f.Knot
320
309
ownerDid = f.OwnerDid()
321
-
repoName = f.RepoName
310
+
repoName = f.Name
322
311
}
323
312
324
313
us, err := knotclient.NewUnsignedClient(knot, s.config.Core.Dev)
···
377
366
return
378
367
}
379
368
380
-
identsToResolve := []string{pull.OwnerDid}
381
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
382
-
didHandleMap := make(map[string]string)
383
-
for _, identity := range resolvedIds {
384
-
if !identity.Handle.IsInvalidHandle() {
385
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
386
-
} else {
387
-
didHandleMap[identity.DID.String()] = identity.DID.String()
388
-
}
389
-
}
390
-
391
369
patch := pull.Submissions[roundIdInt].Patch
392
370
diff := patchutil.AsNiceDiff(patch, pull.TargetBranch)
393
371
394
372
s.pages.RepoPullPatchPage(w, pages.RepoPullPatchParams{
395
373
LoggedInUser: user,
396
-
DidHandleMap: didHandleMap,
397
374
RepoInfo: f.RepoInfo(user),
398
375
Pull: pull,
399
376
Stack: stack,
···
440
417
return
441
418
}
442
419
443
-
identsToResolve := []string{pull.OwnerDid}
444
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
445
-
didHandleMap := make(map[string]string)
446
-
for _, identity := range resolvedIds {
447
-
if !identity.Handle.IsInvalidHandle() {
448
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
449
-
} else {
450
-
didHandleMap[identity.DID.String()] = identity.DID.String()
451
-
}
452
-
}
453
-
454
420
currentPatch, err := patchutil.AsDiff(pull.Submissions[roundIdInt].Patch)
455
421
if err != nil {
456
422
log.Println("failed to interdiff; current patch malformed")
···
472
438
RepoInfo: f.RepoInfo(user),
473
439
Pull: pull,
474
440
Round: roundIdInt,
475
-
DidHandleMap: didHandleMap,
476
441
Interdiff: interdiff,
477
442
DiffOpts: diffOpts,
478
443
})
···
494
459
return
495
460
}
496
461
497
-
identsToResolve := []string{pull.OwnerDid}
498
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
499
-
didHandleMap := make(map[string]string)
500
-
for _, identity := range resolvedIds {
501
-
if !identity.Handle.IsInvalidHandle() {
502
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
503
-
} else {
504
-
didHandleMap[identity.DID.String()] = identity.DID.String()
505
-
}
506
-
}
507
-
508
462
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
509
463
w.Write([]byte(pull.Submissions[roundIdInt].Patch))
510
464
}
···
529
483
530
484
pulls, err := db.GetPulls(
531
485
s.db,
532
-
db.FilterEq("repo_at", f.RepoAt),
486
+
db.FilterEq("repo_at", f.RepoAt()),
533
487
db.FilterEq("state", state),
534
488
)
535
489
if err != nil {
···
595
549
m[p.Sha] = p
596
550
}
597
551
598
-
identsToResolve := make([]string, len(pulls))
599
-
for i, pull := range pulls {
600
-
identsToResolve[i] = pull.OwnerDid
601
-
}
602
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), identsToResolve)
603
-
didHandleMap := make(map[string]string)
604
-
for _, identity := range resolvedIds {
605
-
if !identity.Handle.IsInvalidHandle() {
606
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
607
-
} else {
608
-
didHandleMap[identity.DID.String()] = identity.DID.String()
609
-
}
610
-
}
611
-
612
552
s.pages.RepoPulls(w, pages.RepoPullsParams{
613
553
LoggedInUser: s.oauth.GetUser(r),
614
554
RepoInfo: f.RepoInfo(user),
615
555
Pulls: pulls,
616
-
DidHandleMap: didHandleMap,
617
556
FilteringBy: state,
618
557
Stacks: stacks,
619
558
Pipelines: m,
···
671
610
createdAt := time.Now().Format(time.RFC3339)
672
611
ownerDid := user.Did
673
612
674
-
pullAt, err := db.GetPullAt(s.db, f.RepoAt, pull.PullId)
613
+
pullAt, err := db.GetPullAt(s.db, f.RepoAt(), pull.PullId)
675
614
if err != nil {
676
615
log.Println("failed to get pull at", err)
677
616
s.pages.Notice(w, "pull-comment", "Failed to create comment.")
678
617
return
679
618
}
680
619
681
-
atUri := f.RepoAt.String()
620
+
atUri := f.RepoAt().String()
682
621
client, err := s.oauth.AuthorizedClient(r)
683
622
if err != nil {
684
623
log.Println("failed to get authorized client", err)
···
707
646
708
647
comment := &db.PullComment{
709
648
OwnerDid: user.Did,
710
-
RepoAt: f.RepoAt.String(),
649
+
RepoAt: f.RepoAt().String(),
711
650
PullId: pull.PullId,
712
651
Body: body,
713
652
CommentAt: atResp.Uri,
···
753
692
return
754
693
}
755
694
756
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
695
+
result, err := us.Branches(f.OwnerDid(), f.Name)
757
696
if err != nil {
758
697
log.Println("failed to fetch branches", err)
759
698
return
···
799
738
if isPatchBased && !patchutil.IsFormatPatch(patch) {
800
739
if title == "" {
801
740
s.pages.Notice(w, "pull", "Title is required for git-diff patches.")
741
+
return
742
+
}
743
+
sanitizer := markup.NewSanitizer()
744
+
if st := strings.TrimSpace(sanitizer.SanitizeDescription(title)); st == "" {
745
+
s.pages.Notice(w, "pull", "Title is empty after HTML sanitization")
802
746
return
803
747
}
804
748
}
···
877
821
return
878
822
}
879
823
880
-
comparison, err := ksClient.Compare(f.OwnerDid(), f.RepoName, targetBranch, sourceBranch)
824
+
comparison, err := ksClient.Compare(f.OwnerDid(), f.Name, targetBranch, sourceBranch)
881
825
if err != nil {
882
826
log.Println("failed to compare", err)
883
827
s.pages.Notice(w, "pull", err.Error())
···
979
923
return
980
924
}
981
925
982
-
forkAtUri, err := syntax.ParseATURI(fork.AtUri)
983
-
if err != nil {
984
-
log.Println("failed to parse fork AT URI", err)
985
-
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
986
-
return
987
-
}
926
+
forkAtUri := fork.RepoAt()
927
+
forkAtUriStr := forkAtUri.String()
988
928
989
929
pullSource := &db.PullSource{
990
930
Branch: sourceBranch,
···
992
932
}
993
933
recordPullSource := &tangled.RepoPull_Source{
994
934
Branch: sourceBranch,
995
-
Repo: &fork.AtUri,
935
+
Repo: &forkAtUriStr,
996
936
Sha: sourceRev,
997
937
}
998
938
···
1068
1008
Body: body,
1069
1009
TargetBranch: targetBranch,
1070
1010
OwnerDid: user.Did,
1071
-
RepoAt: f.RepoAt,
1011
+
RepoAt: f.RepoAt(),
1072
1012
Rkey: rkey,
1073
1013
Submissions: []*db.PullSubmission{
1074
1014
&initialSubmission,
···
1081
1021
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
1082
1022
return
1083
1023
}
1084
-
pullId, err := db.NextPullId(tx, f.RepoAt)
1024
+
pullId, err := db.NextPullId(tx, f.RepoAt())
1085
1025
if err != nil {
1086
1026
log.Println("failed to get pull id", err)
1087
1027
s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
···
1096
1036
Val: &tangled.RepoPull{
1097
1037
Title: title,
1098
1038
PullId: int64(pullId),
1099
-
TargetRepo: string(f.RepoAt),
1039
+
TargetRepo: string(f.RepoAt()),
1100
1040
TargetBranch: targetBranch,
1101
1041
Patch: patch,
1102
1042
Source: recordPullSource,
···
1274
1214
return
1275
1215
}
1276
1216
1277
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
1217
+
result, err := us.Branches(f.OwnerDid(), f.Name)
1278
1218
if err != nil {
1279
1219
log.Println("failed to reach knotserver", err)
1280
1220
return
···
1358
1298
return
1359
1299
}
1360
1300
1361
-
targetResult, err := targetBranchesClient.Branches(f.OwnerDid(), f.RepoName)
1301
+
targetResult, err := targetBranchesClient.Branches(f.OwnerDid(), f.Name)
1362
1302
if err != nil {
1363
1303
log.Println("failed to reach knotserver for target branches", err)
1364
1304
return
···
1474
1414
return
1475
1415
}
1476
1416
1477
-
comparison, err := ksClient.Compare(f.OwnerDid(), f.RepoName, pull.TargetBranch, pull.PullSource.Branch)
1417
+
comparison, err := ksClient.Compare(f.OwnerDid(), f.Name, pull.TargetBranch, pull.PullSource.Branch)
1478
1418
if err != nil {
1479
1419
log.Printf("compare request failed: %s", err)
1480
1420
s.pages.Notice(w, "resubmit-error", err.Error())
···
1658
1598
Val: &tangled.RepoPull{
1659
1599
Title: pull.Title,
1660
1600
PullId: int64(pull.PullId),
1661
-
TargetRepo: string(f.RepoAt),
1601
+
TargetRepo: string(f.RepoAt()),
1662
1602
TargetBranch: pull.TargetBranch,
1663
1603
Patch: patch, // new patch
1664
1604
Source: recordPullSource,
···
1774
1714
1775
1715
// deleted pulls are marked as deleted in the DB
1776
1716
for _, p := range deletions {
1717
+
// do not delete already merged PRs
1718
+
if p.State == db.PullMerged {
1719
+
continue
1720
+
}
1721
+
1777
1722
err := db.DeletePull(tx, p.RepoAt, p.PullId)
1778
1723
if err != nil {
1779
1724
log.Println("failed to delete pull", err, p.PullId)
···
1814
1759
op, _ := origById[id]
1815
1760
np, _ := newById[id]
1816
1761
1762
+
// do not update already merged PRs
1763
+
if op.State == db.PullMerged {
1764
+
continue
1765
+
}
1766
+
1817
1767
submission := np.Submissions[np.LastRoundNumber()]
1818
1768
1819
1769
// resubmit the old pull
···
1985
1935
}
1986
1936
1987
1937
// Merge the pull request
1988
-
resp, err := ksClient.Merge([]byte(patch), f.OwnerDid(), f.RepoName, pull.TargetBranch, pull.Title, pull.Body, ident.Handle.String(), email.Address)
1938
+
resp, err := ksClient.Merge([]byte(patch), f.OwnerDid(), f.Name, pull.TargetBranch, pull.Title, pull.Body, ident.Handle.String(), email.Address)
1989
1939
if err != nil {
1990
1940
log.Printf("failed to merge pull request: %s", err)
1991
1941
s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···
2007
1957
defer tx.Rollback()
2008
1958
2009
1959
for _, p := range pullsToMerge {
2010
-
err := db.MergePull(tx, f.RepoAt, p.PullId)
1960
+
err := db.MergePull(tx, f.RepoAt(), p.PullId)
2011
1961
if err != nil {
2012
1962
log.Printf("failed to update pull request status in database: %s", err)
2013
1963
s.pages.Notice(w, "pull-merge-error", "Failed to merge pull request. Try again later.")
···
2023
1973
return
2024
1974
}
2025
1975
2026
-
s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.RepoName, pull.PullId))
1976
+
s.pages.HxLocation(w, fmt.Sprintf("/@%s/%s/pulls/%d", f.OwnerHandle(), f.Name, pull.PullId))
2027
1977
}
2028
1978
2029
1979
func (s *Pulls) ClosePull(w http.ResponseWriter, r *http.Request) {
···
2075
2025
2076
2026
for _, p := range pullsToClose {
2077
2027
// Close the pull in the database
2078
-
err = db.ClosePull(tx, f.RepoAt, p.PullId)
2028
+
err = db.ClosePull(tx, f.RepoAt(), p.PullId)
2079
2029
if err != nil {
2080
2030
log.Println("failed to close pull", err)
2081
2031
s.pages.Notice(w, "pull-close", "Failed to close pull.")
···
2143
2093
2144
2094
for _, p := range pullsToReopen {
2145
2095
// Reopen the pull in the database
2146
-
err = db.ReopenPull(tx, f.RepoAt, p.PullId)
2096
+
err = db.ReopenPull(tx, f.RepoAt(), p.PullId)
2147
2097
if err != nil {
2148
2098
log.Println("failed to close pull", err)
2149
2099
s.pages.Notice(w, "pull-close", "Failed to close pull.")
···
2195
2145
Body: body,
2196
2146
TargetBranch: targetBranch,
2197
2147
OwnerDid: user.Did,
2198
-
RepoAt: f.RepoAt,
2148
+
RepoAt: f.RepoAt(),
2199
2149
Rkey: rkey,
2200
2150
Submissions: []*db.PullSubmission{
2201
2151
&initialSubmission,
+6
-6
appview/repo/artifact.go
···
76
76
Artifact: uploadBlobResp.Blob,
77
77
CreatedAt: createdAt.Format(time.RFC3339),
78
78
Name: handler.Filename,
79
-
Repo: f.RepoAt.String(),
79
+
Repo: f.RepoAt().String(),
80
80
Tag: tag.Tag.Hash[:],
81
81
},
82
82
},
···
100
100
artifact := db.Artifact{
101
101
Did: user.Did,
102
102
Rkey: rkey,
103
-
RepoAt: f.RepoAt,
103
+
RepoAt: f.RepoAt(),
104
104
Tag: tag.Tag.Hash,
105
105
CreatedAt: createdAt,
106
106
BlobCid: cid.Cid(uploadBlobResp.Blob.Ref),
···
155
155
156
156
artifacts, err := db.GetArtifact(
157
157
rp.db,
158
-
db.FilterEq("repo_at", f.RepoAt),
158
+
db.FilterEq("repo_at", f.RepoAt()),
159
159
db.FilterEq("tag", tag.Tag.Hash[:]),
160
160
db.FilterEq("name", filename),
161
161
)
···
197
197
198
198
artifacts, err := db.GetArtifact(
199
199
rp.db,
200
-
db.FilterEq("repo_at", f.RepoAt),
200
+
db.FilterEq("repo_at", f.RepoAt()),
201
201
db.FilterEq("tag", tag[:]),
202
202
db.FilterEq("name", filename),
203
203
)
···
239
239
defer tx.Rollback()
240
240
241
241
err = db.DeleteArtifact(tx,
242
-
db.FilterEq("repo_at", f.RepoAt),
242
+
db.FilterEq("repo_at", f.RepoAt()),
243
243
db.FilterEq("tag", artifact.Tag[:]),
244
244
db.FilterEq("name", filename),
245
245
)
···
270
270
return nil, err
271
271
}
272
272
273
-
result, err := us.Tags(f.OwnerDid(), f.RepoName)
273
+
result, err := us.Tags(f.OwnerDid(), f.Name)
274
274
if err != nil {
275
275
log.Println("failed to reach knotserver", err)
276
276
return nil, err
+165
appview/repo/feed.go
···
1
+
package repo
2
+
3
+
import (
4
+
"context"
5
+
"fmt"
6
+
"log"
7
+
"net/http"
8
+
"slices"
9
+
"time"
10
+
11
+
"tangled.sh/tangled.sh/core/appview/db"
12
+
"tangled.sh/tangled.sh/core/appview/reporesolver"
13
+
14
+
"github.com/bluesky-social/indigo/atproto/syntax"
15
+
"github.com/gorilla/feeds"
16
+
)
17
+
18
+
func (rp *Repo) getRepoFeed(ctx context.Context, f *reporesolver.ResolvedRepo) (*feeds.Feed, error) {
19
+
const feedLimitPerType = 100
20
+
21
+
pulls, err := db.GetPullsWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt()))
22
+
if err != nil {
23
+
return nil, err
24
+
}
25
+
26
+
issues, err := db.GetIssuesWithLimit(rp.db, feedLimitPerType, db.FilterEq("repo_at", f.RepoAt()))
27
+
if err != nil {
28
+
return nil, err
29
+
}
30
+
31
+
feed := &feeds.Feed{
32
+
Title: fmt.Sprintf("activity feed for %s", f.OwnerSlashRepo()),
33
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s", rp.config.Core.AppviewHost, f.OwnerSlashRepo()), Type: "text/html", Rel: "alternate"},
34
+
Items: make([]*feeds.Item, 0),
35
+
Updated: time.UnixMilli(0),
36
+
}
37
+
38
+
for _, pull := range pulls {
39
+
items, err := rp.createPullItems(ctx, pull, f)
40
+
if err != nil {
41
+
return nil, err
42
+
}
43
+
feed.Items = append(feed.Items, items...)
44
+
}
45
+
46
+
for _, issue := range issues {
47
+
item, err := rp.createIssueItem(ctx, issue, f)
48
+
if err != nil {
49
+
return nil, err
50
+
}
51
+
feed.Items = append(feed.Items, item)
52
+
}
53
+
54
+
slices.SortFunc(feed.Items, func(a, b *feeds.Item) int {
55
+
if a.Created.After(b.Created) {
56
+
return -1
57
+
}
58
+
return 1
59
+
})
60
+
61
+
if len(feed.Items) > 0 {
62
+
feed.Updated = feed.Items[0].Created
63
+
}
64
+
65
+
return feed, nil
66
+
}
67
+
68
+
func (rp *Repo) createPullItems(ctx context.Context, pull *db.Pull, f *reporesolver.ResolvedRepo) ([]*feeds.Item, error) {
69
+
owner, err := rp.idResolver.ResolveIdent(ctx, pull.OwnerDid)
70
+
if err != nil {
71
+
return nil, err
72
+
}
73
+
74
+
var items []*feeds.Item
75
+
76
+
state := rp.getPullState(pull)
77
+
description := rp.buildPullDescription(owner.Handle, state, pull, f.OwnerSlashRepo())
78
+
79
+
mainItem := &feeds.Item{
80
+
Title: fmt.Sprintf("[PR #%d] %s", pull.PullId, pull.Title),
81
+
Description: description,
82
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId)},
83
+
Created: pull.Created,
84
+
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
85
+
}
86
+
items = append(items, mainItem)
87
+
88
+
for _, round := range pull.Submissions {
89
+
if round == nil || round.RoundNumber == 0 {
90
+
continue
91
+
}
92
+
93
+
roundItem := &feeds.Item{
94
+
Title: fmt.Sprintf("[PR #%d] %s (round #%d)", pull.PullId, pull.Title, round.RoundNumber),
95
+
Description: fmt.Sprintf("@%s submitted changes (at round #%d) on PR #%d in %s", owner.Handle, round.RoundNumber, pull.PullId, f.OwnerSlashRepo()),
96
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/pulls/%d/round/%d/", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), pull.PullId, round.RoundNumber)},
97
+
Created: round.Created,
98
+
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
99
+
}
100
+
items = append(items, roundItem)
101
+
}
102
+
103
+
return items, nil
104
+
}
105
+
106
+
func (rp *Repo) createIssueItem(ctx context.Context, issue db.Issue, f *reporesolver.ResolvedRepo) (*feeds.Item, error) {
107
+
owner, err := rp.idResolver.ResolveIdent(ctx, issue.OwnerDid)
108
+
if err != nil {
109
+
return nil, err
110
+
}
111
+
112
+
state := "closed"
113
+
if issue.Open {
114
+
state = "opened"
115
+
}
116
+
117
+
return &feeds.Item{
118
+
Title: fmt.Sprintf("[Issue #%d] %s", issue.IssueId, issue.Title),
119
+
Description: fmt.Sprintf("@%s %s issue #%d in %s", owner.Handle, state, issue.IssueId, f.OwnerSlashRepo()),
120
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/%s/issues/%d", rp.config.Core.AppviewHost, f.OwnerSlashRepo(), issue.IssueId)},
121
+
Created: issue.Created,
122
+
Author: &feeds.Author{Name: fmt.Sprintf("@%s", owner.Handle)},
123
+
}, nil
124
+
}
125
+
126
+
func (rp *Repo) getPullState(pull *db.Pull) string {
127
+
if pull.State == db.PullOpen {
128
+
return "opened"
129
+
}
130
+
return pull.State.String()
131
+
}
132
+
133
+
func (rp *Repo) buildPullDescription(handle syntax.Handle, state string, pull *db.Pull, repoName string) string {
134
+
base := fmt.Sprintf("@%s %s pull request #%d", handle, state, pull.PullId)
135
+
136
+
if pull.State == db.PullMerged {
137
+
return fmt.Sprintf("%s (on round #%d) in %s", base, pull.LastRoundNumber(), repoName)
138
+
}
139
+
140
+
return fmt.Sprintf("%s in %s", base, repoName)
141
+
}
142
+
143
+
func (rp *Repo) RepoAtomFeed(w http.ResponseWriter, r *http.Request) {
144
+
f, err := rp.repoResolver.Resolve(r)
145
+
if err != nil {
146
+
log.Println("failed to fully resolve repo:", err)
147
+
return
148
+
}
149
+
150
+
feed, err := rp.getRepoFeed(r.Context(), f)
151
+
if err != nil {
152
+
log.Println("failed to get repo feed:", err)
153
+
rp.pages.Error500(w)
154
+
return
155
+
}
156
+
157
+
atom, err := feed.ToAtom()
158
+
if err != nil {
159
+
rp.pages.Error500(w)
160
+
return
161
+
}
162
+
163
+
w.Header().Set("content-type", "application/atom+xml")
164
+
w.Write([]byte(atom))
165
+
}
+15
-12
appview/repo/index.go
···
24
24
25
25
func (rp *Repo) RepoIndex(w http.ResponseWriter, r *http.Request) {
26
26
ref := chi.URLParam(r, "ref")
27
+
27
28
f, err := rp.repoResolver.Resolve(r)
28
29
if err != nil {
29
30
log.Println("failed to fully resolve repo", err)
···
37
38
return
38
39
}
39
40
40
-
result, err := us.Index(f.OwnerDid(), f.RepoName, ref)
41
+
result, err := us.Index(f.OwnerDid(), f.Name, ref)
41
42
if err != nil {
42
43
rp.pages.Error503(w)
43
44
log.Println("failed to reach knotserver", err)
···
118
119
119
120
var forkInfo *types.ForkInfo
120
121
if user != nil && (repoInfo.Roles.IsOwner() || repoInfo.Roles.IsCollaborator()) {
121
-
forkInfo, err = getForkInfo(repoInfo, rp, f, user, signedClient)
122
+
forkInfo, err = getForkInfo(repoInfo, rp, f, result.Ref, user, signedClient)
122
123
if err != nil {
123
124
log.Printf("Failed to fetch fork information: %v", err)
124
125
return
···
126
127
}
127
128
128
129
// TODO: a bit dirty
129
-
languageInfo, err := rp.getLanguageInfo(f, signedClient, chi.URLParam(r, "ref") == "")
130
+
languageInfo, err := rp.getLanguageInfo(f, signedClient, result.Ref, ref == "")
130
131
if err != nil {
131
132
log.Printf("failed to compute language percentages: %s", err)
132
133
// non-fatal
···
161
162
func (rp *Repo) getLanguageInfo(
162
163
f *reporesolver.ResolvedRepo,
163
164
signedClient *knotclient.SignedClient,
165
+
currentRef string,
164
166
isDefaultRef bool,
165
167
) ([]types.RepoLanguageDetails, error) {
166
168
// first attempt to fetch from db
167
169
langs, err := db.GetRepoLanguages(
168
170
rp.db,
169
-
db.FilterEq("repo_at", f.RepoAt),
170
-
db.FilterEq("ref", f.Ref),
171
+
db.FilterEq("repo_at", f.RepoAt()),
172
+
db.FilterEq("ref", currentRef),
171
173
)
172
174
173
175
if err != nil || langs == nil {
174
176
// non-fatal, fetch langs from ks
175
-
ls, err := signedClient.RepoLanguages(f.OwnerDid(), f.RepoName, f.Ref)
177
+
ls, err := signedClient.RepoLanguages(f.OwnerDid(), f.Name, currentRef)
176
178
if err != nil {
177
179
return nil, err
178
180
}
···
182
184
183
185
for l, s := range ls.Languages {
184
186
langs = append(langs, db.RepoLanguage{
185
-
RepoAt: f.RepoAt,
186
-
Ref: f.Ref,
187
+
RepoAt: f.RepoAt(),
188
+
Ref: currentRef,
187
189
IsDefaultRef: isDefaultRef,
188
190
Language: l,
189
191
Bytes: s,
···
234
236
repoInfo repoinfo.RepoInfo,
235
237
rp *Repo,
236
238
f *reporesolver.ResolvedRepo,
239
+
currentRef string,
237
240
user *oauth.User,
238
241
signedClient *knotclient.SignedClient,
239
242
) (*types.ForkInfo, error) {
···
264
267
}
265
268
266
269
if !slices.ContainsFunc(result.Branches, func(branch types.Branch) bool {
267
-
return branch.Name == f.Ref
270
+
return branch.Name == currentRef
268
271
}) {
269
272
forkInfo.Status = types.MissingBranch
270
273
return &forkInfo, nil
271
274
}
272
275
273
-
newHiddenRefResp, err := signedClient.NewHiddenRef(user.Did, repoInfo.Name, f.Ref, f.Ref)
276
+
newHiddenRefResp, err := signedClient.NewHiddenRef(user.Did, repoInfo.Name, currentRef, currentRef)
274
277
if err != nil || newHiddenRefResp.StatusCode != http.StatusNoContent {
275
278
log.Printf("failed to update tracking branch: %s", err)
276
279
return nil, err
277
280
}
278
281
279
-
hiddenRef := fmt.Sprintf("hidden/%s/%s", f.Ref, f.Ref)
282
+
hiddenRef := fmt.Sprintf("hidden/%s/%s", currentRef, currentRef)
280
283
281
284
var status types.AncestorCheckResponse
282
-
forkSyncableResp, err := signedClient.RepoForkAheadBehind(user.Did, string(f.RepoAt), repoInfo.Name, f.Ref, hiddenRef)
285
+
forkSyncableResp, err := signedClient.RepoForkAheadBehind(user.Did, string(f.RepoAt()), repoInfo.Name, currentRef, hiddenRef)
283
286
if err != nil {
284
287
log.Printf("failed to check if fork is ahead/behind: %s", err)
285
288
return nil, err
+72
-50
appview/repo/repo.go
···
95
95
} else {
96
96
uri = "https"
97
97
}
98
-
url := fmt.Sprintf("%s://%s/%s/%s/archive/%s.tar.gz", uri, f.Knot, f.OwnerDid(), f.RepoName, url.PathEscape(refParam))
98
+
url := fmt.Sprintf("%s://%s/%s/%s/archive/%s.tar.gz", uri, f.Knot, f.OwnerDid(), f.Name, url.PathEscape(refParam))
99
99
100
100
http.Redirect(w, r, url, http.StatusFound)
101
101
}
···
123
123
return
124
124
}
125
125
126
-
repolog, err := us.Log(f.OwnerDid(), f.RepoName, ref, page)
126
+
repolog, err := us.Log(f.OwnerDid(), f.Name, ref, page)
127
127
if err != nil {
128
128
log.Println("failed to reach knotserver", err)
129
129
return
130
130
}
131
131
132
-
tagResult, err := us.Tags(f.OwnerDid(), f.RepoName)
132
+
tagResult, err := us.Tags(f.OwnerDid(), f.Name)
133
133
if err != nil {
134
134
log.Println("failed to reach knotserver", err)
135
135
return
···
144
144
tagMap[hash] = append(tagMap[hash], tag.Name)
145
145
}
146
146
147
-
branchResult, err := us.Branches(f.OwnerDid(), f.RepoName)
147
+
branchResult, err := us.Branches(f.OwnerDid(), f.Name)
148
148
if err != nil {
149
149
log.Println("failed to reach knotserver", err)
150
150
return
···
212
212
return
213
213
}
214
214
215
-
repoAt := f.RepoAt
215
+
repoAt := f.RepoAt()
216
216
rkey := repoAt.RecordKey().String()
217
217
if rkey == "" {
218
218
log.Println("invalid aturi for repo", err)
···
262
262
Record: &lexutil.LexiconTypeDecoder{
263
263
Val: &tangled.Repo{
264
264
Knot: f.Knot,
265
-
Name: f.RepoName,
265
+
Name: f.Name,
266
266
Owner: user.Did,
267
-
CreatedAt: f.CreatedAt,
267
+
CreatedAt: f.Created.Format(time.RFC3339),
268
268
Description: &newDescription,
269
269
Spindle: &f.Spindle,
270
270
},
···
310
310
return
311
311
}
312
312
313
-
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/commit/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref))
313
+
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/commit/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref))
314
314
if err != nil {
315
315
log.Println("failed to reach knotserver", err)
316
316
return
···
375
375
if !rp.config.Core.Dev {
376
376
protocol = "https"
377
377
}
378
-
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/tree/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, treePath))
378
+
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/tree/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref, treePath))
379
379
if err != nil {
380
380
log.Println("failed to reach knotserver", err)
381
381
return
···
405
405
user := rp.oauth.GetUser(r)
406
406
407
407
var breadcrumbs [][]string
408
-
breadcrumbs = append(breadcrumbs, []string{f.RepoName, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
408
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
409
409
if treePath != "" {
410
410
for idx, elem := range strings.Split(treePath, "/") {
411
411
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], elem)})
···
436
436
return
437
437
}
438
438
439
-
result, err := us.Tags(f.OwnerDid(), f.RepoName)
439
+
result, err := us.Tags(f.OwnerDid(), f.Name)
440
440
if err != nil {
441
441
log.Println("failed to reach knotserver", err)
442
442
return
443
443
}
444
444
445
-
artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt))
445
+
artifacts, err := db.GetArtifact(rp.db, db.FilterEq("repo_at", f.RepoAt()))
446
446
if err != nil {
447
447
log.Println("failed grab artifacts", err)
448
448
return
···
493
493
return
494
494
}
495
495
496
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
496
+
result, err := us.Branches(f.OwnerDid(), f.Name)
497
497
if err != nil {
498
498
log.Println("failed to reach knotserver", err)
499
499
return
···
522
522
if !rp.config.Core.Dev {
523
523
protocol = "https"
524
524
}
525
-
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/blob/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, filePath))
525
+
resp, err := http.Get(fmt.Sprintf("%s://%s/%s/%s/blob/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref, filePath))
526
526
if err != nil {
527
527
log.Println("failed to reach knotserver", err)
528
528
return
···
542
542
}
543
543
544
544
var breadcrumbs [][]string
545
-
breadcrumbs = append(breadcrumbs, []string{f.RepoName, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
545
+
breadcrumbs = append(breadcrumbs, []string{f.Name, fmt.Sprintf("/%s/tree/%s", f.OwnerSlashRepo(), ref)})
546
546
if filePath != "" {
547
547
for idx, elem := range strings.Split(filePath, "/") {
548
548
breadcrumbs = append(breadcrumbs, []string{elem, fmt.Sprintf("%s/%s", breadcrumbs[idx][1], elem)})
···
575
575
576
576
// fetch the actual binary content like in RepoBlobRaw
577
577
578
-
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, filePath)
578
+
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Name, ref, filePath)
579
579
contentSrc = blobURL
580
580
if !rp.config.Core.Dev {
581
581
contentSrc = markup.GenerateCamoURL(rp.config.Camo.Host, rp.config.Camo.SharedSecret, blobURL)
···
612
612
if !rp.config.Core.Dev {
613
613
protocol = "https"
614
614
}
615
-
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.RepoName, ref, filePath)
616
-
resp, err := http.Get(blobURL)
615
+
616
+
blobURL := fmt.Sprintf("%s://%s/%s/%s/raw/%s/%s", protocol, f.Knot, f.OwnerDid(), f.Repo.Name, ref, filePath)
617
+
618
+
req, err := http.NewRequest("GET", blobURL, nil)
617
619
if err != nil {
618
-
log.Println("failed to reach knotserver:", err)
620
+
log.Println("failed to create request", err)
621
+
return
622
+
}
623
+
624
+
// forward the If-None-Match header
625
+
if clientETag := r.Header.Get("If-None-Match"); clientETag != "" {
626
+
req.Header.Set("If-None-Match", clientETag)
627
+
}
628
+
629
+
client := &http.Client{}
630
+
resp, err := client.Do(req)
631
+
if err != nil {
632
+
log.Println("failed to reach knotserver", err)
619
633
rp.pages.Error503(w)
620
634
return
621
635
}
622
636
defer resp.Body.Close()
623
637
638
+
// forward 304 not modified
639
+
if resp.StatusCode == http.StatusNotModified {
640
+
w.WriteHeader(http.StatusNotModified)
641
+
return
642
+
}
643
+
624
644
if resp.StatusCode != http.StatusOK {
625
645
log.Printf("knotserver returned non-OK status for raw blob %s: %d", blobURL, resp.StatusCode)
626
646
w.WriteHeader(resp.StatusCode)
···
668
688
return
669
689
}
670
690
671
-
repoAt := f.RepoAt
691
+
repoAt := f.RepoAt()
672
692
rkey := repoAt.RecordKey().String()
673
693
if rkey == "" {
674
694
fail("Failed to resolve repo. Try again later", err)
···
722
742
Record: &lexutil.LexiconTypeDecoder{
723
743
Val: &tangled.Repo{
724
744
Knot: f.Knot,
725
-
Name: f.RepoName,
745
+
Name: f.Name,
726
746
Owner: user.Did,
727
-
CreatedAt: f.CreatedAt,
747
+
CreatedAt: f.Created.Format(time.RFC3339),
728
748
Description: &f.Description,
729
749
Spindle: spindlePtr,
730
750
},
···
805
825
Record: &lexutil.LexiconTypeDecoder{
806
826
Val: &tangled.RepoCollaborator{
807
827
Subject: collaboratorIdent.DID.String(),
808
-
Repo: string(f.RepoAt),
828
+
Repo: string(f.RepoAt()),
809
829
CreatedAt: createdAt.Format(time.RFC3339),
810
830
}},
811
831
})
···
830
850
return
831
851
}
832
852
833
-
ksResp, err := ksClient.AddCollaborator(f.OwnerDid(), f.RepoName, collaboratorIdent.DID.String())
853
+
ksResp, err := ksClient.AddCollaborator(f.OwnerDid(), f.Name, collaboratorIdent.DID.String())
834
854
if err != nil {
835
855
fail("Knot was unreachable.", err)
836
856
return
···
864
884
Did: syntax.DID(currentUser.Did),
865
885
Rkey: rkey,
866
886
SubjectDid: collaboratorIdent.DID,
867
-
RepoAt: f.RepoAt,
887
+
RepoAt: f.RepoAt(),
868
888
Created: createdAt,
869
889
})
870
890
if err != nil {
···
902
922
log.Println("failed to get authorized client", err)
903
923
return
904
924
}
905
-
repoRkey := f.RepoAt.RecordKey().String()
906
925
_, err = xrpcClient.RepoDeleteRecord(r.Context(), &comatproto.RepoDeleteRecord_Input{
907
926
Collection: tangled.RepoNSID,
908
927
Repo: user.Did,
909
-
Rkey: repoRkey,
928
+
Rkey: f.Rkey,
910
929
})
911
930
if err != nil {
912
931
log.Printf("failed to delete record: %s", err)
913
932
rp.pages.Notice(w, "settings-delete", "Failed to delete repository from PDS.")
914
933
return
915
934
}
916
-
log.Println("removed repo record ", f.RepoAt.String())
935
+
log.Println("removed repo record ", f.RepoAt().String())
917
936
918
937
secret, err := db.GetRegistrationKey(rp.db, f.Knot)
919
938
if err != nil {
···
927
946
return
928
947
}
929
948
930
-
ksResp, err := ksClient.RemoveRepo(f.OwnerDid(), f.RepoName)
949
+
ksResp, err := ksClient.RemoveRepo(f.OwnerDid(), f.Name)
931
950
if err != nil {
932
951
log.Printf("failed to make request to %s: %s", f.Knot, err)
933
952
return
···
973
992
}
974
993
975
994
// remove repo from db
976
-
err = db.RemoveRepo(tx, f.OwnerDid(), f.RepoName)
995
+
err = db.RemoveRepo(tx, f.OwnerDid(), f.Name)
977
996
if err != nil {
978
997
rp.pages.Notice(w, "settings-delete", "Failed to update appview")
979
998
return
···
1022
1041
return
1023
1042
}
1024
1043
1025
-
ksResp, err := ksClient.SetDefaultBranch(f.OwnerDid(), f.RepoName, branch)
1044
+
ksResp, err := ksClient.SetDefaultBranch(f.OwnerDid(), f.Name, branch)
1026
1045
if err != nil {
1027
1046
log.Printf("failed to make request to %s: %s", f.Knot, err)
1028
1047
return
···
1062
1081
r,
1063
1082
oauth.WithService(f.Spindle),
1064
1083
oauth.WithLxm(lxm),
1084
+
oauth.WithExp(60),
1065
1085
oauth.WithDev(rp.config.Core.Dev),
1066
1086
)
1067
1087
if err != nil {
···
1089
1109
r.Context(),
1090
1110
spindleClient,
1091
1111
&tangled.RepoAddSecret_Input{
1092
-
Repo: f.RepoAt.String(),
1112
+
Repo: f.RepoAt().String(),
1093
1113
Key: key,
1094
1114
Value: value,
1095
1115
},
···
1107
1127
r.Context(),
1108
1128
spindleClient,
1109
1129
&tangled.RepoRemoveSecret_Input{
1110
-
Repo: f.RepoAt.String(),
1130
+
Repo: f.RepoAt().String(),
1111
1131
Key: key,
1112
1132
},
1113
1133
)
···
1169
1189
// return
1170
1190
// }
1171
1191
1172
-
// result, err := us.Branches(f.OwnerDid(), f.RepoName)
1192
+
// result, err := us.Branches(f.OwnerDid(), f.Name)
1173
1193
// if err != nil {
1174
1194
// log.Println("failed to reach knotserver", err)
1175
1195
// return
···
1191
1211
// oauth.WithDev(rp.config.Core.Dev),
1192
1212
// ); err != nil {
1193
1213
// log.Println("failed to create spindle client", err)
1194
-
// } else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt.String()); err != nil {
1214
+
// } else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt().String()); err != nil {
1195
1215
// log.Println("failed to fetch secrets", err)
1196
1216
// } else {
1197
1217
// secrets = resp.Secrets
···
1220
1240
return
1221
1241
}
1222
1242
1223
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
1243
+
result, err := us.Branches(f.OwnerDid(), f.Name)
1224
1244
if err != nil {
1225
1245
log.Println("failed to reach knotserver", err)
1226
1246
return
···
1270
1290
r,
1271
1291
oauth.WithService(f.Spindle),
1272
1292
oauth.WithLxm(tangled.RepoListSecretsNSID),
1293
+
oauth.WithExp(60),
1273
1294
oauth.WithDev(rp.config.Core.Dev),
1274
1295
); err != nil {
1275
1296
log.Println("failed to create spindle client", err)
1276
-
} else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt.String()); err != nil {
1297
+
} else if resp, err := tangled.RepoListSecrets(r.Context(), spindleClient, f.RepoAt().String()); err != nil {
1277
1298
log.Println("failed to fetch secrets", err)
1278
1299
} else {
1279
1300
secrets = resp.Secrets
···
1314
1335
}
1315
1336
1316
1337
func (rp *Repo) SyncRepoFork(w http.ResponseWriter, r *http.Request) {
1338
+
ref := chi.URLParam(r, "ref")
1339
+
1317
1340
user := rp.oauth.GetUser(r)
1318
1341
f, err := rp.repoResolver.Resolve(r)
1319
1342
if err != nil {
···
1341
1364
} else {
1342
1365
uri = "https"
1343
1366
}
1344
-
forkName := fmt.Sprintf("%s", f.RepoName)
1345
-
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.RepoName)
1367
+
forkName := fmt.Sprintf("%s", f.Name)
1368
+
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name)
1346
1369
1347
-
_, err = client.SyncRepoFork(user.Did, forkSourceUrl, forkName, f.Ref)
1370
+
_, err = client.SyncRepoFork(user.Did, forkSourceUrl, forkName, ref)
1348
1371
if err != nil {
1349
1372
rp.pages.Notice(w, "repo", "Failed to sync repository fork.")
1350
1373
return
···
1392
1415
return
1393
1416
}
1394
1417
1395
-
forkName := fmt.Sprintf("%s", f.RepoName)
1418
+
forkName := fmt.Sprintf("%s", f.Name)
1396
1419
1397
1420
// this check is *only* to see if the forked repo name already exists
1398
1421
// in the user's account.
1399
-
existingRepo, err := db.GetRepo(rp.db, user.Did, f.RepoName)
1422
+
existingRepo, err := db.GetRepo(rp.db, user.Did, f.Name)
1400
1423
if err != nil {
1401
1424
if errors.Is(err, sql.ErrNoRows) {
1402
1425
// no existing repo with this name found, we can use the name as is
···
1427
1450
} else {
1428
1451
uri = "https"
1429
1452
}
1430
-
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.RepoName)
1431
-
sourceAt := f.RepoAt.String()
1453
+
forkSourceUrl := fmt.Sprintf("%s://%s/%s/%s", uri, f.Knot, f.OwnerDid(), f.Repo.Name)
1454
+
sourceAt := f.RepoAt().String()
1432
1455
1433
1456
rkey := tid.TID()
1434
1457
repo := &db.Repo{
···
1497
1520
}
1498
1521
log.Println("created repo record: ", atresp.Uri)
1499
1522
1500
-
repo.AtUri = atresp.Uri
1501
1523
err = db.AddRepo(tx, repo)
1502
1524
if err != nil {
1503
1525
log.Println(err)
···
1548
1570
return
1549
1571
}
1550
1572
1551
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
1573
+
result, err := us.Branches(f.OwnerDid(), f.Name)
1552
1574
if err != nil {
1553
1575
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1554
1576
log.Println("failed to reach knotserver", err)
···
1578
1600
head = queryHead
1579
1601
}
1580
1602
1581
-
tags, err := us.Tags(f.OwnerDid(), f.RepoName)
1603
+
tags, err := us.Tags(f.OwnerDid(), f.Name)
1582
1604
if err != nil {
1583
1605
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1584
1606
log.Println("failed to reach knotserver", err)
···
1640
1662
return
1641
1663
}
1642
1664
1643
-
branches, err := us.Branches(f.OwnerDid(), f.RepoName)
1665
+
branches, err := us.Branches(f.OwnerDid(), f.Name)
1644
1666
if err != nil {
1645
1667
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1646
1668
log.Println("failed to reach knotserver", err)
1647
1669
return
1648
1670
}
1649
1671
1650
-
tags, err := us.Tags(f.OwnerDid(), f.RepoName)
1672
+
tags, err := us.Tags(f.OwnerDid(), f.Name)
1651
1673
if err != nil {
1652
1674
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1653
1675
log.Println("failed to reach knotserver", err)
1654
1676
return
1655
1677
}
1656
1678
1657
-
formatPatch, err := us.Compare(f.OwnerDid(), f.RepoName, base, head)
1679
+
formatPatch, err := us.Compare(f.OwnerDid(), f.Name, base, head)
1658
1680
if err != nil {
1659
1681
rp.pages.Notice(w, "compare-error", "Failed to produce comparison. Try again later.")
1660
1682
log.Println("failed to compare", err)
+1
appview/repo/router.go
+37
-104
appview/reporesolver/resolver.go
···
7
7
"fmt"
8
8
"log"
9
9
"net/http"
10
-
"net/url"
11
10
"path"
11
+
"regexp"
12
12
"strings"
13
13
14
14
"github.com/bluesky-social/indigo/atproto/identity"
15
-
"github.com/bluesky-social/indigo/atproto/syntax"
16
15
securejoin "github.com/cyphar/filepath-securejoin"
17
16
"github.com/go-chi/chi/v5"
18
17
"tangled.sh/tangled.sh/core/appview/config"
···
21
20
"tangled.sh/tangled.sh/core/appview/pages"
22
21
"tangled.sh/tangled.sh/core/appview/pages/repoinfo"
23
22
"tangled.sh/tangled.sh/core/idresolver"
24
-
"tangled.sh/tangled.sh/core/knotclient"
25
23
"tangled.sh/tangled.sh/core/rbac"
26
24
)
27
25
28
26
type ResolvedRepo struct {
29
-
Knot string
30
-
OwnerId identity.Identity
31
-
RepoName string
32
-
RepoAt syntax.ATURI
33
-
Description string
34
-
Spindle string
35
-
CreatedAt string
36
-
Ref string
37
-
CurrentDir string
27
+
db.Repo
28
+
OwnerId identity.Identity
29
+
CurrentDir string
30
+
Ref string
38
31
39
32
rr *RepoResolver
40
33
}
···
51
44
}
52
45
53
46
func (rr *RepoResolver) Resolve(r *http.Request) (*ResolvedRepo, error) {
54
-
repoName := chi.URLParam(r, "repo")
55
-
knot, ok := r.Context().Value("knot").(string)
47
+
repo, ok := r.Context().Value("repo").(*db.Repo)
56
48
if !ok {
57
-
log.Println("malformed middleware")
49
+
log.Println("malformed middleware: `repo` does not exist in context")
58
50
return nil, fmt.Errorf("malformed middleware")
59
51
}
60
52
id, ok := r.Context().Value("resolvedId").(identity.Identity)
···
63
55
return nil, fmt.Errorf("malformed middleware")
64
56
}
65
57
66
-
repoAt, ok := r.Context().Value("repoAt").(string)
67
-
if !ok {
68
-
log.Println("malformed middleware")
69
-
return nil, fmt.Errorf("malformed middleware")
70
-
}
71
-
72
-
parsedRepoAt, err := syntax.ParseATURI(repoAt)
73
-
if err != nil {
74
-
log.Println("malformed repo at-uri")
75
-
return nil, fmt.Errorf("malformed middleware")
76
-
}
77
-
58
+
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath()))
78
59
ref := chi.URLParam(r, "ref")
79
60
80
-
if ref == "" {
81
-
us, err := knotclient.NewUnsignedClient(knot, rr.config.Core.Dev)
82
-
if err != nil {
83
-
return nil, err
84
-
}
85
-
86
-
defaultBranch, err := us.DefaultBranch(id.DID.String(), repoName)
87
-
if err != nil {
88
-
return nil, err
89
-
}
90
-
91
-
ref = defaultBranch.Branch
92
-
}
93
-
94
-
currentDir := path.Dir(extractPathAfterRef(r.URL.EscapedPath(), ref))
95
-
96
-
// pass through values from the middleware
97
-
description, ok := r.Context().Value("repoDescription").(string)
98
-
addedAt, ok := r.Context().Value("repoAddedAt").(string)
99
-
spindle, ok := r.Context().Value("repoSpindle").(string)
100
-
101
61
return &ResolvedRepo{
102
-
Knot: knot,
103
-
OwnerId: id,
104
-
RepoName: repoName,
105
-
RepoAt: parsedRepoAt,
106
-
Description: description,
107
-
CreatedAt: addedAt,
108
-
Ref: ref,
109
-
CurrentDir: currentDir,
110
-
Spindle: spindle,
62
+
Repo: *repo,
63
+
OwnerId: id,
64
+
CurrentDir: currentDir,
65
+
Ref: ref,
111
66
112
67
rr: rr,
113
68
}, nil
···
126
81
127
82
var p string
128
83
if handle != "" && !handle.IsInvalidHandle() {
129
-
p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.RepoName)
84
+
p, _ = securejoin.SecureJoin(fmt.Sprintf("@%s", handle), f.Name)
130
85
} else {
131
-
p, _ = securejoin.SecureJoin(f.OwnerDid(), f.RepoName)
86
+
p, _ = securejoin.SecureJoin(f.OwnerDid(), f.Name)
132
87
}
133
88
134
-
return p
135
-
}
136
-
137
-
func (f *ResolvedRepo) DidSlashRepo() string {
138
-
p, _ := securejoin.SecureJoin(f.OwnerDid(), f.RepoName)
139
89
return p
140
90
}
141
91
···
187
137
// this function is a bit weird since it now returns RepoInfo from an entirely different
188
138
// package. we should refactor this or get rid of RepoInfo entirely.
189
139
func (f *ResolvedRepo) RepoInfo(user *oauth.User) repoinfo.RepoInfo {
140
+
repoAt := f.RepoAt()
190
141
isStarred := false
191
142
if user != nil {
192
-
isStarred = db.GetStarStatus(f.rr.execer, user.Did, syntax.ATURI(f.RepoAt))
143
+
isStarred = db.GetStarStatus(f.rr.execer, user.Did, repoAt)
193
144
}
194
145
195
-
starCount, err := db.GetStarCount(f.rr.execer, f.RepoAt)
146
+
starCount, err := db.GetStarCount(f.rr.execer, repoAt)
196
147
if err != nil {
197
-
log.Println("failed to get star count for ", f.RepoAt)
148
+
log.Println("failed to get star count for ", repoAt)
198
149
}
199
-
issueCount, err := db.GetIssueCount(f.rr.execer, f.RepoAt)
150
+
issueCount, err := db.GetIssueCount(f.rr.execer, repoAt)
200
151
if err != nil {
201
-
log.Println("failed to get issue count for ", f.RepoAt)
152
+
log.Println("failed to get issue count for ", repoAt)
202
153
}
203
-
pullCount, err := db.GetPullCount(f.rr.execer, f.RepoAt)
154
+
pullCount, err := db.GetPullCount(f.rr.execer, repoAt)
204
155
if err != nil {
205
-
log.Println("failed to get issue count for ", f.RepoAt)
156
+
log.Println("failed to get issue count for ", repoAt)
206
157
}
207
-
source, err := db.GetRepoSource(f.rr.execer, f.RepoAt)
158
+
source, err := db.GetRepoSource(f.rr.execer, repoAt)
208
159
if errors.Is(err, sql.ErrNoRows) {
209
160
source = ""
210
161
} else if err != nil {
211
-
log.Println("failed to get repo source for ", f.RepoAt, err)
162
+
log.Println("failed to get repo source for ", repoAt, err)
212
163
}
213
164
214
165
var sourceRepo *db.Repo
···
228
179
}
229
180
230
181
knot := f.Knot
231
-
var disableFork bool
232
-
us, err := knotclient.NewUnsignedClient(knot, f.rr.config.Core.Dev)
233
-
if err != nil {
234
-
log.Printf("failed to create unsigned client for %s: %v", knot, err)
235
-
} else {
236
-
result, err := us.Branches(f.OwnerDid(), f.RepoName)
237
-
if err != nil {
238
-
log.Printf("failed to get branches for %s/%s: %v", f.OwnerDid(), f.RepoName, err)
239
-
}
240
-
241
-
if len(result.Branches) == 0 {
242
-
disableFork = true
243
-
}
244
-
}
245
182
246
183
repoInfo := repoinfo.RepoInfo{
247
184
OwnerDid: f.OwnerDid(),
248
185
OwnerHandle: f.OwnerHandle(),
249
-
Name: f.RepoName,
250
-
RepoAt: f.RepoAt,
186
+
Name: f.Name,
187
+
RepoAt: repoAt,
251
188
Description: f.Description,
252
-
Ref: f.Ref,
253
189
IsStarred: isStarred,
254
190
Knot: knot,
255
191
Spindle: f.Spindle,
···
259
195
IssueCount: issueCount,
260
196
PullCount: pullCount,
261
197
},
262
-
DisableFork: disableFork,
263
-
CurrentDir: f.CurrentDir,
198
+
CurrentDir: f.CurrentDir,
199
+
Ref: f.Ref,
264
200
}
265
201
266
202
if sourceRepo != nil {
···
284
220
// after the ref. for example:
285
221
//
286
222
// /@icyphox.sh/foorepo/blob/main/abc/xyz/ => abc/xyz/
287
-
func extractPathAfterRef(fullPath, ref string) string {
223
+
func extractPathAfterRef(fullPath string) string {
288
224
fullPath = strings.TrimPrefix(fullPath, "/")
289
225
290
-
ref = url.PathEscape(ref)
226
+
// match blob/, tree/, or raw/ followed by any ref and then a slash
227
+
//
228
+
// captures everything after that slash (the remaining path)
229
+
pattern := `(?:blob|tree|raw)/[^/]+/(.*)$`
291
230
292
-
prefixes := []string{
293
-
fmt.Sprintf("blob/%s/", ref),
294
-
fmt.Sprintf("tree/%s/", ref),
295
-
fmt.Sprintf("raw/%s/", ref),
296
-
}
231
+
re := regexp.MustCompile(pattern)
232
+
matches := re.FindStringSubmatch(fullPath)
297
233
298
-
for _, prefix := range prefixes {
299
-
idx := strings.Index(fullPath, prefix)
300
-
if idx != -1 {
301
-
return fullPath[idx+len(prefix):]
302
-
}
234
+
if len(matches) > 1 {
235
+
return matches[1]
303
236
}
304
237
305
238
return ""
-13
appview/spindles/spindles.go
···
113
113
return
114
114
}
115
115
116
-
identsToResolve := make([]string, len(members))
117
-
copy(identsToResolve, members)
118
-
resolvedIds := s.IdResolver.ResolveIdents(r.Context(), identsToResolve)
119
-
didHandleMap := make(map[string]string)
120
-
for _, identity := range resolvedIds {
121
-
if !identity.Handle.IsInvalidHandle() {
122
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
123
-
} else {
124
-
didHandleMap[identity.DID.String()] = identity.DID.String()
125
-
}
126
-
}
127
-
128
116
// organize repos by did
129
117
repoMap := make(map[string][]db.Repo)
130
118
for _, r := range repos {
···
136
124
Spindle: spindle,
137
125
Members: members,
138
126
Repos: repoMap,
139
-
DidHandleMap: didHandleMap,
140
127
})
141
128
}
142
129
+9
-12
appview/state/git_http.go
···
3
3
import (
4
4
"fmt"
5
5
"io"
6
+
"maps"
6
7
"net/http"
7
8
8
9
"github.com/bluesky-social/indigo/atproto/identity"
9
10
"github.com/go-chi/chi/v5"
11
+
"tangled.sh/tangled.sh/core/appview/db"
10
12
)
11
13
12
14
func (s *State) InfoRefs(w http.ResponseWriter, r *http.Request) {
13
15
user := r.Context().Value("resolvedId").(identity.Identity)
14
-
knot := r.Context().Value("knot").(string)
15
-
repo := chi.URLParam(r, "repo")
16
+
repo := r.Context().Value("repo").(*db.Repo)
16
17
17
18
scheme := "https"
18
19
if s.config.Core.Dev {
19
20
scheme = "http"
20
21
}
21
22
22
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/info/refs?%s", scheme, knot, user.DID, repo, r.URL.RawQuery)
23
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/info/refs?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
23
24
s.proxyRequest(w, r, targetURL)
24
25
25
26
}
···
30
31
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
31
32
return
32
33
}
33
-
knot := r.Context().Value("knot").(string)
34
-
repo := chi.URLParam(r, "repo")
34
+
repo := r.Context().Value("repo").(*db.Repo)
35
35
36
36
scheme := "https"
37
37
if s.config.Core.Dev {
38
38
scheme = "http"
39
39
}
40
40
41
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-pack?%s", scheme, knot, user.DID, repo, r.URL.RawQuery)
41
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-upload-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
42
42
s.proxyRequest(w, r, targetURL)
43
43
}
44
44
···
48
48
http.Error(w, "failed to resolve user", http.StatusInternalServerError)
49
49
return
50
50
}
51
-
knot := r.Context().Value("knot").(string)
52
-
repo := chi.URLParam(r, "repo")
51
+
repo := r.Context().Value("repo").(*db.Repo)
53
52
54
53
scheme := "https"
55
54
if s.config.Core.Dev {
56
55
scheme = "http"
57
56
}
58
57
59
-
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-receive-pack?%s", scheme, knot, user.DID, repo, r.URL.RawQuery)
58
+
targetURL := fmt.Sprintf("%s://%s/%s/%s/git-receive-pack?%s", scheme, repo.Knot, user.DID, repo.Name, r.URL.RawQuery)
60
59
s.proxyRequest(w, r, targetURL)
61
60
}
62
61
···
85
84
defer resp.Body.Close()
86
85
87
86
// Copy response headers
88
-
for k, v := range resp.Header {
89
-
w.Header()[k] = v
90
-
}
87
+
maps.Copy(w.Header(), resp.Header)
91
88
92
89
// Set response status code
93
90
w.WriteHeader(resp.StatusCode)
+97
-111
appview/state/profile.go
···
89
89
log.Printf("failed to create profile timeline for %s: %s", ident.DID.String(), err)
90
90
}
91
91
92
-
var didsToResolve []string
93
-
for _, r := range collaboratingRepos {
94
-
didsToResolve = append(didsToResolve, r.Did)
95
-
}
96
-
for _, byMonth := range timeline.ByMonth {
97
-
for _, pe := range byMonth.PullEvents.Items {
98
-
didsToResolve = append(didsToResolve, pe.Repo.Did)
99
-
}
100
-
for _, ie := range byMonth.IssueEvents.Items {
101
-
didsToResolve = append(didsToResolve, ie.Metadata.Repo.Did)
102
-
}
103
-
for _, re := range byMonth.RepoEvents {
104
-
didsToResolve = append(didsToResolve, re.Repo.Did)
105
-
if re.Source != nil {
106
-
didsToResolve = append(didsToResolve, re.Source.Did)
107
-
}
108
-
}
109
-
}
110
-
111
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), didsToResolve)
112
-
didHandleMap := make(map[string]string)
113
-
for _, identity := range resolvedIds {
114
-
if !identity.Handle.IsInvalidHandle() {
115
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
116
-
} else {
117
-
didHandleMap[identity.DID.String()] = identity.DID.String()
118
-
}
119
-
}
120
-
121
-
followers, following, err := db.GetFollowerFollowing(s.db, ident.DID.String())
92
+
followers, following, err := db.GetFollowerFollowingCount(s.db, ident.DID.String())
122
93
if err != nil {
123
94
log.Printf("getting follow stats repos for %s: %s", ident.DID.String(), err)
124
95
}
···
145
116
LoggedInUser: loggedInUser,
146
117
Repos: pinnedRepos,
147
118
CollaboratingRepos: pinnedCollaboratingRepos,
148
-
DidHandleMap: didHandleMap,
149
119
Card: pages.ProfileCard{
150
120
UserDid: ident.DID.String(),
151
121
UserHandle: ident.Handle.String(),
···
186
156
followStatus = db.GetFollowStatus(s.db, loggedInUser.Did, ident.DID.String())
187
157
}
188
158
189
-
followers, following, err := db.GetFollowerFollowing(s.db, ident.DID.String())
159
+
followers, following, err := db.GetFollowerFollowingCount(s.db, ident.DID.String())
190
160
if err != nil {
191
161
log.Printf("getting follow stats repos for %s: %s", ident.DID.String(), err)
192
162
}
···
194
164
s.pages.ReposPage(w, pages.ReposPageParams{
195
165
LoggedInUser: loggedInUser,
196
166
Repos: repos,
197
-
DidHandleMap: map[string]string{ident.DID.String(): ident.Handle.String()},
198
167
Card: pages.ProfileCard{
199
168
UserDid: ident.DID.String(),
200
169
UserHandle: ident.Handle.String(),
···
206
175
})
207
176
}
208
177
209
-
func (s *State) feedFromRequest(w http.ResponseWriter, r *http.Request) *feeds.Feed {
178
+
func (s *State) AtomFeedPage(w http.ResponseWriter, r *http.Request) {
210
179
ident, ok := r.Context().Value("resolvedId").(identity.Identity)
211
180
if !ok {
212
181
s.pages.Error404(w)
213
-
return nil
182
+
return
214
183
}
215
184
216
-
feed, err := s.GetProfileFeed(r.Context(), ident.Handle.String(), ident.DID.String())
185
+
feed, err := s.getProfileFeed(r.Context(), &ident)
217
186
if err != nil {
218
187
s.pages.Error500(w)
219
-
return nil
188
+
return
220
189
}
221
190
222
-
return feed
223
-
}
224
-
225
-
func (s *State) AtomFeedPage(w http.ResponseWriter, r *http.Request) {
226
-
feed := s.feedFromRequest(w, r)
227
191
if feed == nil {
228
192
return
229
193
}
···
238
202
w.Write([]byte(atom))
239
203
}
240
204
241
-
func (s *State) GetProfileFeed(ctx context.Context, handle string, did string) (*feeds.Feed, error) {
242
-
timeline, err := db.MakeProfileTimeline(s.db, did)
205
+
func (s *State) getProfileFeed(ctx context.Context, id *identity.Identity) (*feeds.Feed, error) {
206
+
timeline, err := db.MakeProfileTimeline(s.db, id.DID.String())
243
207
if err != nil {
244
208
return nil, err
245
209
}
246
210
247
211
author := &feeds.Author{
248
-
Name: fmt.Sprintf("@%s", handle),
212
+
Name: fmt.Sprintf("@%s", id.Handle),
249
213
}
250
-
feed := &feeds.Feed{
251
-
Title: fmt.Sprintf("timeline feed for %s", author.Name),
252
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s", s.config.Core.AppviewHost, handle), Type: "text/html", Rel: "alternate"},
214
+
215
+
feed := feeds.Feed{
216
+
Title: fmt.Sprintf("%s's timeline", author.Name),
217
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s", s.config.Core.AppviewHost, id.Handle), Type: "text/html", Rel: "alternate"},
253
218
Items: make([]*feeds.Item, 0),
254
219
Updated: time.UnixMilli(0),
255
220
Author: author,
256
221
}
222
+
257
223
for _, byMonth := range timeline.ByMonth {
258
-
for _, pull := range byMonth.PullEvents.Items {
259
-
owner, err := s.idResolver.ResolveIdent(ctx, pull.Repo.Did)
260
-
if err != nil {
261
-
return nil, err
262
-
}
263
-
feed.Items = append(feed.Items, &feeds.Item{
264
-
Title: fmt.Sprintf("%s created pull request '%s' in @%s/%s", author.Name, pull.Title, owner.Handle, pull.Repo.Name),
265
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/pulls/%d", s.config.Core.AppviewHost, owner.Handle, pull.Repo.Name, pull.PullId), Type: "text/html", Rel: "alternate"},
266
-
Created: pull.Created,
267
-
Author: author,
268
-
})
269
-
for _, submission := range pull.Submissions {
270
-
feed.Items = append(feed.Items, &feeds.Item{
271
-
Title: fmt.Sprintf("%s submitted pull request '%s' (round #%d) in @%s/%s", author.Name, pull.Title, submission.RoundNumber, owner.Handle, pull.Repo.Name),
272
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/pulls/%d", s.config.Core.AppviewHost, owner.Handle, pull.Repo.Name, pull.PullId), Type: "text/html", Rel: "alternate"},
273
-
Created: submission.Created,
274
-
Author: author,
275
-
})
276
-
}
224
+
if err := s.addPullRequestItems(ctx, &feed, byMonth.PullEvents.Items, author); err != nil {
225
+
return nil, err
277
226
}
278
-
for _, issue := range byMonth.IssueEvents.Items {
279
-
owner, err := s.idResolver.ResolveIdent(ctx, issue.Metadata.Repo.Did)
280
-
if err != nil {
281
-
return nil, err
282
-
}
283
-
feed.Items = append(feed.Items, &feeds.Item{
284
-
Title: fmt.Sprintf("%s created issue '%s' in @%s/%s", author.Name, issue.Title, owner.Handle, issue.Metadata.Repo.Name),
285
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/issues/%d", s.config.Core.AppviewHost, owner.Handle, issue.Metadata.Repo.Name, issue.IssueId), Type: "text/html", Rel: "alternate"},
286
-
Created: issue.Created,
287
-
Author: author,
288
-
})
227
+
if err := s.addIssueItems(ctx, &feed, byMonth.IssueEvents.Items, author); err != nil {
228
+
return nil, err
289
229
}
290
-
for _, repo := range byMonth.RepoEvents {
291
-
var title string
292
-
if repo.Source != nil {
293
-
id, err := s.idResolver.ResolveIdent(ctx, repo.Source.Did)
294
-
if err != nil {
295
-
return nil, err
296
-
}
297
-
title = fmt.Sprintf("%s forked repository @%s/%s to '%s'", author.Name, id.Handle, repo.Source.Name, repo.Repo.Name)
298
-
} else {
299
-
title = fmt.Sprintf("%s created repository '%s'", author.Name, repo.Repo.Name)
300
-
}
301
-
feed.Items = append(feed.Items, &feeds.Item{
302
-
Title: title,
303
-
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s", s.config.Core.AppviewHost, handle, repo.Repo.Name), Type: "text/html", Rel: "alternate"},
304
-
Created: repo.Repo.Created,
305
-
Author: author,
306
-
})
230
+
if err := s.addRepoItems(ctx, &feed, byMonth.RepoEvents, author); err != nil {
231
+
return nil, err
307
232
}
308
233
}
234
+
309
235
slices.SortFunc(feed.Items, func(a *feeds.Item, b *feeds.Item) int {
310
236
return int(b.Created.UnixMilli()) - int(a.Created.UnixMilli())
311
237
})
238
+
312
239
if len(feed.Items) > 0 {
313
240
feed.Updated = feed.Items[0].Created
314
241
}
315
242
316
-
return feed, nil
243
+
return &feed, nil
244
+
}
245
+
246
+
func (s *State) addPullRequestItems(ctx context.Context, feed *feeds.Feed, pulls []*db.Pull, author *feeds.Author) error {
247
+
for _, pull := range pulls {
248
+
owner, err := s.idResolver.ResolveIdent(ctx, pull.Repo.Did)
249
+
if err != nil {
250
+
return err
251
+
}
252
+
253
+
// Add pull request creation item
254
+
feed.Items = append(feed.Items, s.createPullRequestItem(pull, owner, author))
255
+
}
256
+
return nil
257
+
}
258
+
259
+
func (s *State) addIssueItems(ctx context.Context, feed *feeds.Feed, issues []*db.Issue, author *feeds.Author) error {
260
+
for _, issue := range issues {
261
+
owner, err := s.idResolver.ResolveIdent(ctx, issue.Metadata.Repo.Did)
262
+
if err != nil {
263
+
return err
264
+
}
265
+
266
+
feed.Items = append(feed.Items, s.createIssueItem(issue, owner, author))
267
+
}
268
+
return nil
269
+
}
270
+
271
+
func (s *State) addRepoItems(ctx context.Context, feed *feeds.Feed, repos []db.RepoEvent, author *feeds.Author) error {
272
+
for _, repo := range repos {
273
+
item, err := s.createRepoItem(ctx, repo, author)
274
+
if err != nil {
275
+
return err
276
+
}
277
+
feed.Items = append(feed.Items, item)
278
+
}
279
+
return nil
280
+
}
281
+
282
+
func (s *State) createPullRequestItem(pull *db.Pull, owner *identity.Identity, author *feeds.Author) *feeds.Item {
283
+
return &feeds.Item{
284
+
Title: fmt.Sprintf("%s created pull request '%s' in @%s/%s", author.Name, pull.Title, owner.Handle, pull.Repo.Name),
285
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/pulls/%d", s.config.Core.AppviewHost, owner.Handle, pull.Repo.Name, pull.PullId), Type: "text/html", Rel: "alternate"},
286
+
Created: pull.Created,
287
+
Author: author,
288
+
}
289
+
}
290
+
291
+
func (s *State) createIssueItem(issue *db.Issue, owner *identity.Identity, author *feeds.Author) *feeds.Item {
292
+
return &feeds.Item{
293
+
Title: fmt.Sprintf("%s created issue '%s' in @%s/%s", author.Name, issue.Title, owner.Handle, issue.Metadata.Repo.Name),
294
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s/issues/%d", s.config.Core.AppviewHost, owner.Handle, issue.Metadata.Repo.Name, issue.IssueId), Type: "text/html", Rel: "alternate"},
295
+
Created: issue.Created,
296
+
Author: author,
297
+
}
298
+
}
299
+
300
+
func (s *State) createRepoItem(ctx context.Context, repo db.RepoEvent, author *feeds.Author) (*feeds.Item, error) {
301
+
var title string
302
+
if repo.Source != nil {
303
+
sourceOwner, err := s.idResolver.ResolveIdent(ctx, repo.Source.Did)
304
+
if err != nil {
305
+
return nil, err
306
+
}
307
+
title = fmt.Sprintf("%s forked repository @%s/%s to '%s'", author.Name, sourceOwner.Handle, repo.Source.Name, repo.Repo.Name)
308
+
} else {
309
+
title = fmt.Sprintf("%s created repository '%s'", author.Name, repo.Repo.Name)
310
+
}
311
+
312
+
return &feeds.Item{
313
+
Title: title,
314
+
Link: &feeds.Link{Href: fmt.Sprintf("%s/@%s/%s", s.config.Core.AppviewHost, author.Name[1:], repo.Repo.Name), Type: "text/html", Rel: "alternate"}, // Remove @ prefix
315
+
Created: repo.Repo.Created,
316
+
Author: author,
317
+
}, nil
317
318
}
318
319
319
320
func (s *State) UpdateProfileBio(w http.ResponseWriter, r *http.Request) {
···
518
519
})
519
520
}
520
521
521
-
var didsToResolve []string
522
-
for _, r := range allRepos {
523
-
didsToResolve = append(didsToResolve, r.Did)
524
-
}
525
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), didsToResolve)
526
-
didHandleMap := make(map[string]string)
527
-
for _, identity := range resolvedIds {
528
-
if !identity.Handle.IsInvalidHandle() {
529
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
530
-
} else {
531
-
didHandleMap[identity.DID.String()] = identity.DID.String()
532
-
}
533
-
}
534
-
535
522
s.pages.EditPinsFragment(w, pages.EditPinsParams{
536
523
LoggedInUser: user,
537
524
Profile: profile,
538
525
AllRepos: allRepos,
539
-
DidHandleMap: didHandleMap,
540
526
})
541
527
}
+14
-3
appview/state/router.go
···
32
32
s.pages,
33
33
)
34
34
35
+
router.Get("/favicon.svg", s.Favicon)
36
+
router.Get("/favicon.ico", s.Favicon)
37
+
38
+
userRouter := s.UserRouter(&middleware)
39
+
standardRouter := s.StandardRouter(&middleware)
40
+
35
41
router.HandleFunc("/*", func(w http.ResponseWriter, r *http.Request) {
36
42
pat := chi.URLParam(r, "*")
37
43
if strings.HasPrefix(pat, "did:") || strings.HasPrefix(pat, "@") {
38
-
s.UserRouter(&middleware).ServeHTTP(w, r)
44
+
userRouter.ServeHTTP(w, r)
39
45
} else {
40
46
// Check if the first path element is a valid handle without '@' or a flattened DID
41
47
pathParts := strings.SplitN(pat, "/", 2)
···
58
64
return
59
65
}
60
66
}
61
-
s.StandardRouter(&middleware).ServeHTTP(w, r)
67
+
standardRouter.ServeHTTP(w, r)
62
68
}
63
69
})
64
70
···
72
78
r.Get("/", s.Profile)
73
79
r.Get("/feed.atom", s.AtomFeedPage)
74
80
81
+
// redirect /@handle/repo.git -> /@handle/repo
82
+
r.Get("/{repo}.git", func(w http.ResponseWriter, r *http.Request) {
83
+
nonDotGitPath := strings.TrimSuffix(r.URL.Path, ".git")
84
+
http.Redirect(w, r, nonDotGitPath, http.StatusMovedPermanently)
85
+
})
86
+
75
87
r.With(mw.ResolveRepo()).Route("/{repo}", func(r chi.Router) {
76
88
r.Use(mw.GoImport())
77
-
78
89
r.Mount("/", s.RepoRouter(mw))
79
90
r.Mount("/issues", s.IssuesRouter(mw))
80
91
r.Mount("/pulls", s.PullsRouter(mw))
+29
-30
appview/state/state.go
···
61
61
return nil, fmt.Errorf("failed to create enforcer: %w", err)
62
62
}
63
63
64
-
pgs := pages.NewPages(config)
65
-
66
64
res, err := idresolver.RedisResolver(config.Redis.ToURL())
67
65
if err != nil {
68
66
log.Printf("failed to create redis resolver: %v", err)
69
67
res = idresolver.DefaultResolver()
70
68
}
69
+
70
+
pgs := pages.NewPages(config, res)
71
71
72
72
cache := cache.New(config.Redis.Addr)
73
73
sess := session.New(cache)
···
94
94
tangled.SpindleMemberNSID,
95
95
tangled.SpindleNSID,
96
96
tangled.StringNSID,
97
+
tangled.RepoIssueNSID,
98
+
tangled.RepoIssueCommentNSID,
97
99
},
98
100
nil,
99
101
slog.Default(),
···
157
159
return state, nil
158
160
}
159
161
162
+
func (s *State) Favicon(w http.ResponseWriter, r *http.Request) {
163
+
w.Header().Set("Content-Type", "image/svg+xml")
164
+
w.Header().Set("Cache-Control", "public, max-age=31536000") // one year
165
+
w.Header().Set("ETag", `"favicon-svg-v1"`)
166
+
167
+
if match := r.Header.Get("If-None-Match"); match == `"favicon-svg-v1"` {
168
+
w.WriteHeader(http.StatusNotModified)
169
+
return
170
+
}
171
+
172
+
s.pages.Favicon(w)
173
+
}
174
+
160
175
func (s *State) TermsOfService(w http.ResponseWriter, r *http.Request) {
161
176
user := s.oauth.GetUser(r)
162
177
s.pages.TermsOfService(w, pages.TermsOfServiceParams{
···
180
195
s.pages.Notice(w, "timeline", "Uh oh! Failed to load timeline.")
181
196
}
182
197
183
-
var didsToResolve []string
184
-
for _, ev := range timeline {
185
-
if ev.Repo != nil {
186
-
didsToResolve = append(didsToResolve, ev.Repo.Did)
187
-
if ev.Source != nil {
188
-
didsToResolve = append(didsToResolve, ev.Source.Did)
189
-
}
190
-
}
191
-
if ev.Follow != nil {
192
-
didsToResolve = append(didsToResolve, ev.Follow.UserDid, ev.Follow.SubjectDid)
193
-
}
194
-
if ev.Star != nil {
195
-
didsToResolve = append(didsToResolve, ev.Star.StarredByDid, ev.Star.Repo.Did)
196
-
}
197
-
}
198
-
199
-
resolvedIds := s.idResolver.ResolveIdents(r.Context(), didsToResolve)
200
-
didHandleMap := make(map[string]string)
201
-
for _, identity := range resolvedIds {
202
-
if !identity.Handle.IsInvalidHandle() {
203
-
didHandleMap[identity.DID.String()] = fmt.Sprintf("@%s", identity.Handle.String())
204
-
} else {
205
-
didHandleMap[identity.DID.String()] = identity.DID.String()
206
-
}
198
+
repos, err := db.GetTopStarredReposLastWeek(s.db)
199
+
if err != nil {
200
+
log.Println(err)
201
+
s.pages.Notice(w, "topstarredrepos", "Unable to load.")
202
+
return
207
203
}
208
204
209
205
s.pages.Timeline(w, pages.TimelineParams{
210
206
LoggedInUser: user,
211
207
Timeline: timeline,
212
-
DidHandleMap: didHandleMap,
208
+
Repos: repos,
213
209
})
214
-
215
-
return
216
210
}
217
211
218
212
func (s *State) Keys(w http.ResponseWriter, r *http.Request) {
···
279
273
return nil
280
274
}
281
275
276
+
func stripGitExt(name string) string {
277
+
return strings.TrimSuffix(name, ".git")
278
+
}
279
+
282
280
func (s *State) NewRepo(w http.ResponseWriter, r *http.Request) {
283
281
switch r.Method {
284
282
case http.MethodGet:
···
313
311
s.pages.Notice(w, "repo", err.Error())
314
312
return
315
313
}
314
+
315
+
repoName = stripGitExt(repoName)
316
316
317
317
defaultBranch := r.FormValue("branch")
318
318
if defaultBranch == "" {
···
410
410
// continue
411
411
}
412
412
413
-
repo.AtUri = atresp.Uri
414
413
err = db.AddRepo(tx, repo)
415
414
if err != nil {
416
415
log.Println(err)
+24
-13
appview/strings/strings.go
···
7
7
"path"
8
8
"slices"
9
9
"strconv"
10
-
"strings"
11
10
"time"
12
11
13
12
"tangled.sh/tangled.sh/core/api/tangled"
···
44
43
r := chi.NewRouter()
45
44
46
45
r.
46
+
Get("/", s.timeline)
47
+
48
+
r.
47
49
With(mw.ResolveIdent()).
48
50
Route("/{user}", func(r chi.Router) {
49
51
r.Get("/", s.dashboard)
···
70
72
return r
71
73
}
72
74
75
+
func (s *Strings) timeline(w http.ResponseWriter, r *http.Request) {
76
+
l := s.Logger.With("handler", "timeline")
77
+
78
+
strings, err := db.GetStrings(s.Db, 50)
79
+
if err != nil {
80
+
l.Error("failed to fetch string", "err", err)
81
+
w.WriteHeader(http.StatusInternalServerError)
82
+
return
83
+
}
84
+
85
+
s.Pages.StringsTimeline(w, pages.StringTimelineParams{
86
+
LoggedInUser: s.OAuth.GetUser(r),
87
+
Strings: strings,
88
+
})
89
+
}
90
+
73
91
func (s *Strings) contents(w http.ResponseWriter, r *http.Request) {
74
92
l := s.Logger.With("handler", "contents")
75
93
···
91
109
92
110
strings, err := db.GetStrings(
93
111
s.Db,
112
+
0,
94
113
db.FilterEq("did", id.DID),
95
114
db.FilterEq("rkey", rkey),
96
115
)
···
154
173
155
174
all, err := db.GetStrings(
156
175
s.Db,
176
+
0,
157
177
db.FilterEq("did", id.DID),
158
178
)
159
179
if err != nil {
···
182
202
followStatus = db.GetFollowStatus(s.Db, loggedInUser.Did, id.DID.String())
183
203
}
184
204
185
-
followers, following, err := db.GetFollowerFollowing(s.Db, id.DID.String())
205
+
followers, following, err := db.GetFollowerFollowingCount(s.Db, id.DID.String())
186
206
if err != nil {
187
207
l.Error("failed to get follow stats", "err", err)
188
208
}
···
225
245
// get the string currently being edited
226
246
all, err := db.GetStrings(
227
247
s.Db,
248
+
0,
228
249
db.FilterEq("did", id.DID),
229
250
db.FilterEq("rkey", rkey),
230
251
)
···
266
287
fail("Empty filename.", nil)
267
288
return
268
289
}
269
-
if !strings.Contains(filename, ".") {
270
-
// TODO: make this a htmx form validation
271
-
fail("No extension provided for filename.", nil)
272
-
return
273
-
}
274
290
275
291
content := r.FormValue("content")
276
292
if content == "" {
···
353
369
fail("Empty filename.", nil)
354
370
return
355
371
}
356
-
if !strings.Contains(filename, ".") {
357
-
// TODO: make this a htmx form validation
358
-
fail("No extension provided for filename.", nil)
359
-
return
360
-
}
361
372
362
373
content := r.FormValue("content")
363
374
if content == "" {
···
434
445
}
435
446
436
447
if user.Did != id.DID.String() {
437
-
fail("You cannot delete this gist", fmt.Errorf("unauthorized deletion, %s != %s", user.Did, id.DID.String()))
448
+
fail("You cannot delete this string", fmt.Errorf("unauthorized deletion, %s != %s", user.Did, id.DID.String()))
438
449
return
439
450
}
440
451
-2
cmd/gen.go
···
27
27
tangled.KnotMember{},
28
28
tangled.Pipeline{},
29
29
tangled.Pipeline_CloneOpts{},
30
-
tangled.Pipeline_Dependency{},
31
30
tangled.Pipeline_ManualTriggerData{},
32
31
tangled.Pipeline_Pair{},
33
32
tangled.Pipeline_PullRequestTriggerData{},
34
33
tangled.Pipeline_PushTriggerData{},
35
34
tangled.PipelineStatus{},
36
-
tangled.Pipeline_Step{},
37
35
tangled.Pipeline_TriggerMetadata{},
38
36
tangled.Pipeline_TriggerRepo{},
39
37
tangled.Pipeline_Workflow{},
+1
-1
cmd/punchcardPopulate/main.go
+6
docs/contributing.md
···
55
55
- Avoid noisy commit messages like "wip" or "final fix"โrewrite history
56
56
before submitting if necessary.
57
57
58
+
## code formatting
59
+
60
+
We use a variety of tools to format our code, and multiplex them with
61
+
[`treefmt`](https://treefmt.com): all you need to do to format your changes
62
+
is run `nix run .#fmt` (or just `treefmt` if you're in the devshell).
63
+
58
64
## proposals for bigger changes
59
65
60
66
Small fixes like typos, minor bugs, or trivial refactors can be
+4
-6
docs/hacking.md
···
64
64
You will also need to set the `$TANGLED_VM_SPINDLE_OWNER`
65
65
variable to some value. If you don't want to [set up a
66
66
spindle](#running-a-spindle), you can use any placeholder
67
-
value.
67
+
value.
68
68
69
-
You can now start a lightweight NixOS VM using
70
-
`nixos-shell` like so:
69
+
You can now start a lightweight NixOS VM like so:
71
70
72
71
```bash
73
-
nix run .#vm
74
-
# or nixos-shell --flake .#vm
72
+
nix run --impure .#vm
75
73
76
-
# hit Ctrl-a + c + q to exit the VM
74
+
# type `poweroff` at the shell to exit the VM
77
75
```
78
76
79
77
This starts a knot on port 6000, a spindle on port 6555
+26
-3
docs/spindle/pipeline.md
···
4
4
repo. Generally:
5
5
6
6
* Pipelines are defined in YAML.
7
-
* Dependencies can be specified from
8
-
[Nixpkgs](https://search.nixos.org) or custom registries.
9
-
* Environment variables can be set globally or per-step.
7
+
* Workflows can run using different *engines*.
8
+
9
+
The most barebones workflow looks like this:
10
+
11
+
```yaml
12
+
when:
13
+
- event: ["push"]
14
+
branch: ["main"]
15
+
16
+
engine: "nixery"
17
+
18
+
# optional
19
+
clone:
20
+
skip: false
21
+
depth: 50
22
+
submodules: true
23
+
```
24
+
25
+
The `when` and `engine` fields are required, while every other aspect
26
+
of how the definition is parsed is up to the engine. Currently, a spindle
27
+
provides at least one of these built-in engines:
28
+
29
+
## `nixery`
30
+
31
+
The Nixery engine uses an instance of [Nixery](https://nixery.dev) to run
32
+
steps that use dependencies from [Nixpkgs](https://github.com/NixOS/nixpkgs).
10
33
11
34
Here's an example that uses all fields:
12
35
+1
-1
eventconsumer/cursor/sqlite.go
···
21
21
}
22
22
23
23
func NewSQLiteStore(dbPath string, opts ...SqliteStoreOpt) (*SqliteStore, error) {
24
-
db, err := sql.Open("sqlite3", dbPath)
24
+
db, err := sql.Open("sqlite3", dbPath+"?_foreign_keys=1")
25
25
if err != nil {
26
26
return nil, fmt.Errorf("failed to open sqlite database: %w", err)
27
27
}
+54
-28
flake.nix
···
106
106
pkgsCross-gnu64-pkgsStatic-knot = crossPackages.knot;
107
107
pkgsCross-gnu64-pkgsStatic-knot-unwrapped = crossPackages.knot-unwrapped;
108
108
pkgsCross-gnu64-pkgsStatic-spindle = crossPackages.spindle;
109
+
110
+
treefmt-wrapper = pkgs.treefmt.withConfig {
111
+
settings.formatter = {
112
+
alejandra = {
113
+
command = pkgs.lib.getExe pkgs.alejandra;
114
+
includes = ["*.nix"];
115
+
};
116
+
117
+
gofmt = {
118
+
command = pkgs.lib.getExe' pkgs.go "gofmt";
119
+
options = ["-w"];
120
+
includes = ["*.go"];
121
+
};
122
+
123
+
# prettier = let
124
+
# wrapper = pkgs.runCommandLocal "prettier-wrapper" {nativeBuildInputs = [pkgs.makeWrapper];} ''
125
+
# makeWrapper ${pkgs.prettier}/bin/prettier "$out" --add-flags "--plugin=${pkgs.prettier-plugin-go-template}/lib/node_modules/prettier-plugin-go-template/lib/index.js"
126
+
# '';
127
+
# in {
128
+
# command = wrapper;
129
+
# options = ["-w"];
130
+
# includes = ["*.html"];
131
+
# # causes Go template plugin errors: https://github.com/NiklasPor/prettier-plugin-go-template/issues/120
132
+
# excludes = ["appview/pages/templates/layouts/repobase.html" "appview/pages/templates/repo/tags.html"];
133
+
# };
134
+
};
135
+
};
109
136
});
110
137
defaultPackage = forAllSystems (system: self.packages.${system}.appview);
111
-
formatter = forAllSystems (system: nixpkgsFor.${system}.alejandra);
112
138
devShells = forAllSystems (system: let
113
139
pkgs = nixpkgsFor.${system};
114
140
packages' = self.packages.${system};
···
129
155
pkgs.redis
130
156
pkgs.coreutils # for those of us who are on systems that use busybox (alpine)
131
157
packages'.lexgen
158
+
packages'.treefmt-wrapper
132
159
];
133
160
shellHook = ''
134
161
mkdir -p appview/pages/static
···
158
185
${pkgs.tailwindcss}/bin/tailwindcss -w -i input.css -o ./appview/pages/static/tw.css
159
186
'';
160
187
in {
188
+
fmt = {
189
+
type = "app";
190
+
program = pkgs.lib.getExe packages'.treefmt-wrapper;
191
+
};
161
192
watch-appview = {
162
193
type = "app";
163
194
program = toString (pkgs.writeShellScript "watch-appview" ''
···
175
206
program = ''${tailwind-watcher}/bin/run'';
176
207
};
177
208
vm = let
178
-
system =
209
+
guestSystem =
179
210
if pkgs.stdenv.hostPlatform.isAarch64
180
-
then "aarch64"
181
-
else "x86_64";
182
-
183
-
nixos-shell = pkgs.nixos-shell.overrideAttrs (old: {
184
-
patches =
185
-
(old.patches or [])
186
-
++ [
187
-
# https://github.com/Mic92/nixos-shell/pull/94
188
-
(pkgs.fetchpatch {
189
-
name = "fix-foreign-vm.patch";
190
-
url = "https://github.com/Mic92/nixos-shell/commit/113e4cc55ae236b5b0b1fbd8b321e9b67c77580e.patch";
191
-
hash = "sha256-eauetBK0wXAOcd9PYbExokNCiwz2QyFnZ4FnwGi9VCo=";
192
-
})
193
-
];
194
-
});
211
+
then "aarch64-linux"
212
+
else "x86_64-linux";
195
213
in {
196
214
type = "app";
197
-
program = toString (pkgs.writeShellScript "vm" ''
198
-
${nixos-shell}/bin/nixos-shell --flake .#vm-${system} --guest-system ${system}-linux
199
-
'');
215
+
program =
216
+
(pkgs.writeShellApplication {
217
+
name = "launch-vm";
218
+
text = ''
219
+
rootDir=$(jj --ignore-working-copy root || git rev-parse --show-toplevel) || (echo "error: can't find repo root?"; exit 1)
220
+
cd "$rootDir"
221
+
222
+
mkdir -p nix/vm-data/{knot,repos,spindle,spindle-logs}
223
+
224
+
export TANGLED_VM_DATA_DIR="$rootDir/nix/vm-data"
225
+
exec ${pkgs.lib.getExe
226
+
(import ./nix/vm.nix {
227
+
inherit nixpkgs self;
228
+
system = guestSystem;
229
+
hostSystem = system;
230
+
}).config.system.build.vm}
231
+
'';
232
+
})
233
+
+ /bin/launch-vm;
200
234
};
201
235
gomod2nix = {
202
236
type = "app";
···
257
291
imports = [./nix/modules/spindle.nix];
258
292
259
293
services.tangled-spindle.package = lib.mkDefault self.packages.${pkgs.system}.spindle;
260
-
};
261
-
nixosConfigurations.vm-x86_64 = import ./nix/vm.nix {
262
-
inherit self nixpkgs;
263
-
system = "x86_64-linux";
264
-
};
265
-
nixosConfigurations.vm-aarch64 = import ./nix/vm.nix {
266
-
inherit self nixpkgs;
267
-
system = "aarch64-linux";
268
294
};
269
295
};
270
296
}
+3
-2
go.mod
···
22
22
github.com/go-enry/go-enry/v2 v2.9.2
23
23
github.com/go-git/go-git/v5 v5.14.0
24
24
github.com/google/uuid v1.6.0
25
+
github.com/gorilla/feeds v1.2.0
25
26
github.com/gorilla/sessions v1.4.0
26
27
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
27
28
github.com/hiddeco/sshsig v0.2.0
···
38
39
github.com/stretchr/testify v1.10.0
39
40
github.com/urfave/cli/v3 v3.3.3
40
41
github.com/whyrusleeping/cbor-gen v0.3.1
41
-
github.com/yuin/goldmark v1.4.13
42
+
github.com/yuin/goldmark v1.4.15
43
+
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc
42
44
golang.org/x/crypto v0.40.0
43
45
golang.org/x/net v0.42.0
44
46
golang.org/x/sync v0.16.0
···
88
90
github.com/golang/mock v1.6.0 // indirect
89
91
github.com/google/go-querystring v1.1.0 // indirect
90
92
github.com/gorilla/css v1.0.1 // indirect
91
-
github.com/gorilla/feeds v1.2.0 // indirect
92
93
github.com/gorilla/securecookie v1.1.2 // indirect
93
94
github.com/hashicorp/errwrap v1.1.0 // indirect
94
95
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+5
-1
go.sum
···
79
79
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
80
80
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
81
81
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
82
+
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
82
83
github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
83
84
github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
84
85
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
···
429
430
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
430
431
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
431
432
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
432
-
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
433
433
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
434
+
github.com/yuin/goldmark v1.4.15 h1:CFa84T0goNn/UIXYS+dmjjVxMyTAvpOmzld40N/nfK0=
435
+
github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
436
+
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ=
437
+
github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I=
434
438
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b h1:CzigHMRySiX3drau9C6Q5CAbNIApmLdat5jPMqChvDA=
435
439
gitlab.com/yawning/secp256k1-voi v0.0.0-20230925100816-f2616030848b/go.mod h1:/y/V339mxv2sZmYYR64O07VuCpdNZqCTwO8ZcouTMI8=
436
440
gitlab.com/yawning/tuplehash v0.0.0-20230713102510-df83abbf9a02 h1:qwDnMxjkyLmAFgcfgTnfJrmYKWhHnci3GjDqcZp1M3Q=
+72
-7
input.css
···
13
13
@font-face {
14
14
font-family: "InterVariable";
15
15
src: url("/static/fonts/InterVariable-Italic.woff2") format("woff2");
16
-
font-weight: 400;
16
+
font-weight: normal;
17
17
font-style: italic;
18
18
font-display: swap;
19
19
}
20
20
21
21
@font-face {
22
22
font-family: "InterVariable";
23
-
src: url("/static/fonts/InterVariable.woff2") format("woff2");
24
-
font-weight: 600;
23
+
src: url("/static/fonts/InterDisplay-Bold.woff2") format("woff2");
24
+
font-weight: bold;
25
25
font-style: normal;
26
26
font-display: swap;
27
27
}
28
28
29
29
@font-face {
30
+
font-family: "InterVariable";
31
+
src: url("/static/fonts/InterDisplay-BoldItalic.woff2") format("woff2");
32
+
font-weight: bold;
33
+
font-style: italic;
34
+
font-display: swap;
35
+
}
36
+
37
+
@font-face {
30
38
font-family: "IBMPlexMono";
31
39
src: url("/static/fonts/IBMPlexMono-Regular.woff2") format("woff2");
32
40
font-weight: normal;
41
+
font-style: normal;
42
+
font-display: swap;
43
+
}
44
+
45
+
@font-face {
46
+
font-family: "IBMPlexMono";
47
+
src: url("/static/fonts/IBMPlexMono-Italic.woff2") format("woff2");
48
+
font-weight: normal;
49
+
font-style: italic;
50
+
font-display: swap;
51
+
}
52
+
53
+
@font-face {
54
+
font-family: "IBMPlexMono";
55
+
src: url("/static/fonts/IBMPlexMono-Bold.woff2") format("woff2");
56
+
font-weight: bold;
57
+
font-style: normal;
58
+
font-display: swap;
59
+
}
60
+
61
+
@font-face {
62
+
font-family: "IBMPlexMono";
63
+
src: url("/static/fonts/IBMPlexMono-BoldItalic.woff2") format("woff2");
64
+
font-weight: bold;
33
65
font-style: italic;
34
66
font-display: swap;
35
67
}
···
46
78
@supports (font-variation-settings: normal) {
47
79
html {
48
80
font-feature-settings:
49
-
"ss01" 1,
50
81
"kern" 1,
51
82
"liga" 1,
52
83
"cv05" 1,
···
72
103
}
73
104
74
105
code {
75
-
@apply px-1 font-mono rounded bg-gray-100 dark:bg-gray-700;
106
+
@apply font-mono rounded bg-gray-100 dark:bg-gray-700 text-black dark:text-white;
76
107
}
77
108
}
78
109
···
102
133
disabled:before:bg-green-400 dark:disabled:before:bg-green-600;
103
134
}
104
135
136
+
.prose hr {
137
+
@apply my-2;
138
+
}
139
+
140
+
.prose li:has(input) {
141
+
@apply list-none;
142
+
}
143
+
144
+
.prose ul:has(input) {
145
+
@apply pl-2;
146
+
}
147
+
148
+
.prose .heading .anchor {
149
+
@apply no-underline mx-2 opacity-0;
150
+
}
151
+
152
+
.prose .heading:hover .anchor {
153
+
@apply opacity-70;
154
+
}
155
+
156
+
.prose .heading .anchor:hover {
157
+
@apply opacity-70;
158
+
}
159
+
160
+
.prose a.footnote-backref {
161
+
@apply no-underline;
162
+
}
163
+
164
+
.prose li {
165
+
@apply my-0 py-0;
166
+
}
167
+
168
+
.prose ul, .prose ol {
169
+
@apply my-1 py-0;
170
+
}
171
+
105
172
.prose img {
106
173
display: inline;
107
174
margin: 0;
···
134
201
/* PreWrapper */
135
202
.chroma {
136
203
color: #4c4f69;
137
-
background-color: #eff1f5;
138
204
}
139
205
/* Error */
140
206
.chroma .err {
···
471
537
/* PreWrapper */
472
538
.chroma {
473
539
color: #cad3f5;
474
-
background-color: #24273a;
475
540
}
476
541
/* Error */
477
542
.chroma .err {
+6
-4
jetstream/jetstream.go
···
68
68
type processor func(context.Context, *models.Event) error
69
69
70
70
func (j *JetstreamClient) withDidFilter(processFunc processor) processor {
71
-
// empty filter => all dids allowed
72
-
if len(j.wantedDids) == 0 {
73
-
return processFunc
74
-
}
75
71
// since this closure references j.WantedDids; it should auto-update
76
72
// existing instances of the closure when j.WantedDids is mutated
77
73
return func(ctx context.Context, evt *models.Event) error {
74
+
75
+
// empty filter => all dids allowed
76
+
if len(j.wantedDids) == 0 {
77
+
return processFunc(ctx, evt)
78
+
}
79
+
78
80
if _, ok := j.wantedDids[evt.Did]; ok {
79
81
return processFunc(ctx, evt)
80
82
} else {
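For context, the net effect of this hunk is that the allow-list check now runs per event instead of once when the wrapper is built, so DIDs added after the subscription starts still take effect. A minimal, self-contained sketch of that closure pattern (illustrative types, not the real models.Event/processor definitions):

```go
package main

import "fmt"

// event and handler stand in for the jetstream types; they are illustrative,
// not the real models.Event / processor definitions.
type event struct{ Did string }
type handler func(event) error

// withDidFilter consults the allow-list on every call. Because the closure
// reads the map each time, DIDs added after the wrapper is built still take
// effect without rebuilding the subscription. (Real code would synchronize
// access if the map is mutated concurrently.)
func withDidFilter(wanted map[string]struct{}, next handler) handler {
	return func(e event) error {
		if len(wanted) == 0 { // empty filter => all dids allowed
			return next(e)
		}
		if _, ok := wanted[e.Did]; ok {
			return next(e)
		}
		return nil // drop events from unwanted DIDs
	}
}

func main() {
	wanted := map[string]struct{}{}
	h := withDidFilter(wanted, func(e event) error {
		fmt.Println("processed", e.Did)
		return nil
	})

	h(event{Did: "did:plc:a"})       // allowed: filter is still empty
	wanted["did:plc:b"] = struct{}{} // mutate after the closure exists
	h(event{Did: "did:plc:a"})       // now dropped
	h(event{Did: "did:plc:b"})       // allowed
}
```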
+14
-10
knotserver/db/init.go
···
2
2
3
3
import (
4
4
"database/sql"
5
+
"strings"
5
6
6
7
_ "github.com/mattn/go-sqlite3"
7
8
)
···
11
12
}
12
13
13
14
func Setup(dbPath string) (*DB, error) {
14
-
db, err := sql.Open("sqlite3", dbPath)
15
+
// https://github.com/mattn/go-sqlite3#connection-string
16
+
opts := []string{
17
+
"_foreign_keys=1",
18
+
"_journal_mode=WAL",
19
+
"_synchronous=NORMAL",
20
+
"_auto_vacuum=incremental",
21
+
}
22
+
23
+
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
15
24
if err != nil {
16
25
return nil, err
17
26
}
18
27
19
-
_, err = db.Exec(`
20
-
pragma journal_mode = WAL;
21
-
pragma synchronous = normal;
22
-
pragma foreign_keys = on;
23
-
pragma temp_store = memory;
24
-
pragma mmap_size = 30000000000;
25
-
pragma page_size = 32768;
26
-
pragma auto_vacuum = incremental;
27
-
pragma busy_timeout = 5000;
28
+
// NOTE: If any other migration is added here, you MUST
29
+
// copy the pattern in appview: use a single sql.Conn
30
+
// for every migration.
28
31
32
+
_, err = db.Exec(`
29
33
create table if not exists known_dids (
30
34
did text primary key
31
35
);
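The PRAGMA block is replaced by driver-level options in the DSN, so every connection the database/sql pool opens gets them, rather than only the one connection a one-off `db.Exec("pragma ...")` happens to run on. A minimal sketch of the same idea, assuming the mattn/go-sqlite3 driver this file already imports (the option names are the ones used in the hunk):

```go
package main

import (
	"database/sql"
	"log"
	"strings"

	_ "github.com/mattn/go-sqlite3"
)

// openSQLite encodes the pragmas in the connection string so they apply to
// every pooled connection, not just whichever one executes a setup statement.
func openSQLite(path string) (*sql.DB, error) {
	opts := []string{
		"_foreign_keys=1",
		"_journal_mode=WAL",
		"_synchronous=NORMAL",
		"_auto_vacuum=incremental",
	}
	return sql.Open("sqlite3", path+"?"+strings.Join(opts, "&"))
}

func main() {
	db, err := openSQLite("example.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil { // force a real connection
		log.Fatal(err)
	}
}
```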
+8
-10
knotserver/git/fork.go
···
10
10
)
11
11
12
12
func Fork(repoPath, source string) error {
13
-
_, err := git.PlainClone(repoPath, true, &git.CloneOptions{
14
-
URL: source,
15
-
SingleBranch: false,
16
-
})
17
-
18
-
if err != nil {
13
+
cloneCmd := exec.Command("git", "clone", "--bare", source, repoPath)
14
+
if err := cloneCmd.Run(); err != nil {
19
15
return fmt.Errorf("failed to bare clone repository: %w", err)
20
16
}
21
17
22
-
err = exec.Command("git", "-C", repoPath, "config", "receive.hideRefs", "refs/hidden").Run()
23
-
if err != nil {
18
+
configureCmd := exec.Command("git", "-C", repoPath, "config", "receive.hideRefs", "refs/hidden")
19
+
if err := configureCmd.Run(); err != nil {
24
20
return fmt.Errorf("failed to configure hidden refs: %w", err)
25
21
}
26
22
27
23
return nil
28
24
}
29
25
30
-
func (g *GitRepo) Sync(branch string) error {
26
+
func (g *GitRepo) Sync() error {
27
+
branch := g.h.String()
28
+
31
29
fetchOpts := &git.FetchOptions{
32
30
RefSpecs: []config.RefSpec{
33
-
config.RefSpec(fmt.Sprintf("+refs/heads/%s:refs/heads/%s", branch, branch)),
31
+
config.RefSpec("+" + branch + ":" + branch), // +refs/heads/master:refs/heads/master
34
32
},
35
33
}
36
34
+5
knotserver/git.go
···
129
129
// If the appview gave us the repository owner's handle we can attempt to
130
130
// construct the correct ssh url.
131
131
ownerHandle := r.Header.Get("x-tangled-repo-owner-handle")
132
+
ownerHandle = strings.TrimPrefix(ownerHandle, "@")
132
133
if ownerHandle != "" && !strings.ContainsAny(ownerHandle, ":") {
133
134
hostname := d.c.Server.Hostname
134
135
if strings.Contains(hostname, ":") {
135
136
hostname = strings.Split(hostname, ":")[0]
137
+
}
138
+
139
+
if hostname == "knot1.tangled.sh" {
140
+
hostname = "tangled.sh"
136
141
}
137
142
138
143
fmt.Fprintf(w, " Try:\ngit remote set-url --push origin git@%s:%s/%s\n\n... and push again.", hostname, ownerHandle, unqualifiedRepoName)
+7
-7
knotserver/handler.go
···
52
52
return nil, fmt.Errorf("failed to setup enforcer: %w", err)
53
53
}
54
54
55
-
err = h.jc.StartJetstream(ctx, h.processMessages)
56
-
if err != nil {
57
-
return nil, fmt.Errorf("failed to start jetstream: %w", err)
58
-
}
59
-
60
55
// Check if the knot knows about any Dids;
61
56
// if it does, it is already initialized and we can repopulate the
62
57
// Jetstream subscriptions.
···
71
66
for _, d := range dids {
72
67
h.jc.AddDid(d)
73
68
}
69
+
}
70
+
71
+
err = h.jc.StartJetstream(ctx, h.processMessages)
72
+
if err != nil {
73
+
return nil, fmt.Errorf("failed to start jetstream: %w", err)
74
74
}
75
75
76
76
r.Get("/", h.Index)
···
142
142
r.Delete("/", h.RemoveRepo)
143
143
r.Route("/fork", func(r chi.Router) {
144
144
r.Post("/", h.RepoFork)
145
-
r.Post("/sync/{branch}", h.RepoForkSync)
146
-
r.Get("/sync/{branch}", h.RepoForkAheadBehind)
145
+
r.Post("/sync/*", h.RepoForkSync)
146
+
r.Get("/sync/*", h.RepoForkAheadBehind)
147
147
})
148
148
})
149
149
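The named `{branch}` parameter is swapped for a wildcard because branch names and fully qualified refs contain slashes, which a named chi parameter will not match. A small stand-alone sketch of the wildcard pattern, assuming go-chi/chi/v5 (the router already used here); the route path is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"

	"github.com/go-chi/chi/v5"
)

func main() {
	r := chi.NewRouter()

	// A named parameter ({branch}) stops at the next "/", so a ref like
	// "refs/heads/feature/x" would never match. A trailing wildcard captures
	// the whole remainder, which the handler reads back with URLParam(req, "*").
	r.Post("/fork/sync/*", func(w http.ResponseWriter, req *http.Request) {
		branch := chi.URLParam(req, "*")
		branch, _ = url.PathUnescape(branch)
		fmt.Fprintf(w, "syncing %q\n", branch)
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```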
+46
-42
knotserver/ingester.go
···
25
25
"tangled.sh/tangled.sh/core/workflow"
26
26
)
27
27
28
-
func (h *Handle) processPublicKey(ctx context.Context, did string, record tangled.PublicKey) error {
28
+
func (h *Handle) processPublicKey(ctx context.Context, event *models.Event) error {
29
29
l := log.FromContext(ctx)
30
+
raw := json.RawMessage(event.Commit.Record)
31
+
did := event.Did
32
+
33
+
var record tangled.PublicKey
34
+
if err := json.Unmarshal(raw, &record); err != nil {
35
+
return fmt.Errorf("failed to unmarshal record: %w", err)
36
+
}
37
+
30
38
pk := db.PublicKey{
31
39
Did: did,
32
40
PublicKey: record,
···
39
47
return nil
40
48
}
41
49
42
-
func (h *Handle) processKnotMember(ctx context.Context, did string, record tangled.KnotMember) error {
50
+
func (h *Handle) processKnotMember(ctx context.Context, event *models.Event) error {
43
51
l := log.FromContext(ctx)
52
+
raw := json.RawMessage(event.Commit.Record)
53
+
did := event.Did
54
+
55
+
var record tangled.KnotMember
56
+
if err := json.Unmarshal(raw, &record); err != nil {
57
+
return fmt.Errorf("failed to unmarshal record: %w", err)
58
+
}
44
59
45
60
if record.Domain != h.c.Server.Hostname {
46
61
l.Error("domain mismatch", "domain", record.Domain, "expected", h.c.Server.Hostname)
···
72
87
return nil
73
88
}
74
89
75
-
func (h *Handle) processPull(ctx context.Context, did string, record tangled.RepoPull) error {
90
+
func (h *Handle) processPull(ctx context.Context, event *models.Event) error {
91
+
raw := json.RawMessage(event.Commit.Record)
92
+
did := event.Did
93
+
94
+
var record tangled.RepoPull
95
+
if err := json.Unmarshal(raw, &record); err != nil {
96
+
return fmt.Errorf("failed to unmarshal record: %w", err)
97
+
}
98
+
76
99
l := log.FromContext(ctx)
77
100
l = l.With("handler", "processPull")
78
101
l = l.With("did", did)
···
200
223
return nil
201
224
}
202
225
203
-
event := db.Event{
226
+
ev := db.Event{
204
227
Rkey: TID(),
205
228
Nsid: tangled.PipelineNSID,
206
229
EventJson: string(eventJson),
207
230
}
208
231
209
-
return h.db.InsertEvent(event, h.n)
232
+
return h.db.InsertEvent(ev, h.n)
210
233
}
211
234
212
235
// duplicated from add collaborator
213
-
func (h *Handle) processCollaborator(ctx context.Context, did string, record tangled.RepoCollaborator) error {
236
+
func (h *Handle) processCollaborator(ctx context.Context, event *models.Event) error {
237
+
raw := json.RawMessage(event.Commit.Record)
238
+
did := event.Did
239
+
240
+
var record tangled.RepoCollaborator
241
+
if err := json.Unmarshal(raw, &record); err != nil {
242
+
return fmt.Errorf("failed to unmarshal record: %w", err)
243
+
}
244
+
214
245
repoAt, err := syntax.ParseATURI(record.Repo)
215
246
if err != nil {
216
247
return err
···
243
274
didSlashRepo, _ := securejoin.SecureJoin(owner.DID.String(), repo.Name)
244
275
245
276
// check perms for this user
246
-
if ok, err := h.e.IsCollaboratorInviteAllowed(owner.DID.String(), rbac.ThisServer, didSlashRepo); !ok || err != nil {
277
+
if ok, err := h.e.IsCollaboratorInviteAllowed(did, rbac.ThisServer, didSlashRepo); !ok || err != nil {
247
278
return fmt.Errorf("insufficient permissions: %w", err)
248
279
}
249
280
···
303
334
}
304
335
305
336
func (h *Handle) processMessages(ctx context.Context, event *models.Event) error {
306
-
did := event.Did
307
337
if event.Kind != models.EventKindCommit {
308
338
return nil
309
339
}
···
317
347
}
318
348
}()
319
349
320
-
raw := json.RawMessage(event.Commit.Record)
321
-
322
350
switch event.Commit.Collection {
323
351
case tangled.PublicKeyNSID:
324
-
var record tangled.PublicKey
325
-
if err := json.Unmarshal(raw, &record); err != nil {
326
-
return fmt.Errorf("failed to unmarshal record: %w", err)
327
-
}
328
-
if err := h.processPublicKey(ctx, did, record); err != nil {
329
-
return fmt.Errorf("failed to process public key: %w", err)
330
-
}
331
-
352
+
err = h.processPublicKey(ctx, event)
332
353
case tangled.KnotMemberNSID:
333
-
var record tangled.KnotMember
334
-
if err := json.Unmarshal(raw, &record); err != nil {
335
-
return fmt.Errorf("failed to unmarshal record: %w", err)
336
-
}
337
-
if err := h.processKnotMember(ctx, did, record); err != nil {
338
-
return fmt.Errorf("failed to process knot member: %w", err)
339
-
}
340
-
354
+
err = h.processKnotMember(ctx, event)
341
355
case tangled.RepoPullNSID:
342
-
var record tangled.RepoPull
343
-
if err := json.Unmarshal(raw, &record); err != nil {
344
-
return fmt.Errorf("failed to unmarshal record: %w", err)
345
-
}
346
-
if err := h.processPull(ctx, did, record); err != nil {
347
-
return fmt.Errorf("failed to process knot member: %w", err)
348
-
}
349
-
356
+
err = h.processPull(ctx, event)
350
357
case tangled.RepoCollaboratorNSID:
351
-
var record tangled.RepoCollaborator
352
-
if err := json.Unmarshal(raw, &record); err != nil {
353
-
return fmt.Errorf("failed to unmarshal record: %w", err)
354
-
}
355
-
if err := h.processCollaborator(ctx, did, record); err != nil {
356
-
return fmt.Errorf("failed to process knot member: %w", err)
357
-
}
358
+
err = h.processCollaborator(ctx, event)
359
+
}
358
360
361
+
if err != nil {
362
+
h.l.Debug("failed to process event", "nsid", event.Commit.Collection, "err", err)
359
363
}
360
364
361
-
return err
365
+
return nil
362
366
}
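Each process* handler now decodes its own record from the raw jetstream event, so the switch in processMessages only dispatches, and failures are logged at debug level and swallowed instead of aborting the consumer. A simplified sketch of that shape with stand-in types (the NSID string and record fields are illustrative, not the real tangled lexicon types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type commit struct {
	Collection string
	Record     json.RawMessage
}

type event struct {
	Did    string
	Commit commit
}

type publicKeyRecord struct {
	Key string `json:"key"`
}

// processPublicKey unmarshals its own record, keeping the dispatcher generic.
func processPublicKey(evt *event) error {
	var rec publicKeyRecord
	if err := json.Unmarshal(evt.Commit.Record, &rec); err != nil {
		return fmt.Errorf("failed to unmarshal record: %w", err)
	}
	fmt.Println("adding key for", evt.Did, "->", rec.Key)
	return nil
}

func processMessages(evt *event) error {
	var err error
	switch evt.Commit.Collection {
	case "sh.tangled.publicKey": // illustrative NSID string
		err = processPublicKey(evt)
	}
	if err != nil {
		// log and swallow: one malformed record should not stop the consumer
		fmt.Println("failed to process event:", err)
	}
	return nil
}

func main() {
	raw := json.RawMessage(`{"key":"ssh-ed25519 AAAA..."}`)
	processMessages(&event{
		Did:    "did:plc:example",
		Commit: commit{Collection: "sh.tangled.publicKey", Record: raw},
	})
}
```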
+4
-4
knotserver/internal.go
···
242
242
return err
243
243
}
244
244
245
+
for _, e := range compiler.Diagnostics.Errors {
246
+
*clientMsgs = append(*clientMsgs, e.String())
247
+
}
248
+
245
249
if pushOptions.verboseCi {
246
250
if compiler.Diagnostics.IsEmpty() {
247
251
*clientMsgs = append(*clientMsgs, "success: pipeline compiled with no diagnostics")
248
-
}
249
-
250
-
for _, e := range compiler.Diagnostics.Errors {
251
-
*clientMsgs = append(*clientMsgs, e.String())
252
252
}
253
253
254
254
for _, w := range compiler.Diagnostics.Warnings {
+16
-11
knotserver/routes.go
···
286
286
mimeType = "image/svg+xml"
287
287
}
288
288
289
+
contentHash := sha256.Sum256(contents)
290
+
eTag := fmt.Sprintf("\"%x\"", contentHash)
291
+
289
292
// allow image, video, and text/plain files to be served directly
290
293
switch {
291
-
case strings.HasPrefix(mimeType, "image/"):
292
-
// allowed
293
-
case strings.HasPrefix(mimeType, "video/"):
294
-
// allowed
294
+
case strings.HasPrefix(mimeType, "image/"), strings.HasPrefix(mimeType, "video/"):
295
+
if clientETag := r.Header.Get("If-None-Match"); clientETag == eTag {
296
+
w.WriteHeader(http.StatusNotModified)
297
+
return
298
+
}
299
+
w.Header().Set("ETag", eTag)
300
+
295
301
case strings.HasPrefix(mimeType, "text/plain"):
296
-
// allowed
302
+
w.Header().Set("Cache-Control", "public, no-cache")
303
+
297
304
default:
298
305
l.Error("attempted to serve disallowed file type", "mimetype", mimeType)
299
306
writeError(w, "only image, video, and text files can be accessed directly", http.StatusForbidden)
300
307
return
301
308
}
302
309
303
-
w.Header().Set("Cache-Control", "public, max-age=86400") // cache for 24 hours
304
-
w.Header().Set("ETag", fmt.Sprintf("%x", sha256.Sum256(contents)))
305
310
w.Header().Set("Content-Type", mimeType)
306
311
w.Write(contents)
307
312
}
···
710
715
}
711
716
712
717
func (h *Handle) RepoForkAheadBehind(w http.ResponseWriter, r *http.Request) {
713
-
l := h.l.With("handler", "RepoForkSync")
718
+
l := h.l.With("handler", "RepoForkAheadBehind")
714
719
715
720
data := struct {
716
721
Did string `json:"did"`
···
845
850
name = filepath.Base(source)
846
851
}
847
852
848
-
branch := chi.URLParam(r, "branch")
853
+
branch := chi.URLParam(r, "*")
849
854
branch, _ = url.PathUnescape(branch)
850
855
851
856
relativeRepoPath := filepath.Join(did, name)
852
857
repoPath, _ := securejoin.SecureJoin(h.c.Repo.ScanPath, relativeRepoPath)
853
858
854
-
gr, err := git.PlainOpen(repoPath)
859
+
gr, err := git.Open(repoPath, branch)
855
860
if err != nil {
856
861
log.Println(err)
857
862
notFound(w)
858
863
return
859
864
}
860
865
861
-
err = gr.Sync(branch)
866
+
err = gr.Sync()
862
867
if err != nil {
863
868
l.Error("error syncing repo fork", "error", err.Error())
864
869
writeError(w, err.Error(), http.StatusInternalServerError)
+1
-8
lexicons/issue/comment.json
+1
-8
lexicons/issue/comment.json
···
9
9
"key": "tid",
10
10
"record": {
11
11
"type": "object",
12
-
"required": [
13
-
"issue",
14
-
"body",
15
-
"createdAt"
16
-
],
12
+
"required": ["issue", "body", "createdAt"],
17
13
"properties": {
18
14
"issue": {
19
15
"type": "string",
···
22
18
"repo": {
23
19
"type": "string",
24
20
"format": "at-uri"
25
-
},
26
-
"commentId": {
27
-
"type": "integer"
28
21
},
29
22
"owner": {
30
23
"type": "string",
+1
-10
lexicons/issue/issue.json
···
9
9
"key": "tid",
10
10
"record": {
11
11
"type": "object",
12
-
"required": [
13
-
"repo",
14
-
"issueId",
15
-
"owner",
16
-
"title",
17
-
"createdAt"
18
-
],
12
+
"required": ["repo", "owner", "title", "createdAt"],
19
13
"properties": {
20
14
"repo": {
21
15
"type": "string",
22
16
"format": "at-uri"
23
-
},
24
-
"issueId": {
25
-
"type": "integer"
26
17
},
27
18
"owner": {
28
19
"type": "string",
+7
-63
lexicons/pipeline/pipeline.json
···
149
149
"type": "object",
150
150
"required": [
151
151
"name",
152
-
"dependencies",
153
-
"steps",
154
-
"environment",
155
-
"clone"
152
+
"engine",
153
+
"clone",
154
+
"raw"
156
155
],
157
156
"properties": {
158
157
"name": {
159
158
"type": "string"
160
159
},
161
-
"dependencies": {
162
-
"type": "array",
163
-
"items": {
164
-
"type": "ref",
165
-
"ref": "#dependency"
166
-
}
167
-
},
168
-
"steps": {
169
-
"type": "array",
170
-
"items": {
171
-
"type": "ref",
172
-
"ref": "#step"
173
-
}
174
-
},
175
-
"environment": {
176
-
"type": "array",
177
-
"items": {
178
-
"type": "ref",
179
-
"ref": "#pair"
180
-
}
160
+
"engine": {
161
+
"type": "string"
181
162
},
182
163
"clone": {
183
164
"type": "ref",
184
165
"ref": "#cloneOpts"
185
-
}
186
-
}
187
-
},
188
-
"dependency": {
189
-
"type": "object",
190
-
"required": [
191
-
"registry",
192
-
"packages"
193
-
],
194
-
"properties": {
195
-
"registry": {
166
+
},
167
+
"raw": {
196
168
"type": "string"
197
-
},
198
-
"packages": {
199
-
"type": "array",
200
-
"items": {
201
-
"type": "string"
202
-
}
203
169
}
204
170
}
205
171
},
···
219
185
},
220
186
"submodules": {
221
187
"type": "boolean"
222
-
}
223
-
}
224
-
},
225
-
"step": {
226
-
"type": "object",
227
-
"required": [
228
-
"name",
229
-
"command"
230
-
],
231
-
"properties": {
232
-
"name": {
233
-
"type": "string"
234
-
},
235
-
"command": {
236
-
"type": "string"
237
-
},
238
-
"environment": {
239
-
"type": "array",
240
-
"items": {
241
-
"type": "ref",
242
-
"ref": "#pair"
243
-
}
244
188
}
245
189
}
246
190
},
+3
-1
log/log.go
···
9
9
// NewHandler sets up a new slog.Handler with the service name
10
10
// as an attribute
11
11
func NewHandler(name string) slog.Handler {
12
-
handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{})
12
+
handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
13
+
Level: slog.LevelDebug,
14
+
})
13
15
14
16
var attrs []slog.Attr
15
17
attrs = append(attrs, slog.Attr{Key: "service", Value: slog.StringValue(name)})
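The handler level is now fixed to Debug. If it ever needs to be adjustable at runtime instead, slog's LevelVar drops into the same HandlerOptions field; a small sketch using only the standard library (a design alternative, not part of this change):

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// A LevelVar lets the minimum level be raised or lowered at runtime,
	// rather than fixing it to Debug when the handler is built.
	var level slog.LevelVar
	level.Set(slog.LevelDebug)

	handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		Level: &level,
	})
	logger := slog.New(handler).With("service", "knotserver")

	logger.Debug("visible while level is debug")
	level.Set(slog.LevelInfo)
	logger.Debug("now filtered out")
	logger.Info("still visible")
}
```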
+5
-2
nix/gomod2nix.toml
···
426
426
version = "v0.3.1"
427
427
hash = "sha256-PAd8M2Z8t6rVRBII+Rg8Bz+QaJIwbW64bfyqsv31kgc="
428
428
[mod."github.com/yuin/goldmark"]
429
-
version = "v1.4.13"
430
-
hash = "sha256-GVwFKZY6moIS6I0ZGuio/WtDif+lkZRfqWS6b4AAJyI="
429
+
version = "v1.4.15"
430
+
hash = "sha256-MvSOT6dwf5hVYkIg4MnqMpsy5ZtWZ7amAE7Zo9HkEa0="
431
+
[mod."github.com/yuin/goldmark-highlighting/v2"]
432
+
version = "v2.0.0-20230729083705-37449abec8cc"
433
+
hash = "sha256-HpiwU7jIeDUAg2zOpTIiviQir8dpRPuXYh2nqFFccpg="
431
434
[mod."gitlab.com/yawning/secp256k1-voi"]
432
435
version = "v0.0.0-20230925100816-f2616030848b"
433
436
hash = "sha256-X8INg01LTg13iOuwPI3uOhPN7r01sPZtmtwJ2sudjCA="
+27
-24
nix/modules/knot.nix
···
126
126
cfg.package
127
127
];
128
128
129
-
system.activationScripts.gitConfig = let
130
-
setMotd =
131
-
if cfg.motdFile != null && cfg.motd != null
132
-
then throw "motdFile and motd cannot be both set"
133
-
else ''
134
-
${optionalString (cfg.motdFile != null) "cat ${cfg.motdFile} > ${cfg.stateDir}/motd"}
135
-
${optionalString (cfg.motd != null) ''printf "${cfg.motd}" > ${cfg.stateDir}/motd''}
136
-
'';
137
-
in ''
138
-
mkdir -p "${cfg.repo.scanPath}"
139
-
chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.repo.scanPath}"
140
-
141
-
mkdir -p "${cfg.stateDir}/.config/git"
142
-
cat > "${cfg.stateDir}/.config/git/config" << EOF
143
-
[user]
144
-
name = Git User
145
-
email = git@example.com
146
-
[receive]
147
-
advertisePushOptions = true
148
-
EOF
149
-
${setMotd}
150
-
chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.stateDir}"
151
-
'';
152
-
153
129
users.users.${cfg.gitUser} = {
154
130
isSystemUser = true;
155
131
useDefaultShell = true;
···
185
161
description = "knot service";
186
162
after = ["network.target" "sshd.service"];
187
163
wantedBy = ["multi-user.target"];
164
+
enableStrictShellChecks = true;
165
+
166
+
preStart = let
167
+
setMotd =
168
+
if cfg.motdFile != null && cfg.motd != null
169
+
then throw "motdFile and motd cannot be both set"
170
+
else ''
171
+
${optionalString (cfg.motdFile != null) "cat ${cfg.motdFile} > ${cfg.stateDir}/motd"}
172
+
${optionalString (cfg.motd != null) ''printf "${cfg.motd}" > ${cfg.stateDir}/motd''}
173
+
'';
174
+
in ''
175
+
mkdir -p "${cfg.repo.scanPath}"
176
+
chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.repo.scanPath}"
177
+
178
+
mkdir -p "${cfg.stateDir}/.config/git"
179
+
cat > "${cfg.stateDir}/.config/git/config" << EOF
180
+
[user]
181
+
name = Git User
182
+
email = git@example.com
183
+
[receive]
184
+
advertisePushOptions = true
185
+
EOF
186
+
${setMotd}
187
+
chown -R ${cfg.gitUser}:${cfg.gitUser} "${cfg.stateDir}"
188
+
'';
189
+
188
190
serviceConfig = {
189
191
User = cfg.gitUser;
192
+
PermissionsStartOnly = true;
190
193
WorkingDirectory = cfg.stateDir;
191
194
Environment = [
192
195
"KNOT_REPO_SCAN_PATH=${cfg.repo.scanPath}"
+2
-2
nix/modules/spindle.nix
···
111
111
"SPINDLE_SERVER_SECRETS_PROVIDER=${cfg.server.secrets.provider}"
112
112
"SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=${cfg.server.secrets.openbao.proxyAddr}"
113
113
"SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=${cfg.server.secrets.openbao.mount}"
114
-
"SPINDLE_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
115
-
"SPINDLE_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
114
+
"SPINDLE_NIXERY_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
115
+
"SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
116
116
];
117
117
ExecStart = "${cfg.package}/bin/spindle";
118
118
Restart = "always";
+1
-1
nix/pkgs/appview-static-files.nix
···
22
22
cp -rf ${lucide-src}/*.svg icons/
23
23
cp -f ${inter-fonts-src}/web/InterVariable*.woff2 fonts/
24
24
cp -f ${inter-fonts-src}/web/InterDisplay*.woff2 fonts/
25
-
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono-Regular.woff2 fonts/
25
+
cp -f ${ibm-plex-mono-src}/fonts/complete/woff2/IBMPlexMono*.woff2 fonts/
26
26
# tailwindcss -c $src/tailwind.config.js -i $src/input.css -o tw.css won't work
27
27
# for whatever reason (produces broken css), so we are doing this instead
28
28
cd ${src} && ${tailwindcss}/bin/tailwindcss -i input.css -o $out/tw.css
+54
-16
nix/vm.nix
···
1
1
{
2
2
nixpkgs,
3
3
system,
4
+
hostSystem,
4
5
self,
5
6
}: let
6
7
envVar = name: let
···
16
17
self.nixosModules.knot
17
18
self.nixosModules.spindle
18
19
({
20
+
lib,
19
21
config,
20
22
pkgs,
21
23
...
22
24
}: {
23
-
nixos-shell = {
24
-
inheritPath = false;
25
-
mounts = {
26
-
mountHome = false;
27
-
mountNixProfile = false;
28
-
};
29
-
};
30
-
virtualisation = {
25
+
virtualisation.vmVariant.virtualisation = {
26
+
host.pkgs = import nixpkgs {system = hostSystem;};
27
+
28
+
graphics = false;
31
29
memorySize = 2048;
32
30
diskSize = 10 * 1024;
33
31
cores = 2;
···
51
49
guest.port = 6555;
52
50
}
53
51
];
52
+
sharedDirectories = {
53
+
# We can't use the 9p mounts directly for most of these
54
+
# as SQLite is incompatible with them. So instead we
55
+
# mount the shared directories to a different location
56
+
# and copy the contents around on service start/stop.
57
+
knotData = {
58
+
source = "$TANGLED_VM_DATA_DIR/knot";
59
+
target = "/mnt/knot-data";
60
+
};
61
+
spindleData = {
62
+
source = "$TANGLED_VM_DATA_DIR/spindle";
63
+
target = "/mnt/spindle-data";
64
+
};
65
+
spindleLogs = {
66
+
source = "$TANGLED_VM_DATA_DIR/spindle-logs";
67
+
target = "/var/log/spindle";
68
+
};
69
+
};
54
70
};
71
+
# This is fine because every port forwarded to the host is explicitly listed above, so the guest does not need its own firewall
72
+
networking.firewall.enable = false;
55
73
services.getty.autologinUser = "root";
56
74
environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
57
-
systemd.tmpfiles.rules = let
58
-
u = config.services.tangled-knot.gitUser;
59
-
g = config.services.tangled-knot.gitUser;
60
-
in [
61
-
"d /var/lib/knot 0770 ${u} ${g} - -" # Create the directory first
62
-
"f+ /var/lib/knot/secret 0660 ${u} ${g} - KNOT_SERVER_SECRET=${envVar "TANGLED_VM_KNOT_SECRET"}"
63
-
];
64
75
services.tangled-knot = {
65
76
enable = true;
66
77
motd = "Welcome to the development knot!\n";
67
78
server = {
68
-
secretFile = "/var/lib/knot/secret";
79
+
secretFile = builtins.toFile "knot-secret" ("KNOT_SERVER_SECRET=" + (envVar "TANGLED_VM_KNOT_SECRET"));
69
80
hostname = "localhost:6000";
70
81
listenAddr = "0.0.0.0:6000";
71
82
};
···
81
92
provider = "sqlite";
82
93
};
83
94
};
95
+
};
96
+
users = {
97
+
# So we don't have to deal with permission clashing between
98
+
# blank disk VMs and existing state
99
+
users.${config.services.tangled-knot.gitUser}.uid = 666;
100
+
groups.${config.services.tangled-knot.gitUser}.gid = 666;
101
+
102
+
# TODO: separate spindle user
103
+
};
104
+
systemd.services = let
105
+
mkDataSyncScripts = source: target: {
106
+
enableStrictShellChecks = true;
107
+
108
+
preStart = lib.mkBefore ''
109
+
mkdir -p ${target}
110
+
${lib.getExe pkgs.rsync} -a ${source}/ ${target}
111
+
'';
112
+
113
+
postStop = lib.mkAfter ''
114
+
${lib.getExe pkgs.rsync} -a ${target}/ ${source}
115
+
'';
116
+
117
+
serviceConfig.PermissionsStartOnly = true;
118
+
};
119
+
in {
120
+
knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
121
+
spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
84
122
};
85
123
})
86
124
];
+1
-1
rbac/rbac.go
+1
-1
rbac/rbac_test.go
+4
-4
spindle/config/config.go
···
16
16
Dev bool `env:"DEV, default=false"`
17
17
Owner string `env:"OWNER, required"`
18
18
Secrets Secrets `env:",prefix=SECRETS_"`
19
+
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
19
20
}
20
21
21
22
func (s Server) Did() syntax.DID {
···
32
33
Mount string `env:"MOUNT, default=spindle"`
33
34
}
34
35
35
-
type Pipelines struct {
36
+
type NixeryPipelines struct {
36
37
Nixery string `env:"NIXERY, default=nixery.tangled.sh"`
37
38
WorkflowTimeout string `env:"WORKFLOW_TIMEOUT, default=5m"`
38
-
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
39
39
}
40
40
41
41
type Config struct {
42
-
Server Server `env:",prefix=SPINDLE_SERVER_"`
43
-
Pipelines Pipelines `env:",prefix=SPINDLE_PIPELINES_"`
42
+
Server Server `env:",prefix=SPINDLE_SERVER_"`
43
+
NixeryPipelines NixeryPipelines `env:",prefix=SPINDLE_NIXERY_PIPELINES_"`
44
44
}
45
45
46
46
func Load(ctx context.Context) (*Config, error) {
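The nixery-specific settings move under the `SPINDLE_NIXERY_PIPELINES_` prefix. The struct tags look like sethvargo/go-envconfig (an assumption; the diff does not show the import), in which case the prefix on the parent field composes with each nested tag, so WorkflowTimeout resolves to `SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT`. A minimal sketch under that assumption:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/sethvargo/go-envconfig"
)

// NixeryPipelines mirrors the shape of the config in the diff; the library
// choice is an assumption inferred from the tag syntax.
type NixeryPipelines struct {
	Nixery          string `env:"NIXERY, default=nixery.tangled.sh"`
	WorkflowTimeout string `env:"WORKFLOW_TIMEOUT, default=5m"`
}

type Config struct {
	NixeryPipelines NixeryPipelines `env:",prefix=SPINDLE_NIXERY_PIPELINES_"`
}

func main() {
	// The prefix on the parent field is prepended to every tag inside the
	// nested struct, so this variable feeds WorkflowTimeout.
	os.Setenv("SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT", "10m")

	var cfg Config
	if err := envconfig.Process(context.Background(), &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.NixeryPipelines.Nixery, cfg.NixeryPipelines.WorkflowTimeout)
}
```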
+14
-10
spindle/db/db.go
···
2
2
3
3
import (
4
4
"database/sql"
5
+
"strings"
5
6
6
7
_ "github.com/mattn/go-sqlite3"
7
8
)
···
11
12
}
12
13
13
14
func Make(dbPath string) (*DB, error) {
14
-
db, err := sql.Open("sqlite3", dbPath)
15
+
// https://github.com/mattn/go-sqlite3#connection-string
16
+
opts := []string{
17
+
"_foreign_keys=1",
18
+
"_journal_mode=WAL",
19
+
"_synchronous=NORMAL",
20
+
"_auto_vacuum=incremental",
21
+
}
22
+
23
+
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
15
24
if err != nil {
16
25
return nil, err
17
26
}
18
27
19
-
_, err = db.Exec(`
20
-
pragma journal_mode = WAL;
21
-
pragma synchronous = normal;
22
-
pragma foreign_keys = on;
23
-
pragma temp_store = memory;
24
-
pragma mmap_size = 30000000000;
25
-
pragma page_size = 32768;
26
-
pragma auto_vacuum = incremental;
27
-
pragma busy_timeout = 5000;
28
+
// NOTE: If any other migration is added here, you MUST
29
+
// copy the pattern in appview: use a single sql.Conn
30
+
// for every migration.
28
31
32
+
_, err = db.Exec(`
29
33
create table if not exists _jetstream (
30
34
id integer primary key autoincrement,
31
35
last_time_us integer not null
-21
spindle/engine/ansi_stripper.go
···
1
-
package engine
2
-
3
-
import (
4
-
"io"
5
-
6
-
"regexp"
7
-
)
8
-
9
-
// regex to match ANSI escape codes (e.g., color codes, cursor moves)
10
-
const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"
11
-
12
-
var re = regexp.MustCompile(ansi)
13
-
14
-
type ansiStrippingWriter struct {
15
-
underlying io.Writer
16
-
}
17
-
18
-
func (w *ansiStrippingWriter) Write(p []byte) (int, error) {
19
-
clean := re.ReplaceAll(p, []byte{})
20
-
return w.underlying.Write(clean)
21
-
}
+68
-415
spindle/engine/engine.go
···
4
4
"context"
5
5
"errors"
6
6
"fmt"
7
-
"io"
8
7
"log/slog"
9
-
"os"
10
-
"strings"
11
-
"sync"
12
-
"time"
13
8
14
9
securejoin "github.com/cyphar/filepath-securejoin"
15
-
"github.com/docker/docker/api/types/container"
16
-
"github.com/docker/docker/api/types/image"
17
-
"github.com/docker/docker/api/types/mount"
18
-
"github.com/docker/docker/api/types/network"
19
-
"github.com/docker/docker/api/types/volume"
20
-
"github.com/docker/docker/client"
21
-
"github.com/docker/docker/pkg/stdcopy"
22
10
"golang.org/x/sync/errgroup"
23
-
"tangled.sh/tangled.sh/core/log"
24
11
"tangled.sh/tangled.sh/core/notifier"
25
12
"tangled.sh/tangled.sh/core/spindle/config"
26
13
"tangled.sh/tangled.sh/core/spindle/db"
···
28
15
"tangled.sh/tangled.sh/core/spindle/secrets"
29
16
)
30
17
31
-
const (
32
-
workspaceDir = "/tangled/workspace"
18
+
var (
19
+
ErrTimedOut = errors.New("timed out")
20
+
ErrWorkflowFailed = errors.New("workflow failed")
33
21
)
34
22
35
-
type cleanupFunc func(context.Context) error
36
-
37
-
type Engine struct {
38
-
docker client.APIClient
39
-
l *slog.Logger
40
-
db *db.DB
41
-
n *notifier.Notifier
42
-
cfg *config.Config
43
-
vault secrets.Manager
44
-
45
-
cleanupMu sync.Mutex
46
-
cleanup map[string][]cleanupFunc
47
-
}
48
-
49
-
func New(ctx context.Context, cfg *config.Config, db *db.DB, n *notifier.Notifier, vault secrets.Manager) (*Engine, error) {
50
-
dcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
51
-
if err != nil {
52
-
return nil, err
53
-
}
54
-
55
-
l := log.FromContext(ctx).With("component", "spindle")
56
-
57
-
e := &Engine{
58
-
docker: dcli,
59
-
l: l,
60
-
db: db,
61
-
n: n,
62
-
cfg: cfg,
63
-
vault: vault,
64
-
}
65
-
66
-
e.cleanup = make(map[string][]cleanupFunc)
67
-
68
-
return e, nil
69
-
}
70
-
71
-
func (e *Engine) StartWorkflows(ctx context.Context, pipeline *models.Pipeline, pipelineId models.PipelineId) {
72
-
e.l.Info("starting all workflows in parallel", "pipeline", pipelineId)
23
+
func StartWorkflows(l *slog.Logger, vault secrets.Manager, cfg *config.Config, db *db.DB, n *notifier.Notifier, ctx context.Context, pipeline *models.Pipeline, pipelineId models.PipelineId) {
24
+
l.Info("starting all workflows in parallel", "pipeline", pipelineId)
73
25
74
26
// extract secrets
75
27
var allSecrets []secrets.UnlockedSecret
76
28
if didSlashRepo, err := securejoin.SecureJoin(pipeline.RepoOwner, pipeline.RepoName); err == nil {
77
-
if res, err := e.vault.GetSecretsUnlocked(ctx, secrets.DidSlashRepo(didSlashRepo)); err == nil {
29
+
if res, err := vault.GetSecretsUnlocked(ctx, secrets.DidSlashRepo(didSlashRepo)); err == nil {
78
30
allSecrets = res
79
31
}
80
32
}
81
33
82
-
workflowTimeoutStr := e.cfg.Pipelines.WorkflowTimeout
83
-
workflowTimeout, err := time.ParseDuration(workflowTimeoutStr)
84
-
if err != nil {
85
-
e.l.Error("failed to parse workflow timeout", "error", err, "timeout", workflowTimeoutStr)
86
-
workflowTimeout = 5 * time.Minute
87
-
}
88
-
e.l.Info("using workflow timeout", "timeout", workflowTimeout)
89
-
90
34
eg, ctx := errgroup.WithContext(ctx)
91
-
for _, w := range pipeline.Workflows {
92
-
eg.Go(func() error {
93
-
wid := models.WorkflowId{
94
-
PipelineId: pipelineId,
95
-
Name: w.Name,
96
-
}
97
-
98
-
err := e.db.StatusRunning(wid, e.n)
99
-
if err != nil {
100
-
return err
101
-
}
35
+
for eng, wfs := range pipeline.Workflows {
36
+
workflowTimeout := eng.WorkflowTimeout()
37
+
l.Info("using workflow timeout", "timeout", workflowTimeout)
102
38
103
-
err = e.SetupWorkflow(ctx, wid)
104
-
if err != nil {
105
-
e.l.Error("setting up worklow", "wid", wid, "err", err)
106
-
return err
107
-
}
108
-
defer e.DestroyWorkflow(ctx, wid)
109
-
110
-
reader, err := e.docker.ImagePull(ctx, w.Image, image.PullOptions{})
111
-
if err != nil {
112
-
e.l.Error("pipeline image pull failed!", "image", w.Image, "workflowId", wid, "error", err.Error())
39
+
for _, w := range wfs {
40
+
eg.Go(func() error {
41
+
wid := models.WorkflowId{
42
+
PipelineId: pipelineId,
43
+
Name: w.Name,
44
+
}
113
45
114
-
err := e.db.StatusFailed(wid, err.Error(), -1, e.n)
46
+
err := db.StatusRunning(wid, n)
115
47
if err != nil {
116
48
return err
117
49
}
118
50
119
-
return fmt.Errorf("pulling image: %w", err)
120
-
}
121
-
defer reader.Close()
122
-
io.Copy(os.Stdout, reader)
123
-
124
-
ctx, cancel := context.WithTimeout(ctx, workflowTimeout)
125
-
defer cancel()
51
+
err = eng.SetupWorkflow(ctx, wid, &w)
52
+
if err != nil {
53
+
// TODO(winter): Should this always set StatusFailed?
54
+
// In the original, we only do in a subset of cases.
55
+
l.Error("setting up worklow", "wid", wid, "err", err)
126
56
127
-
err = e.StartSteps(ctx, wid, w, allSecrets)
128
-
if err != nil {
129
-
if errors.Is(err, ErrTimedOut) {
130
-
dbErr := e.db.StatusTimeout(wid, e.n)
131
-
if dbErr != nil {
132
-
return dbErr
57
+
destroyErr := eng.DestroyWorkflow(ctx, wid)
58
+
if destroyErr != nil {
59
+
l.Error("failed to destroy workflow after setup failure", "error", destroyErr)
133
60
}
134
-
} else {
135
-
dbErr := e.db.StatusFailed(wid, err.Error(), -1, e.n)
61
+
62
+
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
136
63
if dbErr != nil {
137
64
return dbErr
138
65
}
66
+
return err
139
67
}
68
+
defer eng.DestroyWorkflow(ctx, wid)
140
69
141
-
return fmt.Errorf("starting steps image: %w", err)
142
-
}
70
+
wfLogger, err := models.NewWorkflowLogger(cfg.Server.LogDir, wid)
71
+
if err != nil {
72
+
l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
73
+
wfLogger = nil
74
+
} else {
75
+
defer wfLogger.Close()
76
+
}
143
77
144
-
err = e.db.StatusSuccess(wid, e.n)
145
-
if err != nil {
146
-
return err
147
-
}
78
+
ctx, cancel := context.WithTimeout(ctx, workflowTimeout)
79
+
defer cancel()
148
80
149
-
return nil
150
-
})
151
-
}
81
+
for stepIdx, step := range w.Steps {
82
+
if wfLogger != nil {
83
+
ctl := wfLogger.ControlWriter(stepIdx, step)
84
+
ctl.Write([]byte(step.Name()))
85
+
}
152
86
153
-
if err = eg.Wait(); err != nil {
154
-
e.l.Error("failed to run one or more workflows", "err", err)
155
-
} else {
156
-
e.l.Error("successfully ran full pipeline")
157
-
}
158
-
}
87
+
err = eng.RunStep(ctx, wid, &w, stepIdx, allSecrets, wfLogger)
88
+
if err != nil {
89
+
if errors.Is(err, ErrTimedOut) {
90
+
dbErr := db.StatusTimeout(wid, n)
91
+
if dbErr != nil {
92
+
return dbErr
93
+
}
94
+
} else {
95
+
dbErr := db.StatusFailed(wid, err.Error(), -1, n)
96
+
if dbErr != nil {
97
+
return dbErr
98
+
}
99
+
}
159
100
160
-
// SetupWorkflow sets up a new network for the workflow and volumes for
161
-
// the workspace and Nix store. These are persisted across steps and are
162
-
// destroyed at the end of the workflow.
163
-
func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId) error {
164
-
e.l.Info("setting up workflow", "workflow", wid)
101
+
return fmt.Errorf("starting steps image: %w", err)
102
+
}
103
+
}
165
104
166
-
_, err := e.docker.VolumeCreate(ctx, volume.CreateOptions{
167
-
Name: workspaceVolume(wid),
168
-
Driver: "local",
169
-
})
170
-
if err != nil {
171
-
return err
172
-
}
173
-
e.registerCleanup(wid, func(ctx context.Context) error {
174
-
return e.docker.VolumeRemove(ctx, workspaceVolume(wid), true)
175
-
})
176
-
177
-
_, err = e.docker.VolumeCreate(ctx, volume.CreateOptions{
178
-
Name: nixVolume(wid),
179
-
Driver: "local",
180
-
})
181
-
if err != nil {
182
-
return err
183
-
}
184
-
e.registerCleanup(wid, func(ctx context.Context) error {
185
-
return e.docker.VolumeRemove(ctx, nixVolume(wid), true)
186
-
})
187
-
188
-
_, err = e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
189
-
Driver: "bridge",
190
-
})
191
-
if err != nil {
192
-
return err
193
-
}
194
-
e.registerCleanup(wid, func(ctx context.Context) error {
195
-
return e.docker.NetworkRemove(ctx, networkName(wid))
196
-
})
105
+
err = db.StatusSuccess(wid, n)
106
+
if err != nil {
107
+
return err
108
+
}
197
109
198
-
return nil
199
-
}
200
-
201
-
// StartSteps starts all steps sequentially with the same base image.
202
-
// ONLY marks pipeline as failed if container's exit code is non-zero.
203
-
// All other errors are bubbled up.
204
-
// Fixed version of the step execution logic
205
-
func (e *Engine) StartSteps(ctx context.Context, wid models.WorkflowId, w models.Workflow, secrets []secrets.UnlockedSecret) error {
206
-
workflowEnvs := ConstructEnvs(w.Environment)
207
-
for _, s := range secrets {
208
-
workflowEnvs.AddEnv(s.Key, s.Value)
209
-
}
210
-
211
-
for stepIdx, step := range w.Steps {
212
-
select {
213
-
case <-ctx.Done():
214
-
return ctx.Err()
215
-
default:
216
-
}
217
-
218
-
envs := append(EnvVars(nil), workflowEnvs...)
219
-
for k, v := range step.Environment {
220
-
envs.AddEnv(k, v)
221
-
}
222
-
envs.AddEnv("HOME", workspaceDir)
223
-
e.l.Debug("envs for step", "step", step.Name, "envs", envs.Slice())
224
-
225
-
hostConfig := hostConfig(wid)
226
-
resp, err := e.docker.ContainerCreate(ctx, &container.Config{
227
-
Image: w.Image,
228
-
Cmd: []string{"bash", "-c", step.Command},
229
-
WorkingDir: workspaceDir,
230
-
Tty: false,
231
-
Hostname: "spindle",
232
-
Env: envs.Slice(),
233
-
}, hostConfig, nil, nil, "")
234
-
defer e.DestroyStep(ctx, resp.ID)
235
-
if err != nil {
236
-
return fmt.Errorf("creating container: %w", err)
237
-
}
238
-
239
-
err = e.docker.NetworkConnect(ctx, networkName(wid), resp.ID, nil)
240
-
if err != nil {
241
-
return fmt.Errorf("connecting network: %w", err)
242
-
}
243
-
244
-
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
245
-
if err != nil {
246
-
return err
247
-
}
248
-
e.l.Info("started container", "name", resp.ID, "step", step.Name)
249
-
250
-
// start tailing logs in background
251
-
tailDone := make(chan error, 1)
252
-
go func() {
253
-
tailDone <- e.TailStep(ctx, resp.ID, wid, stepIdx, step)
254
-
}()
255
-
256
-
// wait for container completion or timeout
257
-
waitDone := make(chan struct{})
258
-
var state *container.State
259
-
var waitErr error
260
-
261
-
go func() {
262
-
defer close(waitDone)
263
-
state, waitErr = e.WaitStep(ctx, resp.ID)
264
-
}()
265
-
266
-
select {
267
-
case <-waitDone:
268
-
269
-
// wait for tailing to complete
270
-
<-tailDone
271
-
272
-
case <-ctx.Done():
273
-
e.l.Warn("step timed out; killing container", "container", resp.ID, "step", step.Name)
274
-
err = e.DestroyStep(context.Background(), resp.ID)
275
-
if err != nil {
276
-
e.l.Error("failed to destroy step", "container", resp.ID, "error", err)
277
-
}
278
-
279
-
// wait for both goroutines to finish
280
-
<-waitDone
281
-
<-tailDone
282
-
283
-
return ErrTimedOut
284
-
}
285
-
286
-
select {
287
-
case <-ctx.Done():
288
-
return ctx.Err()
289
-
default:
290
-
}
291
-
292
-
if waitErr != nil {
293
-
return waitErr
294
-
}
295
-
296
-
err = e.DestroyStep(ctx, resp.ID)
297
-
if err != nil {
298
-
return err
299
-
}
300
-
301
-
if state.ExitCode != 0 {
302
-
e.l.Error("workflow failed!", "workflow_id", wid.String(), "error", state.Error, "exit_code", state.ExitCode, "oom_killed", state.OOMKilled)
303
-
if state.OOMKilled {
304
-
return ErrOOMKilled
305
-
}
306
-
return ErrWorkflowFailed
110
+
return nil
111
+
})
307
112
}
308
113
}
309
114
310
-
return nil
311
-
}
312
-
313
-
func (e *Engine) WaitStep(ctx context.Context, containerID string) (*container.State, error) {
314
-
wait, errCh := e.docker.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
315
-
select {
316
-
case err := <-errCh:
317
-
if err != nil {
318
-
return nil, err
319
-
}
320
-
case <-wait:
321
-
}
322
-
323
-
e.l.Info("waited for container", "name", containerID)
324
-
325
-
info, err := e.docker.ContainerInspect(ctx, containerID)
326
-
if err != nil {
327
-
return nil, err
328
-
}
329
-
330
-
return info.State, nil
331
-
}
332
-
333
-
func (e *Engine) TailStep(ctx context.Context, containerID string, wid models.WorkflowId, stepIdx int, step models.Step) error {
334
-
wfLogger, err := NewWorkflowLogger(e.cfg.Pipelines.LogDir, wid)
335
-
if err != nil {
336
-
e.l.Warn("failed to setup step logger; logs will not be persisted", "error", err)
337
-
return err
115
+
if err := eg.Wait(); err != nil {
116
+
l.Error("failed to run one or more workflows", "err", err)
117
+
} else {
118
+
l.Error("successfully ran full pipeline")
338
119
}
339
-
defer wfLogger.Close()
340
-
341
-
ctl := wfLogger.ControlWriter(stepIdx, step)
342
-
ctl.Write([]byte(step.Name))
343
-
344
-
logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
345
-
Follow: true,
346
-
ShowStdout: true,
347
-
ShowStderr: true,
348
-
Details: false,
349
-
Timestamps: false,
350
-
})
351
-
if err != nil {
352
-
return err
353
-
}
354
-
355
-
_, err = stdcopy.StdCopy(
356
-
wfLogger.DataWriter("stdout"),
357
-
wfLogger.DataWriter("stderr"),
358
-
logs,
359
-
)
360
-
if err != nil && err != io.EOF && !errors.Is(err, context.DeadlineExceeded) {
361
-
return fmt.Errorf("failed to copy logs: %w", err)
362
-
}
363
-
364
-
return nil
365
-
}
366
-
367
-
func (e *Engine) DestroyStep(ctx context.Context, containerID string) error {
368
-
err := e.docker.ContainerKill(ctx, containerID, "9") // SIGKILL
369
-
if err != nil && !isErrContainerNotFoundOrNotRunning(err) {
370
-
return err
371
-
}
372
-
373
-
if err := e.docker.ContainerRemove(ctx, containerID, container.RemoveOptions{
374
-
RemoveVolumes: true,
375
-
RemoveLinks: false,
376
-
Force: false,
377
-
}); err != nil && !isErrContainerNotFoundOrNotRunning(err) {
378
-
return err
379
-
}
380
-
381
-
return nil
382
-
}
383
-
384
-
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
385
-
e.cleanupMu.Lock()
386
-
key := wid.String()
387
-
388
-
fns := e.cleanup[key]
389
-
delete(e.cleanup, key)
390
-
e.cleanupMu.Unlock()
391
-
392
-
for _, fn := range fns {
393
-
if err := fn(ctx); err != nil {
394
-
e.l.Error("failed to cleanup workflow resource", "workflowId", wid, "error", err)
395
-
}
396
-
}
397
-
return nil
398
-
}
399
-
400
-
func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
401
-
e.cleanupMu.Lock()
402
-
defer e.cleanupMu.Unlock()
403
-
404
-
key := wid.String()
405
-
e.cleanup[key] = append(e.cleanup[key], fn)
406
-
}
407
-
408
-
func workspaceVolume(wid models.WorkflowId) string {
409
-
return fmt.Sprintf("workspace-%s", wid)
410
-
}
411
-
412
-
func nixVolume(wid models.WorkflowId) string {
413
-
return fmt.Sprintf("nix-%s", wid)
414
-
}
415
-
416
-
func networkName(wid models.WorkflowId) string {
417
-
return fmt.Sprintf("workflow-network-%s", wid)
418
-
}
419
-
420
-
func hostConfig(wid models.WorkflowId) *container.HostConfig {
421
-
hostConfig := &container.HostConfig{
422
-
Mounts: []mount.Mount{
423
-
{
424
-
Type: mount.TypeVolume,
425
-
Source: workspaceVolume(wid),
426
-
Target: workspaceDir,
427
-
},
428
-
{
429
-
Type: mount.TypeVolume,
430
-
Source: nixVolume(wid),
431
-
Target: "/nix",
432
-
},
433
-
{
434
-
Type: mount.TypeTmpfs,
435
-
Target: "/tmp",
436
-
ReadOnly: false,
437
-
TmpfsOptions: &mount.TmpfsOptions{
438
-
Mode: 0o1777, // world-writeable sticky bit
439
-
Options: [][]string{
440
-
{"exec"},
441
-
},
442
-
},
443
-
},
444
-
{
445
-
Type: mount.TypeVolume,
446
-
Source: "etc-nix-" + wid.String(),
447
-
Target: "/etc/nix",
448
-
},
449
-
},
450
-
ReadonlyRootfs: false,
451
-
CapDrop: []string{"ALL"},
452
-
CapAdd: []string{"CAP_DAC_OVERRIDE"},
453
-
SecurityOpt: []string{"no-new-privileges"},
454
-
ExtraHosts: []string{"host.docker.internal:host-gateway"},
455
-
}
456
-
457
-
return hostConfig
458
-
}
459
-
460
-
// thanks woodpecker
461
-
func isErrContainerNotFoundOrNotRunning(err error) bool {
462
-
// Error response from daemon: Cannot kill container: ...: No such container: ...
463
-
// Error response from daemon: Cannot kill container: ...: Container ... is not running"
464
-
// Error response from podman daemon: can only kill running containers. ... is in state exited
465
-
// Error: No such container: ...
466
-
return err != nil && (strings.Contains(err.Error(), "No such container") || strings.Contains(err.Error(), "is not running") || strings.Contains(err.Error(), "can only kill running containers"))
467
120
}
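The refactored StartWorkflows fans workflows out with errgroup and bounds each one with its own timeout context derived from the engine. A stand-alone sketch of that concurrency shape with placeholder work (runWorkflow and the workflow names are hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// runWorkflow stands in for a workflow run; it respects ctx cancellation.
func runWorkflow(ctx context.Context, name string) error {
	select {
	case <-time.After(100 * time.Millisecond): // pretend work
		return nil
	case <-ctx.Done():
		return fmt.Errorf("workflow %s: %w", name, ctx.Err())
	}
}

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	for _, name := range []string{"build", "test", "lint"} {
		name := name // capture per iteration (implicit on Go 1.22+)
		eg.Go(func() error {
			// Each workflow gets its own deadline; a returned error cancels
			// ctx and therefore signals the sibling workflows as well.
			wctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
			defer cancel()
			return runWorkflow(wctx, name)
		})
	}

	if err := eg.Wait(); err != nil {
		fmt.Println("one or more workflows failed:", err)
	} else {
		fmt.Println("all workflows succeeded")
	}
}
```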
-28 spindle/engine/envs.go
(file removed; its contents are identical to the new spindle/engines/nixery/envs.go shown below, apart from the package clause)
-48 spindle/engine/envs_test.go
(file removed; its contents are identical to the new spindle/engines/nixery/envs_test.go shown below, apart from the package clause)
-9 spindle/engine/errors.go
-84 spindle/engine/logger.go
(file removed; the workflow logger moves to spindle/models/logger.go, shown later in this diff, with only mechanical changes: package models, the models-local WorkflowId and Step types, and the control writer returning len(w.step.Name()) now that Step.Name is a method)
+21 spindle/engines/nixery/ansi_stripper.go (new file; all lines added)

package nixery

import (
	"io"

	"regexp"
)

// regex to match ANSI escape codes (e.g., color codes, cursor moves)
const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"

var re = regexp.MustCompile(ansi)

type ansiStrippingWriter struct {
	underlying io.Writer
}

func (w *ansiStrippingWriter) Write(p []byte) (int, error) {
	clean := re.ReplaceAll(p, []byte{})
	return w.underlying.Write(clean)
}
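A minimal usage sketch (hypothetical, written as if it lived in the same package; the buffer and escape sequence are illustrative, not part of the change):

package nixery

import (
	"bytes"
	"fmt"
)

// demoStrip shows that ANSI colour codes never reach the underlying writer.
func demoStrip() {
	var buf bytes.Buffer
	w := &ansiStrippingWriter{underlying: &buf}
	_, _ = w.Write([]byte("\x1b[32mok\x1b[0m\n"))
	fmt.Print(buf.String()) // prints: ok
}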
+418 spindle/engines/nixery/engine.go (new file; all lines added)

package nixery

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path"
	"runtime"
	"sync"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"gopkg.in/yaml.v3"
	"tangled.sh/tangled.sh/core/api/tangled"
	"tangled.sh/tangled.sh/core/log"
	"tangled.sh/tangled.sh/core/spindle/config"
	"tangled.sh/tangled.sh/core/spindle/engine"
	"tangled.sh/tangled.sh/core/spindle/models"
	"tangled.sh/tangled.sh/core/spindle/secrets"
)

const (
	workspaceDir = "/tangled/workspace"
	homeDir      = "/tangled/home"
)

type cleanupFunc func(context.Context) error

type Engine struct {
	docker client.APIClient
	l      *slog.Logger
	cfg    *config.Config

	cleanupMu sync.Mutex
	cleanup   map[string][]cleanupFunc
}

type Step struct {
	name        string
	kind        models.StepKind
	command     string
	environment map[string]string
}

func (s Step) Name() string {
	return s.name
}

func (s Step) Command() string {
	return s.command
}

func (s Step) Kind() models.StepKind {
	return s.kind
}

// setupSteps get added to start of Steps
type setupSteps []models.Step

// addStep adds a step to the beginning of the workflow's steps.
func (ss *setupSteps) addStep(step models.Step) {
	*ss = append(*ss, step)
}

type addlFields struct {
	image     string
	container string
	env       map[string]string
}

func (e *Engine) InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*models.Workflow, error) {
	swf := &models.Workflow{}
	addl := addlFields{}

	dwf := &struct {
		Steps []struct {
			Command     string            `yaml:"command"`
			Name        string            `yaml:"name"`
			Environment map[string]string `yaml:"environment"`
		} `yaml:"steps"`
		Dependencies map[string][]string `yaml:"dependencies"`
		Environment  map[string]string   `yaml:"environment"`
	}{}
	err := yaml.Unmarshal([]byte(twf.Raw), &dwf)
	if err != nil {
		return nil, err
	}

	for _, dstep := range dwf.Steps {
		sstep := Step{}
		sstep.environment = dstep.Environment
		sstep.command = dstep.Command
		sstep.name = dstep.Name
		sstep.kind = models.StepKindUser
		swf.Steps = append(swf.Steps, sstep)
	}
	swf.Name = twf.Name
	addl.env = dwf.Environment
	addl.image = workflowImage(dwf.Dependencies, e.cfg.NixeryPipelines.Nixery)

	setup := &setupSteps{}

	setup.addStep(nixConfStep())
	setup.addStep(cloneStep(twf, *tpl.TriggerMetadata, e.cfg.Server.Dev))
	// this step could be empty
	if s := dependencyStep(dwf.Dependencies); s != nil {
		setup.addStep(*s)
	}

	// append setup steps in order to the start of workflow steps
	swf.Steps = append(*setup, swf.Steps...)
	swf.Data = addl

	return swf, nil
}

func (e *Engine) WorkflowTimeout() time.Duration {
	workflowTimeoutStr := e.cfg.NixeryPipelines.WorkflowTimeout
	workflowTimeout, err := time.ParseDuration(workflowTimeoutStr)
	if err != nil {
		e.l.Error("failed to parse workflow timeout", "error", err, "timeout", workflowTimeoutStr)
		workflowTimeout = 5 * time.Minute
	}

	return workflowTimeout
}

func workflowImage(deps map[string][]string, nixery string) string {
	var dependencies string
	for reg, ds := range deps {
		if reg == "nixpkgs" {
			dependencies = path.Join(ds...)
		}
	}

	// load defaults from somewhere else
	dependencies = path.Join(dependencies, "bash", "git", "coreutils", "nix")

	if runtime.GOARCH == "arm64" {
		dependencies = path.Join("arm64", dependencies)
	}

	return path.Join(nixery, dependencies)
}

func New(ctx context.Context, cfg *config.Config) (*Engine, error) {
	dcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, err
	}

	l := log.FromContext(ctx).With("component", "spindle")

	e := &Engine{
		docker: dcli,
		l:      l,
		cfg:    cfg,
	}

	e.cleanup = make(map[string][]cleanupFunc)

	return e, nil
}

func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId, wf *models.Workflow) error {
	e.l.Info("setting up workflow", "workflow", wid)

	_, err := e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
		Driver: "bridge",
	})
	if err != nil {
		return err
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		return e.docker.NetworkRemove(ctx, networkName(wid))
	})

	addl := wf.Data.(addlFields)

	reader, err := e.docker.ImagePull(ctx, addl.image, image.PullOptions{})
	if err != nil {
		e.l.Error("pipeline image pull failed!", "image", addl.image, "workflowId", wid, "error", err.Error())

		return fmt.Errorf("pulling image: %w", err)
	}
	defer reader.Close()
	io.Copy(os.Stdout, reader)

	resp, err := e.docker.ContainerCreate(ctx, &container.Config{
		Image:      addl.image,
		Cmd:        []string{"cat"},
		OpenStdin:  true, // so cat stays alive :3
		Tty:        false,
		Hostname:   "spindle",
		WorkingDir: workspaceDir,
		// TODO(winter): investigate whether environment variables passed here
		// get propagated to ContainerExec processes
	}, &container.HostConfig{
		Mounts: []mount.Mount{
			{
				Type:     mount.TypeTmpfs,
				Target:   "/tmp",
				ReadOnly: false,
				TmpfsOptions: &mount.TmpfsOptions{
					Mode: 0o1777, // world-writeable sticky bit
					Options: [][]string{
						{"exec"},
					},
				},
			},
		},
		ReadonlyRootfs: false,
		CapDrop:        []string{"ALL"},
		CapAdd:         []string{"CAP_DAC_OVERRIDE"},
		SecurityOpt:    []string{"no-new-privileges"},
		ExtraHosts:     []string{"host.docker.internal:host-gateway"},
	}, nil, nil, "")
	if err != nil {
		return fmt.Errorf("creating container: %w", err)
	}
	e.registerCleanup(wid, func(ctx context.Context) error {
		err = e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
		if err != nil {
			return err
		}

		return e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
			RemoveVolumes: true,
			RemoveLinks:   false,
			Force:         false,
		})
	})

	err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
	if err != nil {
		return fmt.Errorf("starting container: %w", err)
	}

	mkExecResp, err := e.docker.ContainerExecCreate(ctx, resp.ID, container.ExecOptions{
		Cmd:          []string{"mkdir", "-p", workspaceDir, homeDir},
		AttachStdout: true, // NOTE(winter): pretty sure this will make it so that when stdout read is done below, mkdir is done. maybe??
		AttachStderr: true, // for good measure, backed up by docker/cli ("If -d is not set, attach to everything by default")
	})
	if err != nil {
		return err
	}

	// This actually *starts* the command. Thanks, Docker!
	execResp, err := e.docker.ContainerExecAttach(ctx, mkExecResp.ID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer execResp.Close()

	// This is apparently best way to wait for the command to complete.
	_, err = io.ReadAll(execResp.Reader)
	if err != nil {
		return err
	}

	execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID)
	if err != nil {
		return err
	}

	if execInspectResp.ExitCode != 0 {
		return fmt.Errorf("mkdir exited with exit code %d", execInspectResp.ExitCode)
	} else if execInspectResp.Running {
		return errors.New("mkdir is somehow still running??")
	}

	addl.container = resp.ID
	wf.Data = addl

	return nil
}

func (e *Engine) RunStep(ctx context.Context, wid models.WorkflowId, w *models.Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *models.WorkflowLogger) error {
	addl := w.Data.(addlFields)
	workflowEnvs := ConstructEnvs(addl.env)
	// TODO(winter): should SetupWorkflow also have secret access?
	// IMO yes, but probably worth thinking on.
	for _, s := range secrets {
		workflowEnvs.AddEnv(s.Key, s.Value)
	}

	step := w.Steps[idx].(Step)

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	envs := append(EnvVars(nil), workflowEnvs...)
	for k, v := range step.environment {
		envs.AddEnv(k, v)
	}
	envs.AddEnv("HOME", homeDir)

	mkExecResp, err := e.docker.ContainerExecCreate(ctx, addl.container, container.ExecOptions{
		Cmd:          []string{"bash", "-c", step.command},
		AttachStdout: true,
		AttachStderr: true,
		Env:          envs,
	})
	if err != nil {
		return fmt.Errorf("creating exec: %w", err)
	}

	// start tailing logs in background
	tailDone := make(chan error, 1)
	go func() {
		tailDone <- e.tailStep(ctx, wfLogger, mkExecResp.ID, wid, idx, step)
	}()

	select {
	case <-tailDone:

	case <-ctx.Done():
		// cleanup will be handled by DestroyWorkflow, since
		// Docker doesn't provide an API to kill an exec run
		// (sure, we could grab the PID and kill it ourselves,
		// but that's wasted effort)
		e.l.Warn("step timed out", "step", step.Name)

		<-tailDone

		return engine.ErrTimedOut
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	execInspectResp, err := e.docker.ContainerExecInspect(ctx, mkExecResp.ID)
	if err != nil {
		return err
	}

	if execInspectResp.ExitCode != 0 {
		inspectResp, err := e.docker.ContainerInspect(ctx, addl.container)
		if err != nil {
			return err
		}

		e.l.Error("workflow failed!", "workflow_id", wid.String(), "exit_code", execInspectResp.ExitCode, "oom_killed", inspectResp.State.OOMKilled)

		if inspectResp.State.OOMKilled {
			return ErrOOMKilled
		}
		return engine.ErrWorkflowFailed
	}

	return nil
}

func (e *Engine) tailStep(ctx context.Context, wfLogger *models.WorkflowLogger, execID string, wid models.WorkflowId, stepIdx int, step models.Step) error {
	if wfLogger == nil {
		return nil
	}

	// This actually *starts* the command. Thanks, Docker!
	logs, err := e.docker.ContainerExecAttach(ctx, execID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer logs.Close()

	_, err = stdcopy.StdCopy(
		wfLogger.DataWriter("stdout"),
		wfLogger.DataWriter("stderr"),
		logs.Reader,
	)
	if err != nil && err != io.EOF && !errors.Is(err, context.DeadlineExceeded) {
		return fmt.Errorf("failed to copy logs: %w", err)
	}

	return nil
}

func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
	e.cleanupMu.Lock()
	key := wid.String()

	fns := e.cleanup[key]
	delete(e.cleanup, key)
	e.cleanupMu.Unlock()

	for _, fn := range fns {
		if err := fn(ctx); err != nil {
			e.l.Error("failed to cleanup workflow resource", "workflowId", wid, "error", err)
		}
	}
	return nil
}

func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
	e.cleanupMu.Lock()
	defer e.cleanupMu.Unlock()

	key := wid.String()
	e.cleanup[key] = append(e.cleanup[key], fn)
}

func networkName(wid models.WorkflowId) string {
	return fmt.Sprintf("workflow-network-%s", wid)
}
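As a rough illustration of workflowImage above — a hypothetical same-package sketch, assuming the public nixery.dev instance is the configured Nixery host:

package nixery

import "fmt"

// printImageRef prints nixery.dev/go/bash/git/coreutils/nix on an amd64 host;
// workflowImage prepends an extra "arm64" path segment on arm64 hosts.
func printImageRef() {
	deps := map[string][]string{"nixpkgs": {"go"}}
	fmt.Println(workflowImage(deps, "nixery.dev"))
}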
+28 spindle/engines/nixery/envs.go (new file; all lines added)

package nixery

import (
	"fmt"
)

type EnvVars []string

// ConstructEnvs converts a tangled.Pipeline_Step_Environment_Elem.{Key,Value}
// representation into a docker-friendly []string{"KEY=value", ...} slice.
func ConstructEnvs(envs map[string]string) EnvVars {
	var dockerEnvs EnvVars
	for k, v := range envs {
		ev := fmt.Sprintf("%s=%s", k, v)
		dockerEnvs = append(dockerEnvs, ev)
	}
	return dockerEnvs
}

// Slice returns the EnvVar as a []string slice.
func (ev EnvVars) Slice() []string {
	return ev
}

// AddEnv adds a key=value string to the EnvVar.
func (ev *EnvVars) AddEnv(key, value string) {
	*ev = append(*ev, fmt.Sprintf("%s=%s", key, value))
}
+48 spindle/engines/nixery/envs_test.go (new file; all lines added)

package nixery

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestConstructEnvs(t *testing.T) {
	tests := []struct {
		name string
		in   map[string]string
		want EnvVars
	}{
		{
			name: "empty input",
			in:   make(map[string]string),
			want: EnvVars{},
		},
		{
			name: "single env var",
			in:   map[string]string{"FOO": "bar"},
			want: EnvVars{"FOO=bar"},
		},
		{
			name: "multiple env vars",
			in:   map[string]string{"FOO": "bar", "BAZ": "qux"},
			want: EnvVars{"FOO=bar", "BAZ=qux"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := ConstructEnvs(tt.in)
			if got == nil {
				got = EnvVars{}
			}
			assert.ElementsMatch(t, tt.want, got)
		})
	}
}

func TestAddEnv(t *testing.T) {
	ev := EnvVars{}
	ev.AddEnv("FOO", "bar")
	ev.AddEnv("BAZ", "qux")
	want := EnvVars{"FOO=bar", "BAZ=qux"}
	assert.ElementsMatch(t, want, ev)
}
+7 spindle/engines/nixery/errors.go
+126 spindle/engines/nixery/setup_steps.go (new file; all lines added)

package nixery

import (
	"fmt"
	"path"
	"strings"

	"tangled.sh/tangled.sh/core/api/tangled"
	"tangled.sh/tangled.sh/core/workflow"
)

func nixConfStep() Step {
	setupCmd := `mkdir -p /etc/nix
echo 'extra-experimental-features = nix-command flakes' >> /etc/nix/nix.conf
echo 'build-users-group = ' >> /etc/nix/nix.conf`
	return Step{
		command: setupCmd,
		name:    "Configure Nix",
	}
}

// cloneOptsAsSteps processes clone options and adds corresponding steps
// to the beginning of the workflow's step list if cloning is not skipped.
//
// the steps to do here are:
// - git init
// - git remote add origin <url>
// - git fetch --depth=<d> --recurse-submodules=<yes|no> <sha>
// - git checkout FETCH_HEAD
func cloneStep(twf tangled.Pipeline_Workflow, tr tangled.Pipeline_TriggerMetadata, dev bool) Step {
	if twf.Clone.Skip {
		return Step{}
	}

	var commands []string

	// initialize git repo in workspace
	commands = append(commands, "git init")

	// add repo as git remote
	scheme := "https://"
	if dev {
		scheme = "http://"
		tr.Repo.Knot = strings.ReplaceAll(tr.Repo.Knot, "localhost", "host.docker.internal")
	}
	url := scheme + path.Join(tr.Repo.Knot, tr.Repo.Did, tr.Repo.Repo)
	commands = append(commands, fmt.Sprintf("git remote add origin %s", url))

	// run git fetch
	{
		var fetchArgs []string

		// default clone depth is 1
		depth := 1
		if twf.Clone.Depth > 1 {
			depth = int(twf.Clone.Depth)
		}
		fetchArgs = append(fetchArgs, fmt.Sprintf("--depth=%d", depth))

		// optionally recurse submodules
		if twf.Clone.Submodules {
			fetchArgs = append(fetchArgs, "--recurse-submodules=yes")
		}

		// set remote to fetch from
		fetchArgs = append(fetchArgs, "origin")

		// set revision to checkout
		switch workflow.TriggerKind(tr.Kind) {
		case workflow.TriggerKindManual:
			// TODO: unimplemented
		case workflow.TriggerKindPush:
			fetchArgs = append(fetchArgs, tr.Push.NewSha)
		case workflow.TriggerKindPullRequest:
			fetchArgs = append(fetchArgs, tr.PullRequest.SourceSha)
		}

		commands = append(commands, fmt.Sprintf("git fetch %s", strings.Join(fetchArgs, " ")))
	}

	// run git checkout
	commands = append(commands, "git checkout FETCH_HEAD")

	cloneStep := Step{
		command: strings.Join(commands, "\n"),
		name:    "Clone repository into workspace",
	}
	return cloneStep
}

// dependencyStep processes dependencies defined in the workflow.
// For dependencies using a custom registry (i.e. not nixpkgs), it collects
// all packages and adds a single 'nix profile install' step to the
// beginning of the workflow's step list.
func dependencyStep(deps map[string][]string) *Step {
	var customPackages []string

	for registry, packages := range deps {
		if registry == "nixpkgs" {
			continue
		}

		if len(packages) == 0 {
			customPackages = append(customPackages, registry)
		}
		// collect packages from custom registries
		for _, pkg := range packages {
			customPackages = append(customPackages, fmt.Sprintf("'%s#%s'", registry, pkg))
		}
	}

	if len(customPackages) > 0 {
		installCmd := "nix --extra-experimental-features nix-command --extra-experimental-features flakes profile install"
		cmd := fmt.Sprintf("%s %s", installCmd, strings.Join(customPackages, " "))
		installStep := Step{
			command: cmd,
			name:    "Install custom dependencies",
			environment: map[string]string{
				"NIX_NO_COLOR":               "1",
				"NIX_SHOW_DOWNLOAD_PROGRESS": "0",
			},
		}
		return &installStep
	}
	return nil
}
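A small hypothetical sketch (same package) of what dependencyStep yields; the custom registry URL is borrowed from the removed test fixtures and is purely illustrative:

package nixery

import "fmt"

// nixpkgs entries are skipped here (they are baked into the image by
// workflowImage); the custom registry becomes one 'nix profile install' step.
func demoDependencyStep() {
	step := dependencyStep(map[string][]string{
		"nixpkgs":                              {"go"},
		"git+https://tangled.sh/@oppi.li/tbsp": {"tbsp"},
	})
	if step != nil {
		fmt.Println(step.Name())    // Install custom dependencies
		fmt.Println(step.Command()) // nix ... profile install 'git+https://tangled.sh/@oppi.li/tbsp#tbsp'
	}
}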
+8 -4 spindle/ingester.go

···
 	switch e.Commit.Collection {
 	case tangled.SpindleMemberNSID:
-		s.ingestMember(ctx, e)
+		err = s.ingestMember(ctx, e)
 	case tangled.RepoNSID:
-		s.ingestRepo(ctx, e)
+		err = s.ingestRepo(ctx, e)
 	case tangled.RepoCollaboratorNSID:
-		s.ingestCollaborator(ctx, e)
+		err = s.ingestCollaborator(ctx, e)
 	}

-	return err
+	if err != nil {
+		s.l.Debug("failed to process message", "nsid", e.Commit.Collection, "err", err)
+	}
+
+	return nil
 	}
 }

+17 spindle/models/engine.go (new file; all lines added)

package models

import (
	"context"
	"time"

	"tangled.sh/tangled.sh/core/api/tangled"
	"tangled.sh/tangled.sh/core/spindle/secrets"
)

type Engine interface {
	InitWorkflow(twf tangled.Pipeline_Workflow, tpl tangled.Pipeline) (*Workflow, error)
	SetupWorkflow(ctx context.Context, wid WorkflowId, wf *Workflow) error
	WorkflowTimeout() time.Duration
	DestroyWorkflow(ctx context.Context, wid WorkflowId) error
	RunStep(ctx context.Context, wid WorkflowId, w *Workflow, idx int, secrets []secrets.UnlockedSecret, wfLogger *WorkflowLogger) error
}
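A minimal compile-time check that an engine implementation satisfies this interface; the nixery engine introduced in this change is used as the example, and the enclosing package is illustrative:

package spindle

import (
	"tangled.sh/tangled.sh/core/spindle/engines/nixery"
	"tangled.sh/tangled.sh/core/spindle/models"
)

// fails to compile if *nixery.Engine stops satisfying models.Engine
var _ models.Engine = (*nixery.Engine)(nil)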
+82 spindle/models/logger.go (new file; all lines added)

package models

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

type WorkflowLogger struct {
	file    *os.File
	encoder *json.Encoder
}

func NewWorkflowLogger(baseDir string, wid WorkflowId) (*WorkflowLogger, error) {
	path := LogFilePath(baseDir, wid)

	file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return nil, fmt.Errorf("creating log file: %w", err)
	}

	return &WorkflowLogger{
		file:    file,
		encoder: json.NewEncoder(file),
	}, nil
}

func LogFilePath(baseDir string, workflowID WorkflowId) string {
	logFilePath := filepath.Join(baseDir, fmt.Sprintf("%s.log", workflowID.String()))
	return logFilePath
}

func (l *WorkflowLogger) Close() error {
	return l.file.Close()
}

func (l *WorkflowLogger) DataWriter(stream string) io.Writer {
	// TODO: emit stream
	return &dataWriter{
		logger: l,
		stream: stream,
	}
}

func (l *WorkflowLogger) ControlWriter(idx int, step Step) io.Writer {
	return &controlWriter{
		logger: l,
		idx:    idx,
		step:   step,
	}
}

type dataWriter struct {
	logger *WorkflowLogger
	stream string
}

func (w *dataWriter) Write(p []byte) (int, error) {
	line := strings.TrimRight(string(p), "\r\n")
	entry := NewDataLogLine(line, w.stream)
	if err := w.logger.encoder.Encode(entry); err != nil {
		return 0, err
	}
	return len(p), nil
}

type controlWriter struct {
	logger *WorkflowLogger
	idx    int
	step   Step
}

func (w *controlWriter) Write(_ []byte) (int, error) {
	entry := NewControlLogLine(w.idx, w.step)
	if err := w.logger.encoder.Encode(entry); err != nil {
		return 0, err
	}
	return len(w.step.Name()), nil
}
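A minimal, hypothetical same-package sketch of the intended use: every Write on a DataWriter ends up as one JSON-encoded LogLine appended to <baseDir>/<workflow-id>.log:

package models

import "fmt"

func logHello(baseDir string, wid WorkflowId) error {
	wl, err := NewWorkflowLogger(baseDir, wid)
	if err != nil {
		return err
	}
	defer wl.Close()

	// appended to the log file as a single JSON-encoded data log line
	_, err = fmt.Fprintln(wl.DataWriter("stdout"), "hello from a step")
	return err
}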
+3 -3 spindle/models/models.go

···
 func NewControlLogLine(idx int, step Step) LogLine {
 	return LogLine{
 		Kind:        LogKindControl,
-		Content:     step.Name,
+		Content:     step.Name(),
 		StepId:      idx,
-		StepKind:    step.Kind,
-		StepCommand: step.Command,
+		StepKind:    step.Kind(),
+		StepCommand: step.Command(),
 	}
 }
+8 -103 spindle/models/pipeline.go

 package models

-import (
-	"path"
-
-	"tangled.sh/tangled.sh/core/api/tangled"
-	"tangled.sh/tangled.sh/core/spindle/config"
-)
-
 type Pipeline struct {
 	RepoOwner string
 	RepoName  string
-	Workflows []Workflow
+	Workflows map[Engine][]Workflow
 }

-type Step struct {
-	Command     string
-	Name        string
-	Environment map[string]string
-	Kind        StepKind
+type Step interface {
+	Name() string
+	Command() string
+	Kind() StepKind
 }

 type StepKind int
···
 )

 type Workflow struct {
-	Steps       []Step
-	Environment map[string]string
-	Name        string
-	Image       string
-}
-
-// setupSteps get added to start of Steps
-type setupSteps []Step
-
-// addStep adds a step to the beginning of the workflow's steps.
-func (ss *setupSteps) addStep(step Step) {
-	*ss = append(*ss, step)
-}
-
-// ToPipeline converts a tangled.Pipeline into a model.Pipeline.
-// In the process, dependencies are resolved: nixpkgs deps
-// are constructed atop nixery and set as the Workflow.Image,
-// and ones from custom registries
-func ToPipeline(pl tangled.Pipeline, cfg config.Config) *Pipeline {
-	workflows := []Workflow{}
-
-	for _, twf := range pl.Workflows {
-		swf := &Workflow{}
-		for _, tstep := range twf.Steps {
-			sstep := Step{}
-			sstep.Environment = stepEnvToMap(tstep.Environment)
-			sstep.Command = tstep.Command
-			sstep.Name = tstep.Name
-			sstep.Kind = StepKindUser
-			swf.Steps = append(swf.Steps, sstep)
-		}
-		swf.Name = twf.Name
-		swf.Environment = workflowEnvToMap(twf.Environment)
-		swf.Image = workflowImage(twf.Dependencies, cfg.Pipelines.Nixery)
-
-		setup := &setupSteps{}
-
-		setup.addStep(nixConfStep())
-		setup.addStep(cloneStep(*twf, *pl.TriggerMetadata, cfg.Server.Dev))
-		// this step could be empty
-		if s := dependencyStep(*twf); s != nil {
-			setup.addStep(*s)
-		}
-
-		// append setup steps in order to the start of workflow steps
-		swf.Steps = append(*setup, swf.Steps...)
-
-		workflows = append(workflows, *swf)
-	}
-	repoOwner := pl.TriggerMetadata.Repo.Did
-	repoName := pl.TriggerMetadata.Repo.Repo
-	return &Pipeline{
-		RepoOwner: repoOwner,
-		RepoName:  repoName,
-		Workflows: workflows,
-	}
-}
-
-func workflowEnvToMap(envs []*tangled.Pipeline_Pair) map[string]string {
-	envMap := map[string]string{}
-	for _, env := range envs {
-		if env != nil {
-			envMap[env.Key] = env.Value
-		}
-	}
-	return envMap
-}
-
-func stepEnvToMap(envs []*tangled.Pipeline_Pair) map[string]string {
-	envMap := map[string]string{}
-	for _, env := range envs {
-		if env != nil {
-			envMap[env.Key] = env.Value
-		}
-	}
-	return envMap
-}
-
-func workflowImage(deps []*tangled.Pipeline_Dependency, nixery string) string {
-	var dependencies string
-	for _, d := range deps {
-		if d.Registry == "nixpkgs" {
-			dependencies = path.Join(d.Packages...)
-		}
-	}
-
-	// load defaults from somewhere else
-	dependencies = path.Join(dependencies, "bash", "git", "coreutils", "nix")
-
-	return path.Join(nixery, dependencies)
+	Steps []Step
+	Name  string
+	Data  any
 }
-128 spindle/models/setup_steps.go
(file removed; the setup steps move to spindle/engines/nixery/setup_steps.go, shown earlier in this diff. Differences there: the package-local lower-cased Step fields, an extra `mkdir -p /etc/nix` in nixConfStep, and dependencyStep taking the parsed map[string][]string of dependencies instead of a tangled.Pipeline_Workflow)
+1 -1 spindle/secrets/sqlite.go

···
 }

 func NewSQLiteManager(dbPath string, opts ...SqliteManagerOpt) (*SqliteManager, error) {
-	db, err := sql.Open("sqlite3", dbPath)
+	db, err := sql.Open("sqlite3", dbPath+"?_foreign_keys=1")
 	if err != nil {
 		return nil, fmt.Errorf("failed to open sqlite database: %w", err)
 	}
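For reference, a standalone sketch of what the new DSN parameter does, assuming the mattn/go-sqlite3 driver usually registered under the "sqlite3" name; the database file name is illustrative:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", "spindle.db?_foreign_keys=1")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var on int
	// the _foreign_keys DSN parameter turns the pragma on for every new connection
	if err := db.QueryRow("PRAGMA foreign_keys").Scan(&on); err != nil {
		panic(err)
	}
	fmt.Println("foreign_keys =", on) // expected: 1
}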
+38 -8 spindle/server.go

···
 	"tangled.sh/tangled.sh/core/spindle/config"
 	"tangled.sh/tangled.sh/core/spindle/db"
 	"tangled.sh/tangled.sh/core/spindle/engine"
+	"tangled.sh/tangled.sh/core/spindle/engines/nixery"
 	"tangled.sh/tangled.sh/core/spindle/models"
 	"tangled.sh/tangled.sh/core/spindle/queue"
 	"tangled.sh/tangled.sh/core/spindle/secrets"
···
 	e    *rbac.Enforcer
 	l    *slog.Logger
 	n    *notifier.Notifier
-	eng  *engine.Engine
+	engs map[string]models.Engine
 	jq   *queue.Queue
 	cfg  *config.Config
 	ks   *eventconsumer.Consumer
···
 		return fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
 	}

-	eng, err := engine.New(ctx, cfg, d, &n, vault)
+	nixeryEng, err := nixery.New(ctx, cfg)
 	if err != nil {
 		return err
 	}
···
 		db:   d,
 		l:    logger,
 		n:    &n,
-		eng:  eng,
+		engs: map[string]models.Engine{"nixery": nixeryEng},
 		jq:   jq,
 		cfg:  cfg,
 		res:  resolver,
···
 		Logger:   logger,
 		Db:       s.db,
 		Enforcer: s.e,
-		Engine:   s.eng,
+		Engines:  s.engs,
 		Config:   s.cfg,
 		Resolver: s.res,
 		Vault:    s.vault,
···
 		Rkey: msg.Rkey,
 	}

+	workflows := make(map[models.Engine][]models.Workflow)
+
 	for _, w := range tpl.Workflows {
 		if w != nil {
-			err := s.db.StatusPending(models.WorkflowId{
+			if _, ok := s.engs[w.Engine]; !ok {
+				err = s.db.StatusFailed(models.WorkflowId{
+					PipelineId: pipelineId,
+					Name:       w.Name,
+				}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
+				if err != nil {
+					return err
+				}
+
+				continue
+			}
+
+			eng := s.engs[w.Engine]
+
+			if _, ok := workflows[eng]; !ok {
+				workflows[eng] = []models.Workflow{}
+			}
+
+			ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
+			if err != nil {
+				return err
+			}
+
+			workflows[eng] = append(workflows[eng], *ewf)
+
+			err = s.db.StatusPending(models.WorkflowId{
 				PipelineId: pipelineId,
 				Name:       w.Name,
 			}, s.n)
···
 		}
 	}

-	spl := models.ToPipeline(tpl, *s.cfg)
-
 	ok := s.jq.Enqueue(queue.Job{
 		Run: func() error {
-			s.eng.StartWorkflows(ctx, spl, pipelineId)
+			engine.StartWorkflows(s.l, s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
+				RepoOwner: tpl.TriggerMetadata.Repo.Did,
+				RepoName:  tpl.TriggerMetadata.Repo.Repo,
+				Workflows: workflows,
+			}, pipelineId)
 			return nil
 		},
 		OnFail: func(jobError error) {
+32 -2 spindle/stream.go

···
 	"fmt"
 	"io"
 	"net/http"
+	"os"
 	"strconv"
 	"time"

-	"tangled.sh/tangled.sh/core/spindle/engine"
 	"tangled.sh/tangled.sh/core/spindle/models"

 	"github.com/go-chi/chi/v5"
···
 	}
 	isFinished := models.StatusKind(status.Status).IsFinish()

-	filePath := engine.LogFilePath(s.cfg.Pipelines.LogDir, wid)
+	filePath := models.LogFilePath(s.cfg.Server.LogDir, wid)
+
+	if status.Status == models.StatusKindFailed.String() && status.Error != nil {
+		if _, err := os.Stat(filePath); os.IsNotExist(err) {
+			msgs := []models.LogLine{
+				{
+					Kind:     models.LogKindControl,
+					Content:  "",
+					StepId:   0,
+					StepKind: models.StepKindUser,
+				},
+				{
+					Kind:    models.LogKindData,
+					Content: *status.Error,
+				},
+			}
+
+			for _, msg := range msgs {
+				b, err := json.Marshal(msg)
+				if err != nil {
+					return err
+				}
+
+				if err := conn.WriteMessage(websocket.TextMessage, b); err != nil {
+					return fmt.Errorf("failed to write to websocket: %w", err)
+				}
+			}
+
+			return nil
+		}
+	}

 	config := tail.Config{
 		Follow: !isFinished,
+2 -2 spindle/xrpc/xrpc.go

···
 	"tangled.sh/tangled.sh/core/rbac"
 	"tangled.sh/tangled.sh/core/spindle/config"
 	"tangled.sh/tangled.sh/core/spindle/db"
-	"tangled.sh/tangled.sh/core/spindle/engine"
+	"tangled.sh/tangled.sh/core/spindle/models"
 	"tangled.sh/tangled.sh/core/spindle/secrets"
 )

···
 	Logger   *slog.Logger
 	Db       *db.DB
 	Enforcer *rbac.Enforcer
-	Engine   *engine.Engine
+	Engines  map[string]models.Engine
 	Config   *config.Config
 	Resolver *idresolver.Resolver
 	Vault    secrets.Manager
+1 -9 tailwind.config.js

···
         css: {
           maxWidth: "none",
           pre: {
-            backgroundColor: colors.gray[100],
-            color: colors.black,
-            "@apply font-normal text-black bg-gray-100 dark:bg-gray-900 dark:text-gray-300 dark:border-gray-700 dark:border": {},
-          },
-          li: {
-            "@apply inline-block w-full my-0 py-0": {},
-          },
-          "ul, ol": {
-            "@apply my-1 py-0": {},
+            "@apply font-normal text-black bg-gray-50 dark:bg-gray-900 dark:text-gray-300 dark:border-gray-700 border": {},
           },
           code: {
             "@apply font-normal font-mono p-1 rounded text-black bg-gray-100 dark:bg-gray-900 dark:text-gray-300 dark:border-gray-700": {},
+17 -36 workflow/compile.go

 package workflow

 import (
+	"errors"
 	"fmt"

 	"tangled.sh/tangled.sh/core/api/tangled"
···
 	return fmt.Sprintf("warning: %s: %s: %s", w.Path, w.Type, w.Reason)
 }

+var (
+	MissingEngine error = errors.New("missing engine")
+)
+
 type WarningKind string

 var (
···
 	for _, wf := range p {
 		cw := compiler.compileWorkflow(wf)

-		// empty workflows are not added to the pipeline
-		if len(cw.Steps) == 0 {
+		if cw == nil {
 			continue
 		}

-		cp.Workflows = append(cp.Workflows, &cw)
+		cp.Workflows = append(cp.Workflows, cw)
 	}

 	return cp
 }

-func (compiler *Compiler) compileWorkflow(w Workflow) tangled.Pipeline_Workflow {
-	cw := tangled.Pipeline_Workflow{}
+func (compiler *Compiler) compileWorkflow(w Workflow) *tangled.Pipeline_Workflow {
+	cw := &tangled.Pipeline_Workflow{}

 	if !w.Match(compiler.Trigger) {
 		compiler.Diagnostics.AddWarning(
···
 			WorkflowSkipped,
 			fmt.Sprintf("did not match trigger %s", compiler.Trigger.Kind),
 		)
-		return cw
-	}
-
-	if len(w.Steps) == 0 {
-		compiler.Diagnostics.AddWarning(
-			w.Name,
-			WorkflowSkipped,
-			"empty workflow",
-		)
-		return cw
+		return nil
 	}

 	// validate clone options
 	compiler.analyzeCloneOptions(w)

 	cw.Name = w.Name
-	cw.Dependencies = w.Dependencies.AsRecord()
-	for _, s := range w.Steps {
-		step := tangled.Pipeline_Step{
-			Command: s.Command,
-			Name:    s.Name,
-		}
-		for k, v := range s.Environment {
-			e := &tangled.Pipeline_Pair{
-				Key:   k,
-				Value: v,
-			}
-			step.Environment = append(step.Environment, e)
-		}
-		cw.Steps = append(cw.Steps, &step)
+
+	if w.Engine == "" {
+		compiler.Diagnostics.AddError(w.Name, MissingEngine)
+		return nil
 	}
-	for k, v := range w.Environment {
-		e := &tangled.Pipeline_Pair{
-			Key:   k,
-			Value: v,
-		}
-		cw.Environment = append(cw.Environment, e)
-	}
+
+	cw.Engine = w.Engine
+	cw.Raw = w.Raw

 	o := w.CloneOpts.AsRecord()
 	cw.Clone = &o
+23 -29 workflow/compile_test.go

···
 func TestCompileWorkflow_MatchingWorkflowWithSteps(t *testing.T) {
 	wf := Workflow{
-		Name: ".tangled/workflows/test.yml",
-		When: when,
-		Steps: []Step{
-			{Name: "Test", Command: "go test ./..."},
-		},
+		Name:      ".tangled/workflows/test.yml",
+		Engine:    "nixery",
+		When:      when,
 		CloneOpts: CloneOpts{}, // default true
 	}

···
 	assert.False(t, c.Diagnostics.IsErr())
 }

-func TestCompileWorkflow_EmptySteps(t *testing.T) {
-	wf := Workflow{
-		Name:  ".tangled/workflows/empty.yml",
-		When:  when,
-		Steps: []Step{}, // no steps
-	}
-
-	c := Compiler{Trigger: trigger}
-	cp := c.Compile([]Workflow{wf})
-
-	assert.Len(t, cp.Workflows, 0)
-	assert.Len(t, c.Diagnostics.Warnings, 1)
-	assert.Equal(t, WorkflowSkipped, c.Diagnostics.Warnings[0].Type)
-}
-
 func TestCompileWorkflow_TriggerMismatch(t *testing.T) {
 	wf := Workflow{
-		Name: ".tangled/workflows/mismatch.yml",
+		Name:   ".tangled/workflows/mismatch.yml",
+		Engine: "nixery",
 		When: []Constraint{
 			{
 				Event:  []string{"push"},
 				Branch: []string{"master"}, // different branch
 			},
 		},
-		Steps: []Step{
-			{Name: "Lint", Command: "golint ./..."},
-		},
 	}

 	c := Compiler{Trigger: trigger}
···

 func TestCompileWorkflow_CloneFalseWithShallowTrue(t *testing.T) {
 	wf := Workflow{
-		Name: ".tangled/workflows/clone_skip.yml",
-		When: when,
-		Steps: []Step{
-			{Name: "Skip", Command: "echo skip"},
-		},
+		Name:   ".tangled/workflows/clone_skip.yml",
+		Engine: "nixery",
+		When:   when,
 		CloneOpts: CloneOpts{
 			Skip:  true,
 			Depth: 1,
···
 	assert.Len(t, c.Diagnostics.Warnings, 1)
 	assert.Equal(t, InvalidConfiguration, c.Diagnostics.Warnings[0].Type)
 }
+
+func TestCompileWorkflow_MissingEngine(t *testing.T) {
+	wf := Workflow{
+		Name:   ".tangled/workflows/missing_engine.yml",
+		When:   when,
+		Engine: "",
+	}
+
+	c := Compiler{Trigger: trigger}
+	cp := c.Compile([]Workflow{wf})
+
+	assert.Len(t, cp.Workflows, 0)
+	assert.Len(t, c.Diagnostics.Errors, 1)
+	assert.Equal(t, MissingEngine, c.Diagnostics.Errors[0].Error)
+}
+6 -33 workflow/def.go

···
 	// this is simply a structural representation of the workflow file
 	Workflow struct {
-		Name         string            `yaml:"-"` // name of the workflow file
-		When         []Constraint      `yaml:"when"`
-		Dependencies Dependencies      `yaml:"dependencies"`
-		Steps        []Step            `yaml:"steps"`
-		Environment  map[string]string `yaml:"environment"`
-		CloneOpts    CloneOpts         `yaml:"clone"`
+		Name      string       `yaml:"-"` // name of the workflow file
+		Engine    string       `yaml:"engine"`
+		When      []Constraint `yaml:"when"`
+		CloneOpts CloneOpts    `yaml:"clone"`
+		Raw       string       `yaml:"-"`
 	}

 	Constraint struct {
 		Event  StringList `yaml:"event"`
 		Branch StringList `yaml:"branch"` // this is optional, and only applied on "push" events
 	}
-
-	Dependencies map[string][]string

 	CloneOpts struct {
 		Skip              bool `yaml:"skip"`
 		Depth             int  `yaml:"depth"`
 		IncludeSubmodules bool `yaml:"submodules"`
-	}
-
-	Step struct {
-		Name        string            `yaml:"name"`
-		Command     string            `yaml:"command"`
-		Environment map[string]string `yaml:"environment"`
 	}

 	StringList []string
···
 	}

 	wf.Name = name
+	wf.Raw = string(contents)

 	return wf, nil
 }
···
 	}

 	return errors.New("failed to unmarshal StringOrSlice")
-}
-
-// conversion utilities to atproto records
-func (d Dependencies) AsRecord() []*tangled.Pipeline_Dependency {
-	var deps []*tangled.Pipeline_Dependency
-	for registry, packages := range d {
-		deps = append(deps, &tangled.Pipeline_Dependency{
-			Registry: registry,
-			Packages: packages,
-		})
-	}
-	return deps
-}
-
-func (s Step) AsRecord() tangled.Pipeline_Step {
-	return tangled.Pipeline_Step{
-		Command: s.Command,
-		Name:    s.Name,
-	}
 }

 func (c CloneOpts) AsRecord() tangled.Pipeline_CloneOpts
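A minimal, hypothetical same-package sketch of parsing a workflow under the new definition: only engine, when and clone are interpreted here, while the full file body is kept in Raw for the selected engine to parse later:

package workflow

import "fmt"

func demoParse() error {
	src := []byte(`engine: nixery
when:
  - event: ["push"]
    branch: ["master"]
`)
	wf, err := FromFile(".tangled/workflows/build.yml", src)
	if err != nil {
		return err
	}
	fmt.Println(wf.Engine, len(wf.Raw) > 0) // nixery true
	return nil
}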
+1 -86 workflow/def_test.go

···
 	yamlData := `
 when:
   - event: ["push", "pull_request"]
-    branch: ["main", "develop"]
-
-dependencies:
-  nixpkgs:
-    - go
-    - git
-    - curl
-
-steps:
-  - name: "Test"
-    command: |
-      go test ./...`
+    branch: ["main", "develop"]`

 	wf, err := FromFile("test.yml", []byte(yamlData))
 	assert.NoError(t, err, "YAML should unmarshal without error")
···
 	assert.ElementsMatch(t, []string{"main", "develop"}, wf.When[0].Branch)
 	assert.ElementsMatch(t, []string{"push", "pull_request"}, wf.When[0].Event)

-	assert.Len(t, wf.Steps, 1)
-	assert.Equal(t, "Test", wf.Steps[0].Name)
-	assert.Equal(t, "go test ./...", wf.Steps[0].Command)
-
-	pkgs, ok := wf.Dependencies["nixpkgs"]
-	assert.True(t, ok, "`nixpkgs` should be present in dependencies")
-	assert.ElementsMatch(t, []string{"go", "git", "curl"}, pkgs)
-
 	assert.False(t, wf.CloneOpts.Skip, "Skip should default to false")
 }

-func TestUnmarshalCustomRegistry(t *testing.T) {
-	yamlData := `
-when:
-  - event: push
-    branch: main
-
-dependencies:
-  git+https://tangled.sh/@oppi.li/tbsp:
-    - tbsp
-  git+https://git.peppe.rs/languages/statix:
-    - statix
-
-steps:
-  - name: "Check"
-    command: |
-      statix check`
-
-	wf, err := FromFile("test.yml", []byte(yamlData))
-	assert.NoError(t, err, "YAML should unmarshal without error")
-
-	assert.ElementsMatch(t, []string{"push"}, wf.When[0].Event)
-	assert.ElementsMatch(t, []string{"main"}, wf.When[0].Branch)
-
-	assert.ElementsMatch(t, []string{"tbsp"}, wf.Dependencies["git+https://tangled.sh/@oppi.li/tbsp"])
-	assert.ElementsMatch(t, []string{"statix"}, wf.Dependencies["git+https://git.peppe.rs/languages/statix"])
-}
-
 func TestUnmarshalCloneFalse(t *testing.T) {
 	yamlData := `
 when:
···

 clone:
   skip: true
-
-dependencies:
-  nixpkgs:
-    - python3
-
-steps:
-  - name: Notify
-    command: |
-      python3 ./notify.py
 `

 	wf, err := FromFile("test.yml", []byte(yamlData))
···

 	assert.True(t, wf.CloneOpts.Skip, "Skip should be false")
 }
-
-func TestUnmarshalEnv(t *testing.T) {
-	yamlData := `
-when:
-  - event: ["pull_request_close"]
-
-clone:
-  skip: false
-
-environment:
-  HOME: /home/foo bar/baz
-  CGO_ENABLED: 1
-
-steps:
-  - name: Something
-    command: echo "hello"
-    environment:
-      FOO: bar
-      BAZ: qux
-`
-
-	wf, err := FromFile("test.yml", []byte(yamlData))
-	assert.NoError(t, err)
-
-	assert.Len(t, wf.Environment, 2)
-	assert.Equal(t, "/home/foo bar/baz", wf.Environment["HOME"])
-	assert.Equal(t, "1", wf.Environment["CGO_ENABLED"])
-	assert.Equal(t, "bar", wf.Steps[0].Environment["FOO"])
-	assert.Equal(t, "qux", wf.Steps[0].Environment["BAZ"])
-}