api/tangled/cbor_gen.go (+79 -20)
···
7934
7934
}
7935
7935
7936
7936
cw := cbg.NewCborWriter(w)
7937
-
fieldCount := 9
7937
+
fieldCount := 10
7938
7938
7939
7939
if t.Body == nil {
7940
7940
fieldCount--
7941
7941
}
7942
7942
7943
7943
if t.Mentions == nil {
7944
+
fieldCount--
7945
+
}
7946
+
7947
+
if t.Patch == nil {
7944
7948
fieldCount--
7945
7949
}
7946
7950
···
8008
8012
}
8009
8013
8010
8014
// t.Patch (string) (string)
8011
-
if len("patch") > 1000000 {
8012
-
return xerrors.Errorf("Value in field \"patch\" was too long")
8013
-
}
8015
+
if t.Patch != nil {
8014
8016
8015
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
8016
-
return err
8017
-
}
8018
-
if _, err := cw.WriteString(string("patch")); err != nil {
8019
-
return err
8020
-
}
8017
+
if len("patch") > 1000000 {
8018
+
return xerrors.Errorf("Value in field \"patch\" was too long")
8019
+
}
8021
8020
8022
-
if len(t.Patch) > 1000000 {
8023
-
return xerrors.Errorf("Value in field t.Patch was too long")
8024
-
}
8021
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patch"))); err != nil {
8022
+
return err
8023
+
}
8024
+
if _, err := cw.WriteString(string("patch")); err != nil {
8025
+
return err
8026
+
}
8027
+
8028
+
if t.Patch == nil {
8029
+
if _, err := cw.Write(cbg.CborNull); err != nil {
8030
+
return err
8031
+
}
8032
+
} else {
8033
+
if len(*t.Patch) > 1000000 {
8034
+
return xerrors.Errorf("Value in field t.Patch was too long")
8035
+
}
8025
8036
8026
-
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Patch))); err != nil {
8027
-
return err
8028
-
}
8029
-
if _, err := cw.WriteString(string(t.Patch)); err != nil {
8030
-
return err
8037
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Patch))); err != nil {
8038
+
return err
8039
+
}
8040
+
if _, err := cw.WriteString(string(*t.Patch)); err != nil {
8041
+
return err
8042
+
}
8043
+
}
8031
8044
}
8032
8045
8033
8046
// t.Title (string) (string)
···
8147
8160
return err
8148
8161
}
8149
8162
8163
+
// t.PatchBlob (util.LexBlob) (struct)
8164
+
if len("patchBlob") > 1000000 {
8165
+
return xerrors.Errorf("Value in field \"patchBlob\" was too long")
8166
+
}
8167
+
8168
+
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("patchBlob"))); err != nil {
8169
+
return err
8170
+
}
8171
+
if _, err := cw.WriteString(string("patchBlob")); err != nil {
8172
+
return err
8173
+
}
8174
+
8175
+
if err := t.PatchBlob.MarshalCBOR(cw); err != nil {
8176
+
return err
8177
+
}
8178
+
8150
8179
// t.References ([]string) (slice)
8151
8180
if t.References != nil {
8152
8181
···
8262
8291
case "patch":
8263
8292
8264
8293
{
8265
-
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8294
+
b, err := cr.ReadByte()
8266
8295
if err != nil {
8267
8296
return err
8268
8297
}
8298
+
if b != cbg.CborNull[0] {
8299
+
if err := cr.UnreadByte(); err != nil {
8300
+
return err
8301
+
}
8269
8302
8270
-
t.Patch = string(sval)
8303
+
sval, err := cbg.ReadStringWithMax(cr, 1000000)
8304
+
if err != nil {
8305
+
return err
8306
+
}
8307
+
8308
+
t.Patch = (*string)(&sval)
8309
+
}
8271
8310
}
8272
8311
// t.Title (string) (string)
8273
8312
case "title":
···
8370
8409
}
8371
8410
8372
8411
t.CreatedAt = string(sval)
8412
+
}
8413
+
// t.PatchBlob (util.LexBlob) (struct)
8414
+
case "patchBlob":
8415
+
8416
+
{
8417
+
8418
+
b, err := cr.ReadByte()
8419
+
if err != nil {
8420
+
return err
8421
+
}
8422
+
if b != cbg.CborNull[0] {
8423
+
if err := cr.UnreadByte(); err != nil {
8424
+
return err
8425
+
}
8426
+
t.PatchBlob = new(util.LexBlob)
8427
+
if err := t.PatchBlob.UnmarshalCBOR(cr); err != nil {
8428
+
return xerrors.Errorf("unmarshaling t.PatchBlob pointer: %w", err)
8429
+
}
8430
+
}
8431
+
8373
8432
}
8374
8433
// t.References ([]string) (slice)
8375
8434
case "references":
api/tangled/pipelinecancelPipeline.go (-34, deleted)
- // Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.
-
- package tangled
-
- // schema: sh.tangled.pipeline.cancelPipeline
-
- import (
- "context"
-
- "github.com/bluesky-social/indigo/lex/util"
- )
-
- const (
- PipelineCancelPipelineNSID = "sh.tangled.pipeline.cancelPipeline"
- )
-
- // PipelineCancelPipeline_Input is the input argument to a sh.tangled.pipeline.cancelPipeline call.
- type PipelineCancelPipeline_Input struct {
- // pipeline: pipeline at-uri
- Pipeline string `json:"pipeline" cborgen:"pipeline"`
- // repo: repo at-uri, spindle can't resolve repo from pipeline at-uri yet
- Repo string `json:"repo" cborgen:"repo"`
- // workflow: workflow name
- Workflow string `json:"workflow" cborgen:"workflow"`
- }
-
- // PipelineCancelPipeline calls the XRPC method "sh.tangled.pipeline.cancelPipeline".
- func PipelineCancelPipeline(ctx context.Context, c util.LexClient, input *PipelineCancelPipeline_Input) error {
- if err := c.LexDo(ctx, util.Procedure, "application/json", "sh.tangled.pipeline.cancelPipeline", nil, input, nil); err != nil {
- return err
- }
-
- return nil
- }
api/tangled/repopull.go (+12 -9)
···
  } //
  // RECORDTYPE: RepoPull
  type RepoPull struct {
- LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
- Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
- CreatedAt string `json:"createdAt" cborgen:"createdAt"`
- Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
- Patch string `json:"patch" cborgen:"patch"`
- References []string `json:"references,omitempty" cborgen:"references,omitempty"`
- Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
- Target *RepoPull_Target `json:"target" cborgen:"target"`
- Title string `json:"title" cborgen:"title"`
+ LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull" cborgen:"$type,const=sh.tangled.repo.pull"`
+ Body *string `json:"body,omitempty" cborgen:"body,omitempty"`
+ CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+ Mentions []string `json:"mentions,omitempty" cborgen:"mentions,omitempty"`
+ // patch: (deprecated) use patchBlob instead
+ Patch *string `json:"patch,omitempty" cborgen:"patch,omitempty"`
+ // patchBlob: patch content
+ PatchBlob *util.LexBlob `json:"patchBlob" cborgen:"patchBlob"`
+ References []string `json:"references,omitempty" cborgen:"references,omitempty"`
+ Source *RepoPull_Source `json:"source,omitempty" cborgen:"source,omitempty"`
+ Target *RepoPull_Target `json:"target" cborgen:"target"`
+ Title string `json:"title" cborgen:"title"`
  }

  // RepoPull_Source is a "source" in the sh.tangled.repo.pull schema.
appview/db/pipeline.go (+6 -6)
···
  "strings"
  "time"

- "github.com/bluesky-social/indigo/atproto/syntax"
  "tangled.org/core/appview/models"
  "tangled.org/core/orm"
  )
···
  }
  defer rows.Close()

- pipelines := make(map[syntax.ATURI]models.Pipeline)
+ pipelines := make(map[string]models.Pipeline)
  for rows.Next() {
  var p models.Pipeline
  var t models.Trigger
···
  p.Trigger = &t
  p.Statuses = make(map[string]models.WorkflowStatus)

- pipelines[p.AtUri()] = p
+ k := fmt.Sprintf("%s/%s", p.Knot, p.Rkey)
+ pipelines[k] = p
  }

  // get all statuses
···
  return nil, fmt.Errorf("invalid status created timestamp %q: %w", created, err)
  }

- pipelineAt := ps.PipelineAt()
+ key := fmt.Sprintf("%s/%s", ps.PipelineKnot, ps.PipelineRkey)

  // extract
- pipeline, ok := pipelines[pipelineAt]
+ pipeline, ok := pipelines[key]
  if !ok {
  continue
  }
···
  // reassign
  pipeline.Statuses[ps.Workflow] = statuses
- pipelines[pipelineAt] = pipeline
+ pipelines[key] = pipeline
  }

  var all []models.Pipeline
appview/models/pipeline.go (-10)
···
  package models

  import (
- "fmt"
  "slices"
  "time"

  "github.com/bluesky-social/indigo/atproto/syntax"
  "github.com/go-git/go-git/v5/plumbing"
- "tangled.org/core/api/tangled"
  spindle "tangled.org/core/spindle/models"
  "tangled.org/core/workflow"
  )
···
  // populate when querying for reverse mappings
  Trigger *Trigger
  Statuses map[string]WorkflowStatus
- }
-
- func (p *Pipeline) AtUri() syntax.ATURI {
- return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", p.Knot, tangled.PipelineNSID, p.Rkey))
  }

  type WorkflowStatus struct {
···
  Error *string
  ExitCode int
  }
-
- func (ps *PipelineStatus) PipelineAt() syntax.ATURI {
- return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", ps.PipelineKnot, tangled.PipelineNSID, ps.PipelineRkey))
- }
appview/models/pull.go (+1 -1)
···
  Repo *Repo
  }

+ // NOTE: This method does not include patch blob in returned atproto record
  func (p Pull) AsRecord() tangled.RepoPull {
  var source *tangled.RepoPull_Source
  if p.PullSource != nil {
···
  Repo: p.RepoAt.String(),
  Branch: p.TargetBranch,
  },
- Patch: p.LatestPatch(),
  Source: source,
  }
  return record
appview/pages/templates/repo/fragments/diff.html (+1 -1)
···
  {{ else }}
  {{ range $idx, $hunk := $diff }}
  {{ with $hunk }}
- <details open id="file-{{ .Name.New }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
+ <details open id="file-{{ .Id }}" class="group border border-gray-200 dark:border-gray-700 w-full mx-auto rounded bg-white dark:bg-gray-800 drop-shadow-sm" tabindex="{{ add $idx 1 }}">
  <summary class="list-none cursor-pointer sticky top-0">
  <div id="diff-file-header" class="rounded cursor-pointer bg-white dark:bg-gray-800 flex justify-between">
  <div id="left-side-items" class="p-2 flex gap-2 items-center overflow-x-auto">
appview/pages/templates/repo/fragments/splitDiff.html (+35 -35)
···
3
3
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800" -}}
4
4
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
5
5
{{- $lineNrSepStyle := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
6
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
6
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
7
7
{{- $emptyStyle := "bg-gray-200/30 dark:bg-gray-700/30" -}}
8
8
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400" -}}
9
9
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
10
10
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
11
11
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
12
12
<div class="grid grid-cols-2 divide-x divide-gray-200 dark:divide-gray-700">
13
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
13
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
14
14
{{- range .LeftLines -}}
15
15
{{- if .IsEmpty -}}
16
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
17
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
18
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
19
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
20
-
</div>
16
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
17
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
18
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
19
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
20
+
</span>
21
21
{{- else if eq .Op.String "-" -}}
22
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
24
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
25
-
<div class="px-2">{{ .Content }}</div>
26
-
</div>
22
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
23
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
24
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
25
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
26
+
</span>
27
27
{{- else if eq .Op.String " " -}}
28
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
-
<div class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></div>
30
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
31
-
<div class="px-2">{{ .Content }}</div>
32
-
</div>
28
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{.LineNumber}}">
29
+
<span class="{{ $lineNrStyle }} {{ $lineNrSepStyle }}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{.LineNumber}}">{{ .LineNumber }}</a></span>
30
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
31
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
32
+
</span>
33
33
{{- end -}}
34
34
{{- end -}}
35
-
{{- end -}}</div></div></pre>
35
+
{{- end -}}</div></div></div>
36
36
37
-
<pre class="overflow-x-auto col-span-1"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
37
+
<div class="overflow-x-auto col-span-1 font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
38
38
{{- range .RightLines -}}
39
39
{{- if .IsEmpty -}}
40
-
<div class="{{ $emptyStyle }} {{ $containerStyle }}">
41
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></div>
42
-
<div class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></div>
43
-
<div class="px-2 invisible" aria-hidden="true">{{ .Content }}</div>
44
-
</div>
40
+
<span class="{{ $emptyStyle }} {{ $containerStyle }}">
41
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><span aria-hidden="true" class="invisible">{{.LineNumber}}</span></span>
42
+
<span class="{{ $opStyle }}"><span aria-hidden="true" class="invisible">{{ .Op.String }}</span></span>
43
+
<span class="px-2 invisible" aria-hidden="true">{{ .Content }}</span>
44
+
</span>
45
45
{{- else if eq .Op.String "+" -}}
46
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
48
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
49
-
<div class="px-2" >{{ .Content }}</div>
50
-
</div>
46
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
47
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></span>
48
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
49
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
50
+
</span>
51
51
{{- else if eq .Op.String " " -}}
52
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a></div>
54
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
55
-
<div class="px-2">{{ .Content }}</div>
56
-
</div>
52
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-N{{.LineNumber}}">
53
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{.LineNumber}}">{{ .LineNumber }}</a> </span>
54
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
55
+
<span class="px-2 whitespace-pre">{{ .Content }}</span>
56
+
</span>
57
57
{{- end -}}
58
58
{{- end -}}
59
-
{{- end -}}</div></div></pre>
59
+
{{- end -}}</div></div></div>
60
60
</div>
61
61
{{ end }}
appview/pages/templates/repo/fragments/unifiedDiff.html (+21 -22)
···
1
1
{{ define "repo/fragments/unifiedDiff" }}
2
2
{{ $name := .Id }}
3
-
<pre class="overflow-x-auto"><div class="overflow-x-auto"><div class="min-w-full inline-block">{{- range .TextFragments -}}<div class="bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</div>
3
+
<div class="overflow-x-auto font-mono leading-normal"><div class="overflow-x-auto"><div class="inline-flex flex-col min-w-full">{{- range .TextFragments -}}<span class="block bg-gray-100 dark:bg-gray-700 text-gray-500 dark:text-gray-400 select-none text-center">···</span>
4
4
{{- $oldStart := .OldPosition -}}
5
5
{{- $newStart := .NewPosition -}}
6
6
{{- $lineNrStyle := "min-w-[3.5rem] flex-shrink-0 select-none text-right bg-white dark:bg-gray-800 target:bg-yellow-200 target:dark:bg-yellow-600" -}}
7
7
{{- $linkStyle := "text-gray-400 dark:text-gray-500 hover:underline" -}}
8
8
{{- $lineNrSepStyle1 := "" -}}
9
9
{{- $lineNrSepStyle2 := "pr-2 border-r border-gray-200 dark:border-gray-700" -}}
10
-
{{- $containerStyle := "flex min-w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
10
+
{{- $containerStyle := "inline-flex w-full items-center target:border target:rounded-sm target:border-yellow-200 target:dark:border-yellow-700 scroll-mt-20" -}}
11
11
{{- $addStyle := "bg-green-100 dark:bg-green-800/30 text-green-700 dark:text-green-400 " -}}
12
12
{{- $delStyle := "bg-red-100 dark:bg-red-800/30 text-red-700 dark:text-red-400 " -}}
13
13
{{- $ctxStyle := "bg-white dark:bg-gray-800 text-gray-500 dark:text-gray-400" -}}
14
14
{{- $opStyle := "w-5 flex-shrink-0 select-none text-center" -}}
15
15
{{- range .Lines -}}
16
16
{{- if eq .Op.String "+" -}}
17
-
<div class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></div>
19
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></div>
20
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
21
-
<div class="px-2">{{ .Line }}</div>
22
-
</div>
17
+
<span class="{{ $addStyle }} {{ $containerStyle }}" id="{{$name}}-N{{$newStart}}">
18
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><span aria-hidden="true" class="invisible">{{$newStart}}</span></span>
19
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-N{{$newStart}}">{{ $newStart }}</a></span>
20
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
21
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
22
+
</span>
23
23
{{- $newStart = add64 $newStart 1 -}}
24
24
{{- end -}}
25
25
{{- if eq .Op.String "-" -}}
26
-
<div class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></div>
28
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></div>
29
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
30
-
<div class="px-2">{{ .Line }}</div>
31
-
</div>
26
+
<span class="{{ $delStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}">
27
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}">{{ $oldStart }}</a></span>
28
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><span aria-hidden="true" class="invisible">{{$oldStart}}</span></span>
29
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
30
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
31
+
</span>
32
32
{{- $oldStart = add64 $oldStart 1 -}}
33
33
{{- end -}}
34
34
{{- if eq .Op.String " " -}}
35
-
<div class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></div>
37
-
<div class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></div>
38
-
<div class="{{ $opStyle }}">{{ .Op.String }}</div>
39
-
<div class="px-2">{{ .Line }}</div>
40
-
</div>
35
+
<span class="{{ $ctxStyle }} {{ $containerStyle }}" id="{{$name}}-O{{$oldStart}}-N{{$newStart}}">
36
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle1}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $oldStart }}</a></span>
37
+
<span class="{{$lineNrStyle}} {{$lineNrSepStyle2}}"><a class="{{$linkStyle}}" href="#{{$name}}-O{{$oldStart}}-N{{$newStart}}">{{ $newStart }}</a></span>
38
+
<span class="{{ $opStyle }}">{{ .Op.String }}</span>
39
+
<span class="px-2 whitespace-pre">{{ .Line }}</span>
40
+
</span>
41
41
{{- $newStart = add64 $newStart 1 -}}
42
42
{{- $oldStart = add64 $oldStart 1 -}}
43
43
{{- end -}}
44
44
{{- end -}}
45
-
{{- end -}}</div></div></pre>
45
+
{{- end -}}</div></div></div>
46
46
{{ end }}
47
-
appview/pages/templates/repo/pipelines/workflow.html (-14)
···
  {{ block "sidebar" . }} {{ end }}
  </div>
  <div class="col-span-1 md:col-span-3">
- <!-- TODO(boltless): explictly check for pipeline cancel permission -->
- {{ if $.RepoInfo.Roles.IsOwner }}
- <div class="flex justify-between mb-2">
- <div id="workflow-error" class="text-red-500 dark:text-red-400"></div>
- <button
- class="btn"
- hx-post="/{{ $.RepoInfo.FullName }}/pipelines/{{ .Pipeline.Id }}/workflow/{{ .Workflow }}/cancel"
- hx-swap="none"
- {{ if (index .Pipeline.Statuses .Workflow).Latest.Status.IsFinish -}}
- disabled
- {{- end }}
- >Cancel</button>
- </div>
- {{ end }}
  {{ block "logs" . }} {{ end }}
  </div>
  </section>
appview/pipelines/pipelines.go (+1 -86)
···
  "bytes"
  "context"
  "encoding/json"
- "fmt"
  "log/slog"
  "net/http"
  "strings"
  "time"

- "tangled.org/core/api/tangled"
  "tangled.org/core/appview/config"
  "tangled.org/core/appview/db"
- "tangled.org/core/appview/middleware"
- "tangled.org/core/appview/models"
  "tangled.org/core/appview/oauth"
  "tangled.org/core/appview/pages"
  "tangled.org/core/appview/reporesolver"
···
  logger *slog.Logger
  }

- func (p *Pipelines) Router(mw *middleware.Middleware) http.Handler {
+ func (p *Pipelines) Router() http.Handler {
  r := chi.NewRouter()
  r.Get("/", p.Index)
  r.Get("/{pipeline}/workflow/{workflow}", p.Workflow)
  r.Get("/{pipeline}/workflow/{workflow}/logs", p.Logs)
- r.
- With(mw.RepoPermissionMiddleware("repo:owner")).
- Post("/{pipeline}/workflow/{workflow}/cancel", p.Cancel)

  return r
  }
···
  }
  }
  }
- }
-
- func (p *Pipelines) Cancel(w http.ResponseWriter, r *http.Request) {
- l := p.logger.With("handler", "Cancel")
-
- var (
- pipelineId = chi.URLParam(r, "pipeline")
- workflow = chi.URLParam(r, "workflow")
- )
- if pipelineId == "" || workflow == "" {
- http.Error(w, "missing pipeline ID or workflow", http.StatusBadRequest)
- return
- }
-
- f, err := p.repoResolver.Resolve(r)
- if err != nil {
- l.Error("failed to get repo and knot", "err", err)
- http.Error(w, "bad repo/knot", http.StatusBadRequest)
- return
- }
-
- pipeline, err := func() (models.Pipeline, error) {
- ps, err := db.GetPipelineStatuses(
- p.db,
- 1,
- orm.FilterEq("repo_owner", f.Did),
- orm.FilterEq("repo_name", f.Name),
- orm.FilterEq("knot", f.Knot),
- orm.FilterEq("id", pipelineId),
- )
- if err != nil {
- return models.Pipeline{}, err
- }
- if len(ps) != 1 {
- return models.Pipeline{}, fmt.Errorf("wrong pipeline count %d", len(ps))
- }
- return ps[0], nil
- }()
- if err != nil {
- l.Error("pipeline query failed", "err", err)
- http.Error(w, "pipeline not found", http.StatusNotFound)
- }
- var (
- spindle = f.Spindle
- knot = f.Knot
- rkey = pipeline.Rkey
- )
-
- if spindle == "" || knot == "" || rkey == "" {
- http.Error(w, "invalid repo info", http.StatusBadRequest)
- return
- }
-
- spindleClient, err := p.oauth.ServiceClient(
- r,
- oauth.WithService(f.Spindle),
- oauth.WithLxm(tangled.PipelineCancelPipelineNSID),
- oauth.WithDev(p.config.Core.Dev),
- oauth.WithTimeout(time.Second*30), // workflow cleanup usually takes time
- )
-
- err = tangled.PipelineCancelPipeline(
- r.Context(),
- spindleClient,
- &tangled.PipelineCancelPipeline_Input{
- Repo: string(f.RepoAt()),
- Pipeline: pipeline.AtUri().String(),
- Workflow: workflow,
- },
- )
- err = fmt.Errorf("boo! new error")
- errorId := "workflow-error"
- if err != nil {
- l.Error("failed to cancel workflow", "err", err)
- p.pages.Notice(w, errorId, "Failed to cancel workflow")
- return
- }
- l.Debug("canceled pipeline", "uri", pipeline.AtUri())
  }

  // either a message or an error
appview/pulls/pulls.go (+48 -36)
···
  return
  }

+ blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+ if err != nil {
+ log.Println("failed to upload patch", err)
+ s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
+ return
+ }
+
  _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
  Collection: tangled.RepoPullNSID,
  Repo: user.Did,
···
  Repo: string(repo.RepoAt()),
  Branch: targetBranch,
  },
- Patch: patch,
+ PatchBlob: blob.Blob,
  Source: recordPullSource,
  CreatedAt: time.Now().Format(time.RFC3339),
  },
···
  // apply all record creations at once
  var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem
  for _, p := range stack {
+ blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(p.LatestPatch()))
+ if err != nil {
+ log.Println("failed to upload patch blob", err)
+ s.pages.Notice(w, "pull", "Failed to create pull request. Try again later.")
+ return
+ }
+
  record := p.AsRecord()
- write := comatproto.RepoApplyWrites_Input_Writes_Elem{
+ record.PatchBlob = blob.Blob
+ writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
  RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
  Collection: tangled.RepoPullNSID,
  Rkey: &p.Rkey,
···
  Val: &record,
  },
  },
- }
- writes = append(writes, &write)
+ })
  }
  _, err = comatproto.RepoApplyWrites(r.Context(), client, &comatproto.RepoApplyWrites_Input{
  Repo: user.Did,
···
  return
  }

- var recordPullSource *tangled.RepoPull_Source
- if pull.IsBranchBased() {
- recordPullSource = &tangled.RepoPull_Source{
- Branch: pull.PullSource.Branch,
- Sha: sourceRev,
- }
+ blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+ if err != nil {
+ log.Println("failed to upload patch blob", err)
+ s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
+ return
  }
- if pull.IsForkBased() {
- repoAt := pull.PullSource.RepoAt.String()
- recordPullSource = &tangled.RepoPull_Source{
- Branch: pull.PullSource.Branch,
- Repo: &repoAt,
- Sha: sourceRev,
- }
- }
+ record := pull.AsRecord()
+ record.PatchBlob = blob.Blob
+ record.CreatedAt = time.Now().Format(time.RFC3339)

  _, err = comatproto.RepoPutRecord(r.Context(), client, &comatproto.RepoPutRecord_Input{
  Collection: tangled.RepoPullNSID,
···
  Rkey: pull.Rkey,
  SwapRecord: ex.Cid,
  Record: &lexutil.LexiconTypeDecoder{
- Val: &tangled.RepoPull{
- Title: pull.Title,
- Target: &tangled.RepoPull_Target{
- Repo: string(repo.RepoAt()),
- Branch: pull.TargetBranch,
- },
- Patch: patch, // new patch
- Source: recordPullSource,
- CreatedAt: time.Now().Format(time.RFC3339),
- },
+ Val: &record,
  },
  })
  if err != nil {
···
  }
  defer tx.Rollback()

+ client, err := s.oauth.AuthorizedClient(r)
+ if err != nil {
+ log.Println("failed to authorize client")
+ s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
+ return
+ }
+
  // pds updates to make
  var writes []*comatproto.RepoApplyWrites_Input_Writes_Elem

···
  return
  }

+ blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+ if err != nil {
+ log.Println("failed to upload patch blob", err)
+ s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
+ return
+ }
  record := p.AsRecord()
+ record.PatchBlob = blob.Blob
  writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
  RepoApplyWrites_Create: &comatproto.RepoApplyWrites_Create{
  Collection: tangled.RepoPullNSID,
···
  return
  }

+ blob, err := comatproto.RepoUploadBlob(r.Context(), client, strings.NewReader(patch))
+ if err != nil {
+ log.Println("failed to upload patch blob", err)
+ s.pages.Notice(w, "resubmit-error", "Failed to update pull request on the PDS. Try again later.")
+ return
+ }
  record := np.AsRecord()
-
+ record.PatchBlob = blob.Blob
  writes = append(writes, &comatproto.RepoApplyWrites_Input_Writes_Elem{
  RepoApplyWrites_Update: &comatproto.RepoApplyWrites_Update{
  Collection: tangled.RepoPullNSID,
···
  if err != nil {
  log.Println("failed to resubmit pull", err)
  s.pages.Notice(w, "pull-resubmit-error", "Failed to resubmit pull request. Try again later.")
- return
- }
-
- client, err := s.oauth.AuthorizedClient(r)
- if err != nil {
- log.Println("failed to authorize client")
- s.pages.Notice(w, "resubmit-error", "Failed to create pull request. Try again later.")
  return
  }

appview/repo/archive.go (+1)
···
  l := rp.logger.With("handler", "DownloadArchive")
  ref := chi.URLParam(r, "ref")
  ref, _ = url.PathUnescape(ref)
+ ref = strings.TrimSuffix(ref, ".tar.gz")
  f, err := rp.repoResolver.Resolve(r)
  if err != nil {
  l.Error("failed to get repo and knot", "err", err)
appview/state/knotstream.go (+86)
···
  "tangled.org/core/log"
  "tangled.org/core/orm"
  "tangled.org/core/rbac"
+ "tangled.org/core/workflow"

+ "github.com/bluesky-social/indigo/atproto/syntax"
  "github.com/go-git/go-git/v5/plumbing"
  "github.com/posthog/posthog-go"
  )
···
  switch msg.Nsid {
  case tangled.GitRefUpdateNSID:
  return ingestRefUpdate(d, enforcer, posthog, dev, source, msg)
+ case tangled.PipelineNSID:
+ return ingestPipeline(d, source, msg)
  }

  return nil
···

  return tx.Commit()
  }
+
+ func ingestPipeline(d *db.DB, source ec.Source, msg ec.Message) error {
+ var record tangled.Pipeline
+ err := json.Unmarshal(msg.EventJson, &record)
+ if err != nil {
+ return err
+ }
+
+ if record.TriggerMetadata == nil {
+ return fmt.Errorf("empty trigger metadata: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
+ }
+
+ if record.TriggerMetadata.Repo == nil {
+ return fmt.Errorf("empty repo: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
+ }
+
+ // does this repo have a spindle configured?
+ repos, err := db.GetRepos(
+ d,
+ 0,
+ orm.FilterEq("did", record.TriggerMetadata.Repo.Did),
+ orm.FilterEq("name", record.TriggerMetadata.Repo.Repo),
+ )
+ if err != nil {
+ return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
+ }
+ if len(repos) != 1 {
+ return fmt.Errorf("incorrect number of repos returned: %d (expected 1)", len(repos))
+ }
+ if repos[0].Spindle == "" {
+ return fmt.Errorf("repo does not have a spindle configured yet: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
+ }
+
+ // trigger info
+ var trigger models.Trigger
+ var sha string
+ trigger.Kind = workflow.TriggerKind(record.TriggerMetadata.Kind)
+ switch trigger.Kind {
+ case workflow.TriggerKindPush:
+ trigger.PushRef = &record.TriggerMetadata.Push.Ref
+ trigger.PushNewSha = &record.TriggerMetadata.Push.NewSha
+ trigger.PushOldSha = &record.TriggerMetadata.Push.OldSha
+ sha = *trigger.PushNewSha
+ case workflow.TriggerKindPullRequest:
+ trigger.PRSourceBranch = &record.TriggerMetadata.PullRequest.SourceBranch
+ trigger.PRTargetBranch = &record.TriggerMetadata.PullRequest.TargetBranch
+ trigger.PRSourceSha = &record.TriggerMetadata.PullRequest.SourceSha
+ trigger.PRAction = &record.TriggerMetadata.PullRequest.Action
+ sha = *trigger.PRSourceSha
+ }
+
+ tx, err := d.Begin()
+ if err != nil {
+ return fmt.Errorf("failed to start txn: %w", err)
+ }
+
+ triggerId, err := db.AddTrigger(tx, trigger)
+ if err != nil {
+ return fmt.Errorf("failed to add trigger entry: %w", err)
+ }
+
+ pipeline := models.Pipeline{
+ Rkey: msg.Rkey,
+ Knot: source.Key(),
+ RepoOwner: syntax.DID(record.TriggerMetadata.Repo.Did),
+ RepoName: record.TriggerMetadata.Repo.Repo,
+ TriggerId: int(triggerId),
+ Sha: sha,
+ }
+
+ err = db.AddPipeline(tx, pipeline)
+ if err != nil {
+ return fmt.Errorf("failed to add pipeline: %w", err)
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return fmt.Errorf("failed to commit txn: %w", err)
+ }
+
+ return nil
+ }
appview/state/router.go (+3 -3)
···
  r.Mount("/", s.RepoRouter(mw))
  r.Mount("/issues", s.IssuesRouter(mw))
  r.Mount("/pulls", s.PullsRouter(mw))
- r.Mount("/pipelines", s.PipelinesRouter(mw))
+ r.Mount("/pipelines", s.PipelinesRouter())
  r.Mount("/labels", s.LabelsRouter())

  // These routes get proxied to the knot
···
  return repo.Router(mw)
  }

- func (s *State) PipelinesRouter(mw *middleware.Middleware) http.Handler {
+ func (s *State) PipelinesRouter() http.Handler {
  pipes := pipelines.New(
  s.oauth,
  s.repoResolver,
···
  s.enforcer,
  log.SubLogger(s.logger, "pipelines"),
  )
- return pipes.Router(mw)
+ return pipes.Router()
  }

  func (s *State) LabelsRouter() http.Handler {
appview/state/spindlestream.go (-89)
···
20
20
"tangled.org/core/orm"
21
21
"tangled.org/core/rbac"
22
22
spindle "tangled.org/core/spindle/models"
23
-
"tangled.org/core/workflow"
24
23
)
25
24
26
25
func Spindlestream(ctx context.Context, c *config.Config, d *db.DB, enforcer *rbac.Enforcer) (*ec.Consumer, error) {
···
63
62
func spindleIngester(ctx context.Context, logger *slog.Logger, d *db.DB) ec.ProcessFunc {
64
63
return func(ctx context.Context, source ec.Source, msg ec.Message) error {
65
64
switch msg.Nsid {
66
-
case tangled.PipelineNSID:
67
-
return ingestPipeline(logger, d, source, msg)
68
65
case tangled.PipelineStatusNSID:
69
66
return ingestPipelineStatus(ctx, logger, d, source, msg)
70
67
}
71
68
72
69
return nil
73
70
}
74
-
}
75
-
76
-
func ingestPipeline(l *slog.Logger, d *db.DB, source ec.Source, msg ec.Message) error {
77
-
var record tangled.Pipeline
78
-
err := json.Unmarshal(msg.EventJson, &record)
79
-
if err != nil {
80
-
return err
81
-
}
82
-
83
-
if record.TriggerMetadata == nil {
84
-
return fmt.Errorf("empty trigger metadata: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
85
-
}
86
-
87
-
if record.TriggerMetadata.Repo == nil {
88
-
return fmt.Errorf("empty repo: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
89
-
}
90
-
91
-
// does this repo have a spindle configured?
92
-
repos, err := db.GetRepos(
93
-
d,
94
-
0,
95
-
orm.FilterEq("did", record.TriggerMetadata.Repo.Did),
96
-
orm.FilterEq("name", record.TriggerMetadata.Repo.Repo),
97
-
)
98
-
if err != nil {
99
-
return fmt.Errorf("failed to look for repo in DB: nsid %s, rkey %s, %w", msg.Nsid, msg.Rkey, err)
100
-
}
101
-
if len(repos) != 1 {
102
-
return fmt.Errorf("incorrect number of repos returned: %d (expected 1)", len(repos))
103
-
}
104
-
if repos[0].Spindle == "" {
105
-
return fmt.Errorf("repo does not have a spindle configured yet: nsid %s, rkey %s", msg.Nsid, msg.Rkey)
106
-
}
107
-
108
-
// trigger info
109
-
var trigger models.Trigger
110
-
var sha string
111
-
trigger.Kind = workflow.TriggerKind(record.TriggerMetadata.Kind)
112
-
switch trigger.Kind {
113
-
case workflow.TriggerKindPush:
114
-
trigger.PushRef = &record.TriggerMetadata.Push.Ref
115
-
trigger.PushNewSha = &record.TriggerMetadata.Push.NewSha
116
-
trigger.PushOldSha = &record.TriggerMetadata.Push.OldSha
117
-
sha = *trigger.PushNewSha
118
-
case workflow.TriggerKindPullRequest:
119
-
trigger.PRSourceBranch = &record.TriggerMetadata.PullRequest.SourceBranch
120
-
trigger.PRTargetBranch = &record.TriggerMetadata.PullRequest.TargetBranch
121
-
trigger.PRSourceSha = &record.TriggerMetadata.PullRequest.SourceSha
122
-
trigger.PRAction = &record.TriggerMetadata.PullRequest.Action
123
-
sha = *trigger.PRSourceSha
124
-
}
125
-
126
-
tx, err := d.Begin()
127
-
if err != nil {
128
-
return fmt.Errorf("failed to start txn: %w", err)
129
-
}
130
-
131
-
triggerId, err := db.AddTrigger(tx, trigger)
132
-
if err != nil {
133
-
return fmt.Errorf("failed to add trigger entry: %w", err)
134
-
}
135
-
136
-
// TODO: we shouldn't even use knot to identify pipelines
137
-
knot := record.TriggerMetadata.Repo.Knot
138
-
pipeline := models.Pipeline{
139
-
Rkey: msg.Rkey,
140
-
Knot: knot,
141
-
RepoOwner: syntax.DID(record.TriggerMetadata.Repo.Did),
142
-
RepoName: record.TriggerMetadata.Repo.Repo,
143
-
TriggerId: int(triggerId),
144
-
Sha: sha,
145
-
}
146
-
147
-
err = db.AddPipeline(tx, pipeline)
148
-
if err != nil {
149
-
return fmt.Errorf("failed to add pipeline: %w", err)
150
-
}
151
-
152
-
err = tx.Commit()
153
-
if err != nil {
154
-
return fmt.Errorf("failed to commit txn: %w", err)
155
-
}
156
-
157
-
l.Info("added pipeline", "pipeline", pipeline)
158
-
159
-
return nil
160
71
}
161
72
162
73
func ingestPipelineStatus(ctx context.Context, logger *slog.Logger, d *db.DB, source ec.Source, msg ec.Message) error {
flake.nix (+2 -31)
···
  spindle = self.callPackage ./nix/pkgs/spindle.nix {};
  knot-unwrapped = self.callPackage ./nix/pkgs/knot-unwrapped.nix {};
  knot = self.callPackage ./nix/pkgs/knot.nix {};
- did-method-plc = self.callPackage ./nix/pkgs/did-method-plc.nix {};
- bluesky-jetstream = self.callPackage ./nix/pkgs/bluesky-jetstream.nix {};
- bluesky-relay = self.callPackage ./nix/pkgs/bluesky-relay.nix {};
- tap = self.callPackage ./nix/pkgs/tap.nix {};
  });
  in {
  overlays.default = final: prev: {
- inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs did-method-plc bluesky-jetstream bluesky-relay tap;
+ inherit (mkPackageSet final) lexgen goat sqlite-lib spindle knot-unwrapped knot appview docs;
  };

  packages = forAllSystems (system: let
···
  staticPackages = mkPackageSet pkgs.pkgsStatic;
  crossPackages = mkPackageSet pkgs.pkgsCross.gnu64.pkgsStatic;
  in {
- inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs did-method-plc bluesky-jetstream bluesky-relay tap;
+ inherit (packages) appview appview-static-files lexgen goat spindle knot knot-unwrapped sqlite-lib docs;

  pkgsStatic-appview = staticPackages.appview;
  pkgsStatic-knot = staticPackages.knot;
···
  imports = [./nix/modules/spindle.nix];

  services.tangled.spindle.package = lib.mkDefault self.packages.${pkgs.stdenv.hostPlatform.system}.spindle;
- services.tangled.spindle.tap-package = lib.mkDefault self.packages.${pkgs.system}.tap;
- };
- nixosModules.did-method-plc = {
- lib,
- pkgs,
- ...
- }: {
- imports = [./nix/modules/did-method-plc.nix];
- services.did-method-plc.package = lib.mkDefault self.packages.${pkgs.system}.did-method-plc;
- };
- nixosModules.bluesky-relay = {
- lib,
- pkgs,
- ...
- }: {
- imports = [./nix/modules/bluesky-relay.nix];
- services.bluesky-relay.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-relay;
- };
- nixosModules.bluesky-jetstream = {
- lib,
- pkgs,
- ...
- }: {
- imports = [./nix/modules/bluesky-jetstream.nix];
- services.bluesky-jetstream.package = lib.mkDefault self.packages.${pkgs.system}.bluesky-jetstream;
  };
  };
  }
go.mod (-1)
···
  github.com/gorilla/feeds v1.2.0
  github.com/gorilla/sessions v1.4.0
  github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
- github.com/hashicorp/go-version v1.8.0
  github.com/hiddeco/sshsig v0.2.0
  github.com/hpcloud/tail v1.0.0
  github.com/ipfs/go-cid v0.5.0
go.sum (-2)
···
  github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
  github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
  github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
- github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
- github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
  github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
  github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
  github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
knotserver/ingester.go (+136)
···
  "io"
  "net/http"
  "net/url"
+ "path/filepath"
  "strings"

  comatproto "github.com/bluesky-social/indigo/api/atproto"
···
  securejoin "github.com/cyphar/filepath-securejoin"
  "tangled.org/core/api/tangled"
  "tangled.org/core/knotserver/db"
+ "tangled.org/core/knotserver/git"
  "tangled.org/core/log"
  "tangled.org/core/rbac"
+ "tangled.org/core/workflow"
  )

  func (h *Knot) processPublicKey(ctx context.Context, event *models.Event) error {
···
  return nil
  }

+ func (h *Knot) processPull(ctx context.Context, event *models.Event) error {
+ raw := json.RawMessage(event.Commit.Record)
+ did := event.Did
+
+ var record tangled.RepoPull
+ if err := json.Unmarshal(raw, &record); err != nil {
+ return fmt.Errorf("failed to unmarshal record: %w", err)
+ }
+
+ l := log.FromContext(ctx)
+ l = l.With("handler", "processPull")
+ l = l.With("did", did)
+
+ if record.Target == nil {
+ return fmt.Errorf("ignoring pull record: target repo is nil")
+ }
+
+ l = l.With("target_repo", record.Target.Repo)
+ l = l.With("target_branch", record.Target.Branch)
+
+ if record.Source == nil {
+ return fmt.Errorf("ignoring pull record: not a branch-based pull request")
+ }
+
+ if record.Source.Repo != nil {
+ return fmt.Errorf("ignoring pull record: fork based pull")
+ }
+
+ repoAt, err := syntax.ParseATURI(record.Target.Repo)
+ if err != nil {
+ return fmt.Errorf("failed to parse ATURI: %w", err)
+ }
+
+ // resolve this aturi to extract the repo record
+ ident, err := h.resolver.ResolveIdent(ctx, repoAt.Authority().String())
+ if err != nil || ident.Handle.IsInvalidHandle() {
+ return fmt.Errorf("failed to resolve handle: %w", err)
+ }
+
+ xrpcc := xrpc.Client{
+ Host: ident.PDSEndpoint(),
+ }
+
+ resp, err := comatproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String())
+ if err != nil {
+ return fmt.Errorf("failed to resolver repo: %w", err)
+ }
+
+ repo := resp.Value.Val.(*tangled.Repo)
+
+ if repo.Knot != h.c.Server.Hostname {
+ return fmt.Errorf("rejected pull record: not this knot, %s != %s", repo.Knot, h.c.Server.Hostname)
+ }
+
+ didSlashRepo, err := securejoin.SecureJoin(ident.DID.String(), repo.Name)
+ if err != nil {
+ return fmt.Errorf("failed to construct relative repo path: %w", err)
+ }
+
+ repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, didSlashRepo)
+ if err != nil {
+ return fmt.Errorf("failed to construct absolute repo path: %w", err)
+ }
+
+ gr, err := git.Open(repoPath, record.Source.Sha)
+ if err != nil {
+ return fmt.Errorf("failed to open git repository: %w", err)
+ }
+
+ workflowDir, err := gr.FileTree(ctx, workflow.WorkflowDir)
+ if err != nil {
+ return fmt.Errorf("failed to open workflow directory: %w", err)
+ }
+
+ var pipeline workflow.RawPipeline
+ for _, e := range workflowDir {
+ if !e.IsFile() {
+ continue
+ }
+
+ fpath := filepath.Join(workflow.WorkflowDir, e.Name)
+ contents, err := gr.RawContent(fpath)
+ if err != nil {
+ continue
+ }
+
+ pipeline = append(pipeline, workflow.RawWorkflow{
+ Name: e.Name,
+ Contents: contents,
+ })
+ }
+
+ trigger := tangled.Pipeline_PullRequestTriggerData{
+ Action: "create",
+ SourceBranch: record.Source.Branch,
+ SourceSha: record.Source.Sha,
+ TargetBranch: record.Target.Branch,
+ }
+
+ compiler := workflow.Compiler{
+ Trigger: tangled.Pipeline_TriggerMetadata{
+ Kind: string(workflow.TriggerKindPullRequest),
+ PullRequest: &trigger,
+ Repo: &tangled.Pipeline_TriggerRepo{
+ Did: ident.DID.String(),
+ Knot: repo.Knot,
+ Repo: repo.Name,
+ },
+ },
+ }
+
+ cp := compiler.Compile(compiler.Parse(pipeline))
+ eventJson, err := json.Marshal(cp)
+ if err != nil {
+ return fmt.Errorf("failed to marshal pipeline event: %w", err)
+ }
+
+ // do not run empty pipelines
+ if cp.Workflows == nil {
+ return nil
+ }
+
+ ev := db.Event{
+ Rkey: TID(),
+ Nsid: tangled.PipelineNSID,
+ EventJson: string(eventJson),
+ }
+
+ return h.db.InsertEvent(ev, h.n)
+ }
+
  // duplicated from add collaborator
  func (h *Knot) processCollaborator(ctx context.Context, event *models.Event) error {
  raw := json.RawMessage(event.Commit.Record)
···
  err = h.processPublicKey(ctx, event)
  case tangled.KnotMemberNSID:
  err = h.processKnotMember(ctx, event)
+ case tangled.RepoPullNSID:
+ err = h.processPull(ctx, event)
  case tangled.RepoCollaboratorNSID:
  err = h.processCollaborator(ctx, event)
  }
knotserver/internal.go (+109 -1)
···
  "tangled.org/core/log"
  "tangled.org/core/notifier"
  "tangled.org/core/rbac"
+ "tangled.org/core/workflow"
  )

  type InternalHandle struct {
···
  }

  for _, line := range lines {
- // TODO: pass pushOptions to refUpdate
  err := h.insertRefUpdate(line, gitUserDid, repoDid, repoName)
  if err != nil {
  l.Error("failed to insert op", "err", err, "line", line, "did", gitUserDid, "repo", gitRelativeDir)
···
  err = h.emitCompareLink(&resp.Messages, line, repoDid, repoName)
  if err != nil {
  l.Error("failed to reply with compare link", "err", err, "line", line, "did", gitUserDid, "repo", gitRelativeDir)
+ // non-fatal
+ }
+
+ err = h.triggerPipeline(&resp.Messages, line, gitUserDid, repoDid, repoName, pushOptions)
+ if err != nil {
+ l.Error("failed to trigger pipeline", "err", err, "line", line, "did", gitUserDid, "repo", gitRelativeDir)
  // non-fatal
  }
  }
···
  }

  return errors.Join(errs, h.db.InsertEvent(event, h.n))
+ }
+
+ func (h *InternalHandle) triggerPipeline(
+ clientMsgs *[]string,
+ line git.PostReceiveLine,
+ gitUserDid string,
+ repoDid string,
+ repoName string,
+ pushOptions PushOptions,
+ ) error {
+ if pushOptions.skipCi {
+ return nil
+ }
+
+ didSlashRepo, err := securejoin.SecureJoin(repoDid, repoName)
+ if err != nil {
+ return err
+ }
+
+ repoPath, err := securejoin.SecureJoin(h.c.Repo.ScanPath, didSlashRepo)
+ if err != nil {
+ return err
+ }
+
+ gr, err := git.Open(repoPath, line.Ref)
+ if err != nil {
+ return err
+ }
+
+ workflowDir, err := gr.FileTree(context.Background(), workflow.WorkflowDir)
+ if err != nil {
+ return err
+ }
+
+ var pipeline workflow.RawPipeline
+ for _, e := range workflowDir {
+ if !e.IsFile() {
+ continue
+ }
+
+ fpath := filepath.Join(workflow.WorkflowDir, e.Name)
+ contents, err := gr.RawContent(fpath)
+ if err != nil {
+ continue
+ }
+
+ pipeline = append(pipeline, workflow.RawWorkflow{
+ Name: e.Name,
+ Contents: contents,
+ })
+ }
+
+ trigger := tangled.Pipeline_PushTriggerData{
+ Ref: line.Ref,
+ OldSha: line.OldSha.String(),
+ NewSha: line.NewSha.String(),
+ }
+
+ compiler := workflow.Compiler{
+ Trigger: tangled.Pipeline_TriggerMetadata{
+ Kind: string(workflow.TriggerKindPush),
+ Push: &trigger,
+ Repo: &tangled.Pipeline_TriggerRepo{
+ Did: repoDid,
+ Knot: h.c.Server.Hostname,
+ Repo: repoName,
+ },
+ },
+ }
+
+ cp := compiler.Compile(compiler.Parse(pipeline))
+ eventJson, err := json.Marshal(cp)
+ if err != nil {
+ return err
+ }
+
+ for _, e := range compiler.Diagnostics.Errors {
+ *clientMsgs = append(*clientMsgs, e.String())
+ }
+
+ if pushOptions.verboseCi {
+ if compiler.Diagnostics.IsEmpty() {
+ *clientMsgs = append(*clientMsgs, "success: pipeline compiled with no diagnostics")
+ }
+
+ for _, w := range compiler.Diagnostics.Warnings {
+ *clientMsgs = append(*clientMsgs, w.String())
+ }
+ }
+
+ // do not run empty pipelines
+ if cp.Workflows == nil {
+ return nil
+ }
+
+ event := db.Event{
+ Rkey: TID(),
+ Nsid: tangled.PipelineNSID,
+ EventJson: string(eventJson),
+ }
+
+ return h.db.InsertEvent(event, h.n)
  }

  func (h *InternalHandle) emitCompareLink(
knotserver/server.go (+1)
···
  jc, err := jetstream.NewJetstreamClient(c.Server.JetstreamEndpoint, "knotserver", []string{
  tangled.PublicKeyNSID,
  tangled.KnotMemberNSID,
+ tangled.RepoPullNSID,
  tangled.RepoCollaboratorNSID,
  }, nil, log.SubLogger(logger, "jetstream"), db, true, c.Server.LogDids)
  if err != nil {
lexicons/pipeline/cancelPipeline.json (-33, deleted)
- {
- "lexicon": 1,
- "id": "sh.tangled.pipeline.cancelPipeline",
- "defs": {
- "main": {
- "type": "procedure",
- "description": "Cancel a running pipeline",
- "input": {
- "encoding": "application/json",
- "schema": {
- "type": "object",
- "required": ["repo", "pipeline", "workflow"],
- "properties": {
- "repo": {
- "type": "string",
- "format": "at-uri",
- "description": "repo at-uri, spindle can't resolve repo from pipeline at-uri yet"
- },
- "pipeline": {
- "type": "string",
- "format": "at-uri",
- "description": "pipeline at-uri"
- },
- "workflow": {
- "type": "string",
- "description": "workflow name"
- }
- }
- }
- }
- }
- }
- }
lexicons/pulls/pull.json (+8 -2)
···
  "required": [
  "target",
  "title",
- "patch",
+ "patchBlob",
  "createdAt"
  ],
  "properties": {
···
  "type": "string"
  },
  "patch": {
- "type": "string"
+ "type": "string",
+ "description": "(deprecated) use patchBlob instead"
+ },
+ "patchBlob": {
+ "type": "blob",
+ "accept": "text/x-patch",
+ "description": "patch content"
  },
  "source": {
  "type": "ref",
nix/gomod2nix.toml (-3)
···
  [mod."github.com/hashicorp/go-sockaddr"]
  version = "v1.0.7"
  hash = "sha256-p6eDOrGzN1jMmT/F/f/VJMq0cKNFhUcEuVVwTE6vSrs="
- [mod."github.com/hashicorp/go-version"]
- version = "v1.8.0"
- hash = "sha256-KXtqERmYrWdpqPCViWcHbe6jnuH7k16bvBIcuJuevj8="
  [mod."github.com/hashicorp/golang-lru"]
  version = "v1.0.2"
  hash = "sha256-yy+5botc6T5wXgOe2mfNXJP3wr+MkVlUZ2JBkmmrA48="
nix/modules/bluesky-jetstream.nix (-64, deleted)
···
1
-
{
2
-
config,
3
-
pkgs,
4
-
lib,
5
-
...
6
-
}: let
7
-
cfg = config.services.bluesky-jetstream;
8
-
in
9
-
with lib; {
10
-
options.services.bluesky-jetstream = {
11
-
enable = mkEnableOption "jetstream server";
12
-
package = mkPackageOption pkgs "bluesky-jetstream" {};
13
-
14
-
# dataDir = mkOption {
15
-
# type = types.str;
16
-
# default = "/var/lib/jetstream";
17
-
# description = "directory to store data (pebbleDB)";
18
-
# };
19
-
livenessTtl = mkOption {
20
-
type = types.int;
21
-
default = 15;
22
-
description = "time to restart when no event detected (seconds)";
23
-
};
24
-
websocketUrl = mkOption {
25
-
type = types.str;
26
-
default = "wss://bsky.network/xrpc/com.atproto.sync.subscribeRepos";
27
-
description = "full websocket path to the ATProto SubscribeRepos XRPC endpoint";
28
-
};
29
-
};
30
-
config = mkIf cfg.enable {
31
-
systemd.services.bluesky-jetstream = {
32
-
description = "bluesky jetstream";
33
-
after = ["network.target" "pds.service"];
34
-
wantedBy = ["multi-user.target"];
35
-
36
-
serviceConfig = {
37
-
User = "jetstream";
38
-
Group = "jetstream";
39
-
StateDirectory = "jetstream";
40
-
StateDirectoryMode = "0755";
41
-
# preStart = ''
42
-
# mkdir -p "${cfg.dataDir}"
43
-
# chown -R jetstream:jetstream "${cfg.dataDir}"
44
-
# '';
45
-
# WorkingDirectory = cfg.dataDir;
46
-
Environment = [
47
-
"JETSTREAM_DATA_DIR=/var/lib/jetstream/data"
48
-
"JETSTREAM_LIVENESS_TTL=${toString cfg.livenessTtl}s"
49
-
"JETSTREAM_WS_URL=${cfg.websocketUrl}"
50
-
];
51
-
ExecStart = getExe cfg.package;
52
-
Restart = "always";
53
-
RestartSec = 5;
54
-
};
55
-
};
56
-
users = {
57
-
users.jetstream = {
58
-
group = "jetstream";
59
-
isSystemUser = true;
60
-
};
61
-
groups.jetstream = {};
62
-
};
63
-
};
64
-
}
-48
nix/modules/bluesky-relay.nix
···
1
-
{
2
-
config,
3
-
pkgs,
4
-
lib,
5
-
...
6
-
}: let
7
-
cfg = config.services.bluesky-relay;
8
-
in
9
-
with lib; {
10
-
options.services.bluesky-relay = {
11
-
enable = mkEnableOption "relay server";
12
-
package = mkPackageOption pkgs "bluesky-relay" {};
13
-
};
14
-
config = mkIf cfg.enable {
15
-
systemd.services.bluesky-relay = {
16
-
description = "bluesky relay";
17
-
after = ["network.target" "pds.service"];
18
-
wantedBy = ["multi-user.target"];
19
-
20
-
serviceConfig = {
21
-
User = "relay";
22
-
Group = "relay";
23
-
StateDirectory = "relay";
24
-
StateDirectoryMode = "0755";
25
-
Environment = [
26
-
"RELAY_ADMIN_PASSWORD=password"
27
-
"RELAY_PLC_HOST=https://plc.tngl.boltless.dev"
28
-
"DATABASE_URL=sqlite:///var/lib/relay/relay.sqlite"
29
-
"RELAY_IP_BIND=:2470"
30
-
"RELAY_PERSIST_DIR=/var/lib/relay"
31
-
"RELAY_DISABLE_REQUEST_CRAWL=0"
32
-
"RELAY_INITIAL_SEQ_NUMBER=1"
33
-
"RELAY_ALLOW_INSECURE_HOSTS=1"
34
-
];
35
-
ExecStart = "${getExe cfg.package} serve";
36
-
Restart = "always";
37
-
RestartSec = 5;
38
-
};
39
-
};
40
-
users = {
41
-
users.relay = {
42
-
group = "relay";
43
-
isSystemUser = true;
44
-
};
45
-
groups.relay = {};
46
-
};
47
-
};
48
-
}
-76
nix/modules/did-method-plc.nix
···
1
-
{
2
-
config,
3
-
pkgs,
4
-
lib,
5
-
...
6
-
}: let
7
-
cfg = config.services.did-method-plc;
8
-
in
9
-
with lib; {
10
-
options.services.did-method-plc = {
11
-
enable = mkEnableOption "did-method-plc server";
12
-
package = mkPackageOption pkgs "did-method-plc" {};
13
-
};
14
-
config = mkIf cfg.enable {
15
-
services.postgresql = {
16
-
enable = true;
17
-
package = pkgs.postgresql_14;
18
-
ensureDatabases = ["plc"];
19
-
ensureUsers = [
20
-
{
21
-
name = "pg";
22
-
# ensurePermissions."DATABASE plc" = "ALL PRIVILEGES";
23
-
}
24
-
];
25
-
authentication = ''
26
-
local all all trust
27
-
host all all 127.0.0.1/32 trust
28
-
'';
29
-
};
30
-
systemd.services.did-method-plc = {
31
-
description = "did-method-plc";
32
-
33
-
after = ["postgresql.service"];
34
-
wants = ["postgresql.service"];
35
-
wantedBy = ["multi-user.target"];
36
-
37
-
environment = let
38
-
db_creds_json = builtins.toJSON {
39
-
username = "pg";
40
-
password = "";
41
-
host = "127.0.0.1";
42
-
port = 5432;
43
-
};
44
-
in {
45
-
# TODO: inherit from config
46
-
DEBUG_MODE = "1";
47
-
LOG_ENABLED = "true";
48
-
LOG_LEVEL = "debug";
49
-
LOG_DESTINATION = "1";
50
-
ENABLE_MIGRATIONS = "true";
51
-
DB_CREDS_JSON = db_creds_json;
52
-
DB_MIGRATE_CREDS_JSON = db_creds_json;
53
-
PLC_VERSION = "0.0.1";
54
-
PORT = "8080";
55
-
};
56
-
57
-
serviceConfig = {
58
-
ExecStart = getExe cfg.package;
59
-
User = "plc";
60
-
Group = "plc";
61
-
StateDirectory = "plc";
62
-
StateDirectoryMode = "0755";
63
-
Restart = "always";
64
-
65
-
# Hardening
66
-
};
67
-
};
68
-
users = {
69
-
users.plc = {
70
-
group = "plc";
71
-
isSystemUser = true;
72
-
};
73
-
groups.plc = {};
74
-
};
75
-
};
76
-
}
+12
-46
nix/modules/spindle.nix
···
1
1
{
2
2
config,
3
-
pkgs,
4
3
lib,
5
4
...
6
5
}: let
···
18
17
type = types.package;
19
18
description = "Package to use for the spindle";
20
19
};
21
-
tap-package = mkOption {
22
-
type = types.package;
23
-
description = "Package to use for the spindle";
24
-
};
25
-
26
-
atpRelayUrl = mkOption {
27
-
type = types.str;
28
-
default = "https://relay1.us-east.bsky.network";
29
-
description = "atproto relay";
30
-
};
31
20
32
21
server = {
33
22
listenAddr = mkOption {
···
36
25
description = "Address to listen on";
37
26
};
38
27
39
-
stateDir = mkOption {
28
+
dbPath = mkOption {
40
29
type = types.path;
41
-
default = "/var/lib/spindle";
42
-
description = "Tangled spindle data directory";
30
+
default = "/var/lib/spindle/spindle.db";
31
+
description = "Path to the database file";
43
32
};
44
33
45
34
hostname = mkOption {
···
52
41
type = types.str;
53
42
default = "https://plc.directory";
54
43
description = "atproto PLC directory";
44
+
};
45
+
46
+
jetstreamEndpoint = mkOption {
47
+
type = types.str;
48
+
default = "wss://jetstream1.us-west.bsky.network/subscribe";
49
+
description = "Jetstream endpoint to subscribe to";
55
50
};
56
51
57
52
dev = mkOption {
···
119
114
config = mkIf cfg.enable {
120
115
virtualisation.docker.enable = true;
121
116
122
-
systemd.services.spindle-tap = {
123
-
description = "spindle tap service";
124
-
after = ["network.target" "docker.service"];
125
-
wantedBy = ["multi-user.target"];
126
-
serviceConfig = {
127
-
LogsDirectory = "spindle-tap";
128
-
StateDirectory = "spindle-tap";
129
-
Environment = [
130
-
"TAP_BIND=:2480"
131
-
"TAP_PLC_URL=${cfg.server.plcUrl}"
132
-
"TAP_RELAY_URL=${cfg.atpRelayUrl}"
133
-
"TAP_DATABASE_URL=sqlite:///var/lib/spindle-tap/tap.db"
134
-
"TAP_RETRY_TIMEOUT=3s"
135
-
"TAP_COLLECTION_FILTERS=${concatStringsSep "," [
136
-
"sh.tangled.repo"
137
-
"sh.tangled.repo.collaborator"
138
-
"sh.tangled.spindle.member"
139
-
"sh.tangled.repo.pull"
140
-
]}"
141
-
# temporary hack to listen for repo.pull from non-tangled users
142
-
"TAP_SIGNAL_COLLECTION=sh.tangled.repo.pull"
143
-
];
144
-
ExecStart = "${getExe cfg.tap-package} run";
145
-
};
146
-
};
147
-
148
117
systemd.services.spindle = {
149
118
description = "spindle service";
150
-
after = ["network.target" "docker.service" "spindle-tap.service"];
119
+
after = ["network.target" "docker.service"];
151
120
wantedBy = ["multi-user.target"];
152
-
path = [
153
-
pkgs.git
154
-
];
155
121
serviceConfig = {
156
122
LogsDirectory = "spindle";
157
123
StateDirectory = "spindle";
158
124
Environment = [
159
125
"SPINDLE_SERVER_LISTEN_ADDR=${cfg.server.listenAddr}"
160
-
"SPINDLE_SERVER_DATA_DIR=${cfg.server.stateDir}"
126
+
"SPINDLE_SERVER_DB_PATH=${cfg.server.dbPath}"
161
127
"SPINDLE_SERVER_HOSTNAME=${cfg.server.hostname}"
162
128
"SPINDLE_SERVER_PLC_URL=${cfg.server.plcUrl}"
129
+
"SPINDLE_SERVER_JETSTREAM_ENDPOINT=${cfg.server.jetstreamEndpoint}"
163
130
"SPINDLE_SERVER_DEV=${lib.boolToString cfg.server.dev}"
164
131
"SPINDLE_SERVER_OWNER=${cfg.server.owner}"
165
132
"SPINDLE_SERVER_MAX_JOB_COUNT=${toString cfg.server.maxJobCount}"
···
167
134
"SPINDLE_SERVER_SECRETS_PROVIDER=${cfg.server.secrets.provider}"
168
135
"SPINDLE_SERVER_SECRETS_OPENBAO_PROXY_ADDR=${cfg.server.secrets.openbao.proxyAddr}"
169
136
"SPINDLE_SERVER_SECRETS_OPENBAO_MOUNT=${cfg.server.secrets.openbao.mount}"
170
-
"SPINDLE_SERVER_TAP_URL=http://localhost:2480"
171
137
"SPINDLE_NIXERY_PIPELINES_NIXERY=${cfg.pipelines.nixery}"
172
138
"SPINDLE_NIXERY_PIPELINES_WORKFLOW_TIMEOUT=${cfg.pipelines.workflowTimeout}"
173
139
];
-20
nix/pkgs/bluesky-jetstream.nix
···
1
-
{
2
-
buildGoModule,
3
-
fetchFromGitHub,
4
-
}:
5
-
buildGoModule {
6
-
pname = "bluesky-jetstream";
7
-
version = "0.1.0";
8
-
src = fetchFromGitHub {
9
-
owner = "bluesky-social";
10
-
repo = "jetstream";
11
-
rev = "7d7efa58d7f14101a80ccc4f1085953948b7d5de";
12
-
sha256 = "sha256-1e9SL/8gaDPMA4YZed51ffzgpkptbMd0VTbTTDbPTFw=";
13
-
};
14
-
subPackages = ["cmd/jetstream"];
15
-
vendorHash = "sha256-/21XJQH6fo9uPzlABUAbdBwt1O90odmppH6gXu2wkiQ=";
16
-
doCheck = false;
17
-
meta = {
18
-
mainProgram = "jetstream";
19
-
};
20
-
}
-20
nix/pkgs/bluesky-relay.nix
···
1
-
{
2
-
buildGoModule,
3
-
fetchFromGitHub,
4
-
}:
5
-
buildGoModule {
6
-
pname = "bluesky-relay";
7
-
version = "0.1.0";
8
-
src = fetchFromGitHub {
9
-
owner = "boltlessengineer";
10
-
repo = "indigo";
11
-
rev = "7fe70a304d795b998f354d2b7b2050b909709c99";
12
-
sha256 = "sha256-+h34x67cqH5t30+8rua53/ucvbn3BanrmH0Og3moHok=";
13
-
};
14
-
subPackages = ["cmd/relay"];
15
-
vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8=";
16
-
doCheck = false;
17
-
meta = {
18
-
mainProgram = "relay";
19
-
};
20
-
}
-65
nix/pkgs/did-method-plc.nix
···
1
-
# inspired by https://github.com/NixOS/nixpkgs/blob/333bfb7c258fab089a834555ea1c435674c459b4/pkgs/by-name/ga/gatsby-cli/package.nix
2
-
{
3
-
lib,
4
-
stdenv,
5
-
fetchFromGitHub,
6
-
fetchYarnDeps,
7
-
yarnConfigHook,
8
-
yarnBuildHook,
9
-
nodejs,
10
-
makeBinaryWrapper,
11
-
}:
12
-
stdenv.mkDerivation (finalAttrs: {
13
-
pname = "did-method-plc";
14
-
version = "0.0.1";
15
-
16
-
src = fetchFromGitHub {
17
-
owner = "did-method-plc";
18
-
repo = "did-method-plc";
19
-
rev = "158ba5535ac3da4fd4309954bde41deab0b45972";
20
-
sha256 = "sha256-O5smubbrnTDMCvL6iRyMXkddr5G7YHxkQRVMRULHanQ=";
21
-
};
22
-
postPatch = ''
23
-
# remove dd-trace dependency
24
-
sed -i '3d' packages/server/service/index.js
25
-
'';
26
-
27
-
yarnOfflineCache = fetchYarnDeps {
28
-
yarnLock = finalAttrs.src + "/yarn.lock";
29
-
hash = "sha256-g8GzaAbWSnWwbQjJMV2DL5/ZlWCCX0sRkjjvX3tqU4Y=";
30
-
};
31
-
32
-
nativeBuildInputs = [
33
-
yarnConfigHook
34
-
yarnBuildHook
35
-
nodejs
36
-
makeBinaryWrapper
37
-
];
38
-
yarnBuildScript = "lerna";
39
-
yarnBuildFlags = [
40
-
"run"
41
-
"build"
42
-
"--scope"
43
-
"@did-plc/server"
44
-
"--include-dependencies"
45
-
];
46
-
47
-
installPhase = ''
48
-
runHook preInstall
49
-
50
-
mkdir -p $out/lib/node_modules/
51
-
mv packages/ $out/lib/packages/
52
-
mv node_modules/* $out/lib/node_modules/
53
-
54
-
makeWrapper ${lib.getExe nodejs} $out/bin/plc \
55
-
--add-flags $out/lib/packages/server/service/index.js \
56
-
--add-flags --enable-source-maps \
57
-
--set NODE_PATH $out/lib/node_modules
58
-
59
-
runHook postInstall
60
-
'';
61
-
62
-
meta = {
63
-
mainProgram = "plc";
64
-
};
65
-
})
-20
nix/pkgs/tap.nix
···
1
-
{
2
-
buildGoModule,
3
-
fetchFromGitHub,
4
-
}:
5
-
buildGoModule {
6
-
pname = "tap";
7
-
version = "0.1.0";
8
-
src = fetchFromGitHub {
9
-
owner = "bluesky-social";
10
-
repo = "indigo";
11
-
rev = "498ecb9693e8ae050f73234c86f340f51ad896a9";
12
-
sha256 = "sha256-KASCdwkg/hlKBt7RTW3e3R5J3hqJkphoarFbaMgtN1k=";
13
-
};
14
-
subPackages = ["cmd/tap"];
15
-
vendorHash = "sha256-UOedwNYnM8Jx6B7Y9tFcZX8IeUBESAFAPTRYk7n0yo8=";
16
-
doCheck = false;
17
-
meta = {
18
-
mainProgram = "tap";
19
-
};
20
-
}
+2
-8
nix/vm.nix
···
19
19
20
20
plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
21
21
jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
22
-
relayUrl = envVarOr "TANGLED_VM_RELAY_URL" "https://relay1.us-east.bsky.network";
23
22
in
24
23
nixpkgs.lib.nixosSystem {
25
24
inherit system;
···
58
57
host.port = 6555;
59
58
guest.port = 6555;
60
59
}
61
-
{
62
-
from = "host";
63
-
host.port = 6556;
64
-
guest.port = 2480;
65
-
}
66
60
];
67
61
sharedDirectories = {
68
62
# We can't use the 9p mounts directly for most of these
···
101
95
};
102
96
services.tangled.spindle = {
103
97
enable = true;
104
-
atpRelayUrl = relayUrl;
105
98
server = {
106
99
owner = envVar "TANGLED_VM_SPINDLE_OWNER";
107
100
hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
108
101
plcUrl = plcUrl;
102
+
jetstreamEndpoint = jetstream;
109
103
listenAddr = "0.0.0.0:6555";
110
104
dev = true;
111
105
queueSize = 100;
···
140
134
};
141
135
in {
142
136
knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
143
-
spindle = mkDataSyncScripts "/mnt/spindle-data" config.services.tangled.spindle.server.stateDir;
137
+
spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
144
138
};
145
139
})
146
140
];
-10
orm/orm.go
···
20
20
}
21
21
defer tx.Rollback()
22
22
23
-
_, err = tx.Exec(`
24
-
create table if not exists migrations (
25
-
id integer primary key autoincrement,
26
-
name text unique
27
-
);
28
-
`)
29
-
if err != nil {
30
-
return fmt.Errorf("creating migrations table: %w", err)
31
-
}
32
-
33
23
var exists bool
34
24
err = tx.QueryRow("select exists (select 1 from migrations where name = ?)", name).Scan(&exists)
35
25
if err != nil {
-52
rbac2/bytesadapter/adapter.go
···
1
-
package bytesadapter
2
-
3
-
import (
4
-
"bufio"
5
-
"bytes"
6
-
"errors"
7
-
"strings"
8
-
9
-
"github.com/casbin/casbin/v2/model"
10
-
"github.com/casbin/casbin/v2/persist"
11
-
)
12
-
13
-
var (
14
-
errNotImplemented = errors.New("not implemented")
15
-
)
16
-
17
-
type Adapter struct {
18
-
b []byte
19
-
}
20
-
21
-
var _ persist.Adapter = &Adapter{}
22
-
23
-
func NewAdapter(b []byte) *Adapter {
24
-
return &Adapter{b}
25
-
}
26
-
27
-
func (a *Adapter) LoadPolicy(model model.Model) error {
28
-
scanner := bufio.NewScanner(bytes.NewReader(a.b))
29
-
for scanner.Scan() {
30
-
line := strings.TrimSpace(scanner.Text())
31
-
if err := persist.LoadPolicyLine(line, model); err != nil {
32
-
return err
33
-
}
34
-
}
35
-
return scanner.Err()
36
-
}
37
-
38
-
func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error {
39
-
return errNotImplemented
40
-
}
41
-
42
-
func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error {
43
-
return errNotImplemented
44
-
}
45
-
46
-
func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error {
47
-
return errNotImplemented
48
-
}
49
-
50
-
func (a *Adapter) SavePolicy(model model.Model) error {
51
-
return errNotImplemented
52
-
}
-139
rbac2/rbac2.go
···
1
-
package rbac2
2
-
3
-
import (
4
-
"database/sql"
5
-
_ "embed"
6
-
"fmt"
7
-
8
-
adapter "github.com/Blank-Xu/sql-adapter"
9
-
"github.com/bluesky-social/indigo/atproto/syntax"
10
-
"github.com/casbin/casbin/v2"
11
-
"github.com/casbin/casbin/v2/model"
12
-
"github.com/casbin/casbin/v2/util"
13
-
"tangled.org/core/rbac2/bytesadapter"
14
-
)
15
-
16
-
const (
17
-
Model = `
18
-
[request_definition]
19
-
r = sub, dom, obj, act
20
-
21
-
[policy_definition]
22
-
p = sub, dom, obj, act
23
-
24
-
[role_definition]
25
-
g = _, _, _
26
-
27
-
[policy_effect]
28
-
e = some(where (p.eft == allow))
29
-
30
-
[matchers]
31
-
m = g(r.sub, p.sub, r.dom) && keyMatch4(r.dom, p.dom) && r.obj == p.obj && r.act == p.act
32
-
`
33
-
)
34
-
35
-
type Enforcer struct {
36
-
e *casbin.Enforcer
37
-
}
38
-
39
-
//go:embed tangled_policy.csv
40
-
var tangledPolicy []byte
41
-
42
-
func NewEnforcer(path string) (*Enforcer, error) {
43
-
db, err := sql.Open("sqlite3", path+"?_foreign_keys=1")
44
-
if err != nil {
45
-
return nil, err
46
-
}
47
-
return NewEnforcerWithDB(db)
48
-
}
49
-
50
-
func NewEnforcerWithDB(db *sql.DB) (*Enforcer, error) {
51
-
m, err := model.NewModelFromString(Model)
52
-
if err != nil {
53
-
return nil, err
54
-
}
55
-
56
-
a, err := adapter.NewAdapter(db, "sqlite3", "acl")
57
-
if err != nil {
58
-
return nil, err
59
-
}
60
-
61
-
// // PATCH: create unique index to make `AddPoliciesEx` work
62
-
// _, err = db.Exec(fmt.Sprintf(
63
-
// `create unique index if not exists uq_%[1]s on %[1]s (p_type,v0,v1,v2,v3,v4,v5);`,
64
-
// tableName,
65
-
// ))
66
-
// if err != nil {
67
-
// return nil, err
68
-
// }
69
-
70
-
e, _ := casbin.NewEnforcer() // NewEnforcer() without param won't return error
71
-
// e.EnableLog(true)
72
-
73
-
// NOTE: casbin clears the model on init, so we should intialize with temporary adapter first
74
-
// and then override the adapter to sql-adapter.
75
-
// `e.SetModel(m)` after init doesn't work for some reason
76
-
if err := e.InitWithModelAndAdapter(m, bytesadapter.NewAdapter(tangledPolicy)); err != nil {
77
-
return nil, err
78
-
}
79
-
80
-
// load dynamic policy from db
81
-
e.EnableAutoSave(false)
82
-
if err := a.LoadPolicy(e.GetModel()); err != nil {
83
-
return nil, err
84
-
}
85
-
e.AddNamedDomainMatchingFunc("g", "keyMatch4", util.KeyMatch4)
86
-
e.BuildRoleLinks()
87
-
e.SetAdapter(a)
88
-
e.EnableAutoSave(true)
89
-
90
-
return &Enforcer{e}, nil
91
-
}
92
-
93
-
// CaptureModel returns copy of current model. Used for testing
94
-
func (e *Enforcer) CaptureModel() model.Model {
95
-
return e.e.GetModel().Copy()
96
-
}
97
-
98
-
func (e *Enforcer) hasImplicitRoleForUser(name string, role string, domain ...string) (bool, error) {
99
-
roles, err := e.e.GetImplicitRolesForUser(name, domain...)
100
-
if err != nil {
101
-
return false, err
102
-
}
103
-
for _, r := range roles {
104
-
if r == role {
105
-
return true, nil
106
-
}
107
-
}
108
-
return false, nil
109
-
}
110
-
111
-
// setRoleForUser sets single user role for specified domain.
112
-
// All existing users with that role will be removed.
113
-
func (e *Enforcer) setRoleForUser(name string, role string, domain ...string) error {
114
-
currentUsers, err := e.e.GetUsersForRole(role, domain...)
115
-
if err != nil {
116
-
return err
117
-
}
118
-
119
-
for _, oldUser := range currentUsers {
120
-
_, err = e.e.DeleteRoleForUser(oldUser, role, domain...)
121
-
if err != nil {
122
-
return err
123
-
}
124
-
}
125
-
126
-
_, err = e.e.AddRoleForUser(name, role, domain...)
127
-
return err
128
-
}
129
-
130
-
// validateAtUri enforeces AT-URI to have valid did as authority and match collection NSID.
131
-
func validateAtUri(uri syntax.ATURI, expected string) error {
132
-
if !uri.Authority().IsDID() {
133
-
return fmt.Errorf("expected at-uri with did")
134
-
}
135
-
if expected != "" && uri.Collection().String() != expected {
136
-
return fmt.Errorf("incorrect repo at-uri collection nsid '%s' (expected '%s')", uri.Collection(), expected)
137
-
}
138
-
return nil
139
-
}
-150
rbac2/rbac2_test.go
···
1
-
package rbac2_test
2
-
3
-
import (
4
-
"database/sql"
5
-
"testing"
6
-
7
-
"github.com/bluesky-social/indigo/atproto/syntax"
8
-
_ "github.com/mattn/go-sqlite3"
9
-
"github.com/stretchr/testify/assert"
10
-
"tangled.org/core/rbac2"
11
-
)
12
-
13
-
func setup(t *testing.T) *rbac2.Enforcer {
14
-
enforcer, err := rbac2.NewEnforcer(":memory:")
15
-
assert.NoError(t, err)
16
-
17
-
return enforcer
18
-
}
19
-
20
-
func TestNewEnforcer(t *testing.T) {
21
-
db, err := sql.Open("sqlite3", "/tmp/test/test.db?_foreign_keys=1")
22
-
assert.NoError(t, err)
23
-
24
-
enforcer1, err := rbac2.NewEnforcerWithDB(db)
25
-
assert.NoError(t, err)
26
-
enforcer1.AddRepo(syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey"))
27
-
model1 := enforcer1.CaptureModel()
28
-
29
-
enforcer2, err := rbac2.NewEnforcerWithDB(db)
30
-
assert.NoError(t, err)
31
-
model2 := enforcer2.CaptureModel()
32
-
33
-
// model1.GetLogger().EnableLog(true)
34
-
// model1.PrintModel()
35
-
// model1.PrintPolicy()
36
-
// model1.GetLogger().EnableLog(false)
37
-
38
-
model2.GetLogger().EnableLog(true)
39
-
model2.PrintModel()
40
-
model2.PrintPolicy()
41
-
model2.GetLogger().EnableLog(false)
42
-
43
-
assert.Equal(t, model1, model2)
44
-
}
45
-
46
-
func TestRepoOwnerPermissions(t *testing.T) {
47
-
var (
48
-
e = setup(t)
49
-
ok bool
50
-
err error
51
-
fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
52
-
fooUser = syntax.DID("did:plc:foo")
53
-
)
54
-
55
-
assert.NoError(t, e.AddRepo(fooRepo))
56
-
57
-
ok, err = e.IsRepoOwner(fooUser, fooRepo)
58
-
assert.NoError(t, err)
59
-
assert.True(t, ok, "repo author should be repo owner")
60
-
61
-
ok, err = e.IsRepoWriteAllowed(fooUser, fooRepo)
62
-
assert.NoError(t, err)
63
-
assert.True(t, ok, "repo owner should be able to modify the repo itself")
64
-
65
-
ok, err = e.IsRepoCollaborator(fooUser, fooRepo)
66
-
assert.NoError(t, err)
67
-
assert.True(t, ok, "repo owner should inherit role role:collaborator")
68
-
69
-
ok, err = e.IsRepoSettingsWriteAllowed(fooUser, fooRepo)
70
-
assert.NoError(t, err)
71
-
assert.True(t, ok, "repo owner should inherit collaborator permissions")
72
-
}
73
-
74
-
func TestRepoCollaboratorPermissions(t *testing.T) {
75
-
var (
76
-
e = setup(t)
77
-
ok bool
78
-
err error
79
-
fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
80
-
barUser = syntax.DID("did:plc:bar")
81
-
)
82
-
83
-
assert.NoError(t, e.AddRepo(fooRepo))
84
-
assert.NoError(t, e.AddRepoCollaborator(barUser, fooRepo))
85
-
86
-
ok, err = e.IsRepoCollaborator(barUser, fooRepo)
87
-
assert.NoError(t, err)
88
-
assert.True(t, ok, "should set repo collaborator")
89
-
90
-
ok, err = e.IsRepoSettingsWriteAllowed(barUser, fooRepo)
91
-
assert.NoError(t, err)
92
-
assert.True(t, ok, "repo collaborator should be able to edit repo settings")
93
-
94
-
ok, err = e.IsRepoWriteAllowed(barUser, fooRepo)
95
-
assert.NoError(t, err)
96
-
assert.False(t, ok, "repo collaborator shouldn't be able to modify the repo itself")
97
-
}
98
-
99
-
func TestGetByRole(t *testing.T) {
100
-
var (
101
-
e = setup(t)
102
-
err error
103
-
fooRepo = syntax.ATURI("at://did:plc:foo/sh.tangled.repo/reporkey")
104
-
owner = syntax.DID("did:plc:foo")
105
-
collaborator1 = syntax.DID("did:plc:bar")
106
-
collaborator2 = syntax.DID("did:plc:baz")
107
-
)
108
-
109
-
assert.NoError(t, e.AddRepo(fooRepo))
110
-
assert.NoError(t, e.AddRepoCollaborator(collaborator1, fooRepo))
111
-
assert.NoError(t, e.AddRepoCollaborator(collaborator2, fooRepo))
112
-
113
-
collaborators, err := e.GetRepoCollaborators(fooRepo)
114
-
assert.NoError(t, err)
115
-
assert.ElementsMatch(t, []syntax.DID{
116
-
owner,
117
-
collaborator1,
118
-
collaborator2,
119
-
}, collaborators)
120
-
}
121
-
122
-
func TestSpindleOwnerPermissions(t *testing.T) {
123
-
var (
124
-
e = setup(t)
125
-
ok bool
126
-
err error
127
-
spindle = syntax.DID("did:web:spindle.example.com")
128
-
owner = syntax.DID("did:plc:foo")
129
-
member = syntax.DID("did:plc:bar")
130
-
)
131
-
132
-
assert.NoError(t, e.SetSpindleOwner(owner, spindle))
133
-
assert.NoError(t, e.AddSpindleMember(member, spindle))
134
-
135
-
ok, err = e.IsSpindleMember(owner, spindle)
136
-
assert.NoError(t, err)
137
-
assert.True(t, ok, "spindle owner is spindle member")
138
-
139
-
ok, err = e.IsSpindleMember(member, spindle)
140
-
assert.NoError(t, err)
141
-
assert.True(t, ok, "spindle member is spindle member")
142
-
143
-
ok, err = e.IsSpindleMemberInviteAllowed(owner, spindle)
144
-
assert.NoError(t, err)
145
-
assert.True(t, ok, "spindle owner can invite members")
146
-
147
-
ok, err = e.IsSpindleMemberInviteAllowed(member, spindle)
148
-
assert.NoError(t, err)
149
-
assert.False(t, ok, "spindle member cannot invite members")
150
-
}
-91
rbac2/repo.go
···
1
-
package rbac2
2
-
3
-
import (
4
-
"slices"
5
-
"strings"
6
-
7
-
"github.com/bluesky-social/indigo/atproto/syntax"
8
-
"tangled.org/core/api/tangled"
9
-
)
10
-
11
-
// AddRepo adds new repo with its owner to rbac enforcer
12
-
func (e *Enforcer) AddRepo(repo syntax.ATURI) error {
13
-
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
14
-
return err
15
-
}
16
-
user := repo.Authority()
17
-
18
-
return e.setRoleForUser(user.String(), "repo:owner", repo.String())
19
-
}
20
-
21
-
// DeleteRepo deletes all policies related to the repo
22
-
func (e *Enforcer) DeleteRepo(repo syntax.ATURI) error {
23
-
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
24
-
return err
25
-
}
26
-
27
-
_, err := e.e.DeleteDomains(repo.String())
28
-
return err
29
-
}
30
-
31
-
// AddRepoCollaborator adds new collaborator to the repo
32
-
func (e *Enforcer) AddRepoCollaborator(user syntax.DID, repo syntax.ATURI) error {
33
-
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
34
-
return err
35
-
}
36
-
37
-
_, err := e.e.AddRoleForUser(user.String(), "repo:collaborator", repo.String())
38
-
return err
39
-
}
40
-
41
-
// RemoveRepoCollaborator removes the collaborator from the repo.
42
-
// This won't remove inherited roles like repository owner.
43
-
func (e *Enforcer) RemoveRepoCollaborator(user syntax.DID, repo syntax.ATURI) error {
44
-
if err := validateAtUri(repo, tangled.RepoNSID); err != nil {
45
-
return err
46
-
}
47
-
48
-
_, err := e.e.DeleteRoleForUser(user.String(), "repo:collaborator", repo.String())
49
-
return err
50
-
}
51
-
52
-
func (e *Enforcer) GetRepoCollaborators(repo syntax.ATURI) ([]syntax.DID, error) {
53
-
var collaborators []syntax.DID
54
-
members, err := e.e.GetImplicitUsersForRole("repo:collaborator", repo.String())
55
-
if err != nil {
56
-
return nil, err
57
-
}
58
-
for _, m := range members {
59
-
if !strings.HasPrefix(m, "did:") { // skip non-user subjects like 'repo:owner'
60
-
continue
61
-
}
62
-
collaborators = append(collaborators, syntax.DID(m))
63
-
}
64
-
65
-
slices.Sort(collaborators)
66
-
return slices.Compact(collaborators), nil
67
-
}
68
-
69
-
func (e *Enforcer) IsRepoOwner(user syntax.DID, repo syntax.ATURI) (bool, error) {
70
-
return e.e.HasRoleForUser(user.String(), "repo:owner", repo.String())
71
-
}
72
-
73
-
func (e *Enforcer) IsRepoCollaborator(user syntax.DID, repo syntax.ATURI) (bool, error) {
74
-
return e.hasImplicitRoleForUser(user.String(), "repo:collaborator", repo.String())
75
-
}
76
-
77
-
func (e *Enforcer) IsRepoWriteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
78
-
return e.e.Enforce(user.String(), repo.String(), "/", "write")
79
-
}
80
-
81
-
func (e *Enforcer) IsRepoSettingsWriteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
82
-
return e.e.Enforce(user.String(), repo.String(), "/settings", "write")
83
-
}
84
-
85
-
func (e *Enforcer) IsRepoCollaboratorInviteAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
86
-
return e.e.Enforce(user.String(), repo.String(), "/collaborator", "write")
87
-
}
88
-
89
-
func (e *Enforcer) IsRepoGitPushAllowed(user syntax.DID, repo syntax.ATURI) (bool, error) {
90
-
return e.e.Enforce(user.String(), repo.String(), "/git", "write")
91
-
}
-29
rbac2/spindle.go
···
1
-
package rbac2
2
-
3
-
import "github.com/bluesky-social/indigo/atproto/syntax"
4
-
5
-
func (e *Enforcer) SetSpindleOwner(user syntax.DID, spindle syntax.DID) error {
6
-
return e.setRoleForUser(user.String(), "server:owner", intoSpindle(spindle))
7
-
}
8
-
9
-
func (e *Enforcer) IsSpindleMember(user syntax.DID, spindle syntax.DID) (bool, error) {
10
-
return e.hasImplicitRoleForUser(user.String(), "server:member", intoSpindle(spindle))
11
-
}
12
-
13
-
func (e *Enforcer) AddSpindleMember(user syntax.DID, spindle syntax.DID) error {
14
-
_, err := e.e.AddRoleForUser(user.String(), "server:member", intoSpindle(spindle))
15
-
return err
16
-
}
17
-
18
-
func (e *Enforcer) RemoveSpindleMember(user syntax.DID, spindle syntax.DID) error {
19
-
_, err := e.e.DeleteRoleForUser(user.String(), "server:member", intoSpindle(spindle))
20
-
return err
21
-
}
22
-
23
-
func (e *Enforcer) IsSpindleMemberInviteAllowed(user syntax.DID, spindle syntax.DID) (bool, error) {
24
-
return e.e.Enforce(user.String(), intoSpindle(spindle), "/member", "write")
25
-
}
26
-
27
-
func intoSpindle(did syntax.DID) string {
28
-
return "/spindle/" + did.String()
29
-
}
-19
rbac2/tangled_policy.csv
···
1
-
#, policies
2
-
#, sub, dom, obj, act
3
-
p, repo:owner, at://{did}/sh.tangled.repo/{rkey}, /, write
4
-
p, repo:owner, at://{did}/sh.tangled.repo/{rkey}, /collaborator, write
5
-
p, repo:collaborator, at://{did}/sh.tangled.repo/{rkey}, /settings, write
6
-
p, repo:collaborator, at://{did}/sh.tangled.repo/{rkey}, /git, write
7
-
8
-
p, server:owner, /knot/{did}, /member, write
9
-
p, server:member, /knot/{did}, /git, write
10
-
11
-
p, server:owner, /spindle/{did}, /member, write
12
-
13
-
14
-
#, group policies
15
-
#, sub, role, dom
16
-
g, repo:owner, repo:collaborator, at://{did}/sh.tangled.repo/{rkey}
17
-
18
-
g, server:owner, server:member, /knot/{did}
19
-
g, server:owner, server:member, /spindle/{did}
+11
-20
spindle/config/config.go
···
3
3
import (
4
4
"context"
5
5
"fmt"
6
-
"path/filepath"
7
6
8
7
"github.com/bluesky-social/indigo/atproto/syntax"
9
8
"github.com/sethvargo/go-envconfig"
10
9
)
11
10
12
11
type Server struct {
13
-
ListenAddr string `env:"LISTEN_ADDR, default=0.0.0.0:6555"`
14
-
Hostname string `env:"HOSTNAME, required"`
15
-
TapUrl string `env:"TAP_URL, required"`
16
-
PlcUrl string `env:"PLC_URL, default=https://plc.directory"`
17
-
Dev bool `env:"DEV, default=false"`
18
-
Owner syntax.DID `env:"OWNER, required"`
19
-
Secrets Secrets `env:",prefix=SECRETS_"`
20
-
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
21
-
DataDir string `env:"DATA_DIR, default=/var/lib/spindle"`
22
-
QueueSize int `env:"QUEUE_SIZE, default=100"`
23
-
MaxJobCount int `env:"MAX_JOB_COUNT, default=2"` // max number of jobs that run at a time
12
+
ListenAddr string `env:"LISTEN_ADDR, default=0.0.0.0:6555"`
13
+
DBPath string `env:"DB_PATH, default=spindle.db"`
14
+
Hostname string `env:"HOSTNAME, required"`
15
+
JetstreamEndpoint string `env:"JETSTREAM_ENDPOINT, default=wss://jetstream1.us-west.bsky.network/subscribe"`
16
+
PlcUrl string `env:"PLC_URL, default=https://plc.directory"`
17
+
Dev bool `env:"DEV, default=false"`
18
+
Owner string `env:"OWNER, required"`
19
+
Secrets Secrets `env:",prefix=SECRETS_"`
20
+
LogDir string `env:"LOG_DIR, default=/var/log/spindle"`
21
+
QueueSize int `env:"QUEUE_SIZE, default=100"`
22
+
MaxJobCount int `env:"MAX_JOB_COUNT, default=2"` // max number of jobs that run at a time
24
23
}
25
24
26
25
func (s Server) Did() syntax.DID {
27
26
return syntax.DID(fmt.Sprintf("did:web:%s", s.Hostname))
28
-
}
29
-
30
-
func (s Server) RepoDir() string {
31
-
return filepath.Join(s.DataDir, "repos")
32
-
}
33
-
34
-
func (s Server) DBPath() string {
35
-
return filepath.Join(s.DataDir, "spindle.db")
36
27
}
37
28
38
29
type Secrets struct {
+18
-73
spindle/db/db.go
···
1
1
package db
2
2
3
3
import (
4
-
"context"
5
4
"database/sql"
6
5
"strings"
7
6
8
-
"github.com/bluesky-social/indigo/atproto/syntax"
9
7
_ "github.com/mattn/go-sqlite3"
10
-
"tangled.org/core/log"
11
-
"tangled.org/core/orm"
12
8
)
13
9
14
10
type DB struct {
15
11
*sql.DB
16
12
}
17
13
18
-
func Make(ctx context.Context, dbPath string) (*DB, error) {
14
+
func Make(dbPath string) (*DB, error) {
19
15
// https://github.com/mattn/go-sqlite3#connection-string
20
16
opts := []string{
21
17
"_foreign_keys=1",
···
24
20
"_auto_vacuum=incremental",
25
21
}
26
22
27
-
logger := log.FromContext(ctx)
28
-
logger = log.SubLogger(logger, "db")
29
-
30
23
db, err := sql.Open("sqlite3", dbPath+"?"+strings.Join(opts, "&"))
31
24
if err != nil {
32
25
return nil, err
33
26
}
34
27
35
-
conn, err := db.Conn(ctx)
36
-
if err != nil {
37
-
return nil, err
38
-
}
39
-
defer conn.Close()
28
+
// NOTE: If any other migration is added here, you MUST
29
+
// copy the pattern in appview: use a single sql.Conn
30
+
// for every migration.
40
31
41
32
_, err = db.Exec(`
42
33
create table if not exists _jetstream (
···
58
49
unique(owner, name)
59
50
);
60
51
61
-
create table if not exists repo_collaborators (
62
-
-- identifiers
63
-
id integer primary key autoincrement,
64
-
did text not null,
65
-
rkey text not null,
66
-
at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo.collaborator' || '/' || rkey) stored,
67
-
68
-
repo text not null,
69
-
subject text not null,
70
-
71
-
addedAt text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
72
-
unique(did, rkey)
73
-
);
74
-
75
52
create table if not exists spindle_members (
76
53
-- identifiers for the record
77
54
id integer primary key autoincrement,
···
99
76
return nil, err
100
77
}
101
78
102
-
// run migrations
79
+
return &DB{db}, nil
80
+
}
103
81
104
-
// NOTE: this won't migrate existing records
105
-
// they will be fetched again with tap instead
106
-
orm.RunMigration(conn, logger, "add-rkey-to-repos", func(tx *sql.Tx) error {
107
-
// archive legacy repos (just in case)
108
-
_, err = tx.Exec(`alter table repos rename to repos_old`)
109
-
if err != nil {
110
-
return err
111
-
}
112
-
113
-
_, err := tx.Exec(`
114
-
create table repos (
115
-
-- identifiers
116
-
id integer primary key autoincrement,
117
-
did text not null,
118
-
rkey text not null,
119
-
at_uri text generated always as ('at://' || did || '/' || 'sh.tangled.repo' || '/' || rkey) stored,
120
-
121
-
name text not null,
122
-
knot text not null,
123
-
124
-
addedAt text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
125
-
unique(did, rkey)
126
-
);
127
-
`)
128
-
if err != nil {
129
-
return err
130
-
}
131
-
132
-
return nil
133
-
})
134
-
135
-
return &DB{db}, nil
82
+
func (d *DB) SaveLastTimeUs(lastTimeUs int64) error {
83
+
_, err := d.Exec(`
84
+
insert into _jetstream (id, last_time_us)
85
+
values (1, ?)
86
+
on conflict(id) do update set last_time_us = excluded.last_time_us
87
+
`, lastTimeUs)
88
+
return err
136
89
}
137
90
138
-
func (d *DB) IsKnownDid(did syntax.DID) (bool, error) {
139
-
// is spindle member / repo collaborator
140
-
var exists bool
141
-
err := d.QueryRow(
142
-
`select exists (
143
-
select 1 from repo_collaborators where subject = ?
144
-
union all
145
-
select 1 from spindle_members where did = ?
146
-
)`,
147
-
did,
148
-
did,
149
-
).Scan(&exists)
150
-
return exists, err
91
+
func (d *DB) GetLastTimeUs() (int64, error) {
92
+
var lastTimeUs int64
93
+
row := d.QueryRow(`select last_time_us from _jetstream where id = 1;`)
94
+
err := row.Scan(&lastTimeUs)
95
+
return lastTimeUs, err
151
96
}
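
The NOTE added to db.Make points at the appview migration pattern: open one sql.Conn and run every migration through it. A hedged sketch of what that could look like here, reusing the orm.RunMigration helper the removed code already called; the migration name and SQL are made up, the logger type is assumed to be *slog.Logger as elsewhere in spindle, and a context and logger would have to be threaded back into Make, which now takes only the db path:

```go
// Sketch only: the "single sql.Conn for every migration" pattern the NOTE
// above refers to. orm.RunMigration(conn, logger, name, fn) is the existing
// helper; the migration name and statement are hypothetical.
package db

import (
	"context"
	"database/sql"
	"log/slog"

	"tangled.org/core/orm"
)

func runMigrations(ctx context.Context, db *sql.DB, logger *slog.Logger) error {
	conn, err := db.Conn(ctx)
	if err != nil {
		return err
	}
	defer conn.Close()

	// every migration shares this one conn
	orm.RunMigration(conn, logger, "example-add-column", func(tx *sql.Tx) error {
		_, err := tx.Exec(`alter table repos add column example text`)
		return err
	})
	return nil
}
```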
+8
-10
spindle/db/events.go
···
18
18
EventJson string `json:"event"`
19
19
}
20
20
21
-
func (d *DB) insertEvent(event Event, notifier *notifier.Notifier) error {
21
+
func (d *DB) InsertEvent(event Event, notifier *notifier.Notifier) error {
22
22
_, err := d.Exec(
23
23
`insert into events (rkey, nsid, event, created) values (?, ?, ?, ?)`,
24
24
event.Rkey,
···
70
70
return evts, nil
71
71
}
72
72
73
-
func (d *DB) CreatePipelineEvent(rkey string, pipeline tangled.Pipeline, n *notifier.Notifier) error {
74
-
eventJson, err := json.Marshal(pipeline)
73
+
func (d *DB) CreateStatusEvent(rkey string, s tangled.PipelineStatus, n *notifier.Notifier) error {
74
+
eventJson, err := json.Marshal(s)
75
75
if err != nil {
76
76
return err
77
77
}
78
+
78
79
event := Event{
79
80
Rkey: rkey,
80
-
Nsid: tangled.PipelineNSID,
81
+
Nsid: tangled.PipelineStatusNSID,
81
82
Created: time.Now().UnixNano(),
82
83
EventJson: string(eventJson),
83
84
}
84
-
return d.insertEvent(event, n)
85
+
86
+
return d.InsertEvent(event, n)
85
87
}
86
88
87
89
func (d *DB) createStatusEvent(
···
114
116
EventJson: string(eventJson),
115
117
}
116
118
117
-
return d.insertEvent(event, n)
119
+
return d.InsertEvent(event, n)
118
120
119
121
}
120
122
···
162
164
163
165
func (d *DB) StatusFailed(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
164
166
return d.createStatusEvent(workflowId, models.StatusKindFailed, &workflowError, &exitCode, n)
165
-
}
166
-
167
-
func (d *DB) StatusCancelled(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
168
-
return d.createStatusEvent(workflowId, models.StatusKindCancelled, &workflowError, &exitCode, n)
169
167
}
170
168
171
169
func (d *DB) StatusSuccess(workflowId models.WorkflowId, n *notifier.Notifier) error {
+44
spindle/db/known_dids.go
···
1
+
package db
2
+
3
+
func (d *DB) AddDid(did string) error {
4
+
_, err := d.Exec(`insert or ignore into known_dids (did) values (?)`, did)
5
+
return err
6
+
}
7
+
8
+
func (d *DB) RemoveDid(did string) error {
9
+
_, err := d.Exec(`delete from known_dids where did = ?`, did)
10
+
return err
11
+
}
12
+
13
+
func (d *DB) GetAllDids() ([]string, error) {
14
+
var dids []string
15
+
16
+
rows, err := d.Query(`select did from known_dids`)
17
+
if err != nil {
18
+
return nil, err
19
+
}
20
+
defer rows.Close()
21
+
22
+
for rows.Next() {
23
+
var did string
24
+
if err := rows.Scan(&did); err != nil {
25
+
return nil, err
26
+
}
27
+
dids = append(dids, did)
28
+
}
29
+
30
+
if err := rows.Err(); err != nil {
31
+
return nil, err
32
+
}
33
+
34
+
return dids, nil
35
+
}
36
+
37
+
func (d *DB) HasKnownDids() bool {
38
+
var count int
39
+
err := d.QueryRow(`select count(*) from known_dids`).Scan(&count)
40
+
if err != nil {
41
+
return false
42
+
}
43
+
return count > 0
44
+
}
+11
-119
spindle/db/repos.go
···
1
1
package db
2
2
3
-
import "github.com/bluesky-social/indigo/atproto/syntax"
4
-
5
3
type Repo struct {
6
-
Did syntax.DID
7
-
Rkey syntax.RecordKey
8
-
Name string
9
-
Knot string
4
+
Knot string
5
+
Owner string
6
+
Name string
10
7
}
11
8
12
-
type RepoCollaborator struct {
13
-
Did syntax.DID
14
-
Rkey syntax.RecordKey
15
-
Repo syntax.ATURI
16
-
Subject syntax.DID
17
-
}
18
-
19
-
func (d *DB) PutRepo(repo *Repo) error {
20
-
_, err := d.Exec(
21
-
`insert or ignore into repos (did, rkey, name, knot)
22
-
values (?, ?, ?, ?)
23
-
on conflict(did, rkey) do update set
24
-
name = excluded.name,
25
-
knot = excluded.knot`,
26
-
repo.Did,
27
-
repo.Rkey,
28
-
repo.Name,
29
-
repo.Knot,
30
-
)
31
-
return err
32
-
}
33
-
34
-
func (d *DB) DeleteRepo(did syntax.DID, rkey syntax.RecordKey) error {
35
-
_, err := d.Exec(
36
-
`delete from repos where did = ? and rkey = ?`,
37
-
did,
38
-
rkey,
39
-
)
9
+
func (d *DB) AddRepo(knot, owner, name string) error {
10
+
_, err := d.Exec(`insert or ignore into repos (knot, owner, name) values (?, ?, ?)`, knot, owner, name)
40
11
return err
41
12
}
42
13
···
63
34
return knots, nil
64
35
}
65
36
66
-
func (d *DB) GetRepo(repoAt syntax.ATURI) (*Repo, error) {
37
+
func (d *DB) GetRepo(knot, owner, name string) (*Repo, error) {
67
38
var repo Repo
68
-
err := d.DB.QueryRow(
69
-
`select
70
-
did,
71
-
rkey,
72
-
name,
73
-
knot
74
-
from repos where at_uri = ?`,
75
-
repoAt,
76
-
).Scan(
77
-
&repo.Did,
78
-
&repo.Rkey,
79
-
&repo.Name,
80
-
&repo.Knot,
81
-
)
82
-
if err != nil {
83
-
return nil, err
84
-
}
85
-
return &repo, nil
86
-
}
87
39
88
-
func (d *DB) GetRepoWithName(did syntax.DID, name string) (*Repo, error) {
89
-
var repo Repo
90
-
err := d.DB.QueryRow(
91
-
`select
92
-
did,
93
-
rkey,
94
-
name,
95
-
knot
96
-
from repos where did = ? and name = ?`,
97
-
did,
98
-
name,
99
-
).Scan(
100
-
&repo.Did,
101
-
&repo.Rkey,
102
-
&repo.Name,
103
-
&repo.Knot,
104
-
)
40
+
query := "select knot, owner, name from repos where knot = ? and owner = ? and name = ?"
41
+
err := d.DB.QueryRow(query, knot, owner, name).
42
+
Scan(&repo.Knot, &repo.Owner, &repo.Name)
43
+
105
44
if err != nil {
106
45
return nil, err
107
46
}
47
+
108
48
return &repo, nil
109
49
}
110
-
111
-
func (d *DB) PutRepoCollaborator(collaborator *RepoCollaborator) error {
112
-
_, err := d.Exec(
113
-
`insert into repo_collaborators (did, rkey, repo, subject)
114
-
values (?, ?, ?, ?)
115
-
on conflict(did, rkey) do update set
116
-
repo = excluded.repo,
117
-
subject = excluded.subject`,
118
-
collaborator.Did,
119
-
collaborator.Rkey,
120
-
collaborator.Repo,
121
-
collaborator.Subject,
122
-
)
123
-
return err
124
-
}
125
-
126
-
func (d *DB) RemoveRepoCollaborator(did syntax.DID, rkey syntax.RecordKey) error {
127
-
_, err := d.Exec(
128
-
`delete from repo_collaborators where did = ? and rkey = ?`,
129
-
did,
130
-
rkey,
131
-
)
132
-
return err
133
-
}
134
-
135
-
func (d *DB) GetRepoCollaborator(did syntax.DID, rkey syntax.RecordKey) (*RepoCollaborator, error) {
136
-
var collaborator RepoCollaborator
137
-
err := d.DB.QueryRow(
138
-
`select
139
-
did,
140
-
rkey,
141
-
repo,
142
-
subject
143
-
from repo_collaborators
144
-
where did = ? and rkey = ?`,
145
-
did,
146
-
rkey,
147
-
).Scan(
148
-
&collaborator.Did,
149
-
&collaborator.Rkey,
150
-
&collaborator.Repo,
151
-
&collaborator.Subject,
152
-
)
153
-
if err != nil {
154
-
return nil, err
155
-
}
156
-
return &collaborator, nil
157
-
}
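
GetRepo is now keyed by knot, owner and name rather than by an AT-URI. A small usage fragment; the knot host, DID and repo name are placeholder values:

```go
// Sketch only: looking up a watched repo with the new composite key.
// "knot.example.com", the DID and "core" are placeholder values.
repo, err := d.GetRepo("knot.example.com", "did:plc:exampleowner", "core")
if err != nil {
	return err // sql.ErrNoRows when the repo is not on the watch list
}
fmt.Println(repo.Knot, repo.Owner, repo.Name)
```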
+13
-24
spindle/engines/nixery/engine.go
···
179
179
return err
180
180
}
181
181
e.registerCleanup(wid, func(ctx context.Context) error {
182
-
if err := e.docker.NetworkRemove(ctx, networkName(wid)); err != nil {
183
-
return fmt.Errorf("removing network: %w", err)
184
-
}
185
-
return nil
182
+
return e.docker.NetworkRemove(ctx, networkName(wid))
186
183
})
187
184
188
185
addl := wf.Data.(addlFields)
···
232
229
return fmt.Errorf("creating container: %w", err)
233
230
}
234
231
e.registerCleanup(wid, func(ctx context.Context) error {
235
-
if err := e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{}); err != nil {
236
-
return fmt.Errorf("stopping container: %w", err)
232
+
err = e.docker.ContainerStop(ctx, resp.ID, container.StopOptions{})
233
+
if err != nil {
234
+
return err
237
235
}
238
236
239
-
err := e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
237
+
return e.docker.ContainerRemove(ctx, resp.ID, container.RemoveOptions{
240
238
RemoveVolumes: true,
241
239
RemoveLinks: false,
242
240
Force: false,
243
241
})
244
-
if err != nil {
245
-
return fmt.Errorf("removing container: %w", err)
246
-
}
247
-
return nil
248
242
})
249
243
250
-
if err := e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
244
+
err = e.docker.ContainerStart(ctx, resp.ID, container.StartOptions{})
245
+
if err != nil {
251
246
return fmt.Errorf("starting container: %w", err)
252
247
}
253
248
···
399
394
}
400
395
401
396
func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
402
-
fns := e.drainCleanups(wid)
397
+
e.cleanupMu.Lock()
398
+
key := wid.String()
399
+
400
+
fns := e.cleanup[key]
401
+
delete(e.cleanup, key)
402
+
e.cleanupMu.Unlock()
403
403
404
404
for _, fn := range fns {
405
405
if err := fn(ctx); err != nil {
···
415
415
416
416
key := wid.String()
417
417
e.cleanup[key] = append(e.cleanup[key], fn)
418
-
}
419
-
420
-
func (e *Engine) drainCleanups(wid models.WorkflowId) []cleanupFunc {
421
-
e.cleanupMu.Lock()
422
-
key := wid.String()
423
-
424
-
fns := e.cleanup[key]
425
-
delete(e.cleanup, key)
426
-
e.cleanupMu.Unlock()
427
-
428
-
return fns
429
418
}
430
419
431
420
func networkName(wid models.WorkflowId) string {
-73
spindle/git/git.go
···
1
-
package git
2
-
3
-
import (
4
-
"bytes"
5
-
"context"
6
-
"fmt"
7
-
"os"
8
-
"os/exec"
9
-
"strings"
10
-
11
-
"github.com/hashicorp/go-version"
12
-
)
13
-
14
-
func Version() (*version.Version, error) {
15
-
var buf bytes.Buffer
16
-
cmd := exec.Command("git", "version")
17
-
cmd.Stdout = &buf
18
-
cmd.Stderr = os.Stderr
19
-
err := cmd.Run()
20
-
if err != nil {
21
-
return nil, err
22
-
}
23
-
fields := strings.Fields(buf.String())
24
-
if len(fields) < 3 {
25
-
return nil, fmt.Errorf("invalid git version: %s", buf.String())
26
-
}
27
-
28
-
// version string is like: "git version 2.29.3" or "git version 2.29.3.windows.1"
29
-
versionString := fields[2]
30
-
if pos := strings.Index(versionString, "windows"); pos >= 1 {
31
-
versionString = versionString[:pos-1]
32
-
}
33
-
return version.NewVersion(versionString)
34
-
}
35
-
36
-
const WorkflowDir = `/.tangled/workflows`
37
-
38
-
func SparseSyncGitRepo(ctx context.Context, cloneUri, path, rev string) error {
39
-
exist, err := isDir(path)
40
-
if err != nil {
41
-
return err
42
-
}
43
-
if rev == "" {
44
-
rev = "HEAD"
45
-
}
46
-
if !exist {
47
-
if err := exec.Command("git", "clone", "--no-checkout", "--depth=1", "--filter=tree:0", "--revision="+rev, cloneUri, path).Run(); err != nil {
48
-
return fmt.Errorf("git clone: %w", err)
49
-
}
50
-
if err := exec.Command("git", "-C", path, "sparse-checkout", "set", "--no-cone", WorkflowDir).Run(); err != nil {
51
-
return fmt.Errorf("git sparse-checkout set: %w", err)
52
-
}
53
-
} else {
54
-
if err := exec.Command("git", "-C", path, "fetch", "--depth=1", "--filter=tree:0", "origin", rev).Run(); err != nil {
55
-
return fmt.Errorf("git pull: %w", err)
56
-
}
57
-
}
58
-
if err := exec.Command("git", "-C", path, "checkout", rev).Run(); err != nil {
59
-
return fmt.Errorf("git checkout: %w", err)
60
-
}
61
-
return nil
62
-
}
63
-
64
-
func isDir(path string) (bool, error) {
65
-
info, err := os.Stat(path)
66
-
if err == nil && info.IsDir() {
67
-
return true, nil
68
-
}
69
-
if os.IsNotExist(err) {
70
-
return false, nil
71
-
}
72
-
return false, err
73
-
}
+300
spindle/ingester.go
···
1
+
package spindle
2
+
3
+
import (
4
+
"context"
5
+
"encoding/json"
6
+
"errors"
7
+
"fmt"
8
+
"time"
9
+
10
+
"tangled.org/core/api/tangled"
11
+
"tangled.org/core/eventconsumer"
12
+
"tangled.org/core/rbac"
13
+
"tangled.org/core/spindle/db"
14
+
15
+
comatproto "github.com/bluesky-social/indigo/api/atproto"
16
+
"github.com/bluesky-social/indigo/atproto/identity"
17
+
"github.com/bluesky-social/indigo/atproto/syntax"
18
+
"github.com/bluesky-social/indigo/xrpc"
19
+
"github.com/bluesky-social/jetstream/pkg/models"
20
+
securejoin "github.com/cyphar/filepath-securejoin"
21
+
)
22
+
23
+
type Ingester func(ctx context.Context, e *models.Event) error
24
+
25
+
func (s *Spindle) ingest() Ingester {
26
+
return func(ctx context.Context, e *models.Event) error {
27
+
var err error
28
+
defer func() {
29
+
eventTime := e.TimeUS
30
+
lastTimeUs := eventTime + 1
31
+
if err := s.db.SaveLastTimeUs(lastTimeUs); err != nil {
32
+
err = fmt.Errorf("(deferred) failed to save last time us: %w", err)
33
+
}
34
+
}()
35
+
36
+
if e.Kind != models.EventKindCommit {
37
+
return nil
38
+
}
39
+
40
+
switch e.Commit.Collection {
41
+
case tangled.SpindleMemberNSID:
42
+
err = s.ingestMember(ctx, e)
43
+
case tangled.RepoNSID:
44
+
err = s.ingestRepo(ctx, e)
45
+
case tangled.RepoCollaboratorNSID:
46
+
err = s.ingestCollaborator(ctx, e)
47
+
}
48
+
49
+
if err != nil {
50
+
s.l.Debug("failed to process message", "nsid", e.Commit.Collection, "err", err)
51
+
}
52
+
53
+
return nil
54
+
}
55
+
}
56
+
57
+
func (s *Spindle) ingestMember(_ context.Context, e *models.Event) error {
58
+
var err error
59
+
did := e.Did
60
+
rkey := e.Commit.RKey
61
+
62
+
l := s.l.With("component", "ingester", "record", tangled.SpindleMemberNSID)
63
+
64
+
switch e.Commit.Operation {
65
+
case models.CommitOperationCreate, models.CommitOperationUpdate:
66
+
raw := e.Commit.Record
67
+
record := tangled.SpindleMember{}
68
+
err = json.Unmarshal(raw, &record)
69
+
if err != nil {
70
+
l.Error("invalid record", "error", err)
71
+
return err
72
+
}
73
+
74
+
domain := s.cfg.Server.Hostname
75
+
recordInstance := record.Instance
76
+
77
+
if recordInstance != domain {
78
+
l.Error("domain mismatch", "domain", recordInstance, "expected", domain)
79
+
return fmt.Errorf("domain mismatch: %s != %s", record.Instance, domain)
80
+
}
81
+
82
+
ok, err := s.e.IsSpindleInviteAllowed(did, rbacDomain)
83
+
if err != nil || !ok {
84
+
l.Error("failed to add member", "did", did, "error", err)
85
+
return fmt.Errorf("failed to enforce permissions: %w", err)
86
+
}
87
+
88
+
if err := db.AddSpindleMember(s.db, db.SpindleMember{
89
+
Did: syntax.DID(did),
90
+
Rkey: rkey,
91
+
Instance: recordInstance,
92
+
Subject: syntax.DID(record.Subject),
93
+
Created: time.Now(),
94
+
}); err != nil {
95
+
l.Error("failed to add member", "error", err)
96
+
return fmt.Errorf("failed to add member: %w", err)
97
+
}
98
+
99
+
if err := s.e.AddSpindleMember(rbacDomain, record.Subject); err != nil {
100
+
l.Error("failed to add member", "error", err)
101
+
return fmt.Errorf("failed to add member: %w", err)
102
+
}
103
+
l.Info("added member from firehose", "member", record.Subject)
104
+
105
+
if err := s.db.AddDid(record.Subject); err != nil {
106
+
l.Error("failed to add did", "error", err)
107
+
return fmt.Errorf("failed to add did: %w", err)
108
+
}
109
+
s.jc.AddDid(record.Subject)
110
+
111
+
return nil
112
+
113
+
case models.CommitOperationDelete:
114
+
record, err := db.GetSpindleMember(s.db, did, rkey)
115
+
if err != nil {
116
+
l.Error("failed to find member", "error", err)
117
+
return fmt.Errorf("failed to find member: %w", err)
118
+
}
119
+
120
+
if err := db.RemoveSpindleMember(s.db, did, rkey); err != nil {
121
+
l.Error("failed to remove member", "error", err)
122
+
return fmt.Errorf("failed to remove member: %w", err)
123
+
}
124
+
125
+
if err := s.e.RemoveSpindleMember(rbacDomain, record.Subject.String()); err != nil {
126
+
l.Error("failed to add member", "error", err)
127
+
return fmt.Errorf("failed to add member: %w", err)
128
+
}
129
+
l.Info("added member from firehose", "member", record.Subject)
130
+
131
+
if err := s.db.RemoveDid(record.Subject.String()); err != nil {
132
+
l.Error("failed to add did", "error", err)
133
+
return fmt.Errorf("failed to add did: %w", err)
134
+
}
135
+
s.jc.RemoveDid(record.Subject.String())
136
+
137
+
}
138
+
return nil
139
+
}
140
+
141
+
func (s *Spindle) ingestRepo(ctx context.Context, e *models.Event) error {
142
+
var err error
143
+
did := e.Did
144
+
145
+
l := s.l.With("component", "ingester", "record", tangled.RepoNSID)
146
+
147
+
l.Info("ingesting repo record", "did", did)
148
+
149
+
switch e.Commit.Operation {
150
+
case models.CommitOperationCreate, models.CommitOperationUpdate:
151
+
raw := e.Commit.Record
152
+
record := tangled.Repo{}
153
+
err = json.Unmarshal(raw, &record)
154
+
if err != nil {
155
+
l.Error("invalid record", "error", err)
156
+
return err
157
+
}
158
+
159
+
domain := s.cfg.Server.Hostname
160
+
161
+
// no spindle configured for this repo
162
+
if record.Spindle == nil {
163
+
l.Info("no spindle configured", "name", record.Name)
164
+
return nil
165
+
}
166
+
167
+
// this repo did not want this spindle
168
+
if *record.Spindle != domain {
169
+
l.Info("different spindle configured", "name", record.Name, "spindle", *record.Spindle, "domain", domain)
170
+
return nil
171
+
}
172
+
173
+
// add this repo to the watch list
174
+
if err := s.db.AddRepo(record.Knot, did, record.Name); err != nil {
175
+
l.Error("failed to add repo", "error", err)
176
+
return fmt.Errorf("failed to add repo: %w", err)
177
+
}
178
+
179
+
didSlashRepo, err := securejoin.SecureJoin(did, record.Name)
180
+
if err != nil {
181
+
return err
182
+
}
183
+
184
+
// add repo to rbac
185
+
if err := s.e.AddRepo(did, rbac.ThisServer, didSlashRepo); err != nil {
186
+
l.Error("failed to add repo to enforcer", "error", err)
187
+
return fmt.Errorf("failed to add repo: %w", err)
188
+
}
189
+
190
+
// add collaborators to rbac
191
+
owner, err := s.res.ResolveIdent(ctx, did)
192
+
if err != nil || owner.Handle.IsInvalidHandle() {
193
+
return err
194
+
}
195
+
if err := s.fetchAndAddCollaborators(ctx, owner, didSlashRepo); err != nil {
196
+
return err
197
+
}
198
+
199
+
// add this knot to the event consumer
200
+
src := eventconsumer.NewKnotSource(record.Knot)
201
+
s.ks.AddSource(context.Background(), src)
202
+
203
+
return nil
204
+
205
+
}
206
+
return nil
207
+
}
208
+
209
+
func (s *Spindle) ingestCollaborator(ctx context.Context, e *models.Event) error {
210
+
var err error
211
+
212
+
l := s.l.With("component", "ingester", "record", tangled.RepoCollaboratorNSID, "did", e.Did)
213
+
214
+
l.Info("ingesting collaborator record")
215
+
216
+
switch e.Commit.Operation {
217
+
case models.CommitOperationCreate, models.CommitOperationUpdate:
218
+
raw := e.Commit.Record
219
+
record := tangled.RepoCollaborator{}
220
+
err = json.Unmarshal(raw, &record)
221
+
if err != nil {
222
+
l.Error("invalid record", "error", err)
223
+
return err
224
+
}
225
+
226
+
subjectId, err := s.res.ResolveIdent(ctx, record.Subject)
227
+
if err != nil || subjectId.Handle.IsInvalidHandle() {
228
+
return err
229
+
}
230
+
231
+
repoAt, err := syntax.ParseATURI(record.Repo)
232
+
if err != nil {
233
+
l.Info("rejecting record, invalid repoAt", "repoAt", record.Repo)
234
+
return nil
235
+
}
236
+
237
+
// TODO: get rid of this entirely
238
+
// resolve this aturi to extract the repo record
239
+
owner, err := s.res.ResolveIdent(ctx, repoAt.Authority().String())
240
+
if err != nil || owner.Handle.IsInvalidHandle() {
241
+
return fmt.Errorf("failed to resolve handle: %w", err)
242
+
}
243
+
244
+
xrpcc := xrpc.Client{
245
+
Host: owner.PDSEndpoint(),
246
+
}
247
+
248
+
resp, err := comatproto.RepoGetRecord(ctx, &xrpcc, "", tangled.RepoNSID, repoAt.Authority().String(), repoAt.RecordKey().String())
249
+
if err != nil {
250
+
return err
251
+
}
252
+
253
+
repo := resp.Value.Val.(*tangled.Repo)
254
+
didSlashRepo, _ := securejoin.SecureJoin(owner.DID.String(), repo.Name)
255
+
256
+
// check perms for this user
257
+
if ok, err := s.e.IsCollaboratorInviteAllowed(owner.DID.String(), rbac.ThisServer, didSlashRepo); !ok || err != nil {
258
+
return fmt.Errorf("insufficient permissions: %w", err)
259
+
}
260
+
261
+
// add collaborator to rbac
262
+
if err := s.e.AddCollaborator(record.Subject, rbac.ThisServer, didSlashRepo); err != nil {
263
+
l.Error("failed to add repo to enforcer", "error", err)
264
+
return fmt.Errorf("failed to add repo: %w", err)
265
+
}
266
+
267
+
return nil
268
+
}
269
+
return nil
270
+
}
271
+
272
+
func (s *Spindle) fetchAndAddCollaborators(ctx context.Context, owner *identity.Identity, didSlashRepo string) error {
273
+
l := s.l.With("component", "ingester", "handler", "fetchAndAddCollaborators")
274
+
275
+
l.Info("fetching and adding existing collaborators")
276
+
277
+
xrpcc := xrpc.Client{
278
+
Host: owner.PDSEndpoint(),
279
+
}
280
+
281
+
resp, err := comatproto.RepoListRecords(ctx, &xrpcc, tangled.RepoCollaboratorNSID, "", 50, owner.DID.String(), false)
282
+
if err != nil {
283
+
return err
284
+
}
285
+
286
+
var errs error
287
+
for _, r := range resp.Records {
288
+
if r == nil {
289
+
continue
290
+
}
291
+
record := r.Value.Val.(*tangled.RepoCollaborator)
292
+
293
+
if err := s.e.AddCollaborator(record.Subject, rbac.ThisServer, didSlashRepo); err != nil {
294
+
l.Error("failed to add repo to enforcer", "error", err)
295
+
errs = errors.Join(errs, fmt.Errorf("failed to add repo: %w", err))
296
+
}
297
+
}
298
+
299
+
return errs
300
+
}
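
The ingester only dispatches on the three collections the jetstream client subscribes to in spindle/server.go. A hedged sketch of how a fourth NSID could be wired in; both fragments mirror code shown in this diff, and ingestPull is a hypothetical handler:

```go
// Sketch only: extending the ingester to a fourth collection. Both the
// subscription list in New (spindle/server.go) and the switch in ingest()
// must name the NSID; ingestPull is hypothetical.

// in spindle/server.go, New():
collections := []string{
	tangled.SpindleMemberNSID,
	tangled.RepoNSID,
	tangled.RepoCollaboratorNSID,
	tangled.RepoPullNSID, // new
}

// in spindle/ingester.go, (*Spindle).ingest():
switch e.Commit.Collection {
case tangled.SpindleMemberNSID:
	err = s.ingestMember(ctx, e)
case tangled.RepoNSID:
	err = s.ingestRepo(ctx, e)
case tangled.RepoCollaboratorNSID:
	err = s.ingestCollaborator(ctx, e)
case tangled.RepoPullNSID:
	err = s.ingestPull(ctx, e) // hypothetical handler
}
```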
+1
-1
spindle/models/pipeline_env.go
+150
-223
spindle/server.go
···
4
4
"context"
5
5
_ "embed"
6
6
"encoding/json"
7
-
"errors"
8
7
"fmt"
9
8
"log/slog"
10
9
"maps"
11
10
"net/http"
12
-
"path/filepath"
13
11
14
-
"github.com/bluesky-social/indigo/atproto/syntax"
15
12
"github.com/go-chi/chi/v5"
16
-
"github.com/go-git/go-git/v5/plumbing/object"
17
-
"github.com/hashicorp/go-version"
18
13
"tangled.org/core/api/tangled"
19
14
"tangled.org/core/eventconsumer"
20
15
"tangled.org/core/eventconsumer/cursor"
21
16
"tangled.org/core/idresolver"
22
-
kgit "tangled.org/core/knotserver/git"
17
+
"tangled.org/core/jetstream"
23
18
"tangled.org/core/log"
24
19
"tangled.org/core/notifier"
25
-
"tangled.org/core/rbac2"
20
+
"tangled.org/core/rbac"
26
21
"tangled.org/core/spindle/config"
27
22
"tangled.org/core/spindle/db"
28
23
"tangled.org/core/spindle/engine"
29
24
"tangled.org/core/spindle/engines/nixery"
30
-
"tangled.org/core/spindle/git"
31
25
"tangled.org/core/spindle/models"
32
26
"tangled.org/core/spindle/queue"
33
27
"tangled.org/core/spindle/secrets"
34
28
"tangled.org/core/spindle/xrpc"
35
-
"tangled.org/core/tap"
36
-
"tangled.org/core/tid"
37
-
"tangled.org/core/workflow"
38
29
"tangled.org/core/xrpc/serviceauth"
39
30
)
40
31
41
32
//go:embed motd
42
33
var motd []byte
43
34
35
+
const (
36
+
rbacDomain = "thisserver"
37
+
)
38
+
44
39
type Spindle struct {
45
-
tap *tap.Client
40
+
jc *jetstream.JetstreamClient
46
41
db *db.DB
47
-
e *rbac2.Enforcer
42
+
e *rbac.Enforcer
48
43
l *slog.Logger
49
44
n *notifier.Notifier
50
45
engs map[string]models.Engine
···
59
54
func New(ctx context.Context, cfg *config.Config, engines map[string]models.Engine) (*Spindle, error) {
60
55
logger := log.FromContext(ctx)
61
56
62
-
if err := ensureGitVersion(); err != nil {
63
-
return nil, fmt.Errorf("ensuring git version: %w", err)
64
-
}
65
-
66
-
d, err := db.Make(ctx, cfg.Server.DBPath())
57
+
d, err := db.Make(cfg.Server.DBPath)
67
58
if err != nil {
68
59
return nil, fmt.Errorf("failed to setup db: %w", err)
69
60
}
70
61
71
-
e, err := rbac2.NewEnforcer(cfg.Server.DBPath())
62
+
e, err := rbac.NewEnforcer(cfg.Server.DBPath)
72
63
if err != nil {
73
64
return nil, fmt.Errorf("failed to setup rbac enforcer: %w", err)
74
65
}
66
+
e.E.EnableAutoSave(true)
75
67
76
68
n := notifier.New()
77
69
···
91
83
}
92
84
logger.Info("using openbao secrets provider", "proxy_address", cfg.Server.Secrets.OpenBao.ProxyAddr, "mount", cfg.Server.Secrets.OpenBao.Mount)
93
85
case "sqlite", "":
94
-
vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath(), secrets.WithTableName("secrets"))
86
+
vault, err = secrets.NewSQLiteManager(cfg.Server.DBPath, secrets.WithTableName("secrets"))
95
87
if err != nil {
96
88
return nil, fmt.Errorf("failed to setup sqlite secrets provider: %w", err)
97
89
}
98
-
logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath())
90
+
logger.Info("using sqlite secrets provider", "path", cfg.Server.DBPath)
99
91
default:
100
92
return nil, fmt.Errorf("unknown secrets provider: %s", cfg.Server.Secrets.Provider)
101
93
}
···
103
95
jq := queue.NewQueue(cfg.Server.QueueSize, cfg.Server.MaxJobCount)
104
96
logger.Info("initialized queue", "queueSize", cfg.Server.QueueSize, "numWorkers", cfg.Server.MaxJobCount)
105
97
106
-
tap := tap.NewClient(cfg.Server.TapUrl, "")
98
+
collections := []string{
99
+
tangled.SpindleMemberNSID,
100
+
tangled.RepoNSID,
101
+
tangled.RepoCollaboratorNSID,
102
+
}
103
+
jc, err := jetstream.NewJetstreamClient(cfg.Server.JetstreamEndpoint, "spindle", collections, nil, log.SubLogger(logger, "jetstream"), d, true, true)
104
+
if err != nil {
105
+
return nil, fmt.Errorf("failed to setup jetstream client: %w", err)
106
+
}
107
+
jc.AddDid(cfg.Server.Owner)
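// always watch the configured owner so that their member, repo, and collaborator records are ingested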
108
+
109
+
// Add any DIDs the spindle already knows about to the jetstream client.
110
+
dids, err := d.GetAllDids()
111
+
if err != nil {
112
+
return nil, fmt.Errorf("failed to get all dids: %w", err)
113
+
}
114
+
for _, d := range dids {
115
+
jc.AddDid(d)
116
+
}
107
117
108
118
resolver := idresolver.DefaultResolver(cfg.Server.PlcUrl)
109
119
110
120
spindle := &Spindle{
111
-
tap: &tap,
121
+
jc: jc,
112
122
e: e,
113
123
db: d,
114
124
l: logger,
···
120
130
vault: vault,
121
131
}
122
132
123
-
err = e.SetSpindleOwner(spindle.cfg.Server.Owner, spindle.cfg.Server.Did())
133
+
err = e.AddSpindle(rbacDomain)
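// register the rbac domain ("thisserver") that permissions on this spindle are scoped under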
134
+
if err != nil {
135
+
return nil, fmt.Errorf("failed to set rbac domain: %w", err)
136
+
}
137
+
err = spindle.configureOwner()
124
138
if err != nil {
125
139
return nil, err
126
140
}
127
141
logger.Info("owner set", "did", cfg.Server.Owner)
128
142
129
-
cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath())
143
+
cursorStore, err := cursor.NewSQLiteStore(cfg.Server.DBPath)
130
144
if err != nil {
131
145
return nil, fmt.Errorf("failed to setup sqlite3 cursor store: %w", err)
132
146
}
133
147
134
-
// spindle listen to knot stream for sh.tangled.git.refUpdate
135
-
// which will sync the local workflow files in spindle and enqueues the
136
-
// pipeline job for on-push workflows
148
+
err = jc.StartJetstream(ctx, spindle.ingest())
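// ingest() consumes the member, repo, and collaborator records from the subscribed collections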
149
+
if err != nil {
150
+
return nil, fmt.Errorf("failed to start jetstream consumer: %w", err)
151
+
}
152
+
153
+
// for each incoming sh.tangled.pipeline, we execute
154
+
// spindle.processPipeline, which in turn enqueues the pipeline
155
+
// job in the above registered queue.
137
156
ccfg := eventconsumer.NewConsumerConfig()
138
157
ccfg.Logger = log.SubLogger(logger, "eventconsumer")
139
158
ccfg.Dev = cfg.Server.Dev
140
-
ccfg.ProcessFunc = spindle.processKnotStream
159
+
ccfg.ProcessFunc = spindle.processPipeline
141
160
ccfg.CursorStore = cursorStore
142
161
knownKnots, err := d.Knots()
143
162
if err != nil {
···
178
197
}
179
198
180
199
// Enforcer returns the RBAC enforcer instance.
181
-
func (s *Spindle) Enforcer() *rbac2.Enforcer {
200
+
func (s *Spindle) Enforcer() *rbac.Enforcer {
182
201
return s.e
183
202
}
184
203
···
198
217
s.ks.Start(ctx)
199
218
}()
200
219
201
-
// ensure server owner is tracked
202
-
if err := s.tap.AddRepos(ctx, []syntax.DID{s.cfg.Server.Owner}); err != nil {
203
-
return err
204
-
}
205
-
206
-
go func() {
207
-
s.l.Info("starting tap stream consumer")
208
-
s.tap.Connect(ctx, &tap.SimpleIndexer{
209
-
EventHandler: s.processEvent,
210
-
})
211
-
}()
212
-
213
220
s.l.Info("starting spindle server", "address", s.cfg.Server.ListenAddr)
214
221
return http.ListenAndServe(s.cfg.Server.ListenAddr, s.Router())
215
222
}
···
261
268
Config: s.cfg,
262
269
Resolver: s.res,
263
270
Vault: s.vault,
264
-
Notifier: s.Notifier(),
265
271
ServiceAuth: serviceAuth,
266
272
}
267
273
268
274
return x.Router()
269
275
}
270
276
271
-
func (s *Spindle) processKnotStream(ctx context.Context, src eventconsumer.Source, msg eventconsumer.Message) error {
272
-
l := log.FromContext(ctx).With("handler", "processKnotStream")
273
-
l = l.With("src", src.Key(), "msg.Nsid", msg.Nsid, "msg.Rkey", msg.Rkey)
274
-
if msg.Nsid == tangled.GitRefUpdateNSID {
275
-
event := tangled.GitRefUpdate{}
276
-
if err := json.Unmarshal(msg.EventJson, &event); err != nil {
277
-
l.Error("error unmarshalling", "err", err)
277
+
func (s *Spindle) processPipeline(ctx context.Context, src eventconsumer.Source, msg eventconsumer.Message) error {
278
+
if msg.Nsid == tangled.PipelineNSID {
279
+
tpl := tangled.Pipeline{}
280
+
err := json.Unmarshal(msg.EventJson, &tpl)
281
+
if err != nil {
282
+
s.l.Error("error unmarshalling pipeline record", "err", err)
278
283
return err
279
284
}
280
-
l = l.With("repoDid", event.RepoDid, "repoName", event.RepoName)
281
285
282
-
// resolve repo name to rkey
283
-
// TODO: git.refUpdate should respond with rkey instead of repo name
284
-
repo, err := s.db.GetRepoWithName(syntax.DID(event.RepoDid), event.RepoName)
285
-
if err != nil {
286
-
return fmt.Errorf("get repo with did and name (%s/%s): %w", event.RepoDid, event.RepoName, err)
286
+
if tpl.TriggerMetadata == nil {
287
+
return fmt.Errorf("no trigger metadata found")
287
288
}
288
289
289
-
// NOTE: we are blindly trusting the knot that it will return only repos it own
290
-
repoCloneUri := s.newRepoCloneUrl(src.Key(), event.RepoDid, event.RepoName)
291
-
repoPath := s.newRepoPath(repo.Did, repo.Rkey)
292
-
if err := git.SparseSyncGitRepo(ctx, repoCloneUri, repoPath, event.NewSha); err != nil {
293
-
return fmt.Errorf("sync git repo: %w", err)
290
+
if tpl.TriggerMetadata.Repo == nil {
291
+
return fmt.Errorf("no repo data found")
294
292
}
295
-
l.Info("synced git repo")
296
293
297
-
compiler := workflow.Compiler{
298
-
Trigger: tangled.Pipeline_TriggerMetadata{
299
-
Kind: string(workflow.TriggerKindPush),
300
-
Push: &tangled.Pipeline_PushTriggerData{
301
-
Ref: event.Ref,
302
-
OldSha: event.OldSha,
303
-
NewSha: event.NewSha,
304
-
},
305
-
Repo: &tangled.Pipeline_TriggerRepo{
306
-
Did: repo.Did.String(),
307
-
Knot: repo.Knot,
308
-
Repo: repo.Name,
309
-
},
310
-
},
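// only accept pipeline events whose trigger repo claims the same knot that emitted them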
294
+
if src.Key() != tpl.TriggerMetadata.Repo.Knot {
295
+
return fmt.Errorf("repo knot does not match event source: %s != %s", src.Key(), tpl.TriggerMetadata.Repo.Knot)
311
296
}
312
297
313
-
// load workflow definitions from rev (without spindle context)
314
-
rawPipeline, err := s.loadPipeline(ctx, repoCloneUri, repoPath, event.NewSha)
298
+
// filter by repos: the repo must already be tracked by this spindle
299
+
_, err = s.db.GetRepo(
300
+
tpl.TriggerMetadata.Repo.Knot,
301
+
tpl.TriggerMetadata.Repo.Did,
302
+
tpl.TriggerMetadata.Repo.Repo,
303
+
)
315
304
if err != nil {
316
-
return fmt.Errorf("loading pipeline: %w", err)
317
-
}
318
-
if len(rawPipeline) == 0 {
319
-
l.Info("no workflow definition find for the repo. skipping the event")
320
-
return nil
321
-
}
322
-
tpl := compiler.Compile(compiler.Parse(rawPipeline))
323
-
// TODO: pass compile error to workflow log
324
-
for _, w := range compiler.Diagnostics.Errors {
325
-
l.Error(w.String())
326
-
}
327
-
for _, w := range compiler.Diagnostics.Warnings {
328
-
l.Warn(w.String())
305
+
return err
329
306
}
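// the pipeline is identified by the knot that emitted the event and the rkey of the pipeline record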
330
307
331
308
pipelineId := models.PipelineId{
332
-
Knot: tpl.TriggerMetadata.Repo.Knot,
333
-
Rkey: tid.TID(),
334
-
}
335
-
if err := s.db.CreatePipelineEvent(pipelineId.Rkey, tpl, s.n); err != nil {
336
-
l.Error("failed to create pipeline event", "err", err)
337
-
return nil
338
-
}
339
-
err = s.processPipeline(ctx, tpl, pipelineId)
340
-
if err != nil {
341
-
return err
309
+
Knot: src.Key(),
310
+
Rkey: msg.Rkey,
342
311
}
343
-
}
344
312
345
-
return nil
346
-
}
313
+
workflows := make(map[models.Engine][]models.Workflow)
314
+
315
+
// Build pipeline environment variables once for all workflows
316
+
pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev)
347
317
348
-
func (s *Spindle) loadPipeline(ctx context.Context, repoUri, repoPath, rev string) (workflow.RawPipeline, error) {
349
-
if err := git.SparseSyncGitRepo(ctx, repoUri, repoPath, rev); err != nil {
350
-
return nil, fmt.Errorf("syncing git repo: %w", err)
351
-
}
352
-
gr, err := kgit.Open(repoPath, rev)
353
-
if err != nil {
354
-
return nil, fmt.Errorf("opening git repo: %w", err)
355
-
}
318
+
for _, w := range tpl.Workflows {
319
+
if w != nil {
320
+
if _, ok := s.engs[w.Engine]; !ok {
321
+
err = s.db.StatusFailed(models.WorkflowId{
322
+
PipelineId: pipelineId,
323
+
Name: w.Name,
324
+
}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
325
+
if err != nil {
326
+
return err
327
+
}
356
328
357
-
workflowDir, err := gr.FileTree(ctx, workflow.WorkflowDir)
358
-
if errors.Is(err, object.ErrDirectoryNotFound) {
359
-
// return empty RawPipeline when directory doesn't exist
360
-
return nil, nil
361
-
} else if err != nil {
362
-
return nil, fmt.Errorf("loading file tree: %w", err)
363
-
}
329
+
continue
330
+
}
364
331
365
-
var rawPipeline workflow.RawPipeline
366
-
for _, e := range workflowDir {
367
-
if !e.IsFile() {
368
-
continue
369
-
}
332
+
eng := s.engs[w.Engine]
370
333
371
-
fpath := filepath.Join(workflow.WorkflowDir, e.Name)
372
-
contents, err := gr.RawContent(fpath)
373
-
if err != nil {
374
-
return nil, fmt.Errorf("reading raw content of '%s': %w", fpath, err)
375
-
}
334
+
if _, ok := workflows[eng]; !ok {
335
+
workflows[eng] = []models.Workflow{}
336
+
}
376
337
377
-
rawPipeline = append(rawPipeline, workflow.RawWorkflow{
378
-
Name: e.Name,
379
-
Contents: contents,
380
-
})
381
-
}
338
+
ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
339
+
if err != nil {
340
+
return err
341
+
}
382
342
383
-
return rawPipeline, nil
384
-
}
343
+
// inject TANGLED_* env vars after InitWorkflow
344
+
// This prevents user-defined env vars from overriding them
345
+
if ewf.Environment == nil {
346
+
ewf.Environment = make(map[string]string)
347
+
}
348
+
maps.Copy(ewf.Environment, pipelineEnv)
385
349
386
-
func (s *Spindle) processPipeline(ctx context.Context, tpl tangled.Pipeline, pipelineId models.PipelineId) error {
387
-
// Build pipeline environment variables once for all workflows
388
-
pipelineEnv := models.PipelineEnvVars(tpl.TriggerMetadata, pipelineId, s.cfg.Server.Dev)
350
+
workflows[eng] = append(workflows[eng], *ewf)
389
351
390
-
// filter & init workflows
391
-
workflows := make(map[models.Engine][]models.Workflow)
392
-
for _, w := range tpl.Workflows {
393
-
if w == nil {
394
-
continue
395
-
}
396
-
if _, ok := s.engs[w.Engine]; !ok {
397
-
err := s.db.StatusFailed(models.WorkflowId{
398
-
PipelineId: pipelineId,
399
-
Name: w.Name,
400
-
}, fmt.Sprintf("unknown engine %#v", w.Engine), -1, s.n)
401
-
if err != nil {
402
-
return fmt.Errorf("db.StatusFailed: %w", err)
352
+
err = s.db.StatusPending(models.WorkflowId{
353
+
PipelineId: pipelineId,
354
+
Name: w.Name,
355
+
}, s.n)
356
+
if err != nil {
357
+
return err
358
+
}
403
359
}
404
-
405
-
continue
406
360
}
407
361
408
-
eng := s.engs[w.Engine]
409
-
410
-
if _, ok := workflows[eng]; !ok {
411
-
workflows[eng] = []models.Workflow{}
412
-
}
413
-
414
-
ewf, err := s.engs[w.Engine].InitWorkflow(*w, tpl)
415
-
if err != nil {
416
-
return fmt.Errorf("init workflow: %w", err)
417
-
}
418
-
419
-
// inject TANGLED_* env vars after InitWorkflow
420
-
// This prevents user-defined env vars from overriding them
421
-
if ewf.Environment == nil {
422
-
ewf.Environment = make(map[string]string)
362
+
ok := s.jq.Enqueue(queue.Job{
363
+
Run: func() error {
364
+
engine.StartWorkflows(log.SubLogger(s.l, "engine"), s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
365
+
RepoOwner: tpl.TriggerMetadata.Repo.Did,
366
+
RepoName: tpl.TriggerMetadata.Repo.Repo,
367
+
Workflows: workflows,
368
+
}, pipelineId)
369
+
return nil
370
+
},
371
+
OnFail: func(jobError error) {
372
+
s.l.Error("pipeline run failed", "error", jobError)
373
+
},
374
+
})
375
+
if ok {
376
+
s.l.Info("pipeline enqueued successfully", "id", msg.Rkey)
377
+
} else {
378
+
s.l.Error("failed to enqueue pipeline: queue is full")
423
379
}
424
-
maps.Copy(ewf.Environment, pipelineEnv)
425
-
426
-
workflows[eng] = append(workflows[eng], *ewf)
427
380
}
428
381
429
-
// enqueue pipeline
430
-
ok := s.jq.Enqueue(queue.Job{
431
-
Run: func() error {
432
-
engine.StartWorkflows(log.SubLogger(s.l, "engine"), s.vault, s.cfg, s.db, s.n, ctx, &models.Pipeline{
433
-
RepoOwner: tpl.TriggerMetadata.Repo.Did,
434
-
RepoName: tpl.TriggerMetadata.Repo.Repo,
435
-
Workflows: workflows,
436
-
}, pipelineId)
437
-
return nil
438
-
},
439
-
OnFail: func(jobError error) {
440
-
s.l.Error("pipeline run failed", "error", jobError)
441
-
},
442
-
})
443
-
if !ok {
444
-
return fmt.Errorf("failed to enqueue pipeline: queue is full")
445
-
}
446
-
s.l.Info("pipeline enqueued successfully", "id", pipelineId)
447
-
448
-
// emit StatusPending for all workflows here (after successful enqueue)
449
-
for _, ewfs := range workflows {
450
-
for _, ewf := range ewfs {
451
-
err := s.db.StatusPending(models.WorkflowId{
452
-
PipelineId: pipelineId,
453
-
Name: ewf.Name,
454
-
}, s.n)
455
-
if err != nil {
456
-
return fmt.Errorf("db.StatusPending: %w", err)
457
-
}
458
-
}
459
-
}
460
382
return nil
461
383
}
462
384
463
-
// newRepoPath creates a path to store repository by its did and rkey.
464
-
// The path format would be: `/data/repos/did:plc:foo/sh.tangled.repo/repo-rkey
465
-
func (s *Spindle) newRepoPath(did syntax.DID, rkey syntax.RecordKey) string {
466
-
return filepath.Join(s.cfg.Server.RepoDir(), did.String(), tangled.RepoNSID, rkey.String())
467
-
}
385
+
func (s *Spindle) configureOwner() error {
386
+
cfgOwner := s.cfg.Server.Owner
468
387
469
-
func (s *Spindle) newRepoCloneUrl(knot, did, name string) string {
470
-
scheme := "https://"
471
-
if s.cfg.Server.Dev {
472
-
scheme = "http://"
388
+
existing, err := s.e.GetSpindleUsersByRole("server:owner", rbacDomain)
389
+
if err != nil {
390
+
return err
473
391
}
474
-
return fmt.Sprintf("%s%s/%s/%s", scheme, knot, did, name)
475
-
}
476
392
477
-
const RequiredVersion = "2.49.0"
393
+
switch len(existing) {
394
+
case 0:
395
+
// no owner configured, continue
396
+
case 1:
397
+
// find existing owner
398
+
existingOwner := existing[0]
478
399
479
-
func ensureGitVersion() error {
480
-
v, err := git.Version()
481
-
if err != nil {
482
-
return fmt.Errorf("fetching git version: %w", err)
400
+
// no ownership change, this is okay
401
+
if existingOwner == s.cfg.Server.Owner {
402
+
break
403
+
}
404
+
405
+
// remove existing owner
406
+
err = s.e.RemoveSpindleOwner(rbacDomain, existingOwner)
407
+
if err != nil {
408
+
return err
409
+
}
410
+
default:
411
+
return fmt.Errorf("more than one owner in DB, try deleting %q and starting over", s.cfg.Server.DBPath)
483
412
}
484
-
if v.LessThan(version.Must(version.NewVersion(RequiredVersion))) {
485
-
return fmt.Errorf("installed git version %q is not supported, Spindle requires git version >= %q", v, RequiredVersion)
486
-
}
487
-
return nil
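// re-adding the owner below is assumed to be idempotent when ownership has not changed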
413
+
414
+
return s.e.AddSpindleOwner(rbacDomain, cfgOwner)
488
415
}
-391
spindle/tap.go
···
1
-
package spindle
2
-
3
-
import (
4
-
"context"
5
-
"encoding/json"
6
-
"fmt"
7
-
"time"
8
-
9
-
"github.com/bluesky-social/indigo/atproto/syntax"
10
-
"tangled.org/core/api/tangled"
11
-
"tangled.org/core/eventconsumer"
12
-
"tangled.org/core/spindle/db"
13
-
"tangled.org/core/spindle/git"
14
-
"tangled.org/core/spindle/models"
15
-
"tangled.org/core/tap"
16
-
"tangled.org/core/tid"
17
-
"tangled.org/core/workflow"
18
-
)
19
-
20
-
func (s *Spindle) processEvent(ctx context.Context, evt tap.Event) error {
21
-
l := s.l.With("component", "tapIndexer")
22
-
23
-
var err error
24
-
switch evt.Type {
25
-
case tap.EvtRecord:
26
-
switch evt.Record.Collection.String() {
27
-
case tangled.SpindleMemberNSID:
28
-
err = s.processMember(ctx, evt)
29
-
case tangled.RepoNSID:
30
-
err = s.processRepo(ctx, evt)
31
-
case tangled.RepoCollaboratorNSID:
32
-
err = s.processCollaborator(ctx, evt)
33
-
case tangled.RepoPullNSID:
34
-
err = s.processPull(ctx, evt)
35
-
}
36
-
case tap.EvtIdentity:
37
-
// no-op
38
-
}
39
-
40
-
if err != nil {
41
-
l.Error("failed to process message. will retry later", "event.ID", evt.ID, "err", err)
42
-
return err
43
-
}
44
-
return nil
45
-
}
46
-
47
-
// NOTE: make sure to return nil if we don't need to retry (e.g. forbidden, unrelated)
48
-
49
-
func (s *Spindle) processMember(ctx context.Context, evt tap.Event) error {
50
-
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
51
-
52
-
l.Info("processing spindle.member record")
53
-
54
-
// only listen to members
55
-
if ok, err := s.e.IsSpindleMemberInviteAllowed(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
56
-
l.Warn("forbidden request: member invite not allowed", "did", evt.Record.Did, "error", err)
57
-
return nil
58
-
}
59
-
60
-
switch evt.Record.Action {
61
-
case tap.RecordCreateAction, tap.RecordUpdateAction:
62
-
record := tangled.SpindleMember{}
63
-
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
64
-
return fmt.Errorf("parsing record: %w", err)
65
-
}
66
-
67
-
domain := s.cfg.Server.Hostname
68
-
if record.Instance != domain {
69
-
l.Info("domain mismatch", "domain", record.Instance, "expected", domain)
70
-
return nil
71
-
}
72
-
73
-
created, err := time.Parse(record.CreatedAt, time.RFC3339)
74
-
if err != nil {
75
-
created = time.Now()
76
-
}
77
-
if err := db.AddSpindleMember(s.db, db.SpindleMember{
78
-
Did: evt.Record.Did,
79
-
Rkey: evt.Record.Rkey.String(),
80
-
Instance: record.Instance,
81
-
Subject: syntax.DID(record.Subject),
82
-
Created: created,
83
-
}); err != nil {
84
-
l.Error("failed to add member", "error", err)
85
-
return fmt.Errorf("adding member to db: %w", err)
86
-
}
87
-
if err := s.e.AddSpindleMember(syntax.DID(record.Subject), s.cfg.Server.Did()); err != nil {
88
-
return fmt.Errorf("adding member to rbac: %w", err)
89
-
}
90
-
if err := s.tap.AddRepos(ctx, []syntax.DID{syntax.DID(record.Subject)}); err != nil {
91
-
return fmt.Errorf("adding did to tap", err)
92
-
}
93
-
94
-
l.Info("added member", "member", record.Subject)
95
-
return nil
96
-
97
-
case tap.RecordDeleteAction:
98
-
var (
99
-
did = evt.Record.Did.String()
100
-
rkey = evt.Record.Rkey.String()
101
-
)
102
-
member, err := db.GetSpindleMember(s.db, did, rkey)
103
-
if err != nil {
104
-
return fmt.Errorf("finding member: %w", err)
105
-
}
106
-
107
-
if err := db.RemoveSpindleMember(s.db, did, rkey); err != nil {
108
-
return fmt.Errorf("removing member from db: %w", err)
109
-
}
110
-
if err := s.e.RemoveSpindleMember(member.Subject, s.cfg.Server.Did()); err != nil {
111
-
return fmt.Errorf("removing member from rbac: %w", err)
112
-
}
113
-
if err := s.tapSafeRemoveDid(ctx, member.Subject); err != nil {
114
-
return fmt.Errorf("removing did from tap: %w", err)
115
-
}
116
-
117
-
l.Info("removed member", "member", member.Subject)
118
-
return nil
119
-
}
120
-
return nil
121
-
}
122
-
123
-
func (s *Spindle) processCollaborator(ctx context.Context, evt tap.Event) error {
124
-
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
125
-
126
-
l.Info("processing repo.collaborator record")
127
-
128
-
// only listen to members
129
-
if ok, err := s.e.IsSpindleMember(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
130
-
l.Warn("forbidden request: not spindle member", "did", evt.Record.Did, "err", err)
131
-
return nil
132
-
}
133
-
134
-
switch evt.Record.Action {
135
-
case tap.RecordCreateAction, tap.RecordUpdateAction:
136
-
record := tangled.RepoCollaborator{}
137
-
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
138
-
l.Error("invalid record", "err", err)
139
-
return fmt.Errorf("parsing record: %w", err)
140
-
}
141
-
142
-
// retry later if target repo is not ingested yet
143
-
if _, err := s.db.GetRepo(syntax.ATURI(record.Repo)); err != nil {
144
-
l.Warn("target repo is not ingested yet", "repo", record.Repo, "err", err)
145
-
return fmt.Errorf("target repo is unknown")
146
-
}
147
-
148
-
// check perms for this user
149
-
if ok, err := s.e.IsRepoCollaboratorInviteAllowed(evt.Record.Did, syntax.ATURI(record.Repo)); !ok || err != nil {
150
-
l.Warn("forbidden request collaborator invite not allowed", "did", evt.Record.Did, "err", err)
151
-
return nil
152
-
}
153
-
154
-
if err := s.db.PutRepoCollaborator(&db.RepoCollaborator{
155
-
Did: evt.Record.Did,
156
-
Rkey: evt.Record.Rkey,
157
-
Repo: syntax.ATURI(record.Repo),
158
-
Subject: syntax.DID(record.Subject),
159
-
}); err != nil {
160
-
return fmt.Errorf("adding collaborator to db: %w", err)
161
-
}
162
-
if err := s.e.AddRepoCollaborator(syntax.DID(record.Subject), syntax.ATURI(record.Repo)); err != nil {
163
-
return fmt.Errorf("adding collaborator to rbac: %w", err)
164
-
}
165
-
if err := s.tap.AddRepos(ctx, []syntax.DID{syntax.DID(record.Subject)}); err != nil {
166
-
return fmt.Errorf("adding did to tap: %w", err)
167
-
}
168
-
169
-
l.Info("add repo collaborator", "subejct", record.Subject, "repo", record.Repo)
170
-
return nil
171
-
172
-
case tap.RecordDeleteAction:
173
-
// get existing collaborator
174
-
collaborator, err := s.db.GetRepoCollaborator(evt.Record.Did, evt.Record.Rkey)
175
-
if err != nil {
176
-
return fmt.Errorf("failed to get existing collaborator info: %w", err)
177
-
}
178
-
179
-
// check perms for this user
180
-
if ok, err := s.e.IsRepoCollaboratorInviteAllowed(evt.Record.Did, collaborator.Repo); !ok || err != nil {
181
-
l.Warn("forbidden request collaborator invite not allowed", "did", evt.Record.Did, "err", err)
182
-
return nil
183
-
}
184
-
185
-
if err := s.db.RemoveRepoCollaborator(collaborator.Subject, collaborator.Rkey); err != nil {
186
-
return fmt.Errorf("removing collaborator from db: %w", err)
187
-
}
188
-
if err := s.e.RemoveRepoCollaborator(collaborator.Subject, collaborator.Repo); err != nil {
189
-
return fmt.Errorf("removing collaborator from rbac: %w", err)
190
-
}
191
-
if err := s.tapSafeRemoveDid(ctx, collaborator.Subject); err != nil {
192
-
return fmt.Errorf("removing did from tap: %w", err)
193
-
}
194
-
195
-
l.Info("removed repo collaborator", "subejct", collaborator.Subject, "repo", collaborator.Repo)
196
-
return nil
197
-
}
198
-
return nil
199
-
}
200
-
201
-
func (s *Spindle) processRepo(ctx context.Context, evt tap.Event) error {
202
-
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
203
-
204
-
l.Info("processing repo record")
205
-
206
-
// only listen to members
207
-
if ok, err := s.e.IsSpindleMember(evt.Record.Did, s.cfg.Server.Did()); !ok || err != nil {
208
-
l.Warn("forbidden request: not spindle member", "did", evt.Record.Did, "err", err)
209
-
return nil
210
-
}
211
-
212
-
switch evt.Record.Action {
213
-
case tap.RecordCreateAction, tap.RecordUpdateAction:
214
-
record := tangled.Repo{}
215
-
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
216
-
return fmt.Errorf("parsing record: %w", err)
217
-
}
218
-
219
-
domain := s.cfg.Server.Hostname
220
-
if record.Spindle == nil || *record.Spindle != domain {
221
-
if record.Spindle == nil {
222
-
l.Info("spindle isn't configured", "name", record.Name)
223
-
} else {
224
-
l.Info("different spindle configured", "name", record.Name, "spindle", *record.Spindle, "domain", domain)
225
-
}
226
-
if err := s.db.DeleteRepo(evt.Record.Did, evt.Record.Rkey); err != nil {
227
-
return fmt.Errorf("deleting repo from db: %w", err)
228
-
}
229
-
return nil
230
-
}
231
-
232
-
repo := &db.Repo{
233
-
Did: evt.Record.Did,
234
-
Rkey: evt.Record.Rkey,
235
-
Name: record.Name,
236
-
Knot: record.Knot,
237
-
}
238
-
239
-
if err := s.db.PutRepo(repo); err != nil {
240
-
return fmt.Errorf("adding repo to db: %w", err)
241
-
}
242
-
243
-
if err := s.e.AddRepo(evt.Record.AtUri()); err != nil {
244
-
return fmt.Errorf("adding repo to rbac")
245
-
}
246
-
247
-
// add this knot to the event consumer
248
-
src := eventconsumer.NewKnotSource(record.Knot)
249
-
s.ks.AddSource(context.Background(), src)
250
-
251
-
// setup sparse sync
252
-
repoCloneUri := s.newRepoCloneUrl(repo.Knot, repo.Did.String(), repo.Name)
253
-
repoPath := s.newRepoPath(repo.Did, repo.Rkey)
254
-
if err := git.SparseSyncGitRepo(ctx, repoCloneUri, repoPath, ""); err != nil {
255
-
return fmt.Errorf("setting up sparse-clone git repo: %w", err)
256
-
}
257
-
258
-
l.Info("added repo", "repo", evt.Record.AtUri())
259
-
return nil
260
-
261
-
case tap.RecordDeleteAction:
262
-
// check perms for this user
263
-
if ok, err := s.e.IsRepoOwner(evt.Record.Did, evt.Record.AtUri()); !ok || err != nil {
264
-
l.Warn("forbidden request: not repo owner", "did", evt.Record.Did, "err", err)
265
-
return nil
266
-
}
267
-
268
-
if err := s.db.DeleteRepo(evt.Record.Did, evt.Record.Rkey); err != nil {
269
-
return fmt.Errorf("deleting repo from db: %w", err)
270
-
}
271
-
272
-
if err := s.e.DeleteRepo(evt.Record.AtUri()); err != nil {
273
-
return fmt.Errorf("deleting repo from rbac: %w", err)
274
-
}
275
-
276
-
l.Info("deleted repo", "repo", evt.Record.AtUri())
277
-
return nil
278
-
}
279
-
return nil
280
-
}
281
-
282
-
func (s *Spindle) processPull(ctx context.Context, evt tap.Event) error {
283
-
l := s.l.With("component", "tapIndexer", "record", evt.Record.AtUri())
284
-
285
-
l.Info("processing pull record")
286
-
287
-
// only listen to live events
288
-
if !evt.Record.Live {
289
-
l.Info("skipping backfill event", "event", evt.Record.AtUri())
290
-
return nil
291
-
}
292
-
293
-
switch evt.Record.Action {
294
-
case tap.RecordCreateAction, tap.RecordUpdateAction:
295
-
record := tangled.RepoPull{}
296
-
if err := json.Unmarshal(evt.Record.Record, &record); err != nil {
297
-
l.Error("invalid record", "err", err)
298
-
return fmt.Errorf("parsing record: %w", err)
299
-
}
300
-
301
-
// ignore legacy records
302
-
if record.Target == nil {
303
-
l.Info("ignoring pull record: target repo is nil")
304
-
return nil
305
-
}
306
-
307
-
// ignore patch-based and fork-based PRs
308
-
if record.Source == nil || record.Source.Repo != nil {
309
-
l.Info("ignoring pull record: not a branch-based pull request")
310
-
return nil
311
-
}
312
-
313
-
// skip if target repo is unknown
314
-
repo, err := s.db.GetRepo(syntax.ATURI(record.Target.Repo))
315
-
if err != nil {
316
-
l.Warn("target repo is not ingested yet", "repo", record.Target.Repo, "err", err)
317
-
return fmt.Errorf("target repo is unknown")
318
-
}
319
-
320
-
compiler := workflow.Compiler{
321
-
Trigger: tangled.Pipeline_TriggerMetadata{
322
-
Kind: string(workflow.TriggerKindPullRequest),
323
-
PullRequest: &tangled.Pipeline_PullRequestTriggerData{
324
-
Action: "create",
325
-
SourceBranch: record.Source.Branch,
326
-
SourceSha: record.Source.Sha,
327
-
TargetBranch: record.Target.Branch,
328
-
},
329
-
Repo: &tangled.Pipeline_TriggerRepo{
330
-
Did: repo.Did.String(),
331
-
Knot: repo.Knot,
332
-
Repo: repo.Name,
333
-
},
334
-
},
335
-
}
336
-
337
-
repoUri := s.newRepoCloneUrl(repo.Knot, repo.Did.String(), repo.Name)
338
-
repoPath := s.newRepoPath(repo.Did, repo.Rkey)
339
-
340
-
// load workflow definitions from rev (without spindle context)
341
-
rawPipeline, err := s.loadPipeline(ctx, repoUri, repoPath, record.Source.Sha)
342
-
if err != nil {
343
-
// don't retry
344
-
l.Error("failed loading pipeline", "err", err)
345
-
return nil
346
-
}
347
-
if len(rawPipeline) == 0 {
348
-
l.Info("no workflow definition find for the repo. skipping the event")
349
-
return nil
350
-
}
351
-
tpl := compiler.Compile(compiler.Parse(rawPipeline))
352
-
// TODO: pass compile error to workflow log
353
-
for _, w := range compiler.Diagnostics.Errors {
354
-
l.Error(w.String())
355
-
}
356
-
for _, w := range compiler.Diagnostics.Warnings {
357
-
l.Warn(w.String())
358
-
}
359
-
360
-
pipelineId := models.PipelineId{
361
-
Knot: tpl.TriggerMetadata.Repo.Knot,
362
-
Rkey: tid.TID(),
363
-
}
364
-
if err := s.db.CreatePipelineEvent(pipelineId.Rkey, tpl, s.n); err != nil {
365
-
l.Error("failed to create pipeline event", "err", err)
366
-
return nil
367
-
}
368
-
err = s.processPipeline(ctx, tpl, pipelineId)
369
-
if err != nil {
370
-
// don't retry
371
-
l.Error("failed processing pipeline", "err", err)
372
-
return nil
373
-
}
374
-
case tap.RecordDeleteAction:
375
-
// no-op
376
-
}
377
-
return nil
378
-
}
379
-
380
-
func (s *Spindle) tapSafeRemoveDid(ctx context.Context, did syntax.DID) error {
381
-
known, err := s.db.IsKnownDid(syntax.DID(did))
382
-
if err != nil {
383
-
return fmt.Errorf("ensuring did known state: %w", err)
384
-
}
385
-
if !known {
386
-
if err := s.tap.RemoveRepos(ctx, []syntax.DID{did}); err != nil {
387
-
return fmt.Errorf("removing did from tap: %w", err)
388
-
}
389
-
}
390
-
return nil
391
-
}
+2
-1
spindle/xrpc/add_secret.go
···
11
11
"github.com/bluesky-social/indigo/xrpc"
12
12
securejoin "github.com/cyphar/filepath-securejoin"
13
13
"tangled.org/core/api/tangled"
14
+
"tangled.org/core/rbac"
14
15
"tangled.org/core/spindle/secrets"
15
16
xrpcerr "tangled.org/core/xrpc/errors"
16
17
)
···
67
68
return
68
69
}
69
70
70
-
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
71
+
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
71
72
l.Error("insufficent permissions", "did", actorDid.String())
72
73
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
73
74
return
+2
-1
spindle/xrpc/list_secrets.go
···
11
11
"github.com/bluesky-social/indigo/xrpc"
12
12
securejoin "github.com/cyphar/filepath-securejoin"
13
13
"tangled.org/core/api/tangled"
14
+
"tangled.org/core/rbac"
14
15
"tangled.org/core/spindle/secrets"
15
16
xrpcerr "tangled.org/core/xrpc/errors"
16
17
)
···
62
63
return
63
64
}
64
65
65
-
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
66
+
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
66
67
l.Error("insufficent permissions", "did", actorDid.String())
67
68
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
68
69
return
+1
-1
spindle/xrpc/owner.go
-72
spindle/xrpc/pipeline_cancelPipeline.go
···
1
-
package xrpc
2
-
3
-
import (
4
-
"encoding/json"
5
-
"fmt"
6
-
"net/http"
7
-
"strings"
8
-
9
-
"github.com/bluesky-social/indigo/atproto/syntax"
10
-
"tangled.org/core/api/tangled"
11
-
"tangled.org/core/spindle/models"
12
-
xrpcerr "tangled.org/core/xrpc/errors"
13
-
)
14
-
15
-
func (x *Xrpc) CancelPipeline(w http.ResponseWriter, r *http.Request) {
16
-
l := x.Logger
17
-
fail := func(e xrpcerr.XrpcError) {
18
-
l.Error("failed", "kind", e.Tag, "error", e.Message)
19
-
writeError(w, e, http.StatusBadRequest)
20
-
}
21
-
l.Debug("cancel pipeline")
22
-
23
-
actorDid, ok := r.Context().Value(ActorDid).(syntax.DID)
24
-
if !ok {
25
-
fail(xrpcerr.MissingActorDidError)
26
-
return
27
-
}
28
-
29
-
var input tangled.PipelineCancelPipeline_Input
30
-
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
31
-
fail(xrpcerr.GenericError(err))
32
-
return
33
-
}
34
-
35
-
aturi := syntax.ATURI(input.Pipeline)
36
-
wid := models.WorkflowId{
37
-
PipelineId: models.PipelineId{
38
-
Knot: strings.TrimPrefix(aturi.Authority().String(), "did:web:"),
39
-
Rkey: aturi.RecordKey().String(),
40
-
},
41
-
Name: input.Workflow,
42
-
}
43
-
l.Debug("cancel pipeline", "wid", wid)
44
-
45
-
// unfortunately we have to resolve repo-at here
46
-
repoAt, err := syntax.ParseATURI(input.Repo)
47
-
if err != nil {
48
-
fail(xrpcerr.InvalidRepoError(input.Repo))
49
-
return
50
-
}
51
-
52
-
isRepoOwner, err := x.Enforcer.IsRepoOwner(actorDid, repoAt)
53
-
if err != nil || !isRepoOwner {
54
-
fail(xrpcerr.AccessControlError(actorDid.String()))
55
-
return
56
-
}
57
-
for _, engine := range x.Engines {
58
-
l.Debug("destorying workflow", "wid", wid)
59
-
err = engine.DestroyWorkflow(r.Context(), wid)
60
-
if err != nil {
61
-
fail(xrpcerr.GenericError(fmt.Errorf("dailed to destroy workflow: %w", err)))
62
-
return
63
-
}
64
-
err = x.Db.StatusCancelled(wid, "User canceled the workflow", -1, x.Notifier)
65
-
if err != nil {
66
-
fail(xrpcerr.GenericError(fmt.Errorf("dailed to emit status failed: %w", err)))
67
-
return
68
-
}
69
-
}
70
-
71
-
w.WriteHeader(http.StatusOK)
72
-
}
+2
-1
spindle/xrpc/remove_secret.go
···
10
10
"github.com/bluesky-social/indigo/xrpc"
11
11
securejoin "github.com/cyphar/filepath-securejoin"
12
12
"tangled.org/core/api/tangled"
13
+
"tangled.org/core/rbac"
13
14
"tangled.org/core/spindle/secrets"
14
15
xrpcerr "tangled.org/core/xrpc/errors"
15
16
)
···
61
62
return
62
63
}
63
64
64
-
if ok, err := x.Enforcer.IsRepoSettingsWriteAllowed(actorDid, repoAt); !ok || err != nil {
65
+
if ok, err := x.Enforcer.IsSettingsAllowed(actorDid.String(), rbac.ThisServer, didPath); !ok || err != nil {
65
66
l.Error("insufficent permissions", "did", actorDid.String())
66
67
writeError(w, xrpcerr.AccessControlError(actorDid.String()), http.StatusUnauthorized)
67
68
return
+2
-5
spindle/xrpc/xrpc.go
···
10
10
11
11
"tangled.org/core/api/tangled"
12
12
"tangled.org/core/idresolver"
13
-
"tangled.org/core/notifier"
14
-
"tangled.org/core/rbac2"
13
+
"tangled.org/core/rbac"
15
14
"tangled.org/core/spindle/config"
16
15
"tangled.org/core/spindle/db"
17
16
"tangled.org/core/spindle/models"
···
25
24
type Xrpc struct {
26
25
Logger *slog.Logger
27
26
Db *db.DB
28
-
Enforcer *rbac2.Enforcer
27
+
Enforcer *rbac.Enforcer
29
28
Engines map[string]models.Engine
30
29
Config *config.Config
31
30
Resolver *idresolver.Resolver
32
31
Vault secrets.Manager
33
-
Notifier *notifier.Notifier
34
32
ServiceAuth *serviceauth.ServiceAuth
35
33
}
36
34
···
43
41
r.Post("/"+tangled.RepoAddSecretNSID, x.AddSecret)
44
42
r.Post("/"+tangled.RepoRemoveSecretNSID, x.RemoveSecret)
45
43
r.Get("/"+tangled.RepoListSecretsNSID, x.ListSecrets)
46
-
r.Post("/"+tangled.PipelineCancelPipelineNSID, x.CancelPipeline)
47
44
})
48
45
49
46
// service query endpoints (no auth required)
-24
tap/simpleIndexer.go
···
1
-
package tap
2
-
3
-
import "context"
4
-
5
-
type SimpleIndexer struct {
6
-
EventHandler func(ctx context.Context, evt Event) error
7
-
ErrorHandler func(ctx context.Context, err error)
8
-
}
9
-
10
-
var _ Handler = (*SimpleIndexer)(nil)
11
-
12
-
func (i *SimpleIndexer) OnEvent(ctx context.Context, evt Event) error {
13
-
if i.EventHandler == nil {
14
-
return nil
15
-
}
16
-
return i.EventHandler(ctx, evt)
17
-
}
18
-
19
-
func (i *SimpleIndexer) OnError(ctx context.Context, err error) {
20
-
if i.ErrorHandler == nil {
21
-
return
22
-
}
23
-
i.ErrorHandler(ctx, err)
24
-
}
-169
tap/tap.go
···
1
-
/// heavily inspired by <https://github.com/bluesky-social/atproto/blob/c7f5a868837d3e9b3289f988fee2267789327b06/packages/tap/README.md>
2
-
3
-
package tap
4
-
5
-
import (
6
-
"bytes"
7
-
"context"
8
-
"encoding/json"
9
-
"fmt"
10
-
"net/http"
11
-
"net/url"
12
-
13
-
"github.com/bluesky-social/indigo/atproto/syntax"
14
-
"github.com/gorilla/websocket"
15
-
"tangled.org/core/log"
16
-
)
17
-
18
-
// type WebsocketOptions struct {
19
-
// maxReconnectSeconds int
20
-
// heartbeatIntervalMs int
21
-
// // onReconnectError
22
-
// }
23
-
24
-
type Handler interface {
25
-
OnEvent(ctx context.Context, evt Event) error
26
-
OnError(ctx context.Context, err error)
27
-
}
28
-
29
-
type Client struct {
30
-
Url string
31
-
AdminPassword string
32
-
HTTPClient *http.Client
33
-
}
34
-
35
-
func NewClient(url, adminPassword string) Client {
36
-
return Client{
37
-
Url: url,
38
-
AdminPassword: adminPassword,
39
-
HTTPClient: &http.Client{},
40
-
}
41
-
}
42
-
43
-
func (c *Client) AddRepos(ctx context.Context, dids []syntax.DID) error {
44
-
body, err := json.Marshal(map[string][]syntax.DID{"dids": dids})
45
-
if err != nil {
46
-
return err
47
-
}
48
-
req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/add", bytes.NewReader(body))
49
-
if err != nil {
50
-
return err
51
-
}
52
-
req.SetBasicAuth("admin", c.AdminPassword)
53
-
req.Header.Set("Content-Type", "application/json")
54
-
55
-
resp, err := c.HTTPClient.Do(req)
56
-
if err != nil {
57
-
return err
58
-
}
59
-
defer resp.Body.Close()
60
-
if resp.StatusCode != http.StatusOK {
61
-
return fmt.Errorf("tap: /repos/add failed with status %d", resp.StatusCode)
62
-
}
63
-
return nil
64
-
}
65
-
66
-
func (c *Client) RemoveRepos(ctx context.Context, dids []syntax.DID) error {
67
-
body, err := json.Marshal(map[string][]syntax.DID{"dids": dids})
68
-
if err != nil {
69
-
return err
70
-
}
71
-
req, err := http.NewRequestWithContext(ctx, "POST", c.Url+"/repos/remove", bytes.NewReader(body))
72
-
if err != nil {
73
-
return err
74
-
}
75
-
req.SetBasicAuth("admin", c.AdminPassword)
76
-
req.Header.Set("Content-Type", "application/json")
77
-
78
-
resp, err := c.HTTPClient.Do(req)
79
-
if err != nil {
80
-
return err
81
-
}
82
-
defer resp.Body.Close()
83
-
if resp.StatusCode != http.StatusOK {
84
-
return fmt.Errorf("tap: /repos/remove failed with status %d", resp.StatusCode)
85
-
}
86
-
return nil
87
-
}
88
-
89
-
func (c *Client) Connect(ctx context.Context, handler Handler) error {
90
-
l := log.FromContext(ctx)
91
-
92
-
u, err := url.Parse(c.Url)
93
-
if err != nil {
94
-
return err
95
-
}
96
-
if u.Scheme == "https" {
97
-
u.Scheme = "wss"
98
-
} else {
99
-
u.Scheme = "ws"
100
-
}
101
-
u.Path = "/channel"
102
-
103
-
// TODO: set auth on dial
104
-
105
-
url := u.String()
106
-
107
-
// var backoff int
108
-
// for {
109
-
// select {
110
-
// case <-ctx.Done():
111
-
// return ctx.Err()
112
-
// default:
113
-
// }
114
-
//
115
-
// header := http.Header{
116
-
// "Authorization": []string{""},
117
-
// }
118
-
// conn, res, err := websocket.DefaultDialer.DialContext(ctx, url, header)
119
-
// if err != nil {
120
-
// l.Warn("dialing failed", "url", url, "err", err, "backoff", backoff)
121
-
// time.Sleep(time.Duration(5+backoff) * time.Second)
122
-
// backoff++
123
-
//
124
-
// continue
125
-
// } else {
126
-
// backoff = 0
127
-
// }
128
-
//
129
-
// l.Info("event subscription response", "code", res.StatusCode)
130
-
// }
131
-
132
-
// TODO: keep websocket connection alive
133
-
conn, _, err := websocket.DefaultDialer.DialContext(ctx, url, nil)
134
-
if err != nil {
135
-
return err
136
-
}
137
-
defer conn.Close()
138
-
139
-
for {
140
-
select {
141
-
case <-ctx.Done():
142
-
return ctx.Err()
143
-
default:
144
-
}
145
-
_, message, err := conn.ReadMessage()
146
-
if err != nil {
147
-
return err
148
-
}
149
-
150
-
var ev Event
151
-
if err := json.Unmarshal(message, &ev); err != nil {
152
-
handler.OnError(ctx, fmt.Errorf("failed to parse message: %w", err))
153
-
continue
154
-
}
155
-
if err := handler.OnEvent(ctx, ev); err != nil {
156
-
handler.OnError(ctx, fmt.Errorf("failed to process event %d: %w", ev.ID, err))
157
-
continue
158
-
}
159
-
160
-
ack := map[string]any{
161
-
"type": "ack",
162
-
"id": ev.ID,
163
-
}
164
-
if err := conn.WriteJSON(ack); err != nil {
165
-
l.Warn("failed to send ack", "err", err)
166
-
continue
167
-
}
168
-
}
169
-
}
-62
tap/types.go
···
1
-
package tap
2
-
3
-
import (
4
-
"encoding/json"
5
-
"fmt"
6
-
7
-
"github.com/bluesky-social/indigo/atproto/syntax"
8
-
)
9
-
10
-
type EventType string
11
-
12
-
const (
13
-
EvtRecord EventType = "record"
14
-
EvtIdentity EventType = "identity"
15
-
)
16
-
17
-
type Event struct {
18
-
ID int64 `json:"id"`
19
-
Type EventType `json:"type"`
20
-
Record *RecordEventData `json:"record,omitempty"`
21
-
Identity *IdentityEventData `json:"identity,omitempty"`
22
-
}
23
-
24
-
type RecordEventData struct {
25
-
Live bool `json:"live"`
26
-
Did syntax.DID `json:"did"`
27
-
Rev string `json:"rev"`
28
-
Collection syntax.NSID `json:"collection"`
29
-
Rkey syntax.RecordKey `json:"rkey"`
30
-
Action RecordAction `json:"action"`
31
-
Record json.RawMessage `json:"record,omitempty"`
32
-
CID *syntax.CID `json:"cid,omitempty"`
33
-
}
34
-
35
-
func (r *RecordEventData) AtUri() syntax.ATURI {
36
-
return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, r.Collection, r.Rkey))
37
-
}
38
-
39
-
type RecordAction string
40
-
41
-
const (
42
-
RecordCreateAction RecordAction = "create"
43
-
RecordUpdateAction RecordAction = "update"
44
-
RecordDeleteAction RecordAction = "delete"
45
-
)
46
-
47
-
type IdentityEventData struct {
48
-
DID syntax.DID `json:"did"`
49
-
Handle string `json:"handle"`
50
-
IsActive bool `json:"is_active"`
51
-
Status RepoStatus `json:"status"`
52
-
}
53
-
54
-
type RepoStatus string
55
-
56
-
const (
57
-
RepoStatusActive RepoStatus = "active"
58
-
RepoStatusTakendown RepoStatus = "takendown"
59
-
RepoStatusSuspended RepoStatus = "suspended"
60
-
RepoStatusDeactivated RepoStatus = "deactivated"
61
-
RepoStatusDeleted RepoStatus = "deleted"
62
-
)
+3
types/diff.go
+112
types/diff_test.go
···
1
+
package types
2
+
3
+
import "testing"
4
+
5
+
func TestDiffId(t *testing.T) {
6
+
tests := []struct {
7
+
name string
8
+
diff Diff
9
+
expected string
10
+
}{
11
+
{
12
+
name: "regular file uses new name",
13
+
diff: Diff{
14
+
Name: struct {
15
+
Old string `json:"old"`
16
+
New string `json:"new"`
17
+
}{Old: "", New: "src/main.go"},
18
+
},
19
+
expected: "src/main.go",
20
+
},
21
+
{
22
+
name: "new file uses new name",
23
+
diff: Diff{
24
+
Name: struct {
25
+
Old string `json:"old"`
26
+
New string `json:"new"`
27
+
}{Old: "", New: "src/new.go"},
28
+
IsNew: true,
29
+
},
30
+
expected: "src/new.go",
31
+
},
32
+
{
33
+
name: "deleted file uses old name",
34
+
diff: Diff{
35
+
Name: struct {
36
+
Old string `json:"old"`
37
+
New string `json:"new"`
38
+
}{Old: "src/deleted.go", New: ""},
39
+
IsDelete: true,
40
+
},
41
+
expected: "src/deleted.go",
42
+
},
43
+
{
44
+
name: "renamed file uses new name",
45
+
diff: Diff{
46
+
Name: struct {
47
+
Old string `json:"old"`
48
+
New string `json:"new"`
49
+
}{Old: "src/old.go", New: "src/renamed.go"},
50
+
IsRename: true,
51
+
},
52
+
expected: "src/renamed.go",
53
+
},
54
+
}
55
+
56
+
for _, tt := range tests {
57
+
t.Run(tt.name, func(t *testing.T) {
58
+
if got := tt.diff.Id(); got != tt.expected {
59
+
t.Errorf("Diff.Id() = %q, want %q", got, tt.expected)
60
+
}
61
+
})
62
+
}
63
+
}
64
+
65
+
func TestChangedFilesMatchesDiffId(t *testing.T) {
66
+
// ChangedFiles() must return values matching each Diff's Id()
67
+
// so that sidebar links point to the correct anchors.
68
+
// Tests existing, deleted, new, and renamed files.
69
+
nd := NiceDiff{
70
+
Diff: []Diff{
71
+
{
72
+
Name: struct {
73
+
Old string `json:"old"`
74
+
New string `json:"new"`
75
+
}{Old: "", New: "src/modified.go"},
76
+
},
77
+
{
78
+
Name: struct {
79
+
Old string `json:"old"`
80
+
New string `json:"new"`
81
+
}{Old: "src/deleted.go", New: ""},
82
+
IsDelete: true,
83
+
},
84
+
{
85
+
Name: struct {
86
+
Old string `json:"old"`
87
+
New string `json:"new"`
88
+
}{Old: "", New: "src/new.go"},
89
+
IsNew: true,
90
+
},
91
+
{
92
+
Name: struct {
93
+
Old string `json:"old"`
94
+
New string `json:"new"`
95
+
}{Old: "src/old.go", New: "src/renamed.go"},
96
+
IsRename: true,
97
+
},
98
+
},
99
+
}
100
+
101
+
changedFiles := nd.ChangedFiles()
102
+
103
+
if len(changedFiles) != len(nd.Diff) {
104
+
t.Fatalf("ChangedFiles() returned %d items, want %d", len(changedFiles), len(nd.Diff))
105
+
}
106
+
107
+
for i, diff := range nd.Diff {
108
+
if changedFiles[i] != diff.Id() {
109
+
t.Errorf("ChangedFiles()[%d] = %q, but Diff.Id() = %q", i, changedFiles[i], diff.Id())
110
+
}
111
+
}
112
+
}