+47 -70  api/tangled/cbor_gen.go
···
         fieldCount--
     }
 
-    if t.FinishedAt == nil {
-        fieldCount--
-    }
-
     if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
         return err
     }
···
 
     }
 
-    // t.StartedAt (string) (string)
-    if len("startedAt") > 1000000 {
-        return xerrors.Errorf("Value in field \"startedAt\" was too long")
     }
 
-    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("startedAt"))); err != nil {
         return err
     }
-    if _, err := cw.WriteString(string("startedAt")); err != nil {
         return err
     }
 
-    if len(t.StartedAt) > 1000000 {
-        return xerrors.Errorf("Value in field t.StartedAt was too long")
     }
 
-    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.StartedAt))); err != nil {
         return err
     }
-    if _, err := cw.WriteString(string(t.StartedAt)); err != nil {
         return err
     }
 
-    // t.UpdatedAt (string) (string)
-    if len("updatedAt") > 1000000 {
-        return xerrors.Errorf("Value in field \"updatedAt\" was too long")
     }
 
-    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("updatedAt"))); err != nil {
         return err
     }
-    if _, err := cw.WriteString(string("updatedAt")); err != nil {
         return err
     }
 
-    if len(t.UpdatedAt) > 1000000 {
-        return xerrors.Errorf("Value in field t.UpdatedAt was too long")
     }
 
-    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.UpdatedAt))); err != nil {
         return err
     }
-    if _, err := cw.WriteString(string(t.UpdatedAt)); err != nil {
         return err
     }
 
-    // t.FinishedAt (string) (string)
-    if t.FinishedAt != nil {
 
-        if len("finishedAt") > 1000000 {
-            return xerrors.Errorf("Value in field \"finishedAt\" was too long")
-        }
 
-        if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("finishedAt"))); err != nil {
-            return err
-        }
-        if _, err := cw.WriteString(string("finishedAt")); err != nil {
-            return err
-        }
 
-        if t.FinishedAt == nil {
-            if _, err := cw.Write(cbg.CborNull); err != nil {
-                return err
-            }
-        } else {
-            if len(*t.FinishedAt) > 1000000 {
-                return xerrors.Errorf("Value in field t.FinishedAt was too long")
-            }
-
-            if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.FinishedAt))); err != nil {
-                return err
-            }
-            if _, err := cw.WriteString(string(*t.FinishedAt)); err != nil {
-                return err
-            }
-        }
     }
     return nil
 }
···
 
     n := extra
 
-    nameBuf := make([]byte, 10)
     for i := uint64(0); i < n; i++ {
         nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
         if err != nil {
···
             t.ExitCode = (*int64)(&extraI)
         }
     }
-    // t.StartedAt (string) (string)
-    case "startedAt":
 
         {
             sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
                 return err
             }
 
-            t.StartedAt = string(sval)
         }
-    // t.UpdatedAt (string) (string)
-    case "updatedAt":
 
         {
             sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
                 return err
             }
 
-            t.UpdatedAt = string(sval)
         }
-    // t.FinishedAt (string) (string)
-    case "finishedAt":
 
         {
-            b, err := cr.ReadByte()
             if err != nil {
                 return err
             }
-            if b != cbg.CborNull[0] {
-                if err := cr.UnreadByte(); err != nil {
-                    return err
-                }
 
-                sval, err := cbg.ReadStringWithMax(cr, 1000000)
-                if err != nil {
-                    return err
-                }
-
-                t.FinishedAt = (*string)(&sval)
-            }
         }
 
     default:
···
         fieldCount--
     }
 
     if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil {
         return err
     }
···
 
     }
 
+    // t.Pipeline (string) (string)
+    if len("pipeline") > 1000000 {
+        return xerrors.Errorf("Value in field \"pipeline\" was too long")
     }
 
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pipeline"))); err != nil {
         return err
     }
+    if _, err := cw.WriteString(string("pipeline")); err != nil {
         return err
     }
 
+    if len(t.Pipeline) > 1000000 {
+        return xerrors.Errorf("Value in field t.Pipeline was too long")
     }
 
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Pipeline))); err != nil {
         return err
     }
+    if _, err := cw.WriteString(string(t.Pipeline)); err != nil {
         return err
     }
 
+    // t.Workflow (string) (string)
+    if len("workflow") > 1000000 {
+        return xerrors.Errorf("Value in field \"workflow\" was too long")
     }
 
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("workflow"))); err != nil {
         return err
     }
+    if _, err := cw.WriteString(string("workflow")); err != nil {
         return err
     }
 
+    if len(t.Workflow) > 1000000 {
+        return xerrors.Errorf("Value in field t.Workflow was too long")
     }
 
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Workflow))); err != nil {
         return err
     }
+    if _, err := cw.WriteString(string(t.Workflow)); err != nil {
         return err
     }
 
+    // t.CreatedAt (string) (string)
+    if len("createdAt") > 1000000 {
+        return xerrors.Errorf("Value in field \"createdAt\" was too long")
+    }
 
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil {
+        return err
+    }
+    if _, err := cw.WriteString(string("createdAt")); err != nil {
+        return err
+    }
 
+    if len(t.CreatedAt) > 1000000 {
+        return xerrors.Errorf("Value in field t.CreatedAt was too long")
+    }
 
+    if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil {
+        return err
+    }
+    if _, err := cw.WriteString(string(t.CreatedAt)); err != nil {
+        return err
     }
     return nil
 }
···
 
     n := extra
 
+    nameBuf := make([]byte, 9)
     for i := uint64(0); i < n; i++ {
         nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 1000000)
         if err != nil {
···
             t.ExitCode = (*int64)(&extraI)
         }
     }
+    // t.Pipeline (string) (string)
+    case "pipeline":
 
         {
             sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
                 return err
             }
 
+            t.Pipeline = string(sval)
         }
+    // t.Workflow (string) (string)
+    case "workflow":
 
         {
             sval, err := cbg.ReadStringWithMax(cr, 1000000)
···
                 return err
             }
 
+            t.Workflow = string(sval)
         }
+    // t.CreatedAt (string) (string)
+    case "createdAt":
 
         {
+            sval, err := cbg.ReadStringWithMax(cr, 1000000)
             if err != nil {
                 return err
             }
 
+            t.CreatedAt = string(sval)
         }
 
     default:
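
Note that api/tangled/cbor_gen.go is generated, so the churn above is mechanical fallout from the PipelineStatus field changes: the nameBuf shrinks from 10 to 9 bytes because the longest map key is now "createdAt" (9) rather than "finishedAt" (10). A minimal sketch of the kind of generator entrypoint that produces such a file, assuming the repo uses github.com/whyrusleeping/cbor-gen (the output path and type list here are illustrative, not necessarily the repo's actual gen target):

//go:build ignore

// gen.go — a hypothetical regeneration entrypoint for the CBOR marshalers.
package main

import (
	cbg "github.com/whyrusleeping/cbor-gen"

	"tangled.sh/tangled.sh/core/api/tangled"
)

func main() {
	// WriteMapEncodersToFile emits MarshalCBOR/UnmarshalCBOR for each listed
	// type; the field-name length checks and nameBuf sizing seen in the diff
	// above are exactly what it generates.
	if err := cbg.WriteMapEncodersToFile(
		"api/tangled/cbor_gen.go", // output file (illustrative)
		"tangled",                 // package name
		tangled.PipelineStatus{},
	); err != nil {
		panic(err)
	}
}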
+6 -8  api/tangled/pipelinestatus.go
···
 // RECORDTYPE: PipelineStatus
 type PipelineStatus struct {
     LexiconTypeID string `json:"$type,const=sh.tangled.pipeline.status" cborgen:"$type,const=sh.tangled.pipeline.status"`
     // error: error message if failed
     Error *string `json:"error,omitempty" cborgen:"error,omitempty"`
     // exitCode: exit code if failed
     ExitCode *int64 `json:"exitCode,omitempty" cborgen:"exitCode,omitempty"`
-    // finishedAt: pipeline finish time, if finished
-    FinishedAt *string `json:"finishedAt,omitempty" cborgen:"finishedAt,omitempty"`
-    // pipeline: pipeline at ref
     Pipeline string `json:"pipeline" cborgen:"pipeline"`
-    // startedAt: pipeline start time
-    StartedAt string `json:"startedAt" cborgen:"startedAt"`
-    // status: Pipeline status
     Status string `json:"status" cborgen:"status"`
-    // updatedAt: pipeline last updated time
-    UpdatedAt string `json:"updatedAt" cborgen:"updatedAt"`
 }
···
 // RECORDTYPE: PipelineStatus
 type PipelineStatus struct {
     LexiconTypeID string `json:"$type,const=sh.tangled.pipeline.status" cborgen:"$type,const=sh.tangled.pipeline.status"`
+    // createdAt: time of creation of this status update
+    CreatedAt string `json:"createdAt" cborgen:"createdAt"`
     // error: error message if failed
     Error *string `json:"error,omitempty" cborgen:"error,omitempty"`
     // exitCode: exit code if failed
     ExitCode *int64 `json:"exitCode,omitempty" cborgen:"exitCode,omitempty"`
+    // pipeline: ATURI of the pipeline
     Pipeline string `json:"pipeline" cborgen:"pipeline"`
+    // status: status of the workflow
     Status string `json:"status" cborgen:"status"`
+    // workflow: name of the workflow within this pipeline
+    Workflow string `json:"workflow" cborgen:"workflow"`
 }
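
With the timestamps gone, a status record is now one append-only event per workflow rather than a mutable per-pipeline row. A minimal construction sketch of the new shape (the ATURI, rkey, and workflow name below are made-up placeholder values):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"tangled.sh/tangled.sh/core/api/tangled"
)

func main() {
	// One status record per workflow, stamped at emission time.
	s := tangled.PipelineStatus{
		LexiconTypeID: "sh.tangled.pipeline.status",
		Pipeline:      "at://did:web:knot.example.com/sh.tangled.pipeline/3kabcxyz", // hypothetical ATURI
		Workflow:      "build",
		Status:        "pending",
		CreatedAt:     time.Now().Format(time.RFC3339),
	}
	out, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	// Error and ExitCode stay absent while nil, per their omitempty tags.
	fmt.Println(string(out))
}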
+1 -1  api/tangled/tangledpipeline.go
···
 // Pipeline_Workflow is a "workflow" in the sh.tangled.pipeline schema.
 type Pipeline_Workflow struct {
     Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
-    Dependencies []Pipeline_Dependencies_Elem `json:"dependencies" cborgen:"dependencies"`
     Environment []*Pipeline_Workflow_Environment_Elem `json:"environment" cborgen:"environment"`
     Name string `json:"name" cborgen:"name"`
     Steps []*Pipeline_Step `json:"steps" cborgen:"steps"`
···
 // Pipeline_Workflow is a "workflow" in the sh.tangled.pipeline schema.
 type Pipeline_Workflow struct {
     Clone *Pipeline_CloneOpts `json:"clone" cborgen:"clone"`
+    Dependencies []Pipeline_Dependencies_Elem `json:"dependencies" cborgen:"dependencies"`
     Environment []*Pipeline_Workflow_Environment_Elem `json:"environment" cborgen:"environment"`
     Name string `json:"name" cborgen:"name"`
     Steps []*Pipeline_Step `json:"steps" cborgen:"steps"`
+1  cmd/spindle/main.go
+13 -18  lexicons/pipeline/status.json
···
     "key": "tid",
     "record": {
       "type": "object",
-      "required": ["pipeline", "status", "startedAt", "updatedAt"],
       "properties": {
         "pipeline": {
           "type": "string",
           "format": "at-uri",
-          "description": "pipeline at ref"
         },
         "status": {
           "type": "string",
-          "description": "Pipeline status",
           "enum": [
             "pending",
             "running",
···
             "cancelled",
             "success"
           ]
         },
         "error": {
           "type": "string",
···
         "exitCode": {
           "type": "integer",
           "description": "exit code if failed"
-        },
-        "startedAt": {
-          "type": "string",
-          "format": "datetime",
-          "description": "pipeline start time"
-        },
-        "updatedAt": {
-          "type": "string",
-          "format": "datetime",
-          "description": "pipeline last updated time"
-        },
-        "finishedAt": {
-          "type": "string",
-          "format": "datetime",
-          "description": "pipeline finish time, if finished"
         }
       }
     }
···
     "key": "tid",
     "record": {
       "type": "object",
+      "required": ["pipeline", "workflow", "status", "createdAt"],
       "properties": {
         "pipeline": {
           "type": "string",
           "format": "at-uri",
+          "description": "ATURI of the pipeline"
+        },
+        "workflow": {
+          "type": "string",
+          "format": "at-uri",
+          "description": "name of the workflow within this pipeline"
         },
         "status": {
           "type": "string",
+          "description": "status of the workflow",
           "enum": [
             "pending",
             "running",
···
             "cancelled",
             "success"
           ]
+        },
+        "createdAt": {
+          "type": "string",
+          "format": "datetime",
+          "description": "time of creation of this status update"
         },
         "error": {
           "type": "string",
···
         "exitCode": {
           "type": "integer",
           "description": "exit code if failed"
         }
       }
     }
+5 -13  spindle/db/db.go
···
         did text primary key
     );
 
-    create table if not exists pipeline_status (
         rkey text not null,
-        pipeline text not null,
-        status text not null,
-
-        -- only set if status is 'failed'
-        error text,
-        exit_code integer,
-
-        started_at timestamp not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
-        updated_at timestamp not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
-        finished_at timestamp,
-
-        primary key (rkey)
     );
     `)
     if err != nil {
+148  spindle/db/events.go
···
+package db
+
+import (
+    "encoding/json"
+    "fmt"
+    "time"
+
+    "tangled.sh/tangled.sh/core/api/tangled"
+    "tangled.sh/tangled.sh/core/notifier"
+    "tangled.sh/tangled.sh/core/spindle/models"
+    "tangled.sh/tangled.sh/core/tid"
+)
+
+type Event struct {
+    Rkey      string `json:"rkey"`
+    Nsid      string `json:"nsid"`
+    Created   int64  `json:"created"`
+    EventJson string `json:"event"`
+}
+
+func (d *DB) InsertEvent(event Event, notifier *notifier.Notifier) error {
+    _, err := d.Exec(
+        `insert into events (rkey, nsid, event, created) values (?, ?, ?, ?)`,
+        event.Rkey,
+        event.Nsid,
+        event.EventJson,
+        time.Now().UnixNano(),
+    )
+
+    notifier.NotifyAll()
+
+    return err
+}
+
+func (d *DB) GetEvents(cursor int64) ([]Event, error) {
+    whereClause := ""
+    args := []any{}
+    if cursor > 0 {
+        whereClause = "where created > ?"
+        args = append(args, cursor)
+    }
+
+    query := fmt.Sprintf(`
+        select rkey, nsid, event, created
+        from events
+        %s
+        order by created asc
+        limit 100
+    `, whereClause)
+
+    rows, err := d.Query(query, args...)
+    if err != nil {
+        return nil, err
+    }
+    defer rows.Close()
+
+    var evts []Event
+    for rows.Next() {
+        var ev Event
+        if err := rows.Scan(&ev.Rkey, &ev.Nsid, &ev.EventJson, &ev.Created); err != nil {
+            return nil, err
+        }
+        evts = append(evts, ev)
+    }
+
+    if err := rows.Err(); err != nil {
+        return nil, err
+    }
+
+    return evts, nil
+}
+
+func (d *DB) CreateStatusEvent(rkey string, s tangled.PipelineStatus, n *notifier.Notifier) error {
+    eventJson, err := json.Marshal(s)
+    if err != nil {
+        return err
+    }
+
+    event := Event{
+        Rkey:      rkey,
+        Nsid:      tangled.PipelineStatusNSID,
+        Created:   time.Now().UnixNano(),
+        EventJson: string(eventJson),
+    }
+
+    return d.InsertEvent(event, n)
+}
+
+type StatusKind string
+
+var (
+    StatusKindPending   StatusKind = "pending"
+    StatusKindRunning   StatusKind = "running"
+    StatusKindFailed    StatusKind = "failed"
+    StatusKindTimeout   StatusKind = "timeout"
+    StatusKindCancelled StatusKind = "cancelled"
+    StatusKindSuccess   StatusKind = "success"
+)
+
+func (d *DB) createStatusEvent(
+    workflowId models.WorkflowId,
+    statusKind StatusKind,
+    workflowError *string,
+    exitCode *int64,
+    n *notifier.Notifier,
+) error {
+    now := time.Now()
+    pipelineAtUri := workflowId.PipelineId.AtUri()
+    s := tangled.PipelineStatus{
+        CreatedAt: now.Format(time.RFC3339),
+        Error:     workflowError,
+        ExitCode:  exitCode,
+        Pipeline:  string(pipelineAtUri),
+        Workflow:  workflowId.Name,
+        Status:    string(statusKind),
+    }
+
+    eventJson, err := json.Marshal(s)
+    if err != nil {
+        return err
+    }
+
+    event := Event{
+        Rkey:      tid.TID(),
+        Nsid:      tangled.PipelineStatusNSID,
+        Created:   now.UnixNano(),
+        EventJson: string(eventJson),
+    }
+
+    return d.InsertEvent(event, n)
+
+}
+
+func (d *DB) StatusPending(workflowId models.WorkflowId, n *notifier.Notifier) error {
+    return d.createStatusEvent(workflowId, StatusKindPending, nil, nil, n)
+}
+
+func (d *DB) StatusRunning(workflowId models.WorkflowId, n *notifier.Notifier) error {
+    return d.createStatusEvent(workflowId, StatusKindRunning, nil, nil, n)
+}
+
+func (d *DB) StatusFailed(workflowId models.WorkflowId, workflowError string, exitCode int64, n *notifier.Notifier) error {
+    return d.createStatusEvent(workflowId, StatusKindFailed, &workflowError, &exitCode, n)
+}
+
+func (d *DB) StatusSuccess(workflowId models.WorkflowId, n *notifier.Notifier) error {
+    return d.createStatusEvent(workflowId, StatusKindSuccess, nil, nil, n)
+}
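
GetEvents pages in batches of 100 ordered by the nanosecond created column, so a consumer polls by carrying the last row's Created value forward as its cursor. A minimal sketch of that loop (drainEvents is a hypothetical helper in the same db package, not part of this change):

// drainEvents shows the cursor contract of GetEvents: start from 0 (or a
// saved cursor) and advance to the last row's Created before asking again.
func drainEvents(d *DB, cursor int64, handle func(Event)) (int64, error) {
	for {
		evts, err := d.GetEvents(cursor)
		if err != nil {
			return cursor, err
		}
		if len(evts) == 0 {
			return cursor, nil // caught up; wait for a notifier wake-up
		}
		for _, ev := range evts {
			handle(ev)
			cursor = ev.Created
		}
	}
}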
+205 -177  spindle/db/pipelines.go
···
 package db
 
-import (
-    "fmt"
-    "time"
-
-    "tangled.sh/tangled.sh/core/api/tangled"
-    "tangled.sh/tangled.sh/core/notifier"
-)
-
-type PipelineRunStatus string
-
-var (
-    PipelinePending   PipelineRunStatus = "pending"
-    PipelineRunning   PipelineRunStatus = "running"
-    PipelineFailed    PipelineRunStatus = "failed"
-    PipelineTimeout   PipelineRunStatus = "timeout"
-    PipelineCancelled PipelineRunStatus = "cancelled"
-    PipelineSuccess   PipelineRunStatus = "success"
-)
-
-type PipelineStatus struct {
-    Rkey     string            `json:"rkey"`
-    Pipeline string            `json:"pipeline"`
-    Status   PipelineRunStatus `json:"status"`
-
-    // only if Failed
-    Error    string `json:"error"`
-    ExitCode int    `json:"exit_code"`
-
-    StartedAt  time.Time `json:"started_at"`
-    UpdatedAt  time.Time `json:"updated_at"`
-    FinishedAt time.Time `json:"finished_at"`
-}
-
-func (p PipelineStatus) AsRecord() *tangled.PipelineStatus {
-    exitCode64 := int64(p.ExitCode)
-    finishedAt := p.FinishedAt.String()
-
-    return &tangled.PipelineStatus{
-        LexiconTypeID: tangled.PipelineStatusNSID,
-        Pipeline:      p.Pipeline,
-        Status:        string(p.Status),
-
-        ExitCode: &exitCode64,
-        Error:    &p.Error,
-
-        StartedAt:  p.StartedAt.String(),
-        UpdatedAt:  p.UpdatedAt.String(),
-        FinishedAt: &finishedAt,
-    }
-}
-
-func pipelineAtUri(rkey, knot string) string {
-    return fmt.Sprintf("at://%s/did:web:%s/%s", tangled.PipelineStatusNSID, knot, rkey)
-}
-
-func (db *DB) CreatePipeline(rkey, pipeline string, n *notifier.Notifier) error {
-    _, err := db.Exec(`
-        insert into pipeline_status (rkey, status, pipeline)
-        values (?, ?, ?)
-    `, rkey, PipelinePending, pipeline)
-
-    if err != nil {
-        return err
-    }
-    n.NotifyAll()
-    return nil
-}
-
-func (db *DB) MarkPipelineRunning(rkey string, n *notifier.Notifier) error {
-    _, err := db.Exec(`
-        update pipeline_status
-        set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-        where rkey = ?
-    `, PipelineRunning, rkey)
-
-    if err != nil {
-        return err
-    }
-    n.NotifyAll()
-    return nil
-}
-
-func (db *DB) MarkPipelineFailed(rkey string, exitCode int, errorMsg string, n *notifier.Notifier) error {
-    _, err := db.Exec(`
-        update pipeline_status
-        set status = ?,
-            exit_code = ?,
-            error = ?,
-            updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
-            finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-        where rkey = ?
-    `, PipelineFailed, exitCode, errorMsg, rkey)
-    if err != nil {
-        return err
-    }
-    n.NotifyAll()
-    return nil
-}
-
-func (db *DB) MarkPipelineTimeout(rkey string, n *notifier.Notifier) error {
-    _, err := db.Exec(`
-        update pipeline_status
-        set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-        where rkey = ?
-    `, PipelineTimeout, rkey)
-    if err != nil {
-        return err
-    }
-    n.NotifyAll()
-    return nil
-}
-
-func (db *DB) MarkPipelineSuccess(rkey string, n *notifier.Notifier) error {
-    _, err := db.Exec(`
-        update pipeline_status
-        set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
-            finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
-        where rkey = ?
-    `, PipelineSuccess, rkey)
-
-    if err != nil {
-        return err
-    }
-    n.NotifyAll()
-    return nil
-}
-
-func (db *DB) GetPipelineStatus(rkey string) (PipelineStatus, error) {
-    var p PipelineStatus
-    err := db.QueryRow(`
-        select rkey, status, error, exit_code, started_at, updated_at, finished_at
-        from pipelines
-        where rkey = ?
-    `, rkey).Scan(&p.Rkey, &p.Status, &p.Error, &p.ExitCode, &p.StartedAt, &p.UpdatedAt, &p.FinishedAt)
-    return p, err
-}
-
-func (db *DB) GetPipelineStatusAsRecords(cursor string) ([]PipelineStatus, error) {
-    whereClause := ""
-    args := []any{}
-    if cursor != "" {
-        whereClause = "where rkey > ?"
-        args = append(args, cursor)
-    }
-
-    query := fmt.Sprintf(`
-        select rkey, status, error, exit_code, started_at, updated_at, finished_at
-        from pipeline_status
-        %s
-        order by rkey asc
-        limit 100
-    `, whereClause)
-
-    rows, err := db.Query(query, args...)
-    if err != nil {
-        return nil, err
-    }
-    defer rows.Close()
-
-    var pipelines []PipelineStatus
-    for rows.Next() {
-        var p PipelineStatus
-        rows.Scan(&p.Rkey, &p.Status, &p.Error, &p.ExitCode, &p.StartedAt, &p.UpdatedAt, &p.FinishedAt)
-        pipelines = append(pipelines, p)
-    }
-
-    if err := rows.Err(); err != nil {
-        return nil, err
-    }
-
-    records := []*tangled.PipelineStatus{}
-    for _, p := range pipelines {
-        records = append(records, p.AsRecord())
-    }
-
-    return pipelines, nil
-}
···
 package db
 
+//
+// import (
+// 	"database/sql"
+// 	"fmt"
+// 	"time"
+//
+// 	"tangled.sh/tangled.sh/core/api/tangled"
+// 	"tangled.sh/tangled.sh/core/notifier"
+// )
+//
+// type PipelineRunStatus string
+//
+// var (
+// 	PipelinePending   PipelineRunStatus = "pending"
+// 	PipelineRunning   PipelineRunStatus = "running"
+// 	PipelineFailed    PipelineRunStatus = "failed"
+// 	PipelineTimeout   PipelineRunStatus = "timeout"
+// 	PipelineCancelled PipelineRunStatus = "cancelled"
+// 	PipelineSuccess   PipelineRunStatus = "success"
+// )
+//
+// type PipelineStatus struct {
+// 	Rkey     string            `json:"rkey"`
+// 	Pipeline string            `json:"pipeline"`
+// 	Status   PipelineRunStatus `json:"status"`
+//
+// 	// only if Failed
+// 	Error    string `json:"error"`
+// 	ExitCode int    `json:"exit_code"`
+//
+// 	LastUpdate int64     `json:"last_update"`
+// 	StartedAt  time.Time `json:"started_at"`
+// 	UpdatedAt  time.Time `json:"updated_at"`
+// 	FinishedAt time.Time `json:"finished_at"`
+// }
+//
+// func (p PipelineStatus) AsRecord() *tangled.PipelineStatus {
+// 	exitCode64 := int64(p.ExitCode)
+// 	finishedAt := p.FinishedAt.String()
+//
+// 	return &tangled.PipelineStatus{
+// 		LexiconTypeID: tangled.PipelineStatusNSID,
+// 		Pipeline:      p.Pipeline,
+// 		Status:        string(p.Status),
+//
+// 		ExitCode: &exitCode64,
+// 		Error:    &p.Error,
+//
+// 		StartedAt:  p.StartedAt.String(),
+// 		UpdatedAt:  p.UpdatedAt.String(),
+// 		FinishedAt: &finishedAt,
+// 	}
+// }
+//
+// func pipelineAtUri(rkey, knot string) string {
+// 	return fmt.Sprintf("at://%s/did:web:%s/%s", tangled.PipelineStatusNSID, knot, rkey)
+// }
+//
+// func (db *DB) CreatePipeline(rkey, pipeline string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		insert into pipeline_status (rkey, status, pipeline, last_update)
+// 		values (?, ?, ?, ?)
+// 	`, rkey, PipelinePending, pipeline, time.Now().UnixNano())
+//
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineRunning(rkey string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'), last_update = ?
+// 		where rkey = ?
+// 	`, PipelineRunning, rkey, time.Now().UnixNano())
+//
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineFailed(rkey string, exitCode int, errorMsg string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?,
+// 			exit_code = ?,
+// 			error = ?,
+// 			updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
+// 			finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
+// 			last_update = ?
+// 		where rkey = ?
+// 	`, PipelineFailed, exitCode, errorMsg, rkey, time.Now().UnixNano())
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineTimeout(rkey string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
+// 		where rkey = ?
+// 	`, PipelineTimeout, rkey)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) MarkPipelineSuccess(rkey string, n *notifier.Notifier) error {
+// 	_, err := db.Exec(`
+// 		update pipeline_status
+// 		set status = ?, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now'),
+// 			finished_at = strftime('%Y-%m-%dT%H:%M:%SZ', 'now')
+// 		where rkey = ?
+// 	`, PipelineSuccess, rkey)
+//
+// 	if err != nil {
+// 		return err
+// 	}
+// 	n.NotifyAll()
+// 	return nil
+// }
+//
+// func (db *DB) GetPipelineStatus(rkey string) (PipelineStatus, error) {
+// 	var p PipelineStatus
+// 	err := db.QueryRow(`
+// 		select rkey, status, error, exit_code, started_at, updated_at, finished_at
+// 		from pipelines
+// 		where rkey = ?
+// 	`, rkey).Scan(&p.Rkey, &p.Status, &p.Error, &p.ExitCode, &p.StartedAt, &p.UpdatedAt, &p.FinishedAt)
+// 	return p, err
+// }
+//
+// func (db *DB) GetPipelineStatusAsRecords(cursor int64) ([]PipelineStatus, error) {
+// 	whereClause := ""
+// 	args := []any{}
+// 	if cursor != 0 {
+// 		whereClause = "where created_at > ?"
+// 		args = append(args, cursor)
+// 	}
+//
+// 	query := fmt.Sprintf(`
+// 		select rkey, status, error, exit_code, created_at, started_at, updated_at, finished_at
+// 		from pipeline_status
+// 		%s
+// 		order by created_at asc
+// 		limit 100
+// 	`, whereClause)
+//
+// 	rows, err := db.Query(query, args...)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	defer rows.Close()
+//
+// 	var pipelines []PipelineStatus
+// 	for rows.Next() {
+// 		var p PipelineStatus
+// 		var pipelineError sql.NullString
+// 		var exitCode sql.NullInt64
+// 		var startedAt, updatedAt string
+// 		var finishedAt sql.NullTime
+//
+// 		err := rows.Scan(&p.Rkey, &p.Status, &pipelineError, &exitCode, &p.LastUpdate, &startedAt, &updatedAt, &finishedAt)
+// 		if err != nil {
+// 			return nil, err
+// 		}
+//
+// 		if pipelineError.Valid {
+// 			p.Error = pipelineError.String
+// 		}
+//
+// 		if exitCode.Valid {
+// 			p.ExitCode = int(exitCode.Int64)
+// 		}
+//
+// 		if v, err := time.Parse(time.RFC3339, startedAt); err == nil {
+// 			p.StartedAt = v
+// 		}
+//
+// 		if v, err := time.Parse(time.RFC3339, updatedAt); err == nil {
+// 			p.UpdatedAt = v
+// 		}
+//
+// 		if finishedAt.Valid {
+// 			p.FinishedAt = finishedAt.Time
+// 		}
+//
+// 		pipelines = append(pipelines, p)
+// 	}
+//
+// 	if err := rows.Err(); err != nil {
+// 		return nil, err
+// 	}
+//
+// 	return pipelines, nil
+// }
+83 -73  spindle/engine/engine.go
···
     "path"
     "strings"
     "sync"
-    "syscall"
 
     "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/image"
···
     "github.com/docker/docker/api/types/volume"
     "github.com/docker/docker/client"
     "github.com/docker/docker/pkg/stdcopy"
-    "golang.org/x/sync/errgroup"
     "tangled.sh/tangled.sh/core/api/tangled"
     "tangled.sh/tangled.sh/core/log"
     "tangled.sh/tangled.sh/core/notifier"
     "tangled.sh/tangled.sh/core/spindle/db"
 )
 
 const (
···
     return e, nil
 }
 
-func (e *Engine) StartWorkflows(ctx context.Context, pipeline *tangled.Pipeline, id string) error {
-    e.l.Info("starting all workflows in parallel", "pipeline", id)
 
-    err := e.db.MarkPipelineRunning(id, e.n)
-    if err != nil {
-        return err
-    }
 
-    g := errgroup.Group{}
-    for _, w := range pipeline.Workflows {
-        g.Go(func() error {
-            err := e.SetupWorkflow(ctx, id, w.Name)
             if err != nil {
                 return err
             }
 
-            defer e.DestroyWorkflow(ctx, id, w.Name)
 
             // TODO: actual checks for image/registry etc.
             var deps string
···
             cimg := path.Join("nixery.dev", deps)
             reader, err := e.docker.ImagePull(ctx, cimg, image.PullOptions{})
             if err != nil {
-                e.l.Error("pipeline failed!", "id", id, "error", err.Error())
-                err := e.db.MarkPipelineFailed(id, -1, err.Error(), e.n)
                 if err != nil {
                     return err
                 }
                 return fmt.Errorf("pulling image: %w", err)
             }
             defer reader.Close()
             io.Copy(os.Stdout, reader)
 
-            err = e.StartSteps(ctx, w.Steps, w.Name, id, cimg)
             if err != nil {
-                e.l.Error("pipeline failed!", "id", id, "error", err.Error())
-                return e.db.MarkPipelineFailed(id, -1, err.Error(), e.n)
             }
 
             return nil
-        })
     }
 
-    err = g.Wait()
-    if err != nil {
-        e.l.Error("pipeline failed!", "id", id, "error", err.Error())
-        return e.db.MarkPipelineFailed(id, -1, err.Error(), e.n)
-    }
-
-    e.l.Info("pipeline success!", "id", id)
-    return e.db.MarkPipelineSuccess(id, e.n)
 }
 
 // SetupWorkflow sets up a new network for the workflow and volumes for
 // the workspace and Nix store. These are persisted across steps and are
 // destroyed at the end of the workflow.
-func (e *Engine) SetupWorkflow(ctx context.Context, id, workflowName string) error {
-    e.l.Info("setting up workflow", "pipeline", id, "workflow", workflowName)
 
     _, err := e.docker.VolumeCreate(ctx, volume.CreateOptions{
-        Name:   workspaceVolume(id, workflowName),
         Driver: "local",
     })
     if err != nil {
         return err
     }
-    e.registerCleanup(id, workflowName, func(ctx context.Context) error {
-        return e.docker.VolumeRemove(ctx, workspaceVolume(id, workflowName), true)
     })
 
     _, err = e.docker.VolumeCreate(ctx, volume.CreateOptions{
-        Name:   nixVolume(id, workflowName),
         Driver: "local",
     })
     if err != nil {
         return err
     }
-    e.registerCleanup(id, workflowName, func(ctx context.Context) error {
-        return e.docker.VolumeRemove(ctx, nixVolume(id, workflowName), true)
     })
 
-    _, err = e.docker.NetworkCreate(ctx, networkName(id, workflowName), network.CreateOptions{
         Driver: "bridge",
     })
     if err != nil {
         return err
     }
-    e.registerCleanup(id, workflowName, func(ctx context.Context) error {
-        return e.docker.NetworkRemove(ctx, networkName(id, workflowName))
     })
 
     return nil
···
 // StartSteps starts all steps sequentially with the same base image.
 // ONLY marks pipeline as failed if container's exit code is non-zero.
 // All other errors are bubbled up.
-func (e *Engine) StartSteps(ctx context.Context, steps []*tangled.Pipeline_Step, workflowName, id, image string) error {
     // set up logging channels
     e.chanMu.Lock()
-    if _, exists := e.stdoutChans[id]; !exists {
-        e.stdoutChans[id] = make(chan string, 100)
     }
-    if _, exists := e.stderrChans[id]; !exists {
-        e.stderrChans[id] = make(chan string, 100)
     }
     e.chanMu.Unlock()
 
     // close channels after all steps are complete
     defer func() {
-        close(e.stdoutChans[id])
-        close(e.stderrChans[id])
     }()
 
     for _, step := range steps {
-        hostConfig := hostConfig(id, workflowName)
         resp, err := e.docker.ContainerCreate(ctx, &container.Config{
             Image: image,
             Cmd:   []string{"bash", "-c", step.Command},
···
             return fmt.Errorf("creating container: %w", err)
         }
 
-        err = e.docker.NetworkConnect(ctx, networkName(id, workflowName), resp.ID, nil)
         if err != nil {
             return fmt.Errorf("connecting network: %w", err)
         }
···
         wg.Add(1)
         go func() {
             defer wg.Done()
-            err := e.TailStep(ctx, resp.ID, id)
             if err != nil {
                 e.l.Error("failed to tail container", "container", resp.ID)
                 return
···
             return err
         }
 
-        err = e.DestroyStep(ctx, resp.ID, id)
         if err != nil {
             return err
         }
 
         if state.ExitCode != 0 {
-            e.l.Error("pipeline failed!", "id", id, "error", state.Error, "exit_code", state.ExitCode)
-            return e.db.MarkPipelineFailed(id, state.ExitCode, state.Error, e.n)
         }
     }
 
···
     return info.State, nil
 }
 
-func (e *Engine) TailStep(ctx context.Context, containerID, pipelineID string) error {
     logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
         Follow:     true,
         ShowStdout: true,
···
     // once all steps are done.
     go func() {
         e.chanMu.RLock()
-        stdoutCh := e.stdoutChans[pipelineID]
         e.chanMu.RUnlock()
 
         scanner := bufio.NewScanner(rpipeOut)
···
     // once all steps are done.
     go func() {
         e.chanMu.RLock()
-        stderrCh := e.stderrChans[pipelineID]
         e.chanMu.RUnlock()
 
         scanner := bufio.NewScanner(rpipeErr)
···
     return nil
 }
 
-func (e *Engine) DestroyStep(ctx context.Context, containerID, pipelineID string) error {
-    err := e.docker.ContainerKill(ctx, containerID, syscall.SIGKILL.String())
     if err != nil && !isErrContainerNotFoundOrNotRunning(err) {
         return err
     }
···
     return nil
 }
 
-func (e *Engine) DestroyWorkflow(ctx context.Context, pipelineID, workflowName string) error {
     e.cleanupMu.Lock()
-    key := fmt.Sprintf("%s-%s", pipelineID, workflowName)
 
     fns := e.cleanup[key]
     delete(e.cleanup, key)
···
 
     for _, fn := range fns {
         if err := fn(ctx); err != nil {
-            e.l.Error("failed to cleanup workflow resource", "pipeline", pipelineID, "workflow", workflowName, "err", err)
         }
     }
     return nil
 }
 
-func (e *Engine) LogChannels(pipelineID string) (stdout <-chan string, stderr <-chan string, ok bool) {
     e.chanMu.RLock()
     defer e.chanMu.RUnlock()
 
-    stdoutCh, ok1 := e.stdoutChans[pipelineID]
-    stderrCh, ok2 := e.stderrChans[pipelineID]
 
     if !ok1 || !ok2 {
         return nil, nil, false
···
     return stdoutCh, stderrCh, true
 }
 
-func (e *Engine) registerCleanup(pipelineID, workflowName string, fn cleanupFunc) {
     e.cleanupMu.Lock()
     defer e.cleanupMu.Unlock()
 
-    key := fmt.Sprintf("%s-%s", pipelineID, workflowName)
     e.cleanup[key] = append(e.cleanup[key], fn)
 }
 
-func workspaceVolume(id, name string) string {
-    return fmt.Sprintf("workspace-%s-%s", id, name)
 }
 
-func nixVolume(id, name string) string {
-    return fmt.Sprintf("nix-%s-%s", id, name)
 }
 
-func networkName(id, name string) string {
-    return fmt.Sprintf("workflow-network-%s-%s", id, name)
 }
 
-func hostConfig(id, name string) *container.HostConfig {
     hostConfig := &container.HostConfig{
         Mounts: []mount.Mount{
             {
                 Type:   mount.TypeVolume,
-                Source: workspaceVolume(id, name),
                 Target: workspaceDir,
             },
             {
                 Type:   mount.TypeVolume,
-                Source: nixVolume(id, name),
                 Target: "/nix",
             },
         },
···
     "path"
     "strings"
     "sync"
 
     "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/image"
···
     "github.com/docker/docker/api/types/volume"
     "github.com/docker/docker/client"
     "github.com/docker/docker/pkg/stdcopy"
     "tangled.sh/tangled.sh/core/api/tangled"
     "tangled.sh/tangled.sh/core/log"
     "tangled.sh/tangled.sh/core/notifier"
     "tangled.sh/tangled.sh/core/spindle/db"
+    "tangled.sh/tangled.sh/core/spindle/models"
 )
 
 const (
···
     return e, nil
 }
 
+func (e *Engine) StartWorkflows(ctx context.Context, pipeline *tangled.Pipeline, pipelineId models.PipelineId) {
+    e.l.Info("starting all workflows in parallel", "pipeline", pipelineId)
 
+    wg := sync.WaitGroup{}
+    for _, w := range pipeline.Workflows {
+        wg.Add(1)
+        go func() error {
+            defer wg.Done()
+            wid := models.WorkflowId{
+                PipelineId: pipelineId,
+                Name:       w.Name,
+            }
 
+            err := e.db.StatusRunning(wid, e.n)
             if err != nil {
                 return err
             }
 
+            err = e.SetupWorkflow(ctx, wid)
+            if err != nil {
+                e.l.Error("setting up workflow", "wid", wid, "err", err)
+                return err
+            }
+            defer e.DestroyWorkflow(ctx, wid)
 
             // TODO: actual checks for image/registry etc.
             var deps string
···
             cimg := path.Join("nixery.dev", deps)
             reader, err := e.docker.ImagePull(ctx, cimg, image.PullOptions{})
             if err != nil {
+                e.l.Error("pipeline failed!", "workflowId", wid, "error", err.Error())
+
+                err := e.db.StatusFailed(wid, err.Error(), -1, e.n)
                 if err != nil {
                     return err
                 }
+
                 return fmt.Errorf("pulling image: %w", err)
             }
             defer reader.Close()
             io.Copy(os.Stdout, reader)
 
+            err = e.StartSteps(ctx, w.Steps, wid, cimg)
             if err != nil {
+                e.l.Error("workflow failed!", "wid", wid.String(), "error", err.Error())
+
+                err := e.db.StatusFailed(wid, err.Error(), -1, e.n)
+                if err != nil {
+                    return err
+                }
+            }
+
+            err = e.db.StatusSuccess(wid, e.n)
+            if err != nil {
+                return err
             }
 
             return nil
+        }()
     }
 
+    wg.Wait()
 }
 
 // SetupWorkflow sets up a new network for the workflow and volumes for
 // the workspace and Nix store. These are persisted across steps and are
 // destroyed at the end of the workflow.
+func (e *Engine) SetupWorkflow(ctx context.Context, wid models.WorkflowId) error {
+    e.l.Info("setting up workflow", "workflow", wid)
 
     _, err := e.docker.VolumeCreate(ctx, volume.CreateOptions{
+        Name:   workspaceVolume(wid),
         Driver: "local",
     })
     if err != nil {
         return err
     }
+    e.registerCleanup(wid, func(ctx context.Context) error {
+        return e.docker.VolumeRemove(ctx, workspaceVolume(wid), true)
     })
 
     _, err = e.docker.VolumeCreate(ctx, volume.CreateOptions{
+        Name:   nixVolume(wid),
         Driver: "local",
     })
     if err != nil {
         return err
     }
+    e.registerCleanup(wid, func(ctx context.Context) error {
+        return e.docker.VolumeRemove(ctx, nixVolume(wid), true)
     })
 
+    _, err = e.docker.NetworkCreate(ctx, networkName(wid), network.CreateOptions{
         Driver: "bridge",
     })
     if err != nil {
         return err
     }
+    e.registerCleanup(wid, func(ctx context.Context) error {
+        return e.docker.NetworkRemove(ctx, networkName(wid))
     })
 
     return nil
···
 // StartSteps starts all steps sequentially with the same base image.
 // ONLY marks pipeline as failed if container's exit code is non-zero.
 // All other errors are bubbled up.
+func (e *Engine) StartSteps(ctx context.Context, steps []*tangled.Pipeline_Step, wid models.WorkflowId, image string) error {
     // set up logging channels
     e.chanMu.Lock()
+    if _, exists := e.stdoutChans[wid.String()]; !exists {
+        e.stdoutChans[wid.String()] = make(chan string, 100)
     }
+    if _, exists := e.stderrChans[wid.String()]; !exists {
+        e.stderrChans[wid.String()] = make(chan string, 100)
     }
     e.chanMu.Unlock()
 
     // close channels after all steps are complete
     defer func() {
+        close(e.stdoutChans[wid.String()])
+        close(e.stderrChans[wid.String()])
     }()
 
     for _, step := range steps {
+        hostConfig := hostConfig(wid)
         resp, err := e.docker.ContainerCreate(ctx, &container.Config{
             Image: image,
             Cmd:   []string{"bash", "-c", step.Command},
···
             return fmt.Errorf("creating container: %w", err)
         }
 
+        err = e.docker.NetworkConnect(ctx, networkName(wid), resp.ID, nil)
         if err != nil {
             return fmt.Errorf("connecting network: %w", err)
         }
···
         wg.Add(1)
         go func() {
             defer wg.Done()
+            err := e.TailStep(ctx, resp.ID, wid)
             if err != nil {
                 e.l.Error("failed to tail container", "container", resp.ID)
                 return
···
             return err
         }
 
+        err = e.DestroyStep(ctx, resp.ID)
         if err != nil {
             return err
         }
 
         if state.ExitCode != 0 {
+            e.l.Error("workflow failed!", "workflow_id", wid.String(), "error", state.Error, "exit_code", state.ExitCode)
+            // return e.db.MarkPipelineFailed(id, state.ExitCode, state.Error, e.n)
         }
     }
···
     return info.State, nil
 }
 
+func (e *Engine) TailStep(ctx context.Context, containerID string, wid models.WorkflowId) error {
     logs, err := e.docker.ContainerLogs(ctx, containerID, container.LogsOptions{
         Follow:     true,
         ShowStdout: true,
···
     // once all steps are done.
     go func() {
         e.chanMu.RLock()
+        stdoutCh := e.stdoutChans[wid.String()]
         e.chanMu.RUnlock()
 
         scanner := bufio.NewScanner(rpipeOut)
···
     // once all steps are done.
     go func() {
         e.chanMu.RLock()
+        stderrCh := e.stderrChans[wid.String()]
         e.chanMu.RUnlock()
 
         scanner := bufio.NewScanner(rpipeErr)
···
     return nil
 }
 
+func (e *Engine) DestroyStep(ctx context.Context, containerID string) error {
+    err := e.docker.ContainerKill(ctx, containerID, "9") // SIGKILL
     if err != nil && !isErrContainerNotFoundOrNotRunning(err) {
         return err
     }
···
     return nil
 }
 
+func (e *Engine) DestroyWorkflow(ctx context.Context, wid models.WorkflowId) error {
     e.cleanupMu.Lock()
+    key := wid.String()
 
     fns := e.cleanup[key]
     delete(e.cleanup, key)
···
 
     for _, fn := range fns {
         if err := fn(ctx); err != nil {
+            e.l.Error("failed to cleanup workflow resource", "workflowId", wid)
         }
     }
     return nil
 }
 
+func (e *Engine) LogChannels(wid models.WorkflowId) (stdout <-chan string, stderr <-chan string, ok bool) {
     e.chanMu.RLock()
     defer e.chanMu.RUnlock()
 
+    stdoutCh, ok1 := e.stdoutChans[wid.String()]
+    stderrCh, ok2 := e.stderrChans[wid.String()]
 
     if !ok1 || !ok2 {
         return nil, nil, false
···
     return stdoutCh, stderrCh, true
 }
 
+func (e *Engine) registerCleanup(wid models.WorkflowId, fn cleanupFunc) {
     e.cleanupMu.Lock()
     defer e.cleanupMu.Unlock()
 
+    key := wid.String()
     e.cleanup[key] = append(e.cleanup[key], fn)
 }
 
+func workspaceVolume(wid models.WorkflowId) string {
+    return fmt.Sprintf("workspace-%s", wid)
 }
 
+func nixVolume(wid models.WorkflowId) string {
+    return fmt.Sprintf("nix-%s", wid)
 }
 
+func networkName(wid models.WorkflowId) string {
+    return fmt.Sprintf("workflow-network-%s", wid)
 }
 
+func hostConfig(wid models.WorkflowId) *container.HostConfig {
     hostConfig := &container.HostConfig{
         Mounts: []mount.Mount{
             {
                 Type:   mount.TypeVolume,
+                Source: workspaceVolume(wid),
                 Target: workspaceDir,
             },
             {
                 Type:   mount.TypeVolume,
+                Source: nixVolume(wid),
                 Target: "/nix",
             },
         },
+37  spindle/models/models.go
···
+package models
+
+import (
+    "fmt"
+    "regexp"
+
+    "tangled.sh/tangled.sh/core/api/tangled"
+
+    "github.com/bluesky-social/indigo/atproto/syntax"
+)
+
+var (
+    re = regexp.MustCompile(`[^a-zA-Z0-9_.-]`)
+)
+
+type PipelineId struct {
+    Knot string
+    Rkey string
+}
+
+func (p *PipelineId) AtUri() syntax.ATURI {
+    return syntax.ATURI(fmt.Sprintf("at://did:web:%s/%s/%s", p.Knot, tangled.PipelineNSID, p.Rkey))
+}
+
+type WorkflowId struct {
+    PipelineId
+    Name string
+}
+
+func (wid WorkflowId) String() string {
+    return fmt.Sprintf("%s-%s-%s", normalize(wid.Knot), wid.Rkey, normalize(wid.Name))
+}
+
+func normalize(name string) string {
+    normalized := re.ReplaceAllString(name, "-")
+    return normalized
+}
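
WorkflowId.String() doubles as the Docker volume/network key in engine.go, so normalize rewrites anything outside [a-zA-Z0-9_.-] to a dash. A quick illustration with made-up values (knot host, rkey, and workflow name are hypothetical):

package main

import (
	"fmt"

	"tangled.sh/tangled.sh/core/spindle/models"
)

func main() {
	wid := models.WorkflowId{
		PipelineId: models.PipelineId{
			Knot: "knot.example.com", // hypothetical knot host
			Rkey: "3kabcxyz",
		},
		Name: "build & test",
	}
	// Dots are in the allowed set, so only the space/&/space run is rewritten:
	// knot.example.com-3kabcxyz-build---test
	fmt.Println(wid.String())
	// Assuming tangled.PipelineNSID is "sh.tangled.pipeline", this prints:
	// at://did:web:knot.example.com/sh.tangled.pipeline/3kabcxyz
	fmt.Println(wid.AtUri())
}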
+29 -11  spindle/queue/queue.go
···
 package queue
 
 type Job struct {
     Run    func() error
     OnFail func(error)
 }
 
 type Queue struct {
-    jobs chan Job
 }
 
-func NewQueue(size int) *Queue {
     return &Queue{
-        jobs: make(chan Job, size),
     }
 }
···
     }
 }
 
-func (q *Queue) StartRunner() {
-    go func() {
-        for job := range q.jobs {
-            if err := job.Run(); err != nil {
-                if job.OnFail != nil {
-                    job.OnFail(err)
-                }
             }
         }
-    }()
 }
···
 package queue
 
+import (
+    "sync"
+)
+
 type Job struct {
     Run    func() error
     OnFail func(error)
 }
 
 type Queue struct {
+    jobs    chan Job
+    workers int
+    wg      sync.WaitGroup
 }
 
+func NewQueue(queueSize, numWorkers int) *Queue {
     return &Queue{
+        jobs:    make(chan Job, queueSize),
+        workers: numWorkers,
     }
 }
···
     }
 }
 
+func (q *Queue) Start() {
+    for range q.workers {
+        q.wg.Add(1)
+        go q.worker()
+    }
+}
+
+func (q *Queue) worker() {
+    defer q.wg.Done()
+    for job := range q.jobs {
+        if err := job.Run(); err != nil {
+            if job.OnFail != nil {
+                job.OnFail(err)
             }
         }
+    }
+}
+
+func (q *Queue) Stop() {
+    close(q.jobs)
+    q.wg.Wait()
 }
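
Usage sketch for the reworked queue: Start launches the fixed worker pool, Enqueue hands a job to the buffered channel (its body is elided in the diff above; server.go treats its bool result as accept/reject), and Stop closes the channel and waits for in-flight jobs to drain:

package main

import (
	"fmt"

	"tangled.sh/tangled.sh/core/spindle/queue"
)

func main() {
	q := queue.NewQueue(100, 2) // buffer up to 100 jobs, run 2 workers
	q.Start()

	ok := q.Enqueue(queue.Job{
		Run: func() error {
			fmt.Println("running job")
			return nil
		},
		OnFail: func(err error) {
			fmt.Println("job failed:", err)
		},
	})
	if !ok {
		// Exact rejection semantics are elided above; a full buffer is the
		// likely cause.
		fmt.Println("job not accepted")
	}

	q.Stop() // close the channel, then wait for workers to finish
}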
+23 -13  spindle/server.go
···
     "tangled.sh/tangled.sh/core/spindle/config"
     "tangled.sh/tangled.sh/core/spindle/db"
     "tangled.sh/tangled.sh/core/spindle/engine"
     "tangled.sh/tangled.sh/core/spindle/queue"
 )
 
···
     return err
 }
 
-    jq := queue.NewQueue(100)
 
     // starts a job queue runner in the background
-    jq.StartRunner()
 
     spindle := Spindle{
         jc: jc,
···
     mux := chi.NewRouter()
 
     mux.HandleFunc("/events", s.Events)
-    mux.HandleFunc("/logs/{pipelineID}", s.Logs)
     return mux
 }
 
···
     return err
 }
 
-    ok := s.jq.Enqueue(queue.Job{
-        Run: func() error {
-            // this is a "fake" at uri for now
-            pipelineAtUri := fmt.Sprintf("at://%s/did:web:%s/%s", tangled.PipelineNSID, pipeline.TriggerMetadata.Repo.Knot, msg.Rkey)
-
-            rkey := TID()
 
-            err = s.db.CreatePipeline(rkey, pipelineAtUri, s.n)
             if err != nil {
                 return err
             }
 
-            return s.eng.StartWorkflows(ctx, &pipeline, rkey)
         },
-        OnFail: func(error) {
-            s.l.Error("pipeline run failed", "error", err)
         },
     })
     if ok {
···
     "tangled.sh/tangled.sh/core/spindle/config"
     "tangled.sh/tangled.sh/core/spindle/db"
     "tangled.sh/tangled.sh/core/spindle/engine"
+    "tangled.sh/tangled.sh/core/spindle/models"
     "tangled.sh/tangled.sh/core/spindle/queue"
 )
 
···
     return err
 }
 
+    jq := queue.NewQueue(100, 2)
 
     // starts a job queue runner in the background
+    jq.Start()
+    defer jq.Stop()
 
     spindle := Spindle{
         jc: jc,
···
     mux := chi.NewRouter()
 
     mux.HandleFunc("/events", s.Events)
+    mux.HandleFunc("/logs/{knot}/{rkey}/{name}", s.Logs)
     return mux
 }
 
···
     return err
 }
 
+    pipelineId := models.PipelineId{
+        Knot: src.Knot,
+        Rkey: msg.Rkey,
+    }
 
+    for _, w := range pipeline.Workflows {
+        if w != nil {
+            err := s.db.StatusPending(models.WorkflowId{
+                PipelineId: pipelineId,
+                Name:       w.Name,
+            }, s.n)
             if err != nil {
                 return err
             }
+        }
+    }
 
+    ok := s.jq.Enqueue(queue.Job{
+        Run: func() error {
+            s.eng.StartWorkflows(ctx, &pipeline, pipelineId)
+            return nil
         },
+        OnFail: func(jobError error) {
+            s.l.Error("pipeline run failed", "error", jobError)
         },
     })
     if ok {
+54 -23  spindle/stream.go
···
 package spindle
 
 import (
     "fmt"
     "net/http"
     "time"
 
-    "context"
 
     "github.com/go-chi/chi/v5"
     "github.com/gorilla/websocket"
···
 
 func (s *Spindle) Events(w http.ResponseWriter, r *http.Request) {
     l := s.l.With("handler", "Events")
-    l.Info("received new connection")
 
     conn, err := upgrader.Upgrade(w, r, nil)
     if err != nil {
···
         return
     }
     defer conn.Close()
-    l.Info("upgraded http to wss")
 
     ch := s.n.Subscribe()
     defer s.n.Unsubscribe(ch)
···
         }
     }()
 
-    cursor := ""
 
     // complete backfill first before going to live data
-    l.Info("going through backfill", "cursor", cursor)
     if err := s.streamPipelines(conn, &cursor); err != nil {
         l.Error("failed to backfill", "err", err)
         return
···
         // wait for new data or timeout
         select {
         case <-ctx.Done():
-            l.Info("stopping stream: client closed connection")
             return
         case <-ch:
             // we have been notified of new data
-            l.Info("going through live data", "cursor", cursor)
             if err := s.streamPipelines(conn, &cursor); err != nil {
                 l.Error("failed to stream", "err", err)
                 return
             }
         case <-time.After(30 * time.Second):
             // send a keep-alive
-            l.Info("sent keepalive")
             if err = conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
                 l.Error("failed to write control", "err", err)
             }
···
 func (s *Spindle) Logs(w http.ResponseWriter, r *http.Request) {
     l := s.l.With("handler", "Logs")
 
-    pipelineID := chi.URLParam(r, "pipelineID")
-    if pipelineID == "" {
-        http.Error(w, "pipelineID required", http.StatusBadRequest)
         return
     }
-    l = l.With("pipelineID", pipelineID)
 
     conn, err := upgrader.Upgrade(w, r, nil)
     if err != nil {
···
         return
     }
     defer conn.Close()
-    l.Info("upgraded http to wss")
 
     ctx, cancel := context.WithCancel(r.Context())
     defer cancel()
···
     go func() {
         for {
             if _, _, err := conn.NextReader(); err != nil {
-                l.Info("client disconnected", "err", err)
                 cancel()
                 return
             }
         }
     }()
 
-    if err := s.streamLogs(ctx, conn, pipelineID); err != nil {
         l.Error("streamLogs failed", "err", err)
     }
-    l.Info("logs connection closed")
 }
 
-func (s *Spindle) streamLogs(ctx context.Context, conn *websocket.Conn, pipelineID string) error {
-    l := s.l.With("pipelineID", pipelineID)
 
-    stdoutCh, stderrCh, ok := s.eng.LogChannels(pipelineID)
     if !ok {
-        return fmt.Errorf("pipelineID %q not found", pipelineID)
     }
 
     done := make(chan struct{})
···
     return nil
 }
 
-func (s *Spindle) streamPipelines(conn *websocket.Conn, cursor *string) error {
-    ops, err := s.db.GetPipelineStatusAsRecords(*cursor)
     if err != nil {
         s.l.Debug("err", "err", err)
         return err
···
         s.l.Debug("err", "err", err)
         return err
     }
-        *cursor = op.Rkey
     }
 
     return nil
···
 package spindle
 
 import (
+    "context"
     "fmt"
     "net/http"
+    "strconv"
     "time"
 
+    "tangled.sh/tangled.sh/core/spindle/models"
 
     "github.com/go-chi/chi/v5"
     "github.com/gorilla/websocket"
···
 
 func (s *Spindle) Events(w http.ResponseWriter, r *http.Request) {
     l := s.l.With("handler", "Events")
+    l.Debug("received new connection")
 
     conn, err := upgrader.Upgrade(w, r, nil)
     if err != nil {
···
         return
     }
     defer conn.Close()
+    l.Debug("upgraded http to wss")
 
     ch := s.n.Subscribe()
     defer s.n.Unsubscribe(ch)
···
         }
     }()
 
+    defaultCursor := time.Now().UnixNano()
+    cursorStr := r.URL.Query().Get("cursor")
+    cursor, err := strconv.ParseInt(cursorStr, 10, 64)
+    if err != nil {
+        l.Error("empty or invalid cursor", "invalidCursor", cursorStr, "default", defaultCursor)
+    }
+    if cursor == 0 {
+        cursor = defaultCursor
+    }
 
     // complete backfill first before going to live data
+    l.Debug("going through backfill", "cursor", cursor)
     if err := s.streamPipelines(conn, &cursor); err != nil {
         l.Error("failed to backfill", "err", err)
         return
···
         // wait for new data or timeout
         select {
         case <-ctx.Done():
+            l.Debug("stopping stream: client closed connection")
             return
         case <-ch:
             // we have been notified of new data
+            l.Debug("going through live data", "cursor", cursor)
             if err := s.streamPipelines(conn, &cursor); err != nil {
                 l.Error("failed to stream", "err", err)
                 return
             }
         case <-time.After(30 * time.Second):
             // send a keep-alive
+            l.Debug("sent keepalive")
             if err = conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(time.Second)); err != nil {
                 l.Error("failed to write control", "err", err)
             }
···
 func (s *Spindle) Logs(w http.ResponseWriter, r *http.Request) {
     l := s.l.With("handler", "Logs")
 
+    knot := chi.URLParam(r, "knot")
+    if knot == "" {
+        http.Error(w, "knot required", http.StatusBadRequest)
+        return
+    }
+
+    rkey := chi.URLParam(r, "rkey")
+    if rkey == "" {
+        http.Error(w, "rkey required", http.StatusBadRequest)
+        return
+    }
+
+    name := chi.URLParam(r, "name")
+    if name == "" {
+        http.Error(w, "name required", http.StatusBadRequest)
         return
     }
+
+    wid := models.WorkflowId{
+        PipelineId: models.PipelineId{
+            Knot: knot,
+            Rkey: rkey,
+        },
+        Name: name,
+    }
+
+    l = l.With("knot", knot, "rkey", rkey, "name", name)
 
     conn, err := upgrader.Upgrade(w, r, nil)
     if err != nil {
···
         return
     }
     defer conn.Close()
+    l.Debug("upgraded http to wss")
 
     ctx, cancel := context.WithCancel(r.Context())
     defer cancel()
···
     go func() {
         for {
             if _, _, err := conn.NextReader(); err != nil {
+                l.Debug("client disconnected", "err", err)
                 cancel()
                 return
             }
         }
     }()
 
+    if err := s.streamLogs(ctx, conn, wid); err != nil {
         l.Error("streamLogs failed", "err", err)
     }
+    l.Debug("logs connection closed")
 }
 
+func (s *Spindle) streamLogs(ctx context.Context, conn *websocket.Conn, wid models.WorkflowId) error {
+    l := s.l.With("workflow_id", wid.String())
 
+    stdoutCh, stderrCh, ok := s.eng.LogChannels(wid)
     if !ok {
+        return fmt.Errorf("workflow_id %q not found", wid.String())
     }
 
     done := make(chan struct{})
···
     return nil
 }
 
+func (s *Spindle) streamPipelines(conn *websocket.Conn, cursor *int64) error {
+    ops, err := s.db.GetEvents(*cursor)
     if err != nil {
         s.l.Debug("err", "err", err)
         return err
···
         s.l.Debug("err", "err", err)
         return err
     }
+        *cursor = op.Created
     }
 
     return nil
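
A client of the reworked /events stream connects with an optional nanosecond cursor and advances it from each event's created field, mirroring streamPipelines on the server side. A hedged sketch using gorilla/websocket — the host is illustrative, and the per-message JSON envelope is an assumption based on the Event struct's json tags, since the actual write call is elided in the diff:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// event mirrors spindle/db.Event's json tags (assumed wire format).
type event struct {
	Rkey    string `json:"rkey"`
	Nsid    string `json:"nsid"`
	Created int64  `json:"created"`
	Event   string `json:"event"`
}

func main() {
	// A zero/absent cursor is replaced server-side with time.Now().UnixNano(),
	// i.e. a live tail; pass an old cursor like 1 to request a full backfill.
	url := "wss://spindle.example.com/events?cursor=1" // hypothetical host
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cursor := int64(1)
	for {
		var ev event
		if err := conn.ReadJSON(&ev); err != nil {
			log.Fatal(err)
		}
		cursor = ev.Created // carry forward across reconnects
		log.Printf("nsid=%s rkey=%s cursor=%d payload=%s", ev.Nsid, ev.Rkey, cursor, ev.Event)
	}
}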