···11+package nixery
22+33+import (
44+ "tangled.org/core/spindle/models"
55+ "tangled.org/core/workflow"
66+)
// nixeryWorkflow pairs the event that triggered a workflow with the
// definition it resolved to.
type nixeryWorkflow struct {
	event models.Event // event that triggered the workflow
	def   WorkflowDef  // definition of the workflow
}
// TODO: extract general fields to workflow.WorkflowDef struct

// WorkflowDef is the nixery adapter workflow definition spec, parsed from a
// yaml file in the repository.
type WorkflowDef struct {
	Name      string                `yaml:"-"`     // name of the workflow file (set by the loader, not the yaml)
	When      []workflow.Constraint `yaml:"when"`  // constraints deciding which events trigger this workflow
	CloneOpts workflow.CloneOpts    `yaml:"clone"` // clone options for the source checkout

	// NOTE(review): no yaml tags on the fields below — they rely on the yaml
	// library's default field-name mapping; confirm this is intended.
	Dependencies map[string][]string // nix packages used for the workflow
	Steps        []Step              // workflow steps
}
// Step is a single step of a nixery workflow.
type Step struct {
	Name    string `yaml:"name"`    // human-readable step name
	Command string `yaml:"command"` // command to execute
	// BUG FIX: field was misspelled "Enviornment"; the yaml tag already said
	// "environment", so only the Go-side name changes.
	Environment map[string]string `yaml:"environment"` // extra environment variables for the command
}
3030+3131+func (d *WorkflowDef) AsInfo() models.WorkflowDef {
3232+ return models.WorkflowDef{
3333+ AdapterId: AdapterID,
3434+ Name: d.Name,
3535+ When: d.When,
3636+ }
3737+}
// ShouldRunOn reports whether this workflow definition should run for the
// given event.
//
// NOTE(review): stub — currently always returns false, meaning no nixery
// workflow is ever triggered. Confirm this is intentional until constraint
// matching against d.When is implemented.
func (d *WorkflowDef) ShouldRunOn(event models.Event) bool {
	// panic("unimplemented")
	return false
}
+93
spindle/models/adapter.go
···11+package models
22+33+import (
44+ "context"
55+66+ "github.com/bluesky-social/indigo/atproto/syntax"
77+)
// Adapter is the core of the spindle. It can use its own way to configure and
// run the workflows. The workflow definition can be either yaml files in git
// repositories or even from a dedicated web UI.
//
// An adapter is expected to hold all created workflow runs.
type Adapter interface {
	// Init initializes the adapter
	Init() error

	// Shutdown gracefully shuts down background jobs
	Shutdown(ctx context.Context) error

	// SetupRepo ensures the adapter is connected to the repository.
	// This usually includes adding a repository watcher that does sparse-clone.
	SetupRepo(ctx context.Context, repo syntax.ATURI) error

	// ListWorkflowDefs parses and returns all workflow definitions in the given
	// repository at the specified revision
	ListWorkflowDefs(ctx context.Context, repo syntax.ATURI, rev string) ([]WorkflowDef, error)

	// EvaluateEvent consumes a trigger event and returns a list of triggered
	// workflow runs. It is expected to return immediately after scheduling the
	// workflows.
	EvaluateEvent(ctx context.Context, event Event) ([]WorkflowRun, error)

	// GetActiveWorkflowRun returns the current state of a specific workflow run.
	// This method will be called regularly for active workflow runs.
	GetActiveWorkflowRun(ctx context.Context, runId syntax.ATURI) (WorkflowRun, error)

	// NOTE: basically I'm not sure about this method.
	// How to properly sync workflow.run states?
	//
	// for adapters with an external engine, they will hold every past
	// workflow.run object.
	// for adapters with an internal engine, they... should also hold every
	// past workflow.run object..?
	//
	// problem:
	// when spindle suffers downtime (spindle server shutdown),
	// external `workflow.run`s might be unsynced in "running" or "pending" state
	// same for internal `workflow.run`s.
	//
	// BUT, spindle itself is holding the runs,
	// so it already knows unsynced workflows (= workflows not finished)
	// therefore, it can just fetch them again.
	// for adapters with internal engines, they will fail to fetch the previous
	// run.
	// Leaving spindle to mark the run as "Lost" or "Failed".
	// Because of _lacking_ adapters, spindle should be able to manually
	// mark unknown runs with "lost" state.
	//
	// GetWorkflowRun : used to get background crawling
	//   XCodeCloud: ok
	//   Nixery: (will fail if unknown) -> spindle will mark workflow as failed anyways
	// StreamWorkflowRun : used to notify real-time updates
	//   XCodeCloud: ok (but old events will be lost)
	//   Nixery: same. old events on spindle downtime will be lost
	//
	// To avoid this, each adapter should hold an outbox buffer
	//
	// |
	// v

	// StreamWorkflowRun(ctx context.Context) <-chan WorkflowRun

	// ListActiveWorkflowRuns returns the current list of active workflow runs:
	// runs whose status is either Pending or Running.
	ListActiveWorkflowRuns(ctx context.Context) ([]WorkflowRun, error)

	// SubscribeWorkflowRun returns a channel of workflow run state updates.
	SubscribeWorkflowRun(ctx context.Context) <-chan WorkflowRun

	// StreamWorkflowRunLogs streams logs for a running workflow execution
	StreamWorkflowRunLogs(ctx context.Context, runId syntax.ATURI, handle func(line LogLine) error) error

	// CancelWorkflowRun attempts to stop a running workflow execution.
	// It won't do anything when the workflow has already completed.
	CancelWorkflowRun(ctx context.Context, runId syntax.ATURI) error
}
+124
spindle/models/pipeline2.go
···11+package models
22+33+import (
44+ "fmt"
55+ "slices"
66+77+ "github.com/bluesky-social/indigo/atproto/syntax"
88+ "tangled.org/core/api/tangled"
99+)
// `sh.tangled.ci.event`
//
// Event is the trigger that starts a pipeline.
type Event struct {
	SourceRepo syntax.ATURI // repository to find the workflow definition
	SourceSha  string       // sha to find the workflow definition
	TargetSha  string       // sha to run the workflow
	// TODO: union type of:
	// 1. PullRequestEvent
	// 2. PushEvent
	// 3. ManualEvent
}
// AsRecord converts the event to its `sh.tangled.ci.event` lexicon record.
//
// NOTE(review): stub — panics until the record mapping is implemented.
func (e *Event) AsRecord() tangled.CiEvent {
	// var meta tangled.CiEvent_Meta
	// return tangled.CiEvent{
	// 	Meta: &meta,
	// }
	panic("unimplemented")
}
// `sh.tangled.ci.pipeline`
//
// Pipeline2 is basically a group of workflows triggered by a single event.
type Pipeline2 struct {
	Did  syntax.DID       // DID of the repo holding this record
	Rkey syntax.RecordKey // record key of this pipeline record

	Event        Event         // event that triggered the pipeline
	WorkflowRuns []WorkflowRun // workflow runs inside this pipeline
}
4040+4141+func (p *Pipeline2) AtUri() syntax.ATURI {
4242+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", p.Did, tangled.CiPipelineNSID, p.Rkey))
4343+}
4444+4545+func (p *Pipeline2) AsRecord() tangled.CiPipeline {
4646+ event := p.Event.AsRecord()
4747+ runs := make([]string, len(p.WorkflowRuns))
4848+ for i, run := range p.WorkflowRuns {
4949+ runs[i] = run.AtUri().String()
5050+ }
5151+ return tangled.CiPipeline{
5252+ Event: &event,
5353+ WorkflowRuns: runs,
5454+ }
5555+}
// `sh.tangled.ci.workflow.run`
//
// WorkflowRun is the state of a single workflow execution.
type WorkflowRun struct {
	Did  syntax.DID       // DID of the repo holding this record
	Rkey syntax.RecordKey // record key of this run record

	AdapterId string         // adapter id
	Name      string         // name of workflow run (not workflow definition name!)
	Status    WorkflowStatus // workflow status
	// TODO: can add some custom fields like adapter-specific log-id
}
6767+6868+func (r WorkflowRun) WithStatus(status WorkflowStatus) WorkflowRun {
6969+ return WorkflowRun{
7070+ Did: r.Did,
7171+ Rkey: r.Rkey,
7272+ AdapterId: r.AdapterId,
7373+ Name: r.Name,
7474+ Status: status,
7575+ }
7676+}
7777+7878+func (r *WorkflowRun) AtUri() syntax.ATURI {
7979+ return syntax.ATURI(fmt.Sprintf("at://%s/%s/%s", r.Did, tangled.CiWorkflowRunNSID, r.Rkey))
8080+}
8181+8282+func (r *WorkflowRun) AsRecord() tangled.CiWorkflowRun {
8383+ statusStr := string(r.Status)
8484+ return tangled.CiWorkflowRun{
8585+ Adapter: r.AdapterId,
8686+ Name: r.Name,
8787+ Status: &statusStr,
8888+ }
8989+}
// `sh.tangled.ci.workflow.status`
//
// WorkflowStatus is the lifecycle state of a workflow run.
type WorkflowStatus string

// Workflow status values. Declared as constants rather than vars: they are
// fixed protocol strings and must not be reassignable at runtime.
const (
	WorkflowStatusPending   WorkflowStatus = "pending"
	WorkflowStatusRunning   WorkflowStatus = "running"
	WorkflowStatusFailed    WorkflowStatus = "failed"
	WorkflowStatusCancelled WorkflowStatus = "cancelled"
	WorkflowStatusSuccess   WorkflowStatus = "success"
	WorkflowStatusTimeout   WorkflowStatus = "timeout"
)

// activeStatuses lists the statuses of runs that are still in progress.
var activeStatuses = []WorkflowStatus{
	WorkflowStatusPending,
	WorkflowStatusRunning,
}

// IsActive reports whether the run is still in progress (pending or running).
func (s WorkflowStatus) IsActive() bool {
	return slices.Contains(activeStatuses, s)
}

// IsFinish reports whether the run has reached a terminal state
// (failed, cancelled, success, or timeout).
func (s WorkflowStatus) IsFinish() bool {
	return !s.IsActive()
}
// `sh.tangled.ci.workflow.def`
//
// Brief information about the workflow definition. A workflow can be defined
// in any form. This is a common info struct for any workflow definitions.
type WorkflowDef struct {
	AdapterId string // adapter id
	Name      string // name of the workflow (usually the yml file name)
	When      any    // events the workflow is listening to
}
+40
spindle/pipeline.go
···11+package spindle
22+33+import (
44+ "context"
55+66+ "tangled.org/core/spindle/models"
77+)
88+99+// createPipeline creates a pipeline from given event.
1010+// It will call `EvaluateEvent` for all adapters, gather the triggered workflow
1111+// runs, and constuct a pipeline record from them. pipeline record. It will
1212+// return nil if no workflow run has triggered.
1313+//
1414+// NOTE: This method won't fail. If `adapter.EvaluateEvent` returns an error,
1515+// the error will be logged but won't bubble-up.
1616+//
1717+// NOTE: Adapters might create sub-event on its own for workflows triggered by
1818+// other workflow runs.
1919+func (s *Spindle) createPipeline(ctx context.Context, event models.Event) (*models.Pipeline2) {
2020+ l := s.l
2121+2222+ pipeline := models.Pipeline2{
2323+ Event: event,
2424+ }
2525+2626+ // TODO: run in parallel
2727+ for id, adapter := range s.adapters {
2828+ runs, err := adapter.EvaluateEvent(ctx, event)
2929+ if err != nil {
3030+ l.Error("failed to process trigger from adapter '%s': %w", id, err)
3131+ }
3232+ pipeline.WorkflowRuns = append(pipeline.WorkflowRuns, runs...)
3333+ }
3434+3535+ if len(pipeline.WorkflowRuns) == 0 {
3636+ return nil
3737+ }
3838+3939+ return &pipeline
4040+}
+169
spindle/repomanager/repomanager.go
···11+package repomanager
22+33+import (
44+ "bufio"
55+ "bytes"
66+ "context"
77+ "errors"
88+ "fmt"
99+ "os"
1010+ "os/exec"
1111+ "path/filepath"
1212+ "slices"
1313+ "strings"
1414+1515+ "github.com/bluesky-social/indigo/atproto/syntax"
1616+ "github.com/go-git/go-git/v5"
1717+ "github.com/go-git/go-git/v5/config"
1818+ "github.com/go-git/go-git/v5/plumbing/object"
1919+ kgit "tangled.org/core/knotserver/git"
2020+ "tangled.org/core/types"
2121+)
// RepoManager manages a `sh.tangled.repo` record with its git context.
// It can be used to efficiently fetch the filetree of the repository.
type RepoManager struct {
	repoDir string // root directory under which all local clones live
	// TODO: it would be nice if RepoManager can be configured with different
	// strategies:
	// - use db as an only source for repo records
	// - use atproto if record doesn't exist from the db
	// - always use atproto
	// hmm do we need `RepoStore` interface?
	// now `DbRepoStore` and `AtprotoRepoStore` can implement both.
	// all `RepoStore` objects will hold `KnotStore` interface, so they can
	// source the knot store if needed.

	// but now we can't do complex queries like "get repo with issue count"
	// that kind of queries will be done directly from `appview.DB` struct
	// is graphql better tech for atproto?
}
4141+4242+func New(repoDir string) RepoManager {
4343+ return RepoManager{
4444+ repoDir: repoDir,
4545+ }
4646+}
4747+4848+// TODO: RepoManager can return file tree from repoAt & rev
4949+// It will start syncing the repository if doesn't exist
5050+5151+// RegisterRepo starts sparse-syncing repository with paths
5252+func (m *RepoManager) RegisterRepo(ctx context.Context, repoAt syntax.ATURI, paths []string) error {
5353+ repoPath := m.repoPath(repoAt)
5454+ exist, err := isDir(repoPath)
5555+ if err != nil {
5656+ return fmt.Errorf("checking dir info: %w", err)
5757+ }
5858+ var sparsePaths []string
5959+ if !exist {
6060+ // init bare git repo
6161+ repo, err := git.PlainInit(repoPath, true)
6262+ if err != nil {
6363+ return fmt.Errorf("initializing repo: %w", err)
6464+ }
6565+ _, err = repo.CreateRemote(&config.RemoteConfig{
6666+ Name: "origin",
6767+ URLs: []string{m.repoCloneUrl(repoAt)},
6868+ })
6969+ if err != nil {
7070+ return fmt.Errorf("configuring repo remote: %w", err)
7171+ }
7272+ } else {
7373+ // get sparse-checkout list
7474+ sparsePaths, err = func(path string) ([]string, error) {
7575+ var stdout bytes.Buffer
7676+ listCmd := exec.Command("git", "-C", path, "sparse-checkout", "list")
7777+ listCmd.Stdout = &stdout
7878+ if err := listCmd.Run(); err != nil {
7979+ return nil, err
8080+ }
8181+8282+ var sparseList []string
8383+ scanner := bufio.NewScanner(&stdout)
8484+ for scanner.Scan() {
8585+ line := strings.TrimSpace(scanner.Text())
8686+ if line == "" {
8787+ continue
8888+ }
8989+ sparseList = append(sparseList, line)
9090+ }
9191+ if err := scanner.Err(); err != nil {
9292+ return nil, fmt.Errorf("scanning stdout: %w", err)
9393+ }
9494+9595+ return sparseList, nil
9696+ }(repoPath)
9797+ if err != nil {
9898+ return fmt.Errorf("parsing sparse-checkout list: %w", err)
9999+ }
100100+101101+ // add paths to sparse-checkout list
102102+ for _, path := range paths {
103103+ sparsePaths = append(sparsePaths, path)
104104+ }
105105+ sparsePaths = slices.Collect(slices.Values(sparsePaths))
106106+ }
107107+108108+ // set sparse-checkout list
109109+ args := append([]string{"-C", repoPath, "sparse-checkout", "set", "--no-cone"}, sparsePaths...)
110110+ if err := exec.Command("git", args...).Run(); err != nil {
111111+ return fmt.Errorf("setting sparse-checkout list: %w", err)
112112+ }
113113+ return nil
114114+}
// SyncRepo sparse-fetches a specific rev of the repo.
//
// NOTE(review): stub — panics until fetching is implemented.
func (m *RepoManager) SyncRepo(ctx context.Context, repo syntax.ATURI, rev string) error {
	// TODO: fetch repo with rev.
	panic("unimplemented")
}
// Open opens the local clone of the repo at the given rev.
func (m *RepoManager) Open(repo syntax.ATURI, rev string) (*kgit.GitRepo, error) {
	// TODO: don't depend on knot/git
	return kgit.Open(m.repoPath(repo), rev)
}
126126+127127+func (m *RepoManager) FileTree(ctx context.Context, repo syntax.ATURI, rev, path string) ([]types.NiceTree, error) {
128128+ if err := m.SyncRepo(ctx, repo, rev); err != nil {
129129+ return nil, fmt.Errorf("syncing git repo")
130130+ }
131131+ gr, err := m.Open(repo, rev)
132132+ if err != nil {
133133+ return nil, err
134134+ }
135135+ dir, err := gr.FileTree(ctx, path)
136136+ if err != nil {
137137+ if errors.Is(err, object.ErrDirectoryNotFound) {
138138+ return nil, nil
139139+ }
140140+ return nil, fmt.Errorf("loading file tree: %w", err)
141141+ }
142142+ return dir, err
143143+}
144144+145145+func (m *RepoManager) repoPath(repo syntax.ATURI) string {
146146+ return filepath.Join(
147147+ m.repoDir,
148148+ repo.Authority().String(),
149149+ repo.Collection().String(),
150150+ repo.RecordKey().String(),
151151+ )
152152+}
// repoCloneUrl resolves the https clone URL for the given repo AT URI.
//
// NOTE(review): stub — panics until the repo/knot lookup is implemented.
func (m *RepoManager) repoCloneUrl(repo syntax.ATURI) string {
	// 1. get repo & knot models from db. fetch it if doesn't exist
	// 2. construct https clone url
	panic("unimplemented")
}
// isDir reports whether path exists and is a directory. A non-existent path
// is not an error: it simply reports (false, nil). Any other stat failure is
// returned to the caller.
func isDir(path string) (bool, error) {
	info, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}
	return info.IsDir(), nil
}