···
 .env
 *.rdb
 .envrc
+**/*.bleve
 # Created if following hacking.md
 genjwks.out
 /nix/vm-data
+3-1
api/tangled/actorprofile.go
···
 	Location *string `json:"location,omitempty" cborgen:"location,omitempty"`
 	// pinnedRepositories: Any ATURI, it is up to appviews to validate these fields.
 	PinnedRepositories []string `json:"pinnedRepositories,omitempty" cborgen:"pinnedRepositories,omitempty"`
-	Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
+	// pronouns: Preferred gender pronouns.
+	Pronouns *string `json:"pronouns,omitempty" cborgen:"pronouns,omitempty"`
+	Stats []string `json:"stats,omitempty" cborgen:"stats,omitempty"`
 }
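For illustration, a minimal sketch of setting the new optional field (the generated struct name is assumed to be `ActorProfile`, matching the file; `exampleProfileWithPronouns` is a hypothetical helper):

```go
package tangled

// exampleProfileWithPronouns is a hypothetical helper showing the new
// optional pronouns field. Because the field is a *string tagged with
// omitempty, it is skipped in JSON and CBOR output when left nil.
func exampleProfileWithPronouns() *ActorProfile {
	pronouns := "they/them"
	return &ActorProfile{
		Pronouns: &pronouns,
	}
}
```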
+196-2
api/tangled/cbor_gen.go
···
 	}
 
 	cw := cbg.NewCborWriter(w)
 
-	fieldCount := 7
+	fieldCount := 8
 
 	if t.Description == nil {
 		fieldCount--
···
 	}
 
 	if t.PinnedRepositories == nil {
+		fieldCount--
+	}
+
+	if t.Pronouns == nil {
 		fieldCount--
 	}
 
···
 				return err
 			}
 			if _, err := cw.WriteString(string(*t.Location)); err != nil {
+				return err
+			}
+		}
+	}
+
+	// t.Pronouns (string) (string)
+	if t.Pronouns != nil {
+
+		if len("pronouns") > 1000000 {
+			return xerrors.Errorf("Value in field \"pronouns\" was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("pronouns"))); err != nil {
+			return err
+		}
+		if _, err := cw.WriteString(string("pronouns")); err != nil {
+			return err
+		}
+
+		if t.Pronouns == nil {
+			if _, err := cw.Write(cbg.CborNull); err != nil {
+				return err
+			}
+		} else {
+			if len(*t.Pronouns) > 1000000 {
+				return xerrors.Errorf("Value in field t.Pronouns was too long")
+			}
+
+			if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Pronouns))); err != nil {
+				return err
+			}
+			if _, err := cw.WriteString(string(*t.Pronouns)); err != nil {
 				return err
 			}
 		}
···
 					}
 
 					t.Location = (*string)(&sval)
+				}
+			}
+			// t.Pronouns (string) (string)
+		case "pronouns":
+
+			{
+				b, err := cr.ReadByte()
+				if err != nil {
+					return err
+				}
+				if b != cbg.CborNull[0] {
+					if err := cr.UnreadByte(); err != nil {
+						return err
+					}
+
+					sval, err := cbg.ReadStringWithMax(cr, 1000000)
+					if err != nil {
+						return err
+					}
+
+					t.Pronouns = (*string)(&sval)
 				}
 			}
 			// t.Description (string) (string)
···
 	}
 
 	cw := cbg.NewCborWriter(w)
-	fieldCount := 8
+	fieldCount := 10
 
 	if t.Description == nil {
 		fieldCount--
···
 	}
 
 	if t.Spindle == nil {
+		fieldCount--
+	}
+
+	if t.Topics == nil {
+		fieldCount--
+	}
+
+	if t.Website == nil {
 		fieldCount--
 	}
 
···
 		}
 	}
 
+	// t.Topics ([]string) (slice)
+	if t.Topics != nil {
+
+		if len("topics") > 1000000 {
+			return xerrors.Errorf("Value in field \"topics\" was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("topics"))); err != nil {
+			return err
+		}
+		if _, err := cw.WriteString(string("topics")); err != nil {
+			return err
+		}
+
+		if len(t.Topics) > 8192 {
+			return xerrors.Errorf("Slice value in field t.Topics was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Topics))); err != nil {
+			return err
+		}
+		for _, v := range t.Topics {
+			if len(v) > 1000000 {
+				return xerrors.Errorf("Value in field v was too long")
+			}
+
+			if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
+				return err
+			}
+			if _, err := cw.WriteString(string(v)); err != nil {
+				return err
+			}
+
+		}
+	}
+
 	// t.Spindle (string) (string)
 	if t.Spindle != nil {
 
···
 		}
 	}
 
+	// t.Website (string) (string)
+	if t.Website != nil {
+
+		if len("website") > 1000000 {
+			return xerrors.Errorf("Value in field \"website\" was too long")
+		}
+
+		if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("website"))); err != nil {
+			return err
+		}
+		if _, err := cw.WriteString(string("website")); err != nil {
+			return err
+		}
+
+		if t.Website == nil {
+			if _, err := cw.Write(cbg.CborNull); err != nil {
+				return err
+			}
+		} else {
+			if len(*t.Website) > 1000000 {
+				return xerrors.Errorf("Value in field t.Website was too long")
+			}
+
+			if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Website))); err != nil {
+				return err
+			}
+			if _, err := cw.WriteString(string(*t.Website)); err != nil {
+				return err
+			}
+		}
+	}
+
 	// t.CreatedAt (string) (string)
 	if len("createdAt") > 1000000 {
 		return xerrors.Errorf("Value in field \"createdAt\" was too long")
···
 					t.Source = (*string)(&sval)
 				}
 			}
+			// t.Topics ([]string) (slice)
+		case "topics":
+
+			maj, extra, err = cr.ReadHeader()
+			if err != nil {
+				return err
+			}
+
+			if extra > 8192 {
+				return fmt.Errorf("t.Topics: array too large (%d)", extra)
+			}
+
+			if maj != cbg.MajArray {
+				return fmt.Errorf("expected cbor array")
+			}
+
+			if extra > 0 {
+				t.Topics = make([]string, extra)
+			}
+
+			for i := 0; i < int(extra); i++ {
+				{
+					var maj byte
+					var extra uint64
+					var err error
+					_ = maj
+					_ = extra
+					_ = err
+
+					{
+						sval, err := cbg.ReadStringWithMax(cr, 1000000)
+						if err != nil {
+							return err
+						}
+
+						t.Topics[i] = string(sval)
+					}
+
+				}
+			}
 			// t.Spindle (string) (string)
 		case "spindle":
 
···
 					}
 
 					t.Spindle = (*string)(&sval)
+				}
+			}
+			// t.Website (string) (string)
+		case "website":
+
+			{
+				b, err := cr.ReadByte()
+				if err != nil {
+					return err
+				}
+				if b != cbg.CborNull[0] {
+					if err := cr.UnreadByte(); err != nil {
+						return err
+					}
+
+					sval, err := cbg.ReadStringWithMax(cr, 1000000)
+					if err != nil {
+						return err
+					}
+
+					t.Website = (*string)(&sval)
 				}
 			}
 			// t.CreatedAt (string) (string)
+13-1
api/tangled/repoblob.go
···
 // RepoBlob_Output is the output of a sh.tangled.repo.blob call.
 type RepoBlob_Output struct {
 	// content: File content (base64 encoded for binary files)
-	Content string `json:"content" cborgen:"content"`
+	Content *string `json:"content,omitempty" cborgen:"content,omitempty"`
 	// encoding: Content encoding
 	Encoding *string `json:"encoding,omitempty" cborgen:"encoding,omitempty"`
 	// isBinary: Whether the file is binary
···
 	Ref string `json:"ref" cborgen:"ref"`
 	// size: File size in bytes
 	Size *int64 `json:"size,omitempty" cborgen:"size,omitempty"`
+	// submodule: Submodule information if path is a submodule
+	Submodule *RepoBlob_Submodule `json:"submodule,omitempty" cborgen:"submodule,omitempty"`
 }
 
 // RepoBlob_Signature is a "signature" in the sh.tangled.repo.blob schema.
···
 	Name string `json:"name" cborgen:"name"`
 	// when: Author timestamp
 	When string `json:"when" cborgen:"when"`
+}
+
+// RepoBlob_Submodule is a "submodule" in the sh.tangled.repo.blob schema.
+type RepoBlob_Submodule struct {
+	// branch: Branch to track in the submodule
+	Branch *string `json:"branch,omitempty" cborgen:"branch,omitempty"`
+	// name: Submodule name
+	Name string `json:"name" cborgen:"name"`
+	// url: Submodule repository URL
+	Url string `json:"url" cborgen:"url"`
 }
 
 // RepoBlob calls the XRPC method "sh.tangled.repo.blob".
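A minimal sketch of how a caller of `sh.tangled.repo.blob` might handle the reshaped output, now that `content` is optional and submodule entries carry their own metadata (`describeBlob` is a hypothetical helper):

```go
package tangled

import "fmt"

// describeBlob is a hypothetical helper: with Content now optional and the
// new Submodule field, a blob path can resolve to submodule metadata
// instead of file content.
func describeBlob(out *RepoBlob_Output) string {
	if out.Submodule != nil {
		return fmt.Sprintf("submodule %s -> %s", out.Submodule.Name, out.Submodule.Url)
	}
	if out.Content == nil {
		return "no content returned"
	}
	return fmt.Sprintf("blob at %s: %d bytes", out.Ref, len(*out.Content))
}
```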
-4
api/tangled/repotree.go
···
 
 // RepoTree_TreeEntry is a "treeEntry" in the sh.tangled.repo.tree schema.
 type RepoTree_TreeEntry struct {
-	// is_file: Whether this entry is a file
-	Is_file bool `json:"is_file" cborgen:"is_file"`
-	// is_subtree: Whether this entry is a directory/subtree
-	Is_subtree bool `json:"is_subtree" cborgen:"is_subtree"`
 	Last_commit *RepoTree_LastCommit `json:"last_commit,omitempty" cborgen:"last_commit,omitempty"`
 	// mode: File mode
 	Mode string `json:"mode" cborgen:"mode"`
+4
api/tangled/tangledrepo.go
···
 	Source *string `json:"source,omitempty" cborgen:"source,omitempty"`
 	// spindle: CI runner to send jobs to and receive results from
 	Spindle *string `json:"spindle,omitempty" cborgen:"spindle,omitempty"`
+	// topics: Topics related to the repo
+	Topics []string `json:"topics,omitempty" cborgen:"topics,omitempty"`
+	// website: Any URI related to the repo
+	Website *string `json:"website,omitempty" cborgen:"website,omitempty"`
 }
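A minimal sketch of populating the new fields (the record struct name is assumed to be `Repo`, per tangledrepo.go; values are illustrative). Both fields are omitted from output when unset, and the generated CBOR encoder caps `topics` at 8192 entries:

```go
package tangled

// exampleRepoMetadata is a hypothetical helper setting the new optional
// fields. Both are dropped from JSON/CBOR when empty; the generated encoder
// rejects a topics slice longer than 8192 entries.
func exampleRepoMetadata(r *Repo) {
	website := "https://example.com" // illustrative value
	r.Topics = []string{"git", "atproto"}
	r.Website = &website
}
```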
···
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package bleveutil
+
+import (
+	"github.com/blevesearch/bleve/v2"
+)
+
+// FlushingBatch is a batch of operations that automatically flushes to the
+// underlying index once it reaches a certain size.
+type FlushingBatch struct {
+	maxBatchSize int
+	batch        *bleve.Batch
+	index        bleve.Index
+}
+
+// NewFlushingBatch creates a new flushing batch for the specified index. Once
+// the number of operations in the batch reaches the specified limit, the batch
+// automatically flushes its operations to the index.
+func NewFlushingBatch(index bleve.Index, maxBatchSize int) *FlushingBatch {
+	return &FlushingBatch{
+		maxBatchSize: maxBatchSize,
+		batch:        index.NewBatch(),
+		index:        index,
+	}
+}
+
+// Index add a new index to batch
+func (b *FlushingBatch) Index(id string, data any) error {
+	if err := b.batch.Index(id, data); err != nil {
+		return err
+	}
+	return b.flushIfFull()
+}
+
+// Delete add a delete index to batch
+func (b *FlushingBatch) Delete(id string) error {
+	b.batch.Delete(id)
+	return b.flushIfFull()
+}
+
+func (b *FlushingBatch) flushIfFull() error {
+	if b.batch.Size() < b.maxBatchSize {
+		return nil
+	}
+	return b.Flush()
+}
+
+// Flush submit the batch and create a new one
+func (b *FlushingBatch) Flush() error {
+	err := b.index.Batch(b.batch)
+	if err != nil {
+		return err
+	}
+	b.batch = b.index.NewBatch()
+	return nil
+}
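For context, a minimal usage sketch of the batching helper above (`indexAll` is a hypothetical caller; the batch size of 100 is arbitrary):

```go
package bleveutil

import "github.com/blevesearch/bleve/v2"

// indexAll is a hypothetical caller: documents are queued on the batch and
// written to the index automatically once 100 operations accumulate; the
// final Flush writes whatever is left over.
func indexAll(idx bleve.Index, docs map[string]any) error {
	batch := NewFlushingBatch(idx, 100)
	for id, doc := range docs {
		if err := batch.Index(id, doc); err != nil {
			return err
		}
	}
	return batch.Flush()
}
```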
···
     <h1 class="font-bold text-4xl">tightly-knit<br>social coding.</h1>
 
     <p class="text-lg">
-      tangled is new social-enabled git collaboration platform built on <a class="underline" href="https://atproto.com/">atproto</a>.
+      Tangled is a decentralized Git hosting and collaboration platform.
     </p>
     <p class="text-lg">
-      we envision a place where developers have complete ownership of their
+      We envision a place where developers have complete ownership of their
       code, open source communities can freely self-govern and most
       importantly, coding can be social and fun again.
     </p>
···
 
 ```
 # oauth jwks should already be setup by the nix devshell:
-echo $TANGLED_OAUTH_JWKS
-{"crv":"P-256","d":"tELKHYH-Dko6qo4ozYcVPE1ah6LvXHFV2wpcWpi8ab4","kid":"1753352226","kty":"EC","x":"mRzYpLzAGq74kJez9UbgGfV040DxgsXpMbaVsdy8RZs","y":"azqqXzUYywMlLb2Uc5AVG18nuLXyPnXr4kI4T39eeIc"}
+echo $TANGLED_OAUTH_CLIENT_SECRET
+z42ty4RT1ovnTopY8B8ekz9NuziF2CuMkZ7rbRFpAR9jBqMc
+
+echo $TANGLED_OAUTH_CLIENT_KID
+1761667908
 
 # if not, you can set it up yourself:
-go build -o genjwks.out ./cmd/genjwks
-export TANGLED_OAUTH_JWKS="$(./genjwks.out)"
+goat key generate -t P-256
+Key Type: P-256 / secp256r1 / ES256 private key
+Secret Key (Multibase Syntax): save this securely (eg, add to password manager)
+    z42tuPDKRfM2mz2Kv953ARen2jmrPA8S9LX9tRq4RVcUMwwL
+Public Key (DID Key Syntax): share or publish this (eg, in DID document)
+    did:key:zDnaeUBxtG6Xuv3ATJE4GaWeyXM3jyamJsZw3bSPpxx4bNXDR
+
+# the secret key from above
+export TANGLED_OAUTH_CLIENT_SECRET="z42tuP..."
 
 # run redis in a new shell to store oauth sessions
 redis-server
···
 
 If for any reason you wish to disable either one of the
 services in the VM, modify [nix/vm.nix](/nix/vm.nix) and set
-`services.tangled-spindle.enable` (or
-`services.tangled-knot.enable`) to `false`.
+`services.tangled.spindle.enable` (or
+`services.tangled.knot.enable`) to `false`.
+1-1
docs/migrations.md
···
 latest revision, and change your config block like so:
 
 ```diff
-  services.tangled-knot = {
+  services.tangled.knot = {
     enable = true;
     server = {
 -     secretFile = /path/to/secret;
+19-1
docs/spindle/pipeline.md
···
 - `push`: The workflow should run every time a commit is pushed to the repository.
 - `pull_request`: The workflow should run every time a pull request is made or updated.
 - `manual`: The workflow can be triggered manually.
-- `branch`: This is a **required** field that defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event.
+- `branch`: Defines which branches the workflow should run for. If used with the `push` event, commits to the branch(es) listed here will trigger the workflow. If used with the `pull_request` event, updates to pull requests targeting the branch(es) listed here will trigger the workflow. This field has no effect with the `manual` event. Supports glob patterns using `*` and `**` (e.g., `main`, `develop`, `release-*`). Either `branch` or `tag` (or both) must be specified for `push` events.
+- `tag`: Defines which tags the workflow should run for. Only used with the `push` event - when tags matching the pattern(s) listed here are pushed, the workflow will trigger. This field has no effect with `pull_request` or `manual` events. Supports glob patterns using `*` and `**` (e.g., `v*`, `v1.*`, `release-**`). Either `branch` or `tag` (or both) must be specified for `push` events.
 
 For example, if you'd like to define a workflow that runs when commits are pushed to the `main` and `develop` branches, or when pull requests that target the `main` branch are updated, or manually, you can do so with:
 
···
     branch: ["main", "develop"]
   - event: ["pull_request"]
     branch: ["main"]
+```
+
+You can also trigger workflows on tag pushes. For instance, to run a deployment workflow when tags matching `v*` are pushed:
+
+```yaml
+when:
+  - event: ["push"]
+    tag: ["v*"]
+```
+
+You can even combine branch and tag patterns in a single constraint (the workflow triggers if either matches):
+
+```yaml
+when:
+  - event: ["push"]
+    branch: ["main", "release-*"]
+    tag: ["v*", "stable"]
 ```
 
 ## Engine
···
     if var == ""
     then throw "\$${name} must be defined, see docs/hacking.md for more details"
     else var;
+  envVarOr = name: default: let
+    var = builtins.getEnv name;
+  in
+    if var != ""
+    then var
+    else default;
+
+  plcUrl = envVarOr "TANGLED_VM_PLC_URL" "https://plc.directory";
+  jetstream = envVarOr "TANGLED_VM_JETSTREAM_ENDPOINT" "wss://jetstream1.us-west.bsky.network/subscribe";
 in
   nixpkgs.lib.nixosSystem {
     inherit system;
···
       time.timeZone = "Europe/London";
       services.getty.autologinUser = "root";
       environment.systemPackages = with pkgs; [curl vim git sqlite litecli];
-      services.tangled-knot = {
+      services.tangled.knot = {
         enable = true;
         motd = "Welcome to the development knot!\n";
         server = {
           owner = envVar "TANGLED_VM_KNOT_OWNER";
-          hostname = "localhost:6000";
+          hostname = envVarOr "TANGLED_VM_KNOT_HOST" "localhost:6000";
+          plcUrl = plcUrl;
+          jetstreamEndpoint = jetstream;
           listenAddr = "0.0.0.0:6000";
         };
       };
-      services.tangled-spindle = {
+      services.tangled.spindle = {
         enable = true;
         server = {
           owner = envVar "TANGLED_VM_SPINDLE_OWNER";
-          hostname = "localhost:6555";
+          hostname = envVarOr "TANGLED_VM_SPINDLE_HOST" "localhost:6555";
+          plcUrl = plcUrl;
+          jetstreamEndpoint = jetstream;
           listenAddr = "0.0.0.0:6555";
           dev = true;
           queueSize = 100;
···
       users = {
         # So we don't have to deal with permission clashing between
         # blank disk VMs and existing state
-        users.${config.services.tangled-knot.gitUser}.uid = 666;
-        groups.${config.services.tangled-knot.gitUser}.gid = 666;
+        users.${config.services.tangled.knot.gitUser}.uid = 666;
+        groups.${config.services.tangled.knot.gitUser}.gid = 666;
 
         # TODO: separate spindle user
       };
···
         serviceConfig.PermissionsStartOnly = true;
       };
     in {
-      knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled-knot.stateDir;
-      spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled-spindle.server.dbPath);
+      knot = mkDataSyncScripts "/mnt/knot-data" config.services.tangled.knot.stateDir;
+      spindle = mkDataSyncScripts "/mnt/spindle-data" (builtins.dirOf config.services.tangled.spindle.server.dbPath);
     };
     })
   ];
-26
scripts/appview.sh
···
-#!/bin/bash
-
-# Variables
-BINARY_NAME="appview"
-BINARY_PATH=".bin/app"
-SERVER="95.111.206.63"
-USER="appview"
-
-# SCP the binary to root's home directory
-scp "$BINARY_PATH" root@$SERVER:/root/"$BINARY_NAME"
-
-# SSH into the server and perform the necessary operations
-ssh root@$SERVER <<EOF
-  set -e # Exit on error
-
-  # Move binary to /usr/local/bin and set executable permissions
-  mv /root/$BINARY_NAME /usr/local/bin/$BINARY_NAME
-  chmod +x /usr/local/bin/$BINARY_NAME
-
-  su appview
-  cd ~
-  ./reset.sh
-EOF
-
-echo "Deployment complete."
-
-5
scripts/generate-jwks.sh
···
-#! /usr/bin/env bash
-
-set -e
-
-go run ./cmd/genjwks/