···
 	LexiconTypeID string `json:"$type,const=sh.tangled.repo.pull.status" cborgen:"$type,const=sh.tangled.repo.pull.status"`
 	Pull string `json:"pull" cborgen:"pull"`
 	// status: status of the pull request
-	Status *string `json:"status,omitempty" cborgen:"status,omitempty"`
+	Status string `json:"status" cborgen:"status"`
 }
api/tangled/repoartifact.go (+31)
···
+// Code generated by cmd/lexgen (see Makefile's lexgen); DO NOT EDIT.
+
+package tangled
+
+// schema: sh.tangled.repo.artifact
+
+import (
+	"github.com/bluesky-social/indigo/lex/util"
+)
+
+const (
+	RepoArtifactNSID = "sh.tangled.repo.artifact"
+)
+
+func init() {
+	util.RegisterType("sh.tangled.repo.artifact", &RepoArtifact{})
+} //
+// RECORDTYPE: RepoArtifact
+type RepoArtifact struct {
+	LexiconTypeID string `json:"$type,const=sh.tangled.repo.artifact" cborgen:"$type,const=sh.tangled.repo.artifact"`
+	// artifact: the artifact
+	Artifact *util.LexBlob `json:"artifact" cborgen:"artifact"`
+	// createdAt: time of creation of this artifact
+	CreatedAt string `json:"createdAt" cborgen:"createdAt"`
+	// name: name of the artifact
+	Name string `json:"name" cborgen:"name"`
+	// repo: repo that this artifact is being uploaded to
+	Repo string `json:"repo" cborgen:"repo"`
+	// tag: hash of the tag object that this artifact is attached to (only annotated tags are supported)
+	Tag util.LexBytes `json:"tag,omitempty" cborgen:"tag,omitempty"`
+}
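For orientation, here is a minimal sketch of how this generated record might be populated before being written to a PDS. The helper name, the sample values, and the idea of constructing it directly are assumptions for illustration; only the type, its fields, and `RepoArtifactNSID` come from the generated file above.

```go
package tangled

import (
	"time"

	"github.com/bluesky-social/indigo/lex/util"
)

// newRepoArtifact is a hypothetical constructor: `blob` is assumed to come
// from a prior blob upload, and tagHash is the 20-byte hash of an annotated
// tag object (matching the field comments above).
func newRepoArtifact(blob *util.LexBlob, tagHash []byte, repoAt, name string) *RepoArtifact {
	return &RepoArtifact{
		LexiconTypeID: RepoArtifactNSID,
		Artifact:      blob,
		CreatedAt:     time.Now().UTC().Format(time.RFC3339),
		Name:          name,   // e.g. "tangled-v0.1.0.tar.gz"
		Repo:          repoAt, // at:// URI of the repo this artifact belongs to
		Tag:           util.LexBytes(tagHash),
	}
}
```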
···
 import (
 	"context"
 	"database/sql"
+	"fmt"
 	"log"

 	_ "github.com/mattn/go-sqlite3"
···
 		unique(did, email)
 	);

+	create table if not exists artifacts (
+		-- id
+		id integer primary key autoincrement,
+		did text not null,
+		rkey text not null,
+
+		-- meta
+		repo_at text not null,
+		tag binary(20) not null,
+		created text not null default (strftime('%Y-%m-%dT%H:%M:%SZ', 'now')),
+
+		-- data
+		blob_cid text not null,
+		name text not null,
+		size integer not null default 0,
+		mimetype string not null default "*/*",
+
+		-- constraints
+		unique(did, rkey), -- record must be unique
+		unique(repo_at, tag, name), -- for a given tag object, each file must be unique
+		foreign key (repo_at) references repos(at_uri) on delete cascade
+	);
+
+	create table if not exists profile (
+		-- id
+		id integer primary key autoincrement,
+		did text not null,
+
+		-- data
+		description text not null,
+		include_bluesky integer not null default 0,
+		location text,
+
+		-- constraints
+		unique(did)
+	);
+	create table if not exists profile_links (
+		-- id
+		id integer primary key autoincrement,
+		did text not null,
+
+		-- data
+		link text not null,
+
+		-- constraints
+		foreign key (did) references profile(did) on delete cascade
+	);
+	create table if not exists profile_stats (
+		-- id
+		id integer primary key autoincrement,
+		did text not null,
+
+		-- data
+		kind text not null check (kind in (
+			"merged-pull-request-count",
+			"closed-pull-request-count",
+			"open-pull-request-count",
+			"open-issue-count",
+			"closed-issue-count",
+			"repository-count"
+		)),
+
+		-- constraints
+		foreign key (did) references profile(did) on delete cascade
+	);
+	create table if not exists profile_pinned_repositories (
+		-- id
+		id integer primary key autoincrement,
+		did text not null,
+
+		-- data
+		at_uri text not null,
+
+		-- constraints
+		unique(did, at_uri),
+		foreign key (did) references profile(did) on delete cascade,
+		foreign key (at_uri) references repos(at_uri) on delete cascade
+	);
+
+	create table if not exists oauth_requests (
+		id integer primary key autoincrement,
+		auth_server_iss text not null,
+		state text not null,
+		did text not null,
+		handle text not null,
+		pds_url text not null,
+		pkce_verifier text not null,
+		dpop_auth_server_nonce text not null,
+		dpop_private_jwk text not null
+	);
+
+	create table if not exists oauth_sessions (
+		id integer primary key autoincrement,
+		did text not null,
+		handle text not null,
+		pds_url text not null,
+		auth_server_iss text not null,
+		access_jwt text not null,
+		refresh_jwt text not null,
+		dpop_pds_nonce text,
+		dpop_auth_server_nonce text not null,
+		dpop_private_jwk text not null,
+		expiry text not null
+	);
+
 	create table if not exists migrations (
 		id integer primary key autoincrement,
 		name text unique
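To make the new `artifacts` columns concrete, here is a hedged sketch of an insert against this schema; the helper name and query are illustrative assumptions placed in the same `db` package, not data-access code from this changeset.

```go
package db

// addArtifact is a hypothetical insert matching the artifacts table above.
// `tag` carries the raw 20-byte annotated-tag hash; `created` is left to the
// column default, and the unique(did, rkey) constraint rejects duplicates.
func addArtifact(e Execer, did, rkey, repoAt string, tag []byte, blobCid, name string, size int64, mimetype string) error {
	_, err := e.Exec(
		`insert into artifacts (did, rkey, repo_at, tag, blob_cid, name, size, mimetype)
		 values (?, ?, ?, ?, ?, ?, ?, ?)`,
		did, rkey, repoAt, tag, blobCid, name, size, mimetype,
	)
	return err
}
```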
···

 	return nil
 }
+
+type filter struct {
+	key string
+	arg any
+}
+
+func Filter(key string, arg any) filter {
+	return filter{
+		key: key,
+		arg: arg,
+	}
+}
+
+func (f filter) Condition() string {
+	return fmt.Sprintf("%s = ?", f.key)
+}
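The `filter` type above only renders a single `key = ?` condition, so callers presumably collect several filters into a `where` clause and pass the arguments positionally. A hypothetical in-package sketch of such a caller; the function, its query, and the `db` package name are assumptions, while `Filter`, `Condition`, and `Execer` come from this changeset.

```go
package db

import (
	"database/sql"
	"strings"
)

// listArtifactRows assembles a WHERE clause from any number of filters,
// e.g. listArtifactRows(d, Filter("did", did), Filter("repo_at", repoAt)).
func listArtifactRows(e Execer, filters ...filter) (*sql.Rows, error) {
	var conds []string
	var args []any
	for _, f := range filters {
		conds = append(conds, f.Condition()) // renders "key = ?"
		args = append(args, f.arg)
	}
	query := "select did, rkey, name from artifacts"
	if len(conds) > 0 {
		query += " where " + strings.Join(conds, " and ")
	}
	return e.Query(query, args...)
}
```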
appview/db/follow.go (+6)
···
 	return err
 }

+// Remove a follow
+func DeleteFollowByRkey(e Execer, userDid, rkey string) error {
+	_, err := e.Exec(`delete from follows where user_did = ? and rkey = ?`, userDid, rkey)
+	return err
+}
+
 func GetFollowerFollowing(e Execer, did string) (int, int, error) {
 	followers, following := 0, 0
 	err := e.QueryRow(
appview/db/issues.go (+35 -20)
···
 	"time"

 	"github.com/bluesky-social/indigo/atproto/syntax"
+	"tangled.sh/tangled.sh/core/appview/pagination"
 )

 type Issue struct {
···
 	return ownerDid, err
 }

-func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool) ([]Issue, error) {
+func GetIssues(e Execer, repoAt syntax.ATURI, isOpen bool, page pagination.Page) ([]Issue, error) {
 	var issues []Issue
 	openValue := 0
 	if isOpen {
···
 	}

 	rows, err := e.Query(
-		`select
-			i.owner_did,
-			i.issue_id,
-			i.created,
-			i.title,
-			i.body,
-			i.open,
-			count(c.id)
-		from
-			issues i
-		left join
-			comments c on i.repo_at = c.repo_at and i.issue_id = c.issue_id
-		where
-			i.repo_at = ? and i.open = ?
-		group by
-			i.id, i.owner_did, i.issue_id, i.created, i.title, i.body, i.open
-		order by
-			i.created desc`,
-		repoAt, openValue)
+		`
+		with numbered_issue as (
+			select
+				i.owner_did,
+				i.issue_id,
+				i.created,
+				i.title,
+				i.body,
+				i.open,
+				count(c.id) as comment_count,
+				row_number() over (order by i.created desc) as row_num
+			from
+				issues i
+			left join
+				comments c on i.repo_at = c.repo_at and i.issue_id = c.issue_id
+			where
+				i.repo_at = ? and i.open = ?
+			group by
+				i.id, i.owner_did, i.issue_id, i.created, i.title, i.body, i.open
+		)
+		select
+			owner_did,
+			issue_id,
+			created,
+			title,
+			body,
+			open,
+			comment_count
+		from
+			numbered_issue
+		where
+			row_num between ? and ?`,
+		repoAt, openValue, page.Offset+1, page.Offset+page.Limit)
 	if err != nil {
 		return nil, err
 	}
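The window-function rewrite numbers issues newest-first and slices the page with `row_num between Offset+1 and Offset+Limit`. A hedged sketch of a caller, assuming `pagination.Page` is a plain struct whose `Offset` and `Limit` fields can be set directly; only `GetIssues` and those two field names appear in this diff.

```go
package db

import (
	"github.com/bluesky-social/indigo/atproto/syntax"

	"tangled.sh/tangled.sh/core/appview/pagination"
)

// secondOpenIssuesPage is illustrative only: it fetches open issues 31-60,
// i.e. the second page of 30, for the given repo.
func secondOpenIssuesPage(e Execer, repoAt syntax.ATURI) ([]Issue, error) {
	page := pagination.Page{Offset: 30, Limit: 30}
	return GetIssues(e, repoAt, true, page)
}
```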
···
+# avatar
+
+avatar is a small service that fetches your pretty Bluesky avatar and caches it on Cloudflare.
+It uses a shared secret `AVATAR_SHARED_SECRET` to ensure requests only originate from the trusted appview.
+
+It's deployed using `wrangler` like so:
+
+```
+npx wrangler deploy
+npx wrangler secrets put AVATAR_SHARED_SECRET
+```
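The worker below expects a path of the form `/<signature>/<actor>`, where the signature is a hex-encoded HMAC-SHA256 of the actor keyed by `AVATAR_SHARED_SECRET`. For reference, a rough sketch of the signing side in Go; the function name and the host are assumptions, not taken from the appview code.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// avatarURL signs the actor (handle or DID) with the shared secret and
// builds a /<signature>/<actor> URL, matching what the worker verifies.
func avatarURL(sharedSecret, actor string) string {
	mac := hmac.New(sha256.New, []byte(sharedSecret))
	mac.Write([]byte(actor))
	sig := hex.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("https://avatars.example.com/%s/%s", sig, actor)
}

func main() {
	fmt.Println(avatarURL("secret-from-env", "did:plc:example"))
}
```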
avatar/src/index.js (+88)
···
+export default {
+  async fetch(request, env) {
+    const url = new URL(request.url);
+    const { pathname } = url;
+
+    if (!pathname || pathname === '/') {
+      return new Response(`This is Tangled's avatar service. It fetches your pretty avatar from Bluesky and caches it on Cloudflare.
+You can't use this directly, unfortunately, since all requests are signed and may only originate from the appview.`);
+    }
+
+    const cache = caches.default;
+
+    let cacheKey = request.url;
+    let response = await cache.match(cacheKey);
+    if (response) {
+      return response;
+    }
+
+    const pathParts = pathname.slice(1).split('/');
+    if (pathParts.length < 2) {
+      return new Response('Bad URL', { status: 400 });
+    }
+
+    const [signatureHex, actor] = pathParts;
+
+    const actorBytes = new TextEncoder().encode(actor);
+
+    const key = await crypto.subtle.importKey(
+      'raw',
+      new TextEncoder().encode(env.AVATAR_SHARED_SECRET),
+      { name: 'HMAC', hash: 'SHA-256' },
+      false,
+      ['sign', 'verify'],
+    );
+
+    const computedSigBuffer = await crypto.subtle.sign('HMAC', key, actorBytes);
+    const computedSig = Array.from(new Uint8Array(computedSigBuffer))
+      .map((b) => b.toString(16).padStart(2, '0'))
+      .join('');
+
+    console.log({
+      level: 'debug',
+      message: 'avatar request for: ' + actor,
+      computedSignature: computedSig,
+      providedSignature: signatureHex,
+    });
+
+    const sigBytes = Uint8Array.from(signatureHex.match(/.{2}/g).map((b) => parseInt(b, 16)));
+    const valid = await crypto.subtle.verify('HMAC', key, sigBytes, actorBytes);
+
+    if (!valid) {
+      return new Response('Invalid signature', { status: 403 });
+    }
+
+    try {
+      const profileResponse = await fetch(`https://public.api.bsky.app/xrpc/app.bsky.actor.getProfile?actor=${actor}`, { method: 'GET' });
+      const profile = await profileResponse.json();
+      const avatar = profile.avatar;
+
+      if (!avatar) {
+        return new Response(`avatar not found for ${actor}.`, { status: 404 });
+      }
+
+      // fetch the actual avatar image
+      const avatarResponse = await fetch(avatar);
+      if (!avatarResponse.ok) {
+        return new Response(`failed to fetch avatar for ${actor}.`, { status: avatarResponse.status });
+      }
+
+      const avatarData = await avatarResponse.arrayBuffer();
+      const contentType = avatarResponse.headers.get('content-type') || 'image/jpeg';
+
+      response = new Response(avatarData, {
+        headers: {
+          'Content-Type': contentType,
+          'Cache-Control': 'public, max-age=3600',
+        },
+      });
+
+      // cache it in cf using request.url as the key
+      await cache.put(cacheKey, response.clone());
+
+      return response;
+    } catch (error) {
+      return new Response(`error fetching avatar: ${error.message}`, { status: 500 });
+    }
+  },
+};
···
+# camo
+
+Camo is Tangled's "camouflage" service, much like [GitHub's](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-anonymized-urls).
+
+Camo uses a shared secret `CAMO_SHARED_SECRET` to verify HMAC signatures. URLs are of the form:
+
+```
+https://camo.tangled.sh/<signature>/<hex-encoded-origin-url>
+```
+
+It's pretty barebones for the moment and doesn't support a whole lot of what the
+big G's does. Ours is a Cloudflare Worker, deployed using `wrangler` like so:
+
+```
+npx wrangler deploy
+npx wrangler secrets put CAMO_SHARED_SECRET
+```
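Judging from the worker below, the signature is an HMAC-SHA256 over the raw origin-URL bytes, and the URL itself travels hex-encoded. A hedged Go sketch of building such a URL on the client side; the function name is an assumption, while `camo.tangled.sh` comes from the README above.

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// camoURL signs originURL with the shared secret and hex-encodes both the
// signature and the URL, matching <signature>/<hex-encoded-origin-url>.
func camoURL(sharedSecret, originURL string) string {
	mac := hmac.New(sha256.New, []byte(sharedSecret))
	mac.Write([]byte(originURL))
	sig := hex.EncodeToString(mac.Sum(nil))
	return fmt.Sprintf("https://camo.tangled.sh/%s/%s", sig, hex.EncodeToString([]byte(originURL)))
}

func main() {
	fmt.Println(camoURL("secret-from-env", "https://knot.example.com/raw/image.png"))
}
```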
camo/src/index.js (+101)
···
+export default {
+  async fetch(request, env) {
+    const url = new URL(request.url);
+
+    if (url.pathname === "/" || url.pathname === "") {
+      return new Response(
+        "This is Tangled's Camo service. It proxies images served from knots via Cloudflare.",
+      );
+    }
+
+    const cache = caches.default;
+
+    const pathParts = url.pathname.slice(1).split("/");
+    if (pathParts.length < 2) {
+      return new Response("Bad URL", { status: 400 });
+    }
+
+    const [signatureHex, ...hexUrlParts] = pathParts;
+    const hexUrl = hexUrlParts.join("");
+    const urlBytes = Uint8Array.from(
+      hexUrl.match(/.{2}/g).map((b) => parseInt(b, 16)),
+    );
+    const targetUrl = new TextDecoder().decode(urlBytes);
+
+    // check if we have an entry in the cache with the target url
+    let cacheKey = new Request(targetUrl);
+    let response = await cache.match(cacheKey);
+    if (response) {
+      return response;
+    }
+
+    // else compute the signature
+    const key = await crypto.subtle.importKey(
+      "raw",
+      new TextEncoder().encode(env.CAMO_SHARED_SECRET),
+      { name: "HMAC", hash: "SHA-256" },
+      false,
+      ["sign", "verify"],
+    );
+
+    const computedSigBuffer = await crypto.subtle.sign("HMAC", key, urlBytes);
+    const computedSig = Array.from(new Uint8Array(computedSigBuffer))
+      .map((b) => b.toString(16).padStart(2, "0"))
+      .join("");
+
+    console.log({
+      level: "debug",
+      message: "camo target: " + targetUrl,
+      computedSignature: computedSig,
+      providedSignature: signatureHex,
+      targetUrl: targetUrl,
+    });
+
+    const sigBytes = Uint8Array.from(
+      signatureHex.match(/.{2}/g).map((b) => parseInt(b, 16)),
+    );
+    const valid = await crypto.subtle.verify("HMAC", key, sigBytes, urlBytes);
+
+    if (!valid) {
+      return new Response("Invalid signature", { status: 403 });
+    }
+
+    let parsedUrl;
+    try {
+      parsedUrl = new URL(targetUrl);
+      if (!["https:", "http:"].includes(parsedUrl.protocol)) {
+        return new Response("Only HTTP(S) allowed", { status: 400 });
+      }
+    } catch {
+      return new Response("Malformed URL", { status: 400 });
+    }
+
+    // fetch from the parsed URL
+    const res = await fetch(parsedUrl.toString(), {
+      headers: { "User-Agent": "Tangled Camo v0.1.0" },
+    });
+
+    const allowedMimeTypes = require("./mimetypes.json");
+
+    const contentType =
+      res.headers.get("Content-Type") || "application/octet-stream";
+
+    if (!allowedMimeTypes.includes(contentType.split(";")[0].trim())) {
+      return new Response("Unsupported media type", { status: 415 });
+    }
+
+    const headers = new Headers();
+    headers.set("Content-Type", contentType);
+    headers.set("Cache-Control", "public, max-age=86400, immutable");
+
+    // serve and cache it with cf
+    response = new Response(await res.arrayBuffer(), {
+      status: res.status,
+      headers,
+    });
+
+    await cache.put(cacheKey, response.clone());
+
+    return response;
+  },
+};
···
 ### message format

 ```
-<service/top-level directory>: <package/path>: <short summary of change>
+<service/top-level directory>: <affected package/directory>: <short summary of change>

-Optional longer description, if needed. Explain what the change does and
-why, especially if not obvious. Reference relevant issues or PRs when
-applicable. These can be links for now since we don't auto-link
-issues/PRs yet.
+Optional longer description can go here, if necessary. Explain what the
+change does and why, especially if not obvious. Reference relevant
+issues or PRs when applicable. These can be links for now since we don't
+auto-link issues/PRs yet.
 ```

 Here are some examples:
···

 ### general notes

-- PRs get merged as a single commit, so keep PRs small and focused. Use
-the above guidelines for the PR title and description.
+- PRs get merged "as-is" (fast-forward) -- like applying a patch-series
+using `git am`. At present, there is no squashing -- so please author
+your commits as they would appear on `master`, following the above
+guidelines.
 - Use the imperative mood in the summary line (e.g., "fix bug" not
 "fixed bug" or "fixes bug").
 - Try to keep the summary line under 72 characters, but we aren't too
docs/hacking.md (+72)
···
+# hacking on tangled
+
+We highly recommend [installing
+nix](https://nixos.org/download/) (the package manager)
+before working on the codebase. The nix flake provides a lot
+of helpers to get started and, most importantly, builds and
+dev shells are entirely deterministic.
+
+To set up your dev environment:
+
+```bash
+nix develop
+```
+
+Non-nix users can look at the `devShell` attribute in the
+`flake.nix` file to determine the necessary dependencies.
+
+## running the appview
+
+The nix flake also exposes a few `app` attributes (run `nix
+flake show` to see a full list of what the flake provides);
+one of the apps runs the appview with the `air`
+live-reloader:
+
+```bash
+TANGLED_DEV=true nix run .#watch-appview
+
+# TANGLED_DB_PATH might be of interest to point to
+# different sqlite DBs
+
+# in a separate shell, you can live-reload tailwind
+nix run .#watch-tailwind
+```
+
+## running a knotserver
+
+An end-to-end knotserver setup requires setting up a machine
+with `sshd`, `repoguard`, `keyfetch`, and a git user, which is
+quite cumbersome, so the nix flake provides a
+`nixosConfiguration` to do all of this for you.
+
+To begin, head to `http://localhost:3000` in the browser and
+generate a knotserver secret. Replace the existing secret in
+`flake.nix` with the newly generated secret.
+
+You can now start a lightweight NixOS VM using
+`nixos-shell` like so:
+
+```bash
+QEMU_NET_OPTS="hostfwd=tcp::6000-:6000,hostfwd=tcp::2222-:22" nixos-shell --flake .#knotVM
+
+# hit Ctrl-a + c + q to exit the VM
+```
+
+This starts a knotserver on port 6000 with `ssh` exposed on
+port 2222. You can push repositories to this VM with this
+ssh config block on your main machine:
+
+```bash
+Host nixos-shell
+    Hostname localhost
+    Port 2222
+    User git
+    IdentityFile ~/.ssh/my_tangled_key
+```
+
+Set up a remote called `local-dev` on a git repo:
+
+```bash
+git remote add local-dev git@nixos-shell:user/repo
+git push local-dev main
+```
docs/knot-hosting.md (+82)
···

 You should now have a running knot server! You can finalize your registration by hitting the
 `initialize` button on the [/knots](/knots) page.
+
+### custom paths
+
+(This section applies to manual setup only. Docker users should edit the mounts
+in `docker-compose.yml` instead.)
+
+Right now, the database and repositories of your knot live in `/home/git`. You
+can move these paths if you'd like to store them in another folder. Be careful
+when adjusting these paths:
+
+* Stop your knot when moving data (e.g. `systemctl stop knotserver`) to prevent
+any possible side effects. Remember to restart it once you're done.
+* Make backups before moving in case something goes wrong.
+* Make sure the `git` user can read from and write to the new paths.
+
+#### database
+
+As an example, let's say the current database is at `/home/git/knotserver.db`,
+and we want to move it to `/home/git/database/knotserver.db`.
+
+Copy the current database to the new location. Make sure to copy the `.db-shm`
+and `.db-wal` files if they exist.
+
+```
+mkdir /home/git/database
+cp /home/git/knotserver.db* /home/git/database
+```
+
+In the environment (e.g. `/home/git/.knot.env`), set `KNOT_SERVER_DB_PATH` to
+the new file path (_not_ the directory):
+
+```
+KNOT_SERVER_DB_PATH=/home/git/database/knotserver.db
+```
+
+#### repositories
+
+As an example, let's say the repositories are currently in `/home/git`, and we
+want to move them into `/home/git/repositories`.
+
+Create the new folder, then move the existing repositories (if there are any):
+
+```
+mkdir /home/git/repositories
+# move all DIDs into the new folder; these will vary for you!
+mv /home/git/did:plc:wshs7t2adsemcrrd4snkeqli /home/git/repositories
+```
+
+In the environment (e.g. `/home/git/.knot.env`), update `KNOT_REPO_SCAN_PATH`
+to the new directory:
+
+```
+KNOT_REPO_SCAN_PATH=/home/git/repositories
+```
+
+In your SSH config (e.g. `/etc/ssh/sshd_config.d/authorized_keys_command.conf`),
+update the `AuthorizedKeysCommand` line to use the new folder. For example:
+
+```
+Match User git
+    AuthorizedKeysCommand /usr/local/libexec/tangled-keyfetch -git-dir /home/git/repositories
+    AuthorizedKeysCommandUser nobody
+```
+
+Make sure to restart your SSH server!
+
+#### git
+
+The keyfetch executable takes multiple arguments to change certain paths. You
+can view a full list by running `/usr/local/libexec/tangled-keyfetch -h`.
+
+As an example, if you wanted to change the path to the repoguard executable,
+you would edit your SSH config (e.g. `/etc/ssh/sshd_config.d/authorized_keys_command.conf`)
+and update the `AuthorizedKeysCommand` line:
+
+```
+Match User git
+    AuthorizedKeysCommand /usr/local/libexec/tangled-keyfetch -repoguard-path /path/to/repoguard
+    AuthorizedKeysCommandUser nobody
+```
+
+Make sure to restart your SSH server!