+3
-5
.tangled/workflows/deploy-docs.yml
+3
-5
.tangled/workflows/deploy-docs.yml
···
6
6
7
7
dependencies:
8
8
nixpkgs:
9
-
- nodejs
10
-
- coreutils
9
+
- bun
11
10
- curl
12
11
13
12
environment:
14
-
# Use DID directly to avoid handle-resolution issues in CI.
15
13
WISP_DID: "did:plc:mkqt76xvfgxuemlwlx6ruc3w"
16
14
WISP_SITE_NAME: "docs"
17
15
18
16
steps:
19
-
- name: build docs site
17
+
- name: build site
20
18
command: |
21
-
node ./scripts/build-wisp-docs.mjs
19
+
bun ./scripts/build-site.mjs
22
20
23
21
- name: deploy docs to wisp
24
22
command: |
+15
.tangled/workflows/publish-docs.yml
+15
.tangled/workflows/publish-docs.yml
+10
-72
CHANGELOG.md
+10
-72
CHANGELOG.md
···
1
1
# changelog
2
2
3
-
## 0.1.0
3
+
## 0.1.2
4
+
5
+
- `extractAt` now logs diagnostic info on parse failures (enable with `.zat` debug scope)
6
+
7
+
## 0.1.1
4
8
5
-
first feature release. adds protocol-level enums for firehose consumption.
9
+
- xrpc client sets `Content-Type: application/json` for POST requests
10
+
- docs published as `site.standard.document` records on tag releases
6
11
7
-
### what's new
12
+
## 0.1.0
8
13
9
-
**sync types** - enums from `com.atproto.sync.subscribeRepos` lexicon:
14
+
sync types for firehose consumption:
10
15
11
16
- `CommitAction` - `.create`, `.update`, `.delete`
12
17
- `EventKind` - `.commit`, `.sync`, `.identity`, `.account`, `.info`
13
18
- `AccountStatus` - `.takendown`, `.suspended`, `.deleted`, `.deactivated`, `.desynchronized`, `.throttled`
14
19
15
-
these integrate with zig's `std.json` for automatic parsing. define struct fields as enums instead of strings, and get exhaustive switch checking.
16
-
17
-
### migration
18
-
19
-
if you're currently doing string comparisons:
20
-
21
-
```zig
22
-
// before: string comparisons everywhere
23
-
const TapRecord = struct {
24
-
action: []const u8,
25
-
collection: []const u8,
26
-
// ...
27
-
};
28
-
29
-
if (mem.eql(u8, rec.action, "create") or mem.eql(u8, rec.action, "update")) {
30
-
// handle upsert
31
-
} else if (mem.eql(u8, rec.action, "delete")) {
32
-
// handle delete
33
-
}
34
-
```
35
-
36
-
switch to enum fields:
37
-
38
-
```zig
39
-
// after: type-safe enums
40
-
const TapRecord = struct {
41
-
action: zat.CommitAction, // parsed automatically by std.json
42
-
collection: []const u8,
43
-
// ...
44
-
};
45
-
46
-
switch (rec.action) {
47
-
.create, .update => processUpsert(rec),
48
-
.delete => processDelete(rec),
49
-
}
50
-
```
51
-
52
-
the compiler enforces exhaustive handling - if AT Protocol adds a new action, your code won't compile until you handle it.
53
-
54
-
**this is backwards compatible.** your existing code continues to work. adopt the new types when you're ready.
55
-
56
-
### library overview
57
-
58
-
zat provides zig primitives for AT Protocol:
59
-
60
-
| feature | description |
61
-
|---------|-------------|
62
-
| string primitives | `Tid`, `Did`, `Handle`, `Nsid`, `Rkey`, `AtUri` - parsing and validation |
63
-
| did resolution | resolve `did:plc` and `did:web` to documents |
64
-
| handle resolution | resolve handles to DIDs via HTTP well-known |
65
-
| xrpc client | call AT Protocol endpoints (queries and procedures) |
66
-
| sync types | enums for firehose consumption |
67
-
| json helpers | navigate nested json without verbose if-chains |
68
-
| jwt verification | verify service auth tokens (ES256, ES256K) |
69
-
| multibase/multicodec | decode public keys from DID documents |
70
-
71
-
### install
72
-
73
-
```bash
74
-
zig fetch --save https://tangled.sh/zzstoatzz.io/zat/archive/main
75
-
```
76
-
77
-
```zig
78
-
// build.zig
79
-
const zat = b.dependency("zat", .{}).module("zat");
80
-
exe.root_module.addImport("zat", zat);
81
-
```
20
+
these integrate with `std.json` for automatic parsing.
82
21
83
22
## 0.0.2
84
23
···
87
26
88
27
## 0.0.1
89
28
90
-
- initial release
91
29
- string primitives (Tid, Did, Handle, Nsid, Rkey, AtUri)
92
30
- did/handle resolution
93
31
- json helpers
+15
CONTRIBUTING.md
+15
CONTRIBUTING.md
+16
-3
README.md
+16
-3
README.md
···
1
-
# zat
1
+
# [zat](https://zat.dev)
2
+
3
+
AT Protocol building blocks for zig.
4
+
5
+
<details>
6
+
<summary><strong>this readme is an ATProto record</strong></summary>
7
+
8
+
โ [view in zat.dev's repository](https://at-me.zzstoatzz.io/view?handle=zat.dev)
2
9
3
-
zig primitives for AT Protocol.
10
+
zat publishes these docs as [`site.standard.document`](https://standard.site) records, signed by its DID.
11
+
12
+
</details>
4
13
5
14
## install
6
15
7
16
```bash
8
-
zig fetch --save https://tangled.sh/zzstoatzz.io/zat/archive/main
17
+
zig fetch --save https://tangled.sh/zat.dev/zat/archive/main
9
18
```
10
19
11
20
then in `build.zig`:
···
185
194
## license
186
195
187
196
MIT
197
+
198
+
---
199
+
200
+
[roadmap](docs/roadmap.md) ยท [changelog](CHANGELOG.md)
+12
build.zig
+12
build.zig
···
15
15
16
16
const test_step = b.step("test", "run unit tests");
17
17
test_step.dependOn(&run_tests.step);
18
+
19
+
// publish-docs script (uses zat to publish docs to ATProto)
20
+
const publish_docs = b.addExecutable(.{
21
+
.name = "publish-docs",
22
+
.root_module = b.createModule(.{
23
+
.root_source_file = b.path("scripts/publish-docs.zig"),
24
+
.target = target,
25
+
.optimize = optimize,
26
+
.imports = &.{.{ .name = "zat", .module = mod }},
27
+
}),
28
+
});
29
+
b.installArtifact(publish_docs);
18
30
}
+35
devlog/001-self-publishing-docs.md
+35
devlog/001-self-publishing-docs.md
···
1
+
# zat publishes its own docs to ATProto
2
+
3
+
zat uses itself to publish these docs as `site.standard.document` records. here's how.
4
+
5
+
## the idea
6
+
7
+
i'm working on [search for leaflet](https://leaflet-search.pages.dev/) and more generally, search for [standard.site](https://standard.site/) records. many are [currently thinking about how to facilitate better idea sharing on atproto right now](https://bsky.app/profile/eugenevinitsky.bsky.social/post/3mbpqpylv3s2e).
8
+
9
+
this is me doing a rep of shipping a "standard.site", so i know what i'll be searching through, and to better understand why blogging platforms choose their schema extensions etc for i start indexing/searching their record types.
10
+
11
+
## what we built
12
+
13
+
a zig script ([`scripts/publish-docs.zig`](https://tangled.sh/zat.dev/zat/tree/main/scripts/publish-docs.zig)) that:
14
+
15
+
1. authenticates with the PDS via `com.atproto.server.createSession`
16
+
2. creates a `site.standard.publication` record
17
+
3. publishes each doc as a `site.standard.document` pointing to that publication
18
+
4. uses deterministic TIDs so records get the same rkey every time (idempotent updates)
19
+
20
+
## the mechanics
21
+
22
+
### TIDs
23
+
24
+
timestamp identifiers. base32-sortable. we use a fixed base timestamp with incrementing clock_id so each doc gets a stable rkey:
25
+
26
+
```zig
27
+
const pub_tid = zat.Tid.fromTimestamp(1704067200000000, 0); // publication
28
+
const doc_tid = zat.Tid.fromTimestamp(1704067200000000, i + 1); // docs get 1, 2, 3...
29
+
```
30
+
31
+
### CI
32
+
33
+
[`.tangled/workflows/publish-docs.yml`](https://tangled.sh/zat.dev/zat/tree/main/.tangled/workflows/publish-docs.yml) triggers on `v*` tags. tag a release, docs publish automatically.
34
+
35
+
`putRecord` with the same rkey overwrites, so the CI job overwrites `standard.site` records when you cut a tag.
+102
docs/archive/plan-expanded.md
+102
docs/archive/plan-expanded.md
···
1
+
# archived: expanded plan (partially implemented)
2
+
3
+
This file is preserved for context/history. Current direction lives in `docs/roadmap.md`.
4
+
5
+
# zat - expanded scope
6
+
7
+
the initial release delivered string primitives (Tid, Did, Handle, Nsid, Rkey, AtUri). this plan expands toward a usable AT Protocol sdk.
8
+
9
+
## motivation
10
+
11
+
real-world usage shows repeated implementations of:
12
+
- DID resolution (plc.directory lookups, did:web fetches)
13
+
- JWT parsing and signature verification
14
+
- ECDSA verification (P256, secp256k1)
15
+
- base58/base64url decoding
16
+
- XRPC calls with manual json navigation
17
+
18
+
this is shared infrastructure across any atproto app. zat can absorb it incrementally.
19
+
20
+
## next: did resolution
21
+
22
+
```zig
23
+
pub const DidResolver = struct {
24
+
/// resolve a did to its document
25
+
pub fn resolve(self: *DidResolver, did: Did) !DidDocument
26
+
27
+
/// resolve did:plc via plc.directory
28
+
fn resolvePlc(self: *DidResolver, id: []const u8) !DidDocument
29
+
30
+
/// resolve did:web via .well-known
31
+
fn resolveWeb(self: *DidResolver, domain: []const u8) !DidDocument
32
+
};
33
+
34
+
pub const DidDocument = struct {
35
+
id: Did,
36
+
also_known_as: [][]const u8, // handles
37
+
verification_methods: []VerificationMethod,
38
+
services: []Service,
39
+
40
+
pub fn pdsEndpoint(self: DidDocument) ?[]const u8
41
+
pub fn handle(self: DidDocument) ?[]const u8
42
+
};
43
+
```
44
+
45
+
## next: cid (content identifiers)
46
+
47
+
```zig
48
+
pub const Cid = struct {
49
+
raw: []const u8,
50
+
51
+
pub fn parse(s: []const u8) ?Cid
52
+
pub fn version(self: Cid) u8
53
+
pub fn codec(self: Cid) u64
54
+
pub fn hash(self: Cid) []const u8
55
+
};
56
+
```
57
+
58
+
## later: xrpc client
59
+
60
+
```zig
61
+
pub const XrpcClient = struct {
62
+
pds: []const u8,
63
+
access_token: ?[]const u8,
64
+
65
+
pub fn query(self: *XrpcClient, nsid: Nsid, params: anytype) !JsonValue
66
+
pub fn procedure(self: *XrpcClient, nsid: Nsid, input: anytype) !JsonValue
67
+
};
68
+
```
69
+
70
+
## later: jwt verification
71
+
72
+
```zig
73
+
pub const Jwt = struct {
74
+
header: JwtHeader,
75
+
payload: JwtPayload,
76
+
signature: []const u8,
77
+
78
+
pub fn parse(token: []const u8) ?Jwt
79
+
pub fn verify(self: Jwt, public_key: PublicKey) bool
80
+
};
81
+
```
82
+
83
+
## out of scope
84
+
85
+
- lexicon codegen (separate project)
86
+
- session management / token refresh (app-specific)
87
+
- jetstream client (websocket.zig + json is enough)
88
+
- application frameworks (too opinionated)
89
+
90
+
## design principles
91
+
92
+
1. **layered** - each piece usable independently (use Did without DidResolver)
93
+
2. **explicit** - no hidden allocations, pass allocators where needed
94
+
3. **borrowing** - parse returns slices into input where possible
95
+
4. **fallible** - return errors/optionals, don't panic
96
+
5. **protocol-focused** - AT Protocol primitives, not app-specific features
97
+
98
+
## open questions
99
+
100
+
- should DidResolver cache? or leave that to caller?
101
+
- should XrpcClient handle auth refresh? or just expose tokens?
102
+
- how to handle json parsing without imposing a specific json library?
+192
docs/archive/plan-initial.md
+192
docs/archive/plan-initial.md
···
1
+
# archived: initial plan (out of date)
2
+
3
+
This file is preserved for context/history. Current direction lives in `docs/roadmap.md`.
4
+
5
+
# zat - zig atproto primitives
6
+
7
+
low-level building blocks for atproto applications in zig. not a full sdk - just the pieces that everyone reimplements.
8
+
9
+
## philosophy
10
+
11
+
from studying the wishlists: the pain is real, but the suggested solutions often over-engineer. we want:
12
+
13
+
1. **primitives, not frameworks** - types and parsers, not http clients or feed scaffolds
14
+
2. **layered design** - each piece usable independently
15
+
3. **zig idioms** - explicit buffers, comptime validation, no hidden allocations
16
+
4. **minimal scope** - solve the repeated pain, not every possible need
17
+
18
+
## scope
19
+
20
+
### in scope (v0.1)
21
+
22
+
**tid** - timestamp identifiers
23
+
- parse tid string to timestamp (microseconds)
24
+
- generate tid from timestamp
25
+
- extract clock id
26
+
- comptime validation of format
27
+
28
+
**at-uri** - `at://did:plc:xyz/collection/rkey`
29
+
- parse to components (did, collection, rkey)
30
+
- construct from components
31
+
- validation
32
+
33
+
**did** - decentralized identifiers
34
+
- parse did:plc and did:web
35
+
- validate format
36
+
- type-safe wrapper (not just `[]const u8`)
37
+
38
+
### maybe v0.2
39
+
40
+
**facets** - extract links/mentions/tags from post records
41
+
- given a json value with `text` and `facets`, extract urls
42
+
- byte-offset handling for utf-8
43
+
44
+
**cid** - content identifiers
45
+
- parse cid strings
46
+
- validate format
47
+
48
+
### out of scope (for now)
49
+
50
+
- lexicon codegen (too big, could be its own project)
51
+
- xrpc client (std.http.Client is fine)
52
+
- session management (app-specific)
53
+
- jetstream client (websocket.zig exists, just wire it)
54
+
- feed generator framework (each feed is unique)
55
+
- did resolution (requires http, out of primitive scope)
56
+
57
+
## design
58
+
59
+
### tid.zig
60
+
61
+
```zig
62
+
pub const Tid = struct {
63
+
raw: [13]u8,
64
+
65
+
/// parse a tid string. returns null if invalid.
66
+
pub fn parse(s: []const u8) ?Tid
67
+
68
+
/// timestamp in microseconds since unix epoch
69
+
pub fn timestamp(self: Tid) u64
70
+
71
+
/// clock identifier (lower 10 bits)
72
+
pub fn clockId(self: Tid) u10
73
+
74
+
/// generate tid for current time
75
+
pub fn now() Tid
76
+
77
+
/// generate tid for specific timestamp
78
+
pub fn fromTimestamp(ts: u64, clock_id: u10) Tid
79
+
80
+
/// format to string
81
+
pub fn format(self: Tid, buf: *[13]u8) void
82
+
};
83
+
```
84
+
85
+
encoding: base32-sortable (chars `234567abcdefghijklmnopqrstuvwxyz`), 13 chars, first 11 encode 53-bit timestamp, last 2 encode 10-bit clock id.
86
+
87
+
### at_uri.zig
88
+
89
+
```zig
90
+
pub const AtUri = struct {
91
+
/// the full uri string (borrowed, not owned)
92
+
raw: []const u8,
93
+
94
+
/// offsets into raw for each component
95
+
did_end: usize,
96
+
collection_end: usize,
97
+
98
+
pub fn parse(s: []const u8) ?AtUri
99
+
100
+
pub fn did(self: AtUri) []const u8
101
+
pub fn collection(self: AtUri) []const u8
102
+
pub fn rkey(self: AtUri) []const u8
103
+
104
+
/// construct a new uri. caller owns the buffer.
105
+
pub fn format(
106
+
buf: []u8,
107
+
did: []const u8,
108
+
collection: []const u8,
109
+
rkey: []const u8,
110
+
) ?[]const u8
111
+
};
112
+
```
113
+
114
+
### did.zig
115
+
116
+
```zig
117
+
pub const Did = union(enum) {
118
+
plc: [24]u8, // the identifier after "did:plc:"
119
+
web: []const u8, // the domain after "did:web:"
120
+
121
+
pub fn parse(s: []const u8) ?Did
122
+
123
+
/// format to string
124
+
pub fn format(self: Did, buf: []u8) ?[]const u8
125
+
126
+
/// check if this is a plc did
127
+
pub fn isPlc(self: Did) bool
128
+
};
129
+
```
130
+
131
+
## structure
132
+
133
+
```
134
+
zat/
135
+
โโโ build.zig
136
+
โโโ build.zig.zon
137
+
โโโ src/
138
+
โ โโโ root.zig # public API (stable exports)
139
+
โ โโโ internal.zig # internal API (experimental)
140
+
โ โโโ internal/
141
+
โ โโโ tid.zig
142
+
โ โโโ at_uri.zig
143
+
โ โโโ did.zig
144
+
โโโ docs/
145
+
โโโ plan.md
146
+
```
147
+
148
+
## internal โ public promotion
149
+
150
+
new features start in `internal` where we can iterate freely. when an API stabilizes:
151
+
152
+
```zig
153
+
// in root.zig, uncomment to promote:
154
+
pub const Tid = internal.Tid;
155
+
```
156
+
157
+
users who need bleeding-edge access can always use:
158
+
159
+
```zig
160
+
const zat = @import("zat");
161
+
const tid = zat.internal.Tid.parse("...");
162
+
```
163
+
164
+
this pattern exists indefinitely - even after 1.0, new experimental features start in internal.
165
+
166
+
## decisions
167
+
168
+
### why not typed lexicons?
169
+
170
+
codegen from lexicon json is a big project on its own. the core pain (json navigation) can be partially addressed by documenting patterns, and the sdk should work regardless of how people parse json.
171
+
172
+
### why not an http client wrapper?
173
+
174
+
zig 0.15's `std.http.Client` with `Io.Writer.Allocating` works well. wrapping it doesn't add much value. the real pain is around auth token refresh and rate limiting - those are better solved at the application level where retry logic is domain-specific.
175
+
176
+
### why not websocket/jetstream?
177
+
178
+
websocket.zig already exists and works well. the jetstream protocol is simple json messages. a thin wrapper doesn't justify a dependency.
179
+
180
+
### borrowing vs owning
181
+
182
+
for parse operations, we borrow slices into the input rather than allocating. callers who need owned data can dupe. this matches zig's explicit memory style.
183
+
184
+
## next steps
185
+
186
+
1. ~~implement tid.zig with tests~~ done
187
+
2. ~~implement at_uri.zig with tests~~ done
188
+
3. ~~implement did.zig with tests~~ done
189
+
4. ~~wire up build.zig as a module~~ done
190
+
5. try using it in find-bufo or music-atmosphere-feed to validate the api
191
+
6. iterate on internal APIs based on real usage
192
+
7. promote stable APIs to root.zig
-98
docs/plan-expanded.md
-98
docs/plan-expanded.md
···
1
-
# zat - expanded scope
2
-
3
-
the initial release delivered string primitives (Tid, Did, Handle, Nsid, Rkey, AtUri). this plan expands toward a usable AT Protocol sdk.
4
-
5
-
## motivation
6
-
7
-
real-world usage shows repeated implementations of:
8
-
- DID resolution (plc.directory lookups, did:web fetches)
9
-
- JWT parsing and signature verification
10
-
- ECDSA verification (P256, secp256k1)
11
-
- base58/base64url decoding
12
-
- XRPC calls with manual json navigation
13
-
14
-
this is shared infrastructure across any atproto app. zat can absorb it incrementally.
15
-
16
-
## next: did resolution
17
-
18
-
```zig
19
-
pub const DidResolver = struct {
20
-
/// resolve a did to its document
21
-
pub fn resolve(self: *DidResolver, did: Did) !DidDocument
22
-
23
-
/// resolve did:plc via plc.directory
24
-
fn resolvePlc(self: *DidResolver, id: []const u8) !DidDocument
25
-
26
-
/// resolve did:web via .well-known
27
-
fn resolveWeb(self: *DidResolver, domain: []const u8) !DidDocument
28
-
};
29
-
30
-
pub const DidDocument = struct {
31
-
id: Did,
32
-
also_known_as: [][]const u8, // handles
33
-
verification_methods: []VerificationMethod,
34
-
services: []Service,
35
-
36
-
pub fn pdsEndpoint(self: DidDocument) ?[]const u8
37
-
pub fn handle(self: DidDocument) ?[]const u8
38
-
};
39
-
```
40
-
41
-
## next: cid (content identifiers)
42
-
43
-
```zig
44
-
pub const Cid = struct {
45
-
raw: []const u8,
46
-
47
-
pub fn parse(s: []const u8) ?Cid
48
-
pub fn version(self: Cid) u8
49
-
pub fn codec(self: Cid) u64
50
-
pub fn hash(self: Cid) []const u8
51
-
};
52
-
```
53
-
54
-
## later: xrpc client
55
-
56
-
```zig
57
-
pub const XrpcClient = struct {
58
-
pds: []const u8,
59
-
access_token: ?[]const u8,
60
-
61
-
pub fn query(self: *XrpcClient, nsid: Nsid, params: anytype) !JsonValue
62
-
pub fn procedure(self: *XrpcClient, nsid: Nsid, input: anytype) !JsonValue
63
-
};
64
-
```
65
-
66
-
## later: jwt verification
67
-
68
-
```zig
69
-
pub const Jwt = struct {
70
-
header: JwtHeader,
71
-
payload: JwtPayload,
72
-
signature: []const u8,
73
-
74
-
pub fn parse(token: []const u8) ?Jwt
75
-
pub fn verify(self: Jwt, public_key: PublicKey) bool
76
-
};
77
-
```
78
-
79
-
## out of scope
80
-
81
-
- lexicon codegen (separate project)
82
-
- session management / token refresh (app-specific)
83
-
- jetstream client (websocket.zig + json is enough)
84
-
- application frameworks (too opinionated)
85
-
86
-
## design principles
87
-
88
-
1. **layered** - each piece usable independently (use Did without DidResolver)
89
-
2. **explicit** - no hidden allocations, pass allocators where needed
90
-
3. **borrowing** - parse returns slices into input where possible
91
-
4. **fallible** - return errors/optionals, don't panic
92
-
5. **protocol-focused** - AT Protocol primitives, not app-specific features
93
-
94
-
## open questions
95
-
96
-
- should DidResolver cache? or leave that to caller?
97
-
- should XrpcClient handle auth refresh? or just expose tokens?
98
-
- how to handle json parsing without imposing a specific json library?
-188
docs/plan-initial.md
-188
docs/plan-initial.md
···
1
-
# zat - zig atproto primitives
2
-
3
-
low-level building blocks for atproto applications in zig. not a full sdk - just the pieces that everyone reimplements.
4
-
5
-
## philosophy
6
-
7
-
from studying the wishlists: the pain is real, but the suggested solutions often over-engineer. we want:
8
-
9
-
1. **primitives, not frameworks** - types and parsers, not http clients or feed scaffolds
10
-
2. **layered design** - each piece usable independently
11
-
3. **zig idioms** - explicit buffers, comptime validation, no hidden allocations
12
-
4. **minimal scope** - solve the repeated pain, not every possible need
13
-
14
-
## scope
15
-
16
-
### in scope (v0.1)
17
-
18
-
**tid** - timestamp identifiers
19
-
- parse tid string to timestamp (microseconds)
20
-
- generate tid from timestamp
21
-
- extract clock id
22
-
- comptime validation of format
23
-
24
-
**at-uri** - `at://did:plc:xyz/collection/rkey`
25
-
- parse to components (did, collection, rkey)
26
-
- construct from components
27
-
- validation
28
-
29
-
**did** - decentralized identifiers
30
-
- parse did:plc and did:web
31
-
- validate format
32
-
- type-safe wrapper (not just `[]const u8`)
33
-
34
-
### maybe v0.2
35
-
36
-
**facets** - extract links/mentions/tags from post records
37
-
- given a json value with `text` and `facets`, extract urls
38
-
- byte-offset handling for utf-8
39
-
40
-
**cid** - content identifiers
41
-
- parse cid strings
42
-
- validate format
43
-
44
-
### out of scope (for now)
45
-
46
-
- lexicon codegen (too big, could be its own project)
47
-
- xrpc client (std.http.Client is fine)
48
-
- session management (app-specific)
49
-
- jetstream client (websocket.zig exists, just wire it)
50
-
- feed generator framework (each feed is unique)
51
-
- did resolution (requires http, out of primitive scope)
52
-
53
-
## design
54
-
55
-
### tid.zig
56
-
57
-
```zig
58
-
pub const Tid = struct {
59
-
raw: [13]u8,
60
-
61
-
/// parse a tid string. returns null if invalid.
62
-
pub fn parse(s: []const u8) ?Tid
63
-
64
-
/// timestamp in microseconds since unix epoch
65
-
pub fn timestamp(self: Tid) u64
66
-
67
-
/// clock identifier (lower 10 bits)
68
-
pub fn clockId(self: Tid) u10
69
-
70
-
/// generate tid for current time
71
-
pub fn now() Tid
72
-
73
-
/// generate tid for specific timestamp
74
-
pub fn fromTimestamp(ts: u64, clock_id: u10) Tid
75
-
76
-
/// format to string
77
-
pub fn format(self: Tid, buf: *[13]u8) void
78
-
};
79
-
```
80
-
81
-
encoding: base32-sortable (chars `234567abcdefghijklmnopqrstuvwxyz`), 13 chars, first 11 encode 53-bit timestamp, last 2 encode 10-bit clock id.
82
-
83
-
### at_uri.zig
84
-
85
-
```zig
86
-
pub const AtUri = struct {
87
-
/// the full uri string (borrowed, not owned)
88
-
raw: []const u8,
89
-
90
-
/// offsets into raw for each component
91
-
did_end: usize,
92
-
collection_end: usize,
93
-
94
-
pub fn parse(s: []const u8) ?AtUri
95
-
96
-
pub fn did(self: AtUri) []const u8
97
-
pub fn collection(self: AtUri) []const u8
98
-
pub fn rkey(self: AtUri) []const u8
99
-
100
-
/// construct a new uri. caller owns the buffer.
101
-
pub fn format(
102
-
buf: []u8,
103
-
did: []const u8,
104
-
collection: []const u8,
105
-
rkey: []const u8,
106
-
) ?[]const u8
107
-
};
108
-
```
109
-
110
-
### did.zig
111
-
112
-
```zig
113
-
pub const Did = union(enum) {
114
-
plc: [24]u8, // the identifier after "did:plc:"
115
-
web: []const u8, // the domain after "did:web:"
116
-
117
-
pub fn parse(s: []const u8) ?Did
118
-
119
-
/// format to string
120
-
pub fn format(self: Did, buf: []u8) ?[]const u8
121
-
122
-
/// check if this is a plc did
123
-
pub fn isPlc(self: Did) bool
124
-
};
125
-
```
126
-
127
-
## structure
128
-
129
-
```
130
-
zat/
131
-
โโโ build.zig
132
-
โโโ build.zig.zon
133
-
โโโ src/
134
-
โ โโโ root.zig # public API (stable exports)
135
-
โ โโโ internal.zig # internal API (experimental)
136
-
โ โโโ internal/
137
-
โ โโโ tid.zig
138
-
โ โโโ at_uri.zig
139
-
โ โโโ did.zig
140
-
โโโ docs/
141
-
โโโ plan.md
142
-
```
143
-
144
-
## internal โ public promotion
145
-
146
-
new features start in `internal` where we can iterate freely. when an API stabilizes:
147
-
148
-
```zig
149
-
// in root.zig, uncomment to promote:
150
-
pub const Tid = internal.Tid;
151
-
```
152
-
153
-
users who need bleeding-edge access can always use:
154
-
155
-
```zig
156
-
const zat = @import("zat");
157
-
const tid = zat.internal.Tid.parse("...");
158
-
```
159
-
160
-
this pattern exists indefinitely - even after 1.0, new experimental features start in internal.
161
-
162
-
## decisions
163
-
164
-
### why not typed lexicons?
165
-
166
-
codegen from lexicon json is a big project on its own. the core pain (json navigation) can be partially addressed by documenting patterns, and the sdk should work regardless of how people parse json.
167
-
168
-
### why not an http client wrapper?
169
-
170
-
zig 0.15's `std.http.Client` with `Io.Writer.Allocating` works well. wrapping it doesn't add much value. the real pain is around auth token refresh and rate limiting - those are better solved at the application level where retry logic is domain-specific.
171
-
172
-
### why not websocket/jetstream?
173
-
174
-
websocket.zig already exists and works well. the jetstream protocol is simple json messages. a thin wrapper doesn't justify a dependency.
175
-
176
-
### borrowing vs owning
177
-
178
-
for parse operations, we borrow slices into the input rather than allocating. callers who need owned data can dupe. this matches zig's explicit memory style.
179
-
180
-
## next steps
181
-
182
-
1. ~~implement tid.zig with tests~~ done
183
-
2. ~~implement at_uri.zig with tests~~ done
184
-
3. ~~implement did.zig with tests~~ done
185
-
4. ~~wire up build.zig as a module~~ done
186
-
5. try using it in find-bufo or music-atmosphere-feed to validate the api
187
-
6. iterate on internal APIs based on real usage
188
-
7. promote stable APIs to root.zig
+40
docs/roadmap.md
+40
docs/roadmap.md
···
1
+
# roadmap
2
+
3
+
zat started as a small set of string primitives for AT Protocol - the types everyone reimplements (`Tid`, `Did`, `Handle`, `Nsid`, `Rkey`, `AtUri`). the scope grew based on real usage.
4
+
5
+
## history
6
+
7
+
**initial scope** - string primitives with parsing and validation. the philosophy: primitives not frameworks, layered design, zig idioms, minimal scope.
8
+
9
+
**what grew from usage:**
10
+
- DID resolution was originally "out of scope" - real projects needed it, so `DidResolver` and `DidDocument` got added
11
+
- XRPC client and JSON helpers - same story
12
+
- JWT verification for service auth
13
+
- handle resolution via HTTP well-known
14
+
- handle resolution via DNS-over-HTTP (community contribution)
15
+
- sync types for firehose consumption (`CommitAction`, `EventKind`, `AccountStatus`)
16
+
17
+
this pattern - start minimal, expand based on real pain - continues.
18
+
19
+
## now
20
+
21
+
use zat in real projects. let usage drive what's next.
22
+
23
+
the primitives are reasonably complete. what's missing will show up when people build things. until then, no speculative features.
24
+
25
+
## maybe later
26
+
27
+
these stay out of scope unless real demand emerges:
28
+
29
+
- lexicon codegen - probably a separate project
30
+
- higher-level clients/frameworks - too opinionated
31
+
- token refresh/session management - app-specific
32
+
- feed generator scaffolding - each feed is unique
33
+
34
+
## non-goals
35
+
36
+
zat is not trying to be:
37
+
38
+
- a "one true SDK" that does everything
39
+
- an opinionated app framework
40
+
- a replacement for understanding the protocol
+17
justfile
+17
justfile
+215
scripts/build-site.mjs
+215
scripts/build-site.mjs
···
1
+
import {
2
+
readdir,
3
+
readFile,
4
+
mkdir,
5
+
rm,
6
+
cp,
7
+
writeFile,
8
+
access,
9
+
} from "node:fs/promises";
10
+
import path from "node:path";
11
+
import { execFile } from "node:child_process";
12
+
import { promisify } from "node:util";
13
+
14
+
const repoRoot = path.resolve(new URL("..", import.meta.url).pathname);
15
+
const docsDir = path.join(repoRoot, "docs");
16
+
const devlogDir = path.join(repoRoot, "devlog");
17
+
const siteSrcDir = path.join(repoRoot, "site");
18
+
const outDir = path.join(repoRoot, "site-out");
19
+
const outDocsDir = path.join(outDir, "docs");
20
+
21
+
const execFileAsync = promisify(execFile);
22
+
23
+
async function exists(filePath) {
24
+
try {
25
+
await access(filePath);
26
+
return true;
27
+
} catch {
28
+
return false;
29
+
}
30
+
}
31
+
32
+
function isMarkdown(filePath) {
33
+
return filePath.toLowerCase().endsWith(".md");
34
+
}
35
+
36
+
async function listMarkdownFiles(dir, prefix = "") {
37
+
const entries = await readdir(dir, { withFileTypes: true });
38
+
const out = [];
39
+
for (const e of entries) {
40
+
if (e.name.startsWith(".")) continue;
41
+
const rel = path.join(prefix, e.name);
42
+
const abs = path.join(dir, e.name);
43
+
if (e.isDirectory()) {
44
+
out.push(...(await listMarkdownFiles(abs, rel)));
45
+
} else if (e.isFile() && isMarkdown(e.name)) {
46
+
out.push(rel.replaceAll(path.sep, "/"));
47
+
}
48
+
}
49
+
return out.sort((a, b) => a.localeCompare(b));
50
+
}
51
+
52
+
function titleFromMarkdown(md, fallback) {
53
+
const lines = md.split(/\r?\n/);
54
+
for (const line of lines) {
55
+
const m = /^#\s+(.+)\s*$/.exec(line);
56
+
if (m) return m[1].trim();
57
+
}
58
+
return fallback.replace(/\.md$/i, "");
59
+
}
60
+
61
+
function normalizeTitle(title) {
62
+
let t = String(title || "").trim();
63
+
// Strip markdown links: [text](url) -> text
64
+
t = t.replace(/\[([^\]]+)\]\([^)]+\)/g, "$1");
65
+
// If pages follow a "zat - ..." style, drop the redundant prefix in the nav.
66
+
t = t.replace(/^zat\s*-\s*/i, "");
67
+
// Cheaply capitalize (keeps the rest as-authored).
68
+
if (t.length) t = t[0].toUpperCase() + t.slice(1);
69
+
return t;
70
+
}
71
+
72
+
async function getBuildId() {
73
+
try {
74
+
const { stdout } = await execFileAsync("git", ["rev-parse", "HEAD"], {
75
+
cwd: repoRoot,
76
+
});
77
+
const full = String(stdout || "").trim();
78
+
if (full) return full.slice(0, 12);
79
+
} catch {
80
+
// ignore
81
+
}
82
+
return String(Date.now());
83
+
}
84
+
85
+
// Build the static site into site-out/: copy the site shell, cache-bust asset
// URLs with a per-commit build id, copy docs + devlog markdown, and write
// manifest.json describing the sidebar pages.
async function main() {
  // start from a clean output directory
  await rm(outDir, { recursive: true, force: true });
  await mkdir(outDir, { recursive: true });

  // Copy static site shell
  await cp(siteSrcDir, outDir, { recursive: true });

  // Cache-bust immutable assets on Wisp by appending a per-commit query string.
  const buildId = await getBuildId();
  const outIndex = path.join(outDir, "index.html");
  if (await exists(outIndex)) {
    let html = await readFile(outIndex, "utf8");
    html = html.replaceAll('href="./style.css"', `href="./style.css?v=${buildId}"`);
    html = html.replaceAll(
      'src="./vendor/marked.min.js"',
      `src="./vendor/marked.min.js?v=${buildId}"`,
    );
    html = html.replaceAll(
      'src="./app.js"',
      `src="./app.js?v=${buildId}"`,
    );
    html = html.replaceAll(
      'href="./favicon.svg"',
      `href="./favicon.svg?v=${buildId}"`,
    );
    await writeFile(outIndex, html, "utf8");
  }

  // Copy docs
  await mkdir(outDocsDir, { recursive: true });

  // pages accumulates the sidebar manifest entries ({ path, title })
  const pages = [];

  // Prefer an explicit docs homepage if present; otherwise use repo README as index.
  const docsIndex = path.join(docsDir, "index.md");
  if (!(await exists(docsIndex))) {
    const readme = path.join(repoRoot, "README.md");
    if (await exists(readme)) {
      let md = await readFile(readme, "utf8");
      // Strip docs/ prefix from links since we're now inside the docs context.
      md = md.replace(/\]\(docs\//g, "](");
      await writeFile(path.join(outDocsDir, "index.md"), md, "utf8");
      pages.push({
        path: "index.md",
        title: normalizeTitle(titleFromMarkdown(md, "index.md")),
      });
    }
  }

  // Repo-root CHANGELOG.md becomes docs/changelog.md unless docs/ already has one.
  const changelog = path.join(repoRoot, "CHANGELOG.md");
  const docsChangelog = path.join(docsDir, "changelog.md");
  if ((await exists(changelog)) && !(await exists(docsChangelog))) {
    const md = await readFile(changelog, "utf8");
    await writeFile(path.join(outDocsDir, "changelog.md"), md, "utf8");
    pages.push({
      path: "changelog.md",
      title: normalizeTitle(titleFromMarkdown(md, "changelog.md")),
    });
  }

  const mdFiles = (await exists(docsDir)) ? await listMarkdownFiles(docsDir) : [];

  // Copy all markdown under docs/ (including archives), but only include non-archive
  // paths in the sidebar manifest.
  for (const rel of mdFiles) {
    const src = path.join(docsDir, rel);
    const dst = path.join(outDocsDir, rel);
    await mkdir(path.dirname(dst), { recursive: true });
    await cp(src, dst);

    const md = await readFile(src, "utf8");
    if (!rel.startsWith("archive/")) {
      pages.push({ path: rel, title: normalizeTitle(titleFromMarkdown(md, rel)) });
    }
  }

  // Copy devlog files to docs/devlog/ and generate an index
  const devlogFiles = (await exists(devlogDir)) ? await listMarkdownFiles(devlogDir) : [];
  const devlogEntries = [];

  for (const rel of devlogFiles) {
    const src = path.join(devlogDir, rel);
    const dst = path.join(outDocsDir, "devlog", rel);
    await mkdir(path.dirname(dst), { recursive: true });
    await cp(src, dst);

    const md = await readFile(src, "utf8");
    devlogEntries.push({
      path: `devlog/${rel}`,
      title: titleFromMarkdown(md, rel),
    });
  }

  // Generate devlog index listing all entries (newest first by filename)
  // NOTE(review): devlog/index.md is linked from the header but not added to
  // `pages`, so it never appears in the sidebar — confirm this is intentional.
  if (devlogEntries.length > 0) {
    devlogEntries.sort((a, b) => b.path.localeCompare(a.path));
    const indexMd = [
      "# devlog",
      "",
      ...devlogEntries.map((e) => `- [${e.title}](${e.path})`),
      "",
    ].join("\n");
    await writeFile(path.join(outDocsDir, "devlog", "index.md"), indexMd, "utf8");
  }

  // Stable nav order: README homepage, then roadmap, then changelog, then the rest.
  pages.sort((a, b) => {
    const order = (p) => {
      if (p === "index.md") return 0;
      if (p === "roadmap.md") return 1;
      if (p === "changelog.md") return 2;
      return 3;
    };
    const ao = order(a.path);
    const bo = order(b.path);
    if (ao !== bo) return ao - bo;
    // within the "rest" bucket, sort alphabetically by title
    return a.title.localeCompare(b.title);
  });

  await writeFile(
    path.join(outDir, "manifest.json"),
    JSON.stringify({ pages }, null, 2) + "\n",
    "utf8",
  );

  process.stdout.write(
    `Built Wisp docs site: ${pages.length} markdown file(s) -> ${outDir}\n`,
  );
}

await main();
-151
scripts/build-wisp-docs.mjs
-151
scripts/build-wisp-docs.mjs
···
1
-
import {
2
-
readdir,
3
-
readFile,
4
-
mkdir,
5
-
rm,
6
-
cp,
7
-
writeFile,
8
-
access,
9
-
} from "node:fs/promises";
10
-
import path from "node:path";
11
-
import { execFile } from "node:child_process";
12
-
import { promisify } from "node:util";
13
-
14
-
const repoRoot = path.resolve(new URL("..", import.meta.url).pathname);
15
-
const docsDir = path.join(repoRoot, "docs");
16
-
const siteSrcDir = path.join(repoRoot, "site");
17
-
const outDir = path.join(repoRoot, "site-out");
18
-
const outDocsDir = path.join(outDir, "docs");
19
-
20
-
const execFileAsync = promisify(execFile);
21
-
22
-
// True when filePath is accessible via fs.access; false on any error
// (missing file, permission denied, etc.).
async function exists(filePath) {
  try {
    await access(filePath);
    return true;
  } catch {
    return false;
  }
}
30
-
31
-
// True when the path ends in a ".md" extension (case-insensitive).
function isMarkdown(filePath) {
  return /\.md$/i.test(filePath);
}
34
-
35
-
// Recursively list markdown files under `dir`, returning forward-slash paths
// relative to `dir`, sorted lexicographically. Dotfiles and dot-directories
// are skipped.
async function listMarkdownFiles(dir, prefix = "") {
  const entries = await readdir(dir, { withFileTypes: true });
  const out = [];
  for (const e of entries) {
    if (e.name.startsWith(".")) continue;
    const rel = path.join(prefix, e.name);
    const abs = path.join(dir, e.name);
    if (e.isDirectory()) {
      // recurse, carrying the relative prefix down
      out.push(...(await listMarkdownFiles(abs, rel)));
    } else if (e.isFile() && isMarkdown(e.name)) {
      // normalize to URL-style separators for the manifest
      out.push(rel.replaceAll(path.sep, "/"));
    }
  }
  return out.sort((a, b) => a.localeCompare(b));
}
50
-
51
-
// Return the text of the first H1 ("# ...") heading in `md`; when none is
// found, fall back to the file name with its ".md" extension removed.
function titleFromMarkdown(md, fallback) {
  for (const line of md.split(/\r?\n/)) {
    const match = line.match(/^#\s+(.+)\s*$/);
    if (match) return match[1].trim();
  }
  return fallback.replace(/\.md$/i, "");
}
59
-
60
-
// Build identifier used for cache-busting asset URLs: the first 12 chars of
// the current git commit hash when available, otherwise the current time in
// milliseconds (unique enough per build).
async function getBuildId() {
  try {
    const { stdout } = await execFileAsync("git", ["rev-parse", "HEAD"], {
      cwd: repoRoot,
    });
    const full = String(stdout || "").trim();
    if (full) return full.slice(0, 12);
  } catch {
    // ignore — not a git checkout (e.g. tarball build); fall through to time id
  }
  return String(Date.now());
}
72
-
73
-
async function main() {
74
-
await rm(outDir, { recursive: true, force: true });
75
-
await mkdir(outDir, { recursive: true });
76
-
77
-
// Copy static site shell
78
-
await cp(siteSrcDir, outDir, { recursive: true });
79
-
80
-
// Cache-bust immutable assets on Wisp by appending a per-commit query string.
81
-
const buildId = await getBuildId();
82
-
const outIndex = path.join(outDir, "index.html");
83
-
if (await exists(outIndex)) {
84
-
let html = await readFile(outIndex, "utf8");
85
-
html = html.replaceAll('href="./style.css"', `href="./style.css?v=${buildId}"`);
86
-
html = html.replaceAll(
87
-
'src="./vendor/marked.min.js"',
88
-
`src="./vendor/marked.min.js?v=${buildId}"`,
89
-
);
90
-
html = html.replaceAll(
91
-
'src="./app.js"',
92
-
`src="./app.js?v=${buildId}"`,
93
-
);
94
-
html = html.replaceAll(
95
-
'href="./favicon.svg"',
96
-
`href="./favicon.svg?v=${buildId}"`,
97
-
);
98
-
await writeFile(outIndex, html, "utf8");
99
-
}
100
-
101
-
// Copy docs
102
-
await mkdir(outDocsDir, { recursive: true });
103
-
104
-
const pages = [];
105
-
106
-
// Prefer an explicit docs homepage if present; otherwise use repo README as index.
107
-
const docsIndex = path.join(docsDir, "index.md");
108
-
if (!(await exists(docsIndex))) {
109
-
const readme = path.join(repoRoot, "README.md");
110
-
if (await exists(readme)) {
111
-
const md = await readFile(readme, "utf8");
112
-
await writeFile(path.join(outDocsDir, "index.md"), md, "utf8");
113
-
pages.push({ path: "index.md", title: titleFromMarkdown(md, "index.md") });
114
-
}
115
-
}
116
-
117
-
const changelog = path.join(repoRoot, "CHANGELOG.md");
118
-
const docsChangelog = path.join(docsDir, "changelog.md");
119
-
if ((await exists(changelog)) && !(await exists(docsChangelog))) {
120
-
const md = await readFile(changelog, "utf8");
121
-
await writeFile(path.join(outDocsDir, "changelog.md"), md, "utf8");
122
-
pages.push({
123
-
path: "changelog.md",
124
-
title: titleFromMarkdown(md, "changelog.md"),
125
-
});
126
-
}
127
-
128
-
const mdFiles = (await exists(docsDir)) ? await listMarkdownFiles(docsDir) : [];
129
-
130
-
for (const rel of mdFiles) {
131
-
const src = path.join(docsDir, rel);
132
-
const dst = path.join(outDocsDir, rel);
133
-
await mkdir(path.dirname(dst), { recursive: true });
134
-
await cp(src, dst);
135
-
136
-
const md = await readFile(src, "utf8");
137
-
pages.push({ path: rel, title: titleFromMarkdown(md, rel) });
138
-
}
139
-
140
-
await writeFile(
141
-
path.join(outDir, "manifest.json"),
142
-
JSON.stringify({ pages }, null, 2) + "\n",
143
-
"utf8",
144
-
);
145
-
146
-
process.stdout.write(
147
-
`Built Wisp docs site: ${pages.length} markdown file(s) -> ${outDir}\n`,
148
-
);
149
-
}
150
-
151
-
await main();
+263
scripts/publish-docs.zig
+263
scripts/publish-docs.zig
···
1
+
const std = @import("std");
2
+
const zat = @import("zat");
3
+
4
+
const Allocator = std.mem.Allocator;
5
+
6
+
/// one file to publish: `path` is the site-relative URL, `file` the repo-relative source path
const DocEntry = struct { path: []const u8, file: []const u8 };

/// docs to publish as site.standard.document records
const docs = [_]DocEntry{
    .{ .path = "/", .file = "README.md" },
    .{ .path = "/roadmap", .file = "docs/roadmap.md" },
    .{ .path = "/changelog", .file = "CHANGELOG.md" },
};

/// devlog entries
const devlog = [_]DocEntry{
    .{ .path = "/devlog/001", .file = "devlog/001-self-publishing-docs.md" },
};
19
+
20
+
/// publish the docs and devlog as site.standard records on the zat.dev PDS repo.
/// requires ATPROTO_PASSWORD in the environment; ATPROTO_PDS overrides the
/// default PDS host. rkeys are deterministic TIDs (fixed base timestamp with
/// distinct clock ids) so reruns upsert the same records via putRecord.
pub fn main() !void {
    // use page_allocator for CLI tool - OS reclaims on exit
    const allocator = std.heap.page_allocator;

    const handle = "zat.dev";

    const password = std.posix.getenv("ATPROTO_PASSWORD") orelse {
        std.debug.print("error: ATPROTO_PASSWORD not set\n", .{});
        return error.MissingEnv;
    };

    const pds = std.posix.getenv("ATPROTO_PDS") orelse "https://bsky.social";

    var client = zat.XrpcClient.init(allocator, pds);
    defer client.deinit();

    const session = try createSession(&client, allocator, handle, password);
    defer {
        allocator.free(session.did);
        allocator.free(session.access_token);
    }

    std.debug.print("authenticated as {s}\n", .{session.did});
    client.setAuth(session.access_token);

    // generate TID for publication (fixed timestamp for deterministic rkey)
    // using 2024-01-01 00:00:00 UTC as base timestamp (1704067200 seconds = 1704067200000000 microseconds)
    const pub_tid = zat.Tid.fromTimestamp(1704067200000000, 0);
    const pub_record = Publication{
        .url = "https://zat.dev",
        .name = "zat",
        .description = "AT Protocol building blocks for zig",
    };

    try putRecord(&client, allocator, session.did, "site.standard.publication", pub_tid.str(), pub_record);
    std.debug.print("created publication: at://{s}/site.standard.publication/{s}\n", .{ session.did, pub_tid.str() });

    // AT-URI of the publication; each document record points back at it via .site
    var pub_uri_buf: std.ArrayList(u8) = .empty;
    defer pub_uri_buf.deinit(allocator);
    try pub_uri_buf.print(allocator, "at://{s}/site.standard.publication/{s}", .{ session.did, pub_tid.str() });
    const pub_uri = pub_uri_buf.items;

    // publish each doc with deterministic TIDs (same base timestamp, incrementing clock_id)
    const now = timestamp();

    for (docs, 0..) |doc, i| {
        // missing files are non-fatal: warn and move on to the next doc
        const content = std.fs.cwd().readFileAlloc(allocator, doc.file, 1024 * 1024) catch |err| {
            std.debug.print("warning: could not read {s}: {}\n", .{ doc.file, err });
            continue;
        };
        defer allocator.free(content);

        const title = extractTitle(content) orelse doc.file;
        const tid = zat.Tid.fromTimestamp(1704067200000000, @intCast(i + 1)); // clock_id 1, 2, 3...

        const doc_record = Document{
            .site = pub_uri,
            .title = title,
            .path = doc.path,
            .textContent = content,
            .publishedAt = &now,
        };

        try putRecord(&client, allocator, session.did, "site.standard.document", tid.str(), doc_record);
        std.debug.print("published: {s} -> at://{s}/site.standard.document/{s}\n", .{ doc.file, session.did, tid.str() });
    }

    // devlog publication (clock_id 100 to separate from docs)
    const devlog_tid = zat.Tid.fromTimestamp(1704067200000000, 100);
    const devlog_pub = Publication{
        .url = "https://zat.dev",
        .name = "zat devlog",
        .description = "building zat in public",
    };

    try putRecord(&client, allocator, session.did, "site.standard.publication", devlog_tid.str(), devlog_pub);
    std.debug.print("created publication: at://{s}/site.standard.publication/{s}\n", .{ session.did, devlog_tid.str() });

    var devlog_uri_buf: std.ArrayList(u8) = .empty;
    defer devlog_uri_buf.deinit(allocator);
    try devlog_uri_buf.print(allocator, "at://{s}/site.standard.publication/{s}", .{ session.did, devlog_tid.str() });
    const devlog_uri = devlog_uri_buf.items;

    // publish devlog entries (clock_id 101, 102, ...)
    for (devlog, 0..) |entry, i| {
        const content = std.fs.cwd().readFileAlloc(allocator, entry.file, 1024 * 1024) catch |err| {
            std.debug.print("warning: could not read {s}: {}\n", .{ entry.file, err });
            continue;
        };
        defer allocator.free(content);

        const title = extractTitle(content) orelse entry.file;
        const tid = zat.Tid.fromTimestamp(1704067200000000, @intCast(101 + i));

        const doc_record = Document{
            .site = devlog_uri,
            .title = title,
            .path = entry.path,
            .textContent = content,
            .publishedAt = &now,
        };

        try putRecord(&client, allocator, session.did, "site.standard.document", tid.str(), doc_record);
        std.debug.print("published: {s} -> at://{s}/site.standard.document/{s}\n", .{ entry.file, session.did, tid.str() });
    }

    std.debug.print("done\n", .{});
}
128
+
129
+
/// site.standard.publication record shape, serialized to JSON via std.json.fmt
const Publication = struct {
    @"$type": []const u8 = "site.standard.publication",
    url: []const u8,
    name: []const u8,
    description: ?[]const u8 = null,
};

/// site.standard.document record shape, serialized to JSON via std.json.fmt
const Document = struct {
    @"$type": []const u8 = "site.standard.document",
    /// AT-URI of the owning site.standard.publication record
    site: []const u8,
    title: []const u8,
    path: ?[]const u8 = null,
    textContent: ?[]const u8 = null,
    publishedAt: []const u8,
};

/// auth result from createSession; both fields are allocator-owned by the caller
const Session = struct {
    did: []const u8,
    access_token: []const u8,
};
149
+
150
+
/// authenticate against the PDS via com.atproto.server.createSession.
/// returns the DID and access JWT, both duped with `allocator`; the caller
/// owns and must free them.
fn createSession(client: *zat.XrpcClient, allocator: Allocator, handle: []const u8, password: []const u8) !Session {
    const CreateSessionInput = struct {
        identifier: []const u8,
        password: []const u8,
    };

    // serialize the request body as JSON
    var buf: std.ArrayList(u8) = .empty;
    defer buf.deinit(allocator);
    try buf.print(allocator, "{f}", .{std.json.fmt(CreateSessionInput{
        .identifier = handle,
        .password = password,
    }, .{})});

    // NSID is a literal known to be valid, so .? cannot fail here
    const nsid = zat.Nsid.parse("com.atproto.server.createSession").?;
    var response = try client.procedure(nsid, buf.items);
    defer response.deinit();

    if (!response.ok()) {
        std.debug.print("createSession failed: {s}\n", .{response.body});
        return error.AuthFailed;
    }

    var parsed = try response.json();
    defer parsed.deinit();

    const did = zat.json.getString(parsed.value, "did") orelse return error.MissingDid;
    const token = zat.json.getString(parsed.value, "accessJwt") orelse return error.MissingToken;

    // dupe out of the parsed JSON so the values outlive parsed.deinit()
    return .{
        .did = try allocator.dupe(u8, did),
        .access_token = try allocator.dupe(u8, token),
    };
}
183
+
184
+
/// upsert a record via com.atproto.repo.putRecord. `record` is any struct
/// serializable by std.json.fmt.
/// NOTE(review): repo/collection/rkey are spliced into the JSON body without
/// escaping — fine for DID/NSID/TID values, but would produce invalid JSON if
/// a value ever contained a quote; confirm callers only pass validated
/// identifiers.
fn putRecord(client: *zat.XrpcClient, allocator: Allocator, repo: []const u8, collection: []const u8, rkey: []const u8, record: anytype) !void {
    // serialize record to json
    var record_buf: std.ArrayList(u8) = .empty;
    defer record_buf.deinit(allocator);
    try record_buf.print(allocator, "{f}", .{std.json.fmt(record, .{})});

    // build request body
    var body: std.ArrayList(u8) = .empty;
    defer body.deinit(allocator);

    try body.appendSlice(allocator, "{\"repo\":\"");
    try body.appendSlice(allocator, repo);
    try body.appendSlice(allocator, "\",\"collection\":\"");
    try body.appendSlice(allocator, collection);
    try body.appendSlice(allocator, "\",\"rkey\":\"");
    try body.appendSlice(allocator, rkey);
    try body.appendSlice(allocator, "\",\"record\":");
    try body.appendSlice(allocator, record_buf.items);
    try body.append(allocator, '}');

    const nsid = zat.Nsid.parse("com.atproto.repo.putRecord").?;
    var response = try client.procedure(nsid, body.items);
    defer response.deinit();

    if (!response.ok()) {
        std.debug.print("putRecord failed: {s}\n", .{response.body});
        return error.PutFailed;
    }
}
213
+
214
+
/// find the first H1 ("# ...") line in `content` and return its text,
/// unwrapping a leading markdown link ("[text](url)" -> "text").
/// returns null when no H1 heading exists. the result slices into `content`.
fn extractTitle(content: []const u8) ?[]const u8 {
    var it = std.mem.splitScalar(u8, content, '\n');
    while (it.next()) |raw| {
        const line = std.mem.trim(u8, raw, " \t\r");
        // require "# " followed by at least one character
        if (line.len <= 2 or line[0] != '#' or line[1] != ' ') continue;

        var title = line[2..];
        // strip markdown link: [text](url) -> text
        if (title[0] == '[') {
            if (std.mem.indexOf(u8, title, "](")) |idx| {
                title = title[1..idx];
            }
        }
        return title;
    }
    return null;
}
231
+
232
+
/// current UTC time formatted as "YYYY-MM-DDTHH:MM:SSZ" (exactly 20 bytes).
/// hand-rolled civil-date conversion from the unix epoch; assumes the system
/// clock is at or after 1970 (negative epochs are not handled).
fn timestamp() [20]u8 {
    const epoch_seconds = std.time.timestamp();
    const days: i32 = @intCast(@divFloor(epoch_seconds, std.time.s_per_day));
    const day_secs: u32 = @intCast(@mod(epoch_seconds, std.time.s_per_day));

    // calculate year/month/day from days since epoch (1970-01-01)
    var y: i32 = 1970;
    var remaining = days;
    while (true) {
        // gregorian leap-year rule: divisible by 4, except centuries not divisible by 400
        const year_days: i32 = if (@mod(y, 4) == 0 and (@mod(y, 100) != 0 or @mod(y, 400) == 0)) 366 else 365;
        if (remaining < year_days) break;
        remaining -= year_days;
        y += 1;
    }

    const is_leap = @mod(y, 4) == 0 and (@mod(y, 100) != 0 or @mod(y, 400) == 0);
    const month_days = [12]u8{ 31, if (is_leap) 29 else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
    // walk months; afterwards `remaining` is the zero-based day of month
    var m: usize = 0;
    while (m < 12 and remaining >= month_days[m]) : (m += 1) {
        remaining -= month_days[m];
    }

    const hours = day_secs / 3600;
    const mins = (day_secs % 3600) / 60;
    const secs = day_secs % 60;

    var buf: [20]u8 = undefined;
    // buffer is sized exactly for the fixed-width format; bufPrint cannot fail
    _ = std.fmt.bufPrint(&buf, "{d:0>4}-{d:0>2}-{d:0>2}T{d:0>2}:{d:0>2}:{d:0>2}Z", .{
        @as(u32, @intCast(y)), @as(u32, @intCast(m + 1)), @as(u32, @intCast(remaining + 1)), hours, mins, secs,
    }) catch unreachable;
    return buf;
}
+20
-1
site/app.js
+20
-1
site/app.js
···
1
1
const navEl = document.getElementById("nav");
const contentEl = document.getElementById("content");
const menuToggle = document.querySelector(".menu-toggle");
const sidebar = document.querySelector(".sidebar");
const overlay = document.querySelector(".overlay");

// Open or close the mobile sidebar. `open` forces a state; when omitted the
// current state is toggled. Also syncs the overlay, the hamburger button's
// aria-expanded, and locks body scroll while open.
function toggleMenu(open) {
  const isOpen = open ?? !sidebar.classList.contains("open");
  sidebar.classList.toggle("open", isOpen);
  overlay?.classList.toggle("open", isOpen);
  menuToggle?.setAttribute("aria-expanded", isOpen);
  document.body.style.overflow = isOpen ? "hidden" : "";
}

menuToggle?.addEventListener("click", () => toggleMenu());
overlay?.addEventListener("click", () => toggleMenu(false));

// Close menu when nav link clicked (mobile)
navEl?.addEventListener("click", (e) => {
  if (e.target.closest("a")) toggleMenu(false);
});
3
22
4
23
const buildId = new URL(import.meta.url).searchParams.get("v") || "";
5
24
···
120
139
}
121
140
122
141
try {
123
-
const md = await fetchText(`./docs/${encodeURIComponent(activePath)}`);
142
+
const md = await fetchText(`./docs/${activePath}`);
124
143
const html = globalThis.marked.parse(md);
125
144
contentEl.innerHTML = html;
126
145
+8
-8
site/index.html
+8
-8
site/index.html
···
11
11
<body>
12
12
<div class="app">
13
13
<header class="header">
14
+
<button class="menu-toggle" aria-label="Toggle navigation" aria-expanded="false">
15
+
<span></span>
16
+
</button>
14
17
<a class="brand" href="./">zat.dev</a>
15
-
<a
16
-
class="header-link"
17
-
href="https://tangled.org/zzstoatzz.io/zat"
18
-
target="_blank"
19
-
rel="noopener noreferrer"
20
-
>
21
-
repo
22
-
</a>
18
+
<div class="header-links">
19
+
<a class="header-link" href="#devlog/index.md">devlog</a>
20
+
<a class="header-link" href="https://tangled.sh/zat.dev/zat" target="_blank" rel="noopener noreferrer">repo</a>
21
+
</div>
23
22
</header>
24
23
24
+
<div class="overlay"></div>
25
25
<div class="layout">
26
26
<nav class="sidebar">
27
27
<div id="nav" class="nav"></div>
+202
-60
site/style.css
+202
-60
site/style.css
···
10
10
--shadow: rgba(0, 0, 0, 0.35);
11
11
--max: 900px;
12
12
--radius: 12px;
13
+
--gutter: 16px;
13
14
--mono: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono",
14
15
"Courier New", monospace;
15
16
--sans: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica,
···
27
28
--codebg: rgba(0, 0, 0, 0.04);
28
29
--shadow: rgba(0, 0, 0, 0.08);
29
30
}
31
+
}
32
+
33
+
* {
34
+
box-sizing: border-box;
30
35
}
31
36
32
37
html,
···
49
54
text-decoration: underline;
50
55
}
51
56
57
+
/* App shell */
52
58
.app {
53
59
min-height: 100%;
54
60
display: flex;
55
61
flex-direction: column;
56
62
}
57
63
64
+
/* Header */
58
65
.header {
59
66
position: sticky;
60
67
top: 0;
61
-
z-index: 5;
68
+
z-index: 20;
62
69
display: flex;
63
-
gap: 12px;
64
70
align-items: center;
65
-
padding: 12px 16px;
71
+
gap: 12px;
72
+
padding: 12px var(--gutter);
66
73
border-bottom: 1px solid var(--border);
67
74
background: color-mix(in srgb, var(--panel) 92%, transparent);
68
75
backdrop-filter: blur(10px);
69
76
}
70
77
78
+
.menu-toggle {
79
+
display: none;
80
+
align-items: center;
81
+
justify-content: center;
82
+
width: 36px;
83
+
height: 36px;
84
+
padding: 0;
85
+
background: transparent;
86
+
border: 1px solid var(--border);
87
+
border-radius: 8px;
88
+
cursor: pointer;
89
+
flex-shrink: 0;
90
+
}
91
+
.menu-toggle span {
92
+
display: block;
93
+
width: 16px;
94
+
height: 2px;
95
+
background: var(--text);
96
+
border-radius: 1px;
97
+
position: relative;
98
+
}
99
+
.menu-toggle span::before,
100
+
.menu-toggle span::after {
101
+
content: "";
102
+
position: absolute;
103
+
left: 0;
104
+
width: 16px;
105
+
height: 2px;
106
+
background: var(--text);
107
+
border-radius: 1px;
108
+
transition: transform 0.2s;
109
+
}
110
+
.menu-toggle span::before {
111
+
top: -5px;
112
+
}
113
+
.menu-toggle span::after {
114
+
top: 5px;
115
+
}
116
+
.menu-toggle[aria-expanded="true"] span {
117
+
background: transparent;
118
+
}
119
+
.menu-toggle[aria-expanded="true"] span::before {
120
+
transform: translateY(5px) rotate(45deg);
121
+
}
122
+
.menu-toggle[aria-expanded="true"] span::after {
123
+
transform: translateY(-5px) rotate(-45deg);
124
+
}
125
+
71
126
.brand {
72
127
font-weight: 700;
73
-
letter-spacing: 0.2px;
74
-
padding: 6px 10px;
75
-
border-radius: 10px;
128
+
font-size: 15px;
129
+
color: var(--text);
130
+
padding: 6px 0;
76
131
}
77
132
.brand:hover {
78
-
background: color-mix(in srgb, var(--codebg) 70%, transparent);
79
133
text-decoration: none;
134
+
opacity: 0.8;
135
+
}
136
+
137
+
.header-links {
138
+
display: flex;
139
+
gap: 8px;
140
+
margin-left: auto;
80
141
}
81
142
82
143
.header-link {
83
-
margin-left: auto;
84
-
padding: 8px 10px;
85
-
border-radius: 10px;
144
+
padding: 6px 12px;
145
+
font-size: 14px;
146
+
border-radius: 8px;
86
147
border: 1px solid var(--border);
87
148
color: var(--text);
88
-
opacity: 0.9;
89
149
}
90
150
.header-link:hover {
91
-
background: color-mix(in srgb, var(--codebg) 70%, transparent);
151
+
background: var(--codebg);
92
152
text-decoration: none;
93
-
opacity: 1;
94
153
}
95
154
155
+
/* Overlay */
156
+
.overlay {
157
+
display: none;
158
+
position: fixed;
159
+
inset: 0;
160
+
z-index: 15;
161
+
background: rgba(0, 0, 0, 0.5);
162
+
}
163
+
.overlay.open {
164
+
display: block;
165
+
}
166
+
167
+
/* Layout */
96
168
.layout {
97
-
display: grid;
98
-
grid-template-columns: 280px 1fr;
99
-
gap: 16px;
100
-
padding: 16px;
169
+
display: flex;
170
+
gap: var(--gutter);
171
+
padding: var(--gutter);
101
172
flex: 1;
102
-
}
103
-
104
-
@media (max-width: 980px) {
105
-
.layout {
106
-
grid-template-columns: 1fr;
107
-
}
108
-
.sidebar {
109
-
position: relative;
110
-
top: auto;
111
-
max-height: none;
112
-
}
173
+
max-width: 1200px;
174
+
margin: 0 auto;
175
+
width: 100%;
113
176
}
114
177
178
+
/* Sidebar */
115
179
.sidebar {
180
+
width: 240px;
181
+
flex-shrink: 0;
116
182
position: sticky;
117
-
top: 64px;
118
-
align-self: start;
119
-
max-height: calc(100vh - 84px);
120
-
overflow: auto;
183
+
top: 72px;
184
+
align-self: flex-start;
185
+
max-height: calc(100vh - 88px);
186
+
overflow-y: auto;
121
187
border: 1px solid var(--border);
122
188
border-radius: var(--radius);
123
189
background: var(--panel);
124
-
box-shadow: 0 12px 40px var(--shadow);
125
190
}
126
191
127
192
.nav {
···
133
198
134
199
.nav a {
135
200
display: block;
136
-
padding: 8px 10px;
137
-
border-radius: 10px;
201
+
padding: 10px 12px;
202
+
border-radius: 8px;
138
203
color: var(--text);
139
-
opacity: 0.9;
204
+
font-size: 14px;
140
205
}
141
206
.nav a:hover {
142
-
background: color-mix(in srgb, var(--codebg) 70%, transparent);
207
+
background: var(--codebg);
143
208
text-decoration: none;
144
209
}
145
210
.nav a[aria-current="page"] {
146
-
background: color-mix(in srgb, var(--link) 14%, var(--codebg));
147
-
border: 1px solid color-mix(in srgb, var(--link) 20%, var(--border));
211
+
background: color-mix(in srgb, var(--link) 15%, transparent);
148
212
}
149
213
214
+
/* Main content */
150
215
.main {
151
-
display: flex;
152
-
justify-content: center;
216
+
flex: 1;
217
+
min-width: 0;
153
218
}
154
219
155
220
.content {
156
-
width: min(var(--max), 100%);
157
221
border: 1px solid var(--border);
158
222
border-radius: var(--radius);
159
223
background: var(--panel);
160
-
box-shadow: 0 12px 40px var(--shadow);
161
224
padding: 24px;
162
225
}
163
226
227
+
/* Footer */
164
228
.site-footer {
165
-
display: flex;
166
-
justify-content: center;
167
-
padding: 12px 16px;
229
+
padding: 16px var(--gutter);
230
+
text-align: center;
168
231
border-top: 1px solid var(--border);
169
-
background: var(--panel);
170
232
}
171
233
172
234
.footer-link {
173
235
font-size: 13px;
174
236
color: var(--muted);
175
-
padding: 6px 10px;
176
-
border-radius: 10px;
177
-
border: 1px solid transparent;
178
237
}
179
238
.footer-link:hover {
180
239
color: var(--text);
181
-
background: color-mix(in srgb, var(--codebg) 70%, transparent);
182
-
border-color: var(--border);
183
240
text-decoration: none;
184
241
}
185
242
243
+
/* Content typography */
186
244
.content h1,
187
245
.content h2,
188
246
.content h3 {
189
-
scroll-margin-top: 84px;
247
+
scroll-margin-top: 80px;
190
248
}
191
249
192
250
.content h1 {
193
251
margin-top: 0;
194
-
font-size: 34px;
252
+
font-size: 28px;
253
+
}
254
+
255
+
.content h2 {
256
+
font-size: 20px;
257
+
margin-top: 32px;
258
+
}
259
+
260
+
.content h3 {
261
+
font-size: 16px;
262
+
margin-top: 24px;
195
263
}
196
264
197
265
.content p,
198
266
.content li {
199
-
line-height: 1.6;
267
+
line-height: 1.65;
200
268
}
201
269
202
270
.content code {
203
271
font-family: var(--mono);
204
-
font-size: 0.95em;
272
+
font-size: 0.9em;
205
273
background: var(--codebg);
206
274
padding: 2px 6px;
207
-
border-radius: 8px;
275
+
border-radius: 6px;
208
276
}
209
277
210
278
.content pre {
211
-
overflow: auto;
212
-
padding: 14px 16px;
213
-
border-radius: 12px;
279
+
overflow-x: auto;
280
+
padding: 16px;
281
+
border-radius: 10px;
214
282
background: var(--codebg);
215
283
border: 1px solid var(--border);
284
+
font-size: 14px;
285
+
line-height: 1.5;
216
286
}
217
287
218
288
.content pre code {
···
220
290
padding: 0;
221
291
}
222
292
293
+
.content details {
294
+
margin: 16px 0;
295
+
}
296
+
297
+
.content details summary {
298
+
cursor: pointer;
299
+
padding: 8px 0;
300
+
}
301
+
223
302
.empty {
224
303
color: var(--muted);
225
304
}
305
+
306
+
/* Mobile */
307
+
@media (max-width: 768px) {
308
+
:root {
309
+
--gutter: 16px;
310
+
}
311
+
312
+
.menu-toggle {
313
+
display: flex;
314
+
}
315
+
316
+
.layout {
317
+
flex-direction: column;
318
+
}
319
+
320
+
.sidebar {
321
+
position: fixed;
322
+
top: 0;
323
+
left: 0;
324
+
bottom: 0;
325
+
width: 280px;
326
+
max-width: 80vw;
327
+
z-index: 16;
328
+
border: none;
329
+
border-radius: 0;
330
+
border-right: 1px solid var(--border);
331
+
max-height: none;
332
+
padding-top: 60px;
333
+
transform: translateX(-100%);
334
+
transition: transform 0.2s ease-out;
335
+
}
336
+
337
+
.sidebar.open {
338
+
transform: translateX(0);
339
+
}
340
+
341
+
.nav {
342
+
padding: 12px;
343
+
}
344
+
345
+
.nav a {
346
+
padding: 12px 14px;
347
+
font-size: 15px;
348
+
}
349
+
350
+
.content {
351
+
padding: 20px;
352
+
border-radius: 10px;
353
+
}
354
+
355
+
.content h1 {
356
+
font-size: 24px;
357
+
}
358
+
359
+
.content h2 {
360
+
font-size: 18px;
361
+
}
362
+
363
+
.content pre {
364
+
font-size: 13px;
365
+
padding: 14px;
366
+
}
367
+
}
+141
-3
src/internal/handle_resolver.zig
+141
-3
src/internal/handle_resolver.zig
···
15
15
pub const HandleResolver = struct {
16
16
allocator: std.mem.Allocator,
17
17
http_client: std.http.Client,
18
+
doh_endpoint: []const u8,
18
19
19
20
/// create a resolver backed by `allocator`, defaulting to Cloudflare's
/// DoH JSON endpoint for the DNS fallback. caller should deinit().
pub fn init(allocator: std.mem.Allocator) HandleResolver {
    return .{
        .allocator = allocator,
        .http_client = .{ .allocator = allocator },
        .doh_endpoint = "https://cloudflare-dns.com/dns-query",
    };
}
25
27
···
29
31
30
32
/// resolve a handle to a DID: tries HTTP .well-known first, then falls back
/// to a DNS TXT lookup over DoH when the HTTP path fails for any reason.
/// caller owns the returned slice.
pub fn resolve(self: *HandleResolver, handle: Handle) ![]const u8 {
    if (self.resolveHttp(handle)) |did| {
        return did;
    } else |_| {
        // any HTTP failure (network, 404, invalid DID) falls through to DNS
        return try self.resolveDns(handle);
    }
}
34
40
35
41
/// resolve via HTTP at https://{handle}/.well-known/atproto-did
···
63
69
64
70
return try self.allocator.dupe(u8, did_str);
65
71
}
72
+
73
+
/// resolve via DoH default: https://cloudflare-dns.com/dns-query
/// queries the _atproto.<handle> TXT record through the DoH JSON API and
/// returns the first answer whose payload parses as a valid DID.
/// caller owns the returned slice.
pub fn resolveDns(self: *HandleResolver, handle: Handle) ![]const u8 {
    // atproto handles publish their DID at _atproto.<handle> (TXT "did=...")
    const dns_name = try std.fmt.allocPrint(
        self.allocator,
        "_atproto.{s}",
        .{handle.str()},
    );
    defer self.allocator.free(dns_name);

    const url = try std.fmt.allocPrint(
        self.allocator,
        "{s}?name={s}&type=TXT",
        .{ self.doh_endpoint, dns_name },
    );
    defer self.allocator.free(url);

    var aw: std.io.Writer.Allocating = .init(self.allocator);
    defer aw.deinit();

    // the accept header selects the JSON DoH API (application/dns-json)
    const result = self.http_client.fetch(.{
        .location = .{ .url = url },
        .extra_headers = &.{
            .{ .name = "accept", .value = "application/dns-json" },
        },
        .response_writer = &aw.writer,
    }) catch return error.DnsResolutionFailed;

    if (result.status != .ok) {
        return error.DnsResolutionFailed;
    }

    const response_body = aw.toArrayList().items;
    // ignore_unknown_fields: DoH servers may include fields DnsResponse does
    // not model (e.g. "Comment"); without this the parse would fail on them.
    const parsed = std.json.parseFromSlice(
        DnsResponse,
        self.allocator,
        response_body,
        .{ .ignore_unknown_fields = true },
    ) catch return error.InvalidDnsResponse;
    defer parsed.deinit();

    const dns_response = parsed.value;
    if (dns_response.Answer == null or dns_response.Answer.?.len == 0) {
        return error.NoDnsRecordsFound;
    }

    // take the first TXT payload that yields a syntactically valid DID
    for (dns_response.Answer.?) |answer| {
        const data = answer.data orelse continue;
        const did_str = extractDidFromTxt(data) orelse continue;

        if (Did.parse(did_str) != null) {
            return try self.allocator.dupe(u8, did_str);
        }
    }

    return error.NoValidDidFound;
}
}
129
+
};
130
+
131
+
/// pull the DID value out of a DNS TXT payload of the form `did=<value>`.
/// DoH JSON wraps TXT data in double quotes, so a surrounding quote pair is
/// stripped first. returns null when the payload is not a did= record.
fn extractDidFromTxt(txt_data: []const u8) ?[]const u8 {
    var payload = txt_data;

    const quoted = payload.len >= 2 and
        payload[0] == '"' and
        payload[payload.len - 1] == '"';
    if (quoted) payload = payload[1 .. payload.len - 1];

    const prefix = "did=";
    if (!std.mem.startsWith(u8, payload, prefix)) return null;
    return payload[prefix.len..];
}
144
+
145
+
/// response shape for the DoH JSON API (application/dns-json) used by
/// Cloudflare; field names match the JSON keys exactly.
const DnsResponse = struct {
    Status: i32, // DNS RCODE; 0 = NOERROR
    TC: bool, // truncated
    RD: bool, // recursion desired
    RA: bool, // recursion available
    AD: bool, // authenticated data (DNSSEC validated)
    CD: bool, // checking disabled
    Question: ?[]Question = null,
    Answer: ?[]Answer = null,
};

/// echoed query section of the DoH response
const Question = struct {
    name: []const u8,
    type: i32,
};

/// a single answer record; `data` carries the TXT payload (usually quoted)
const Answer = struct {
    name: []const u8,
    type: i32,
    TTL: i32,
    data: ?[]const u8 = null,
};
67
167
68
168
// === integration tests ===
69
169
// these actually hit the network - run with: zig test src/internal/handle_resolver.zig
70
170
71
-
test "resolve handle - integration" {
171
+
test "resolve handle (http) - integration" {
72
172
// use arena for http client internals that may leak
73
173
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
74
174
defer arena.deinit();
···
78
178
79
179
// resolve a known handle that has .well-known/atproto-did
80
180
const handle = Handle.parse("jay.bsky.social") orelse return error.InvalidHandle;
81
-
const did = resolver.resolve(handle) catch |err| {
181
+
const did = resolver.resolveHttp(handle) catch |err| {
82
182
// network errors are ok in CI without network access
83
183
std.debug.print("network error (expected in some CI): {}\n", .{err});
84
184
return;
···
88
188
try std.testing.expect(Did.parse(did) != null);
89
189
try std.testing.expect(std.mem.startsWith(u8, did, "did:plc:"));
90
190
}
191
+
192
+
test "resolve handle (dns over http) - integration" {
193
+
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
194
+
defer arena.deinit();
195
+
196
+
var resolver = HandleResolver.init(arena.allocator());
197
+
defer resolver.deinit();
198
+
199
+
const handle = Handle.parse("seiso.moe") orelse return error.InvalidHandle;
200
+
const did = resolver.resolveDns(handle) catch |err| {
201
+
// network errors are ok in CI without network access
202
+
std.debug.print("network error (expected in some CI): {}\n", .{err});
203
+
return;
204
+
};
205
+
206
+
// should be a valid DID
207
+
try std.testing.expect(Did.parse(did) != null);
208
+
try std.testing.expect(std.mem.startsWith(u8, did, "did:"));
209
+
}
210
+
211
+
test "resolve handle - integration" {
212
+
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
213
+
defer arena.deinit();
214
+
215
+
var resolver = HandleResolver.init(arena.allocator());
216
+
defer resolver.deinit();
217
+
218
+
const handle = Handle.parse("jay.bsky.social") orelse return error.InvalidHandle;
219
+
const did = resolver.resolve(handle) catch |err| {
220
+
// network errors are ok in CI without network access
221
+
std.debug.print("network error (expected in some CI): {}\n", .{err});
222
+
return;
223
+
};
224
+
225
+
// should be a valid DID
226
+
try std.testing.expect(Did.parse(did) != null);
227
+
try std.testing.expect(std.mem.startsWith(u8, did, "did:"));
228
+
}
+107
-3
src/internal/json.zig
+107
-3
src/internal/json.zig
···
6
6
//! two approaches:
7
7
//! - runtime paths: getString(value, "embed.external.uri") - for dynamic paths
8
8
//! - comptime paths: extractAt(T, alloc, value, .{"embed", "external"}) - for static paths with type safety
9
+
//!
10
+
//! debug logging:
11
+
//! enable with `pub const std_options = .{ .log_scope_levels = &.{.{ .scope = .zat, .level = .debug }} };`
9
12
10
13
const std = @import("std");
14
+
const log = std.log.scoped(.zat);
11
15
12
16
/// navigate a json value by dot-separated path
13
17
/// returns null if any segment is missing or wrong type
···
92
96
/// extract a typed struct from a nested path
93
97
/// uses comptime tuple for path segments - no runtime string parsing
94
98
/// leverages std.json.parseFromValueLeaky for type-safe extraction
99
+
///
100
+
/// on failure, logs diagnostic info when debug logging is enabled for .zat scope
95
101
pub fn extractAt(
96
102
comptime T: type,
97
103
allocator: std.mem.Allocator,
···
101
107
var current = value;
102
108
inline for (path) |segment| {
103
109
current = switch (current) {
104
-
.object => |obj| obj.get(segment) orelse return error.MissingField,
105
-
else => return error.UnexpectedToken,
110
+
.object => |obj| obj.get(segment) orelse {
111
+
log.debug("extractAt: missing field \"{s}\" in path {any}, expected {s}", .{
112
+
segment,
113
+
path,
114
+
@typeName(T),
115
+
});
116
+
return error.MissingField;
117
+
},
118
+
else => {
119
+
log.debug("extractAt: expected object at \"{s}\" in path {any}, got {s}", .{
120
+
segment,
121
+
path,
122
+
@tagName(current),
123
+
});
124
+
return error.UnexpectedToken;
125
+
},
106
126
};
107
127
}
108
-
return std.json.parseFromValueLeaky(T, allocator, current, .{});
128
+
return std.json.parseFromValueLeaky(T, allocator, current, .{ .ignore_unknown_fields = true }) catch |err| {
129
+
log.debug("extractAt: parse failed for {s} at path {any}: {s} (json type: {s})", .{
130
+
@typeName(T),
131
+
path,
132
+
@errorName(err),
133
+
@tagName(current),
134
+
});
135
+
return err;
136
+
};
109
137
}
110
138
111
139
/// extract a typed value, returning null if path doesn't exist
···
278
306
const missing = extractAtOptional(Thing, arena.allocator(), parsed.value, .{"missing"});
279
307
try std.testing.expect(missing == null);
280
308
}
309
+
310
+
test "extractAt logs diagnostic on enum parse failure" {
311
+
// simulates the issue: unknown enum value from external API
312
+
const json_str =
313
+
\\{"op": {"action": "archive", "path": "app.bsky.feed.post/abc"}}
314
+
;
315
+
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
316
+
defer arena.deinit();
317
+
318
+
const parsed = try std.json.parseFromSlice(std.json.Value, arena.allocator(), json_str, .{});
319
+
320
+
const Action = enum { create, update, delete };
321
+
const Op = struct {
322
+
action: Action,
323
+
path: []const u8,
324
+
};
325
+
326
+
// "archive" is not a valid Action variant - this should fail
327
+
// with debug logging enabled, you'd see:
328
+
// debug(zat): extractAt: parse failed for json.Op at path { "op" }: InvalidEnumTag (json type: object)
329
+
const result = extractAtOptional(Op, arena.allocator(), parsed.value, .{"op"});
330
+
try std.testing.expect(result == null);
331
+
}
332
+
333
+
test "extractAt logs diagnostic on missing field" {
334
+
const json_str =
335
+
\\{"data": {"name": "test"}}
336
+
;
337
+
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
338
+
defer arena.deinit();
339
+
340
+
const parsed = try std.json.parseFromSlice(std.json.Value, arena.allocator(), json_str, .{});
341
+
342
+
const Thing = struct { value: i64 };
343
+
344
+
// path "data.missing" doesn't exist
345
+
// with debug logging enabled, you'd see:
346
+
// debug(zat): extractAt: missing field "missing" in path { "data", "missing" }, expected json.Thing
347
+
const result = extractAtOptional(Thing, arena.allocator(), parsed.value, .{ "data", "missing" });
348
+
try std.testing.expect(result == null);
349
+
}
350
+
351
+
test "extractAt ignores unknown fields" {
352
+
// real-world case: TAP messages have extra fields (live, rev, cid) that we don't need
353
+
const json_str =
354
+
\\{
355
+
\\ "record": {
356
+
\\ "live": true,
357
+
\\ "did": "did:plc:abc123",
358
+
\\ "rev": "3mbspmpaidl2a",
359
+
\\ "collection": "pub.leaflet.document",
360
+
\\ "rkey": "xyz789",
361
+
\\ "action": "create",
362
+
\\ "cid": "bafyreitest"
363
+
\\ }
364
+
\\}
365
+
;
366
+
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
367
+
defer arena.deinit();
368
+
369
+
const parsed = try std.json.parseFromSlice(std.json.Value, arena.allocator(), json_str, .{});
370
+
371
+
// only extract the fields we care about
372
+
const Record = struct {
373
+
collection: []const u8,
374
+
action: []const u8,
375
+
did: []const u8,
376
+
rkey: []const u8,
377
+
};
378
+
379
+
const rec = try extractAt(Record, arena.allocator(), parsed.value, .{"record"});
380
+
try std.testing.expectEqualStrings("pub.leaflet.document", rec.collection);
381
+
try std.testing.expectEqualStrings("create", rec.action);
382
+
try std.testing.expectEqualStrings("did:plc:abc123", rec.did);
383
+
try std.testing.expectEqualStrings("xyz789", rec.rkey);
384
+
}
+5
-1
src/internal/xrpc.zig
+5
-1
src/internal/xrpc.zig
···
18
18
/// bearer token for authenticated requests
19
19
access_token: ?[]const u8 = null,
20
20
21
+
/// atproto JWTs are ~1KB; buffer needs room for "Bearer " prefix
22
+
const max_auth_header_len = 2048;
23
+
21
24
pub fn init(allocator: std.mem.Allocator, host: []const u8) XrpcClient {
22
25
return .{
23
26
.allocator = allocator,
···
89
92
// https://github.com/ziglang/zig/issues/25021
90
93
var extra_headers: std.http.Client.Request.Headers = .{
91
94
.accept_encoding = .{ .override = "identity" },
95
+
.content_type = if (body != null) .{ .override = "application/json" } else .default,
92
96
};
93
-
var auth_header_buf: [256]u8 = undefined;
97
+
var auth_header_buf: [max_auth_header_len]u8 = undefined;
94
98
if (self.access_token) |token| {
95
99
const auth_value = try std.fmt.bufPrint(&auth_header_buf, "Bearer {s}", .{token});
96
100
extra_headers.authorization = .{ .override = auth_value };