···
            .build(),
        )
        .await
-       .map_err(|e| CapturedError::from_display(format_smolstr!("Failed to fetch entry: {}", e).as_str().to_string()))?;
+       .map_err(|e| {
+           CapturedError::from_display(
+               format_smolstr!("Failed to fetch entry: {}", e)
+                   .as_str()
+                   .to_string(),
+           )
+       })?;

-       let record = resp
-           .into_output()
-           .map_err(|e| CapturedError::from_display(format_smolstr!("Failed to parse entry: {}", e).as_str().to_string()))?;
+       let record = resp.into_output().map_err(|e| {
+           CapturedError::from_display(
+               format_smolstr!("Failed to parse entry: {}", e)
+                   .as_str()
+                   .to_string(),
+           )
+       })?;

        // Parse the entry
        let entry: Entry = jacquard::from_data(&record.value).map_err(|e| {
-           CapturedError::from_display(format_smolstr!("Failed to deserialize entry: {}", e).as_str().to_string())
+           CapturedError::from_display(
+               format_smolstr!("Failed to deserialize entry: {}", e)
+                   .as_str()
+                   .to_string(),
+           )
        })?;

        // Find the image by name
···
            })
            .map(|img| img.image.blob().cid().clone().into_static())
            .ok_or_else(|| {
-               CapturedError::from_display(format_smolstr!("Image '{}' not found in entry", name).as_str().to_string())
+               CapturedError::from_display(
+                   format_smolstr!("Image '{}' not found in entry", name)
+                       .as_str()
+                       .to_string(),
+               )
            })?;

        // Check cache first
···
        )
        .await
        .map_err(|e| {
-           CapturedError::from_display(format_smolstr!("Failed to fetch PublishedBlob: {}", e).as_str().to_string())
+           CapturedError::from_display(
+               format_smolstr!("Failed to fetch PublishedBlob: {}", e)
+                   .as_str()
+                   .to_string(),
+           )
        })?;

        let record = resp.into_output().map_err(|e| {
-           CapturedError::from_display(format_smolstr!("Failed to parse PublishedBlob: {}", e).as_str().to_string())
+           CapturedError::from_display(
+               format_smolstr!("Failed to parse PublishedBlob: {}", e)
+                   .as_str()
+                   .to_string(),
+           )
        })?;

        // Parse the PublishedBlob
        let published: PublishedBlob = jacquard::from_data(&record.value).map_err(|e| {
-           CapturedError::from_display(format_smolstr!("Failed to deserialize PublishedBlob: {}", e).as_str().to_string())
+           CapturedError::from_display(
+               format_smolstr!("Failed to deserialize PublishedBlob: {}", e)
+                   .as_str()
+                   .to_string(),
+           )
        })?;

        // Get CID from the upload blob ref
···
            .get_notebook_by_key(notebook_key)
            .await?
            .ok_or_else(|| {
-               CapturedError::from_display(format_smolstr!("Notebook '{}' not found", notebook_key).as_str().to_string())
+               CapturedError::from_display(
+                   format_smolstr!("Notebook '{}' not found", notebook_key)
+                       .as_str()
+                       .to_string(),
+               )
            })?;

        let (view, entry_refs) = notebook.as_ref();

        // Get the DID from the notebook URI for blob fetching
        let notebook_did = jacquard::types::aturi::AtUri::new(view.uri.as_ref())
-           .map_err(|e| CapturedError::from_display(format_smolstr!("Invalid notebook URI: {}", e).as_str().to_string()))?
+           .map_err(|e| {
+               CapturedError::from_display(
+                   format_smolstr!("Invalid notebook URI: {}", e)
+                       .as_str()
+                       .to_string(),
+               )
+           })?
            .authority()
            .clone()
            .into_static();
···
        let client = self.fetcher.get_client();
        for entry_ref in entry_refs {
            // Parse the entry URI to get rkey
-           let entry_uri = jacquard::types::aturi::AtUri::new(entry_ref.uri.as_ref())
-               .map_err(|e| CapturedError::from_display(format_smolstr!("Invalid entry URI: {}", e).as_str().to_string()))?;
+           let entry_uri = jacquard::types::aturi::AtUri::new(entry_ref.entry.uri.as_ref())
+               .map_err(|e| {
+                   CapturedError::from_display(
+                       format_smolstr!("Invalid entry URI: {}", e)
+                           .as_str()
+                           .to_string(),
+                   )
+               })?;
            let rkey = entry_uri
                .rkey()
                .ok_or_else(|| CapturedError::from_display("Entry URI missing rkey"))?;
···
            }
        }

-       Err(CapturedError::from_display(format_smolstr!(
-           "Image '{}' not found in notebook '{}'",
-           image_name, notebook_key
-       ).as_str().to_string()))
+       Err(CapturedError::from_display(
+           format_smolstr!(
+               "Image '{}' not found in notebook '{}'",
+               image_name,
+               notebook_key
+           )
+           .as_str()
+           .to_string(),
+       ))
    }

    /// Insert bytes directly into cache (for pre-warming after upload)
crates/weaver-app/src/components/editor/writer.rs (+67, -3)
···
            self.last_char_offset = closing_char_end;
        }
-       Html(html) | InlineHtml(html) => {
+       Html(html) => {
            // Track offset mapping for raw HTML
            let char_start = self.last_char_offset;
            let html_char_len = html.chars().count();
···
            self.write(&html)?;

+           // Record mapping for inline HTML
+           self.record_mapping(range.clone(), char_start..char_end);
+           self.last_char_offset = char_end;
+       }
+       InlineHtml(html) => {
+           // Track offset mapping for raw HTML
+           let char_start = self.last_char_offset;
+           let html_char_len = html.chars().count();
+           let char_end = char_start + html_char_len;
+           self.write(r#"<span class="html-embed html-embed-inline">"#)?;
+           self.write(&html)?;
+           self.write("</span>")?;
            // Record mapping for inline HTML
            self.record_mapping(range.clone(), char_start..char_end);
            self.last_char_offset = char_end;
···
        // Emit the opening tag
        match tag {
-           Tag::HtmlBlock => Ok(()),
+           // HTML blocks get their own paragraph to try and corral them better
+           Tag::HtmlBlock => {
+               // Record paragraph start for boundary tracking
+               // BUT skip if inside a list - list owns the paragraph boundary
+               if self.list_depth == 0 {
+                   self.current_paragraph_start =
+                       Some((self.last_byte_offset, self.last_char_offset));
+               }
+               let node_id = self.gen_node_id();
+
+               if self.end_newline {
+                   write!(
+                       &mut self.writer,
+                       r#"<p id="{}" class="html-embed html-embed-block">"#,
+                       node_id
+                   )?;
+               } else {
+                   write!(
+                       &mut self.writer,
+                       "\n<p id=\"{}\" class=\"html-embed html-embed-block\">",
+                       node_id
+                   )?;
+               }
+               self.begin_node(node_id.clone());
+
+               // Map the start position of the paragraph (before any content)
+               // This allows cursor to be placed at the very beginning
+               let para_start_char = self.last_char_offset;
+               let mapping = OffsetMapping {
+                   byte_range: range.start..range.start,
+                   char_range: para_start_char..para_start_char,
+                   node_id,
+                   char_offset_in_node: 0,
+                   child_index: Some(0), // position before first child
+                   utf16_len: 0,
+               };
+               self.offset_maps.push(mapping);
+
+               Ok(())
+           }
            Tag::Paragraph => {
                // Record paragraph start for boundary tracking
                // BUT skip if inside a list - list owns the paragraph boundary
···
        // Emit tag HTML first
        let result = match tag {
-           TagEnd::HtmlBlock => Ok(()),
+           TagEnd::HtmlBlock => {
+               // Record paragraph end for boundary tracking
+               // BUT skip if inside a list - list owns the paragraph boundary
+               if self.list_depth == 0 {
+                   if let Some((byte_start, char_start)) = self.current_paragraph_start.take() {
+                       let byte_range = byte_start..self.last_byte_offset;
+                       let char_range = char_start..self.last_char_offset;
+                       self.paragraph_ranges.push((byte_range, char_range));
+                   }
+               }
+
+               self.end_node();
+               self.write("</p>\n")
+           }
            TagEnd::Paragraph => {
                // Record paragraph end for boundary tracking
                // BUT skip if inside a list - list owns the paragraph boundary
crates/weaver-app/src/components/identity.rs (+111, -40)
···
use jacquard::{smol_str::SmolStr, types::ident::AtIdentifier};
use std::collections::HashSet;
use weaver_api::com_atproto::repo::strong_ref::StrongRef;
-use weaver_api::sh_weaver::notebook::{EntryView, NotebookView, entry::Entry};
+use weaver_api::sh_weaver::notebook::{
+    BookEntryRef, BookEntryView, EntryView, NotebookView, entry::Entry,
+};
+
+/// Constructs BookEntryViews from notebook entry refs and all available entries.
+///
+/// Matches StrongRefs by URI to find the corresponding EntryView,
+/// then builds BookEntryView with index and prev/next navigation refs.
+fn build_book_entry_views(
+    entry_refs: &[StrongRef<'static>],
+    all_entries: &[(EntryView<'static>, Entry<'static>)],
+) -> Vec<BookEntryView<'static>> {
+    use jacquard::IntoStatic;
+
+    // Build a lookup map for faster matching
+    let entry_map: std::collections::HashMap<&str, &EntryView<'static>> = all_entries
+        .iter()
+        .map(|(view, _)| (view.uri.as_ref(), view))
+        .collect();
+
+    let mut views = Vec::with_capacity(entry_refs.len());
+
+    for (idx, strong_ref) in entry_refs.iter().enumerate() {
+        let Some(entry_view) = entry_map.get(strong_ref.uri.as_ref()).copied() else {
+            continue;
+        };
+
+        // Build prev ref (if not first)
+        let prev = if idx > 0 {
+            entry_refs
+                .get(idx - 1)
+                .and_then(|prev_ref| entry_map.get(prev_ref.uri.as_ref()).copied())
+                .map(|prev_view| {
+                    BookEntryRef::new()
+                        .entry(prev_view.clone())
+                        .build()
+                        .into_static()
+                })
+        } else {
+            None
+        };
+
+        // Build next ref (if not last)
+        let next = if idx + 1 < entry_refs.len() {
+            entry_refs
+                .get(idx + 1)
+                .and_then(|next_ref| entry_map.get(next_ref.uri.as_ref()).copied())
+                .map(|next_view| {
+                    BookEntryRef::new()
+                        .entry(next_view.clone())
+                        .build()
+                        .into_static()
+                })
+        } else {
+            None
+        };
+
+        views.push(
+            BookEntryView::new()
+                .entry(entry_view.clone())
+                .index(idx as i64)
+                .maybe_prev(prev)
+                .maybe_next(next)
+                .build()
+                .into_static(),
+        );
+    }
+
+    views
+}

/// A single item in the profile timeline (either notebook or standalone entry)
#[derive(Clone, PartialEq)]
pub enum ProfileTimelineItem {
    Notebook {
        notebook: NotebookView<'static>,
-       entries: Vec<StrongRef<'static>>,
+       entries: Vec<BookEntryView<'static>>,
        /// Most recent entry's created_at for sorting
        sort_date: jacquard::types::string::Datetime,
    },
···
    let auth_state = use_context::<Signal<AuthState>>();

    // Use client-only versions to avoid SSR issues with concurrent server futures
-   let (_profile_res, profile) = data::use_profile_data_client(ident);
-   let (_notebooks_res, notebooks) = data::use_notebooks_for_did_client(ident);
-   let (_entries_res, all_entries) = data::use_entries_for_did_client(ident);
+   let (_profile_res, profile) = data::use_profile_data(ident);
+   let (_notebooks_res, notebooks) = data::use_notebooks_for_did(ident);
+   let (_entries_res, all_entries) = data::use_entries_for_did(ident);
+
+   #[cfg(feature = "fullstack-server")]
+   {
+       _profile_res?;
+       _notebooks_res?;
+       _entries_res?;
+   }

    // Check if viewing own profile
    let is_own_profile = use_memo(move || {
···
    if let Some(all_ents) = ents.as_ref() {
        for (notebook, entry_refs) in nbs {
            if is_pinned(notebook.uri.as_ref(), pinned_set) {
-               let sort_date = entry_refs
+               let book_entries = build_book_entry_views(entry_refs, all_ents);
+               let sort_date = book_entries
                    .iter()
-                   .filter_map(|r| {
+                   .filter_map(|bev| {
                        all_ents
                            .iter()
-                           .find(|(v, _)| v.uri.as_ref() == r.uri.as_ref())
+                           .find(|(v, _)| v.uri.as_ref() == bev.entry.uri.as_ref())
                    })
                    .map(|(_, entry)| entry.created_at.clone())
                    .max()
···
                items.push(ProfileTimelineItem::Notebook {
                    notebook: notebook.clone(),
-                   entries: entry_refs.clone(),
+                   entries: book_entries,
                    sort_date,
                });
            }
···
    if let Some(all_ents) = ents.as_ref() {
        for (notebook, entry_refs) in nbs {
            if !is_pinned(notebook.uri.as_ref(), pinned_set) {
-               let sort_date = entry_refs
+               let book_entries = build_book_entry_views(entry_refs, all_ents);
+               let sort_date = book_entries
                    .iter()
-                   .filter_map(|r| {
+                   .filter_map(|bev| {
                        all_ents
                            .iter()
-                           .find(|(v, _)| v.uri.as_ref() == r.uri.as_ref())
+                           .find(|(v, _)| v.uri.as_ref() == bev.entry.uri.as_ref())
                    })
                    .map(|(_, entry)| entry.created_at.clone())
                    .max()
···
                items.push(ProfileTimelineItem::Notebook {
                    notebook: notebook.clone(),
-                   entries: entry_refs.clone(),
+                   entries: book_entries,
                    sort_date,
                });
            }
···
    class: "pinned-item",
    NotebookCard {
        notebook: notebook.clone(),
-       entry_refs: entries.clone(),
+       entries: entries.clone(),
        is_pinned: true,
        profile_ident: Some(ident()),
    }
···
    key: "notebook-{notebook.cid}",
    NotebookCard {
        notebook: notebook.clone(),
-       entry_refs: entries.clone(),
+       entries: entries.clone(),
        is_pinned: false,
        profile_ident: Some(ident()),
    }
···
    let entry_view = &book_entry_view.entry;

-   let entry_title = entry_view.title.as_ref()
+   let entry_title = entry_view
+       .title
+       .as_ref()
        .map(|t| t.as_ref())
        .unwrap_or("Untitled");

-   let entry_path = entry_view.path
+   let entry_path = entry_view
+       .path
        .as_ref()
        .map(|p| p.as_ref().to_string())
        .unwrap_or_else(|| entry_title.to_string());
···
        html_buf
    });

-   let created_at = parsed_entry.as_ref()
+   let created_at = parsed_entry
+       .as_ref()
        .map(|entry| entry.created_at.as_ref().format("%B %d, %Y").to_string());

    let entry_uri = entry_view.uri.clone().into_static();
···
#[component]
pub fn NotebookCard(
    notebook: NotebookView<'static>,
-   entry_refs: Vec<StrongRef<'static>>,
+   entries: Vec<BookEntryView<'static>>,
    #[props(default = false)] is_pinned: bool,
    #[props(default)] show_author: Option<bool>,
    /// Profile identity for context-aware author visibility (hides single author on their own profile)
-   #[props(default)] profile_ident: Option<AtIdentifier<'static>>,
+   #[props(default)]
+   profile_ident: Option<AtIdentifier<'static>>,
    #[props(default)] on_pinned_changed: Option<EventHandler<bool>>,
    #[props(default)] on_deleted: Option<EventHandler<()>>,
) -> Element {
···
    let ident = notebook.uri.authority().clone().into_static();
    let book_title: SmolStr = notebook_path.clone().into();

-   // Fetch all entries to get first/last
-   let ident_for_fetch = ident.clone();
-   let book_title_for_fetch = book_title.clone();
-   let entries = use_resource(use_reactive!(|(ident_for_fetch, book_title_for_fetch)| {
-       let fetcher = fetcher.clone();
-       async move {
-           fetcher
-               .list_notebook_entries(ident_for_fetch, book_title_for_fetch)
-               .await
-               .ok()
-               .flatten()
-       }
-   }));
    rsx! {
        div { class: "notebook-card",
            div { class: "notebook-card-container",
···
        }

        // Entry previews section
-       if let Some(Some(entry_list)) = entries() {
        div { class: "notebook-card-previews",
            {
                use jacquard::from_data;
                use weaver_api::sh_weaver::notebook::entry::Entry;
+               tracing::info!(
+                   "rendering entries: {:?}",
+                   entries.iter().map(|e| e.entry.uri.as_ref()).collect::<Vec<_>>()
+               );

-               if entry_list.len() <= 5 {
+               if entries.len() <= 5 {
                    // Show all entries if 5 or fewer
                    rsx! {
-                       for entry_view in entry_list.iter() {
+                       for entry_view in entries.iter() {
                            NotebookEntryPreview {
                                book_entry_view: entry_view.clone(),
                                ident: ident.clone(),
···
                } else {
                    // Show first, interstitial, and last
                    rsx! {
-                       if let Some(first_entry) = entry_list.first() {
+                       if let Some(first_entry) = entries.first() {
                            NotebookEntryPreview {
                                book_entry_view: first_entry.clone(),
                                ident: ident.clone(),
···
                        // Interstitial showing count
                        {
-                           let middle_count = entry_list.len().saturating_sub(2);
+                           let middle_count = entries.len().saturating_sub(2);
                            rsx! {
                                div { class: "notebook-entry-interstitial",
                                    "... {middle_count} more "
···
                            }
                        }

-                       if let Some(last_entry) = entry_list.last() {
+                       if let Some(last_entry) = entries.last() {
                            NotebookEntryPreview {
                                book_entry_view: last_entry.clone(),
                                ident: ident.clone(),
···
                    }
                }
            }
-       }

        if let Some(ref tags) = notebook.tags {
            if !tags.is_empty() {
···
use jacquard::IntoStatic;
use jacquard::cowstr::ToCowStr;
use jacquard::identity::resolver::IdentityResolver;
+use jacquard::prelude::*;
use jacquard::types::ident::AtIdentifier;
use jacquard::types::string::{AtUri, Cid, Did, Handle, Uri};
use jacquard_axum::ExtractXrpc;
···
    })?;

    let Some(data) = profile_data else {
+       // get the bluesky profile
+       // TODO: either cache this or yell at tap to start tracking their account!
+       let profile_resp = state
+           .resolver
+           .send(
+               weaver_api::app_bsky::actor::get_profile::GetProfile::new()
+                   .actor(did)
+                   .build(),
+           )
+           .await
+           .map_err(|e| XrpcErrorResponse::not_found(e.to_string()))?;
+       let bsky_profile = profile_resp
+           .into_output()
+           .map_err(|e| XrpcErrorResponse::not_found(e.to_string()))?
+           .value;
+       let inner_profile = ProfileView::new()
+           .did(bsky_profile.did)
+           .handle(bsky_profile.handle)
+           .maybe_display_name(bsky_profile.display_name)
+           .maybe_description(bsky_profile.description)
+           .maybe_avatar(bsky_profile.avatar)
+           .maybe_banner(bsky_profile.banner)
+           .build();
+
+       let inner = ProfileDataViewInner::ProfileView(Box::new(inner_profile));
+
+       let output = ProfileDataView::new().inner(inner).build();
+
+       return Ok(Json(
+           GetProfileOutput {
+               value: output,
+               extra_data: None,
+           }
+           .into_static(),
+       ));
    };

    // Build the response
crates/weaver-index/src/endpoints/bsky.rs (+1)
···
    State(state): State<AppState>,
    ExtractXrpc(args): ExtractXrpc<GetProfileRequest>,
) -> Result<Json<GetProfileOutput<'static>>, XrpcErrorResponse> {
+   // TODO: either cache this or yell at tap to start tracking their account!
    let response = state.resolver.send(args).await.map_err(|e| {
        tracing::warn!("Appview getProfile failed: {}", e);
        XrpcErrorResponse::internal_error("Failed to fetch profile from appview")
crates/weaver-index/src/endpoints/edit.rs (+11, -2)
···
/// Handle sh.weaver.edit.listDrafts
///
-/// Returns draft records for an actor.
+/// Returns draft records for an actor. Requires authentication.
+/// Only returns drafts if viewer is the actor or has collab permission.
pub async fn list_drafts(
    State(state): State<AppState>,
    ExtractOptionalServiceAuth(viewer): ExtractOptionalServiceAuth,
    ExtractXrpc(args): ExtractXrpc<ListDraftsRequest>,
) -> Result<Json<ListDraftsOutput<'static>>, XrpcErrorResponse> {
-   let _viewer: Viewer = viewer;
+   // Require authentication
+   let viewer =
+       viewer.ok_or_else(|| XrpcErrorResponse::auth_required("Authentication required"))?;
+   let viewer_did = viewer.did();

    let limit = args.limit.unwrap_or(50).min(100).max(1);
···
    // Resolve actor to DID
    let actor_did = resolve_actor(&state, &args.actor).await?;
+
+   // Permission check: viewer must be the actor (owner access)
+   // TODO: Add collab grant check for draft sharing
+   if viewer_did.as_str() != actor_did.as_str() {
+       return Err(XrpcErrorResponse::forbidden(
+           "Cannot view another user's drafts",
+       ));
+   }

    // Fetch drafts
    let draft_rows = state
crates/weaver-index/src/endpoints/notebook.rs (+1, -4)
···
    let name = args.name.as_ref();

    let limit = args.entry_limit.unwrap_or(50).clamp(1, 100) as u32;
-   let cursor: Option<u32> = args
-       .entry_cursor
-       .as_deref()
-       .and_then(|c| c.parse().ok());
+   let cursor: Option<u32> = args.entry_cursor.as_deref().and_then(|c| c.parse().ok());

    // Fetch notebook first to get its rkey
    let notebook_row = state
If you've been to this site before, you may have noticed it loaded a fair bit more quickly this time. That's not really because the web server creating this HTML got a whole lot better. It did require some refactoring, but it was mostly in the vein of taking some code and adding new code that did the same thing, gated behind a cargo feature. This did, however, have the side effect of replacing, in the final binary, functions that are literally hundreds of lines long (and that in turn call functions that may also be hundreds of lines long, making several cascading network requests) with functions that look like this, which by and large make a single network request and return exactly what is required.
```rust
···
```

Of course the reason is that I finally got round to building the Weaver AppView. I'm going to be calling mine the Index, because Weaver is about writing and I think "AppView" as a term kind of sucks and "index" is much more elegant, on top of being a good descriptor of what the big backend service now powering Weaver does. ![[at://did:plc:ragtjsm2j2vknwkz3zp4oxrd/app.bsky.feed.post/3lyucxfxq622w]]
For the uninitiated, because I expect at least some people reading this aren't big into AT Protocol development: an AppView is an instance of the kind of big backend service that Bluesky PBLLC runs, which powers essentially every Bluesky client, with a few notable exceptions such as [Red Dwarf](https://reddwarf.app/) and (partially, eventually more completely) [Blacksky](https://blacksky.community/). It listens to the [Firehose](https://bsky.network/) [event stream](https://atproto.com/specs/event-stream) from the main Bluesky Relay and analyzes the data which comes through that pertains to Bluesky, producing your timeline feeds, figuring out who follows you, who you block and who blocks you (and filtering them out of your view of the app), how many people liked your last post, and so on. It exists because the records in your PDS (and those of all the other people on Bluesky) need context and relationships to give them meaning, and that context can then be passed along to you without your app having to go collect it all. ![[at://did:plc:uu5axsmbm2or2dngy4gwchec/app.bsky.feed.post/3lsc2tzfsys2f]]
It's a very normal backend with some weird constraints imposed by the protocol, and in practice it's the thing that separates the day-to-day Bluesky experience from the Mastodon experience the most. It's also by far the most centralising force in the network, because it also does moderation, and because it's quite expensive to run. A full index of all Bluesky activity takes a lot of storage (futur's Zeppelin experiment detailed above took about 16 terabytes of storage using PostgreSQL for the database and cost $200/month to run), and then it takes that much more computing power to calculate all the relationships between the data on the fly as new events come in and then serve personalized versions to everyone that uses it.
It's not the only AppView out there; most atproto apps have something like this. Tangled, Streamplace, Leaflet, and so on all have substantial backends. Some (like Tangled) actually combine the front end you interact with and the AppView into a single service. But in general these are big, complicated, persistent services you have to backfill from existing data to bootstrap, and they really strongly shape your app, whether they're literally part of the same executable or hosted on the same server or not. And when I started building Weaver in earnest, not only did I still have a few big unanswered questions about how I wanted Weaver to work and how it needed to work, I also didn't want to fundamentally tie it to some big server and create this centralising force. I wanted it to be possible for someone else to run it without being dependent on me personally, ideally even if all they had access to was a static site host like GitHub Pages or a browser runtime platform like Cloudflare Workers, so long as someone somewhere was running a couple of generic services. I wanted to be able to distribute the fullstack server version as basically just an executable in a directory of files with no other dependencies, which could easily be run in any container hosting environment with zero persistent storage required. Hell, you could technically serve it as a blob or series of blobs from your PDS with the right entry point, if I did my job right.
···
In contrast, notebook entry records don't link to other parts of the notebook in and of themselves, because calculating those links would be challenging, and updating one entry would require updating not just the entry itself and the notebook it's in, but also neighbouring entries in said notebook. With the shape of collaborative publishing in Weaver, that would result in up to four writes to the PDS when you publish an entry, in addition to any blob uploads. And trying to link the other way in edit history (root to edit head) is similarly challenging.
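To make that concrete, here is an illustrative sketch of the fan-out; the types and the notebook collection name are stand-ins I made up for the example, not Weaver's actual API:

```rust
// Hypothetical illustration, not Weaver's real types: the PDS writes that
// publishing one entry into the middle of a notebook would require if
// entry records carried prev/next links.
struct RecordWrite {
    collection: &'static str,
    reason: &'static str,
}

fn writes_for_linked_publish() -> Vec<RecordWrite> {
    vec![
        // 1. the new entry itself
        RecordWrite {
            collection: "sh.weaver.notebook.entry",
            reason: "create the new entry",
        },
        // 2. the notebook that now contains it
        RecordWrite {
            collection: "sh.weaver.notebook",
            reason: "append to the notebook's entry list",
        },
        // 3. the preceding entry, whose next link would have to change
        RecordWrite {
            collection: "sh.weaver.notebook.entry",
            reason: "repoint the previous entry's next link",
        },
        // 4. the following entry, whose prev link would have to change
        RecordWrite {
            collection: "sh.weaver.notebook.entry",
            reason: "repoint the following entry's prev link",
        },
    ]
}
```

Keeping the links out of the records means only the first two writes ever happen; something like the index can compute the prev/next relationships at read time instead.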
I anticipated some of these, but others emerged only because I ran into them while building the web app. I've had to manually fix up records more than once because I made breaking changes to my lexicons after discovering I really wanted X piece of metadata or cross-linkage. If I'd built the index first or alongside (particularly if the index remained a separate service from the web app, as I intended, to keep the web app simple), it would likely have constrained my choices and potentially cut off certain solutions, due to the time it takes to dump the database and re-run backfill even at a very small scale. Building a big chunk of the front end first told me exactly what the index needed to provide easy access to.
You can access it here: [index.weaver.sh](https://index.weaver.sh)
# ClickHAUS
So what does Weaver's index look like? Well, it starts with either the firehose or the new [Tap](https://docs.bsky.app/blog/introducing-tap) sync tool. The index ingests events from either over a WebSocket connection, does a bit of processing (less is required when ingesting from Tap, which is what I've currently deployed), and then dumps them into the Clickhouse database. I chose Clickhouse as the primary index database on a recommendation from a friend, and after doing a lot of reading. It fits atproto data well, as Graze found. Because it isolates concurrent inserts from selects and cleans things up asynchronously afterwards, you can just dump data in; it does wonderfully when you have a single major input point (or a set of them) that fans out into tables you can then transform and read from.
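To give a feel for that write path, here's a minimal sketch of batching events into the table shown below, assuming the `clickhouse` Rust crate; the real ingester's row type and batching logic are more involved:

```rust
use clickhouse::{Client, Row};
use serde::Serialize;

// A few of raw_records' columns, for illustration; the full schema is below.
#[derive(Row, Serialize)]
struct RawRecord {
    did: String,
    collection: String,
    rkey: String,
}

// Flush one batch of decoded firehose/Tap events in a single insert.
// MergeTree absorbs large concurrent inserts and merges the resulting
// parts asynchronously, so readers never block the write path.
async fn flush_batch(client: &Client, batch: Vec<RawRecord>) -> clickhouse::error::Result<()> {
    let mut insert = client.insert("raw_records")?;
    for row in batch {
        insert.write(&row).await?;
    }
    insert.end().await
}
```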
I will not claim that the tables you can find in the weaver repository are especially **good** database design overall, but they work, they're very much a work in progress, and we'll see how they scale. Also, Tap makes re-backfilling the data a hell of a lot easier.

Below is one of the three main input tables: one for record writes, one for identity events, and one for account events.
```SQL
CREATE TABLE IF NOT EXISTS raw_records (
    did String,
···
ENGINE = MergeTree()
ORDER BY (collection, did, rkey, event_time, indexed_at);
```
From here we fan out into a cascading series of materialized views and other specialised tables. These break out the different record types, calculate metadata, and pull critical fields out of the record JSON for easier querying. Clickhouse's wild-ass compression means we're not too badly off replicating data on disk this way. Seriously, their JSON type ends up the same size on disk as a CBOR blob in my testing, though it *does* have some quirks, as I discovered when I read back Datetime fields and got...not the format I put in. Thankfully there's a config setting for that. We also build out the list of who contributed to a published entry and determine the canonical record for it, so that fetching a fully hydrated entry with all contributor profiles takes only a couple of `SELECT` queries, which avoid extensive table scans (and are thus very fast) thanks to reasonable choices of `ORDER BY` fields in the denormalized tables they query. And then I can do quirky things like power a profile fetch endpoint that will provide either a Weaver or a Bluesky profile, while also unifying fields so that we can easily get at the critical stuff in common. This is a relatively expensive calculation, but people thankfully don't edit their profiles that often, and this is why we don't keep the stats in the same table.
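The fan-out itself is ordinary Clickhouse machinery. A minimal sketch of the pattern, with illustrative table and column names rather than the real schema:

```SQL
-- Illustrative only: peel one record type out of raw_records into its own
-- table, pulling hot fields out of the record JSON for cheap querying.
CREATE MATERIALIZED VIEW IF NOT EXISTS entry_records_mv TO entry_records AS
SELECT
    did,
    rkey,
    JSONExtractString(record, 'title') AS title,
    JSONExtractString(record, 'createdAt') AS created_at,
    event_time,
    indexed_at
FROM raw_records
WHERE collection = 'sh.weaver.notebook.entry';
```

(The Datetime round-trip quirk mentioned above is the sort of thing settings like `date_time_output_format` exist for.)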
However, that expense is ***also*** why Clickhouse will not be the only database used in the index.
···
If people have ideas, I'm all ears.
## Future
Having this available obviously improves the performance of the app, but it also enables a lot of new stuff. I have plans for social features which would have been much harder to implement without it, and which can later be backfilled into the non-indexed implementation. I have more substantial rewrites of the data fetching code planned as well, beyond the straightforward replacement I did in this first pass. And there's still a **lot** more to do on the editor before it's done.
I've been joking about all sorts of ambitious things, but I legitimately think Weaver ends up almost uniquely flexible and powerful among the atproto-based long-form writing platforms, in how it's designed and in particular in how it enables people to create things together, and that it can end up filling some big shoes, given enough time and development effort.
I hope you found this interesting. I enjoyed writing it out. There's still a lot more to do, but this was a big milestone for me.
If you'd like to support this project, there's a GitHub Sponsorship link at the bottom of the page, but honestly, I'd love it if you used Weaver to write something.