// atproto blogging
1//! Render caching and incremental paragraph rendering.
2//!
3//! This module provides infrastructure for incremental markdown rendering,
4//! caching paragraph renders to avoid re-rendering unchanged content.
5
6use std::ops::Range;
7
8use smol_str::SmolStr;
9
10use crate::offset_map::OffsetMapping;
11use crate::paragraph::{ParagraphRender, hash_source, make_paragraph_id};
12use crate::syntax::SyntaxSpanInfo;
13use crate::text::TextBuffer;
14use crate::types::EditInfo;
15use crate::writer::EditorWriter;
16use crate::{EditorRope, EmbedContentProvider, ImageResolver};
17
18use markdown_weaver::Parser;
19use weaver_common::ExtractedRef;
20
/// Cache for incremental paragraph rendering.
///
/// Stores previously rendered paragraphs so unchanged content can be reused
/// on the next render instead of being re-parsed and re-rendered.
#[derive(Clone, Debug, Default)]
pub struct RenderCache {
    /// Cached paragraph renders (content paragraphs only, gaps computed fresh).
    pub paragraphs: Vec<CachedParagraph>,
    /// Next available node ID for fresh renders.
    /// NOTE(review): the incremental render paths below reset this to 0 when
    /// rebuilding the cache — confirm node IDs are scoped per paragraph prefix
    /// rather than document-global.
    pub next_node_id: usize,
    /// Next available syntax span ID for fresh renders.
    pub next_syn_id: usize,
    /// Next available paragraph ID (monotonic counter; backs stable DOM ids
    /// via `make_paragraph_id`).
    pub next_para_id: usize,
}
34
/// A cached paragraph render that can be reused if source hasn't changed.
#[derive(Clone, Debug)]
pub struct CachedParagraph {
    /// Stable monotonic ID for DOM element identity.
    pub id: SmolStr,
    /// Hash of paragraph source text for change detection.
    pub source_hash: u64,
    /// Byte range in source document (document-absolute).
    pub byte_range: Range<usize>,
    /// Char range in source document (document-absolute).
    pub char_range: Range<usize>,
    /// Rendered HTML for this paragraph only.
    pub html: String,
    /// Offset mappings for cursor positioning. Ranges are stored in
    /// document-absolute coordinates (the render paths shift writer-local
    /// offsets by the paragraph's start before caching).
    pub offset_map: Vec<OffsetMapping>,
    /// Syntax spans for conditional visibility (document-absolute positions).
    pub syntax_spans: Vec<SyntaxSpanInfo>,
    /// Collected refs (wikilinks, AT embeds) from this paragraph.
    pub collected_refs: Vec<ExtractedRef>,
}
55
56/// Check if an edit affects paragraph boundaries.
57///
58/// Edits that don't contain newlines and aren't in the block-syntax zone
59/// are considered "safe" and can skip boundary rediscovery.
60pub fn is_boundary_affecting(edit: &EditInfo) -> bool {
61 // Newlines always affect boundaries (paragraph splits/joins).
62 if edit.contains_newline {
63 return true;
64 }
65
66 // Edits in the block-syntax zone (first ~6 chars of line) could affect
67 // headings, lists, blockquotes, code fences, etc.
68 if edit.in_block_syntax_zone {
69 return true;
70 }
71
72 false
73}
74
/// Apply a signed delta to a usize, saturating at 0 on underflow
/// (and at `usize::MAX` on overflow).
///
/// Uses [`isize::unsigned_abs`] so `delta == isize::MIN` is handled safely;
/// the previous `(-delta) as usize` negation overflows for that value
/// (a panic in debug builds).
pub fn apply_delta(val: usize, delta: isize) -> usize {
    if delta >= 0 {
        val.saturating_add(delta as usize)
    } else {
        val.saturating_sub(delta.unsigned_abs())
    }
}
83
/// Result of incremental paragraph rendering.
pub struct IncrementalRenderResult {
    /// Rendered paragraphs, in document order.
    pub paragraphs: Vec<ParagraphRender>,
    /// Updated cache to pass back in on the next render.
    pub cache: RenderCache,
    /// Collected refs (wikilinks, AT embeds) found during render,
    /// aggregated across all paragraphs.
    pub collected_refs: Vec<ExtractedRef>,
}
93
94/// Render markdown with incremental caching.
95///
96/// Uses cached paragraph renders when possible, only re-rendering changed paragraphs.
97/// Generic over any `TextBuffer` implementation.
98///
99/// # Parameters
100/// - `text`: The text buffer to render
101/// - `cache`: Optional previous render cache
102/// - `cursor_offset`: Current cursor position (for finding which NEW paragraph is the cursor para)
103/// - `edit`: Edit info for stable ID assignment
104/// - `image_resolver`: Optional image URL resolver
105/// - `entry_index`: Optional index for wikilink validation
106/// - `embed_provider`: Provider for embed content
107///
108/// # Returns
109/// `IncrementalRenderResult` containing paragraphs, updated cache, and collected refs.
110pub fn render_paragraphs_incremental<T, I, E>(
111 text: &T,
112 cache: Option<&RenderCache>,
113 cursor_offset: usize,
114 edit: Option<&EditInfo>,
115 image_resolver: Option<&I>,
116 entry_index: Option<&weaver_common::EntryIndex>,
117 embed_provider: &E,
118) -> IncrementalRenderResult
119where
120 T: TextBuffer,
121 I: ImageResolver + Clone + Default,
122 E: EmbedContentProvider,
123{
124 let source = text.to_string();
125
126 // Log source entering renderer to detect ZWC/space issues.
127 if tracing::enabled!(target: "weaver::render", tracing::Level::TRACE) {
128 tracing::trace!(
129 target: "weaver::render",
130 source_len = source.len(),
131 source_chars = source.chars().count(),
132 source_content = %source.escape_debug(),
133 "render_paragraphs: source entering renderer"
134 );
135 }
136
137 // Handle empty document.
138 if source.is_empty() {
139 let empty_node_id = "n0".to_string();
140 let empty_html = format!(r#"<span id="{}">{}</span>"#, empty_node_id, '\u{200B}');
141 let para_id = make_paragraph_id(0);
142
143 let para = ParagraphRender {
144 id: para_id.clone(),
145 byte_range: 0..0,
146 char_range: 0..0,
147 html: empty_html.clone(),
148 offset_map: vec![],
149 syntax_spans: vec![],
150 source_hash: 0,
151 };
152
153 let new_cache = RenderCache {
154 paragraphs: vec![CachedParagraph {
155 id: para_id,
156 source_hash: 0,
157 byte_range: 0..0,
158 char_range: 0..0,
159 html: empty_html,
160 offset_map: vec![],
161 syntax_spans: vec![],
162 collected_refs: vec![],
163 }],
164 next_node_id: 1,
165 next_syn_id: 0,
166 next_para_id: 1,
167 };
168
169 return IncrementalRenderResult {
170 paragraphs: vec![para],
171 cache: new_cache,
172 collected_refs: vec![],
173 };
174 }
175
176 // Determine if we can use fast path (skip boundary discovery).
177 let current_len = text.len_chars();
178 let current_byte_len = text.len_bytes();
179
180 // If we have cache but no edit, just return cached data (no re-render needed).
181 // This happens on cursor position changes, clicks, etc.
182 if let (Some(c), None) = (cache, edit) {
183 let cached_len = c.paragraphs.last().map(|p| p.char_range.end).unwrap_or(0);
184 if cached_len == current_len {
185 tracing::trace!(
186 target: "weaver::render",
187 "no edit, returning cached paragraphs"
188 );
189 let paragraphs: Vec<ParagraphRender> = c
190 .paragraphs
191 .iter()
192 .map(|p| ParagraphRender {
193 id: p.id.clone(),
194 byte_range: p.byte_range.clone(),
195 char_range: p.char_range.clone(),
196 html: p.html.clone(),
197 offset_map: p.offset_map.clone(),
198 syntax_spans: p.syntax_spans.clone(),
199 source_hash: p.source_hash,
200 })
201 .collect();
202 return IncrementalRenderResult {
203 paragraphs,
204 cache: c.clone(),
205 collected_refs: c
206 .paragraphs
207 .iter()
208 .flat_map(|p| p.collected_refs.clone())
209 .collect(),
210 };
211 }
212 }
213
214 let use_fast_path = cache.is_some() && edit.is_some() && !is_boundary_affecting(edit.unwrap());
215
216 tracing::debug!(
217 target: "weaver::render",
218 use_fast_path,
219 has_cache = cache.is_some(),
220 has_edit = edit.is_some(),
221 boundary_affecting = edit.map(is_boundary_affecting),
222 current_len,
223 "render path decision"
224 );
225
226 // Get paragraph boundaries.
227 let paragraph_ranges = if use_fast_path {
228 // Fast path: adjust cached boundaries based on actual length change.
229 let cache = cache.unwrap();
230 let edit = edit.unwrap();
231
232 let edit_pos = edit.edit_char_pos;
233
234 let (cached_len, cached_byte_len) = cache
235 .paragraphs
236 .last()
237 .map(|p| (p.char_range.end, p.byte_range.end))
238 .unwrap_or((0, 0));
239 let char_delta = current_len as isize - cached_len as isize;
240 let byte_delta = current_byte_len as isize - cached_byte_len as isize;
241
242 cache
243 .paragraphs
244 .iter()
245 .map(|p| {
246 if p.char_range.end < edit_pos {
247 (p.byte_range.clone(), p.char_range.clone())
248 } else if p.char_range.start > edit_pos {
249 (
250 apply_delta(p.byte_range.start, byte_delta)
251 ..apply_delta(p.byte_range.end, byte_delta),
252 apply_delta(p.char_range.start, char_delta)
253 ..apply_delta(p.char_range.end, char_delta),
254 )
255 } else {
256 (
257 p.byte_range.start..apply_delta(p.byte_range.end, byte_delta),
258 p.char_range.start..apply_delta(p.char_range.end, char_delta),
259 )
260 }
261 })
262 .collect::<Vec<_>>()
263 } else {
264 vec![]
265 };
266
267 // Validate fast path results.
268 let use_fast_path = if !paragraph_ranges.is_empty() {
269 let all_valid = paragraph_ranges
270 .iter()
271 .all(|(_, char_range)| char_range.start <= char_range.end);
272 if !all_valid {
273 tracing::debug!(
274 target: "weaver::render",
275 "fast path produced invalid ranges, falling back to slow path"
276 );
277 false
278 } else {
279 true
280 }
281 } else {
282 false
283 };
284
285 // ============ FAST PATH ============
286 if use_fast_path {
287 let cache = cache.unwrap();
288 let edit = edit.unwrap();
289 let edit_pos = edit.edit_char_pos;
290
291 let (cached_len, cached_byte_len) = cache
292 .paragraphs
293 .last()
294 .map(|p| (p.char_range.end, p.byte_range.end))
295 .unwrap_or((0, 0));
296 let char_delta = current_len as isize - cached_len as isize;
297 let byte_delta = current_byte_len as isize - cached_byte_len as isize;
298
299 let cursor_para_idx = cache
300 .paragraphs
301 .iter()
302 .position(|p| p.char_range.start <= edit_pos && edit_pos <= p.char_range.end);
303
304 let mut paragraphs = Vec::with_capacity(cache.paragraphs.len());
305 let mut new_cached = Vec::with_capacity(cache.paragraphs.len());
306 let mut all_refs: Vec<ExtractedRef> = Vec::new();
307
308 for (idx, cached_para) in cache.paragraphs.iter().enumerate() {
309 let is_cursor_para = Some(idx) == cursor_para_idx;
310
311 let (byte_range, char_range) = if cached_para.char_range.end < edit_pos {
312 (
313 cached_para.byte_range.clone(),
314 cached_para.char_range.clone(),
315 )
316 } else if cached_para.char_range.start > edit_pos {
317 (
318 apply_delta(cached_para.byte_range.start, byte_delta)
319 ..apply_delta(cached_para.byte_range.end, byte_delta),
320 apply_delta(cached_para.char_range.start, char_delta)
321 ..apply_delta(cached_para.char_range.end, char_delta),
322 )
323 } else {
324 (
325 cached_para.byte_range.start
326 ..apply_delta(cached_para.byte_range.end, byte_delta),
327 cached_para.char_range.start
328 ..apply_delta(cached_para.char_range.end, char_delta),
329 )
330 };
331
332 let para_source = text
333 .slice(char_range.clone())
334 .map(|s| s.to_string())
335 .unwrap_or_default();
336 let source_hash = hash_source(¶_source);
337
338 if is_cursor_para {
339 // Re-render cursor paragraph for fresh syntax detection.
340 let resolver = image_resolver.cloned().unwrap_or_default();
341 let parser = Parser::new_ext(¶_source, weaver_renderer::default_md_options())
342 .into_offset_iter();
343
344 let para_rope = EditorRope::from(para_source.as_str());
345
346 let mut writer = EditorWriter::<_, _, &E, &I, ()>::new(
347 ¶_source,
348 ¶_rope,
349 parser,
350 )
351 .with_node_id_prefix(&cached_para.id)
352 .with_image_resolver(&resolver)
353 .with_embed_provider(embed_provider);
354
355 if let Some(idx) = entry_index {
356 writer = writer.with_entry_index(idx);
357 }
358
359 let (html, offset_map, syntax_spans, para_refs) = match writer.run() {
360 Ok(result) => {
361 let mut offset_map = result
362 .offset_maps_by_paragraph
363 .into_iter()
364 .next()
365 .unwrap_or_default();
366 for m in &mut offset_map {
367 m.char_range.start += char_range.start;
368 m.char_range.end += char_range.start;
369 m.byte_range.start += byte_range.start;
370 m.byte_range.end += byte_range.start;
371 }
372 let mut syntax_spans = result
373 .syntax_spans_by_paragraph
374 .into_iter()
375 .next()
376 .unwrap_or_default();
377 for s in &mut syntax_spans {
378 s.adjust_positions(char_range.start as isize);
379 }
380 let para_refs = result
381 .collected_refs_by_paragraph
382 .into_iter()
383 .next()
384 .unwrap_or_default();
385 let html = result.html_segments.into_iter().next().unwrap_or_default();
386 (html, offset_map, syntax_spans, para_refs)
387 }
388 Err(_) => (String::new(), Vec::new(), Vec::new(), Vec::new()),
389 };
390
391 all_refs.extend(para_refs.clone());
392
393 new_cached.push(CachedParagraph {
394 id: cached_para.id.clone(),
395 source_hash,
396 byte_range: byte_range.clone(),
397 char_range: char_range.clone(),
398 html: html.clone(),
399 offset_map: offset_map.clone(),
400 syntax_spans: syntax_spans.clone(),
401 collected_refs: para_refs.clone(),
402 });
403
404 paragraphs.push(ParagraphRender {
405 id: cached_para.id.clone(),
406 byte_range,
407 char_range,
408 html,
409 offset_map,
410 syntax_spans,
411 source_hash,
412 });
413 } else {
414 // Reuse cached with adjusted offsets.
415 let mut offset_map = cached_para.offset_map.clone();
416 let mut syntax_spans = cached_para.syntax_spans.clone();
417
418 if cached_para.char_range.start > edit_pos {
419 for m in &mut offset_map {
420 m.char_range.start = apply_delta(m.char_range.start, char_delta);
421 m.char_range.end = apply_delta(m.char_range.end, char_delta);
422 m.byte_range.start = apply_delta(m.byte_range.start, byte_delta);
423 m.byte_range.end = apply_delta(m.byte_range.end, byte_delta);
424 }
425 for s in &mut syntax_spans {
426 s.adjust_positions(char_delta);
427 }
428 }
429
430 all_refs.extend(cached_para.collected_refs.clone());
431
432 new_cached.push(CachedParagraph {
433 id: cached_para.id.clone(),
434 source_hash,
435 byte_range: byte_range.clone(),
436 char_range: char_range.clone(),
437 html: cached_para.html.clone(),
438 offset_map: offset_map.clone(),
439 syntax_spans: syntax_spans.clone(),
440 collected_refs: cached_para.collected_refs.clone(),
441 });
442
443 paragraphs.push(ParagraphRender {
444 id: cached_para.id.clone(),
445 byte_range,
446 char_range,
447 html: cached_para.html.clone(),
448 offset_map,
449 syntax_spans,
450 source_hash,
451 });
452 }
453 }
454
455 let new_cache = RenderCache {
456 paragraphs: new_cached,
457 next_node_id: 0,
458 next_syn_id: 0,
459 next_para_id: cache.next_para_id,
460 };
461
462 return IncrementalRenderResult {
463 paragraphs,
464 cache: new_cache,
465 collected_refs: all_refs,
466 };
467 }
468
469 // ============ SLOW PATH ============
470 // Partial render: reuse cached paragraphs before edit, parse from affected to end.
471
472 let (reused_paragraphs, parse_start_byte, parse_start_char) =
473 if let (Some(c), Some(e)) = (cache, edit) {
474 let edit_pos = e.edit_char_pos;
475 let affected_idx = c
476 .paragraphs
477 .iter()
478 .position(|p| p.char_range.end >= edit_pos);
479
480 if let Some(mut idx) = affected_idx {
481 const BOUNDARY_SLOP: usize = 3;
482 let para_start = c.paragraphs[idx].char_range.start;
483 if idx > 0 && edit_pos < para_start + BOUNDARY_SLOP {
484 idx -= 1;
485 }
486
487 if idx > 0 {
488 let reused: Vec<_> = c.paragraphs[..idx].to_vec();
489 let last_reused = &c.paragraphs[idx - 1];
490 tracing::trace!(
491 reused_count = idx,
492 parse_start_byte = last_reused.byte_range.end,
493 parse_start_char = last_reused.char_range.end,
494 "slow path: partial parse from affected paragraph"
495 );
496 (
497 reused,
498 last_reused.byte_range.end,
499 last_reused.char_range.end,
500 )
501 } else {
502 (Vec::new(), 0, 0)
503 }
504 } else {
505 if let Some(last) = c.paragraphs.last() {
506 let reused = c.paragraphs.clone();
507 (reused, last.byte_range.end, last.char_range.end)
508 } else {
509 (Vec::new(), 0, 0)
510 }
511 }
512 } else {
513 (Vec::new(), 0, 0)
514 };
515
516 let parse_slice = &source[parse_start_byte..];
517 let parser =
518 Parser::new_ext(parse_slice, weaver_renderer::default_md_options()).into_offset_iter();
519
520 let resolver = image_resolver.cloned().unwrap_or_default();
521 let slice_rope = EditorRope::from(parse_slice);
522
523 let reused_count = reused_paragraphs.len();
524 let parsed_para_id_start = if reused_count == 0 {
525 0
526 } else {
527 cache.map(|c| c.next_para_id).unwrap_or(0)
528 };
529
530 tracing::trace!(
531 parsed_para_id_start,
532 reused_count,
533 "slow path: paragraph ID allocation"
534 );
535
536 let cursor_para_override: Option<(usize, SmolStr)> = cache.and_then(|c| {
537 let cached_cursor_idx = c.paragraphs.iter().position(|p| {
538 p.char_range.start <= cursor_offset && cursor_offset <= p.char_range.end
539 })?;
540
541 if cached_cursor_idx < reused_count {
542 return None;
543 }
544
545 let cached_para = &c.paragraphs[cached_cursor_idx];
546 let parsed_index = cached_cursor_idx - reused_count;
547
548 tracing::trace!(
549 cached_cursor_idx,
550 reused_count,
551 parsed_index,
552 cached_id = %cached_para.id,
553 "slow path: cursor paragraph override"
554 );
555
556 Some((parsed_index, cached_para.id.clone()))
557 });
558
559 let mut writer = EditorWriter::<_, _, &E, &I, ()>::new(parse_slice, &slice_rope, parser)
560 .with_auto_incrementing_prefix(parsed_para_id_start)
561 .with_image_resolver(&resolver)
562 .with_embed_provider(embed_provider);
563
564 if let Some((idx, ref prefix)) = cursor_para_override {
565 writer = writer.with_static_prefix_at_index(idx, prefix);
566 }
567
568 if let Some(idx) = entry_index {
569 writer = writer.with_entry_index(idx);
570 }
571
572 let writer_result = match writer.run() {
573 Ok(result) => result,
574 Err(_) => {
575 return IncrementalRenderResult {
576 paragraphs: Vec::new(),
577 cache: RenderCache::default(),
578 collected_refs: vec![],
579 }
580 }
581 };
582
583 let parsed_para_count = writer_result.paragraph_ranges.len();
584
585 let parsed_paragraph_ranges: Vec<_> = writer_result
586 .paragraph_ranges
587 .iter()
588 .map(|(byte_range, char_range)| {
589 (
590 (byte_range.start + parse_start_byte)..(byte_range.end + parse_start_byte),
591 (char_range.start + parse_start_char)..(char_range.end + parse_start_char),
592 )
593 })
594 .collect();
595
596 let paragraph_ranges: Vec<_> = reused_paragraphs
597 .iter()
598 .map(|p| (p.byte_range.clone(), p.char_range.clone()))
599 .chain(parsed_paragraph_ranges.clone())
600 .collect();
601
602 if tracing::enabled!(tracing::Level::TRACE) {
603 for (i, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
604 let preview: String = text
605 .slice(char_range.clone())
606 .map(|s| s.chars().take(30).collect())
607 .unwrap_or_default();
608 tracing::trace!(
609 target: "weaver::render",
610 para_idx = i,
611 char_range = ?char_range,
612 byte_range = ?byte_range,
613 preview = %preview,
614 "paragraph boundary"
615 );
616 }
617 }
618
619 let mut paragraphs = Vec::with_capacity(paragraph_ranges.len());
620 let mut new_cached = Vec::with_capacity(paragraph_ranges.len());
621 let mut all_refs: Vec<ExtractedRef> = Vec::new();
622 let next_para_id = parsed_para_id_start + parsed_para_count;
623 let reused_count = reused_paragraphs.len();
624
625 let cursor_para_idx = paragraph_ranges.iter().position(|(_, char_range)| {
626 char_range.start <= cursor_offset && cursor_offset <= char_range.end
627 });
628
629 tracing::trace!(
630 cursor_offset,
631 ?cursor_para_idx,
632 edit_char_pos = ?edit.map(|e| e.edit_char_pos),
633 reused_count,
634 parsed_count = parsed_paragraph_ranges.len(),
635 "ID assignment: cursor and edit info"
636 );
637
638 for (idx, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
639 let para_source = text
640 .slice(char_range.clone())
641 .map(|s| s.to_string())
642 .unwrap_or_default();
643 let source_hash = hash_source(¶_source);
644 let is_cursor_para = Some(idx) == cursor_para_idx;
645
646 let is_reused = idx < reused_count;
647
648 let para_id = if is_reused {
649 reused_paragraphs[idx].id.clone()
650 } else {
651 let parsed_idx = idx - reused_count;
652
653 let id = if let Some((override_idx, ref override_prefix)) = cursor_para_override {
654 if parsed_idx == override_idx {
655 override_prefix.clone()
656 } else {
657 make_paragraph_id(parsed_para_id_start + parsed_idx)
658 }
659 } else {
660 make_paragraph_id(parsed_para_id_start + parsed_idx)
661 };
662
663 if idx < 3 || is_cursor_para {
664 tracing::trace!(
665 idx,
666 parsed_idx,
667 is_cursor_para,
668 para_id = %id,
669 "slow path: assigned paragraph ID"
670 );
671 }
672
673 id
674 };
675
676 let (html, offset_map, syntax_spans, para_refs) = if is_reused {
677 let reused = &reused_paragraphs[idx];
678 (
679 reused.html.clone(),
680 reused.offset_map.clone(),
681 reused.syntax_spans.clone(),
682 reused.collected_refs.clone(),
683 )
684 } else {
685 let parsed_idx = idx - reused_count;
686 let html = writer_result
687 .html_segments
688 .get(parsed_idx)
689 .cloned()
690 .unwrap_or_default();
691
692 let mut offset_map = writer_result
693 .offset_maps_by_paragraph
694 .get(parsed_idx)
695 .cloned()
696 .unwrap_or_default();
697 for m in &mut offset_map {
698 m.char_range.start += parse_start_char;
699 m.char_range.end += parse_start_char;
700 m.byte_range.start += parse_start_byte;
701 m.byte_range.end += parse_start_byte;
702 }
703
704 let mut syntax_spans = writer_result
705 .syntax_spans_by_paragraph
706 .get(parsed_idx)
707 .cloned()
708 .unwrap_or_default();
709 for s in &mut syntax_spans {
710 s.adjust_positions(parse_start_char as isize);
711 }
712
713 let para_refs = writer_result
714 .collected_refs_by_paragraph
715 .get(parsed_idx)
716 .cloned()
717 .unwrap_or_default();
718 (html, offset_map, syntax_spans, para_refs)
719 };
720
721 all_refs.extend(para_refs.clone());
722
723 new_cached.push(CachedParagraph {
724 id: para_id.clone(),
725 source_hash,
726 byte_range: byte_range.clone(),
727 char_range: char_range.clone(),
728 html: html.clone(),
729 offset_map: offset_map.clone(),
730 syntax_spans: syntax_spans.clone(),
731 collected_refs: para_refs.clone(),
732 });
733
734 paragraphs.push(ParagraphRender {
735 id: para_id,
736 byte_range: byte_range.clone(),
737 char_range: char_range.clone(),
738 html,
739 offset_map,
740 syntax_spans,
741 source_hash,
742 });
743 }
744
745 let new_cache = RenderCache {
746 paragraphs: new_cached,
747 next_node_id: 0,
748 next_syn_id: 0,
749 next_para_id,
750 };
751
752 IncrementalRenderResult {
753 paragraphs,
754 cache: new_cache,
755 collected_refs: all_refs,
756 }
757}