···56use dioxus::prelude::*;
708use super::document::{EditorDocument, Selection};
9use super::offset_map::{SnapDirection, find_nearest_valid_position, is_valid_cursor_position};
10use super::paragraph::ParagraphRender;
···9091 match (anchor_rope, focus_rope) {
92 (Some(anchor), Some(focus)) => {
93            doc.cursor.write().offset = focus;
94 if anchor != focus {
95 doc.selection.set(Some(Selection {
···144 .or_else(|| element.get_attribute("data-node-id"));
145146 if let Some(id) = id {
147- if id.starts_with('n') && id[1..].parse::<usize>().is_ok() {
148                break Some(id);
149 }
150 }
···206 }
207 }
208
209    for para in paragraphs {
210        for mapping in &para.offset_map {
211 if mapping.node_id == node_id {
212 let mapping_start = mapping.char_offset_in_node;
213 let mapping_end = mapping.char_offset_in_node + mapping.utf16_len;
214
215                if utf16_offset_in_container >= mapping_start
216 && utf16_offset_in_container <= mapping_end
217 {
···267) {
268}
269270-/// Update paragraph DOM elements incrementally.
271///
272-/// Only modifies paragraphs that changed (by comparing source_hash).
273-/// Browser preserves cursor naturally in unchanged paragraphs.
274///
275/// Returns true if the paragraph containing the cursor was updated.
276#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
···281 cursor_offset: usize,
282 force: bool,
283) -> bool {
284    use wasm_bindgen::JsCast;
285286 let window = match web_sys::window() {
···298 None => return false,
299 };
300301- // Find which paragraph contains cursor
302- // Use end-inclusive matching: cursor at position N belongs to paragraph (0..N)
303- // This handles typing at end of paragraph, which is the common case
304- // The empty paragraph at document end catches any trailing cursor positions
305- let cursor_para_idx = new_paragraphs
306 .iter()
307- .position(|p| p.char_range.start <= cursor_offset && cursor_offset <= p.char_range.end);
308
309-    let mut cursor_para_updated = false;
310
311-    for (idx, new_para) in new_paragraphs.iter().enumerate() {
312-        let para_id = format!("para-{}", idx);
313
314-        if let Some(old_para) = old_paragraphs.get(idx) {
315- if force || new_para.source_hash != old_para.source_hash {
316- // Changed - clear and update innerHTML
317- // We clear first to ensure any browser-added content (from IME composition,
318- // contenteditable quirks, etc.) is fully removed before setting new content
319-                if let Some(elem) = document.get_element_by_id(&para_id) {
320-                    if force && cursor_para_idx.is_some() {
321-                        // skip re-rendering where the cursor is if we're forcing it
322-                        // we don't want to clobber what the user is doing
323                    } else {
324-                        elem.set_text_content(None); // Clear completely
325-                        elem.set_inner_html(&new_para.html);
326                    }
327- }
328
329-            if !force {
330-                if Some(idx) == cursor_para_idx {
331                    cursor_para_updated = true;
332 }
333 }
334 }
335 } else {
336            if let Ok(div) = document.create_element("div") {
337-                div.set_id(&para_id);
338                div.set_inner_html(&new_para.html);
339-                let _ = editor.append_child(&div);
340            }
341342- if Some(idx) == cursor_para_idx {
343 cursor_para_updated = true;
344 }
345 }
346 }
347348- // Remove extra paragraphs if document got shorter
349- // Also mark cursor as needing restoration since structure changed
350- if new_paragraphs.len() < old_paragraphs.len() {
351- cursor_para_updated = true;
352- }
353- // TODO: i think this is the cause of a number of bits of cursor jank
354- for idx in new_paragraphs.len()..old_paragraphs.len() {
355- let para_id = format!("para-{}", idx);
356-        if let Some(elem) = document.get_element_by_id(&para_id) {
357- let _ = elem.remove();
358- }
359 }
360361 cursor_para_updated
···56use dioxus::prelude::*;
78+use super::cursor::restore_cursor_position;
9use super::document::{EditorDocument, Selection};
10use super::offset_map::{SnapDirection, find_nearest_valid_position, is_valid_cursor_position};
11use super::paragraph::ParagraphRender;
···9192 match (anchor_rope, focus_rope) {
93 (Some(anchor), Some(focus)) => {
94+ let old_offset = doc.cursor.read().offset;
95+ // Warn if cursor is jumping a large distance - likely a bug
96+ let jump = if focus > old_offset { focus - old_offset } else { old_offset - focus };
97+ if jump > 100 {
98+ tracing::warn!(
99+ old_offset,
100+ new_offset = focus,
101+ jump,
102+ "sync_cursor_from_dom: LARGE CURSOR JUMP detected"
103+ );
104+ }
105 doc.cursor.write().offset = focus;
106 if anchor != focus {
107 doc.selection.set(Some(Selection {
···156 .or_else(|| element.get_attribute("data-node-id"));
157158 if let Some(id) = id {
159+ // Match both old-style "n0" and paragraph-prefixed "p-2-n0" node IDs
160+ let is_node_id = id.starts_with('n') || id.contains("-n");
161+ if is_node_id {
162 break Some(id);
163 }
164 }
···220 }
221 }
222223+ // Log what we're looking for
224+ tracing::trace!(
225+ node_id = %node_id,
226+ utf16_offset = utf16_offset_in_container,
227+ num_paragraphs = paragraphs.len(),
228+ "dom_position_to_text_offset: looking up mapping"
229+ );
230+231 for para in paragraphs {
232            for mapping in &para.offset_map {
233 if mapping.node_id == node_id {
234 let mapping_start = mapping.char_offset_in_node;
235 let mapping_end = mapping.char_offset_in_node + mapping.utf16_len;
236237+ tracing::trace!(
238+ mapping_node_id = %mapping.node_id,
239+ mapping_start,
240+ mapping_end,
241+ char_range_start = mapping.char_range.start,
242+ char_range_end = mapping.char_range.end,
243+ "dom_position_to_text_offset: found matching node_id"
244+ );
245+246 if utf16_offset_in_container >= mapping_start
247 && utf16_offset_in_container <= mapping_end
248 {
···298) {
299}
300301+/// Update paragraph DOM elements incrementally using pool-based surgical diffing.
302///
303+/// Uses stable content-based paragraph IDs for efficient DOM reconciliation:
304+/// - Unchanged paragraphs (same ID + hash) are not touched
305+/// - Changed paragraphs (same ID, different hash) get innerHTML updated
306+/// - New paragraphs get created and inserted at correct position
307+/// - Removed paragraphs get deleted
308///
309/// Returns true if the paragraph containing the cursor was updated.
310#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
···315 cursor_offset: usize,
316 force: bool,
317) -> bool {
318+ use std::collections::HashMap;
319 use wasm_bindgen::JsCast;
320321 let window = match web_sys::window() {
···333 None => return false,
334 };
335336+ let mut cursor_para_updated = false;
337+338+ // Build lookup for old paragraphs by ID (for syntax span comparison)
339+ let old_para_map: HashMap<&str, &ParagraphRender> = old_paragraphs
340        .iter()
341+ .map(|p| (p.id.as_str(), p))
342+ .collect();
343+344+ // Build pool of existing DOM elements by ID
345+ let mut old_elements: HashMap<String, web_sys::Element> = HashMap::new();
346+ let mut child_opt = editor.first_element_child();
347+ while let Some(child) = child_opt {
348+ if let Some(id) = child.get_attribute("id") {
349+ let next = child.next_element_sibling();
350+ old_elements.insert(id, child);
351+ child_opt = next;
352+ } else {
353+ child_opt = child.next_element_sibling();
354+ }
355+ }
356+357+ // Track position for insertBefore - starts at first element child
358+ // (use first_element_child to skip any stray text nodes)
359+ let mut cursor_node: Option<web_sys::Node> =
360+ editor.first_element_child().map(|e| e.into());
361+362+ // Single pass through new paragraphs
363+ for new_para in new_paragraphs.iter() {
364+ let para_id = &new_para.id;
365+ let new_hash = format!("{:x}", new_para.source_hash);
366+ let is_cursor_para =
367+ new_para.char_range.start <= cursor_offset && cursor_offset <= new_para.char_range.end;
368369+ if let Some(existing_elem) = old_elements.remove(para_id) {
370+ // Element exists - check if it needs updating
371+ let old_hash = existing_elem.get_attribute("data-hash").unwrap_or_default();
372+ let needs_update = force || old_hash != new_hash;
373374+ // Check if element is at correct position (compare as nodes)
375+ let existing_as_node: &web_sys::Node = existing_elem.as_ref();
376+ let at_correct_position = cursor_node
377+ .as_ref()
378+ .map(|c| c == existing_as_node)
379+ .unwrap_or(false);
380+381+ if !at_correct_position {
382+ tracing::warn!(
383+ para_id,
384+ is_cursor_para,
385+ "update_paragraph_dom: element not at correct position, moving"
386+ );
387+ let _ = editor.insert_before(existing_as_node, cursor_node.as_ref());
388+ if is_cursor_para {
389+ cursor_para_updated = true;
390+ }
391+ } else {
392+ // Use next_element_sibling to skip any stray text nodes
393+ cursor_node = existing_elem.next_element_sibling().map(|e| e.into());
394+ }
395+396+ if needs_update {
397+ // TESTING: Force innerHTML update to measure timing cost
398+ // TODO: Remove this flag after benchmarking
399+ const FORCE_INNERHTML_UPDATE: bool = true;
400+401+ // For cursor paragraph: only update if syntax/formatting changed
402+ // This prevents destroying browser selection during fast typing
403+ //
404+ // HOWEVER: we must verify browser actually updated the DOM.
405+ // PassThrough assumes browser handles edit, but sometimes it doesn't.
406+ let should_skip_cursor_update = !FORCE_INNERHTML_UPDATE && is_cursor_para && !force && {
407+ let old_para = old_para_map.get(para_id.as_str());
408+ let syntax_unchanged = old_para
409+ .map(|old| old.syntax_spans == new_para.syntax_spans)
410+ .unwrap_or(false);
411412+ // Verify DOM content length matches expected - if not, browser didn't handle it
413+ // NOTE: Get inner element (the <p>) not outer div, to avoid counting
414+ // the newline from </p>\n in the HTML
415+ let dom_matches_expected = if syntax_unchanged {
416+ let inner_elem = existing_elem.first_element_child();
417+ let dom_text = inner_elem
418+ .as_ref()
419+ .and_then(|e| e.text_content())
420+ .unwrap_or_default();
421+ let expected_len = new_para.byte_range.end - new_para.byte_range.start;
422+ let dom_len = dom_text.len();
423+ let matches = dom_len == expected_len;
424+ // Always log for debugging
425+ tracing::debug!(
426+ para_id = %para_id,
427+ dom_len,
428+ expected_len,
429+ matches,
430+ dom_text = %dom_text,
431+ "DOM sync check"
432+ );
433+ matches
434 } else {
435+ false
436+ };
437+438+ syntax_unchanged && dom_matches_expected
439+ };
440+441+ if should_skip_cursor_update {
442+ tracing::trace!(
443+ para_id,
444+ "update_paragraph_dom: skipping cursor para innerHTML (syntax unchanged, DOM verified)"
445+ );
446+ // Update hash - browser native editing has the correct content
447+ let _ = existing_elem.set_attribute("data-hash", &new_hash);
448+ } else {
449+ // Timing instrumentation for innerHTML update cost
450+ let start = web_sys::window()
451+ .and_then(|w| w.performance())
452+ .map(|p| p.now());
453+454+ existing_elem.set_inner_html(&new_para.html);
455+ let _ = existing_elem.set_attribute("data-hash", &new_hash);
456+457+ if let Some(start_time) = start {
458+ if let Some(end_time) = web_sys::window()
459+ .and_then(|w| w.performance())
460+ .map(|p| p.now())
461+ {
462+ let elapsed_ms = end_time - start_time;
463+ tracing::debug!(
464+ para_id,
465+ is_cursor_para,
466+ elapsed_ms,
467+ html_len = new_para.html.len(),
468+ old_hash = %old_hash,
469+ new_hash = %new_hash,
470+ "update_paragraph_dom: innerHTML update timing"
471+ );
472+ }
473 }
474
475+                    if is_cursor_para {
476+ // Restore cursor synchronously - don't wait for rAF
477+ // This prevents race conditions with fast typing
478+ if let Err(e) = restore_cursor_position(
479+ cursor_offset,
480+ &new_para.offset_map,
481+ editor_id,
482+ None,
483+ ) {
484+ tracing::warn!("Synchronous cursor restore failed: {:?}", e);
485+ }
486 cursor_para_updated = true;
487 }
488 }
489 }
490 } else {
491+ // New element - create and insert at current position
492 if let Ok(div) = document.create_element("div") {
493+ div.set_id(para_id);
494 div.set_inner_html(&new_para.html);
495+ let _ = div.set_attribute("data-hash", &new_hash);
496+ let div_node: &web_sys::Node = div.as_ref();
497+ let _ = editor.insert_before(div_node, cursor_node.as_ref());
498 }
499500+ if is_cursor_para {
501 cursor_para_updated = true;
502 }
503 }
504 }
505506+ // Remove stale elements (still in pool = not in new paragraphs)
507+ for (_, elem) in old_elements {
508+ let _ = elem.remove();
509+ cursor_para_updated = true; // Structure changed, cursor may need restoration
510    }
511512 cursor_para_updated
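
As an aside on the approach above: this is the standard keyed-reconciliation pattern, just applied to paragraph `div`s. A minimal, DOM-free sketch of the same idea (the types and names here are illustrative, not the editor's actual API):

```rust
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct Block {
    id: String,
    hash: u64,
    html: String,
}

/// Reconcile `old` children against `new`, reusing blocks by ID.
/// Returns the new child list plus whether the block at `cursor_idx` was touched.
fn reconcile(old: &[Block], new: &[Block], cursor_idx: usize) -> (Vec<Block>, bool) {
    // Pool of existing blocks keyed by ID, like the element pool above.
    let mut pool: HashMap<&str, &Block> = old.iter().map(|b| (b.id.as_str(), b)).collect();
    let mut out = Vec::with_capacity(new.len());
    let mut cursor_touched = false;

    for (idx, want) in new.iter().enumerate() {
        match pool.remove(want.id.as_str()) {
            // Same ID and hash: keep the existing block untouched.
            Some(have) if have.hash == want.hash => out.push(have.clone()),
            // Same ID with a stale hash, or a brand-new ID: take the new content
            // (the DOM version sets innerHTML / creates the element here).
            _ => {
                out.push(want.clone());
                if idx == cursor_idx {
                    cursor_touched = true;
                }
            }
        }
    }

    // Whatever is left in the pool no longer exists; dropping it is the "remove" step.
    if !pool.is_empty() {
        cursor_touched = true;
    }
    (out, cursor_touched)
}
```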
···11/// A rendered paragraph with its source range and offset mappings.
12#[derive(Debug, Clone, PartialEq)]
13pub struct ParagraphRender {
14    /// Source byte range in the rope
15 pub byte_range: Range<usize>,
16
···
38    let mut hasher = DefaultHasher::new();
39 text.hash(&mut hasher);
40 hasher.finish()
41}
4243/// Extract substring from LoroText as String
···11/// A rendered paragraph with its source range and offset mappings.
12#[derive(Debug, Clone, PartialEq)]
13pub struct ParagraphRender {
14+ /// Stable content-based ID for DOM diffing (format: `p-{hash_prefix}-{collision_idx}`)
15+ pub id: String,
16+17 /// Source byte range in the rope
18 pub byte_range: Range<usize>,
19
···
41    let mut hasher = DefaultHasher::new();
42 text.hash(&mut hasher);
43 hasher.finish()
44+}
45+
46+/// Generate a paragraph ID from a monotonic counter.
47+/// IDs are stable across content changes - only position/cursor determines identity.
48+pub fn make_paragraph_id(id: usize) -> String {
49+ format!("p-{}", id)
50}
5152/// Extract substring from LoroText as String
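
To make the division of labour concrete: the monotonic ID gives a paragraph's DOM element a stable identity across edits, while the content hash decides whether its HTML needs refreshing. A small hypothetical illustration, assuming the `make_paragraph_id` and `hash_source` helpers above are in scope:

```rust
// Sketch only; the literal values are assumptions for illustration.
fn id_vs_hash_demo() {
    let mut next_para_id = 0usize;

    // First render of a fresh paragraph: allocate a new monotonic ID.
    let id = make_paragraph_id(next_para_id); // "p-0"
    next_para_id += 1;
    let _ = next_para_id; // the next fresh paragraph would get "p-1"
    let hash_before = hash_source("hello world");

    // After an edit the ID stays the same (stable DOM identity) but the hash
    // changes, which is what tells update_paragraph_dom to rewrite that element.
    let hash_after = hash_source("hello world!");
    assert_eq!(id, "p-0");
    assert_ne!(hash_before, hash_after);
}
```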
+395-232
crates/weaver-app/src/components/editor/render.rs
···67use super::document::EditInfo;
8use super::offset_map::{OffsetMapping, RenderResult};
9-use super::paragraph::{ParagraphRender, hash_source, text_slice_to_string};
10use super::writer::{EditorImageResolver, EditorWriter, ImageResolver, SyntaxSpanInfo};
11use loro::LoroText;
12use markdown_weaver::Parser;
13use std::ops::Range;
14use weaver_common::{EntryIndex, ResolvedContent};
15
···
23    pub next_node_id: usize,
24 /// Next available syntax span ID for fresh renders
25 pub next_syn_id: usize,
26}
2728/// A cached paragraph render that can be reused if source hasn't changed.
29#[derive(Clone, Debug)]
30pub struct CachedParagraph {
31    /// Hash of paragraph source text for change detection
32 pub source_hash: u64,
33 /// Byte range in source document
···72 }
73}
74
75/// Render markdown with incremental caching.
76///
77/// Uses cached paragraph renders when possible, only re-rendering changed paragraphs.
78-/// For "safe" edits (no boundary changes), skips boundary rediscovery entirely.
79///
80/// # Parameters
81/// - `entry_index`: Optional index for wikilink validation (adds link-valid/link-broken classes)
82/// - `resolved_content`: Pre-resolved embed content for sync rendering
83///
···86pub fn render_paragraphs_incremental(
87 text: &LoroText,
88 cache: Option<&RenderCache>,
89    edit: Option<&EditInfo>,
90 image_resolver: Option<&EditorImageResolver>,
91 entry_index: Option<&EntryIndex>,
···102 if source.is_empty() {
103 let empty_node_id = "n0".to_string();
104 let empty_html = format!(r#"<span id="{}">{}</span>"#, empty_node_id, '\u{200B}');
105
106    let para = ParagraphRender {
107        byte_range: 0..0,
108 char_range: 0..0,
109 html: empty_html.clone(),
···114115 let new_cache = RenderCache {
116 paragraphs: vec![CachedParagraph {
117            source_hash: 0,
118 byte_range: 0..0,
119 char_range: 0..0,
···124 }],
125 next_node_id: 1,
126 next_syn_id: 0,
127    };
128129 return (vec![para], new_cache, vec![]);
···132 // Determine if we can use fast path (skip boundary discovery)
133 // Need cache and non-boundary-affecting edit info (for edit position)
134 let current_len = text.len_unicode();
135
136    let use_fast_path = cache.is_some() && edit.is_some() && !is_boundary_affecting(edit.unwrap());
137
···
156
157        // Compute delta from actual length difference, not edit info
158 // This handles stale edits gracefully (delta = 0 if lengths match)
159- let cached_len = cache
160 .paragraphs
161 .last()
162- .map(|p| p.char_range.end)
163- .unwrap_or(0);
164 let char_delta = current_len as isize - cached_len as isize;
165
166        // Adjust each cached paragraph's range
167 cache
···173 (p.byte_range.clone(), p.char_range.clone())
174 } else if p.char_range.start > edit_pos {
175 // After edit - shift by delta (edit is strictly before this paragraph)
176- // Calculate byte delta (approximation: assume 1 byte per char for ASCII)
177- // This is imprecise but boundaries are rediscovered on slow path anyway
178- let byte_delta = char_delta; // TODO: proper byte calculation
179 (
180 apply_delta(p.byte_range.start, byte_delta)
181 ..apply_delta(p.byte_range.end, byte_delta),
···185 } else {
186 // Edit is at or within this paragraph - expand its end
187 (
188- p.byte_range.start..apply_delta(p.byte_range.end, char_delta),
189 p.char_range.start..apply_delta(p.char_range.end, char_delta),
190 )
191 }
···196 };
197198 // Validate fast path results - if any ranges are invalid, use slow path
199- let paragraph_ranges = if !paragraph_ranges.is_empty() {
200 let all_valid = paragraph_ranges
201 .iter()
202 .all(|(_, char_range)| char_range.start <= char_range.end);
203- if all_valid {
204- paragraph_ranges
205- } else {
206 tracing::debug!(
207 target: "weaver::render",
208 "fast path produced invalid ranges, falling back to slow path"
209 );
210- vec![] // Trigger slow path
211        }
212 } else {
213- paragraph_ranges
214 };
215216- // Slow path: run boundary-only pass to discover paragraph boundaries
217- let paragraph_ranges = if paragraph_ranges.is_empty() {
218- let boundary_start = crate::perf::now();
219- let parser =
220- Parser::new_ext(&source, weaver_renderer::default_md_options()).into_offset_iter();
221- let mut scratch_output = String::new();
222
223-        let result = match EditorWriter::<_, _, ()>::new_boundary_only(
224- &source,
225- text,
226- parser,
227- &mut scratch_output,
228- )
229- .run()
230- {
231- Ok(result) => result.paragraph_ranges,
232- Err(_) => return (Vec::new(), RenderCache::default(), vec![]),
233        };
234- let boundary_ms = crate::perf::now() - boundary_start;
235- tracing::debug!(boundary_ms, paragraphs = result.len(), "boundary discovery (slow path)");
236- result
237- } else {
238- paragraph_ranges
239    };
240
241    // Log discovered paragraphs
242 for (i, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
···254 );
255 }
256257- // Render paragraphs, reusing cache where possible
258- let render_loop_start = crate::perf::now();
259 let mut paragraphs = Vec::with_capacity(paragraph_ranges.len());
260 let mut new_cached = Vec::with_capacity(paragraph_ranges.len());
261 let mut all_refs: Vec<weaver_common::ExtractedRef> = Vec::new();
262- let mut node_id_offset = cache.map(|c| c.next_node_id).unwrap_or(0);
263- let mut syn_id_offset = cache.map(|c| c.next_syn_id).unwrap_or(0);
264- let mut cache_hits = 0usize;
265- let mut cache_misses = 0usize;
266- let mut fresh_render_ms = 0.0f64;
267
268    for (idx, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
269        let para_source = text_slice_to_string(text, char_range.clone());
270        let source_hash = hash_source(&para_source);
271
272-        // Check if we have a cached render with matching hash
273- let cached_match =
274- cache.and_then(|c| c.paragraphs.iter().find(|p| p.source_hash == source_hash));
275
276-        let (html, offset_map, syntax_spans, para_refs) = if let Some(cached) = cached_match {
277- cache_hits += 1;
278- // Reuse cached HTML, offset map, and syntax spans (adjusted for position)
279- let char_delta = char_range.start as isize - cached.char_range.start as isize;
280- let byte_delta = byte_range.start as isize - cached.byte_range.start as isize;
281-282- let mut adjusted_map = cached.offset_map.clone();
283- for mapping in &mut adjusted_map {
284- mapping.char_range.start =
285- (mapping.char_range.start as isize + char_delta) as usize;
286- mapping.char_range.end = (mapping.char_range.end as isize + char_delta) as usize;
287- mapping.byte_range.start =
288- (mapping.byte_range.start as isize + byte_delta) as usize;
289- mapping.byte_range.end = (mapping.byte_range.end as isize + byte_delta) as usize;
290 }
291-292- let mut adjusted_syntax = cached.syntax_spans.clone();
293- for span in &mut adjusted_syntax {
294- span.adjust_positions(char_delta);
295- }
296-297- // Include cached refs in all_refs
298- all_refs.extend(cached.collected_refs.clone());
299-300- (
301- cached.html.clone(),
302- adjusted_map,
303- adjusted_syntax,
304- cached.collected_refs.clone(),
305- )
306 } else {
307- cache_misses += 1;
308- let para_render_start = crate::perf::now();
309- // Fresh render needed - create detached LoroDoc for this paragraph
310- let para_doc = loro::LoroDoc::new();
311- let para_text = para_doc.get_text("content");
312-        let _ = para_text.insert(0, &para_source);
313
314-        let parser = Parser::new_ext(&para_source, weaver_renderer::default_md_options())
315- .into_offset_iter();
316- let mut output = String::new();
317
318-        // Use provided resolver or empty default
319- let resolver = image_resolver.cloned().unwrap_or_default();
320-321- // Build writer with optional entry index for wikilink validation
322- // Pass paragraph's document-level offsets so all embedded char/byte positions are absolute
323- let mut writer =
324- EditorWriter::<_, _, &ResolvedContent, &EditorImageResolver>::new_with_all_offsets(
325-                    &para_source,
326-                    &para_text,
327- parser,
328- &mut output,
329- node_id_offset,
330- syn_id_offset,
331- char_range.start,
332- byte_range.start,
333- )
334- .with_image_resolver(&resolver)
335- .with_embed_provider(resolved_content);
336-337- if let Some(idx) = entry_index {
338- writer = writer.with_entry_index(idx);
339- }
340-341- let (mut offset_map, mut syntax_spans, para_refs) = match writer.run() {
342- Ok(result) => {
343- // Update node ID offset
344- let max_node_id = result
345- .offset_maps
346- .iter()
347- .filter_map(|m| {
348- m.node_id
349- .strip_prefix("n")
350- .and_then(|s| s.parse::<usize>().ok())
351- })
352- .max()
353- .unwrap_or(node_id_offset);
354- node_id_offset = max_node_id + 1;
355-356- // Update syn ID offset
357- let max_syn_id = result
358- .syntax_spans
359- .iter()
360- .filter_map(|s| {
361- s.syn_id
362- .strip_prefix("s")
363- .and_then(|id| id.parse::<usize>().ok())
364- })
365- .max()
366- .unwrap_or(syn_id_offset.saturating_sub(1));
367- syn_id_offset = max_syn_id + 1;
368-369- // Collect refs from this paragraph
370- let para_refs = result.collected_refs;
371- all_refs.extend(para_refs.clone());
372-373- (result.offset_maps, result.syntax_spans, para_refs)
374- }
375- Err(_) => (Vec::new(), Vec::new(), Vec::new()),
376- };
377-378- // Offsets are already document-absolute since we pass char_range.start/byte_range.start
379- // to the writer constructor
380- fresh_render_ms += crate::perf::now() - para_render_start;
381- (output, offset_map, syntax_spans, para_refs)
382- };
383384 // Store in cache
385 new_cached.push(CachedParagraph {
386            source_hash,
387 byte_range: byte_range.clone(),
388 char_range: char_range.clone(),
389 html: html.clone(),
390 offset_map: offset_map.clone(),
391 syntax_spans: syntax_spans.clone(),
392- collected_refs: para_refs,
393 });
394395 paragraphs.push(ParagraphRender {
396            byte_range: byte_range.clone(),
397 char_range: char_range.clone(),
398 html,
···402 });
403 }
404405- // Insert gap paragraphs for EXTRA whitespace between blocks.
406- // Standard paragraph break is 2 newlines (\n\n) - no gap needed for that.
407- // Gaps are only for whitespace BEYOND the minimum, giving cursor a landing spot.
408- const MIN_PARAGRAPH_BREAK_INCR: usize = 2; // \n\n
409-410- let mut paragraphs_with_gaps = Vec::with_capacity(paragraphs.len() * 2);
411- let mut prev_end_char = 0usize;
412- let mut prev_end_byte = 0usize;
413-414- for para in paragraphs {
415- // Check for gap before this paragraph - only if MORE than minimum break
416- let gap_size = para.char_range.start.saturating_sub(prev_end_char);
417- if gap_size > MIN_PARAGRAPH_BREAK_INCR {
418- // Visible gap element covers EXTRA whitespace beyond minimum break
419- let gap_start_char = prev_end_char + MIN_PARAGRAPH_BREAK_INCR;
420- let gap_end_char = para.char_range.start;
421- let gap_start_byte = prev_end_byte + MIN_PARAGRAPH_BREAK_INCR;
422- let gap_end_byte = para.byte_range.start;
423-424- // Position-based ID: deterministic, stable across cache states
425- let gap_node_id = format!("gap-{}-{}", gap_start_char, gap_end_char);
426- let gap_html = format!(r#"<span id="{}">{}</span>"#, gap_node_id, '\u{200B}');
427-428- // Gap paragraph covers ALL whitespace (like trailing gaps do)
429- // so cursor anywhere in the inter-paragraph zone triggers restoration
430- paragraphs_with_gaps.push(ParagraphRender {
431- byte_range: prev_end_byte..gap_end_byte,
432- char_range: prev_end_char..gap_end_char,
433- html: gap_html,
434- offset_map: vec![OffsetMapping {
435- byte_range: prev_end_byte..gap_end_byte,
436- char_range: prev_end_char..gap_end_char,
437- node_id: gap_node_id,
438- char_offset_in_node: 0,
439- child_index: None,
440- utf16_len: 1,
441- }],
442- syntax_spans: vec![],
443- source_hash: hash_source(&text_slice_to_string(text, gap_start_char..gap_end_char)),
444- });
445- }
446-447- prev_end_char = para.char_range.end;
448- prev_end_byte = para.byte_range.end;
449- paragraphs_with_gaps.push(para);
450- }
451-452- // Add trailing gap if needed
453- let has_trailing_newlines = source.ends_with("\n\n") || source.ends_with("\n");
454- if has_trailing_newlines {
455- let doc_end_char = text.len_unicode();
456- let doc_end_byte = text.len_utf8();
457-458- if doc_end_char > prev_end_char {
459- // Position-based ID for trailing gap
460- let trailing_node_id = format!("gap-{}-{}", prev_end_char, doc_end_char);
461- let trailing_html = format!(r#"<span id="{}">{}</span>"#, trailing_node_id, '\u{200B}');
462463- paragraphs_with_gaps.push(ParagraphRender {
464- byte_range: prev_end_byte..doc_end_byte,
465- char_range: prev_end_char..doc_end_char,
466- html: trailing_html,
467- offset_map: vec![OffsetMapping {
468- byte_range: prev_end_byte..doc_end_byte,
469- char_range: prev_end_char..doc_end_char,
470- node_id: trailing_node_id,
471- char_offset_in_node: 0,
472- child_index: None,
473- utf16_len: 1,
474- }],
475- syntax_spans: vec![],
476- source_hash: 0,
477- });
478- }
479- }
480481 let new_cache = RenderCache {
482 paragraphs: new_cached,
483- next_node_id: node_id_offset,
484- next_syn_id: syn_id_offset,
485    };
486487- let render_loop_ms = crate::perf::now() - render_loop_start;
488 let total_ms = crate::perf::now() - fn_start;
489 tracing::debug!(
490 total_ms,
491- render_loop_ms,
492- fresh_render_ms,
493- cache_hits,
494- cache_misses,
495 paragraphs = paragraphs_with_gaps.len(),
496- use_fast_path,
497 "render_paragraphs_incremental timing"
498 );
499
···67use super::document::EditInfo;
8use super::offset_map::{OffsetMapping, RenderResult};
9+use super::paragraph::{ParagraphRender, hash_source, make_paragraph_id, text_slice_to_string};
10use super::writer::{EditorImageResolver, EditorWriter, ImageResolver, SyntaxSpanInfo};
11use loro::LoroText;
12use markdown_weaver::Parser;
13+use std::collections::HashMap;
14use std::ops::Range;
15use weaver_common::{EntryIndex, ResolvedContent};
16
···
24    pub next_node_id: usize,
25 /// Next available syntax span ID for fresh renders
26 pub next_syn_id: usize,
27+ /// Next available paragraph ID (monotonic counter)
28+ pub next_para_id: usize,
29}
3031/// A cached paragraph render that can be reused if source hasn't changed.
32#[derive(Clone, Debug)]
33pub struct CachedParagraph {
34+ /// Stable monotonic ID for DOM element identity
35+ pub id: String,
36 /// Hash of paragraph source text for change detection
37 pub source_hash: u64,
38 /// Byte range in source document
···77 }
78}
7980+/// Insert gap paragraphs for extra whitespace between blocks.
81+fn add_gap_paragraphs(
82+ paragraphs: Vec<ParagraphRender>,
83+ text: &LoroText,
84+ source: &str,
85+) -> Vec<ParagraphRender> {
86+ const MIN_PARAGRAPH_BREAK_INCR: usize = 2; // \n\n
87+88+ let mut paragraphs_with_gaps = Vec::with_capacity(paragraphs.len() * 2);
89+ let mut prev_end_char = 0usize;
90+ let mut prev_end_byte = 0usize;
91+92+ for para in paragraphs {
93+ let gap_size = para.char_range.start.saturating_sub(prev_end_char);
94+ if gap_size > MIN_PARAGRAPH_BREAK_INCR {
95+ let gap_start_char = prev_end_char + MIN_PARAGRAPH_BREAK_INCR;
96+ let gap_end_char = para.char_range.start;
97+ let gap_start_byte = prev_end_byte + MIN_PARAGRAPH_BREAK_INCR;
98+ let gap_end_byte = para.byte_range.start;
99+100+ let gap_node_id = format!("gap-{}-{}", gap_start_char, gap_end_char);
101+ let gap_html = format!(r#"<span id="{}">{}</span>"#, gap_node_id, '\u{200B}');
102+103+ paragraphs_with_gaps.push(ParagraphRender {
104+ id: gap_node_id.clone(),
105+ byte_range: prev_end_byte..gap_end_byte,
106+ char_range: prev_end_char..gap_end_char,
107+ html: gap_html,
108+ offset_map: vec![OffsetMapping {
109+ byte_range: prev_end_byte..gap_end_byte,
110+ char_range: prev_end_char..gap_end_char,
111+ node_id: gap_node_id,
112+ char_offset_in_node: 0,
113+ child_index: None,
114+ utf16_len: 1,
115+ }],
116+ syntax_spans: vec![],
117+ source_hash: hash_source(&text_slice_to_string(text, gap_start_char..gap_end_char)),
118+ });
119+ }
120+121+ prev_end_char = para.char_range.end;
122+ prev_end_byte = para.byte_range.end;
123+ paragraphs_with_gaps.push(para);
124+ }
125+126+ // Add trailing gap if needed
127+ let has_trailing_newlines = source.ends_with("\n\n") || source.ends_with("\n");
128+ if has_trailing_newlines {
129+ let doc_end_char = text.len_unicode();
130+ let doc_end_byte = text.len_utf8();
131+132+ if doc_end_char > prev_end_char {
133+ let trailing_node_id = format!("gap-{}-{}", prev_end_char, doc_end_char);
134+ let trailing_html = format!(r#"<span id="{}">{}</span>"#, trailing_node_id, '\u{200B}');
135+136+ paragraphs_with_gaps.push(ParagraphRender {
137+ id: trailing_node_id.clone(),
138+ byte_range: prev_end_byte..doc_end_byte,
139+ char_range: prev_end_char..doc_end_char,
140+ html: trailing_html,
141+ offset_map: vec![OffsetMapping {
142+ byte_range: prev_end_byte..doc_end_byte,
143+ char_range: prev_end_char..doc_end_char,
144+ node_id: trailing_node_id,
145+ char_offset_in_node: 0,
146+ child_index: None,
147+ utf16_len: 1,
148+ }],
149+ syntax_spans: vec![],
150+ source_hash: 0,
151+ });
152+ }
153+ }
154+155+ paragraphs_with_gaps
156+}
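
To ground the constant above: a standard paragraph break is exactly `\n\n`, so a gap element is only emitted for whitespace beyond that minimum. A tiny hypothetical illustration of the arithmetic (plain numbers, no rendering involved):

```rust
// Sketch of the gap arithmetic only. For "aaa\n\n\n\nbbb":
// the first paragraph ends at char 3 and the second starts at char 7.
fn gap_demo() {
    const MIN_PARAGRAPH_BREAK_INCR: usize = 2; // \n\n

    let prev_end_char = 3usize;
    let next_start_char = 7usize;

    let gap_size = next_start_char.saturating_sub(prev_end_char); // 4
    if gap_size > MIN_PARAGRAPH_BREAK_INCR {
        // The visible gap span represents only the extra newlines beyond \n\n,
        // even though the gap paragraph's char_range covers the whole break.
        let gap_start_char = prev_end_char + MIN_PARAGRAPH_BREAK_INCR; // 5
        let gap_end_char = next_start_char; // 7
        assert_eq!((gap_start_char, gap_end_char), (5, 7));
    }
}
```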
157+158/// Render markdown with incremental caching.
159///
160/// Uses cached paragraph renders when possible, only re-rendering changed paragraphs.
161///
162/// # Parameters
163+/// - `cursor_offset`: Current cursor position (for finding which NEW paragraph is the cursor para)
164+/// - `edit`: Edit info for stable ID assignment. Uses `edit_char_pos` to find which OLD cached
165+/// paragraph to reuse the ID from (since cursor may have moved after the edit).
166/// - `entry_index`: Optional index for wikilink validation (adds link-valid/link-broken classes)
167/// - `resolved_content`: Pre-resolved embed content for sync rendering
168///
···171pub fn render_paragraphs_incremental(
172 text: &LoroText,
173 cache: Option<&RenderCache>,
174+ cursor_offset: usize,
175 edit: Option<&EditInfo>,
176 image_resolver: Option<&EditorImageResolver>,
177 entry_index: Option<&EntryIndex>,
···188 if source.is_empty() {
189 let empty_node_id = "n0".to_string();
190 let empty_html = format!(r#"<span id="{}">{}</span>"#, empty_node_id, '\u{200B}');
191+ let para_id = make_paragraph_id(0);
192193 let para = ParagraphRender {
194+ id: para_id.clone(),
195 byte_range: 0..0,
196 char_range: 0..0,
197 html: empty_html.clone(),
···
202
203    let new_cache = RenderCache {
204 paragraphs: vec![CachedParagraph {
205+ id: para_id,
206 source_hash: 0,
207 byte_range: 0..0,
208 char_range: 0..0,
···213 }],
214 next_node_id: 1,
215 next_syn_id: 0,
216+ next_para_id: 1,
217 };
218219 return (vec![para], new_cache, vec![]);
···222 // Determine if we can use fast path (skip boundary discovery)
223 // Need cache and non-boundary-affecting edit info (for edit position)
224 let current_len = text.len_unicode();
225+ let current_byte_len = text.len_utf8();
226227 let use_fast_path = cache.is_some() && edit.is_some() && !is_boundary_affecting(edit.unwrap());
228
···
247
248        // Compute delta from actual length difference, not edit info
249 // This handles stale edits gracefully (delta = 0 if lengths match)
250+ let (cached_len, cached_byte_len) = cache
251 .paragraphs
252 .last()
253+ .map(|p| (p.char_range.end, p.byte_range.end))
254+ .unwrap_or((0, 0));
255 let char_delta = current_len as isize - cached_len as isize;
256+ let byte_delta = current_byte_len as isize - cached_byte_len as isize;
257258 // Adjust each cached paragraph's range
259 cache
···265 (p.byte_range.clone(), p.char_range.clone())
266 } else if p.char_range.start > edit_pos {
267 // After edit - shift by delta (edit is strictly before this paragraph)
268                (
269 apply_delta(p.byte_range.start, byte_delta)
270 ..apply_delta(p.byte_range.end, byte_delta),
···274 } else {
275 // Edit is at or within this paragraph - expand its end
276 (
277+ p.byte_range.start..apply_delta(p.byte_range.end, byte_delta),
278 p.char_range.start..apply_delta(p.char_range.end, char_delta),
279 )
280 }
···285 };
286287 // Validate fast path results - if any ranges are invalid, use slow path
288+ let use_fast_path = if !paragraph_ranges.is_empty() {
289 let all_valid = paragraph_ranges
290 .iter()
291 .all(|(_, char_range)| char_range.start <= char_range.end);
292+ if !all_valid {
293            tracing::debug!(
294 target: "weaver::render",
295 "fast path produced invalid ranges, falling back to slow path"
296 );
297+ false
298+ } else {
299+ true
300 }
301 } else {
302+ false
303 };
304305+ // ============ FAST PATH ============
306+ // Reuse cached paragraphs with offset adjustment, only re-render cursor paragraph
307+ if use_fast_path {
308+ let fast_start = crate::perf::now();
309+ let cache = cache.unwrap();
310+ let edit = edit.unwrap();
311+ let edit_pos = edit.edit_char_pos;
312+313+ // Compute deltas
314+ let (cached_len, cached_byte_len) = cache
315+ .paragraphs
316+ .last()
317+ .map(|p| (p.char_range.end, p.byte_range.end))
318+ .unwrap_or((0, 0));
319+ let char_delta = current_len as isize - cached_len as isize;
320+ let byte_delta = current_byte_len as isize - cached_byte_len as isize;
321+322+ // Find cursor paragraph index
323+ let cursor_para_idx = cache
324+ .paragraphs
325+ .iter()
326+ .position(|p| p.char_range.start <= edit_pos && edit_pos <= p.char_range.end);
327328+ let mut paragraphs = Vec::with_capacity(cache.paragraphs.len());
329+ let mut new_cached = Vec::with_capacity(cache.paragraphs.len());
330+ let mut all_refs: Vec<weaver_common::ExtractedRef> = Vec::new();
331+332+ for (idx, cached_para) in cache.paragraphs.iter().enumerate() {
333+ let is_cursor_para = Some(idx) == cursor_para_idx;
334+335+ // Adjust ranges based on position relative to edit
336+ let (byte_range, char_range) = if cached_para.char_range.end < edit_pos {
337+ // Before edit - no change
338+ (cached_para.byte_range.clone(), cached_para.char_range.clone())
339+ } else if cached_para.char_range.start > edit_pos {
340+ // After edit - shift by delta
341+ (
342+ apply_delta(cached_para.byte_range.start, byte_delta)
343+ ..apply_delta(cached_para.byte_range.end, byte_delta),
344+ apply_delta(cached_para.char_range.start, char_delta)
345+ ..apply_delta(cached_para.char_range.end, char_delta),
346+ )
347+ } else {
348+ // Contains edit - expand end
349+ (
350+ cached_para.byte_range.start..apply_delta(cached_para.byte_range.end, byte_delta),
351+ cached_para.char_range.start..apply_delta(cached_para.char_range.end, char_delta),
352+ )
353+ };
354+355+ let para_source = text_slice_to_string(text, char_range.clone());
356+            let source_hash = hash_source(&para_source);
357+358+ if is_cursor_para {
359+ // Re-render cursor paragraph for fresh syntax detection
360+ let resolver = image_resolver.cloned().unwrap_or_default();
361+                let parser = Parser::new_ext(&para_source, weaver_renderer::default_md_options())
362+ .into_offset_iter();
363+364+ let para_doc = loro::LoroDoc::new();
365+ let para_text = para_doc.get_text("content");
366+                let _ = para_text.insert(0, &para_source);
367+368+ let mut writer = EditorWriter::<_, &ResolvedContent, &EditorImageResolver>::new(
369+                    &para_source,
370+                    &para_text,
371+ parser,
372+ )
373+ .with_image_resolver(&resolver)
374+ .with_embed_provider(resolved_content);
375+376+ if let Some(idx) = entry_index {
377+ writer = writer.with_entry_index(idx);
378+ }
379+380+ let (html, offset_map, syntax_spans, para_refs) = match writer.run() {
381+ Ok(result) => {
382+ // Adjust offsets to be document-absolute
383+ let mut offset_map = result.offset_maps_by_paragraph.into_iter().next().unwrap_or_default();
384+ for m in &mut offset_map {
385+ m.char_range.start += char_range.start;
386+ m.char_range.end += char_range.start;
387+ m.byte_range.start += byte_range.start;
388+ m.byte_range.end += byte_range.start;
389+ }
390+ let mut syntax_spans = result.syntax_spans_by_paragraph.into_iter().next().unwrap_or_default();
391+ for s in &mut syntax_spans {
392+ s.adjust_positions(char_range.start as isize);
393+ }
394+ let para_refs = result.collected_refs_by_paragraph.into_iter().next().unwrap_or_default();
395+ let html = result.html_segments.into_iter().next().unwrap_or_default();
396+ (html, offset_map, syntax_spans, para_refs)
397+ }
398+ Err(_) => (String::new(), Vec::new(), Vec::new(), Vec::new()),
399+ };
400+401+ all_refs.extend(para_refs.clone());
402+403+ new_cached.push(CachedParagraph {
404+ id: cached_para.id.clone(),
405+ source_hash,
406+ byte_range: byte_range.clone(),
407+ char_range: char_range.clone(),
408+ html: html.clone(),
409+ offset_map: offset_map.clone(),
410+ syntax_spans: syntax_spans.clone(),
411+ collected_refs: para_refs.clone(),
412+ });
413+414+ paragraphs.push(ParagraphRender {
415+ id: cached_para.id.clone(),
416+ byte_range,
417+ char_range,
418+ html,
419+ offset_map,
420+ syntax_spans,
421+ source_hash,
422+ });
423+ } else {
424+ // Reuse cached with adjusted offsets
425+ let mut offset_map = cached_para.offset_map.clone();
426+ let mut syntax_spans = cached_para.syntax_spans.clone();
427+428+ if cached_para.char_range.start > edit_pos {
429+ // After edit - adjust offsets
430+ for m in &mut offset_map {
431+ m.char_range.start = apply_delta(m.char_range.start, char_delta);
432+ m.char_range.end = apply_delta(m.char_range.end, char_delta);
433+ m.byte_range.start = apply_delta(m.byte_range.start, byte_delta);
434+ m.byte_range.end = apply_delta(m.byte_range.end, byte_delta);
435+ }
436+ for s in &mut syntax_spans {
437+ s.adjust_positions(char_delta);
438+ }
439+ }
440+441+ all_refs.extend(cached_para.collected_refs.clone());
442+443+ new_cached.push(CachedParagraph {
444+ id: cached_para.id.clone(),
445+ source_hash,
446+ byte_range: byte_range.clone(),
447+ char_range: char_range.clone(),
448+ html: cached_para.html.clone(),
449+ offset_map: offset_map.clone(),
450+ syntax_spans: syntax_spans.clone(),
451+ collected_refs: cached_para.collected_refs.clone(),
452+ });
453+454+ paragraphs.push(ParagraphRender {
455+ id: cached_para.id.clone(),
456+ byte_range,
457+ char_range,
458+ html: cached_para.html.clone(),
459+ offset_map,
460+ syntax_spans,
461+ source_hash,
462+ });
463+ }
464+ }
465+
466+        // Add gaps via the shared add_gap_paragraphs helper
467+ let paragraphs_with_gaps = add_gap_paragraphs(paragraphs, text, &source);
468+469+ let new_cache = RenderCache {
470+ paragraphs: new_cached,
471+ next_node_id: 0,
472+ next_syn_id: 0,
473+ next_para_id: cache.next_para_id,
474 };
475+476+ let fast_ms = crate::perf::now() - fast_start;
477+ tracing::debug!(
478+ fast_ms,
479+ paragraphs = paragraphs_with_gaps.len(),
480+ cursor_para_idx,
481+ "fast path render timing"
482+ );
483+484+ return (paragraphs_with_gaps, new_cache, all_refs);
485+ }
486+487+ // ============ SLOW PATH ============
488+ // Full render when boundaries might have changed
489+ let render_start = crate::perf::now();
490+ let parser =
491+ Parser::new_ext(&source, weaver_renderer::default_md_options()).into_offset_iter();
492+493+ // Use provided resolver or empty default
494+ let resolver = image_resolver.cloned().unwrap_or_default();
495+496+ // Build writer with all resolvers
497+ let mut writer = EditorWriter::<_, &ResolvedContent, &EditorImageResolver>::new(
498+ &source,
499+ text,
500+ parser,
501+ )
502+ .with_image_resolver(&resolver)
503+ .with_embed_provider(resolved_content);
504+505+ if let Some(idx) = entry_index {
506+ writer = writer.with_entry_index(idx);
507+ }
508+509+ let writer_result = match writer.run() {
510+ Ok(result) => result,
511+ Err(_) => return (Vec::new(), RenderCache::default(), vec![]),
512 };
513+514+ let render_ms = crate::perf::now() - render_start;
515+516+ let paragraph_ranges = writer_result.paragraph_ranges.clone();
517518 // Log discovered paragraphs
519 for (i, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
···531 );
532 }
533534+ // Build paragraphs from full render segments
535+ let build_start = crate::perf::now();
536 let mut paragraphs = Vec::with_capacity(paragraph_ranges.len());
537 let mut new_cached = Vec::with_capacity(paragraph_ranges.len());
538 let mut all_refs: Vec<weaver_common::ExtractedRef> = Vec::new();
539+ let mut next_para_id = cache.map(|c| c.next_para_id).unwrap_or(0);
540+541+ // Find which paragraph contains cursor (for stable ID assignment)
542+ let cursor_para_idx = paragraph_ranges.iter().position(|(_, char_range)| {
543+ char_range.start <= cursor_offset && cursor_offset <= char_range.end
544+ });
545+546+ tracing::debug!(
547+ cursor_offset,
548+ ?cursor_para_idx,
549+ edit_char_pos = ?edit.map(|e| e.edit_char_pos),
550+ "ID assignment: cursor and edit info"
551+ );
552+553+ // Build hash->cached_para lookup for non-cursor matching
554+ let cached_by_hash: HashMap<u64, &CachedParagraph> = cache
555+ .map(|c| c.paragraphs.iter().map(|p| (p.source_hash, p)).collect())
556+ .unwrap_or_default();
557558 for (idx, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
559 let para_source = text_slice_to_string(text, char_range.clone());
560        let source_hash = hash_source(&para_source);
561+ let is_cursor_para = Some(idx) == cursor_para_idx;
562563+ // ID assignment: cursor paragraph matches by edit position, others match by hash
564+ let para_id = if is_cursor_para {
565+ let edit_in_this_para = edit
566+ .map(|e| char_range.start <= e.edit_char_pos && e.edit_char_pos <= char_range.end)
567+ .unwrap_or(false);
568+ let lookup_pos = if edit_in_this_para {
569+ edit.map(|e| e.edit_char_pos).unwrap_or(cursor_offset)
570+ } else {
571+ cursor_offset
572+ };
573+ let found_cached = cache.and_then(|c| {
574+ c.paragraphs
575+ .iter()
576+ .find(|p| p.char_range.start <= lookup_pos && lookup_pos <= p.char_range.end)
577+ });
578579+ if let Some(cached) = found_cached {
580+ tracing::debug!(
581+ lookup_pos,
582+ edit_in_this_para,
583+ cursor_offset,
584+ cached_id = %cached.id,
585+ cached_range = ?cached.char_range,
586+ "cursor para: reusing cached ID"
587+ );
588+ cached.id.clone()
589+ } else {
590+ let id = make_paragraph_id(next_para_id);
591+ next_para_id += 1;
592+ id
593 }
594        } else {
595+ // Non-cursor: match by content hash
596+ cached_by_hash
597+ .get(&source_hash)
598+ .map(|p| p.id.clone())
599+ .unwrap_or_else(|| {
600+ let id = make_paragraph_id(next_para_id);
601+ next_para_id += 1;
602+ id
603+ })
604+ };
605606+ // Get data from full render segments
607+ let html = writer_result.html_segments.get(idx).cloned().unwrap_or_default();
608+ let offset_map = writer_result.offset_maps_by_paragraph.get(idx).cloned().unwrap_or_default();
609+ let syntax_spans = writer_result.syntax_spans_by_paragraph.get(idx).cloned().unwrap_or_default();
610+ let para_refs = writer_result.collected_refs_by_paragraph.get(idx).cloned().unwrap_or_default();
611612+ all_refs.extend(para_refs.clone());
613
614        // Store in cache
615 new_cached.push(CachedParagraph {
616+ id: para_id.clone(),
617 source_hash,
618 byte_range: byte_range.clone(),
619 char_range: char_range.clone(),
620 html: html.clone(),
621 offset_map: offset_map.clone(),
622 syntax_spans: syntax_spans.clone(),
623+ collected_refs: para_refs.clone(),
624 });
625626 paragraphs.push(ParagraphRender {
627+ id: para_id,
628 byte_range: byte_range.clone(),
629 char_range: char_range.clone(),
630 html,
···634 });
635 }
636637+ let build_ms = crate::perf::now() - build_start;
638+ tracing::debug!(
639+ render_ms,
640+ build_ms,
641+ paragraphs = paragraph_ranges.len(),
642+ "single-pass render timing"
643+ );
644
645+    let paragraphs_with_gaps = add_gap_paragraphs(paragraphs, text, &source);
646
647    let new_cache = RenderCache {
648 paragraphs: new_cached,
649+ next_node_id: 0, // Not used in single-pass mode
650+ next_syn_id: 0, // Not used in single-pass mode
651+ next_para_id,
652 };
653
654    let total_ms = crate::perf::now() - fn_start;
655 tracing::debug!(
656 total_ms,
657+ render_ms,
658+ build_ms,
659        paragraphs = paragraphs_with_gaps.len(),
660        "render_paragraphs_incremental timing"
661 );
662
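
The fast path's range bookkeeping boils down to one rule: paragraphs strictly after the edit shift by the length delta, the paragraph containing the edit grows or shrinks at its end, and earlier paragraphs are untouched. A self-contained sketch of that adjustment (the `apply_delta` below is a hypothetical stand-in for the helper used above):

```rust
use std::ops::Range;

// Hypothetical stand-in for the `apply_delta` helper referenced in the diff.
fn apply_delta(pos: usize, delta: isize) -> usize {
    (pos as isize + delta).max(0) as usize
}

/// Shift a paragraph's char range given the edit position and length delta.
fn adjust_range(range: &Range<usize>, edit_pos: usize, delta: isize) -> Range<usize> {
    if range.end < edit_pos {
        // Entirely before the edit: unchanged.
        range.clone()
    } else if range.start > edit_pos {
        // Entirely after the edit: both endpoints shift.
        apply_delta(range.start, delta)..apply_delta(range.end, delta)
    } else {
        // Contains the edit: only the end moves.
        range.start..apply_delta(range.end, delta)
    }
}

#[test]
fn shifts_only_later_paragraphs() {
    // Insert 3 chars at position 5.
    assert_eq!(adjust_range(&(0..4), 5, 3), 0..4);
    assert_eq!(adjust_range(&(4..9), 5, 3), 4..12);
    assert_eq!(adjust_range(&(10..20), 5, 3), 13..23);
}
```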
+7-5
crates/weaver-app/src/components/editor/tests.rs
···59 let text = doc.get_text("content");
60 text.insert(0, input).unwrap();
61 let (paragraphs, _cache, _refs) =
62- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
63 paragraphs.iter().map(TestParagraph::from).collect()
64}
65
···
648    // Initial state: "#" is a valid empty heading
649 text.insert(0, "#").unwrap();
650 let (paras1, cache1, _refs1) =
651- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
652653 eprintln!("State 1 ('#'): {}", paras1[0].html);
654 assert!(paras1[0].html.contains("<h1"), "# alone should be heading");
···662 let (paras2, _cache2, _refs2) = render_paragraphs_incremental(
663 &text,
664 Some(&cache1),
665        None,
666 None,
667 None,
···776 let text = doc.get_text("content");
777 text.insert(0, input).unwrap();
778 let (paragraphs, _cache, _refs) =
779- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
780781 // With standard \n\n break, we expect 2 paragraphs (no gap element)
782 // Paragraph ranges include some trailing whitespace from markdown parsing
···806 let text = doc.get_text("content");
807 text.insert(0, input).unwrap();
808 let (paragraphs, _cache, _refs) =
809- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
810811 // With extra newlines, we expect 3 elements: para, gap, para
812 assert_eq!(
···907 text.insert(0, input).unwrap();
908909 let (paras1, cache1, _refs1) =
910- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
911 assert!(!cache1.paragraphs.is_empty(), "Cache should be populated");
912913 // Second render with same content should reuse cache
914 let (paras2, _cache2, _refs2) = render_paragraphs_incremental(
915 &text,
916 Some(&cache1),
917        None,
918 None,
919 None,
···59 let text = doc.get_text("content");
60 text.insert(0, input).unwrap();
61 let (paragraphs, _cache, _refs) =
62+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
63 paragraphs.iter().map(TestParagraph::from).collect()
64}
65
···
648    // Initial state: "#" is a valid empty heading
649 text.insert(0, "#").unwrap();
650 let (paras1, cache1, _refs1) =
651+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
652653 eprintln!("State 1 ('#'): {}", paras1[0].html);
654 assert!(paras1[0].html.contains("<h1"), "# alone should be heading");
···662 let (paras2, _cache2, _refs2) = render_paragraphs_incremental(
663 &text,
664 Some(&cache1),
665+ 0,
666 None,
667 None,
668 None,
···777 let text = doc.get_text("content");
778 text.insert(0, input).unwrap();
779 let (paragraphs, _cache, _refs) =
780+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
781782 // With standard \n\n break, we expect 2 paragraphs (no gap element)
783 // Paragraph ranges include some trailing whitespace from markdown parsing
···807 let text = doc.get_text("content");
808 text.insert(0, input).unwrap();
809 let (paragraphs, _cache, _refs) =
810+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
811812 // With extra newlines, we expect 3 elements: para, gap, para
813 assert_eq!(
···908 text.insert(0, input).unwrap();
909910 let (paras1, cache1, _refs1) =
911+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
912 assert!(!cache1.paragraphs.is_empty(), "Cache should be populated");
913914 // Second render with same content should reuse cache
915 let (paras2, _cache2, _refs2) = render_paragraphs_incremental(
916 &text,
917 Some(&cache1),
918+ 0,
919 None,
920 None,
921 None,
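
One small observation on the updated tests: every call site now threads an extra `0` through the argument list. If that gets noisy, a thin wrapper keeps the churn local; a hedged sketch, assuming the seven-argument signature used above (`text`, `cache`, `cursor_offset`, `edit`, `image_resolver`, `entry_index`, `resolved_content`):

```rust
// Hypothetical test helper, not part of the diff: wraps the common
// "no cache, cursor at 0, no edit/resolver/index" case used by most tests.
fn render_simple(text: &loro::LoroText) -> Vec<ParagraphRender> {
    let (paragraphs, _cache, _refs) = render_paragraphs_incremental(
        text,
        None, // cache
        0,    // cursor_offset: these tests don't exercise cursor-dependent paths
        None, // edit
        None, // image_resolver
        None, // entry_index
        &ResolvedContent::default(),
    );
    paragraphs
}
```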