···5566use dioxus::prelude::*;
7788+use super::cursor::restore_cursor_position;
89use super::document::{EditorDocument, Selection};
910use super::offset_map::{SnapDirection, find_nearest_valid_position, is_valid_cursor_position};
1011use super::paragraph::ParagraphRender;
···90919192 match (anchor_rope, focus_rope) {
9293 (Some(anchor), Some(focus)) => {
9494+ let old_offset = doc.cursor.read().offset;
9595+ // Warn if cursor is jumping a large distance - likely a bug
9696+ let jump = if focus > old_offset { focus - old_offset } else { old_offset - focus };
9797+ if jump > 100 {
9898+ tracing::warn!(
9999+ old_offset,
100100+ new_offset = focus,
101101+ jump,
102102+ "sync_cursor_from_dom: LARGE CURSOR JUMP detected"
103103+ );
104104+ }
93105 doc.cursor.write().offset = focus;
94106 if anchor != focus {
95107 doc.selection.set(Some(Selection {
···144156 .or_else(|| element.get_attribute("data-node-id"));
145157146158 if let Some(id) = id {
147147- if id.starts_with('n') && id[1..].parse::<usize>().is_ok() {
159159+ // Match both old-style "n0" and paragraph-prefixed "p-2-n0" node IDs
160160+ let is_node_id = id.starts_with('n') || id.contains("-n");
161161+ if is_node_id {
148162 break Some(id);
149163 }
150164 }
···206220 }
207221 }
208222223223+ // Log what we're looking for
224224+ tracing::trace!(
225225+ node_id = %node_id,
226226+ utf16_offset = utf16_offset_in_container,
227227+ num_paragraphs = paragraphs.len(),
228228+ "dom_position_to_text_offset: looking up mapping"
229229+ );
230230+209231 for para in paragraphs {
210232 for mapping in ¶.offset_map {
211233 if mapping.node_id == node_id {
212234 let mapping_start = mapping.char_offset_in_node;
213235 let mapping_end = mapping.char_offset_in_node + mapping.utf16_len;
214236237237+ tracing::trace!(
238238+ mapping_node_id = %mapping.node_id,
239239+ mapping_start,
240240+ mapping_end,
241241+ char_range_start = mapping.char_range.start,
242242+ char_range_end = mapping.char_range.end,
243243+ "dom_position_to_text_offset: found matching node_id"
244244+ );
245245+215246 if utf16_offset_in_container >= mapping_start
216247 && utf16_offset_in_container <= mapping_end
217248 {
···267298) {
268299}
269300270270-/// Update paragraph DOM elements incrementally.
301301+/// Update paragraph DOM elements incrementally using pool-based surgical diffing.
271302///
272272-/// Only modifies paragraphs that changed (by comparing source_hash).
273273-/// Browser preserves cursor naturally in unchanged paragraphs.
303303+/// Uses stable content-based paragraph IDs for efficient DOM reconciliation:
304304+/// - Unchanged paragraphs (same ID + hash) are not touched
305305+/// - Changed paragraphs (same ID, different hash) get innerHTML updated
306306+/// - New paragraphs get created and inserted at correct position
307307+/// - Removed paragraphs get deleted
274308///
275309/// Returns true if the paragraph containing the cursor was updated.
276310#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
···281315 cursor_offset: usize,
282316 force: bool,
283317) -> bool {
318318+ use std::collections::HashMap;
284319 use wasm_bindgen::JsCast;
285320286321 let window = match web_sys::window() {
···298333 None => return false,
299334 };
300335301301- // Find which paragraph contains cursor
302302- // Use end-inclusive matching: cursor at position N belongs to paragraph (0..N)
303303- // This handles typing at end of paragraph, which is the common case
304304- // The empty paragraph at document end catches any trailing cursor positions
305305- let cursor_para_idx = new_paragraphs
336336+ let mut cursor_para_updated = false;
337337+338338+ // Build lookup for old paragraphs by ID (for syntax span comparison)
339339+ let old_para_map: HashMap<&str, &ParagraphRender> = old_paragraphs
306340 .iter()
307307- .position(|p| p.char_range.start <= cursor_offset && cursor_offset <= p.char_range.end);
341341+ .map(|p| (p.id.as_str(), p))
342342+ .collect();
343343+344344+ // Build pool of existing DOM elements by ID
345345+ let mut old_elements: HashMap<String, web_sys::Element> = HashMap::new();
346346+ let mut child_opt = editor.first_element_child();
347347+ while let Some(child) = child_opt {
348348+ if let Some(id) = child.get_attribute("id") {
349349+ let next = child.next_element_sibling();
350350+ old_elements.insert(id, child);
351351+ child_opt = next;
352352+ } else {
353353+ child_opt = child.next_element_sibling();
354354+ }
355355+ }
356356+357357+ // Track position for insertBefore - starts at first element child
358358+ // (use first_element_child to skip any stray text nodes)
359359+ let mut cursor_node: Option<web_sys::Node> =
360360+ editor.first_element_child().map(|e| e.into());
361361+362362+ // Single pass through new paragraphs
363363+ for new_para in new_paragraphs.iter() {
364364+ let para_id = &new_para.id;
365365+ let new_hash = format!("{:x}", new_para.source_hash);
366366+ let is_cursor_para =
367367+ new_para.char_range.start <= cursor_offset && cursor_offset <= new_para.char_range.end;
308368309309- let mut cursor_para_updated = false;
369369+ if let Some(existing_elem) = old_elements.remove(para_id) {
370370+ // Element exists - check if it needs updating
371371+ let old_hash = existing_elem.get_attribute("data-hash").unwrap_or_default();
372372+ let needs_update = force || old_hash != new_hash;
310373311311- for (idx, new_para) in new_paragraphs.iter().enumerate() {
312312- let para_id = format!("para-{}", idx);
374374+ // Check if element is at correct position (compare as nodes)
375375+ let existing_as_node: &web_sys::Node = existing_elem.as_ref();
376376+ let at_correct_position = cursor_node
377377+ .as_ref()
378378+ .map(|c| c == existing_as_node)
379379+ .unwrap_or(false);
380380+381381+ if !at_correct_position {
382382+ tracing::warn!(
383383+ para_id,
384384+ is_cursor_para,
385385+ "update_paragraph_dom: element not at correct position, moving"
386386+ );
387387+ let _ = editor.insert_before(existing_as_node, cursor_node.as_ref());
388388+ if is_cursor_para {
389389+ cursor_para_updated = true;
390390+ }
391391+ } else {
392392+ // Use next_element_sibling to skip any stray text nodes
393393+ cursor_node = existing_elem.next_element_sibling().map(|e| e.into());
394394+ }
395395+396396+ if needs_update {
397397+ // TESTING: Force innerHTML update to measure timing cost
398398+ // TODO: Remove this flag after benchmarking
399399+ const FORCE_INNERHTML_UPDATE: bool = true;
400400+401401+ // For cursor paragraph: only update if syntax/formatting changed
402402+ // This prevents destroying browser selection during fast typing
403403+ //
404404+ // HOWEVER: we must verify browser actually updated the DOM.
405405+ // PassThrough assumes browser handles edit, but sometimes it doesn't.
406406+ let should_skip_cursor_update = !FORCE_INNERHTML_UPDATE && is_cursor_para && !force && {
407407+ let old_para = old_para_map.get(para_id.as_str());
408408+ let syntax_unchanged = old_para
409409+ .map(|old| old.syntax_spans == new_para.syntax_spans)
410410+ .unwrap_or(false);
313411314314- if let Some(old_para) = old_paragraphs.get(idx) {
315315- if force || new_para.source_hash != old_para.source_hash {
316316- // Changed - clear and update innerHTML
317317- // We clear first to ensure any browser-added content (from IME composition,
318318- // contenteditable quirks, etc.) is fully removed before setting new content
319319- if let Some(elem) = document.get_element_by_id(¶_id) {
320320- if force && cursor_para_idx.is_some() {
321321- // skip re-rendering where the cursor is if we're forcing it
322322- // we don't want to fuck up what the user is doing
412412+ // Verify DOM content length matches expected - if not, browser didn't handle it
413413+ // NOTE: Get inner element (the <p>) not outer div, to avoid counting
414414+ // the newline from </p>\n in the HTML
415415+ let dom_matches_expected = if syntax_unchanged {
416416+ let inner_elem = existing_elem.first_element_child();
417417+ let dom_text = inner_elem
418418+ .as_ref()
419419+ .and_then(|e| e.text_content())
420420+ .unwrap_or_default();
421421+ let expected_len = new_para.byte_range.end - new_para.byte_range.start;
422422+ let dom_len = dom_text.len();
423423+ let matches = dom_len == expected_len;
424424+ // Always log for debugging
425425+ tracing::debug!(
426426+ para_id = %para_id,
427427+ dom_len,
428428+ expected_len,
429429+ matches,
430430+ dom_text = %dom_text,
431431+ "DOM sync check"
432432+ );
433433+ matches
323434 } else {
324324- elem.set_text_content(None); // Clear completely
325325- elem.set_inner_html(&new_para.html);
435435+ false
436436+ };
437437+438438+ syntax_unchanged && dom_matches_expected
439439+ };
440440+441441+ if should_skip_cursor_update {
442442+ tracing::trace!(
443443+ para_id,
444444+ "update_paragraph_dom: skipping cursor para innerHTML (syntax unchanged, DOM verified)"
445445+ );
446446+ // Update hash - browser native editing has the correct content
447447+ let _ = existing_elem.set_attribute("data-hash", &new_hash);
448448+ } else {
449449+ // Timing instrumentation for innerHTML update cost
450450+ let start = web_sys::window()
451451+ .and_then(|w| w.performance())
452452+ .map(|p| p.now());
453453+454454+ existing_elem.set_inner_html(&new_para.html);
455455+ let _ = existing_elem.set_attribute("data-hash", &new_hash);
456456+457457+ if let Some(start_time) = start {
458458+ if let Some(end_time) = web_sys::window()
459459+ .and_then(|w| w.performance())
460460+ .map(|p| p.now())
461461+ {
462462+ let elapsed_ms = end_time - start_time;
463463+ tracing::debug!(
464464+ para_id,
465465+ is_cursor_para,
466466+ elapsed_ms,
467467+ html_len = new_para.html.len(),
468468+ old_hash = %old_hash,
469469+ new_hash = %new_hash,
470470+ "update_paragraph_dom: innerHTML update timing"
471471+ );
472472+ }
326473 }
327327- }
328474329329- if !force {
330330- if Some(idx) == cursor_para_idx {
475475+ if is_cursor_para {
476476+ // Restore cursor synchronously - don't wait for rAF
477477+ // This prevents race conditions with fast typing
478478+ if let Err(e) = restore_cursor_position(
479479+ cursor_offset,
480480+ &new_para.offset_map,
481481+ editor_id,
482482+ None,
483483+ ) {
484484+ tracing::warn!("Synchronous cursor restore failed: {:?}", e);
485485+ }
331486 cursor_para_updated = true;
332487 }
333488 }
334489 }
335490 } else {
491491+ // New element - create and insert at current position
336492 if let Ok(div) = document.create_element("div") {
337337- div.set_id(¶_id);
493493+ div.set_id(para_id);
338494 div.set_inner_html(&new_para.html);
339339- let _ = editor.append_child(&div);
495495+ let _ = div.set_attribute("data-hash", &new_hash);
496496+ let div_node: &web_sys::Node = div.as_ref();
497497+ let _ = editor.insert_before(div_node, cursor_node.as_ref());
340498 }
341499342342- if Some(idx) == cursor_para_idx {
500500+ if is_cursor_para {
343501 cursor_para_updated = true;
344502 }
345503 }
346504 }
347505348348- // Remove extra paragraphs if document got shorter
349349- // Also mark cursor as needing restoration since structure changed
350350- if new_paragraphs.len() < old_paragraphs.len() {
351351- cursor_para_updated = true;
352352- }
353353- // TODO: i think this is the cause of a number of bits of cursor jank
354354- for idx in new_paragraphs.len()..old_paragraphs.len() {
355355- let para_id = format!("para-{}", idx);
356356- if let Some(elem) = document.get_element_by_id(¶_id) {
357357- let _ = elem.remove();
358358- }
506506+ // Remove stale elements (still in pool = not in new paragraphs)
507507+ for (_, elem) in old_elements {
508508+ let _ = elem.remove();
509509+ cursor_para_updated = true; // Structure changed, cursor may need restoration
359510 }
360511361512 cursor_para_updated
···1111/// A rendered paragraph with its source range and offset mappings.
1212#[derive(Debug, Clone, PartialEq)]
1313pub struct ParagraphRender {
1414+ /// Stable content-based ID for DOM diffing (format: `p-{hash_prefix}-{collision_idx}`)
1515+ pub id: String,
1616+1417 /// Source byte range in the rope
1518 pub byte_range: Range<usize>,
1619···3841 let mut hasher = DefaultHasher::new();
3942 text.hash(&mut hasher);
4043 hasher.finish()
4444+}
/// Generate a paragraph ID from monotonic counter.
/// IDs are stable across content changes - only position/cursor determines identity.
pub fn make_paragraph_id(id: usize) -> String {
    // Build "p-{id}" without a format machinery round-trip.
    let mut para_id = String::from("p-");
    para_id.push_str(&id.to_string());
    para_id
}
42514352/// Extract substring from LoroText as String
+395-232
crates/weaver-app/src/components/editor/render.rs
···6677use super::document::EditInfo;
88use super::offset_map::{OffsetMapping, RenderResult};
99-use super::paragraph::{ParagraphRender, hash_source, text_slice_to_string};
99+use super::paragraph::{ParagraphRender, hash_source, make_paragraph_id, text_slice_to_string};
1010use super::writer::{EditorImageResolver, EditorWriter, ImageResolver, SyntaxSpanInfo};
1111use loro::LoroText;
1212use markdown_weaver::Parser;
1313+use std::collections::HashMap;
1314use std::ops::Range;
1415use weaver_common::{EntryIndex, ResolvedContent};
1516···2324 pub next_node_id: usize,
2425 /// Next available syntax span ID for fresh renders
2526 pub next_syn_id: usize,
2727+ /// Next available paragraph ID (monotonic counter)
2828+ pub next_para_id: usize,
2629}
27302831/// A cached paragraph render that can be reused if source hasn't changed.
2932#[derive(Clone, Debug)]
3033pub struct CachedParagraph {
3434+ /// Stable monotonic ID for DOM element identity
3535+ pub id: String,
3136 /// Hash of paragraph source text for change detection
3237 pub source_hash: u64,
3338 /// Byte range in source document
···7277 }
7378}
74798080+/// Insert gap paragraphs for extra whitespace between blocks.
8181+fn add_gap_paragraphs(
8282+ paragraphs: Vec<ParagraphRender>,
8383+ text: &LoroText,
8484+ source: &str,
8585+) -> Vec<ParagraphRender> {
8686+ const MIN_PARAGRAPH_BREAK_INCR: usize = 2; // \n\n
8787+8888+ let mut paragraphs_with_gaps = Vec::with_capacity(paragraphs.len() * 2);
8989+ let mut prev_end_char = 0usize;
9090+ let mut prev_end_byte = 0usize;
9191+9292+ for para in paragraphs {
9393+ let gap_size = para.char_range.start.saturating_sub(prev_end_char);
9494+ if gap_size > MIN_PARAGRAPH_BREAK_INCR {
9595+ let gap_start_char = prev_end_char + MIN_PARAGRAPH_BREAK_INCR;
9696+ let gap_end_char = para.char_range.start;
9797+ let gap_start_byte = prev_end_byte + MIN_PARAGRAPH_BREAK_INCR;
9898+ let gap_end_byte = para.byte_range.start;
9999+100100+ let gap_node_id = format!("gap-{}-{}", gap_start_char, gap_end_char);
101101+ let gap_html = format!(r#"<span id="{}">{}</span>"#, gap_node_id, '\u{200B}');
102102+103103+ paragraphs_with_gaps.push(ParagraphRender {
104104+ id: gap_node_id.clone(),
105105+ byte_range: prev_end_byte..gap_end_byte,
106106+ char_range: prev_end_char..gap_end_char,
107107+ html: gap_html,
108108+ offset_map: vec![OffsetMapping {
109109+ byte_range: prev_end_byte..gap_end_byte,
110110+ char_range: prev_end_char..gap_end_char,
111111+ node_id: gap_node_id,
112112+ char_offset_in_node: 0,
113113+ child_index: None,
114114+ utf16_len: 1,
115115+ }],
116116+ syntax_spans: vec![],
117117+ source_hash: hash_source(&text_slice_to_string(text, gap_start_char..gap_end_char)),
118118+ });
119119+ }
120120+121121+ prev_end_char = para.char_range.end;
122122+ prev_end_byte = para.byte_range.end;
123123+ paragraphs_with_gaps.push(para);
124124+ }
125125+126126+ // Add trailing gap if needed
127127+ let has_trailing_newlines = source.ends_with("\n\n") || source.ends_with("\n");
128128+ if has_trailing_newlines {
129129+ let doc_end_char = text.len_unicode();
130130+ let doc_end_byte = text.len_utf8();
131131+132132+ if doc_end_char > prev_end_char {
133133+ let trailing_node_id = format!("gap-{}-{}", prev_end_char, doc_end_char);
134134+ let trailing_html = format!(r#"<span id="{}">{}</span>"#, trailing_node_id, '\u{200B}');
135135+136136+ paragraphs_with_gaps.push(ParagraphRender {
137137+ id: trailing_node_id.clone(),
138138+ byte_range: prev_end_byte..doc_end_byte,
139139+ char_range: prev_end_char..doc_end_char,
140140+ html: trailing_html,
141141+ offset_map: vec![OffsetMapping {
142142+ byte_range: prev_end_byte..doc_end_byte,
143143+ char_range: prev_end_char..doc_end_char,
144144+ node_id: trailing_node_id,
145145+ char_offset_in_node: 0,
146146+ child_index: None,
147147+ utf16_len: 1,
148148+ }],
149149+ syntax_spans: vec![],
150150+ source_hash: 0,
151151+ });
152152+ }
153153+ }
154154+155155+ paragraphs_with_gaps
156156+}
157157+75158/// Render markdown with incremental caching.
76159///
77160/// Uses cached paragraph renders when possible, only re-rendering changed paragraphs.
7878-/// For "safe" edits (no boundary changes), skips boundary rediscovery entirely.
79161///
80162/// # Parameters
163163+/// - `cursor_offset`: Current cursor position (for finding which NEW paragraph is the cursor para)
164164+/// - `edit`: Edit info for stable ID assignment. Uses `edit_char_pos` to find which OLD cached
165165+/// paragraph to reuse the ID from (since cursor may have moved after the edit).
81166/// - `entry_index`: Optional index for wikilink validation (adds link-valid/link-broken classes)
82167/// - `resolved_content`: Pre-resolved embed content for sync rendering
83168///
···86171pub fn render_paragraphs_incremental(
87172 text: &LoroText,
88173 cache: Option<&RenderCache>,
174174+ cursor_offset: usize,
89175 edit: Option<&EditInfo>,
90176 image_resolver: Option<&EditorImageResolver>,
91177 entry_index: Option<&EntryIndex>,
···102188 if source.is_empty() {
103189 let empty_node_id = "n0".to_string();
104190 let empty_html = format!(r#"<span id="{}">{}</span>"#, empty_node_id, '\u{200B}');
191191+ let para_id = make_paragraph_id(0);
105192106193 let para = ParagraphRender {
194194+ id: para_id.clone(),
107195 byte_range: 0..0,
108196 char_range: 0..0,
109197 html: empty_html.clone(),
···114202115203 let new_cache = RenderCache {
116204 paragraphs: vec![CachedParagraph {
205205+ id: para_id,
117206 source_hash: 0,
118207 byte_range: 0..0,
119208 char_range: 0..0,
···124213 }],
125214 next_node_id: 1,
126215 next_syn_id: 0,
216216+ next_para_id: 1,
127217 };
128218129219 return (vec![para], new_cache, vec![]);
···132222 // Determine if we can use fast path (skip boundary discovery)
133223 // Need cache and non-boundary-affecting edit info (for edit position)
134224 let current_len = text.len_unicode();
225225+ let current_byte_len = text.len_utf8();
135226136227 let use_fast_path = cache.is_some() && edit.is_some() && !is_boundary_affecting(edit.unwrap());
137228···156247157248 // Compute delta from actual length difference, not edit info
158249 // This handles stale edits gracefully (delta = 0 if lengths match)
159159- let cached_len = cache
250250+ let (cached_len, cached_byte_len) = cache
160251 .paragraphs
161252 .last()
162162- .map(|p| p.char_range.end)
163163- .unwrap_or(0);
253253+ .map(|p| (p.char_range.end, p.byte_range.end))
254254+ .unwrap_or((0, 0));
164255 let char_delta = current_len as isize - cached_len as isize;
256256+ let byte_delta = current_byte_len as isize - cached_byte_len as isize;
165257166258 // Adjust each cached paragraph's range
167259 cache
···173265 (p.byte_range.clone(), p.char_range.clone())
174266 } else if p.char_range.start > edit_pos {
175267 // After edit - shift by delta (edit is strictly before this paragraph)
176176- // Calculate byte delta (approximation: assume 1 byte per char for ASCII)
177177- // This is imprecise but boundaries are rediscovered on slow path anyway
178178- let byte_delta = char_delta; // TODO: proper byte calculation
179268 (
180269 apply_delta(p.byte_range.start, byte_delta)
181270 ..apply_delta(p.byte_range.end, byte_delta),
···185274 } else {
186275 // Edit is at or within this paragraph - expand its end
187276 (
188188- p.byte_range.start..apply_delta(p.byte_range.end, char_delta),
277277+ p.byte_range.start..apply_delta(p.byte_range.end, byte_delta),
189278 p.char_range.start..apply_delta(p.char_range.end, char_delta),
190279 )
191280 }
···196285 };
197286198287 // Validate fast path results - if any ranges are invalid, use slow path
199199- let paragraph_ranges = if !paragraph_ranges.is_empty() {
288288+ let use_fast_path = if !paragraph_ranges.is_empty() {
200289 let all_valid = paragraph_ranges
201290 .iter()
202291 .all(|(_, char_range)| char_range.start <= char_range.end);
203203- if all_valid {
204204- paragraph_ranges
205205- } else {
292292+ if !all_valid {
206293 tracing::debug!(
207294 target: "weaver::render",
208295 "fast path produced invalid ranges, falling back to slow path"
209296 );
210210- vec![] // Trigger slow path
297297+ false
298298+ } else {
299299+ true
211300 }
212301 } else {
213213- paragraph_ranges
302302+ false
214303 };
215304216216- // Slow path: run boundary-only pass to discover paragraph boundaries
217217- let paragraph_ranges = if paragraph_ranges.is_empty() {
218218- let boundary_start = crate::perf::now();
219219- let parser =
220220- Parser::new_ext(&source, weaver_renderer::default_md_options()).into_offset_iter();
221221- let mut scratch_output = String::new();
305305+ // ============ FAST PATH ============
306306+ // Reuse cached paragraphs with offset adjustment, only re-render cursor paragraph
307307+ if use_fast_path {
308308+ let fast_start = crate::perf::now();
309309+ let cache = cache.unwrap();
310310+ let edit = edit.unwrap();
311311+ let edit_pos = edit.edit_char_pos;
312312+313313+ // Compute deltas
314314+ let (cached_len, cached_byte_len) = cache
315315+ .paragraphs
316316+ .last()
317317+ .map(|p| (p.char_range.end, p.byte_range.end))
318318+ .unwrap_or((0, 0));
319319+ let char_delta = current_len as isize - cached_len as isize;
320320+ let byte_delta = current_byte_len as isize - cached_byte_len as isize;
321321+322322+ // Find cursor paragraph index
323323+ let cursor_para_idx = cache
324324+ .paragraphs
325325+ .iter()
326326+ .position(|p| p.char_range.start <= edit_pos && edit_pos <= p.char_range.end);
222327223223- let result = match EditorWriter::<_, _, ()>::new_boundary_only(
224224- &source,
225225- text,
226226- parser,
227227- &mut scratch_output,
228228- )
229229- .run()
230230- {
231231- Ok(result) => result.paragraph_ranges,
232232- Err(_) => return (Vec::new(), RenderCache::default(), vec![]),
328328+ let mut paragraphs = Vec::with_capacity(cache.paragraphs.len());
329329+ let mut new_cached = Vec::with_capacity(cache.paragraphs.len());
330330+ let mut all_refs: Vec<weaver_common::ExtractedRef> = Vec::new();
331331+332332+ for (idx, cached_para) in cache.paragraphs.iter().enumerate() {
333333+ let is_cursor_para = Some(idx) == cursor_para_idx;
334334+335335+ // Adjust ranges based on position relative to edit
336336+ let (byte_range, char_range) = if cached_para.char_range.end < edit_pos {
337337+ // Before edit - no change
338338+ (cached_para.byte_range.clone(), cached_para.char_range.clone())
339339+ } else if cached_para.char_range.start > edit_pos {
340340+ // After edit - shift by delta
341341+ (
342342+ apply_delta(cached_para.byte_range.start, byte_delta)
343343+ ..apply_delta(cached_para.byte_range.end, byte_delta),
344344+ apply_delta(cached_para.char_range.start, char_delta)
345345+ ..apply_delta(cached_para.char_range.end, char_delta),
346346+ )
347347+ } else {
348348+ // Contains edit - expand end
349349+ (
350350+ cached_para.byte_range.start..apply_delta(cached_para.byte_range.end, byte_delta),
351351+ cached_para.char_range.start..apply_delta(cached_para.char_range.end, char_delta),
352352+ )
353353+ };
354354+355355+ let para_source = text_slice_to_string(text, char_range.clone());
356356+ let source_hash = hash_source(¶_source);
357357+358358+ if is_cursor_para {
359359+ // Re-render cursor paragraph for fresh syntax detection
360360+ let resolver = image_resolver.cloned().unwrap_or_default();
361361+ let parser = Parser::new_ext(¶_source, weaver_renderer::default_md_options())
362362+ .into_offset_iter();
363363+364364+ let para_doc = loro::LoroDoc::new();
365365+ let para_text = para_doc.get_text("content");
366366+ let _ = para_text.insert(0, ¶_source);
367367+368368+ let mut writer = EditorWriter::<_, &ResolvedContent, &EditorImageResolver>::new(
369369+ ¶_source,
370370+ ¶_text,
371371+ parser,
372372+ )
373373+ .with_image_resolver(&resolver)
374374+ .with_embed_provider(resolved_content);
375375+376376+ if let Some(idx) = entry_index {
377377+ writer = writer.with_entry_index(idx);
378378+ }
379379+380380+ let (html, offset_map, syntax_spans, para_refs) = match writer.run() {
381381+ Ok(result) => {
382382+ // Adjust offsets to be document-absolute
383383+ let mut offset_map = result.offset_maps_by_paragraph.into_iter().next().unwrap_or_default();
384384+ for m in &mut offset_map {
385385+ m.char_range.start += char_range.start;
386386+ m.char_range.end += char_range.start;
387387+ m.byte_range.start += byte_range.start;
388388+ m.byte_range.end += byte_range.start;
389389+ }
390390+ let mut syntax_spans = result.syntax_spans_by_paragraph.into_iter().next().unwrap_or_default();
391391+ for s in &mut syntax_spans {
392392+ s.adjust_positions(char_range.start as isize);
393393+ }
394394+ let para_refs = result.collected_refs_by_paragraph.into_iter().next().unwrap_or_default();
395395+ let html = result.html_segments.into_iter().next().unwrap_or_default();
396396+ (html, offset_map, syntax_spans, para_refs)
397397+ }
398398+ Err(_) => (String::new(), Vec::new(), Vec::new(), Vec::new()),
399399+ };
400400+401401+ all_refs.extend(para_refs.clone());
402402+403403+ new_cached.push(CachedParagraph {
404404+ id: cached_para.id.clone(),
405405+ source_hash,
406406+ byte_range: byte_range.clone(),
407407+ char_range: char_range.clone(),
408408+ html: html.clone(),
409409+ offset_map: offset_map.clone(),
410410+ syntax_spans: syntax_spans.clone(),
411411+ collected_refs: para_refs.clone(),
412412+ });
413413+414414+ paragraphs.push(ParagraphRender {
415415+ id: cached_para.id.clone(),
416416+ byte_range,
417417+ char_range,
418418+ html,
419419+ offset_map,
420420+ syntax_spans,
421421+ source_hash,
422422+ });
423423+ } else {
424424+ // Reuse cached with adjusted offsets
425425+ let mut offset_map = cached_para.offset_map.clone();
426426+ let mut syntax_spans = cached_para.syntax_spans.clone();
427427+428428+ if cached_para.char_range.start > edit_pos {
429429+ // After edit - adjust offsets
430430+ for m in &mut offset_map {
431431+ m.char_range.start = apply_delta(m.char_range.start, char_delta);
432432+ m.char_range.end = apply_delta(m.char_range.end, char_delta);
433433+ m.byte_range.start = apply_delta(m.byte_range.start, byte_delta);
434434+ m.byte_range.end = apply_delta(m.byte_range.end, byte_delta);
435435+ }
436436+ for s in &mut syntax_spans {
437437+ s.adjust_positions(char_delta);
438438+ }
439439+ }
440440+441441+ all_refs.extend(cached_para.collected_refs.clone());
442442+443443+ new_cached.push(CachedParagraph {
444444+ id: cached_para.id.clone(),
445445+ source_hash,
446446+ byte_range: byte_range.clone(),
447447+ char_range: char_range.clone(),
448448+ html: cached_para.html.clone(),
449449+ offset_map: offset_map.clone(),
450450+ syntax_spans: syntax_spans.clone(),
451451+ collected_refs: cached_para.collected_refs.clone(),
452452+ });
453453+454454+ paragraphs.push(ParagraphRender {
455455+ id: cached_para.id.clone(),
456456+ byte_range,
457457+ char_range,
458458+ html: cached_para.html.clone(),
459459+ offset_map,
460460+ syntax_spans,
461461+ source_hash,
462462+ });
463463+ }
464464+ }
465465+466466+ // Add gaps (reuse gap logic from below)
467467+ let paragraphs_with_gaps = add_gap_paragraphs(paragraphs, text, &source);
468468+469469+ let new_cache = RenderCache {
470470+ paragraphs: new_cached,
471471+ next_node_id: 0,
472472+ next_syn_id: 0,
473473+ next_para_id: cache.next_para_id,
233474 };
234234- let boundary_ms = crate::perf::now() - boundary_start;
235235- tracing::debug!(boundary_ms, paragraphs = result.len(), "boundary discovery (slow path)");
236236- result
237237- } else {
238238- paragraph_ranges
475475+476476+ let fast_ms = crate::perf::now() - fast_start;
477477+ tracing::debug!(
478478+ fast_ms,
479479+ paragraphs = paragraphs_with_gaps.len(),
480480+ cursor_para_idx,
481481+ "fast path render timing"
482482+ );
483483+484484+ return (paragraphs_with_gaps, new_cache, all_refs);
485485+ }
486486+487487+ // ============ SLOW PATH ============
488488+ // Full render when boundaries might have changed
489489+ let render_start = crate::perf::now();
490490+ let parser =
491491+ Parser::new_ext(&source, weaver_renderer::default_md_options()).into_offset_iter();
492492+493493+ // Use provided resolver or empty default
494494+ let resolver = image_resolver.cloned().unwrap_or_default();
495495+496496+ // Build writer with all resolvers
497497+ let mut writer = EditorWriter::<_, &ResolvedContent, &EditorImageResolver>::new(
498498+ &source,
499499+ text,
500500+ parser,
501501+ )
502502+ .with_image_resolver(&resolver)
503503+ .with_embed_provider(resolved_content);
504504+505505+ if let Some(idx) = entry_index {
506506+ writer = writer.with_entry_index(idx);
507507+ }
508508+509509+ let writer_result = match writer.run() {
510510+ Ok(result) => result,
511511+ Err(_) => return (Vec::new(), RenderCache::default(), vec![]),
239512 };
513513+514514+ let render_ms = crate::perf::now() - render_start;
515515+516516+ let paragraph_ranges = writer_result.paragraph_ranges.clone();
240517241518 // Log discovered paragraphs
242519 for (i, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
···254531 );
255532 }
256533257257- // Render paragraphs, reusing cache where possible
258258- let render_loop_start = crate::perf::now();
534534+ // Build paragraphs from full render segments
535535+ let build_start = crate::perf::now();
259536 let mut paragraphs = Vec::with_capacity(paragraph_ranges.len());
260537 let mut new_cached = Vec::with_capacity(paragraph_ranges.len());
261538 let mut all_refs: Vec<weaver_common::ExtractedRef> = Vec::new();
262262- let mut node_id_offset = cache.map(|c| c.next_node_id).unwrap_or(0);
263263- let mut syn_id_offset = cache.map(|c| c.next_syn_id).unwrap_or(0);
264264- let mut cache_hits = 0usize;
265265- let mut cache_misses = 0usize;
266266- let mut fresh_render_ms = 0.0f64;
539539+ let mut next_para_id = cache.map(|c| c.next_para_id).unwrap_or(0);
540540+541541+ // Find which paragraph contains cursor (for stable ID assignment)
542542+ let cursor_para_idx = paragraph_ranges.iter().position(|(_, char_range)| {
543543+ char_range.start <= cursor_offset && cursor_offset <= char_range.end
544544+ });
545545+546546+ tracing::debug!(
547547+ cursor_offset,
548548+ ?cursor_para_idx,
549549+ edit_char_pos = ?edit.map(|e| e.edit_char_pos),
550550+ "ID assignment: cursor and edit info"
551551+ );
552552+553553+ // Build hash->cached_para lookup for non-cursor matching
554554+ let cached_by_hash: HashMap<u64, &CachedParagraph> = cache
555555+ .map(|c| c.paragraphs.iter().map(|p| (p.source_hash, p)).collect())
556556+ .unwrap_or_default();
267557268558 for (idx, (byte_range, char_range)) in paragraph_ranges.iter().enumerate() {
269559 let para_source = text_slice_to_string(text, char_range.clone());
270560 let source_hash = hash_source(¶_source);
561561+ let is_cursor_para = Some(idx) == cursor_para_idx;
271562272272- // Check if we have a cached render with matching hash
273273- let cached_match =
274274- cache.and_then(|c| c.paragraphs.iter().find(|p| p.source_hash == source_hash));
563563+ // ID assignment: cursor paragraph matches by edit position, others match by hash
564564+ let para_id = if is_cursor_para {
565565+ let edit_in_this_para = edit
566566+ .map(|e| char_range.start <= e.edit_char_pos && e.edit_char_pos <= char_range.end)
567567+ .unwrap_or(false);
568568+ let lookup_pos = if edit_in_this_para {
569569+ edit.map(|e| e.edit_char_pos).unwrap_or(cursor_offset)
570570+ } else {
571571+ cursor_offset
572572+ };
573573+ let found_cached = cache.and_then(|c| {
574574+ c.paragraphs
575575+ .iter()
576576+ .find(|p| p.char_range.start <= lookup_pos && lookup_pos <= p.char_range.end)
577577+ });
275578276276- let (html, offset_map, syntax_spans, para_refs) = if let Some(cached) = cached_match {
277277- cache_hits += 1;
278278- // Reuse cached HTML, offset map, and syntax spans (adjusted for position)
279279- let char_delta = char_range.start as isize - cached.char_range.start as isize;
280280- let byte_delta = byte_range.start as isize - cached.byte_range.start as isize;
281281-282282- let mut adjusted_map = cached.offset_map.clone();
283283- for mapping in &mut adjusted_map {
284284- mapping.char_range.start =
285285- (mapping.char_range.start as isize + char_delta) as usize;
286286- mapping.char_range.end = (mapping.char_range.end as isize + char_delta) as usize;
287287- mapping.byte_range.start =
288288- (mapping.byte_range.start as isize + byte_delta) as usize;
289289- mapping.byte_range.end = (mapping.byte_range.end as isize + byte_delta) as usize;
579579+ if let Some(cached) = found_cached {
580580+ tracing::debug!(
581581+ lookup_pos,
582582+ edit_in_this_para,
583583+ cursor_offset,
584584+ cached_id = %cached.id,
585585+ cached_range = ?cached.char_range,
586586+ "cursor para: reusing cached ID"
587587+ );
588588+ cached.id.clone()
589589+ } else {
590590+ let id = make_paragraph_id(next_para_id);
591591+ next_para_id += 1;
592592+ id
290593 }
291291-292292- let mut adjusted_syntax = cached.syntax_spans.clone();
293293- for span in &mut adjusted_syntax {
294294- span.adjust_positions(char_delta);
295295- }
296296-297297- // Include cached refs in all_refs
298298- all_refs.extend(cached.collected_refs.clone());
299299-300300- (
301301- cached.html.clone(),
302302- adjusted_map,
303303- adjusted_syntax,
304304- cached.collected_refs.clone(),
305305- )
306594 } else {
307307- cache_misses += 1;
308308- let para_render_start = crate::perf::now();
309309- // Fresh render needed - create detached LoroDoc for this paragraph
310310- let para_doc = loro::LoroDoc::new();
311311- let para_text = para_doc.get_text("content");
312312- let _ = para_text.insert(0, ¶_source);
595595+ // Non-cursor: match by content hash
596596+ cached_by_hash
597597+ .get(&source_hash)
598598+ .map(|p| p.id.clone())
599599+ .unwrap_or_else(|| {
600600+ let id = make_paragraph_id(next_para_id);
601601+ next_para_id += 1;
602602+ id
603603+ })
604604+ };
313605314314- let parser = Parser::new_ext(¶_source, weaver_renderer::default_md_options())
315315- .into_offset_iter();
316316- let mut output = String::new();
606606+ // Get data from full render segments
607607+ let html = writer_result.html_segments.get(idx).cloned().unwrap_or_default();
608608+ let offset_map = writer_result.offset_maps_by_paragraph.get(idx).cloned().unwrap_or_default();
609609+ let syntax_spans = writer_result.syntax_spans_by_paragraph.get(idx).cloned().unwrap_or_default();
610610+ let para_refs = writer_result.collected_refs_by_paragraph.get(idx).cloned().unwrap_or_default();
317611318318- // Use provided resolver or empty default
319319- let resolver = image_resolver.cloned().unwrap_or_default();
320320-321321- // Build writer with optional entry index for wikilink validation
322322- // Pass paragraph's document-level offsets so all embedded char/byte positions are absolute
323323- let mut writer =
324324- EditorWriter::<_, _, &ResolvedContent, &EditorImageResolver>::new_with_all_offsets(
325325- ¶_source,
326326- ¶_text,
327327- parser,
328328- &mut output,
329329- node_id_offset,
330330- syn_id_offset,
331331- char_range.start,
332332- byte_range.start,
333333- )
334334- .with_image_resolver(&resolver)
335335- .with_embed_provider(resolved_content);
336336-337337- if let Some(idx) = entry_index {
338338- writer = writer.with_entry_index(idx);
339339- }
340340-341341- let (mut offset_map, mut syntax_spans, para_refs) = match writer.run() {
342342- Ok(result) => {
343343- // Update node ID offset
344344- let max_node_id = result
345345- .offset_maps
346346- .iter()
347347- .filter_map(|m| {
348348- m.node_id
349349- .strip_prefix("n")
350350- .and_then(|s| s.parse::<usize>().ok())
351351- })
352352- .max()
353353- .unwrap_or(node_id_offset);
354354- node_id_offset = max_node_id + 1;
355355-356356- // Update syn ID offset
357357- let max_syn_id = result
358358- .syntax_spans
359359- .iter()
360360- .filter_map(|s| {
361361- s.syn_id
362362- .strip_prefix("s")
363363- .and_then(|id| id.parse::<usize>().ok())
364364- })
365365- .max()
366366- .unwrap_or(syn_id_offset.saturating_sub(1));
367367- syn_id_offset = max_syn_id + 1;
368368-369369- // Collect refs from this paragraph
370370- let para_refs = result.collected_refs;
371371- all_refs.extend(para_refs.clone());
372372-373373- (result.offset_maps, result.syntax_spans, para_refs)
374374- }
375375- Err(_) => (Vec::new(), Vec::new(), Vec::new()),
376376- };
377377-378378- // Offsets are already document-absolute since we pass char_range.start/byte_range.start
379379- // to the writer constructor
380380- fresh_render_ms += crate::perf::now() - para_render_start;
381381- (output, offset_map, syntax_spans, para_refs)
382382- };
612612+ all_refs.extend(para_refs.clone());
383613384614 // Store in cache
385615 new_cached.push(CachedParagraph {
616616+ id: para_id.clone(),
386617 source_hash,
387618 byte_range: byte_range.clone(),
388619 char_range: char_range.clone(),
389620 html: html.clone(),
390621 offset_map: offset_map.clone(),
391622 syntax_spans: syntax_spans.clone(),
392392- collected_refs: para_refs,
623623+ collected_refs: para_refs.clone(),
393624 });
394625395626 paragraphs.push(ParagraphRender {
627627+ id: para_id,
396628 byte_range: byte_range.clone(),
397629 char_range: char_range.clone(),
398630 html,
···402634 });
403635 }
404636405405- // Insert gap paragraphs for EXTRA whitespace between blocks.
406406- // Standard paragraph break is 2 newlines (\n\n) - no gap needed for that.
407407- // Gaps are only for whitespace BEYOND the minimum, giving cursor a landing spot.
408408- const MIN_PARAGRAPH_BREAK_INCR: usize = 2; // \n\n
409409-410410- let mut paragraphs_with_gaps = Vec::with_capacity(paragraphs.len() * 2);
411411- let mut prev_end_char = 0usize;
412412- let mut prev_end_byte = 0usize;
413413-414414- for para in paragraphs {
415415- // Check for gap before this paragraph - only if MORE than minimum break
416416- let gap_size = para.char_range.start.saturating_sub(prev_end_char);
417417- if gap_size > MIN_PARAGRAPH_BREAK_INCR {
418418- // Visible gap element covers EXTRA whitespace beyond minimum break
419419- let gap_start_char = prev_end_char + MIN_PARAGRAPH_BREAK_INCR;
420420- let gap_end_char = para.char_range.start;
421421- let gap_start_byte = prev_end_byte + MIN_PARAGRAPH_BREAK_INCR;
422422- let gap_end_byte = para.byte_range.start;
423423-424424- // Position-based ID: deterministic, stable across cache states
425425- let gap_node_id = format!("gap-{}-{}", gap_start_char, gap_end_char);
426426- let gap_html = format!(r#"<span id="{}">{}</span>"#, gap_node_id, '\u{200B}');
427427-428428- // Gap paragraph covers ALL whitespace (like trailing gaps do)
429429- // so cursor anywhere in the inter-paragraph zone triggers restoration
430430- paragraphs_with_gaps.push(ParagraphRender {
431431- byte_range: prev_end_byte..gap_end_byte,
432432- char_range: prev_end_char..gap_end_char,
433433- html: gap_html,
434434- offset_map: vec![OffsetMapping {
435435- byte_range: prev_end_byte..gap_end_byte,
436436- char_range: prev_end_char..gap_end_char,
437437- node_id: gap_node_id,
438438- char_offset_in_node: 0,
439439- child_index: None,
440440- utf16_len: 1,
441441- }],
442442- syntax_spans: vec![],
443443- source_hash: hash_source(&text_slice_to_string(text, gap_start_char..gap_end_char)),
444444- });
445445- }
446446-447447- prev_end_char = para.char_range.end;
448448- prev_end_byte = para.byte_range.end;
449449- paragraphs_with_gaps.push(para);
450450- }
451451-452452- // Add trailing gap if needed
453453- let has_trailing_newlines = source.ends_with("\n\n") || source.ends_with("\n");
454454- if has_trailing_newlines {
455455- let doc_end_char = text.len_unicode();
456456- let doc_end_byte = text.len_utf8();
457457-458458- if doc_end_char > prev_end_char {
459459- // Position-based ID for trailing gap
460460- let trailing_node_id = format!("gap-{}-{}", prev_end_char, doc_end_char);
461461- let trailing_html = format!(r#"<span id="{}">{}</span>"#, trailing_node_id, '\u{200B}');
637637+ let build_ms = crate::perf::now() - build_start;
638638+ tracing::debug!(
639639+ render_ms,
640640+ build_ms,
641641+ paragraphs = paragraph_ranges.len(),
642642+ "single-pass render timing"
643643+ );
462644463463- paragraphs_with_gaps.push(ParagraphRender {
464464- byte_range: prev_end_byte..doc_end_byte,
465465- char_range: prev_end_char..doc_end_char,
466466- html: trailing_html,
467467- offset_map: vec![OffsetMapping {
468468- byte_range: prev_end_byte..doc_end_byte,
469469- char_range: prev_end_char..doc_end_char,
470470- node_id: trailing_node_id,
471471- char_offset_in_node: 0,
472472- child_index: None,
473473- utf16_len: 1,
474474- }],
475475- syntax_spans: vec![],
476476- source_hash: 0,
477477- });
478478- }
479479- }
645645+ let paragraphs_with_gaps = add_gap_paragraphs(paragraphs, text, &source);
480646481647 let new_cache = RenderCache {
482648 paragraphs: new_cached,
483483- next_node_id: node_id_offset,
484484- next_syn_id: syn_id_offset,
649649+ next_node_id: 0, // Not used in single-pass mode
650650+ next_syn_id: 0, // Not used in single-pass mode
651651+ next_para_id,
485652 };
486653487487- let render_loop_ms = crate::perf::now() - render_loop_start;
488654 let total_ms = crate::perf::now() - fn_start;
489655 tracing::debug!(
490656 total_ms,
491491- render_loop_ms,
492492- fresh_render_ms,
493493- cache_hits,
494494- cache_misses,
657657+ render_ms,
658658+ build_ms,
495659 paragraphs = paragraphs_with_gaps.len(),
496496- use_fast_path,
497660 "render_paragraphs_incremental timing"
498661 );
499662
+7-5
crates/weaver-app/src/components/editor/tests.rs
···5959 let text = doc.get_text("content");
6060 text.insert(0, input).unwrap();
6161 let (paragraphs, _cache, _refs) =
6262- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
6262+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
6363 paragraphs.iter().map(TestParagraph::from).collect()
6464}
6565···648648 // Initial state: "#" is a valid empty heading
649649 text.insert(0, "#").unwrap();
650650 let (paras1, cache1, _refs1) =
651651- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
651651+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
652652653653 eprintln!("State 1 ('#'): {}", paras1[0].html);
654654 assert!(paras1[0].html.contains("<h1"), "# alone should be heading");
···662662 let (paras2, _cache2, _refs2) = render_paragraphs_incremental(
663663 &text,
664664 Some(&cache1),
665665+ 0,
665666 None,
666667 None,
667668 None,
···776777 let text = doc.get_text("content");
777778 text.insert(0, input).unwrap();
778779 let (paragraphs, _cache, _refs) =
779779- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
780780+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
780781781782 // With standard \n\n break, we expect 2 paragraphs (no gap element)
782783 // Paragraph ranges include some trailing whitespace from markdown parsing
···806807 let text = doc.get_text("content");
807808 text.insert(0, input).unwrap();
808809 let (paragraphs, _cache, _refs) =
809809- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
810810+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
810811811812 // With extra newlines, we expect 3 elements: para, gap, para
812813 assert_eq!(
···907908 text.insert(0, input).unwrap();
908909909910 let (paras1, cache1, _refs1) =
910910- render_paragraphs_incremental(&text, None, None, None, None, &ResolvedContent::default());
911911+ render_paragraphs_incremental(&text, None, 0, None, None, None, &ResolvedContent::default());
911912 assert!(!cache1.paragraphs.is_empty(), "Cache should be populated");
912913913914 // Second render with same content should reuse cache
914915 let (paras2, _cache2, _refs2) = render_paragraphs_incremental(
915916 &text,
916917 Some(&cache1),
918918+ 0,
917919 None,
918920 None,
919921 None,