Personal memory agent

Add speakers link-import and seed-from-imports commands

Two new CLI commands for the speakers app:
- `link-import`: links an import participant name to a journal entity by adding it as an aka, with alias collision validation
- `seed-from-imports`: scans import-stream segments for speaker-attributed conversation transcripts and seeds voiceprints from corresponding embeddings

Files changed:
- apps/speakers/call.py (new commands + updated docstring)
- apps/speakers/bootstrap.py (new seed_from_imports function + updated docstring)
- apps/speakers/tests/conftest.py (stream kwarg on create_segment, new create_import_segment helper)
- apps/speakers/tests/test_seed_imports.py (new test file, 12 tests)

+689 -2
+189 -1
apps/speakers/bootstrap.py
··· 9 9 subtract the owner's sentences, then saves the remaining embeddings as 10 10 that speaker's voiceprint. 11 11 12 + Import seeding: Scans import-stream segments for conversation transcripts 13 + with per-line speaker attribution, maps speakers to journal entities, and 14 + saves corresponding embeddings as voiceprints. 15 + 12 16 Name resolution: Compares voiceprint centroids between name variants. 13 17 Pairs with cosine similarity > 0.90 are the same person. Unambiguous 14 18 variants are auto-merged by adding the short name as an aka on the ··· 33 37 load_journal_entity, 34 38 save_journal_entity, 35 39 ) 36 - from think.utils import day_dirs, now_ms, segment_path 40 + from think.utils import day_dirs, iter_segments, now_ms, segment_path 37 41 38 42 logger = logging.getLogger(__name__) 39 43 ··· 270 274 stats["segments_scanned"], 271 275 sum(len(v) for v in entity_embeddings.values()), 272 276 ) 277 + 278 + # Batch save all collected embeddings 279 + if not dry_run: 280 + for entity_id, emb_list in entity_embeddings.items(): 281 + try: 282 + saved = _save_voiceprints_batch(entity_id, emb_list) 283 + stats["embeddings_saved"] += saved 284 + except Exception as e: 285 + name = entity_names.get(entity_id, entity_id) 286 + stats["errors"].append(f"Failed to save for {name}: {e}") 287 + logger.exception("Failed to save voiceprints for %s", entity_id) 288 + else: 289 + stats["embeddings_saved"] = sum(len(v) for v in entity_embeddings.values()) 290 + 291 + return stats 292 + 293 + 294 + # Generic speaker names to skip (AI conversation imports) 295 + _GENERIC_SPEAKERS = frozenset({"Human", "Assistant", "human", "assistant", ""}) 296 + 297 + 298 + def seed_from_imports(dry_run: bool = False) -> dict[str, Any]: 299 + """Seed voiceprints from speaker-attributed import transcripts. 300 + 301 + Scans import-stream segments for conversation_transcript.jsonl files 302 + with per-line speaker attribution. 
Maps speaker names to journal 303 + entities and saves corresponding embeddings as voiceprints. 304 + 305 + Unlike bootstrap_voiceprints(), this does NOT create new entities — 306 + unmatched speaker names are skipped. 307 + 308 + Args: 309 + dry_run: If True, report what would be saved without saving 310 + 311 + Returns: 312 + Dict with statistics about the seed run 313 + """ 314 + ( 315 + load_embeddings_file, 316 + normalize_embedding, 317 + _, 318 + _, 319 + ) = _routes_helpers() 320 + 321 + # Load owner centroid — required for owner subtraction 322 + centroid_data = load_owner_centroid() 323 + if centroid_data is None: 324 + return {"error": "No confirmed owner centroid. Run owner detection first."} 325 + 326 + owner_centroid, owner_threshold = centroid_data 327 + 328 + # Load all journal entities for speaker name matching 329 + journal_entities = load_all_journal_entities() 330 + entities_list = [e for e in journal_entities.values() if not e.get("blocked")] 331 + 332 + stats: dict[str, Any] = { 333 + "segments_scanned": 0, 334 + "segments_with_speakers": 0, 335 + "speakers_found": {}, 336 + "embeddings_saved": 0, 337 + "embeddings_skipped_owner": 0, 338 + "embeddings_skipped_duplicate": 0, 339 + "errors": [], 340 + } 341 + 342 + # Collect embeddings per entity for efficient batch saves 343 + entity_embeddings: dict[str, list[tuple[np.ndarray, dict]]] = defaultdict(list) 344 + entity_existing: dict[str, set] = {} 345 + entity_names: dict[str, str] = {} 346 + 347 + days = sorted(day_dirs().keys()) 348 + 349 + for day in days: 350 + for stream, seg_key, seg_dir in iter_segments(day): 351 + # Only process import streams 352 + if not stream.startswith("import."): 353 + continue 354 + 355 + stats["segments_scanned"] += 1 356 + 357 + # Read conversation_transcript.jsonl 358 + jsonl_path = seg_dir / "conversation_transcript.jsonl" 359 + if not jsonl_path.exists(): 360 + continue 361 + 362 + try: 363 + lines = jsonl_path.read_text(encoding="utf-8").strip().split("\n") 
364 + except OSError as e: 365 + stats["errors"].append(f"Failed to read {jsonl_path}: {e}") 366 + continue 367 + 368 + if len(lines) < 2: 369 + continue 370 + 371 + # Build sentence_id -> speaker mapping 372 + # Line 0 is metadata header. Lines 1+ are entries. 373 + # sentence_id is 1-based, matching statement_ids in NPZ. 374 + sid_to_speaker: dict[int, str] = {} 375 + has_real_speakers = False 376 + for line_idx in range(1, len(lines)): 377 + try: 378 + entry = json.loads(lines[line_idx]) 379 + except (json.JSONDecodeError, IndexError): 380 + continue 381 + speaker = entry.get("speaker", "") 382 + if speaker and speaker not in _GENERIC_SPEAKERS: 383 + sid_to_speaker[line_idx] = speaker 384 + has_real_speakers = True 385 + 386 + if not has_real_speakers: 387 + continue 388 + 389 + stats["segments_with_speakers"] += 1 390 + 391 + # Find audio embedding NPZ files in this segment 392 + # Accept both "<source>_audio" pattern and plain "audio" 393 + npz_files = list(seg_dir.glob("*.npz")) 394 + sources = [ 395 + f.stem 396 + for f in npz_files 397 + if f.stem.endswith("_audio") or f.stem == "audio" 398 + ] 399 + if not sources: 400 + continue 401 + 402 + for source in sources: 403 + emb_data = load_embeddings_file(seg_dir / f"{source}.npz") 404 + if emb_data is None: 405 + continue 406 + 407 + embeddings, statement_ids = emb_data 408 + 409 + for embedding, sid in zip(embeddings, statement_ids): 410 + sentence_id = int(sid) 411 + speaker_name = sid_to_speaker.get(sentence_id) 412 + if not speaker_name: 413 + continue 414 + 415 + # Match speaker to entity — skip if no match 416 + entity = find_matching_entity(speaker_name, entities_list) 417 + if not entity: 418 + continue 419 + 420 + entity_id = entity["id"] 421 + entity_name = entity.get("name", speaker_name) 422 + entity_names[entity_id] = entity_name 423 + stats["speakers_found"].setdefault(entity_name, 0) 424 + 425 + # Load existing voiceprint keys for idempotency (once per entity) 426 + if entity_id not in 
entity_existing: 427 + entity_existing[entity_id] = _load_existing_voiceprint_keys( 428 + entity_id 429 + ) 430 + 431 + existing_keys = entity_existing[entity_id] 432 + vp_key = (day, seg_key, source, sentence_id) 433 + 434 + # Idempotency: skip if already saved 435 + if vp_key in existing_keys: 436 + stats["embeddings_skipped_duplicate"] += 1 437 + continue 438 + 439 + normalized = normalize_embedding(embedding) 440 + if normalized is None: 441 + continue 442 + 443 + # Contamination guard: reject embeddings too similar to owner 444 + owner_score = float(np.dot(normalized, owner_centroid)) 445 + if owner_score >= owner_threshold: 446 + stats["embeddings_skipped_owner"] += 1 447 + continue 448 + 449 + metadata = { 450 + "day": day, 451 + "segment_key": seg_key, 452 + "source": source, 453 + "stream": stream, 454 + "sentence_id": sentence_id, 455 + "added_at": now_ms(), 456 + } 457 + 458 + entity_embeddings[entity_id].append((normalized, metadata)) 459 + existing_keys.add(vp_key) 460 + stats["speakers_found"][entity_name] += 1 273 461 274 462 # Batch save all collected embeddings 275 463 if not dry_run:
+126
apps/speakers/call.py
··· 13 13 sol call speakers identify <cluster-id> <name> [--entity-id ID] 14 14 sol call speakers merge-names <alias> <canonical> 15 15 sol call speakers suggest [--limit N] [--json] 16 + sol call speakers link-import <name> --entity-id <ID> 17 + sol call speakers seed-from-imports [--dry-run] [--json] 16 18 """ 17 19 18 20 from __future__ import annotations ··· 405 407 from apps.speakers.suggest import format_suggestions 406 408 407 409 typer.echo(format_suggestions(results)) 410 + 411 + 412 + @app.command("link-import") 413 + def link_import( 414 + name: str = typer.Argument(..., help="Import participant name to link."), 415 + entity_id: str = typer.Option( 416 + ..., "--entity-id", help="Journal entity ID to link to." 417 + ), 418 + ) -> None: 419 + """Link an import participant name to a journal entity as an aka.""" 420 + import json as json_mod 421 + 422 + from think.entities.journal import ( 423 + load_all_journal_entities, 424 + load_journal_entity, 425 + save_journal_entity, 426 + ) 427 + from think.entities.matching import validate_aka_uniqueness 428 + from think.utils import now_ms 429 + 430 + entity = load_journal_entity(entity_id) 431 + if entity is None: 432 + typer.echo( 433 + json_mod.dumps({"error": f"Entity not found: {entity_id}"}, indent=2), 434 + err=True, 435 + ) 436 + raise typer.Exit(1) 437 + 438 + existing_aka = set(entity.get("aka", [])) 439 + already_present = name in existing_aka 440 + if not already_present: 441 + # Check for alias collision with other entities 442 + all_entities = load_all_journal_entities() 443 + entities_list = [e for e in all_entities.values() if not e.get("blocked")] 444 + collision = validate_aka_uniqueness( 445 + name, entities_list, exclude_entity_name=entity.get("name") 446 + ) 447 + if collision: 448 + typer.echo( 449 + json_mod.dumps( 450 + { 451 + "error": f"Name '{name}' conflicts with entity: {collision}", 452 + }, 453 + indent=2, 454 + ), 455 + err=True, 456 + ) 457 + raise typer.Exit(1) 458 + 459 + 
existing_aka.add(name) 460 + entity["aka"] = sorted(existing_aka) 461 + entity["updated_at"] = now_ms() 462 + save_journal_entity(entity) 463 + 464 + typer.echo( 465 + json_mod.dumps( 466 + { 467 + "linked": True, 468 + "entity_id": entity_id, 469 + "name_added": name, 470 + "already_present": already_present, 471 + }, 472 + indent=2, 473 + default=str, 474 + ) 475 + ) 476 + 477 + 478 + @app.command("seed-from-imports") 479 + def seed_from_imports_cmd( 480 + dry_run: bool = typer.Option( 481 + False, "--dry-run", help="Show what would be saved without saving." 482 + ), 483 + json_output: bool = typer.Option( 484 + False, "--json", help="Output full result as JSON." 485 + ), 486 + ) -> None: 487 + """Seed voiceprints from speaker-attributed import transcripts. 488 + 489 + Scans import-stream segments for conversation_transcript.jsonl files 490 + with per-line speaker attribution. Maps speaker names to journal 491 + entities and saves corresponding embeddings as voiceprints. 492 + """ 493 + from apps.speakers.bootstrap import seed_from_imports 494 + 495 + if dry_run and not json_output: 496 + typer.echo("DRY RUN — no voiceprints will be saved\n") 497 + 498 + if not json_output: 499 + typer.echo("Seeding voiceprints from import transcripts...") 500 + stats = seed_from_imports(dry_run=dry_run) 501 + 502 + if "error" in stats: 503 + typer.echo(f"Error: {stats['error']}", err=True) 504 + raise typer.Exit(1) 505 + if json_output: 506 + import json as json_mod 507 + 508 + typer.echo(json_mod.dumps(stats, indent=2, default=str)) 509 + return 510 + 511 + typer.echo(f"\nImport segments scanned: {stats['segments_scanned']}") 512 + typer.echo(f"Segments with speakers: {stats['segments_with_speakers']}") 513 + typer.echo(f"Unique speakers: {len(stats['speakers_found'])}") 514 + typer.echo(f"Embeddings saved: {stats['embeddings_saved']}") 515 + typer.echo(f"Embeddings skipped (owner): {stats['embeddings_skipped_owner']}") 516 + typer.echo( 517 + f"Embeddings skipped (duplicate): 
{stats['embeddings_skipped_duplicate']}" 518 + ) 519 + 520 + if stats["speakers_found"]: 521 + typer.echo("\nTop speakers by embedding count:") 522 + sorted_speakers = sorted( 523 + stats["speakers_found"].items(), key=lambda x: x[1], reverse=True 524 + ) 525 + for name, count in sorted_speakers[:15]: 526 + typer.echo(f" {name}: {count}") 527 + if len(sorted_speakers) > 15: 528 + typer.echo(f" ... and {len(sorted_speakers) - 15} more") 529 + 530 + if stats["errors"]: 531 + typer.echo(f"\nErrors ({len(stats['errors'])}):", err=True) 532 + for err in stats["errors"]: 533 + typer.echo(f" {err}", err=True)
+82 -1
apps/speakers/tests/conftest.py
··· 45 45 sources: list[str], 46 46 num_sentences: int = 5, 47 47 *, 48 + stream: str | None = None, 48 49 embeddings: np.ndarray | None = None, 49 50 ) -> Path: 50 51 """Create a segment with sentence embeddings. ··· 57 58 sources: List of audio sources (e.g., ["mic_audio", "sys_audio"]) 58 59 num_sentences: Number of sentences to create 59 60 """ 60 - segment_dir = self.journal / day / STREAM / segment_key 61 + segment_dir = self.journal / day / (stream or STREAM) / segment_key 61 62 segment_dir.mkdir(parents=True, exist_ok=True) 62 63 63 64 sentence_count = ( ··· 113 114 # Create dummy audio file 114 115 audio_path = segment_dir / f"{source}.flac" 115 116 audio_path.write_bytes(b"") # Empty placeholder 117 + 118 + return segment_dir 119 + 120 + def create_import_segment( 121 + self, 122 + day: str, 123 + segment_key: str, 124 + speakers_text: list[tuple[str, str]], 125 + source: str = "audio", 126 + *, 127 + stream: str = "import.granola", 128 + embeddings: np.ndarray | None = None, 129 + ) -> Path: 130 + """Create an import segment with conversation_transcript.jsonl and NPZ. 
131 + 132 + Args: 133 + day: Day string (YYYYMMDD) 134 + segment_key: Segment key (HHMMSS_LEN) 135 + speakers_text: List of (speaker_name, text) tuples for transcript lines 136 + source: Audio source stem for NPZ file (default "audio") 137 + stream: Import stream name (default "import.granola") 138 + embeddings: Optional custom embeddings array (N×256) 139 + """ 140 + segment_dir = self.journal / day / stream / segment_key 141 + segment_dir.mkdir(parents=True, exist_ok=True) 142 + 143 + num_sentences = len(speakers_text) 144 + 145 + # Write conversation_transcript.jsonl 146 + jsonl_path = segment_dir / "conversation_transcript.jsonl" 147 + lines = [ 148 + json.dumps( 149 + { 150 + "imported": {"id": "test-import", "source": "test"}, 151 + "topics": "test", 152 + "setting": "meeting", 153 + } 154 + ) 155 + ] 156 + 157 + time_part = segment_key.split("_")[0] 158 + base_h = int(time_part[0:2]) 159 + base_m = int(time_part[2:4]) 160 + base_s = int(time_part[4:6]) 161 + base_seconds = base_h * 3600 + base_m * 60 + base_s 162 + 163 + for i, (speaker, text) in enumerate(speakers_text): 164 + offset = i * 5 165 + abs_seconds = base_seconds + offset 166 + h = (abs_seconds // 3600) % 24 167 + m = (abs_seconds % 3600) // 60 168 + s = abs_seconds % 60 169 + lines.append( 170 + json.dumps( 171 + { 172 + "start": f"{h:02d}:{m:02d}:{s:02d}", 173 + "speaker": speaker, 174 + "text": text, 175 + "source": "import", 176 + } 177 + ) 178 + ) 179 + jsonl_path.write_text("\n".join(lines) + "\n") 180 + 181 + # Create NPZ embeddings 182 + npz_path = segment_dir / f"{source}.npz" 183 + if embeddings is None: 184 + source_embeddings = np.random.randn(num_sentences, 256).astype( 185 + np.float32 186 + ) 187 + norms = np.linalg.norm(source_embeddings, axis=1, keepdims=True) 188 + source_embeddings = source_embeddings / norms 189 + else: 190 + source_embeddings = embeddings.astype(np.float32) 191 + statement_ids = np.arange(1, num_sentences + 1, dtype=np.int32) 192 + np.savez_compressed( 193 + 
npz_path, 194 + embeddings=source_embeddings, 195 + statement_ids=statement_ids, 196 + ) 116 197 117 198 return segment_dir 118 199
+292
apps/speakers/tests/test_seed_imports.py
··· 1 + # SPDX-License-Identifier: AGPL-3.0-only 2 + # Copyright (c) 2026 sol pbc 3 + 4 + """Tests for link-import and seed-from-imports CLI commands.""" 5 + 6 + from __future__ import annotations 7 + 8 + import json 9 + 10 + import numpy as np 11 + from typer.testing import CliRunner 12 + 13 + from apps.speakers.call import app as speakers_app 14 + 15 + _runner = CliRunner() 16 + 17 + 18 + # --- link-import tests --- 19 + 20 + 21 + def test_link_import_success(speakers_env): 22 + """link-import adds name as aka on entity.""" 23 + env = speakers_env() 24 + env.create_entity("Sarah Chen") 25 + 26 + result = _runner.invoke( 27 + speakers_app, 28 + ["link-import", "Sarah C", "--entity-id", "sarah_chen"], 29 + ) 30 + assert result.exit_code == 0 31 + data = json.loads(result.output) 32 + assert data["linked"] is True 33 + assert data["entity_id"] == "sarah_chen" 34 + assert data["name_added"] == "Sarah C" 35 + assert data["already_present"] is False 36 + 37 + # Verify entity was actually updated 38 + entity_path = env.journal / "entities" / "sarah_chen" / "entity.json" 39 + entity = json.loads(entity_path.read_text()) 40 + assert "Sarah C" in entity["aka"] 41 + assert "updated_at" in entity 42 + 43 + 44 + def test_link_import_already_present(speakers_env): 45 + """link-import reports already_present when name is already an aka.""" 46 + env = speakers_env() 47 + entity_dir = env.create_entity("Sarah Chen") 48 + # Manually add aka 49 + entity_path = entity_dir / "entity.json" 50 + entity = json.loads(entity_path.read_text()) 51 + entity["aka"] = ["Sarah C"] 52 + entity_path.write_text(json.dumps(entity)) 53 + 54 + result = _runner.invoke( 55 + speakers_app, 56 + ["link-import", "Sarah C", "--entity-id", "sarah_chen"], 57 + ) 58 + assert result.exit_code == 0 59 + data = json.loads(result.output) 60 + assert data["already_present"] is True 61 + 62 + 63 + def test_link_import_entity_not_found(speakers_env): 64 + """link-import exits 1 with error JSON for missing entity.""" 
65 + speakers_env() 66 + result = _runner.invoke( 67 + speakers_app, 68 + ["link-import", "Nobody", "--entity-id", "nonexistent"], 69 + ) 70 + assert result.exit_code == 1 71 + 72 + 73 + def test_link_import_collision(speakers_env): 74 + """link-import exits 1 when aka collides with another entity.""" 75 + env = speakers_env() 76 + env.create_entity("Alice Johnson") 77 + env.create_entity("Bob Smith") 78 + 79 + result = _runner.invoke( 80 + speakers_app, 81 + ["link-import", "Bob Smith", "--entity-id", "alice_johnson"], 82 + ) 83 + assert result.exit_code == 1 84 + data = json.loads(result.output) 85 + assert "conflicts" in data["error"] 86 + 87 + 88 + # --- seed-from-imports tests --- 89 + 90 + 91 + def _create_owner_centroid(env, *, threshold: float = 0.85): 92 + """Helper: create owner centroid file for seed tests.""" 93 + env.create_entity("Owner", is_principal=True) 94 + 95 + owner_dir = env.journal / "entities" / "owner" 96 + owner_dir.mkdir(parents=True, exist_ok=True) 97 + 98 + # Create a distinct owner embedding (pointing in a specific direction) 99 + owner_emb = np.zeros(256, dtype=np.float32) 100 + owner_emb[0] = 1.0 # Owner points along axis 0 101 + 102 + centroid_path = owner_dir / "owner_centroid.npz" 103 + np.savez_compressed( 104 + centroid_path, 105 + centroid=owner_emb, 106 + threshold=np.float32(threshold), 107 + ) 108 + return owner_emb 109 + 110 + 111 + def test_seed_from_imports_happy_path(speakers_env): 112 + """seed-from-imports seeds voiceprints for matched speakers.""" 113 + env = speakers_env() 114 + _create_owner_centroid(env) 115 + env.create_entity("Alice Johnson") 116 + 117 + # Create embeddings that are NOT owner-like (orthogonal to owner) 118 + embs = np.zeros((3, 256), dtype=np.float32) 119 + embs[0, 1] = 1.0 # Points along axis 1 (orthogonal to owner axis 0) 120 + embs[1, 2] = 1.0 121 + embs[2, 3] = 1.0 122 + 123 + env.create_import_segment( 124 + "20240101", 125 + "100000_300", 126 + [ 127 + ("Alice Johnson", "Hello everyone"), 
128 + ("Alice Johnson", "Let's discuss the project"), 129 + ("Alice Johnson", "Thanks for joining"), 130 + ], 131 + embeddings=embs, 132 + ) 133 + 134 + result = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 135 + assert result.exit_code == 0, result.output 136 + data = json.loads(result.output) 137 + assert data["segments_scanned"] >= 1 138 + assert data["segments_with_speakers"] >= 1 139 + assert data["embeddings_saved"] == 3 140 + assert "Alice Johnson" in data["speakers_found"] 141 + 142 + 143 + def test_seed_from_imports_skips_generic_speakers(speakers_env): 144 + """seed-from-imports skips Human/Assistant speakers.""" 145 + env = speakers_env() 146 + _create_owner_centroid(env) 147 + 148 + embs = np.zeros((2, 256), dtype=np.float32) 149 + embs[0, 1] = 1.0 150 + embs[1, 2] = 1.0 151 + 152 + env.create_import_segment( 153 + "20240101", 154 + "100000_300", 155 + [ 156 + ("Human", "How do I fix this?"), 157 + ("Assistant", "Try restarting the service."), 158 + ], 159 + stream="import.chatgpt", 160 + embeddings=embs, 161 + ) 162 + 163 + result = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 164 + assert result.exit_code == 0 165 + data = json.loads(result.output) 166 + assert data["embeddings_saved"] == 0 167 + assert data["segments_with_speakers"] == 0 168 + 169 + 170 + def test_seed_from_imports_skips_unmatched_speakers(speakers_env): 171 + """seed-from-imports skips speakers with no matching entity.""" 172 + env = speakers_env() 173 + _create_owner_centroid(env) 174 + # Don't create an entity for "Unknown Person" 175 + 176 + embs = np.zeros((1, 256), dtype=np.float32) 177 + embs[0, 1] = 1.0 178 + 179 + env.create_import_segment( 180 + "20240101", 181 + "100000_300", 182 + [("Unknown Person", "Hello")], 183 + embeddings=embs, 184 + ) 185 + 186 + result = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 187 + assert result.exit_code == 0 188 + data = json.loads(result.output) 189 + assert data["embeddings_saved"] == 0 
190 + 191 + 192 + def test_seed_from_imports_owner_contamination(speakers_env): 193 + """seed-from-imports skips embeddings too similar to owner.""" 194 + env = speakers_env() 195 + _create_owner_centroid(env, threshold=0.85) 196 + env.create_entity("Alice Johnson") 197 + 198 + # Create an embedding that IS owner-like (same direction as owner centroid) 199 + embs = np.zeros((1, 256), dtype=np.float32) 200 + embs[0, 0] = 1.0 # Same direction as owner (axis 0) 201 + 202 + env.create_import_segment( 203 + "20240101", 204 + "100000_300", 205 + [("Alice Johnson", "Hello")], 206 + embeddings=embs, 207 + ) 208 + 209 + result = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 210 + assert result.exit_code == 0 211 + data = json.loads(result.output) 212 + assert data["embeddings_skipped_owner"] == 1 213 + assert data["embeddings_saved"] == 0 214 + 215 + 216 + def test_seed_from_imports_dedup(speakers_env): 217 + """seed-from-imports is idempotent — second run skips duplicates.""" 218 + env = speakers_env() 219 + _create_owner_centroid(env) 220 + env.create_entity("Alice Johnson") 221 + 222 + embs = np.zeros((1, 256), dtype=np.float32) 223 + embs[0, 1] = 1.0 224 + 225 + env.create_import_segment( 226 + "20240101", 227 + "100000_300", 228 + [("Alice Johnson", "Hello")], 229 + embeddings=embs, 230 + ) 231 + 232 + # First run 233 + result1 = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 234 + assert result1.exit_code == 0 235 + data1 = json.loads(result1.output) 236 + assert data1["embeddings_saved"] == 1 237 + 238 + # Second run — should be all duplicates 239 + result2 = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 240 + assert result2.exit_code == 0 241 + data2 = json.loads(result2.output) 242 + assert data2["embeddings_saved"] == 0 243 + assert data2["embeddings_skipped_duplicate"] == 1 244 + 245 + 246 + def test_seed_from_imports_dry_run(speakers_env): 247 + """seed-from-imports --dry-run reports stats but doesn't write.""" 248 
+ env = speakers_env() 249 + _create_owner_centroid(env) 250 + env.create_entity("Alice Johnson") 251 + 252 + embs = np.zeros((1, 256), dtype=np.float32) 253 + embs[0, 1] = 1.0 254 + 255 + env.create_import_segment( 256 + "20240101", 257 + "100000_300", 258 + [("Alice Johnson", "Hello")], 259 + embeddings=embs, 260 + ) 261 + 262 + result = _runner.invoke( 263 + speakers_app, 264 + ["seed-from-imports", "--dry-run", "--json"], 265 + ) 266 + assert result.exit_code == 0 267 + data = json.loads(result.output) 268 + assert data["embeddings_saved"] == 1 # Would-be saved count 269 + 270 + # Verify nothing was actually written 271 + vp_path = env.journal / "entities" / "alice_johnson" / "voiceprints.npz" 272 + assert not vp_path.exists() 273 + 274 + 275 + def test_seed_from_imports_no_owner_centroid(speakers_env): 276 + """seed-from-imports errors when no owner centroid exists.""" 277 + speakers_env() 278 + 279 + result = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 280 + assert result.exit_code == 1 281 + 282 + 283 + def test_seed_from_imports_no_import_segments(speakers_env): 284 + """seed-from-imports returns zeroed stats when no import segments exist.""" 285 + env = speakers_env() 286 + _create_owner_centroid(env) 287 + 288 + result = _runner.invoke(speakers_app, ["seed-from-imports", "--json"]) 289 + assert result.exit_code == 0 290 + data = json.loads(result.output) 291 + assert data["segments_scanned"] == 0 292 + assert data["embeddings_saved"] == 0