pydantic model generator for atproto lexicons

chore: move bench.py to scripts/, use uv shebang

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
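
For context on the "uv shebang" in the subject line: `env -S` splits the rest of the shebang line into separate arguments, so executing the file expands to `uv run python scripts/bench.py`, launching the script inside the project's uv-managed environment rather than whatever `python3` is first on PATH. A minimal usage sketch (the `chmod` step is an assumption, not part of this diff):

    chmod +x scripts/bench.py   # only needed if the executable bit is not already set
    ./scripts/bench.py          # env -S expands the shebang to: uv run python scripts/bench.py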

Changed files  +56 -56

bench.py  -55
- #!/usr/bin/env python3
- """benchmark pmgfal on real lexicons."""
-
- import subprocess
- import tempfile
- import time
- from pathlib import Path
-
-
- def bench_atproto():
-     """benchmark against full atproto lexicons."""
-     with tempfile.TemporaryDirectory() as tmp:
-         # clone atproto
-         print("cloning atproto lexicons...")
-         subprocess.run(
-             ["git", "clone", "--depth=1", "https://github.com/bluesky-social/atproto.git", tmp],
-             capture_output=True,
-             check=True,
-         )
-
-         lexicon_dir = Path(tmp) / "lexicons"
-         output_dir = Path(tmp) / "output"
-         json_files = list(lexicon_dir.rglob("*.json"))
-
-         print(f"found {len(json_files)} lexicon files")
-
-         # benchmark generation (cold)
-         start = time.perf_counter()
-         subprocess.run(
-             ["uv", "run", "pmgfal", str(lexicon_dir), "-o", str(output_dir), "--no-cache"],
-             check=True,
-         )
-         cold_time = time.perf_counter() - start
-
-         # count output
-         models_file = output_dir / "models.py"
-         lines = len(models_file.read_text().splitlines()) if models_file.exists() else 0
-
-         # benchmark cache hit
-         start = time.perf_counter()
-         subprocess.run(
-             ["uv", "run", "pmgfal", str(lexicon_dir), "-o", str(output_dir)],
-             check=True,
-         )
-         cache_time = time.perf_counter() - start
-
-         print(f"\nresults:")
-         print(f"  lexicons: {len(json_files)}")
-         print(f"  output: {lines} lines")
-         print(f"  cold generation: {cold_time:.3f}s")
-         print(f"  cache hit: {cache_time:.3f}s")
-
-
- if __name__ == "__main__":
-     bench_atproto()
justfile  +1 -1
  # benchmark on atproto lexicons
  bench: dev
-     uv run python bench.py
+     ./scripts/bench.py

  # clean build artifacts
  clean:
scripts/bench.py  +55
+ #!/usr/bin/env -S uv run python
+ """benchmark pmgfal on real lexicons."""
+
+ import subprocess
+ import tempfile
+ import time
+ from pathlib import Path
+
+
+ def bench_atproto():
+     """benchmark against full atproto lexicons."""
+     with tempfile.TemporaryDirectory() as tmp:
+         # clone atproto
+         print("cloning atproto lexicons...")
+         subprocess.run(
+             ["git", "clone", "--depth=1", "https://github.com/bluesky-social/atproto.git", tmp],
+             capture_output=True,
+             check=True,
+         )
+
+         lexicon_dir = Path(tmp) / "lexicons"
+         output_dir = Path(tmp) / "output"
+         json_files = list(lexicon_dir.rglob("*.json"))
+
+         print(f"found {len(json_files)} lexicon files")
+
+         # benchmark generation (cold)
+         start = time.perf_counter()
+         subprocess.run(
+             ["uv", "run", "pmgfal", str(lexicon_dir), "-o", str(output_dir), "--no-cache"],
+             check=True,
+         )
+         cold_time = time.perf_counter() - start
+
+         # count output
+         models_file = output_dir / "models.py"
+         lines = len(models_file.read_text().splitlines()) if models_file.exists() else 0
+
+         # benchmark cache hit
+         start = time.perf_counter()
+         subprocess.run(
+             ["uv", "run", "pmgfal", str(lexicon_dir), "-o", str(output_dir)],
+             check=True,
+         )
+         cache_time = time.perf_counter() - start
+
+         print(f"\nresults:")
+         print(f"  lexicons: {len(json_files)}")
+         print(f"  output: {lines} lines")
+         print(f"  cold generation: {cold_time:.3f}s")
+         print(f"  cache hit: {cache_time:.3f}s")
+
+
+ if __name__ == "__main__":
+     bench_atproto()
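
With the justfile recipe pointing at the new path, the benchmark can be driven either way (a usage sketch, assuming uv is installed and the project environment is set up):

    just bench           # runs the dev recipe first, then ./scripts/bench.py
    ./scripts/bench.py   # direct invocation via the new shebang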