[PATCH] x86_64: Move NUMA page_to_pfn/pfn_to_page functions out of line

Saves ~18K of .text in defconfig

There would be more optimization potential, but that's for later.

Suggestion originally from Bill Irwin.
Fix from Andy Whitcroft.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Andi Kleen and committed by Linus Torvalds cf050132 cdc4b9c0

+39 -13
+36
arch/x86_64/mm/numa.c
··· 360 EXPORT_SYMBOL(memnode_shift); 361 EXPORT_SYMBOL(memnodemap); 362 EXPORT_SYMBOL(node_data);
··· 360 EXPORT_SYMBOL(memnode_shift); 361 EXPORT_SYMBOL(memnodemap); 362 EXPORT_SYMBOL(node_data); 363 + 364 + #ifdef CONFIG_DISCONTIGMEM 365 + /* 366 + * Functions to convert PFNs from/to per node page addresses. 367 + * These are out of line because they are quite big. 368 + * They could be all tuned by pre caching more state. 369 + * Should do that. 370 + */ 371 + 372 + /* Requires pfn_valid(pfn) to be true */ 373 + struct page *pfn_to_page(unsigned long pfn) 374 + { 375 + int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); 376 + return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; 377 + } 378 + EXPORT_SYMBOL(pfn_to_page); 379 + 380 + unsigned long page_to_pfn(struct page *page) 381 + { 382 + return (long)(((page) - page_zone(page)->zone_mem_map) + 383 + page_zone(page)->zone_start_pfn); 384 + } 385 + EXPORT_SYMBOL(page_to_pfn); 386 + 387 + int pfn_valid(unsigned long pfn) 388 + { 389 + unsigned nid; 390 + if (pfn >= num_physpages) 391 + return 0; 392 + nid = pfn_to_nid(pfn); 393 + if (nid == 0xff) 394 + return 0; 395 + return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid); 396 + } 397 + EXPORT_SYMBOL(pfn_valid); 398 + #endif
+3 -13
include/asm-x86_64/mmzone.h
··· 36 NODE_DATA(nid)->node_spanned_pages) 37 38 #ifdef CONFIG_DISCONTIGMEM 39 - 40 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 41 #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) 42 43 - /* Requires pfn_valid(pfn) to be true */ 44 - #define pfn_to_page(pfn) ({ \ 45 - int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \ 46 - ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \ 47 - }) 48 - 49 - #define page_to_pfn(page) \ 50 - (long)(((page) - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn) 51 - 52 - #define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \ 53 - ({ u8 nid__ = pfn_to_nid(pfn); \ 54 - nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); })) 55 #endif 56 57 #define local_mapnr(kvaddr) \
··· 36 NODE_DATA(nid)->node_spanned_pages) 37 38 #ifdef CONFIG_DISCONTIGMEM 39 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) 40 #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) 41 42 + extern struct page *pfn_to_page(unsigned long pfn); 43 + extern unsigned long page_to_pfn(struct page *page); 44 + extern int pfn_valid(unsigned long pfn); 45 #endif 46 47 #define local_mapnr(kvaddr) \