Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] DEBUG_PAGEALLOC for 32-bit

Here's an implementation of DEBUG_PAGEALLOC for ppc32. It disables BAT
mapping and has only been tested on hash-table-based processors, though it
shouldn't be too hard to adapt it to others.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

arch/powerpc/Kconfig.debug | 9 ++++++
arch/powerpc/mm/init_32.c | 4 +++
arch/powerpc/mm/pgtable_32.c | 52 +++++++++++++++++++++++++++++++++++++++
arch/powerpc/mm/ppc_mmu_32.c | 4 ++-
include/asm-powerpc/cacheflush.h | 6 ++++
5 files changed, 74 insertions(+), 1 deletion(-)
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Benjamin Herrenschmidt and committed by
Paul Mackerras
88df6e90 ee4f2ea4

+74 -1
+9
arch/powerpc/Kconfig.debug
··· 18 18 19 19 This option will slow down process creation somewhat. 20 20 21 + config DEBUG_PAGEALLOC 22 + bool "Debug page memory allocations" 23 + depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && PPC32 24 + help 25 + Unmap pages from the kernel linear mapping after free_pages(). 26 + This results in a large slowdown, but helps to find certain types 27 + of memory corruptions. 28 + 29 + 21 30 config HCALL_STATS 22 31 bool "Hypervisor call instrumentation" 23 32 depends on PPC_PSERIES && DEBUG_FS
+4
arch/powerpc/mm/init_32.c
··· 115 115 if (strstr(cmd_line, "noltlbs")) { 116 116 __map_without_ltlbs = 1; 117 117 } 118 + #ifdef CONFIG_DEBUG_PAGEALLOC 119 + __map_without_bats = 1; 120 + __map_without_ltlbs = 1; 121 + #endif 118 122 } 119 123 120 124 /*
+52
arch/powerpc/mm/pgtable_32.c
··· 451 451 return ret; 452 452 } 453 453 454 + #ifdef CONFIG_DEBUG_PAGEALLOC 455 + 456 + static int __change_page_attr(struct page *page, pgprot_t prot) 457 + { 458 + pte_t *kpte; 459 + pmd_t *kpmd; 460 + unsigned long address; 461 + 462 + BUG_ON(PageHighMem(page)); 463 + address = (unsigned long)page_address(page); 464 + 465 + if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address)) 466 + return 0; 467 + if (!get_pteptr(&init_mm, address, &kpte, &kpmd)) 468 + return -EINVAL; 469 + set_pte_at(&init_mm, address, kpte, mk_pte(page, prot)); 470 + wmb(); 471 + flush_HPTE(0, address, pmd_val(*kpmd)); 472 + pte_unmap(kpte); 473 + 474 + return 0; 475 + } 476 + 477 + /* 478 + * Change the page attributes of a page in the linear mapping. 479 + * 480 + * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY 481 + */ 482 + static int change_page_attr(struct page *page, int numpages, pgprot_t prot) 483 + { 484 + int i, err = 0; 485 + unsigned long flags; 486 + 487 + local_irq_save(flags); 488 + for (i = 0; i < numpages; i++, page++) { 489 + err = __change_page_attr(page, prot); 490 + if (err) 491 + break; 492 + } 493 + local_irq_restore(flags); 494 + return err; 495 + } 496 + 497 + 498 + void kernel_map_pages(struct page *page, int numpages, int enable) 499 + { 500 + if (PageHighMem(page)) 501 + return; 502 + 503 + change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0)); 504 + } 505 + #endif /* CONFIG_DEBUG_PAGEALLOC */
+3 -1
arch/powerpc/mm/ppc_mmu_32.c
··· 85 85 unsigned long max_size = (256<<20); 86 86 unsigned long align; 87 87 88 - if (__map_without_bats) 88 + if (__map_without_bats) { 89 + printk(KERN_DEBUG "RAM mapped without BATs\n"); 89 90 return 0; 91 + } 90 92 91 93 /* Set up BAT2 and if necessary BAT3 to cover RAM. */ 92 94
+6
include/asm-powerpc/cacheflush.h
··· 64 64 memcpy(dst, src, len) 65 65 66 66 67 + 68 + #ifdef CONFIG_DEBUG_PAGEALLOC 69 + /* internal debugging function */ 70 + void kernel_map_pages(struct page *page, int numpages, int enable); 71 + #endif 72 + 67 73 #endif /* __KERNEL__ */ 68 74 69 75 #endif /* _ASM_POWERPC_CACHEFLUSH_H */