Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: Add some debug output when hash insertion fails

This adds some debug output to our MMU hash code, printing useful
diagnostic data if the hypervisor refuses the hash-table insertion
(which should normally never happen).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---

+34 -6
+3 -1
arch/powerpc/include/asm/mmu-hash64.h
··· 250 250 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, 251 251 pte_t *ptep, unsigned long trap, int local, int ssize, 252 252 unsigned int shift, unsigned int mmu_psize); 253 - 253 + extern void hash_failure_debug(unsigned long ea, unsigned long access, 254 + unsigned long vsid, unsigned long trap, 255 + int ssize, int psize, unsigned long pte); 254 256 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend, 255 257 unsigned long pstart, unsigned long prot, 256 258 int psize, int ssize);
+29 -5
arch/powerpc/mm/hash_utils_64.c
··· 871 871 } 872 872 #endif 873 873 874 + void hash_failure_debug(unsigned long ea, unsigned long access, 875 + unsigned long vsid, unsigned long trap, 876 + int ssize, int psize, unsigned long pte) 877 + { 878 + if (!printk_ratelimit()) 879 + return; 880 + pr_info("mm: Hashing failure ! EA=0x%lx access=0x%lx current=%s\n", 881 + ea, access, current->comm); 882 + pr_info(" trap=0x%lx vsid=0x%lx ssize=%d psize=%d pte=0x%lx\n", 883 + trap, vsid, ssize, psize, pte); 884 + } 885 + 874 886 /* Result code is: 875 887 * 0 - handled 876 888 * 1 - normal page fault ··· 1048 1036 local, ssize, spp); 1049 1037 } 1050 1038 1039 + /* Dump some info in case of hash insertion failure, they should 1040 + * never happen so it is really useful to know if/when they do 1041 + */ 1042 + if (rc == -1) 1043 + hash_failure_debug(ea, access, vsid, trap, ssize, psize, 1044 + pte_val(*ptep)); 1051 1045 #ifndef CONFIG_PPC_64K_PAGES 1052 1046 DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep)); 1053 1047 #else ··· 1072 1054 void *pgdir; 1073 1055 pte_t *ptep; 1074 1056 unsigned long flags; 1075 - int local = 0; 1076 - int ssize; 1057 + int rc, ssize, local = 0; 1077 1058 1078 1059 BUG_ON(REGION_ID(ea) != USER_REGION_ID); 1079 1060 ··· 1118 1101 /* Hash it in */ 1119 1102 #ifdef CONFIG_PPC_HAS_HASH_64K 1120 1103 if (mm->context.user_psize == MMU_PAGE_64K) 1121 - __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1104 + rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1122 1105 else 1123 1106 #endif /* CONFIG_PPC_HAS_HASH_64K */ 1124 - __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, 1125 - subpage_protection(pgdir, ea)); 1107 + rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, 1108 + subpage_protection(pgdir, ea)); 1109 + 1110 + /* Dump some info in case of hash insertion failure, they should 1111 + * never happen so it is really useful to know if/when they do 1112 + */ 1113 + if (rc == -1) 1114 + hash_failure_debug(ea, access, vsid, trap, ssize, 1115 + mm->context.user_psize, pte_val(*ptep)); 1126 1116
+2
arch/powerpc/mm/hugetlbpage-hash64.c
··· 127 127 */ 128 128 if (unlikely(slot == -2)) { 129 129 *ptep = __pte(old_pte); 130 + hash_failure_debug(ea, access, vsid, trap, ssize, 131 + mmu_psize, old_pte); 130 132 return -1; 131 133 } 132 134