Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64s/hash: Add some SLB debugging tests

This adds CONFIG_DEBUG_VM checks to ensure:
- The kernel stack is in the SLB after it's flushed and bolted.
- We don't insert an SLB for an address that is already in the SLB.
- The kernel SLB miss handler does not take an SLB miss.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Nicholas Piggin and committed by
Michael Ellerman
e15a4fea 94ee4272

+53 -3
+3
arch/powerpc/include/asm/paca.h
··· 115 115 u16 vmalloc_sllp; 116 116 u8 slb_cache_ptr; 117 117 u8 stab_rr; /* stab/slb round-robin counter */ 118 + #ifdef CONFIG_DEBUG_VM 119 + u8 in_kernel_slb_handler; 120 + #endif 118 121 u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */ 119 122 u32 slb_kern_bitmap; 120 123 u32 slb_cache[SLB_CACHE_ENTRIES];
+50 -3
arch/powerpc/mm/slb.c
··· 58 58 return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); 59 59 } 60 60 61 + static void assert_slb_exists(unsigned long ea) 62 + { 63 + #ifdef CONFIG_DEBUG_VM 64 + unsigned long tmp; 65 + 66 + WARN_ON_ONCE(mfmsr() & MSR_EE); 67 + 68 + asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); 69 + WARN_ON(tmp == 0); 70 + #endif 71 + } 72 + 73 + static void assert_slb_notexists(unsigned long ea) 74 + { 75 + #ifdef CONFIG_DEBUG_VM 76 + unsigned long tmp; 77 + 78 + WARN_ON_ONCE(mfmsr() & MSR_EE); 79 + 80 + asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); 81 + WARN_ON(tmp != 0); 82 + #endif 83 + } 84 + 61 85 static inline void slb_shadow_update(unsigned long ea, int ssize, 62 86 unsigned long flags, 63 87 enum slb_index index) ··· 114 90 */ 115 91 slb_shadow_update(ea, ssize, flags, index); 116 92 93 + assert_slb_notexists(ea); 117 94 asm volatile("slbmte %0,%1" : 118 95 : "r" (mk_vsid_data(ea, ssize, flags)), 119 96 "r" (mk_esid_data(ea, ssize, index)) ··· 136 111 : "r" (be64_to_cpu(p->save_area[index].vsid)), 137 112 "r" (be64_to_cpu(p->save_area[index].esid))); 138 113 } 114 + 115 + assert_slb_exists(local_paca->kstack); 139 116 } 140 117 141 118 /* ··· 185 158 :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), 186 159 "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) 187 160 : "memory"); 161 + assert_slb_exists(get_paca()->kstack); 188 162 189 163 get_paca()->slb_cache_ptr = 0; 190 164 ··· 438 410 unsigned long slbie_data = 0; 439 411 440 412 for (i = 0; i < offset; i++) { 441 - /* EA */ 442 - slbie_data = (unsigned long) 413 + unsigned long ea; 414 + 415 + ea = (unsigned long) 443 416 get_paca()->slb_cache[i] << SID_SHIFT; 417 + /* 418 + * Could assert_slb_exists here, but hypervisor 419 + * or machine check could have come in and 420 + * removed the entry at this point. 
421 + */ 422 + 423 + slbie_data = ea; 444 424 slbie_data |= user_segment_size(slbie_data) 445 425 << SLBIE_SSIZE_SHIFT; 446 426 slbie_data |= SLBIE_C; /* user slbs have C=1 */ ··· 676 640 * User preloads should add isync afterwards in case the kernel 677 641 * accesses user memory before it returns to userspace with rfid. 678 642 */ 643 + assert_slb_notexists(ea); 679 644 asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); 680 645 681 646 barrier(); ··· 777 740 * if they go via fast_exception_return too. 778 741 */ 779 742 if (id >= KERNEL_REGION_ID) { 780 - return slb_allocate_kernel(ea, id); 743 + long err; 744 + #ifdef CONFIG_DEBUG_VM 745 + /* Catch recursive kernel SLB faults. */ 746 + BUG_ON(local_paca->in_kernel_slb_handler); 747 + local_paca->in_kernel_slb_handler = 1; 748 + #endif 749 + err = slb_allocate_kernel(ea, id); 750 + #ifdef CONFIG_DEBUG_VM 751 + local_paca->in_kernel_slb_handler = 0; 752 + #endif 753 + return err; 781 754 } else { 782 755 struct mm_struct *mm = current->mm; 783 756 long err;