Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lkdtm/powerpc: Add SLB multihit test

To test machine check handling, add support for injecting SLB
multihit errors.

Co-developed-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com>
[mpe: Use CONFIG_PPC_BOOK3S_64 to fix compile errors reported by lkp@intel.com]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201130083057.135610-1-ganeshgr@linux.ibm.com

Authored by Ganesh Goudar and committed by Michael Ellerman.
3ba150fb 6c58b1b4

+156 -28
+27 -1
arch/powerpc/include/asm/book3s/64/mmu-hash.h
··· 843 843 844 844 unsigned htab_shift_for_mem_size(unsigned long mem_size); 845 845 846 - #endif /* __ASSEMBLY__ */ 846 + enum slb_index { 847 + LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ 848 + KSTACK_INDEX = 1, /* Kernel stack map */ 849 + }; 847 850 851 + #define slb_esid_mask(ssize) \ 852 + (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T) 853 + 854 + static inline unsigned long mk_esid_data(unsigned long ea, int ssize, 855 + enum slb_index index) 856 + { 857 + return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; 858 + } 859 + 860 + static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize, 861 + unsigned long flags) 862 + { 863 + return (vsid << slb_vsid_shift(ssize)) | flags | 864 + ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT); 865 + } 866 + 867 + static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, 868 + unsigned long flags) 869 + { 870 + return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); 871 + } 872 + 873 + #endif /* __ASSEMBLY__ */ 848 874 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
+1
arch/powerpc/mm/book3s64/hash_utils.c
··· 112 112 EXPORT_SYMBOL_GPL(mmu_linear_psize); 113 113 int mmu_virtual_psize = MMU_PAGE_4K; 114 114 int mmu_vmalloc_psize = MMU_PAGE_4K; 115 + EXPORT_SYMBOL_GPL(mmu_vmalloc_psize); 115 116 #ifdef CONFIG_SPARSEMEM_VMEMMAP 116 117 int mmu_vmemmap_psize = MMU_PAGE_4K; 117 118 #endif
-27
arch/powerpc/mm/book3s64/slb.c
··· 28 28 #include "internal.h" 29 29 30 30 31 - enum slb_index { 32 - LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ 33 - KSTACK_INDEX = 1, /* Kernel stack map */ 34 - }; 35 - 36 31 static long slb_allocate_user(struct mm_struct *mm, unsigned long ea); 37 - 38 - #define slb_esid_mask(ssize) \ 39 - (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T) 40 - 41 - static inline unsigned long mk_esid_data(unsigned long ea, int ssize, 42 - enum slb_index index) 43 - { 44 - return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; 45 - } 46 - 47 - static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize, 48 - unsigned long flags) 49 - { 50 - return (vsid << slb_vsid_shift(ssize)) | flags | 51 - ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT); 52 - } 53 - 54 - static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, 55 - unsigned long flags) 56 - { 57 - return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); 58 - } 59 32 60 33 bool stress_slb_enabled __initdata; 61 34
+1
drivers/misc/lkdtm/Makefile
··· 10 10 lkdtm-$(CONFIG_LKDTM) += usercopy.o 11 11 lkdtm-$(CONFIG_LKDTM) += stackleak.o 12 12 lkdtm-$(CONFIG_LKDTM) += cfi.o 13 + lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o 13 14 14 15 KASAN_SANITIZE_stackleak.o := n 15 16 KCOV_INSTRUMENT_rodata.o := n
+3
drivers/misc/lkdtm/core.c
··· 176 176 #ifdef CONFIG_X86_32 177 177 CRASHTYPE(DOUBLE_FAULT), 178 178 #endif 179 + #ifdef CONFIG_PPC_BOOK3S_64 180 + CRASHTYPE(PPC_SLB_MULTIHIT), 181 + #endif 179 182 }; 180 183 181 184
+3
drivers/misc/lkdtm/lkdtm.h
··· 102 102 /* cfi.c */ 103 103 void lkdtm_CFI_FORWARD_PROTO(void); 104 104 105 + /* powerpc.c */ 106 + void lkdtm_PPC_SLB_MULTIHIT(void); 107 + 105 108 #endif
+120
drivers/misc/lkdtm/powerpc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "lkdtm.h" 4 + #include <linux/slab.h> 5 + #include <linux/vmalloc.h> 6 + #include <asm/mmu.h> 7 + 8 + /* Inserts new slb entries */ 9 + static void insert_slb_entry(unsigned long p, int ssize, int page_size) 10 + { 11 + unsigned long flags; 12 + 13 + flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp; 14 + preempt_disable(); 15 + 16 + asm volatile("slbmte %0,%1" : 17 + : "r" (mk_vsid_data(p, ssize, flags)), 18 + "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED)) 19 + : "memory"); 20 + 21 + asm volatile("slbmte %0,%1" : 22 + : "r" (mk_vsid_data(p, ssize, flags)), 23 + "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1)) 24 + : "memory"); 25 + preempt_enable(); 26 + } 27 + 28 + /* Inject slb multihit on vmalloc-ed address i.e 0xD00... */ 29 + static int inject_vmalloc_slb_multihit(void) 30 + { 31 + char *p; 32 + 33 + p = vmalloc(PAGE_SIZE); 34 + if (!p) 35 + return -ENOMEM; 36 + 37 + insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize); 38 + /* 39 + * This triggers exception, If handled correctly we must recover 40 + * from this error. 41 + */ 42 + p[0] = '!'; 43 + vfree(p); 44 + return 0; 45 + } 46 + 47 + /* Inject slb multihit on kmalloc-ed address i.e 0xC00... */ 48 + static int inject_kmalloc_slb_multihit(void) 49 + { 50 + char *p; 51 + 52 + p = kmalloc(2048, GFP_KERNEL); 53 + if (!p) 54 + return -ENOMEM; 55 + 56 + insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize); 57 + /* 58 + * This triggers exception, If handled correctly we must recover 59 + * from this error. 60 + */ 61 + p[0] = '!'; 62 + kfree(p); 63 + return 0; 64 + } 65 + 66 + /* 67 + * Few initial SLB entries are bolted. Add a test to inject 68 + * multihit in bolted entry 0. 
69 + */ 70 + static void insert_dup_slb_entry_0(void) 71 + { 72 + unsigned long test_address = PAGE_OFFSET, *test_ptr; 73 + unsigned long esid, vsid; 74 + unsigned long i = 0; 75 + 76 + test_ptr = (unsigned long *)test_address; 77 + preempt_disable(); 78 + 79 + asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i)); 80 + asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i)); 81 + 82 + /* for i !=0 we would need to mask out the old entry number */ 83 + asm volatile("slbmte %0,%1" : 84 + : "r" (vsid), 85 + "r" (esid | SLB_NUM_BOLTED) 86 + : "memory"); 87 + 88 + asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i)); 89 + asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i)); 90 + 91 + /* for i !=0 we would need to mask out the old entry number */ 92 + asm volatile("slbmte %0,%1" : 93 + : "r" (vsid), 94 + "r" (esid | (SLB_NUM_BOLTED + 1)) 95 + : "memory"); 96 + 97 + pr_info("%s accessing test address 0x%lx: 0x%lx\n", 98 + __func__, test_address, *test_ptr); 99 + 100 + preempt_enable(); 101 + } 102 + 103 + void lkdtm_PPC_SLB_MULTIHIT(void) 104 + { 105 + if (!radix_enabled()) { 106 + pr_info("Injecting SLB multihit errors\n"); 107 + /* 108 + * These need not be separate tests, And they do pretty 109 + * much same thing. In any case we must recover from the 110 + * errors introduced by these functions, machine would not 111 + * survive these tests in case of failure to handle. 112 + */ 113 + inject_vmalloc_slb_multihit(); 114 + inject_kmalloc_slb_multihit(); 115 + insert_dup_slb_entry_0(); 116 + pr_info("Recovered from SLB multihit errors\n"); 117 + } else { 118 + pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n"); 119 + } 120 + }
+1
tools/testing/selftests/lkdtm/tests.txt
··· 68 68 USERCOPY_KERNEL 69 69 STACKLEAK_ERASING OK: the rest of the thread stack is properly erased 70 70 CFI_FORWARD_PROTO 71 + PPC_SLB_MULTIHIT Recovered