Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/uaccess: Shorten raw_copy_from_user() / raw_copy_to_user() inline assemblies

Add a specific exception handler for copy_to_user() / copy_from_user()
mvcos fault handling, which allows the inline assemblies to be shortened
to three instructions.

On fault the exception handler adjusts the length used by the mvcos
instruction in a way that the instruction completes with condition code
zero, indicating the number of bytes copied via the input/output operand
'size'. This makes it possible to calculate and return the number of bytes
not copied, if any, as required.

Loop and return value handling is changed to C so that the compiler may
optimize the code.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

authored by

Heiko Carstens and committed by
Vasily Gorbik
c488f518 fb5bbcdc

+103 -62
+8
arch/s390/include/asm/asm-extable.h
··· 14 14 #define EX_TYPE_UA_LOAD_REGPAIR 6 15 15 #define EX_TYPE_ZEROPAD 7 16 16 #define EX_TYPE_FPC 8 17 + #define EX_TYPE_UA_MVCOS_TO 9 18 + #define EX_TYPE_UA_MVCOS_FROM 10 17 19 18 20 #define EX_DATA_REG_ERR_SHIFT 0 19 21 #define EX_DATA_REG_ERR GENMASK(3, 0) ··· 85 83 86 84 #define EX_TABLE_FPC(_fault, _target) \ 87 85 __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0) 86 + 87 + #define EX_TABLE_UA_MVCOS_TO(_fault, _target) \ 88 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_TO, __stringify(%%r0), __stringify(%%r0), 0) 89 + 90 + #define EX_TABLE_UA_MVCOS_FROM(_fault, _target) \ 91 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_FROM, __stringify(%%r0), __stringify(%%r0), 0) 88 92 89 93 #endif /* __ASM_EXTABLE_H */
+48 -62
arch/s390/include/asm/uaccess.h
··· 44 44 }; 45 45 }; 46 46 47 - static __always_inline __must_check unsigned long 47 + #ifdef CONFIG_KMSAN 48 + #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory 49 + #else 50 + #define uaccess_kmsan_or_inline __always_inline 51 + #endif 52 + 53 + static uaccess_kmsan_or_inline __must_check unsigned long 48 54 raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key) 49 55 { 50 - unsigned long rem; 56 + unsigned long osize; 51 57 union oac spec = { 52 58 .oac2.key = key, 53 59 .oac2.as = PSW_BITS_AS_SECONDARY, 54 60 .oac2.k = 1, 55 61 .oac2.a = 1, 56 62 }; 63 + int cc; 57 64 58 - asm_inline volatile( 59 - " lr %%r0,%[spec]\n" 60 - "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 61 - "1: jz 5f\n" 62 - " algr %[size],%[val]\n" 63 - " slgr %[from],%[val]\n" 64 - " slgr %[to],%[val]\n" 65 - " j 0b\n" 66 - "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */ 67 - " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */ 68 - " slgr %[rem],%[from]\n" 69 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? 
*/ 70 - " jnh 6f\n" 71 - "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 72 - "4: slgr %[size],%[rem]\n" 73 - " j 6f\n" 74 - "5: lghi %[size],0\n" 75 - "6:\n" 76 - EX_TABLE(0b, 2b) 77 - EX_TABLE(1b, 2b) 78 - EX_TABLE(3b, 6b) 79 - EX_TABLE(4b, 6b) 80 - : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem) 81 - : [val] "a" (-4096UL), [spec] "d" (spec.val) 82 - : "cc", "memory", "0"); 83 - return size; 65 + while (1) { 66 + osize = size; 67 + asm_inline volatile( 68 + " lr %%r0,%[spec]\n" 69 + "0: mvcos %[to],%[from],%[size]\n" 70 + "1: nopr %%r7\n" 71 + CC_IPM(cc) 72 + EX_TABLE_UA_MVCOS_FROM(0b, 0b) 73 + EX_TABLE_UA_MVCOS_FROM(1b, 0b) 74 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to) 75 + : [spec] "d" (spec.val), [from] "Q" (*(const char __user *)from) 76 + : CC_CLOBBER_LIST("memory", "0")); 77 + if (likely(CC_TRANSFORM(cc) == 0)) 78 + return osize - size; 79 + size -= 4096; 80 + to += 4096; 81 + from += 4096; 82 + } 84 83 } 85 84 86 85 static __always_inline __must_check unsigned long ··· 88 89 return raw_copy_from_user_key(to, from, n, 0); 89 90 } 90 91 91 - static __always_inline __must_check unsigned long 92 + static uaccess_kmsan_or_inline __must_check unsigned long 92 93 raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key) 93 94 { 94 - unsigned long rem; 95 + unsigned long osize; 95 96 union oac spec = { 96 97 .oac1.key = key, 97 98 .oac1.as = PSW_BITS_AS_SECONDARY, 98 99 .oac1.k = 1, 99 100 .oac1.a = 1, 100 101 }; 102 + int cc; 101 103 102 - asm_inline volatile( 103 - " lr %%r0,%[spec]\n" 104 - "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 105 - "1: jz 5f\n" 106 - " algr %[size],%[val]\n" 107 - " slgr %[to],%[val]\n" 108 - " slgr %[from],%[val]\n" 109 - " j 0b\n" 110 - "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */ 111 - " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */ 112 - " slgr %[rem],%[to]\n" 113 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? 
*/ 114 - " jnh 6f\n" 115 - "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 116 - "4: slgr %[size],%[rem]\n" 117 - " j 6f\n" 118 - "5: lghi %[size],0\n" 119 - "6:\n" 120 - EX_TABLE(0b, 2b) 121 - EX_TABLE(1b, 2b) 122 - EX_TABLE(3b, 6b) 123 - EX_TABLE(4b, 6b) 124 - : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem) 125 - : [val] "a" (-4096UL), [spec] "d" (spec.val) 126 - : "cc", "memory", "0"); 127 - return size; 104 + while (1) { 105 + osize = size; 106 + asm_inline volatile( 107 + " lr %%r0,%[spec]\n" 108 + "0: mvcos %[to],%[from],%[size]\n" 109 + "1: nopr %%r7\n" 110 + CC_IPM(cc) 111 + EX_TABLE_UA_MVCOS_TO(0b, 0b) 112 + EX_TABLE_UA_MVCOS_TO(1b, 0b) 113 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to) 114 + : [spec] "d" (spec.val), [from] "Q" (*(const char *)from) 115 + : CC_CLOBBER_LIST("memory", "0")); 116 + if (likely(CC_TRANSFORM(cc) == 0)) 117 + return osize - size; 118 + size -= 4096; 119 + to += 4096; 120 + from += 4096; 121 + } 128 122 } 129 123 130 124 static __always_inline __must_check unsigned long ··· 149 157 } 150 158 151 159 int __noreturn __put_user_bad(void); 152 - 153 - #ifdef CONFIG_KMSAN 154 - #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory 155 - #else 156 - #define uaccess_kmsan_or_inline __always_inline 157 - #endif 158 160 159 161 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 160 162
+47
arch/s390/mm/extable.c
··· 73 73 return true; 74 74 } 75 75 76 + struct insn_ssf { 77 + u64 opc1 : 8; 78 + u64 r3 : 4; 79 + u64 opc2 : 4; 80 + u64 b1 : 4; 81 + u64 d1 : 12; 82 + u64 b2 : 4; 83 + u64 d2 : 12; 84 + } __packed; 85 + 86 + static bool ex_handler_ua_mvcos(const struct exception_table_entry *ex, 87 + bool from, struct pt_regs *regs) 88 + { 89 + unsigned long uaddr, remainder; 90 + struct insn_ssf *insn; 91 + 92 + /* 93 + * If the faulting user space access crossed a page boundary retry by 94 + * limiting the access to the first page (adjust length accordingly). 95 + * Then the mvcos instruction will either complete with condition code 96 + * zero, or generate another fault where the user space access did not 97 + * cross a page boundary. 98 + * If the faulting user space access did not cross a page boundary set 99 + * length to zero and retry. In this case no user space access will 100 + * happen, and the mvcos instruction will complete with condition code 101 + * zero. 102 + * In both cases the instruction will complete with condition code 103 + * zero (copying finished), and the register which contains the 104 + * length, indicates the number of bytes copied. 
105 + */ 106 + regs->psw.addr = extable_fixup(ex); 107 + insn = (struct insn_ssf *)regs->psw.addr; 108 + if (from) 109 + uaddr = regs->gprs[insn->b2] + insn->d2; 110 + else 111 + uaddr = regs->gprs[insn->b1] + insn->d1; 112 + remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1)); 113 + if (regs->gprs[insn->r3] <= remainder) 114 + remainder = 0; 115 + regs->gprs[insn->r3] = remainder; 116 + return true; 117 + } 118 + 76 119 bool fixup_exception(struct pt_regs *regs) 77 120 { 78 121 const struct exception_table_entry *ex; ··· 138 95 return ex_handler_zeropad(ex, regs); 139 96 case EX_TYPE_FPC: 140 97 return ex_handler_fpc(ex, regs); 98 + case EX_TYPE_UA_MVCOS_TO: 99 + return ex_handler_ua_mvcos(ex, false, regs); 100 + case EX_TYPE_UA_MVCOS_FROM: 101 + return ex_handler_ua_mvcos(ex, true, regs); 141 102 } 142 103 panic("invalid exception table entry"); 143 104 }