Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: get rid of small constant size cases in raw_copy_{to,from}_user()

Very few call sites where that would be triggered remain, and none
of those is anywhere near hot enough to bother.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro 4b842e4e 71c3313a

+2 -145
-12
arch/x86/include/asm/uaccess.h
··· 378 378 : "=r" (err), ltype(x) \ 379 379 : "m" (__m(addr)), "i" (errret), "0" (err)) 380 380 381 - #define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \ 382 - asm volatile("\n" \ 383 - "1: mov"itype" %2,%"rtype"1\n" \ 384 - "2:\n" \ 385 - ".section .fixup,\"ax\"\n" \ 386 - "3: mov %3,%0\n" \ 387 - " jmp 2b\n" \ 388 - ".previous\n" \ 389 - _ASM_EXTABLE_UA(1b, 3b) \ 390 - : "=r" (err), ltype(x) \ 391 - : "m" (__m(addr)), "i" (errret), "0" (err)) 392 - 393 381 /* 394 382 * This doesn't do __uaccess_begin/end - the exception handling 395 383 * around it must do that.
-27
arch/x86/include/asm/uaccess_32.h
··· 23 23 static __always_inline unsigned long 24 24 raw_copy_from_user(void *to, const void __user *from, unsigned long n) 25 25 { 26 - if (__builtin_constant_p(n)) { 27 - unsigned long ret; 28 - 29 - switch (n) { 30 - case 1: 31 - ret = 0; 32 - __uaccess_begin_nospec(); 33 - __get_user_asm_nozero(*(u8 *)to, from, ret, 34 - "b", "b", "=q", 1); 35 - __uaccess_end(); 36 - return ret; 37 - case 2: 38 - ret = 0; 39 - __uaccess_begin_nospec(); 40 - __get_user_asm_nozero(*(u16 *)to, from, ret, 41 - "w", "w", "=r", 2); 42 - __uaccess_end(); 43 - return ret; 44 - case 4: 45 - ret = 0; 46 - __uaccess_begin_nospec(); 47 - __get_user_asm_nozero(*(u32 *)to, from, ret, 48 - "l", "k", "=r", 4); 49 - __uaccess_end(); 50 - return ret; 51 - } 52 - } 53 26 return __copy_user_ll(to, (__force const void *)from, n); 54 27 } 55 28
+2 -106
arch/x86/include/asm/uaccess_64.h
··· 65 65 static __always_inline __must_check unsigned long 66 66 raw_copy_from_user(void *dst, const void __user *src, unsigned long size) 67 67 { 68 - int ret = 0; 69 - 70 - if (!__builtin_constant_p(size)) 71 - return copy_user_generic(dst, (__force void *)src, size); 72 - switch (size) { 73 - case 1: 74 - __uaccess_begin_nospec(); 75 - __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, 76 - ret, "b", "b", "=q", 1); 77 - __uaccess_end(); 78 - return ret; 79 - case 2: 80 - __uaccess_begin_nospec(); 81 - __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, 82 - ret, "w", "w", "=r", 2); 83 - __uaccess_end(); 84 - return ret; 85 - case 4: 86 - __uaccess_begin_nospec(); 87 - __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, 88 - ret, "l", "k", "=r", 4); 89 - __uaccess_end(); 90 - return ret; 91 - case 8: 92 - __uaccess_begin_nospec(); 93 - __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, 94 - ret, "q", "", "=r", 8); 95 - __uaccess_end(); 96 - return ret; 97 - case 10: 98 - __uaccess_begin_nospec(); 99 - __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, 100 - ret, "q", "", "=r", 10); 101 - if (likely(!ret)) 102 - __get_user_asm_nozero(*(u16 *)(8 + (char *)dst), 103 - (u16 __user *)(8 + (char __user *)src), 104 - ret, "w", "w", "=r", 2); 105 - __uaccess_end(); 106 - return ret; 107 - case 16: 108 - __uaccess_begin_nospec(); 109 - __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, 110 - ret, "q", "", "=r", 16); 111 - if (likely(!ret)) 112 - __get_user_asm_nozero(*(u64 *)(8 + (char *)dst), 113 - (u64 __user *)(8 + (char __user *)src), 114 - ret, "q", "", "=r", 8); 115 - __uaccess_end(); 116 - return ret; 117 - default: 118 - return copy_user_generic(dst, (__force void *)src, size); 119 - } 68 + return copy_user_generic(dst, (__force void *)src, size); 120 69 } 121 70 122 71 static __always_inline __must_check unsigned long 123 72 raw_copy_to_user(void __user *dst, const void *src, unsigned long size) 124 73 { 125 - int ret = 0; 126 - 127 - if (!__builtin_constant_p(size)) 128 - return copy_user_generic((__force void *)dst, src, size); 129 - switch (size) { 130 - case 1: 131 - __uaccess_begin(); 132 - __put_user_asm(*(u8 *)src, (u8 __user *)dst, 133 - ret, "b", "b", "iq", 1); 134 - __uaccess_end(); 135 - return ret; 136 - case 2: 137 - __uaccess_begin(); 138 - __put_user_asm(*(u16 *)src, (u16 __user *)dst, 139 - ret, "w", "w", "ir", 2); 140 - __uaccess_end(); 141 - return ret; 142 - case 4: 143 - __uaccess_begin(); 144 - __put_user_asm(*(u32 *)src, (u32 __user *)dst, 145 - ret, "l", "k", "ir", 4); 146 - __uaccess_end(); 147 - return ret; 148 - case 8: 149 - __uaccess_begin(); 150 - __put_user_asm(*(u64 *)src, (u64 __user *)dst, 151 - ret, "q", "", "er", 8); 152 - __uaccess_end(); 153 - return ret; 154 - case 10: 155 - __uaccess_begin(); 156 - __put_user_asm(*(u64 *)src, (u64 __user *)dst, 157 - ret, "q", "", "er", 10); 158 - if (likely(!ret)) { 159 - asm("":::"memory"); 160 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, 161 - ret, "w", "w", "ir", 2); 162 - } 163 - __uaccess_end(); 164 - return ret; 165 - case 16: 166 - __uaccess_begin(); 167 - __put_user_asm(*(u64 *)src, (u64 __user *)dst, 168 - ret, "q", "", "er", 16); 169 - if (likely(!ret)) { 170 - asm("":::"memory"); 171 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, 172 - ret, "q", "", "er", 8); 173 - } 174 - __uaccess_end(); 175 - return ret; 176 - default: 177 - return copy_user_generic((__force void *)dst, src, size); 178 - } 74 + return copy_user_generic((__force void *)dst, src, size); 179 75 } 180 76 181 77 static __always_inline __must_check