Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

m68k: get rid of zeroing

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro 7cefa5a0 68acfdcb

+43 -26
+40 -17
arch/m68k/include/asm/uaccess_mm.h
··· 179 179 unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); 180 180 unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); 181 181 182 - #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ 182 + #define __suffix0 183 + #define __suffix1 b 184 + #define __suffix2 w 185 + #define __suffix4 l 186 + 187 + #define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ 183 188 asm volatile ("\n" \ 184 189 "1: "MOVES"."#s1" (%2)+,%3\n" \ 185 190 " move."#s1" %3,(%1)+\n" \ 191 + " .ifnc \""#s2"\",\"\"\n" \ 186 192 "2: "MOVES"."#s2" (%2)+,%3\n" \ 187 193 " move."#s2" %3,(%1)+\n" \ 188 194 " .ifnc \""#s3"\",\"\"\n" \ 189 195 "3: "MOVES"."#s3" (%2)+,%3\n" \ 190 196 " move."#s3" %3,(%1)+\n" \ 191 197 " .endif\n" \ 198 + " .endif\n" \ 192 199 "4:\n" \ 193 200 " .section __ex_table,\"a\"\n" \ 194 201 " .align 4\n" \ 195 202 " .long 1b,10f\n" \ 203 + " .ifnc \""#s2"\",\"\"\n" \ 196 204 " .long 2b,20f\n" \ 197 205 " .ifnc \""#s3"\",\"\"\n" \ 198 206 " .long 3b,30f\n" \ 207 + " .endif\n" \ 199 208 " .endif\n" \ 200 209 " .previous\n" \ 201 210 "\n" \ 202 211 " .section .fixup,\"ax\"\n" \ 203 212 " .even\n" \ 204 - "10: clr."#s1" (%1)+\n" \ 205 - "20: clr."#s2" (%1)+\n" \ 213 + "10: addq.l #"#n1",%0\n" \ 214 + " .ifnc \""#s2"\",\"\"\n" \ 215 + "20: addq.l #"#n2",%0\n" \ 206 216 " .ifnc \""#s3"\",\"\"\n" \ 207 - "30: clr."#s3" (%1)+\n" \ 217 + "30: addq.l #"#n3",%0\n" \ 208 218 " .endif\n" \ 209 - " moveq.l #"#n",%0\n" \ 219 + " .endif\n" \ 210 220 " jra 4b\n" \ 211 221 " .previous\n" \ 212 222 : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ 213 223 : : "memory") 224 + 225 + #define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ 226 + ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3) 227 + #define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \ 228 + ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \
229 + __suffix##n1, __suffix##n2, __suffix##n3) 214 230 215 231 static __always_inline unsigned long 216 232 __constant_copy_from_user(void *to, const void __user *from, unsigned long n) ··· 235 219 236 220 switch (n) { 237 221 case 1: 238 - __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); 222 + __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0); 239 223 break; 240 224 case 2: 241 - __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2); 225 + __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0); 242 226 break; 243 227 case 3: 244 - __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); 228 + __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0); 245 229 break; 246 230 case 4: 247 - __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4); 231 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0); 248 232 break; 249 233 case 5: 250 - __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); 234 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0); 251 235 break; 252 236 case 6: 253 - __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); 237 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0); 254 238 break; 255 239 case 7: 256 - __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); 240 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1); 257 241 break; 258 242 case 8: 259 - __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); 243 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0); 260 244 break; 261 245 case 9: 262 - __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); 246 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1); 263 247 break; 264 248 case 10: 265 - __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w); 249 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2); 266 250 break; 267 251 case 12: 268 - __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
252 + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4); 269 253 break; 270 254 default: 271 255 /* we limit the inlined version to 3 moves */ ··· 369 353 #define __copy_to_user_inatomic __copy_to_user 370 354 #define __copy_from_user_inatomic __copy_from_user 371 355 372 - #define copy_from_user(to, from, n) __copy_from_user(to, from, n) 356 + static inline unsigned long 357 + copy_from_user(void *to, const void __user *from, unsigned long n) 358 + { 359 + unsigned long res = __copy_from_user_inatomic(to, from, n); 360 + if (unlikely(res)) 361 + memset(to + (n - res), 0, res); 362 + return res; 363 + } 373 364 #define copy_to_user(to, from, n) __copy_to_user(to, from, n) 374 365 375 366 #define user_addr_max() \
+3 -9
arch/m68k/lib/uaccess.c
··· 30 30 "6:\n" 31 31 " .section .fixup,\"ax\"\n" 32 32 " .even\n" 33 - "10: move.l %0,%3\n" 34 - "7: clr.l (%2)+\n" 35 - " subq.l #1,%3\n" 36 - " jne 7b\n" 37 - " lsl.l #2,%0\n" 33 + "10: lsl.l #2,%0\n" 38 34 " btst #1,%5\n" 39 35 " jeq 8f\n" 40 - "30: clr.w (%2)+\n" 41 - " addq.l #2,%0\n" 36 + "30: addq.l #2,%0\n" 42 37 "8: btst #0,%5\n" 43 38 " jeq 6b\n" 44 - "50: clr.b (%2)+\n" 45 - " addq.l #1,%0\n" 39 + "50: addq.l #1,%0\n" 46 40 " jra 6b\n" 47 41 " .previous\n" 48 42 "\n"