Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: propagate the calling conventions change down to csum_partial_copy_generic()

... and get rid of zeroing destination on error there.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro dc16c8a9 66aa3880

+39 -100
+4 -16
arch/sh/include/asm/checksum_32.h
··· 30 30 * better 64-bit) boundary 31 31 */ 32 32 33 - asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, 34 - int len, __wsum sum, 35 - int *src_err_ptr, int *dst_err_ptr); 33 + asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len); 36 34 37 35 #define _HAVE_ARCH_CSUM_AND_COPY 38 36 /* ··· 43 45 static inline 44 46 __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) 45 47 { 46 - return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL); 48 + return csum_partial_copy_generic(src, dst, len); 47 49 } 48 50 49 51 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 50 52 static inline 51 53 __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len) 52 54 { 53 - int err = 0; 54 - __wsum sum = ~0U; 55 - 56 55 if (!access_ok(src, len)) 57 56 return 0; 58 - sum = csum_partial_copy_generic((__force const void *)src, dst, 59 - len, sum, &err, NULL); 60 - return err ? 0 : sum; 57 + return csum_partial_copy_generic((__force const void *)src, dst, len); 61 58 } 62 59 63 60 /* ··· 195 202 void __user *dst, 196 203 int len) 197 204 { 198 - int err = 0; 199 - __wsum sum = ~0U; 200 - 201 205 if (!access_ok(dst, len)) 202 206 return 0; 203 - sum = csum_partial_copy_generic((__force const void *)src, 204 - dst, len, sum, NULL, &err); 205 - return err ? 0 : sum; 207 + return csum_partial_copy_generic((__force const void *)src, dst, len); 206 208 } 207 209 #endif /* __ASM_SH_CHECKSUM_H */
+35 -84
arch/sh/lib/checksum.S
··· 173 173 mov r6, r0 174 174 175 175 /* 176 - unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, 177 - int sum, int *src_err_ptr, int *dst_err_ptr) 176 + unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) 178 177 */ 179 178 180 179 /* 181 - * Copy from ds while checksumming, otherwise like csum_partial 182 - * 183 - * The macros SRC and DST specify the type of access for the instruction. 184 - * thus we can call a custom exception handler for all access types. 185 - * 186 - * FIXME: could someone double-check whether I haven't mixed up some SRC and 187 - * DST definitions? It's damn hard to trigger all cases. I hope I got 188 - * them all but there's no guarantee. 180 + * Copy from ds while checksumming, otherwise like csum_partial with initial 181 + * sum being ~0U 189 182 */ 190 183 191 - #define SRC(...) \ 184 + #define EXC(...) \ 192 185 9999: __VA_ARGS__ ; \ 193 186 .section __ex_table, "a"; \ 194 187 .long 9999b, 6001f ; \ 195 - .previous 196 - 197 - #define DST(...) \ 198 - 9999: __VA_ARGS__ ; \ 199 - .section __ex_table, "a"; \ 200 - .long 9999b, 6002f ; \ 201 188 .previous 202 189 203 190 ! 204 191 ! r4: const char *SRC 205 192 ! r5: char *DST 206 193 ! r6: int LEN 207 - ! r7: int SUM 208 - ! 209 - ! on stack: 210 - ! int *SRC_ERR_PTR 211 - ! int *DST_ERR_PTR 212 194 ! 213 195 ENTRY(csum_partial_copy_generic) 214 - mov.l r5,@-r15 215 - mov.l r6,@-r15 216 - 196 + mov #-1,r7 217 197 mov #3,r0 ! Check src and dest are equally aligned 218 198 mov r4,r1 219 199 and r0,r1 ··· 223 243 clrt 224 244 .align 2 225 245 5: 226 - SRC( mov.b @r4+,r1 ) 227 - SRC( mov.b @r4+,r0 ) 246 + EXC( mov.b @r4+,r1 ) 247 + EXC( mov.b @r4+,r0 ) 228 248 extu.b r1,r1 229 - DST( mov.b r1,@r5 ) 230 - DST( mov.b r0,@(1,r5) ) 249 + EXC( mov.b r1,@r5 ) 250 + EXC( mov.b r0,@(1,r5) ) 231 251 extu.b r0,r0 232 252 add #2,r5 233 253 ··· 256 276 ! 
Handle first two bytes as a special case 257 277 .align 2 258 278 1: 259 - SRC( mov.w @r4+,r0 ) 260 - DST( mov.w r0,@r5 ) 279 + EXC( mov.w @r4+,r0 ) 280 + EXC( mov.w r0,@r5 ) 261 281 add #2,r5 262 282 extu.w r0,r0 263 283 addc r0,r7 ··· 272 292 clrt 273 293 .align 2 274 294 1: 275 - SRC( mov.l @r4+,r0 ) 276 - SRC( mov.l @r4+,r1 ) 295 + EXC( mov.l @r4+,r0 ) 296 + EXC( mov.l @r4+,r1 ) 277 297 addc r0,r7 278 - DST( mov.l r0,@r5 ) 279 - DST( mov.l r1,@(4,r5) ) 298 + EXC( mov.l r0,@r5 ) 299 + EXC( mov.l r1,@(4,r5) ) 280 300 addc r1,r7 281 301 282 - SRC( mov.l @r4+,r0 ) 283 - SRC( mov.l @r4+,r1 ) 302 + EXC( mov.l @r4+,r0 ) 303 + EXC( mov.l @r4+,r1 ) 284 304 addc r0,r7 285 - DST( mov.l r0,@(8,r5) ) 286 - DST( mov.l r1,@(12,r5) ) 305 + EXC( mov.l r0,@(8,r5) ) 306 + EXC( mov.l r1,@(12,r5) ) 287 307 addc r1,r7 288 308 289 - SRC( mov.l @r4+,r0 ) 290 - SRC( mov.l @r4+,r1 ) 309 + EXC( mov.l @r4+,r0 ) 310 + EXC( mov.l @r4+,r1 ) 291 311 addc r0,r7 292 - DST( mov.l r0,@(16,r5) ) 293 - DST( mov.l r1,@(20,r5) ) 312 + EXC( mov.l r0,@(16,r5) ) 313 + EXC( mov.l r1,@(20,r5) ) 294 314 addc r1,r7 295 315 296 - SRC( mov.l @r4+,r0 ) 297 - SRC( mov.l @r4+,r1 ) 316 + EXC( mov.l @r4+,r0 ) 317 + EXC( mov.l @r4+,r1 ) 298 318 addc r0,r7 299 - DST( mov.l r0,@(24,r5) ) 300 - DST( mov.l r1,@(28,r5) ) 319 + EXC( mov.l r0,@(24,r5) ) 320 + EXC( mov.l r1,@(28,r5) ) 301 321 addc r1,r7 302 322 add #32,r5 303 323 movt r0 ··· 315 335 clrt 316 336 shlr2 r6 317 337 3: 318 - SRC( mov.l @r4+,r0 ) 338 + EXC( mov.l @r4+,r0 ) 319 339 addc r0,r7 320 - DST( mov.l r0,@r5 ) 340 + EXC( mov.l r0,@r5 ) 321 341 add #4,r5 322 342 movt r0 323 343 dt r6 ··· 333 353 mov #2,r1 334 354 cmp/hs r1,r6 335 355 bf 5f 336 - SRC( mov.w @r4+,r0 ) 337 - DST( mov.w r0,@r5 ) 356 + EXC( mov.w @r4+,r0 ) 357 + EXC( mov.w r0,@r5 ) 338 358 extu.w r0,r0 339 359 add #2,r5 340 360 cmp/eq r1,r6 ··· 343 363 shll16 r0 344 364 addc r0,r7 345 365 5: 346 - SRC( mov.b @r4+,r0 ) 347 - DST( mov.b r0,@r5 ) 366 + EXC( mov.b @r4+,r0 ) 367 + EXC( mov.b r0,@r5 
) 348 368 extu.b r0,r0 349 369 #ifndef __LITTLE_ENDIAN__ 350 370 shll8 r0 ··· 353 373 mov #0,r0 354 374 addc r0,r7 355 375 7: 356 - 5000: 357 376 358 377 # Exception handler: 359 378 .section .fixup, "ax" 360 379 361 380 6001: 362 - mov.l @(8,r15),r0 ! src_err_ptr 363 - mov #-EFAULT,r1 364 - mov.l r1,@r0 365 - 366 - ! zero the complete destination - computing the rest 367 - ! is too much work 368 - mov.l @(4,r15),r5 ! dst 369 - mov.l @r15,r6 ! len 370 - mov #0,r7 371 - 1: mov.b r7,@r5 372 - dt r6 373 - bf/s 1b 374 - add #1,r5 375 - mov.l 8000f,r0 376 - jmp @r0 377 - nop 378 - .align 2 379 - 8000: .long 5000b 380 - 381 - 6002: 382 - mov.l @(12,r15),r0 ! dst_err_ptr 383 - mov #-EFAULT,r1 384 - mov.l r1,@r0 385 - mov.l 8001f,r0 386 - jmp @r0 387 - nop 388 - .align 2 389 - 8001: .long 5000b 390 - 381 + rts 382 + mov #0,r0 391 383 .previous 392 - add #8,r15 393 384 rts 394 385 mov r7,r0