Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: propagate the calling conventions change down into csum_partial_copy_generic()

Turn the exception handlers into stubs that return 0.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro 2a5d2bd1 1cd95ab8

+19 -68
+4 -16
arch/xtensa/include/asm/checksum.h
··· 37 37 * better 64-bit) boundary 38 38 */ 39 39 40 - asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, 41 - int len, __wsum sum, 42 - int *src_err_ptr, int *dst_err_ptr); 40 + asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len); 43 41 44 42 #define _HAVE_ARCH_CSUM_AND_COPY 45 43 /* ··· 47 49 static inline 48 50 __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) 49 51 { 50 - return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL); 52 + return csum_partial_copy_generic(src, dst, len); 51 53 } 52 54 53 55 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER ··· 55 57 __wsum csum_and_copy_from_user(const void __user *src, void *dst, 56 58 int len) 57 59 { 58 - int err = 0; 59 - 60 60 if (!access_ok(src, len)) 61 61 return 0; 62 - 63 - sum = csum_partial_copy_generic((__force const void *)src, dst, 64 - len, ~0U, &err, NULL); 65 - return err ? 0 : sum; 62 + return csum_partial_copy_generic((__force const void *)src, dst, len); 66 63 } 67 64 68 65 /* ··· 240 247 static __inline__ __wsum csum_and_copy_to_user(const void *src, 241 248 void __user *dst, int len) 242 249 { 243 - int err = 0; 244 - __wsum sum = ~0U; 245 - 246 250 if (!access_ok(dst, len)) 247 251 return 0; 248 - 249 - sum = csum_partial_copy_generic(src,dst,len,sum,NULL,&err); 250 - return err ? 0 : sum; 252 + return csum_partial_copy_generic(src, (__force void *)dst, len); 251 253 } 252 254 #endif
+15 -52
arch/xtensa/lib/checksum.S
··· 175 175 */ 176 176 177 177 /* 178 - unsigned int csum_partial_copy_generic (const char *src, char *dst, int len, 179 - int sum, int *src_err_ptr, int *dst_err_ptr) 178 + unsigned int csum_partial_copy_generic (const char *src, char *dst, int len) 180 179 a2 = src 181 180 a3 = dst 182 181 a4 = len 183 182 a5 = sum 184 - a6 = src_err_ptr 185 - a7 = dst_err_ptr 186 183 a8 = temp 187 184 a9 = temp 188 185 a10 = temp 189 - a11 = original len for exception handling 190 - a12 = original dst for exception handling 191 186 192 187 This function is optimized for 4-byte aligned addresses. Other 193 188 alignments work, but not nearly as efficiently. ··· 191 196 ENTRY(csum_partial_copy_generic) 192 197 193 198 abi_entry_default 194 - mov a12, a3 195 - mov a11, a4 199 + movi a5, -1 196 200 or a10, a2, a3 197 201 198 202 /* We optimize the following alignment tests for the 4-byte ··· 222 228 #endif 223 229 EX(10f) l32i a9, a2, 0 224 230 EX(10f) l32i a8, a2, 4 225 - EX(11f) s32i a9, a3, 0 226 - EX(11f) s32i a8, a3, 4 231 + EX(10f) s32i a9, a3, 0 232 + EX(10f) s32i a8, a3, 4 227 233 ONES_ADD(a5, a9) 228 234 ONES_ADD(a5, a8) 229 235 EX(10f) l32i a9, a2, 8 230 236 EX(10f) l32i a8, a2, 12 231 - EX(11f) s32i a9, a3, 8 232 - EX(11f) s32i a8, a3, 12 237 + EX(10f) s32i a9, a3, 8 238 + EX(10f) s32i a8, a3, 12 233 239 ONES_ADD(a5, a9) 234 240 ONES_ADD(a5, a8) 235 241 EX(10f) l32i a9, a2, 16 236 242 EX(10f) l32i a8, a2, 20 237 - EX(11f) s32i a9, a3, 16 238 - EX(11f) s32i a8, a3, 20 243 + EX(10f) s32i a9, a3, 16 244 + EX(10f) s32i a8, a3, 20 239 245 ONES_ADD(a5, a9) 240 246 ONES_ADD(a5, a8) 241 247 EX(10f) l32i a9, a2, 24 242 248 EX(10f) l32i a8, a2, 28 243 - EX(11f) s32i a9, a3, 24 244 - EX(11f) s32i a8, a3, 28 249 + EX(10f) s32i a9, a3, 24 250 + EX(10f) s32i a8, a3, 28 245 251 ONES_ADD(a5, a9) 246 252 ONES_ADD(a5, a8) 247 253 addi a2, a2, 32 ··· 261 267 .Loop6: 262 268 #endif 263 269 EX(10f) l32i a9, a2, 0 264 - EX(11f) s32i a9, a3, 0 270 + EX(10f) s32i a9, a3, 0 265 271 ONES_ADD(a5, 
a9) 266 272 addi a2, a2, 4 267 273 addi a3, a3, 4 ··· 292 298 .Loop7: 293 299 #endif 294 300 EX(10f) l16ui a9, a2, 0 295 - EX(11f) s16i a9, a3, 0 301 + EX(10f) s16i a9, a3, 0 296 302 ONES_ADD(a5, a9) 297 303 addi a2, a2, 2 298 304 addi a3, a3, 2 ··· 303 309 /* This section processes a possible trailing odd byte. */ 304 310 _bbci.l a4, 0, 8f /* 1-byte chunk */ 305 311 EX(10f) l8ui a9, a2, 0 306 - EX(11f) s8i a9, a3, 0 312 + EX(10f) s8i a9, a3, 0 307 313 #ifdef __XTENSA_EB__ 308 314 slli a9, a9, 8 /* shift byte to bits 8..15 */ 309 315 #endif ··· 328 334 #endif 329 335 EX(10f) l8ui a9, a2, 0 330 336 EX(10f) l8ui a8, a2, 1 331 - EX(11f) s8i a9, a3, 0 332 - EX(11f) s8i a8, a3, 1 337 + EX(10f) s8i a9, a3, 0 338 + EX(10f) s8i a8, a3, 1 333 339 #ifdef __XTENSA_EB__ 334 340 slli a9, a9, 8 /* combine into a single 16-bit value */ 335 341 #else /* for checksum computation */ ··· 350 356 351 357 # Exception handler: 352 358 .section .fixup, "ax" 353 - /* 354 - a6 = src_err_ptr 355 - a7 = dst_err_ptr 356 - a11 = original len for exception handling 357 - a12 = original dst for exception handling 358 - */ 359 - 360 359 10: 361 - _movi a2, -EFAULT 362 - s32i a2, a6, 0 /* src_err_ptr */ 363 - 364 - # clear the complete destination - computing the rest 365 - # is too much work 366 - movi a2, 0 367 - #if XCHAL_HAVE_LOOPS 368 - loopgtz a11, 2f 369 - #else 370 - beqz a11, 2f 371 - add a11, a11, a12 /* a11 = ending address */ 372 - .Leloop: 373 - #endif 374 - s8i a2, a12, 0 375 - addi a12, a12, 1 376 - #if !XCHAL_HAVE_LOOPS 377 - blt a12, a11, .Leloop 378 - #endif 379 - 2: 380 - abi_ret_default 381 - 382 - 11: 383 - movi a2, -EFAULT 384 - s32i a2, a7, 0 /* dst_err_ptr */ 385 360 movi a2, 0 386 361 abi_ret_default 387 362