Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: fix memmove(), bcopy(), and memcpy().

- fix memmove to correctly handle overlapping src and dst;
- fix memcpy loop ending conditions from signed '<' (blt) to '!=' (bne);
- modify bcopy to call memmove;

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>

+284 -25
arch/xtensa/lib/memcopy.S
··· 6 6 * License. See the file "COPYING" in the main directory of this archive 7 7 * for more details. 8 8 * 9 - * Copyright (C) 2002 - 2005 Tensilica Inc. 9 + * Copyright (C) 2002 - 2012 Tensilica Inc. 10 10 */ 11 11 12 12 #include <variant/core.h> ··· 27 27 #endif 28 28 .endm 29 29 30 - 31 30 /* 32 31 * void *memcpy(void *dst, const void *src, size_t len); 33 - * void *memmove(void *dst, const void *src, size_t len); 34 - * void *bcopy(const void *src, void *dst, size_t len); 35 32 * 36 33 * This function is intended to do the same thing as the standard 37 - * library function memcpy() (or bcopy()) for most cases. 34 + * library function memcpy() for most cases. 38 35 * However, where the source and/or destination references 39 36 * an instruction RAM or ROM or a data RAM or ROM, that 40 37 * source and/or destination will always be accessed with ··· 41 44 * !!!!!!! XTFIXME: 42 45 * !!!!!!! Handling of IRAM/IROM has not yet 43 46 * !!!!!!! been implemented. 44 - * 45 - * The bcopy version is provided here to avoid the overhead 46 - * of an extra call, for callers that require this convention. 
47 47 * 48 48 * The (general case) algorithm is as follows: 49 49 * If destination is unaligned, align it by conditionally ··· 70 76 */ 71 77 72 78 .text 73 - .align 4 74 - .global bcopy 75 - .type bcopy,@function 76 - bcopy: 77 - entry sp, 16 # minimal stack frame 78 - # a2=src, a3=dst, a4=len 79 - mov a5, a3 # copy dst so that a2 is return value 80 - mov a3, a2 81 - mov a2, a5 82 - j .Lcommon # go to common code for memcpy+bcopy 83 - 84 79 85 80 /* 86 81 * Byte by byte copy ··· 90 107 s8i a6, a5, 0 91 108 addi a5, a5, 1 92 109 #if !XCHAL_HAVE_LOOPS 93 - blt a3, a7, .Lnextbyte 110 + bne a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end 94 111 #endif /* !XCHAL_HAVE_LOOPS */ 95 112 .Lbytecopydone: 96 113 retw ··· 127 144 .global memcpy 128 145 .type memcpy,@function 129 146 memcpy: 130 - .global memmove 131 - .type memmove,@function 132 - memmove: 133 147 134 148 entry sp, 16 # minimal stack frame 135 149 # a2/ dst, a3/ src, a4/ len ··· 162 182 s32i a7, a5, 12 163 183 addi a5, a5, 16 164 184 #if !XCHAL_HAVE_LOOPS 165 - blt a3, a8, .Loop1 185 + bne a3, a8, .Loop1 # continue loop if a3:src != a8:src_end 166 186 #endif /* !XCHAL_HAVE_LOOPS */ 167 187 .Loop1done: 168 188 bbci.l a4, 3, .L2 ··· 240 260 s32i a9, a5, 12 241 261 addi a5, a5, 16 242 262 #if !XCHAL_HAVE_LOOPS 243 - blt a3, a10, .Loop2 263 + bne a3, a10, .Loop2 # continue loop if a3:src != a10:src_end 244 264 #endif /* !XCHAL_HAVE_LOOPS */ 245 265 .Loop2done: 246 266 bbci.l a4, 3, .L12 ··· 285 305 l8ui a6, a3, 0 286 306 s8i a6, a5, 0 287 307 retw 308 + 309 + 310 + /* 311 + * void bcopy(const void *src, void *dest, size_t n); 312 + */ 313 + .align 4 314 + .global bcopy 315 + .type bcopy,@function 316 + bcopy: 317 + entry sp, 16 # minimal stack frame 318 + # a2=src, a3=dst, a4=len 319 + mov a5, a3 320 + mov a3, a2 321 + mov a2, a5 322 + j .Lmovecommon # go to common code for memmove+bcopy 323 + 324 + /* 325 + * void *memmove(void *dst, const void *src, size_t len); 326 + * 327 + * This function is 
intended to do the same thing as the standard 328 + * library function memmove() for most cases. 329 + * However, where the source and/or destination references 330 + * an instruction RAM or ROM or a data RAM or ROM, that 331 + * source and/or destination will always be accessed with 332 + * 32-bit load and store instructions (as required for these 333 + * types of devices). 334 + * 335 + * !!!!!!! XTFIXME: 336 + * !!!!!!! Handling of IRAM/IROM has not yet 337 + * !!!!!!! been implemented. 338 + * 339 + * The (general case) algorithm is as follows: 340 + * If end of source doesn't overlap destination then use memcpy. 341 + * Otherwise do memcpy backwards. 342 + * 343 + * Register use: 344 + * a0/ return address 345 + * a1/ stack pointer 346 + * a2/ return value 347 + * a3/ src 348 + * a4/ length 349 + * a5/ dst 350 + * a6/ tmp 351 + * a7/ tmp 352 + * a8/ tmp 353 + * a9/ tmp 354 + * a10/ tmp 355 + * a11/ tmp 356 + */ 357 + 358 + /* 359 + * Byte by byte copy 360 + */ 361 + .align 4 362 + .byte 0 # 1 mod 4 alignment for LOOPNEZ 363 + # (0 mod 4 alignment for LBEG) 364 + .Lbackbytecopy: 365 + #if XCHAL_HAVE_LOOPS 366 + loopnez a4, .Lbackbytecopydone 367 + #else /* !XCHAL_HAVE_LOOPS */ 368 + beqz a4, .Lbackbytecopydone 369 + sub a7, a3, a4 # a7 = start address for source 370 + #endif /* !XCHAL_HAVE_LOOPS */ 371 + .Lbacknextbyte: 372 + addi a3, a3, -1 373 + l8ui a6, a3, 0 374 + addi a5, a5, -1 375 + s8i a6, a5, 0 376 + #if !XCHAL_HAVE_LOOPS 377 + bne a3, a7, .Lbacknextbyte # continue loop if 378 + # $a3:src != $a7:src_start 379 + #endif /* !XCHAL_HAVE_LOOPS */ 380 + .Lbackbytecopydone: 381 + retw 382 + 383 + /* 384 + * Destination is unaligned 385 + */ 386 + 387 + .align 4 388 + .Lbackdst1mod2: # dst is only byte aligned 389 + _bltui a4, 7, .Lbackbytecopy # do short copies byte by byte 390 + 391 + # copy 1 byte 392 + addi a3, a3, -1 393 + l8ui a6, a3, 0 394 + addi a5, a5, -1 395 + s8i a6, a5, 0 396 + addi a4, a4, -1 397 + _bbci.l a5, 1, .Lbackdstaligned # if dst is now 
aligned, then 398 + # return to main algorithm 399 + .Lbackdst2mod4: # dst 16-bit aligned 400 + # copy 2 bytes 401 + _bltui a4, 6, .Lbackbytecopy # do short copies byte by byte 402 + addi a3, a3, -2 403 + l8ui a6, a3, 0 404 + l8ui a7, a3, 1 405 + addi a5, a5, -2 406 + s8i a6, a5, 0 407 + s8i a7, a5, 1 408 + addi a4, a4, -2 409 + j .Lbackdstaligned # dst is now aligned, 410 + # return to main algorithm 411 + 412 + .align 4 413 + .global memmove 414 + .type memmove,@function 415 + memmove: 416 + 417 + entry sp, 16 # minimal stack frame 418 + # a2/ dst, a3/ src, a4/ len 419 + mov a5, a2 # copy dst so that a2 is return value 420 + .Lmovecommon: 421 + sub a6, a5, a3 422 + bgeu a6, a4, .Lcommon 423 + 424 + add a5, a5, a4 425 + add a3, a3, a4 426 + 427 + _bbsi.l a5, 0, .Lbackdst1mod2 # if dst is 1 mod 2 428 + _bbsi.l a5, 1, .Lbackdst2mod4 # if dst is 2 mod 4 429 + .Lbackdstaligned: # return here from .Lbackdst?mod? once dst is aligned 430 + srli a7, a4, 4 # number of loop iterations with 16B 431 + # per iteration 432 + movi a8, 3 # if source is not aligned, 433 + _bany a3, a8, .Lbacksrcunaligned # then use shifting copy 434 + /* 435 + * Destination and source are word-aligned, use word copy. 
436 + */ 437 + # copy 16 bytes per iteration for word-aligned dst and word-aligned src 438 + #if XCHAL_HAVE_LOOPS 439 + loopnez a7, .backLoop1done 440 + #else /* !XCHAL_HAVE_LOOPS */ 441 + beqz a7, .backLoop1done 442 + slli a8, a7, 4 443 + sub a8, a3, a8 # a8 = start of first 16B source chunk 444 + #endif /* !XCHAL_HAVE_LOOPS */ 445 + .backLoop1: 446 + addi a3, a3, -16 447 + l32i a7, a3, 12 448 + l32i a6, a3, 8 449 + addi a5, a5, -16 450 + s32i a7, a5, 12 451 + l32i a7, a3, 4 452 + s32i a6, a5, 8 453 + l32i a6, a3, 0 454 + s32i a7, a5, 4 455 + s32i a6, a5, 0 456 + #if !XCHAL_HAVE_LOOPS 457 + bne a3, a8, .backLoop1 # continue loop if a3:src != a8:src_start 458 + #endif /* !XCHAL_HAVE_LOOPS */ 459 + .backLoop1done: 460 + bbci.l a4, 3, .Lback2 461 + # copy 8 bytes 462 + addi a3, a3, -8 463 + l32i a6, a3, 0 464 + l32i a7, a3, 4 465 + addi a5, a5, -8 466 + s32i a6, a5, 0 467 + s32i a7, a5, 4 468 + .Lback2: 469 + bbsi.l a4, 2, .Lback3 470 + bbsi.l a4, 1, .Lback4 471 + bbsi.l a4, 0, .Lback5 472 + retw 473 + .Lback3: 474 + # copy 4 bytes 475 + addi a3, a3, -4 476 + l32i a6, a3, 0 477 + addi a5, a5, -4 478 + s32i a6, a5, 0 479 + bbsi.l a4, 1, .Lback4 480 + bbsi.l a4, 0, .Lback5 481 + retw 482 + .Lback4: 483 + # copy 2 bytes 484 + addi a3, a3, -2 485 + l16ui a6, a3, 0 486 + addi a5, a5, -2 487 + s16i a6, a5, 0 488 + bbsi.l a4, 0, .Lback5 489 + retw 490 + .Lback5: 491 + # copy 1 byte 492 + addi a3, a3, -1 493 + l8ui a6, a3, 0 494 + addi a5, a5, -1 495 + s8i a6, a5, 0 496 + retw 497 + 498 + /* 499 + * Destination is aligned, Source is unaligned 500 + */ 501 + 502 + .align 4 503 + .Lbacksrcunaligned: 504 + _beqz a4, .Lbackdone # avoid loading anything for zero-length copies 505 + # copy 16 bytes per iteration for word-aligned dst and unaligned src 506 + ssa8 a3 # set shift amount from byte offset 507 + #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with 508 + * the lint or ferret client, or 0 509 + * to save a few cycles */ 510 + #if 
XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT 511 + and a11, a3, a8 # save unalignment offset for below 512 + sub a3, a3, a11 # align a3 513 + #endif 514 + l32i a6, a3, 0 # load first word 515 + #if XCHAL_HAVE_LOOPS 516 + loopnez a7, .backLoop2done 517 + #else /* !XCHAL_HAVE_LOOPS */ 518 + beqz a7, .backLoop2done 519 + slli a10, a7, 4 520 + sub a10, a3, a10 # a10 = start of first 16B source chunk 521 + #endif /* !XCHAL_HAVE_LOOPS */ 522 + .backLoop2: 523 + addi a3, a3, -16 524 + l32i a7, a3, 12 525 + l32i a8, a3, 8 526 + addi a5, a5, -16 527 + src_b a6, a7, a6 528 + s32i a6, a5, 12 529 + l32i a9, a3, 4 530 + src_b a7, a8, a7 531 + s32i a7, a5, 8 532 + l32i a6, a3, 0 533 + src_b a8, a9, a8 534 + s32i a8, a5, 4 535 + src_b a9, a6, a9 536 + s32i a9, a5, 0 537 + #if !XCHAL_HAVE_LOOPS 538 + bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start 539 + #endif /* !XCHAL_HAVE_LOOPS */ 540 + .backLoop2done: 541 + bbci.l a4, 3, .Lback12 542 + # copy 8 bytes 543 + addi a3, a3, -8 544 + l32i a7, a3, 4 545 + l32i a8, a3, 0 546 + addi a5, a5, -8 547 + src_b a6, a7, a6 548 + s32i a6, a5, 4 549 + src_b a7, a8, a7 550 + s32i a7, a5, 0 551 + mov a6, a8 552 + .Lback12: 553 + bbci.l a4, 2, .Lback13 554 + # copy 4 bytes 555 + addi a3, a3, -4 556 + l32i a7, a3, 0 557 + addi a5, a5, -4 558 + src_b a6, a7, a6 559 + s32i a6, a5, 0 560 + mov a6, a7 561 + .Lback13: 562 + #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT 563 + add a3, a3, a11 # readjust a3 with correct misalignment 564 + #endif 565 + bbsi.l a4, 1, .Lback14 566 + bbsi.l a4, 0, .Lback15 567 + .Lbackdone: 568 + retw 569 + .Lback14: 570 + # copy 2 bytes 571 + addi a3, a3, -2 572 + l8ui a6, a3, 0 573 + l8ui a7, a3, 1 574 + addi a5, a5, -2 575 + s8i a6, a5, 0 576 + s8i a7, a5, 1 577 + bbsi.l a4, 0, .Lback15 578 + retw 579 + .Lback15: 580 + # copy 1 byte 581 + addi a3, a3, -1 582 + addi a5, a5, -1 583 + l8ui a6, a3, 0 584 + s8i a6, a5, 0 585 + retw 586 + 288 587 289 588 /* 290 589 * Local Variables: