Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86: Prepare asm files for straight-line-speculation

Replace all ret/retq instructions with RET in preparation for making
RET a macro. Since AS is case-insensitive, this is a big no-op as long
as RET is not yet defined.

find arch/x86/ -name \*.S | while read file
do
sed -i 's/\<ret[q]*\>/RET/' $file
done

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20211204134907.905503893@infradead.org
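
For reference, the macro this commit prepares for is added by the follow-up
SLS patch in arch/x86/include/asm/linkage.h. A minimal sketch of that
definition (assuming its original, pre-return-thunk form):

  #ifdef CONFIG_SLS
  #define RET	ret; int3	/* int3 stops straight-line speculation past the ret */
  #else
  #define RET	ret		/* without SLS, RET assembles to a plain ret */
  #endif

Until that definition exists, the assembler's case-insensitive mnemonic
matching means RET should assemble to exactly the same code as ret.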

Authored by Peter Zijlstra, committed by Borislav Petkov
commit f94909ce (parent 22da5a07)

Total: +349 -349
+1 -1
arch/x86/boot/compressed/efi_thunk_64.S
  ret → RET in __efi64_thunk
+4 -4
arch/x86/boot/compressed/head_64.S
  ret → RET in efi32_pe_entry, startup32_set_idt_entry, startup32_load_idt, startup32_check_sev_cbit
+3 -3
arch/x86/boot/compressed/mem_encrypt.S
  ret → RET at 3 sites, incl. get_sev_encryption_bit and set_sev_encryption_mask
+24 -24
arch/x86/crypto/aegis128-aesni-asm.S
  ret → RET at 24 sites: __load_partial, __store_partial and every exit of crypto_aegis128_aesni_{init,ad,enc,enc_tail,dec,dec_tail,final}
+1 -1
arch/x86/crypto/aes_ctrby8_avx-x86_64.S
  ret → RET at 1 site, inside an assembler macro body (before .endm)
+28 -28
arch/x86/crypto/aesni-intel_asm.S
  ret → RET at 28 sites: the aesni_gcm_* entry points, the _key_expansion_* and _aesni_{enc,dec}{1,4} helpers, aesni_{set_key,enc,dec}, the ECB/CBC/CTS/CTR routines and the XTS paths
+20 -20
arch/x86/crypto/aesni-intel_avx-x86_64.S
  ret → RET at 20 sites: every key-size exit of aesni_gcm_{init,enc_update,dec_update,finalize}_avx_gen2 and _avx_gen4
+2 -2
arch/x86/crypto/blake2s-core.S
  ret → RET in blake2s_compress_ssse3; retq → RET in blake2s_compress_avx512
+6 -6
arch/x86/crypto/blowfish-x86_64-asm_64.S
  ret → RET at 6 sites: __blowfish_enc_blk (2 exits), blowfish_dec_blk, __blowfish_enc_blk_4way (2 exits), blowfish_dec_blk_4way
+7 -7
arch/x86/crypto/camellia-aesni-avx-asm_64.S
  ret → RET at 7 sites: the roundsm16 helpers, the 16-way enc/dec cores, camellia_ecb_enc_16way, camellia_ecb_dec_16way, camellia_cbc_dec_16way
+7 -7
arch/x86/crypto/camellia-aesni-avx2-asm_64.S
  ret → RET at 7 sites: the roundsm32 helpers, the 32-way enc/dec cores, camellia_ecb_enc_32way, camellia_ecb_dec_32way, camellia_cbc_dec_32way
+6 -6
arch/x86/crypto/camellia-x86_64-asm_64.S
  ret → RET at 6 sites: __camellia_enc_blk (2 exits), camellia_dec_blk, __camellia_enc_blk_2way (2 exits), camellia_dec_blk_2way
+6 -6
arch/x86/crypto/cast5-avx-x86_64-asm_64.S
  ret → RET at 6 sites: __cast5_enc_blk16, __cast5_dec_blk16, cast5_ecb_enc_16way, cast5_ecb_dec_16way, cast5_cbc_dec_16way, cast5_ctr_16way
+5 -5
arch/x86/crypto/cast6-avx-x86_64-asm_64.S
  ret → RET at 5 sites: __cast6_enc_blk8, __cast6_dec_blk8, cast6_ecb_enc_8way, cast6_ecb_dec_8way, cast6_cbc_dec_8way
+3 -3
arch/x86/crypto/chacha-avx2-x86_64.S
  ret → RET at the .Ldone2/.Ldone4/.Ldone8 exits (3 sites)
+3 -3
arch/x86/crypto/chacha-avx512vl-x86_64.S
  ret → RET at the .Ldone2/.Ldone4/.Ldone8 exits (3 sites)
+4 -4
arch/x86/crypto/chacha-ssse3-x86_64.S
  ret → RET at 4 sites: chacha_permute, the .Ldone exit, hchacha_block_ssse3, the .Ldone4 exit
+1 -1
arch/x86/crypto/crc32-pclmul_asm.S
  ret → RET in crc32_pclmul_le_16
+1 -1
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
  ret → RET in crc_pcl
+1 -1
arch/x86/crypto/crct10dif-pcl-asm_64.S
  ret → RET at 1 site (before .Lless_than_256_bytes)
+2 -2
arch/x86/crypto/des3_ede-asm_64.S
  ret → RET in des3_ede_x86_64_crypt_blk and des3_ede_x86_64_crypt_blk_3way
+3 -3
arch/x86/crypto/ghash-clmulni-intel_asm.S
  ret → RET in __clmul_gf128mul_ble, clmul_ghash_mul, clmul_ghash_update
+1 -1
arch/x86/crypto/nh-avx2-x86_64.S
  ret → RET in nh_avx2
+1 -1
arch/x86/crypto/nh-sse2-x86_64.S
  ret → RET in nh_sse2
+5 -5
arch/x86/crypto/serpent-avx-x86_64-asm_64.S
  ret → RET at 5 sites: __serpent_enc_blk8_avx, __serpent_dec_blk8_avx, serpent_ecb_enc_8way_avx, serpent_ecb_dec_8way_avx, serpent_cbc_dec_8way_avx
+5 -5
arch/x86/crypto/serpent-avx2-asm_64.S
  ret → RET at 5 sites: __serpent_enc_blk16, __serpent_dec_blk16, serpent_ecb_enc_16way, serpent_ecb_dec_16way, serpent_cbc_dec_16way
+3 -3
arch/x86/crypto/serpent-sse2-i586-asm_32.S
  ret → RET at 3 sites: __serpent_enc_blk_4way (2 exits), serpent_dec_blk_4way
+3 -3
arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
  ret → RET at 3 sites: __serpent_enc_blk_8way (2 exits), serpent_dec_blk_8way
+1 -1
arch/x86/crypto/sha1_avx2_x86_64_asm.S
  ret → RET at 1 site, inside the function-template macro (SYM_FUNC_END(\name))
+1 -1
arch/x86/crypto/sha1_ni_asm.S
  ret → RET in sha1_ni_transform
+1 -1
arch/x86/crypto/sha1_ssse3_asm.S
  ret → RET at 1 site, inside the function-template macro (SYM_FUNC_END(\name))
+1 -1
arch/x86/crypto/sha256-avx-asm.S
  ret → RET in sha256_transform_avx
+1 -1
arch/x86/crypto/sha256-avx2-asm.S
  ret → RET in sha256_transform_rorx
+1 -1
arch/x86/crypto/sha256-ssse3-asm.S
  ret → RET in sha256_transform_ssse3
+1 -1
arch/x86/crypto/sha256_ni_asm.S
  ret → RET in sha256_ni_transform
+1 -1
arch/x86/crypto/sha512-avx-asm.S
  ret → RET in sha512_transform_avx
+1 -1
arch/x86/crypto/sha512-avx2-asm.S
  ret → RET in sha512_transform_rorx
+1 -1
arch/x86/crypto/sha512-ssse3-asm.S
  ret → RET in sha512_transform_ssse3
+6 -6
arch/x86/crypto/sm4-aesni-avx-asm_64.S
  ret → RET at 6 sites: sm4_aesni_avx_crypt4, __sm4_crypt_blk8, sm4_aesni_avx_crypt8, sm4_aesni_avx_ctr_enc_blk8, sm4_aesni_avx_cbc_dec_blk8, sm4_aesni_avx_cfb_dec_blk8
+4 -4
arch/x86/crypto/sm4-aesni-avx2-asm_64.S
  ret → RET at 4 sites: __sm4_crypt_blk16, sm4_aesni_avx2_ctr_enc_blk16, sm4_aesni_avx2_cbc_dec_blk16, sm4_aesni_avx2_cfb_dec_blk16
+5 -5
arch/x86/crypto/twofish-avx-x86_64-asm_64.S
  ret → RET at 5 sites: __twofish_enc_blk8, __twofish_dec_blk8, twofish_ecb_enc_8way, twofish_ecb_dec_8way, twofish_cbc_dec_8way
+2 -2
arch/x86/crypto/twofish-i586-asm_32.S
  ret → RET in twofish_enc_blk and twofish_dec_blk
+3 -3
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
  ret → RET at 3 sites: __twofish_enc_blk_3way (2 exits), twofish_dec_blk_3way
+2 -2
arch/x86/crypto/twofish-x86_64-asm_64.S
  ret → RET in twofish_enc_blk and twofish_dec_blk
+1 -1
arch/x86/entry/entry_32.S
  ret → RET in schedule_tail_wrapper
+5 -5
arch/x86/entry/entry_64.S
  ret → RET at 5 sites: asm_load_gs_index, two paranoid_entry exits, two error-entry exits
+1 -1
arch/x86/entry/thunk_32.S
  ret → RET at 1 site, inside the thunk macro
+1 -1
arch/x86/entry/thunk_64.S
  ret → RET in __thunk_restore
+1 -1
arch/x86/entry/vdso/vdso32/system_call.S
  ret → RET in __kernel_vsyscall
+1 -1
arch/x86/entry/vdso/vsgx.S
  ret → RET at 1 site, in the __vdso_sgx_enter_enclave exit path
+3 -3
arch/x86/entry/vsyscall/vsyscall_emu_64.S
  ret → RET in the gettimeofday, time and getcpu legacy vsyscall stubs
+3 -3
arch/x86/kernel/acpi/wakeup_32.S
  ret → RET at 3 sites: save_registers, restore_registers, do_suspend_lowlevel
+3 -3
arch/x86/kernel/ftrace_32.S
  ret → RET at 3 sites: __fentry__, ftrace_stub, ftrace_graph_caller
+4 -4
arch/x86/kernel/ftrace_64.S
  retq → RET at 3 code sites (__fentry__, ftrace_stub in ftrace_epilogue, the trace-path ftrace_stub), plus the ftrace_epilogue comment is updated to say RET instead of retq
+1 -1
arch/x86/kernel/head_32.S
  ret → RET in setup_once
+1 -1
arch/x86/kernel/irqflags.S
  ret → RET in native_save_fl
+5 -5
arch/x86/kernel/relocate_kernel_32.S
  ret → RET at 5 sites: relocate_kernel, identity_mapped (2), virtual_mapped, swap_pages
+5 -5
arch/x86/kernel/relocate_kernel_64.S
  ret → RET at 5 sites: relocate_kernel, identity_mapped (2), virtual_mapped, swap_pages
+1 -1
arch/x86/kernel/sev_verify_cbit.S
  ret → RET in sev_verify_cbit
+2 -2
arch/x86/kernel/verify_cpu.S
  ret → RET at both exits of verify_cpu
+2 -2
arch/x86/kvm/svm/vmenter.S
  ret → RET at both vcpu-run exit paths
+7 -7
arch/x86/kvm/vmx/vmenter.S
  ret → RET at 7 sites: the vmlaunch/vmresume entry (3 exits), vmx_vmexit, the vcpu-run exit path, vmread_error_trampoline, vmx_do_interrupt_nmi_irqoff
+1 -1
arch/x86/lib/atomic64_386_32.S
  ret → RET inside the RET_IRQ_RESTORE macro
+8 -8
arch/x86/lib/atomic64_cx8_32.S
  ret → RET at 8 sites: atomic64_{read,set,xchg}_cx8, the addsub/incdec return macros, atomic64_dec_if_positive_cx8, atomic64_add_unless_cx8, atomic64_inc_not_zero_cx8
+4 -4
arch/x86/lib/checksum_32.S
  ret → RET at 4 sites: both csum_partial variants and both csum_partial_copy_generic variants
+3 -3
arch/x86/lib/clear_page_64.S
  ret → RET in clear_page_rep, clear_page_orig, clear_page_erms
+2 -2
arch/x86/lib/cmpxchg16b_emu.S
  ret → RET at both exits of this_cpu_cmpxchg16b_emu
+2 -2
arch/x86/lib/cmpxchg8b_emu.S
  ret → RET at both exits of cmpxchg8b_emu
+3 -3
arch/x86/lib/copy_mc_64.S
  ret → RET at 3 sites: copy_mc_fragile, copy_mc_enhanced_fast_string and its fixup tail
+2 -2
arch/x86/lib/copy_page_64.S
  ret → RET in copy_page and copy_page_regs
+5 -5
arch/x86/lib/copy_user_64.S
  ret → RET at 5 sites: the copy_user_generic variants, .Lcopy_user_handle_tail and the uncached copy path
+1 -1
arch/x86/lib/csum-copy_64.S
  ret → RET in csum_partial_copy_generic
+11 -11
arch/x86/lib/getuser.S
  ret → RET at 11 sites: __get_user_{1,2,4,8}, __get_user_nocheck_{1,2,4,8} and the bad_get_user paths
+3 -3
arch/x86/lib/hweight.S
  ret → RET at 3 sites: __sw_hweight32 and both __sw_hweight64 variants
+1 -1
arch/x86/lib/iomap_copy_64.S
  ret → RET in __iowrite32_copy
+6 -6
arch/x86/lib/memcpy_64.S
  ret/retq → RET at 6 sites: memcpy, memcpy_erms and the four memcpy_orig exits
+2 -2
arch/x86/lib/memmove_64.S
  retq → RET at 2 sites: the ERMS "rep movsb" ALTERNATIVE string and the __memmove tail
+3 -3
arch/x86/lib/memset_64.S
  ret → RET in __memset, memset_erms, memset_orig
+2 -2
arch/x86/lib/msr-reg.S
  ret → RET in the 64-bit and 32-bit {rd,wr}msr_safe_regs bodies
+3 -3
arch/x86/lib/putuser.S
  ret → RET in __put_user_1, __put_user_2, __put_user_4
+1 -1
arch/x86/lib/retpoline.S
  ret → RET at 1 site: the ROP gadget inside the retpoline thunk macro (.Ldo_rop)
+1 -1
arch/x86/math-emu/div_Xsig.S
  ret → RET at 1 site (function epilogue)
+1 -1
arch/x86/math-emu/div_small.S
  ret → RET in FPU_div_small
+3 -3
arch/x86/math-emu/mul_Xsig.S
  ret → RET in mul32_Xsig, mul64_Xsig, mul_Xsig_Xsig
+1 -1
arch/x86/math-emu/polynom_Xsig.S
  ret → RET in polynomial_Xsig
+3 -3
arch/x86/math-emu/reg_norm.S
  ret → RET at 3 sites: the L_exit path and both FPU_normalize_nuo exits
+1 -1
arch/x86/math-emu/reg_round.S
  ret → RET at 1 site (function epilogue)
+1 -1
arch/x86/math-emu/reg_u_add.S
  ret → RET in FPU_u_add
+1 -1
arch/x86/math-emu/reg_u_div.S
  ret → RET in FPU_u_div
+1 -1
arch/x86/math-emu/reg_u_mul.S
  ret → RET in FPU_u_mul
+1 -1
arch/x86/math-emu/reg_u_sub.S
  ret → RET in FPU_u_sub
+2 -2
arch/x86/math-emu/round_Xsig.S
  ret → RET in round_Xsig and norm_Xsig
+4 -4
arch/x86/math-emu/shr_Xsig.S
  ret → RET at the 4 exits of shr_Xsig
+8 -8
arch/x86/math-emu/wm_shrx.S
  ret → RET at 8 sites: the 4 exits of FPU_shrx and the 4 exits of FPU_shrxs
+2 -2
arch/x86/mm/mem_encrypt_boot.S
  ret → RET in sme_encrypt_execute and __enc_copy
+1 -1
arch/x86/platform/efi/efi_stub_32.S
  ret → RET in efi_call_svam
+1 -1
arch/x86/platform/efi/efi_stub_64.S
  ret → RET in __efi_call
+1 -1
arch/x86/platform/efi/efi_thunk_64.S
  retq → RET in __efi64_thunk
+3 -3
arch/x86/platform/olpc/xo1-wakeup.S
  ret → RET at 3 sites: save_registers, restore_registers, do_olpc_suspend_lowlevel
+2 -2
arch/x86/power/hibernate_asm_32.S
  ret → RET in swsusp_arch_suspend and restore_registers
+2 -2
arch/x86/power/hibernate_asm_64.S
  ret → RET in restore_registers and swsusp_arch_suspend
+2 -2
arch/x86/um/checksum_32.S
  ret → RET at both exits of csum_partial
+1 -1
arch/x86/um/setjmp_32.S
  ret → RET in kernel_setjmp
+1 -1
arch/x86/um/setjmp_64.S
  ret → RET in kernel_setjmp
+6 -6
arch/x86/xen/xen-asm.S
  ret → RET at 6 sites: xen_irq_disable_direct, check_events, xen_irq_enable_direct, xen_save_fl_direct, xen_read_cr2, xen_read_cr2_direct
+1 -1
arch/x86/xen/xen-head.S
  ret → RET in the hypercall-page stub template (.rept block)
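
A quick way to confirm the conversion caught every word-boundary ret/retq
(a hypothetical spot-check mirroring the commit's own sed loop; it should
print nothing, since even comments, such as the one in ftrace_64.S, were
rewritten):

  # list any remaining bare ret/retq in x86 asm files
  find arch/x86/ -name \*.S | xargs grep -nwE 'retq?'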