Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: lib: accelerate crc32_be

It makes no sense to leave crc32_be using the generic code while we
only accelerate the little-endian ops.

Even though the big-endian form doesn't map as smoothly onto the arm64
CRC instructions, we can still speed it up and avoid hitting the D cache
(no lookup tables, unlike the generic code).

Tested on Cortex-A53. Without acceleration:

crc32: CRC_LE_BITS = 64, CRC_BE BITS = 64
crc32: self tests passed, processed 225944 bytes in 192240 nsec
crc32c: CRC_LE_BITS = 64
crc32c: self tests passed, processed 112972 bytes in 21360 nsec

With acceleration:

crc32: CRC_LE_BITS = 64, CRC_BE BITS = 64
crc32: self tests passed, processed 225944 bytes in 53480 nsec
crc32c: CRC_LE_BITS = 64
crc32c: self tests passed, processed 112972 bytes in 21480 nsec

Signed-off-by: Kevin Bracey <kevin@bracey.fi>
Tested-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Kevin Bracey; committed by Herbert Xu (commits 5f2f5eaa, 1b3dce8b)

+73 -14
arch/arm64/lib/crc32.S
···
 
 	.arch		armv8-a+crc
 
-	.macro		__crc32, c
+	.macro		byteorder, reg, be
+	.if		\be
+CPU_LE( rev		\reg, \reg	)
+	.else
+CPU_BE( rev		\reg, \reg	)
+	.endif
+	.endm
+
+	.macro		byteorder16, reg, be
+	.if		\be
+CPU_LE( rev16		\reg, \reg	)
+	.else
+CPU_BE( rev16		\reg, \reg	)
+	.endif
+	.endm
+
+	.macro		bitorder, reg, be
+	.if		\be
+	rbit		\reg, \reg
+	.endif
+	.endm
+
+	.macro		bitorder16, reg, be
+	.if		\be
+	rbit		\reg, \reg
+	lsr		\reg, \reg, #16
+	.endif
+	.endm
+
+	.macro		bitorder8, reg, be
+	.if		\be
+	rbit		\reg, \reg
+	lsr		\reg, \reg, #24
+	.endif
+	.endm
+
+	.macro		__crc32, c, be=0
+	bitorder	w0, \be
 	cmp		x2, #16
 	b.lt		8f		// less than 16 bytes
 
···
 	add		x8, x8, x1
 	add		x1, x1, x7
 	ldp		x5, x6, [x8]
-CPU_BE( rev		x3, x3	)
-CPU_BE( rev		x4, x4	)
-CPU_BE( rev		x5, x5	)
-CPU_BE( rev		x6, x6	)
+	byteorder	x3, \be
+	byteorder	x4, \be
+	byteorder	x5, \be
+	byteorder	x6, \be
+	bitorder	x3, \be
+	bitorder	x4, \be
+	bitorder	x5, \be
+	bitorder	x6, \be
 
 	tst		x7, #8
 	crc32\c\()x	w8, w0, x3
···
 32:	ldp		x3, x4, [x1], #32
 	sub		x2, x2, #32
 	ldp		x5, x6, [x1, #-16]
-CPU_BE( rev		x3, x3	)
-CPU_BE( rev		x4, x4	)
-CPU_BE( rev		x5, x5	)
-CPU_BE( rev		x6, x6	)
+	byteorder	x3, \be
+	byteorder	x4, \be
+	byteorder	x5, \be
+	byteorder	x6, \be
+	bitorder	x3, \be
+	bitorder	x4, \be
+	bitorder	x5, \be
+	bitorder	x6, \be
 	crc32\c\()x	w0, w0, x3
 	crc32\c\()x	w0, w0, x4
 	crc32\c\()x	w0, w0, x5
 	crc32\c\()x	w0, w0, x6
 	cbnz		x2, 32b
-0:	ret
+0:	bitorder	w0, \be
+	ret
 
 8:	tbz		x2, #3, 4f
 	ldr		x3, [x1], #8
-CPU_BE( rev		x3, x3	)
+	byteorder	x3, \be
+	bitorder	x3, \be
 	crc32\c\()x	w0, w0, x3
 4:	tbz		x2, #2, 2f
 	ldr		w3, [x1], #4
-CPU_BE( rev		w3, w3	)
+	byteorder	w3, \be
+	bitorder	w3, \be
 	crc32\c\()w	w0, w0, w3
 2:	tbz		x2, #1, 1f
 	ldrh		w3, [x1], #2
-CPU_BE( rev16		w3, w3	)
+	byteorder16	w3, \be
+	bitorder16	w3, \be
 	crc32\c\()h	w0, w0, w3
 1:	tbz		x2, #0, 0f
 	ldrb		w3, [x1]
+	bitorder8	w3, \be
 	crc32\c\()b	w0, w0, w3
-0:	ret
+0:	bitorder	w0, \be
+	ret
 	.endm
 
 	.align		5
···
 alternative_else_nop_endif
 	__crc32		c
 SYM_FUNC_END(__crc32c_le)
+
+	.align		5
+SYM_FUNC_START(crc32_be)
+alternative_if_not ARM64_HAS_CRC32
+	b		crc32_be_base
+alternative_else_nop_endif
+	__crc32		be=1
+SYM_FUNC_END(crc32_be)