Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/crypto: Switch ARM and arm64 to 'ksimd' scoped guard API

Before modifying the prototypes of kernel_neon_begin() and
kernel_neon_end() to accommodate kernel mode FP/SIMD state buffers
allocated on the stack, move arm64 to the new 'ksimd' scoped guard API,
which encapsulates the calls to those functions.

For symmetry, do the same for 32-bit ARM too.

Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

+43 -60
+4 -7
lib/crypto/arm/chacha.h
···
 
 #include <asm/cputype.h>
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
···
 	if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) {
 		hchacha_block_arm(state, out, nrounds);
 	} else {
-		kernel_neon_begin();
-		hchacha_block_neon(state, out, nrounds);
-		kernel_neon_end();
+		scoped_ksimd()
+			hchacha_block_neon(state, out, nrounds);
 	}
 }
 
···
 	do {
 		unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
 
-		kernel_neon_begin();
-		chacha_doneon(state, dst, src, todo, nrounds);
-		kernel_neon_end();
+		scoped_ksimd()
+			chacha_doneon(state, dst, src, todo, nrounds);
 
 		bytes -= todo;
 		src += todo;
+2 -3
lib/crypto/arm/curve25519.h
···
 			     const u8 point[CURVE25519_KEY_SIZE])
 {
 	if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
-		kernel_neon_begin();
-		curve25519_neon(out, scalar, point);
-		kernel_neon_end();
+		scoped_ksimd()
+			curve25519_neon(out, scalar, point);
 	} else {
 		curve25519_generic(out, scalar, point);
 	}
+2 -4
lib/crypto/arm/poly1305.h
···
  */
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
···
 	do {
 		unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
-		kernel_neon_begin();
-		poly1305_blocks_neon(state, src, todo, padbit);
-		kernel_neon_end();
+		scoped_ksimd()
+			poly1305_blocks_neon(state, src, todo, padbit);
 
 		len -= todo;
 		src += todo;
+6 -7
lib/crypto/arm/sha1.h
···
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
···
 {
 	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
 	    static_branch_likely(&have_neon) && likely(may_use_simd())) {
-		kernel_neon_begin();
-		if (static_branch_likely(&have_ce))
-			sha1_ce_transform(state, data, nblocks);
-		else
-			sha1_transform_neon(state, data, nblocks);
-		kernel_neon_end();
+		scoped_ksimd() {
+			if (static_branch_likely(&have_ce))
+				sha1_ce_transform(state, data, nblocks);
+			else
+				sha1_transform_neon(state, data, nblocks);
+		}
 	} else {
 		sha1_block_data_order(state, data, nblocks);
 	}
+6 -6
lib/crypto/arm/sha256.h
···
 {
 	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
 	    static_branch_likely(&have_neon) && likely(may_use_simd())) {
-		kernel_neon_begin();
-		if (static_branch_likely(&have_ce))
-			sha256_ce_transform(state, data, nblocks);
-		else
-			sha256_block_data_order_neon(state, data, nblocks);
-		kernel_neon_end();
+		scoped_ksimd() {
+			if (static_branch_likely(&have_ce))
+				sha256_ce_transform(state, data, nblocks);
+			else
+				sha256_block_data_order_neon(state, data, nblocks);
+		}
 	} else {
 		sha256_block_data_order(state, data, nblocks);
 	}
+2 -3
lib/crypto/arm/sha512.h
···
 {
 	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
 	    static_branch_likely(&have_neon) && likely(may_use_simd())) {
-		kernel_neon_begin();
-		sha512_block_data_order_neon(state, data, nblocks);
-		kernel_neon_end();
+		scoped_ksimd()
+			sha512_block_data_order_neon(state, data, nblocks);
 	} else {
 		sha512_block_data_order(state, data, nblocks);
 	}
+4 -7
lib/crypto/arm64/chacha.h
···
 #include <linux/kernel.h>
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 
 asmlinkage void chacha_block_xor_neon(const struct chacha_state *state,
···
 	if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) {
 		hchacha_block_generic(state, out, nrounds);
 	} else {
-		kernel_neon_begin();
-		hchacha_block_neon(state, out, nrounds);
-		kernel_neon_end();
+		scoped_ksimd()
+			hchacha_block_neon(state, out, nrounds);
 	}
 }
···
 	do {
 		unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
 
-		kernel_neon_begin();
-		chacha_doneon(state, dst, src, todo, nrounds);
-		kernel_neon_end();
+		scoped_ksimd()
+			chacha_doneon(state, dst, src, todo, nrounds);
 
 		bytes -= todo;
 		src += todo;
+2 -4
lib/crypto/arm64/poly1305.h
···
  */
 
 #include <asm/hwcap.h>
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
···
 	do {
 		unsigned int todo = min_t(unsigned int, len, SZ_4K);
 
-		kernel_neon_begin();
-		poly1305_blocks_neon(state, src, todo, padbit);
-		kernel_neon_end();
+		scoped_ksimd()
+			poly1305_blocks_neon(state, src, todo, padbit);
 
 		len -= todo;
 		src += todo;
+3 -4
lib/crypto/arm64/sha1.h
···
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 
···
 	do {
 		size_t rem;
 
-		kernel_neon_begin();
-		rem = __sha1_ce_transform(state, data, nblocks);
-		kernel_neon_end();
+		scoped_ksimd()
+			rem = __sha1_ce_transform(state, data, nblocks);
+
 		data += (nblocks - rem) * SHA1_BLOCK_SIZE;
 		nblocks = rem;
 	} while (nblocks);
+8 -11
lib/crypto/arm64/sha256.h
···
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 
···
 		do {
 			size_t rem;
 
-			kernel_neon_begin();
-			rem = __sha256_ce_transform(state,
-						    data, nblocks);
-			kernel_neon_end();
+			scoped_ksimd()
+				rem = __sha256_ce_transform(state, data,
+							    nblocks);
+
 			data += (nblocks - rem) * SHA256_BLOCK_SIZE;
 			nblocks = rem;
 		} while (nblocks);
 	} else {
-		kernel_neon_begin();
-		sha256_block_neon(state, data, nblocks);
-		kernel_neon_end();
+		scoped_ksimd()
+			sha256_block_neon(state, data, nblocks);
 	}
 } else {
 	sha256_block_data_order(state, data, nblocks);
···
 	if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
 	    static_branch_likely(&have_ce) && len >= SHA256_BLOCK_SIZE &&
 	    len <= 65536 && likely(may_use_simd())) {
-		kernel_neon_begin();
-		sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
-		kernel_neon_end();
+		scoped_ksimd()
+			sha256_ce_finup2x(ctx, data1, data2, len, out1, out2);
 		kmsan_unpoison_memory(out1, SHA256_DIGEST_SIZE);
 		kmsan_unpoison_memory(out2, SHA256_DIGEST_SIZE);
 		return true;
+4 -4
lib/crypto/arm64/sha512.h
···
  *
  * Copyright 2025 Google LLC
  */
-#include <asm/neon.h>
+
 #include <asm/simd.h>
 #include <linux/cpufeature.h>
 
···
 	do {
 		size_t rem;
 
-		kernel_neon_begin();
-		rem = __sha512_ce_transform(state, data, nblocks);
-		kernel_neon_end();
+		scoped_ksimd()
+			rem = __sha512_ce_transform(state, data, nblocks);
+
 		data += (nblocks - rem) * SHA512_BLOCK_SIZE;
 		nblocks = rem;
 	} while (nblocks);