Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/crypto: arm64/polyval: Migrate optimized code into library

Migrate the arm64 implementation of POLYVAL into lib/crypto/, wiring it
up to the POLYVAL library interface. This makes the POLYVAL library
properly optimized on arm64.

This drops the arm64 optimizations of polyval in the crypto_shash API.
That's fine, since polyval will be removed from crypto_shash entirely,
as it is unneeded there. But even if it comes back, the crypto_shash
API could just be implemented on top of the library API, as usual.

Adjust the names and prototypes of the assembly functions to align more
closely with the rest of the library code.

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20251109234726.638437-5-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

+110 -191
-10
arch/arm64/crypto/Kconfig
··· 47 47 Architecture: arm64 using: 48 48 - ARMv8.2 Crypto Extensions 49 49 50 - config CRYPTO_POLYVAL_ARM64_CE 51 - tristate "Hash functions: POLYVAL (ARMv8 Crypto Extensions)" 52 - depends on KERNEL_MODE_NEON 53 - select CRYPTO_POLYVAL 54 - help 55 - POLYVAL hash function for HCTR2 56 - 57 - Architecture: arm64 using: 58 - - ARMv8 Crypto Extensions 59 - 60 50 config CRYPTO_AES_ARM64 61 51 tristate "Ciphers: AES, modes: ECB, CBC, CTR, CTS, XCTR, XTS" 62 52 select CRYPTO_AES
-3
arch/arm64/crypto/Makefile
··· 29 29 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o 30 30 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o 31 31 32 - obj-$(CONFIG_CRYPTO_POLYVAL_ARM64_CE) += polyval-ce.o 33 - polyval-ce-y := polyval-ce-glue.o polyval-ce-core.o 34 - 35 32 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o 36 33 aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o 37 34
+18 -20
arch/arm64/crypto/polyval-ce-core.S lib/crypto/arm64/polyval-ce-core.S
··· 27 27 #include <linux/linkage.h> 28 28 #define STRIDE_BLOCKS 8 29 29 30 - KEY_POWERS .req x0 31 - MSG .req x1 32 - BLOCKS_LEFT .req x2 33 - ACCUMULATOR .req x3 30 + ACCUMULATOR .req x0 31 + KEY_POWERS .req x1 32 + MSG .req x2 33 + BLOCKS_LEFT .req x3 34 34 KEY_START .req x10 35 35 EXTRA_BYTES .req x11 36 36 TMP .req x13 ··· 300 300 .endm 301 301 302 302 /* 303 - * Perform montgomery multiplication in GF(2^128) and store result in op1. 303 + * Computes a = a * b * x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1. 304 304 * 305 - * Computes op1*op2*x^{-128} mod x^128 + x^127 + x^126 + x^121 + 1 306 - * If op1, op2 are in montgomery form, this computes the montgomery 307 - * form of op1*op2. 308 - * 309 - * void pmull_polyval_mul(u8 *op1, const u8 *op2); 305 + * void polyval_mul_pmull(struct polyval_elem *a, 306 + * const struct polyval_elem *b); 310 307 */ 311 - SYM_FUNC_START(pmull_polyval_mul) 308 + SYM_FUNC_START(polyval_mul_pmull) 312 309 adr TMP, .Lgstar 313 310 ld1 {GSTAR.2d}, [TMP] 314 311 ld1 {v0.16b}, [x0] ··· 315 318 montgomery_reduction SUM 316 319 st1 {SUM.16b}, [x0] 317 320 ret 318 - SYM_FUNC_END(pmull_polyval_mul) 321 + SYM_FUNC_END(polyval_mul_pmull) 319 322 320 323 /* 321 324 * Perform polynomial evaluation as specified by POLYVAL. This computes: 322 325 * h^n * accumulator + h^n * m_0 + ... + h^1 * m_{n-1} 323 326 * where n=nblocks, h is the hash key, and m_i are the message blocks. 324 327 * 325 - * x0 - pointer to precomputed key powers h^8 ... h^1 326 - * x1 - pointer to message blocks 327 - * x2 - number of blocks to hash 328 - * x3 - pointer to accumulator 328 + * x0 - pointer to accumulator 329 + * x1 - pointer to precomputed key powers h^8 ... h^1 330 + * x2 - pointer to message blocks 331 + * x3 - number of blocks to hash 329 332 * 330 - * void pmull_polyval_update(const struct polyval_ctx *ctx, const u8 *in, 331 - * size_t nblocks, u8 *accumulator); 333 + * void polyval_blocks_pmull(struct polyval_elem *acc, 334 + * const struct polyval_key *key, 335 + * const u8 *data, size_t nblocks); 332 336 */ 333 - SYM_FUNC_START(pmull_polyval_update) 337 + SYM_FUNC_START(polyval_blocks_pmull) 334 338 adr TMP, .Lgstar 335 339 mov KEY_START, KEY_POWERS 336 340 ld1 {GSTAR.2d}, [TMP] ··· 356 358 .LskipPartial: 357 359 st1 {SUM.16b}, [ACCUMULATOR] 358 360 ret 359 - SYM_FUNC_END(pmull_polyval_update) 361 + SYM_FUNC_END(polyval_blocks_pmull)
-158
arch/arm64/crypto/polyval-ce-glue.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Glue code for POLYVAL using ARMv8 Crypto Extensions 4 - * 5 - * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> 6 - * Copyright (c) 2009 Intel Corp. 7 - * Author: Huang Ying <ying.huang@intel.com> 8 - * Copyright 2021 Google LLC 9 - */ 10 - 11 - /* 12 - * Glue code based on ghash-clmulni-intel_glue.c. 13 - * 14 - * This implementation of POLYVAL uses montgomery multiplication accelerated by 15 - * ARMv8 Crypto Extensions instructions to implement the finite field operations. 16 - */ 17 - 18 - #include <asm/neon.h> 19 - #include <crypto/internal/hash.h> 20 - #include <crypto/polyval.h> 21 - #include <crypto/utils.h> 22 - #include <linux/cpufeature.h> 23 - #include <linux/errno.h> 24 - #include <linux/kernel.h> 25 - #include <linux/module.h> 26 - #include <linux/string.h> 27 - 28 - #define NUM_KEY_POWERS 8 29 - 30 - struct polyval_tfm_ctx { 31 - /* 32 - * These powers must be in the order h^8, ..., h^1. 33 - */ 34 - u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE]; 35 - }; 36 - 37 - struct polyval_desc_ctx { 38 - u8 buffer[POLYVAL_BLOCK_SIZE]; 39 - }; 40 - 41 - asmlinkage void pmull_polyval_update(const struct polyval_tfm_ctx *keys, 42 - const u8 *in, size_t nblocks, u8 *accumulator); 43 - asmlinkage void pmull_polyval_mul(u8 *op1, const u8 *op2); 44 - 45 - static void internal_polyval_update(const struct polyval_tfm_ctx *keys, 46 - const u8 *in, size_t nblocks, u8 *accumulator) 47 - { 48 - kernel_neon_begin(); 49 - pmull_polyval_update(keys, in, nblocks, accumulator); 50 - kernel_neon_end(); 51 - } 52 - 53 - static void internal_polyval_mul(u8 *op1, const u8 *op2) 54 - { 55 - kernel_neon_begin(); 56 - pmull_polyval_mul(op1, op2); 57 - kernel_neon_end(); 58 - } 59 - 60 - static int polyval_arm64_setkey(struct crypto_shash *tfm, 61 - const u8 *key, unsigned int keylen) 62 - { 63 - struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm); 64 - int i; 65 - 66 - if (keylen != POLYVAL_BLOCK_SIZE) 67 - return -EINVAL; 68 - 69 - memcpy(tctx->key_powers[NUM_KEY_POWERS-1], key, POLYVAL_BLOCK_SIZE); 70 - 71 - for (i = NUM_KEY_POWERS-2; i >= 0; i--) { 72 - memcpy(tctx->key_powers[i], key, POLYVAL_BLOCK_SIZE); 73 - internal_polyval_mul(tctx->key_powers[i], 74 - tctx->key_powers[i+1]); 75 - } 76 - 77 - return 0; 78 - } 79 - 80 - static int polyval_arm64_init(struct shash_desc *desc) 81 - { 82 - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); 83 - 84 - memset(dctx, 0, sizeof(*dctx)); 85 - 86 - return 0; 87 - } 88 - 89 - static int polyval_arm64_update(struct shash_desc *desc, 90 - const u8 *src, unsigned int srclen) 91 - { 92 - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); 93 - const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 94 - unsigned int nblocks; 95 - 96 - do { 97 - /* allow rescheduling every 4K bytes */ 98 - nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE; 99 - internal_polyval_update(tctx, src, nblocks, dctx->buffer); 100 - srclen -= nblocks * POLYVAL_BLOCK_SIZE; 101 - src += nblocks * POLYVAL_BLOCK_SIZE; 102 - } while (srclen >= POLYVAL_BLOCK_SIZE); 103 - 104 - return srclen; 105 - } 106 - 107 - static int polyval_arm64_finup(struct shash_desc *desc, const u8 *src, 108 - unsigned int len, u8 *dst) 109 - { 110 - struct polyval_desc_ctx *dctx = shash_desc_ctx(desc); 111 - const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 112 - 113 - if (len) { 114 - crypto_xor(dctx->buffer, src, len); 115 - internal_polyval_mul(dctx->buffer, 116 - tctx->key_powers[NUM_KEY_POWERS-1]); 117 - } 118 - 119 - memcpy(dst, dctx->buffer, POLYVAL_BLOCK_SIZE); 120 - 121 - return 0; 122 - } 123 - 124 - static struct shash_alg polyval_alg = { 125 - .digestsize = POLYVAL_DIGEST_SIZE, 126 - .init = polyval_arm64_init, 127 - .update = polyval_arm64_update, 128 - .finup = polyval_arm64_finup, 129 - .setkey = polyval_arm64_setkey, 130 - .descsize = sizeof(struct polyval_desc_ctx), 131 - .base = { 132 - .cra_name = "polyval", 133 - .cra_driver_name = "polyval-ce", 134 - .cra_priority = 200, 135 - .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, 136 - .cra_blocksize = POLYVAL_BLOCK_SIZE, 137 - .cra_ctxsize = sizeof(struct polyval_tfm_ctx), 138 - .cra_module = THIS_MODULE, 139 - }, 140 - }; 141 - 142 - static int __init polyval_ce_mod_init(void) 143 - { 144 - return crypto_register_shash(&polyval_alg); 145 - } 146 - 147 - static void __exit polyval_ce_mod_exit(void) 148 - { 149 - crypto_unregister_shash(&polyval_alg); 150 - } 151 - 152 - module_cpu_feature_match(PMULL, polyval_ce_mod_init) 153 - module_exit(polyval_ce_mod_exit); 154 - 155 - MODULE_LICENSE("GPL"); 156 - MODULE_DESCRIPTION("POLYVAL hash function accelerated by ARMv8 Crypto Extensions"); 157 - MODULE_ALIAS_CRYPTO("polyval"); 158 - MODULE_ALIAS_CRYPTO("polyval-ce");
+8
include/crypto/polyval.h
··· 39 39 * This may contain just the raw key H, or it may contain precomputed key 40 40 * powers, depending on the platform's POLYVAL implementation. Use 41 41 * polyval_preparekey() to initialize this. 42 + * 43 + * By H^i we mean H^(i-1) * H * x^-128, with base case H^1 = H. I.e. the 44 + * exponentiation repeats the POLYVAL dot operation, with its "extra" x^-128. 42 45 */ 43 46 struct polyval_key { 44 47 #ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH 48 + #ifdef CONFIG_ARM64 49 + /** @h_powers: Powers of the hash key H^8 through H^1 */ 50 + struct polyval_elem h_powers[8]; 51 + #else 45 52 #error "Unhandled arch" 53 + #endif 46 54 #else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */ 47 55 /** @h: The hash key H */ 48 56 struct polyval_elem h;
+1
lib/crypto/Kconfig
··· 144 144 config CRYPTO_LIB_POLYVAL_ARCH 145 145 bool 146 146 depends on CRYPTO_LIB_POLYVAL && !UML 147 + default y if ARM64 && KERNEL_MODE_NEON 147 148 148 149 config CRYPTO_LIB_CHACHA20POLY1305 149 150 tristate
+1
lib/crypto/Makefile
··· 202 202 libpolyval-y := polyval.o 203 203 ifeq ($(CONFIG_CRYPTO_LIB_POLYVAL_ARCH),y) 204 204 CFLAGS_polyval.o += -I$(src)/$(SRCARCH) 205 + libpolyval-$(CONFIG_ARM64) += arm64/polyval-ce-core.o 205 206 endif 206 207 207 208 ################################################################################
+82
lib/crypto/arm64/polyval.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * POLYVAL library functions, arm64 optimized 4 + * 5 + * Copyright 2025 Google LLC 6 + */ 7 + #include <asm/neon.h> 8 + #include <asm/simd.h> 9 + #include <linux/cpufeature.h> 10 + 11 + #define NUM_H_POWERS 8 12 + 13 + static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_pmull); 14 + 15 + asmlinkage void polyval_mul_pmull(struct polyval_elem *a, 16 + const struct polyval_elem *b); 17 + asmlinkage void polyval_blocks_pmull(struct polyval_elem *acc, 18 + const struct polyval_key *key, 19 + const u8 *data, size_t nblocks); 20 + 21 + static void polyval_preparekey_arch(struct polyval_key *key, 22 + const u8 raw_key[POLYVAL_BLOCK_SIZE]) 23 + { 24 + static_assert(ARRAY_SIZE(key->h_powers) == NUM_H_POWERS); 25 + memcpy(&key->h_powers[NUM_H_POWERS - 1], raw_key, POLYVAL_BLOCK_SIZE); 26 + if (static_branch_likely(&have_pmull) && may_use_simd()) { 27 + kernel_neon_begin(); 28 + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { 29 + key->h_powers[i] = key->h_powers[i + 1]; 30 + polyval_mul_pmull(&key->h_powers[i], 31 + &key->h_powers[NUM_H_POWERS - 1]); 32 + } 33 + kernel_neon_end(); 34 + } else { 35 + for (int i = NUM_H_POWERS - 2; i >= 0; i--) { 36 + key->h_powers[i] = key->h_powers[i + 1]; 37 + polyval_mul_generic(&key->h_powers[i], 38 + &key->h_powers[NUM_H_POWERS - 1]); 39 + } 40 + } 41 + } 42 + 43 + static void polyval_mul_arch(struct polyval_elem *acc, 44 + const struct polyval_key *key) 45 + { 46 + if (static_branch_likely(&have_pmull) && may_use_simd()) { 47 + kernel_neon_begin(); 48 + polyval_mul_pmull(acc, &key->h_powers[NUM_H_POWERS - 1]); 49 + kernel_neon_end(); 50 + } else { 51 + polyval_mul_generic(acc, &key->h_powers[NUM_H_POWERS - 1]); 52 + } 53 + } 54 + 55 + static void polyval_blocks_arch(struct polyval_elem *acc, 56 + const struct polyval_key *key, 57 + const u8 *data, size_t nblocks) 58 + { 59 + if (static_branch_likely(&have_pmull) && may_use_simd()) { 60 + do { 61 + /* Allow rescheduling every 4 KiB. */ 62 + size_t n = min_t(size_t, nblocks, 63 + 4096 / POLYVAL_BLOCK_SIZE); 64 + 65 + kernel_neon_begin(); 66 + polyval_blocks_pmull(acc, key, data, n); 67 + kernel_neon_end(); 68 + data += n * POLYVAL_BLOCK_SIZE; 69 + nblocks -= n; 70 + } while (nblocks); 71 + } else { 72 + polyval_blocks_generic(acc, &key->h_powers[NUM_H_POWERS - 1], 73 + data, nblocks); 74 + } 75 + } 76 + 77 + #define polyval_mod_init_arch polyval_mod_init_arch 78 + static void polyval_mod_init_arch(void) 79 + { 80 + if (cpu_have_named_feature(PMULL)) 81 + static_branch_enable(&have_pmull); 82 + }