Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: x86 - remove glue helper module

All dependencies on the x86 glue helper module have been replaced by
local instantiations of the new ECB/CBC preprocessor helper macros, so
the glue helper module can be retired.

Acked-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
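
For context, each cipher driver that used to call into this module now expands the mode walk locally. Below is a minimal sketch of the replacement pattern, assuming the ECB/CBC helper macros added in arch/x86/crypto/ecb_cbc_helpers.h earlier in this series; the Serpent symbols are illustrative stand-ins for whichever assembly routines a given driver wires up:

static int ecb_encrypt(struct skcipher_request *req)
{
	/* walk the request; the FPU is enabled only for a full parallel batch */
	ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	/* widest batch first, single-block fallback last */
	ECB_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_ecb_enc_8way_avx);
	ECB_BLOCK(1, __serpent_encrypt);
	ECB_WALK_END();
}

static int cbc_decrypt(struct skcipher_request *req)
{
	CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
	CBC_DEC_BLOCK(SERPENT_PARALLEL_BLOCKS, serpent_cbc_dec_8way_avx);
	CBC_DEC_BLOCK(1, __serpent_decrypt);
	CBC_WALK_END();
}

Since the macros expand to direct calls, the run-time dispatch through the function-pointer tables removed below becomes unnecessary.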

Authored by Ard Biesheuvel and committed by Herbert Xu
64ca771c 165f3573

6 files changed, 0 insertions(+), 243 deletions(-)

arch/x86/crypto/Makefile (-2)

--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -5,7 +5,5 @@
 OBJECT_FILES_NON_STANDARD := y
 
-obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
-
 obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
 twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
arch/x86/crypto/glue_helper.c (-155)

--- a/arch/x86/crypto/glue_helper.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Shared glue code for 128bit block ciphers
- *
- * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- *
- * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
- *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#include <linux/module.h>
-#include <crypto/b128ops.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
-#include <asm/crypto/glue_helper.h>
-
-int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
-			struct skcipher_request *req)
-{
-	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	const unsigned int bsize = 128 / 8;
-	struct skcipher_walk walk;
-	bool fpu_enabled = false;
-	unsigned int nbytes;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((nbytes = walk.nbytes)) {
-		const u8 *src = walk.src.virt.addr;
-		u8 *dst = walk.dst.virt.addr;
-		unsigned int func_bytes;
-		unsigned int i;
-
-		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-					     &walk, fpu_enabled, nbytes);
-		for (i = 0; i < gctx->num_funcs; i++) {
-			func_bytes = bsize * gctx->funcs[i].num_blocks;
-
-			if (nbytes < func_bytes)
-				continue;
-
-			/* Process multi-block batch */
-			do {
-				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
-				src += func_bytes;
-				dst += func_bytes;
-				nbytes -= func_bytes;
-			} while (nbytes >= func_bytes);
-
-			if (nbytes < bsize)
-				break;
-		}
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-
-	glue_fpu_end(fpu_enabled);
-	return err;
-}
-EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
-
-int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
-				struct skcipher_request *req)
-{
-	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	const unsigned int bsize = 128 / 8;
-	struct skcipher_walk walk;
-	unsigned int nbytes;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((nbytes = walk.nbytes)) {
-		const u128 *src = (u128 *)walk.src.virt.addr;
-		u128 *dst = (u128 *)walk.dst.virt.addr;
-		u128 *iv = (u128 *)walk.iv;
-
-		do {
-			u128_xor(dst, src, iv);
-			fn(ctx, (u8 *)dst, (u8 *)dst);
-			iv = dst;
-			src++;
-			dst++;
-			nbytes -= bsize;
-		} while (nbytes >= bsize);
-
-		*(u128 *)walk.iv = *iv;
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-	return err;
-}
-EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
-
-int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
-				struct skcipher_request *req)
-{
-	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	const unsigned int bsize = 128 / 8;
-	struct skcipher_walk walk;
-	bool fpu_enabled = false;
-	unsigned int nbytes;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((nbytes = walk.nbytes)) {
-		const u128 *src = walk.src.virt.addr;
-		u128 *dst = walk.dst.virt.addr;
-		unsigned int func_bytes, num_blocks;
-		unsigned int i;
-		u128 last_iv;
-
-		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-					     &walk, fpu_enabled, nbytes);
-		/* Start of the last block. */
-		src += nbytes / bsize - 1;
-		dst += nbytes / bsize - 1;
-
-		last_iv = *src;
-
-		for (i = 0; i < gctx->num_funcs; i++) {
-			num_blocks = gctx->funcs[i].num_blocks;
-			func_bytes = bsize * num_blocks;
-
-			if (nbytes < func_bytes)
-				continue;
-
-			/* Process multi-block batch */
-			do {
-				src -= num_blocks - 1;
-				dst -= num_blocks - 1;
-
-				gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
-							(const u8 *)src);
-
-				nbytes -= func_bytes;
-				if (nbytes < bsize)
-					goto done;
-
-				u128_xor(dst, dst, --src);
-				dst--;
-			} while (nbytes >= func_bytes);
-		}
-done:
-		u128_xor(dst, dst, (u128 *)walk.iv);
-		*(u128 *)walk.iv = last_iv;
-		err = skcipher_walk_done(&walk, nbytes);
-	}
-
-	glue_fpu_end(fpu_enabled);
-	return err;
-}
-EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
-
-MODULE_LICENSE("GPL");
arch/x86/include/asm/crypto/glue_helper.h (-74)

--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Shared glue code for 128bit block ciphers
- */
-
-#ifndef _CRYPTO_GLUE_HELPER_H
-#define _CRYPTO_GLUE_HELPER_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <asm/fpu/api.h>
-
-typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
-
-struct common_glue_func_entry {
-	unsigned int num_blocks; /* number of blocks that @fn will process */
-	union {
-		common_glue_func_t ecb;
-		common_glue_cbc_func_t cbc;
-	} fn_u;
-};
-
-struct common_glue_ctx {
-	unsigned int num_funcs;
-	int fpu_blocks_limit; /* -1 means fpu not needed at all */
-
-	/*
-	 * First funcs entry must have largest num_blocks and last funcs entry
-	 * must have num_blocks == 1!
-	 */
-	struct common_glue_func_entry funcs[];
-};
-
-static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
-				  struct skcipher_walk *walk,
-				  bool fpu_enabled, unsigned int nbytes)
-{
-	if (likely(fpu_blocks_limit < 0))
-		return false;
-
-	if (fpu_enabled)
-		return true;
-
-	/*
-	 * Vector-registers are only used when chunk to be processed is large
-	 * enough, so do not enable FPU until it is necessary.
-	 */
-	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
-		return false;
-
-	/* prevent sleeping if FPU is in use */
-	skcipher_walk_atomise(walk);
-
-	kernel_fpu_begin();
-	return true;
-}
-
-static inline void glue_fpu_end(bool fpu_enabled)
-{
-	if (fpu_enabled)
-		kernel_fpu_end();
-}
-
-extern int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
-			       struct skcipher_request *req);
-
-extern int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
-				       struct skcipher_request *req);
-
-extern int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
-				       struct skcipher_request *req);
-
-#endif /* _CRYPTO_GLUE_HELPER_H */
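For reference, a driver consumed the API removed above by declaring a per-cipher dispatch table. A sketch of the old pattern (Serpent symbols again illustrative), which had to honor the ordering contract documented in struct common_glue_ctx:

static const struct common_glue_ctx serpent_enc = {
	.num_funcs = 2,
	.fpu_blocks_limit = SERPENT_PARALLEL_BLOCKS,

	.funcs = { {
		/* first entry: the widest batch the assembly provides */
		.num_blocks = SERPENT_PARALLEL_BLOCKS,
		.fn_u = { .ecb = serpent_ecb_enc_8way_avx }
	}, {
		/* last entry must process exactly one block */
		.num_blocks = 1,
		.fn_u = { .ecb = __serpent_encrypt }
	} }
};

static int ecb_encrypt(struct skcipher_request *req)
{
	return glue_ecb_req_128bit(&serpent_enc, req);
}

glue_ecb_req_128bit() walked the request and dispatched through the fn_u pointers at run time; the macro-based instantiations shown under the commit message do the same batching with direct calls.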
crypto/Kconfig (-5)

--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -210,11 +210,6 @@
 	tristate
 	select CRYPTO_CRYPTD
 
-config CRYPTO_GLUE_HELPER_X86
-	tristate
-	depends on X86
-	select CRYPTO_SKCIPHER
-
 config CRYPTO_ENGINE
 	tristate
 
crypto/skcipher.c (-6)

--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -491,12 +491,6 @@
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_virt);
 
-void skcipher_walk_atomise(struct skcipher_walk *walk)
-{
-	walk->flags &= ~SKCIPHER_WALK_SLEEP;
-}
-EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
-
 int skcipher_walk_async(struct skcipher_walk *walk,
 			struct skcipher_request *req)
 {
include/crypto/internal/skcipher.h (-1)

--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -133,7 +133,6 @@
 int skcipher_walk_virt(struct skcipher_walk *walk,
 		       struct skcipher_request *req,
 		       bool atomic);
-void skcipher_walk_atomise(struct skcipher_walk *walk);
 int skcipher_walk_async(struct skcipher_walk *walk,
 			struct skcipher_request *req);
 int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,