Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: x86 - Regularize glue function prototypes

The crypto glue performed function prototype casting via macros to make
indirect calls to assembly routines. Instead of performing casts at the
call sites (which trips Control Flow Integrity prototype checking), switch
each prototype to a common standard set of arguments which allows the
removal of the existing macros. In order to keep pointer math unchanged,
internal casting between u128 pointers and u8 pointers is added.

Co-developed-by: João Moreira <joao.moreira@intel.com>
Signed-off-by: João Moreira <joao.moreira@intel.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Kees Cook and committed by Herbert Xu.
9c1e8836 7278fa25

+370 -409
+4 -4
arch/x86/crypto/aesni-intel_asm.S
··· 1942 1942 SYM_FUNC_END(aesni_set_key) 1943 1943 1944 1944 /* 1945 - * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) 1945 + * void aesni_enc(const void *ctx, u8 *dst, const u8 *src) 1946 1946 */ 1947 1947 SYM_FUNC_START(aesni_enc) 1948 1948 FRAME_BEGIN ··· 2131 2131 SYM_FUNC_END(_aesni_enc4) 2132 2132 2133 2133 /* 2134 - * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) 2134 + * void aesni_dec (const void *ctx, u8 *dst, const u8 *src) 2135 2135 */ 2136 2136 SYM_FUNC_START(aesni_dec) 2137 2137 FRAME_BEGIN ··· 2716 2716 pxor CTR, IV; 2717 2717 2718 2718 /* 2719 - * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, 2720 - * bool enc, u8 *iv) 2719 + * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst, 2720 + * const u8 *src, bool enc, le128 *iv) 2721 2721 */ 2722 2722 SYM_FUNC_START(aesni_xts_crypt8) 2723 2723 FRAME_BEGIN
+18 -27
arch/x86/crypto/aesni-intel_glue.c
··· 83 83 84 84 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, 85 85 unsigned int key_len); 86 - asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out, 87 - const u8 *in); 88 - asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out, 89 - const u8 *in); 86 + asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in); 87 + asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in); 90 88 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out, 91 89 const u8 *in, unsigned int len); 92 90 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out, ··· 104 106 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, 105 107 const u8 *in, unsigned int len, u8 *iv); 106 108 107 - asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out, 108 - const u8 *in, bool enc, u8 *iv); 109 + asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out, 110 + const u8 *in, bool enc, le128 *iv); 109 111 110 112 /* asmlinkage void aesni_gcm_enc() 111 113 * void *ctx, AES Key schedule. Starts on a 16 byte boundary. 
··· 548 550 } 549 551 550 552 551 - static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) 553 + static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 552 554 { 553 - aesni_enc(ctx, out, in); 555 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc); 554 556 } 555 557 556 - static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 558 + static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 557 559 { 558 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); 560 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec); 559 561 } 560 562 561 - static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) 563 + static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 562 564 { 563 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); 565 + aesni_xts_crypt8(ctx, dst, src, true, iv); 564 566 } 565 567 566 - static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) 568 + static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 567 569 { 568 - aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); 569 - } 570 - 571 - static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) 572 - { 573 - aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); 570 + aesni_xts_crypt8(ctx, dst, src, false, iv); 574 571 } 575 572 576 573 static const struct common_glue_ctx aesni_enc_xts = { ··· 574 581 575 582 .funcs = { { 576 583 .num_blocks = 8, 577 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) } 584 + .fn_u = { .xts = aesni_xts_enc8 } 578 585 }, { 579 586 .num_blocks = 1, 580 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) } 587 + .fn_u = { .xts = aesni_xts_enc } 581 588 } } 582 589 }; 583 590 ··· 587 594 588 595 .funcs = { { 589 596 .num_blocks = 8, 590 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) } 597 + .fn_u = { .xts = 
aesni_xts_dec8 } 591 598 }, { 592 599 .num_blocks = 1, 593 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) } 600 + .fn_u = { .xts = aesni_xts_dec } 594 601 } } 595 602 }; 596 603 ··· 599 606 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 600 607 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 601 608 602 - return glue_xts_req_128bit(&aesni_enc_xts, req, 603 - XTS_TWEAK_CAST(aesni_xts_tweak), 609 + return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc, 604 610 aes_ctx(ctx->raw_tweak_ctx), 605 611 aes_ctx(ctx->raw_crypt_ctx), 606 612 false); ··· 610 618 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 611 619 struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 612 620 613 - return glue_xts_req_128bit(&aesni_dec_xts, req, 614 - XTS_TWEAK_CAST(aesni_xts_tweak), 621 + return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc, 615 622 aes_ctx(ctx->raw_tweak_ctx), 616 623 aes_ctx(ctx->raw_crypt_ctx), 617 624 true);
+34 -40
arch/x86/crypto/camellia_aesni_avx2_glue.c
··· 19 19 #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32 20 20 21 21 /* 32-way AVX2/AES-NI parallel cipher functions */ 22 - asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst, 23 - const u8 *src); 24 - asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst, 25 - const u8 *src); 22 + asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src); 23 + asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src); 26 24 27 - asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst, 28 - const u8 *src); 29 - asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst, 30 - const u8 *src, le128 *iv); 25 + asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src); 26 + asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src, 27 + le128 *iv); 31 28 32 - asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst, 33 - const u8 *src, le128 *iv); 34 - asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst, 35 - const u8 *src, le128 *iv); 29 + asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src, 30 + le128 *iv); 31 + asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src, 32 + le128 *iv); 36 33 37 34 static const struct common_glue_ctx camellia_enc = { 38 35 .num_funcs = 4, ··· 37 40 38 41 .funcs = { { 39 42 .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, 40 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) } 43 + .fn_u = { .ecb = camellia_ecb_enc_32way } 41 44 }, { 42 45 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 43 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) } 46 + .fn_u = { .ecb = camellia_ecb_enc_16way } 44 47 }, { 45 48 .num_blocks = 2, 46 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } 49 + .fn_u = { .ecb = camellia_enc_blk_2way } 47 50 }, { 48 51 .num_blocks = 1, 49 - .fn_u = { .ecb = 
GLUE_FUNC_CAST(camellia_enc_blk) } 52 + .fn_u = { .ecb = camellia_enc_blk } 50 53 } } 51 54 }; 52 55 ··· 56 59 57 60 .funcs = { { 58 61 .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, 59 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) } 62 + .fn_u = { .ctr = camellia_ctr_32way } 60 63 }, { 61 64 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 62 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) } 65 + .fn_u = { .ctr = camellia_ctr_16way } 63 66 }, { 64 67 .num_blocks = 2, 65 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } 68 + .fn_u = { .ctr = camellia_crypt_ctr_2way } 66 69 }, { 67 70 .num_blocks = 1, 68 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } 71 + .fn_u = { .ctr = camellia_crypt_ctr } 69 72 } } 70 73 }; 71 74 ··· 75 78 76 79 .funcs = { { 77 80 .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, 78 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) } 81 + .fn_u = { .xts = camellia_xts_enc_32way } 79 82 }, { 80 83 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 81 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) } 84 + .fn_u = { .xts = camellia_xts_enc_16way } 82 85 }, { 83 86 .num_blocks = 1, 84 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) } 87 + .fn_u = { .xts = camellia_xts_enc } 85 88 } } 86 89 }; 87 90 ··· 91 94 92 95 .funcs = { { 93 96 .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, 94 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) } 97 + .fn_u = { .ecb = camellia_ecb_dec_32way } 95 98 }, { 96 99 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 97 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) } 100 + .fn_u = { .ecb = camellia_ecb_dec_16way } 98 101 }, { 99 102 .num_blocks = 2, 100 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } 103 + .fn_u = { .ecb = camellia_dec_blk_2way } 101 104 }, { 102 105 .num_blocks = 1, 103 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } 106 + .fn_u = { .ecb = camellia_dec_blk } 104 107 } } 105 108 }; 106 109 
··· 110 113 111 114 .funcs = { { 112 115 .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, 113 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) } 116 + .fn_u = { .cbc = camellia_cbc_dec_32way } 114 117 }, { 115 118 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 116 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) } 119 + .fn_u = { .cbc = camellia_cbc_dec_16way } 117 120 }, { 118 121 .num_blocks = 2, 119 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } 122 + .fn_u = { .cbc = camellia_decrypt_cbc_2way } 120 123 }, { 121 124 .num_blocks = 1, 122 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } 125 + .fn_u = { .cbc = camellia_dec_blk } 123 126 } } 124 127 }; 125 128 ··· 129 132 130 133 .funcs = { { 131 134 .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS, 132 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) } 135 + .fn_u = { .xts = camellia_xts_dec_32way } 133 136 }, { 134 137 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 135 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) } 138 + .fn_u = { .xts = camellia_xts_dec_16way } 136 139 }, { 137 140 .num_blocks = 1, 138 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) } 141 + .fn_u = { .xts = camellia_xts_dec } 139 142 } } 140 143 }; 141 144 ··· 158 161 159 162 static int cbc_encrypt(struct skcipher_request *req) 160 163 { 161 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), 162 - req); 164 + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); 163 165 } 164 166 165 167 static int cbc_decrypt(struct skcipher_request *req) ··· 176 180 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 177 181 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 178 182 179 - return glue_xts_req_128bit(&camellia_enc_xts, req, 180 - XTS_TWEAK_CAST(camellia_enc_blk), 183 + return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, 181 184 &ctx->tweak_ctx, &ctx->crypt_ctx, false); 182 185 } 183 186 ··· 185 190 
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 186 191 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 187 192 188 - return glue_xts_req_128bit(&camellia_dec_xts, req, 189 - XTS_TWEAK_CAST(camellia_enc_blk), 193 + return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, 190 194 &ctx->tweak_ctx, &ctx->crypt_ctx, true); 191 195 } 192 196
+32 -40
arch/x86/crypto/camellia_aesni_avx_glue.c
··· 18 18 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16 19 19 20 20 /* 16-way parallel cipher functions (avx/aes-ni) */ 21 - asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, 22 - const u8 *src); 21 + asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); 23 22 EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way); 24 23 25 - asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, 26 - const u8 *src); 24 + asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); 27 25 EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way); 28 26 29 - asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, 30 - const u8 *src); 27 + asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); 31 28 EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way); 32 29 33 - asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, 34 - const u8 *src, le128 *iv); 30 + asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, 31 + le128 *iv); 35 32 EXPORT_SYMBOL_GPL(camellia_ctr_16way); 36 33 37 - asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, 38 - const u8 *src, le128 *iv); 34 + asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, 35 + le128 *iv); 39 36 EXPORT_SYMBOL_GPL(camellia_xts_enc_16way); 40 37 41 - asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, 42 - const u8 *src, le128 *iv); 38 + asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, 39 + le128 *iv); 43 40 EXPORT_SYMBOL_GPL(camellia_xts_dec_16way); 44 41 45 - void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 42 + void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 46 43 { 47 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 48 - GLUE_FUNC_CAST(camellia_enc_blk)); 44 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk); 49 45 } 50 46 
EXPORT_SYMBOL_GPL(camellia_xts_enc); 51 47 52 - void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) 48 + void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 53 49 { 54 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 55 - GLUE_FUNC_CAST(camellia_dec_blk)); 50 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk); 56 51 } 57 52 EXPORT_SYMBOL_GPL(camellia_xts_dec); 58 53 ··· 57 62 58 63 .funcs = { { 59 64 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 60 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) } 65 + .fn_u = { .ecb = camellia_ecb_enc_16way } 61 66 }, { 62 67 .num_blocks = 2, 63 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } 68 + .fn_u = { .ecb = camellia_enc_blk_2way } 64 69 }, { 65 70 .num_blocks = 1, 66 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } 71 + .fn_u = { .ecb = camellia_enc_blk } 67 72 } } 68 73 }; 69 74 ··· 73 78 74 79 .funcs = { { 75 80 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 76 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) } 81 + .fn_u = { .ctr = camellia_ctr_16way } 77 82 }, { 78 83 .num_blocks = 2, 79 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } 84 + .fn_u = { .ctr = camellia_crypt_ctr_2way } 80 85 }, { 81 86 .num_blocks = 1, 82 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } 87 + .fn_u = { .ctr = camellia_crypt_ctr } 83 88 } } 84 89 }; 85 90 ··· 89 94 90 95 .funcs = { { 91 96 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 92 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) } 97 + .fn_u = { .xts = camellia_xts_enc_16way } 93 98 }, { 94 99 .num_blocks = 1, 95 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) } 100 + .fn_u = { .xts = camellia_xts_enc } 96 101 } } 97 102 }; 98 103 ··· 102 107 103 108 .funcs = { { 104 109 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 105 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) } 110 + .fn_u = { .ecb = camellia_ecb_dec_16way } 106 111 }, { 107 112 
.num_blocks = 2, 108 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } 113 + .fn_u = { .ecb = camellia_dec_blk_2way } 109 114 }, { 110 115 .num_blocks = 1, 111 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } 116 + .fn_u = { .ecb = camellia_dec_blk } 112 117 } } 113 118 }; 114 119 ··· 118 123 119 124 .funcs = { { 120 125 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 121 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) } 126 + .fn_u = { .cbc = camellia_cbc_dec_16way } 122 127 }, { 123 128 .num_blocks = 2, 124 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } 129 + .fn_u = { .cbc = camellia_decrypt_cbc_2way } 125 130 }, { 126 131 .num_blocks = 1, 127 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } 132 + .fn_u = { .cbc = camellia_dec_blk } 128 133 } } 129 134 }; 130 135 ··· 134 139 135 140 .funcs = { { 136 141 .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS, 137 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) } 142 + .fn_u = { .xts = camellia_xts_dec_16way } 138 143 }, { 139 144 .num_blocks = 1, 140 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) } 145 + .fn_u = { .xts = camellia_xts_dec } 141 146 } } 142 147 }; 143 148 ··· 160 165 161 166 static int cbc_encrypt(struct skcipher_request *req) 162 167 { 163 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), 164 - req); 168 + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); 165 169 } 166 170 167 171 static int cbc_decrypt(struct skcipher_request *req) ··· 200 206 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 201 207 struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 202 208 203 - return glue_xts_req_128bit(&camellia_enc_xts, req, 204 - XTS_TWEAK_CAST(camellia_enc_blk), 209 + return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk, 205 210 &ctx->tweak_ctx, &ctx->crypt_ctx, false); 206 211 } 207 212 ··· 209 216 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 210 217 struct 
camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 211 218 212 - return glue_xts_req_128bit(&camellia_dec_xts, req, 213 - XTS_TWEAK_CAST(camellia_enc_blk), 219 + return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk, 214 220 &ctx->tweak_ctx, &ctx->crypt_ctx, true); 215 221 } 216 222
+24 -21
arch/x86/crypto/camellia_glue.c
··· 18 18 #include <asm/crypto/glue_helper.h> 19 19 20 20 /* regular block cipher functions */ 21 - asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, 22 - const u8 *src, bool xor); 21 + asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, 22 + bool xor); 23 23 EXPORT_SYMBOL_GPL(__camellia_enc_blk); 24 - asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, 25 - const u8 *src); 24 + asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); 26 25 EXPORT_SYMBOL_GPL(camellia_dec_blk); 27 26 28 27 /* 2-way parallel cipher functions */ 29 - asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, 30 - const u8 *src, bool xor); 28 + asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, 29 + bool xor); 31 30 EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way); 32 - asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, 33 - const u8 *src); 31 + asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); 34 32 EXPORT_SYMBOL_GPL(camellia_dec_blk_2way); 35 33 36 34 static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) ··· 1265 1267 return camellia_setkey(&tfm->base, key, key_len); 1266 1268 } 1267 1269 1268 - void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src) 1270 + void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s) 1269 1271 { 1272 + u128 *dst = (u128 *)d; 1273 + const u128 *src = (const u128 *)s; 1270 1274 u128 iv = *src; 1271 1275 1272 1276 camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src); ··· 1277 1277 } 1278 1278 EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way); 1279 1279 1280 - void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) 1280 + void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) 1281 1281 { 1282 1282 be128 ctrblk; 1283 + u128 *dst = (u128 *)d; 1284 + const u128 *src = (const u128 *)s; 1283 1285 1284 1286 if (dst != src) 
1285 1287 *dst = *src; ··· 1293 1291 } 1294 1292 EXPORT_SYMBOL_GPL(camellia_crypt_ctr); 1295 1293 1296 - void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv) 1294 + void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv) 1297 1295 { 1298 1296 be128 ctrblks[2]; 1297 + u128 *dst = (u128 *)d; 1298 + const u128 *src = (const u128 *)s; 1299 1299 1300 1300 if (dst != src) { 1301 1301 dst[0] = src[0]; ··· 1319 1315 1320 1316 .funcs = { { 1321 1317 .num_blocks = 2, 1322 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) } 1318 + .fn_u = { .ecb = camellia_enc_blk_2way } 1323 1319 }, { 1324 1320 .num_blocks = 1, 1325 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) } 1321 + .fn_u = { .ecb = camellia_enc_blk } 1326 1322 } } 1327 1323 }; 1328 1324 ··· 1332 1328 1333 1329 .funcs = { { 1334 1330 .num_blocks = 2, 1335 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) } 1331 + .fn_u = { .ctr = camellia_crypt_ctr_2way } 1336 1332 }, { 1337 1333 .num_blocks = 1, 1338 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) } 1334 + .fn_u = { .ctr = camellia_crypt_ctr } 1339 1335 } } 1340 1336 }; 1341 1337 ··· 1345 1341 1346 1342 .funcs = { { 1347 1343 .num_blocks = 2, 1348 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) } 1344 + .fn_u = { .ecb = camellia_dec_blk_2way } 1349 1345 }, { 1350 1346 .num_blocks = 1, 1351 - .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) } 1347 + .fn_u = { .ecb = camellia_dec_blk } 1352 1348 } } 1353 1349 }; 1354 1350 ··· 1358 1354 1359 1355 .funcs = { { 1360 1356 .num_blocks = 2, 1361 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) } 1357 + .fn_u = { .cbc = camellia_decrypt_cbc_2way } 1362 1358 }, { 1363 1359 .num_blocks = 1, 1364 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) } 1360 + .fn_u = { .cbc = camellia_dec_blk } 1365 1361 } } 1366 1362 }; 1367 1363 ··· 1377 1373 1378 1374 static int cbc_encrypt(struct skcipher_request *req) 1379 1375 { 1380 
- return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk), 1381 - req); 1376 + return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req); 1382 1377 } 1383 1378 1384 1379 static int cbc_decrypt(struct skcipher_request *req)
+30 -36
arch/x86/crypto/cast6_avx_glue.c
··· 20 20 21 21 #define CAST6_PARALLEL_BLOCKS 8 22 22 23 - asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst, 24 - const u8 *src); 25 - asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst, 26 - const u8 *src); 23 + asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); 24 + asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); 27 25 28 - asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst, 29 - const u8 *src); 30 - asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src, 26 + asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); 27 + asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src, 31 28 le128 *iv); 32 29 33 - asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst, 34 - const u8 *src, le128 *iv); 35 - asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst, 36 - const u8 *src, le128 *iv); 30 + asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, 31 + le128 *iv); 32 + asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, 33 + le128 *iv); 37 34 38 35 static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, 39 36 const u8 *key, unsigned int keylen) ··· 38 41 return cast6_setkey(&tfm->base, key, keylen); 39 42 } 40 43 41 - static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 44 + static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 42 45 { 43 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 44 - GLUE_FUNC_CAST(__cast6_encrypt)); 46 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt); 45 47 } 46 48 47 - static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) 49 + static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 48 50 { 49 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 50 - GLUE_FUNC_CAST(__cast6_decrypt)); 51 + glue_xts_crypt_128bit_one(ctx, 
dst, src, iv, __cast6_decrypt); 51 52 } 52 53 53 - static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) 54 + static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) 54 55 { 55 56 be128 ctrblk; 57 + u128 *dst = (u128 *)d; 58 + const u128 *src = (const u128 *)s; 56 59 57 60 le128_to_be128(&ctrblk, iv); 58 61 le128_inc(iv); ··· 67 70 68 71 .funcs = { { 69 72 .num_blocks = CAST6_PARALLEL_BLOCKS, 70 - .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) } 73 + .fn_u = { .ecb = cast6_ecb_enc_8way } 71 74 }, { 72 75 .num_blocks = 1, 73 - .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) } 76 + .fn_u = { .ecb = __cast6_encrypt } 74 77 } } 75 78 }; 76 79 ··· 80 83 81 84 .funcs = { { 82 85 .num_blocks = CAST6_PARALLEL_BLOCKS, 83 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) } 86 + .fn_u = { .ctr = cast6_ctr_8way } 84 87 }, { 85 88 .num_blocks = 1, 86 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) } 89 + .fn_u = { .ctr = cast6_crypt_ctr } 87 90 } } 88 91 }; 89 92 ··· 93 96 94 97 .funcs = { { 95 98 .num_blocks = CAST6_PARALLEL_BLOCKS, 96 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) } 99 + .fn_u = { .xts = cast6_xts_enc_8way } 97 100 }, { 98 101 .num_blocks = 1, 99 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) } 102 + .fn_u = { .xts = cast6_xts_enc } 100 103 } } 101 104 }; 102 105 ··· 106 109 107 110 .funcs = { { 108 111 .num_blocks = CAST6_PARALLEL_BLOCKS, 109 - .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) } 112 + .fn_u = { .ecb = cast6_ecb_dec_8way } 110 113 }, { 111 114 .num_blocks = 1, 112 - .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) } 115 + .fn_u = { .ecb = __cast6_decrypt } 113 116 } } 114 117 }; 115 118 ··· 119 122 120 123 .funcs = { { 121 124 .num_blocks = CAST6_PARALLEL_BLOCKS, 122 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) } 125 + .fn_u = { .cbc = cast6_cbc_dec_8way } 123 126 }, { 124 127 .num_blocks = 1, 125 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) 
} 128 + .fn_u = { .cbc = __cast6_decrypt } 126 129 } } 127 130 }; 128 131 ··· 132 135 133 136 .funcs = { { 134 137 .num_blocks = CAST6_PARALLEL_BLOCKS, 135 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) } 138 + .fn_u = { .xts = cast6_xts_dec_8way } 136 139 }, { 137 140 .num_blocks = 1, 138 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) } 141 + .fn_u = { .xts = cast6_xts_dec } 139 142 } } 140 143 }; 141 144 ··· 151 154 152 155 static int cbc_encrypt(struct skcipher_request *req) 153 156 { 154 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt), 155 - req); 157 + return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req); 156 158 } 157 159 158 160 static int cbc_decrypt(struct skcipher_request *req) ··· 195 199 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 196 200 struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 197 201 198 - return glue_xts_req_128bit(&cast6_enc_xts, req, 199 - XTS_TWEAK_CAST(__cast6_encrypt), 202 + return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt, 200 203 &ctx->tweak_ctx, &ctx->crypt_ctx, false); 201 204 } 202 205 ··· 204 209 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 205 210 struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 206 211 207 - return glue_xts_req_128bit(&cast6_dec_xts, req, 208 - XTS_TWEAK_CAST(__cast6_encrypt), 212 + return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt, 209 213 &ctx->tweak_ctx, &ctx->crypt_ctx, true); 210 214 } 211 215
+14 -9
arch/x86/crypto/glue_helper.c
··· 134 134 src -= num_blocks - 1; 135 135 dst -= num_blocks - 1; 136 136 137 - gctx->funcs[i].fn_u.cbc(ctx, dst, src); 137 + gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst, 138 + (const u8 *)src); 138 139 139 140 nbytes -= func_bytes; 140 141 if (nbytes < bsize) ··· 189 188 190 189 /* Process multi-block batch */ 191 190 do { 192 - gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk); 191 + gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst, 192 + (const u8 *)src, 193 + &ctrblk); 193 194 src += num_blocks; 194 195 dst += num_blocks; 195 196 nbytes -= func_bytes; ··· 213 210 214 211 be128_to_le128(&ctrblk, (be128 *)walk.iv); 215 212 memcpy(&tmp, walk.src.virt.addr, nbytes); 216 - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp, 213 + gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp, 214 + (const u8 *)&tmp, 217 215 &ctrblk); 218 216 memcpy(walk.dst.virt.addr, &tmp, nbytes); 219 217 le128_to_be128((be128 *)walk.iv, &ctrblk); ··· 244 240 245 241 if (nbytes >= func_bytes) { 246 242 do { 247 - gctx->funcs[i].fn_u.xts(ctx, dst, src, 243 + gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst, 244 + (const u8 *)src, 248 245 walk->iv); 249 246 250 247 src += num_blocks; ··· 359 354 } 360 355 EXPORT_SYMBOL_GPL(glue_xts_req_128bit); 361 356 362 - void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv, 363 - common_glue_func_t fn) 357 + void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src, 358 + le128 *iv, common_glue_func_t fn) 364 359 { 365 360 le128 ivblk = *iv; 366 361 ··· 368 363 gf128mul_x_ble(iv, &ivblk); 369 364 370 365 /* CC <- T xor C */ 371 - u128_xor(dst, src, (u128 *)&ivblk); 366 + u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk); 372 367 373 368 /* PP <- D(Key2,CC) */ 374 - fn(ctx, (u8 *)dst, (u8 *)dst); 369 + fn(ctx, dst, dst); 375 370 376 371 /* P <- T xor PP */ 377 - u128_xor(dst, dst, (u128 *)&ivblk); 372 + u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk); 378 373 } 379 374 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one); 380 375
+31 -34
arch/x86/crypto/serpent_avx2_glue.c
··· 19 19 #define SERPENT_AVX2_PARALLEL_BLOCKS 16 20 20 21 21 /* 16-way AVX2 parallel cipher functions */ 22 - asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst, 23 - const u8 *src); 24 - asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst, 25 - const u8 *src); 26 - asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src); 22 + asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); 23 + asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); 24 + asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); 27 25 28 - asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src, 26 + asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src, 29 27 le128 *iv); 30 - asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst, 31 - const u8 *src, le128 *iv); 32 - asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst, 33 - const u8 *src, le128 *iv); 28 + asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, 29 + le128 *iv); 30 + asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, 31 + le128 *iv); 34 32 35 33 static int serpent_setkey_skcipher(struct crypto_skcipher *tfm, 36 34 const u8 *key, unsigned int keylen) ··· 42 44 43 45 .funcs = { { 44 46 .num_blocks = 16, 45 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) } 47 + .fn_u = { .ecb = serpent_ecb_enc_16way } 46 48 }, { 47 49 .num_blocks = 8, 48 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } 50 + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } 49 51 }, { 50 52 .num_blocks = 1, 51 - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } 53 + .fn_u = { .ecb = __serpent_encrypt } 52 54 } } 53 55 }; 54 56 ··· 58 60 59 61 .funcs = { { 60 62 .num_blocks = 16, 61 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) } 63 + .fn_u = { .ctr = serpent_ctr_16way } 62 64 }, { 63 
65 .num_blocks = 8, 64 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } 66 + .fn_u = { .ctr = serpent_ctr_8way_avx } 65 67 }, { 66 68 .num_blocks = 1, 67 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } 69 + .fn_u = { .ctr = __serpent_crypt_ctr } 68 70 } } 69 71 }; 70 72 ··· 74 76 75 77 .funcs = { { 76 78 .num_blocks = 16, 77 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) } 79 + .fn_u = { .xts = serpent_xts_enc_16way } 78 80 }, { 79 81 .num_blocks = 8, 80 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } 82 + .fn_u = { .xts = serpent_xts_enc_8way_avx } 81 83 }, { 82 84 .num_blocks = 1, 83 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } 85 + .fn_u = { .xts = serpent_xts_enc } 84 86 } } 85 87 }; 86 88 ··· 90 92 91 93 .funcs = { { 92 94 .num_blocks = 16, 93 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) } 95 + .fn_u = { .ecb = serpent_ecb_dec_16way } 94 96 }, { 95 97 .num_blocks = 8, 96 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } 98 + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } 97 99 }, { 98 100 .num_blocks = 1, 99 - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } 101 + .fn_u = { .ecb = __serpent_decrypt } 100 102 } } 101 103 }; 102 104 ··· 106 108 107 109 .funcs = { { 108 110 .num_blocks = 16, 109 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) } 111 + .fn_u = { .cbc = serpent_cbc_dec_16way } 110 112 }, { 111 113 .num_blocks = 8, 112 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } 114 + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } 113 115 }, { 114 116 .num_blocks = 1, 115 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } 117 + .fn_u = { .cbc = __serpent_decrypt } 116 118 } } 117 119 }; 118 120 ··· 122 124 123 125 .funcs = { { 124 126 .num_blocks = 16, 125 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) } 127 + .fn_u = { .xts = serpent_xts_dec_16way } 126 128 }, { 127 129 .num_blocks = 8, 128 - .fn_u = { .xts = 
GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } 130 + .fn_u = { .xts = serpent_xts_dec_8way_avx } 129 131 }, { 130 132 .num_blocks = 1, 131 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } 133 + .fn_u = { .xts = serpent_xts_dec } 132 134 } } 133 135 }; 134 136 ··· 144 146 145 147 static int cbc_encrypt(struct skcipher_request *req) 146 148 { 147 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), 148 - req); 149 + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); 149 150 } 150 151 151 152 static int cbc_decrypt(struct skcipher_request *req) ··· 163 166 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 164 167 165 168 return glue_xts_req_128bit(&serpent_enc_xts, req, 166 - XTS_TWEAK_CAST(__serpent_encrypt), 167 - &ctx->tweak_ctx, &ctx->crypt_ctx, false); 169 + __serpent_encrypt, &ctx->tweak_ctx, 170 + &ctx->crypt_ctx, false); 168 171 } 169 172 170 173 static int xts_decrypt(struct skcipher_request *req) ··· 173 176 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 174 177 175 178 return glue_xts_req_128bit(&serpent_dec_xts, req, 176 - XTS_TWEAK_CAST(__serpent_encrypt), 177 - &ctx->tweak_ctx, &ctx->crypt_ctx, true); 179 + __serpent_encrypt, &ctx->tweak_ctx, 180 + &ctx->crypt_ctx, true); 178 181 } 179 182 180 183 static struct skcipher_alg serpent_algs[] = {
+31 -32
arch/x86/crypto/serpent_avx_glue.c
··· 20 20 #include <asm/crypto/serpent-avx.h> 21 21 22 22 /* 8-way parallel cipher functions */ 23 - asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 23 + asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, 24 24 const u8 *src); 25 25 EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx); 26 26 27 - asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 27 + asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, 28 28 const u8 *src); 29 29 EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx); 30 30 31 - asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 31 + asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, 32 32 const u8 *src); 33 33 EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx); 34 34 35 - asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, 36 - const u8 *src, le128 *iv); 35 + asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, 36 + le128 *iv); 37 37 EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx); 38 38 39 - asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 39 + asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, 40 40 const u8 *src, le128 *iv); 41 41 EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx); 42 42 43 - asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 43 + asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, 44 44 const u8 *src, le128 *iv); 45 45 EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx); 46 46 47 - void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) 47 + void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) 48 48 { 49 49 be128 ctrblk; 50 + u128 *dst = (u128 *)d; 51 + const u128 *src = (const u128 *)s; 50 52 51 53 le128_to_be128(&ctrblk, iv); 52 54 le128_inc(iv); ··· 58 56 } 59 57 EXPORT_SYMBOL_GPL(__serpent_crypt_ctr); 60 58 61 - void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 59 + void 
serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 62 60 { 63 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 64 - GLUE_FUNC_CAST(__serpent_encrypt)); 61 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt); 65 62 } 66 63 EXPORT_SYMBOL_GPL(serpent_xts_enc); 67 64 68 - void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) 65 + void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 69 66 { 70 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 71 - GLUE_FUNC_CAST(__serpent_decrypt)); 67 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt); 72 68 } 73 69 EXPORT_SYMBOL_GPL(serpent_xts_dec); 74 70 ··· 102 102 103 103 .funcs = { { 104 104 .num_blocks = SERPENT_PARALLEL_BLOCKS, 105 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) } 105 + .fn_u = { .ecb = serpent_ecb_enc_8way_avx } 106 106 }, { 107 107 .num_blocks = 1, 108 - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } 108 + .fn_u = { .ecb = __serpent_encrypt } 109 109 } } 110 110 }; 111 111 ··· 115 115 116 116 .funcs = { { 117 117 .num_blocks = SERPENT_PARALLEL_BLOCKS, 118 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) } 118 + .fn_u = { .ctr = serpent_ctr_8way_avx } 119 119 }, { 120 120 .num_blocks = 1, 121 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) } 121 + .fn_u = { .ctr = __serpent_crypt_ctr } 122 122 } } 123 123 }; 124 124 ··· 128 128 129 129 .funcs = { { 130 130 .num_blocks = SERPENT_PARALLEL_BLOCKS, 131 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) } 131 + .fn_u = { .xts = serpent_xts_enc_8way_avx } 132 132 }, { 133 133 .num_blocks = 1, 134 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) } 134 + .fn_u = { .xts = serpent_xts_enc } 135 135 } } 136 136 }; 137 137 ··· 141 141 142 142 .funcs = { { 143 143 .num_blocks = SERPENT_PARALLEL_BLOCKS, 144 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) } 144 + .fn_u = { .ecb = serpent_ecb_dec_8way_avx } 145 145 }, { 146 146 
.num_blocks = 1, 147 - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } 147 + .fn_u = { .ecb = __serpent_decrypt } 148 148 } } 149 149 }; 150 150 ··· 154 154 155 155 .funcs = { { 156 156 .num_blocks = SERPENT_PARALLEL_BLOCKS, 157 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) } 157 + .fn_u = { .cbc = serpent_cbc_dec_8way_avx } 158 158 }, { 159 159 .num_blocks = 1, 160 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } 160 + .fn_u = { .cbc = __serpent_decrypt } 161 161 } } 162 162 }; 163 163 ··· 167 167 168 168 .funcs = { { 169 169 .num_blocks = SERPENT_PARALLEL_BLOCKS, 170 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) } 170 + .fn_u = { .xts = serpent_xts_dec_8way_avx } 171 171 }, { 172 172 .num_blocks = 1, 173 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) } 173 + .fn_u = { .xts = serpent_xts_dec } 174 174 } } 175 175 }; 176 176 ··· 186 186 187 187 static int cbc_encrypt(struct skcipher_request *req) 188 188 { 189 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), 190 - req); 189 + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req); 191 190 } 192 191 193 192 static int cbc_decrypt(struct skcipher_request *req) ··· 205 206 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 206 207 207 208 return glue_xts_req_128bit(&serpent_enc_xts, req, 208 - XTS_TWEAK_CAST(__serpent_encrypt), 209 - &ctx->tweak_ctx, &ctx->crypt_ctx, false); 209 + __serpent_encrypt, &ctx->tweak_ctx, 210 + &ctx->crypt_ctx, false); 210 211 } 211 212 212 213 static int xts_decrypt(struct skcipher_request *req) ··· 215 216 struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 216 217 217 218 return glue_xts_req_128bit(&serpent_dec_xts, req, 218 - XTS_TWEAK_CAST(__serpent_encrypt), 219 - &ctx->tweak_ctx, &ctx->crypt_ctx, true); 219 + __serpent_encrypt, &ctx->tweak_ctx, 220 + &ctx->crypt_ctx, true); 220 221 } 221 222 222 223 static struct skcipher_alg serpent_algs[] = {
+18 -12
arch/x86/crypto/serpent_sse2_glue.c
··· 31 31 return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen); 32 32 } 33 33 34 - static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) 34 + static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s) 35 35 { 36 36 u128 ivs[SERPENT_PARALLEL_BLOCKS - 1]; 37 + u128 *dst = (u128 *)d; 38 + const u128 *src = (const u128 *)s; 37 39 unsigned int j; 38 40 39 41 for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++) ··· 47 45 u128_xor(dst + (j + 1), dst + (j + 1), ivs + j); 48 46 } 49 47 50 - static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) 48 + static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) 51 49 { 52 50 be128 ctrblk; 51 + u128 *dst = (u128 *)d; 52 + const u128 *src = (const u128 *)s; 53 53 54 54 le128_to_be128(&ctrblk, iv); 55 55 le128_inc(iv); ··· 60 56 u128_xor(dst, src, (u128 *)&ctrblk); 61 57 } 62 58 63 - static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, 59 + static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s, 64 60 le128 *iv) 65 61 { 66 62 be128 ctrblks[SERPENT_PARALLEL_BLOCKS]; 63 + u128 *dst = (u128 *)d; 64 + const u128 *src = (const u128 *)s; 67 65 unsigned int i; 68 66 69 67 for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) { ··· 85 79 86 80 .funcs = { { 87 81 .num_blocks = SERPENT_PARALLEL_BLOCKS, 88 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) } 82 + .fn_u = { .ecb = serpent_enc_blk_xway } 89 83 }, { 90 84 .num_blocks = 1, 91 - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) } 85 + .fn_u = { .ecb = __serpent_encrypt } 92 86 } } 93 87 }; 94 88 ··· 98 92 99 93 .funcs = { { 100 94 .num_blocks = SERPENT_PARALLEL_BLOCKS, 101 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) } 95 + .fn_u = { .ctr = serpent_crypt_ctr_xway } 102 96 }, { 103 97 .num_blocks = 1, 104 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) } 98 + .fn_u = { .ctr = serpent_crypt_ctr } 105 99 } } 106 100 }; 107 101 ··· 111 
105 112 106 .funcs = { { 113 107 .num_blocks = SERPENT_PARALLEL_BLOCKS, 114 - .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) } 108 + .fn_u = { .ecb = serpent_dec_blk_xway } 115 109 }, { 116 110 .num_blocks = 1, 117 - .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) } 111 + .fn_u = { .ecb = __serpent_decrypt } 118 112 } } 119 113 }; 120 114 ··· 124 118 125 119 .funcs = { { 126 120 .num_blocks = SERPENT_PARALLEL_BLOCKS, 127 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) } 121 + .fn_u = { .cbc = serpent_decrypt_cbc_xway } 128 122 }, { 129 123 .num_blocks = 1, 130 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) } 124 + .fn_u = { .cbc = __serpent_decrypt } 131 125 } } 132 126 }; 133 127 ··· 143 137 144 138 static int cbc_encrypt(struct skcipher_request *req) 145 139 { 146 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt), 140 + return glue_cbc_encrypt_req_128bit(__serpent_encrypt, 147 141 req); 148 142 } 149 143
+33 -42
arch/x86/crypto/twofish_avx_glue.c
··· 22 22 #define TWOFISH_PARALLEL_BLOCKS 8 23 23 24 24 /* 8-way parallel cipher functions */ 25 - asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst, 26 - const u8 *src); 27 - asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst, 28 - const u8 *src); 25 + asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); 26 + asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); 29 27 30 - asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst, 31 - const u8 *src); 32 - asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst, 33 - const u8 *src, le128 *iv); 28 + asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); 29 + asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src, 30 + le128 *iv); 34 31 35 - asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst, 36 - const u8 *src, le128 *iv); 37 - asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst, 38 - const u8 *src, le128 *iv); 32 + asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src, 33 + le128 *iv); 34 + asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src, 35 + le128 *iv); 39 36 40 37 static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, 41 38 const u8 *key, unsigned int keylen) ··· 40 43 return twofish_setkey(&tfm->base, key, keylen); 41 44 } 42 45 43 - static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 44 - const u8 *src) 46 + static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) 45 47 { 46 48 __twofish_enc_blk_3way(ctx, dst, src, false); 47 49 } 48 50 49 - static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) 51 + static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 50 52 { 51 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 52 - GLUE_FUNC_CAST(twofish_enc_blk)); 53 + 
glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk); 53 54 } 54 55 55 - static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) 56 + static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) 56 57 { 57 - glue_xts_crypt_128bit_one(ctx, dst, src, iv, 58 - GLUE_FUNC_CAST(twofish_dec_blk)); 58 + glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk); 59 59 } 60 60 61 61 struct twofish_xts_ctx { ··· 87 93 88 94 .funcs = { { 89 95 .num_blocks = TWOFISH_PARALLEL_BLOCKS, 90 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) } 96 + .fn_u = { .ecb = twofish_ecb_enc_8way } 91 97 }, { 92 98 .num_blocks = 3, 93 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } 99 + .fn_u = { .ecb = twofish_enc_blk_3way } 94 100 }, { 95 101 .num_blocks = 1, 96 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } 102 + .fn_u = { .ecb = twofish_enc_blk } 97 103 } } 98 104 }; 99 105 ··· 103 109 104 110 .funcs = { { 105 111 .num_blocks = TWOFISH_PARALLEL_BLOCKS, 106 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) } 112 + .fn_u = { .ctr = twofish_ctr_8way } 107 113 }, { 108 114 .num_blocks = 3, 109 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) } 115 + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } 110 116 }, { 111 117 .num_blocks = 1, 112 - .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) } 118 + .fn_u = { .ctr = twofish_enc_blk_ctr } 113 119 } } 114 120 }; 115 121 ··· 119 125 120 126 .funcs = { { 121 127 .num_blocks = TWOFISH_PARALLEL_BLOCKS, 122 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) } 128 + .fn_u = { .xts = twofish_xts_enc_8way } 123 129 }, { 124 130 .num_blocks = 1, 125 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) } 131 + .fn_u = { .xts = twofish_xts_enc } 126 132 } } 127 133 }; 128 134 ··· 132 138 133 139 .funcs = { { 134 140 .num_blocks = TWOFISH_PARALLEL_BLOCKS, 135 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) } 141 + .fn_u = { .ecb = 
twofish_ecb_dec_8way } 136 142 }, { 137 143 .num_blocks = 3, 138 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } 144 + .fn_u = { .ecb = twofish_dec_blk_3way } 139 145 }, { 140 146 .num_blocks = 1, 141 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } 147 + .fn_u = { .ecb = twofish_dec_blk } 142 148 } } 143 149 }; 144 150 ··· 148 154 149 155 .funcs = { { 150 156 .num_blocks = TWOFISH_PARALLEL_BLOCKS, 151 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) } 157 + .fn_u = { .cbc = twofish_cbc_dec_8way } 152 158 }, { 153 159 .num_blocks = 3, 154 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } 160 + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } 155 161 }, { 156 162 .num_blocks = 1, 157 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } 163 + .fn_u = { .cbc = twofish_dec_blk } 158 164 } } 159 165 }; 160 166 ··· 164 170 165 171 .funcs = { { 166 172 .num_blocks = TWOFISH_PARALLEL_BLOCKS, 167 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) } 173 + .fn_u = { .xts = twofish_xts_dec_8way } 168 174 }, { 169 175 .num_blocks = 1, 170 - .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) } 176 + .fn_u = { .xts = twofish_xts_dec } 171 177 } } 172 178 }; 173 179 ··· 183 189 184 190 static int cbc_encrypt(struct skcipher_request *req) 185 191 { 186 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), 187 - req); 192 + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); 188 193 } 189 194 190 195 static int cbc_decrypt(struct skcipher_request *req) ··· 201 208 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 202 209 struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 203 210 204 - return glue_xts_req_128bit(&twofish_enc_xts, req, 205 - XTS_TWEAK_CAST(twofish_enc_blk), 211 + return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk, 206 212 &ctx->tweak_ctx, &ctx->crypt_ctx, false); 207 213 } 208 214 ··· 210 218 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 211 219 struct 
twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 212 220 213 - return glue_xts_req_128bit(&twofish_dec_xts, req, 214 - XTS_TWEAK_CAST(twofish_enc_blk), 221 + return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk, 215 222 &ctx->tweak_ctx, &ctx->crypt_ctx, true); 216 223 } 217 224
+20 -17
arch/x86/crypto/twofish_glue_3way.c
··· 25 25 return twofish_setkey(&tfm->base, key, keylen); 26 26 } 27 27 28 - static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 29 - const u8 *src) 28 + static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) 30 29 { 31 30 __twofish_enc_blk_3way(ctx, dst, src, false); 32 31 } 33 32 34 - static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, 33 + static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst, 35 34 const u8 *src) 36 35 { 37 36 __twofish_enc_blk_3way(ctx, dst, src, true); 38 37 } 39 38 40 - void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) 39 + void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s) 41 40 { 42 41 u128 ivs[2]; 42 + u128 *dst = (u128 *)d; 43 + const u128 *src = (const u128 *)s; 43 44 44 45 ivs[0] = src[0]; 45 46 ivs[1] = src[1]; ··· 52 51 } 53 52 EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); 54 53 55 - void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) 54 + void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv) 56 55 { 57 56 be128 ctrblk; 57 + u128 *dst = (u128 *)d; 58 + const u128 *src = (const u128 *)s; 58 59 59 60 if (dst != src) 60 61 *dst = *src; ··· 69 66 } 70 67 EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr); 71 68 72 - void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, 73 - le128 *iv) 69 + void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv) 74 70 { 75 71 be128 ctrblks[3]; 72 + u128 *dst = (u128 *)d; 73 + const u128 *src = (const u128 *)s; 76 74 77 75 if (dst != src) { 78 76 dst[0] = src[0]; ··· 98 94 99 95 .funcs = { { 100 96 .num_blocks = 3, 101 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) } 97 + .fn_u = { .ecb = twofish_enc_blk_3way } 102 98 }, { 103 99 .num_blocks = 1, 104 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) } 100 + .fn_u = { .ecb = twofish_enc_blk } 105 101 } } 106 102 }; 107 103 ··· 111 107 112 108 .funcs = { { 113 
109 .num_blocks = 3, 114 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) } 110 + .fn_u = { .ctr = twofish_enc_blk_ctr_3way } 115 111 }, { 116 112 .num_blocks = 1, 117 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) } 113 + .fn_u = { .ctr = twofish_enc_blk_ctr } 118 114 } } 119 115 }; 120 116 ··· 124 120 125 121 .funcs = { { 126 122 .num_blocks = 3, 127 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) } 123 + .fn_u = { .ecb = twofish_dec_blk_3way } 128 124 }, { 129 125 .num_blocks = 1, 130 - .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) } 126 + .fn_u = { .ecb = twofish_dec_blk } 131 127 } } 132 128 }; 133 129 ··· 137 133 138 134 .funcs = { { 139 135 .num_blocks = 3, 140 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) } 136 + .fn_u = { .cbc = twofish_dec_blk_cbc_3way } 141 137 }, { 142 138 .num_blocks = 1, 143 - .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) } 139 + .fn_u = { .cbc = twofish_dec_blk } 144 140 } } 145 141 }; 146 142 ··· 156 152 157 153 static int cbc_encrypt(struct skcipher_request *req) 158 154 { 159 - return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk), 160 - req); 155 + return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req); 161 156 } 162 157 163 158 static int cbc_decrypt(struct skcipher_request *req)
+26 -31
arch/x86/include/asm/crypto/camellia.h
··· 32 32 unsigned int keylen); 33 33 34 34 /* regular block cipher functions */ 35 - asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, 36 - const u8 *src, bool xor); 37 - asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst, 38 - const u8 *src); 35 + asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src, 36 + bool xor); 37 + asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src); 39 38 40 39 /* 2-way parallel cipher functions */ 41 - asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, 42 - const u8 *src, bool xor); 43 - asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst, 44 - const u8 *src); 40 + asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src, 41 + bool xor); 42 + asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src); 45 43 46 44 /* 16-way parallel cipher functions (avx/aes-ni) */ 47 - asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst, 48 - const u8 *src); 49 - asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst, 50 - const u8 *src); 45 + asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src); 46 + asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src); 51 47 52 - asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst, 53 - const u8 *src); 54 - asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst, 55 - const u8 *src, le128 *iv); 48 + asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src); 49 + asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src, 50 + le128 *iv); 56 51 57 - asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst, 58 - const u8 *src, le128 *iv); 59 - asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst, 60 - const u8 *src, le128 *iv); 52 + asmlinkage void 
camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src, 53 + le128 *iv); 54 + asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src, 55 + le128 *iv); 61 56 62 - static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, 63 - const u8 *src) 57 + static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src) 64 58 { 65 59 __camellia_enc_blk(ctx, dst, src, false); 66 60 } 67 61 68 - static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst, 69 - const u8 *src) 62 + static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src) 70 63 { 71 64 __camellia_enc_blk(ctx, dst, src, true); 72 65 } 73 66 74 - static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, 67 + static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst, 75 68 const u8 *src) 76 69 { 77 70 __camellia_enc_blk_2way(ctx, dst, src, false); 78 71 } 79 72 80 - static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst, 73 + static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst, 81 74 const u8 *src) 82 75 { 83 76 __camellia_enc_blk_2way(ctx, dst, src, true); 84 77 } 85 78 86 79 /* glue helpers */ 87 - extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src); 88 - extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, 80 + extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src); 81 + extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, 89 82 le128 *iv); 90 - extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, 83 + extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src, 91 84 le128 *iv); 92 85 93 - extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); 94 - extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); 86 + extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, 87 + 
le128 *iv); 88 + extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, 89 + le128 *iv); 95 90 96 91 #endif /* ASM_X86_CAMELLIA_H */
+7 -11
arch/x86/include/asm/crypto/glue_helper.h
··· 11 11 #include <asm/fpu/api.h> 12 12 #include <crypto/b128ops.h> 13 13 14 - typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src); 15 - typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src); 16 - typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src, 14 + typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src); 15 + typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src); 16 + typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src, 17 17 le128 *iv); 18 - typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src, 18 + typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src, 19 19 le128 *iv); 20 - 21 - #define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn)) 22 - #define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn)) 23 - #define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn)) 24 - #define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn)) 25 20 26 21 struct common_glue_func_entry { 27 22 unsigned int num_blocks; /* number of blocks that @fn will process */ ··· 111 116 common_glue_func_t tweak_fn, void *tweak_ctx, 112 117 void *crypt_ctx, bool decrypt); 113 118 114 - extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, 115 - le128 *iv, common_glue_func_t fn); 119 + extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, 120 + const u8 *src, le128 *iv, 121 + common_glue_func_t fn); 116 122 117 123 #endif /* _CRYPTO_GLUE_HELPER_H */
+10 -10
arch/x86/include/asm/crypto/serpent-avx.h
··· 15 15 struct serpent_ctx crypt_ctx; 16 16 }; 17 17 18 - asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 18 + asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst, 19 19 const u8 *src); 20 - asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 20 + asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst, 21 21 const u8 *src); 22 22 23 - asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 23 + asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst, 24 24 const u8 *src); 25 - asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst, 26 - const u8 *src, le128 *iv); 25 + asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src, 26 + le128 *iv); 27 27 28 - asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst, 28 + asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst, 29 29 const u8 *src, le128 *iv); 30 - asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst, 30 + asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst, 31 31 const u8 *src, le128 *iv); 32 32 33 - extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, 33 + extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src, 34 34 le128 *iv); 35 35 36 - extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv); 37 - extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv); 36 + extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv); 37 + extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv); 38 38 39 39 extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key, 40 40 unsigned int keylen);
+12 -16
arch/x86/include/asm/crypto/serpent-sse2.h
··· 9 9 10 10 #define SERPENT_PARALLEL_BLOCKS 4 11 11 12 - asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst, 12 + asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst, 13 13 const u8 *src, bool xor); 14 - asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst, 14 + asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst, 15 15 const u8 *src); 16 16 17 - static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, 18 - const u8 *src) 17 + static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) 19 18 { 20 19 __serpent_enc_blk_4way(ctx, dst, src, false); 21 20 } 22 21 23 - static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, 24 - const u8 *src) 22 + static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, 23 + u8 *dst, const u8 *src) 25 24 { 26 25 __serpent_enc_blk_4way(ctx, dst, src, true); 27 26 } 28 27 29 - static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, 30 - const u8 *src) 28 + static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) 31 29 { 32 30 serpent_dec_blk_4way(ctx, dst, src); 33 31 } ··· 34 36 35 37 #define SERPENT_PARALLEL_BLOCKS 8 36 38 37 - asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst, 39 + asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst, 38 40 const u8 *src, bool xor); 39 - asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst, 41 + asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst, 40 42 const u8 *src); 41 43 42 - static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, 43 - const u8 *src) 44 + static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src) 44 45 { 45 46 __serpent_enc_blk_8way(ctx, dst, src, false); 46 47 } 47 48 48 - static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, 
49 - const u8 *src) 49 + static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx, 50 + u8 *dst, const u8 *src) 50 51 { 51 52 __serpent_enc_blk_8way(ctx, dst, src, true); 52 53 } 53 54 54 - static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, 55 - const u8 *src) 55 + static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src) 56 56 { 57 57 serpent_dec_blk_8way(ctx, dst, src); 58 58 }
+8 -11
arch/x86/include/asm/crypto/twofish.h
··· 7 7 #include <crypto/b128ops.h> 8 8 9 9 /* regular block cipher functions from twofish_x86_64 module */ 10 - asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst, 11 - const u8 *src); 12 - asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst, 13 - const u8 *src); 10 + asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src); 11 + asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src); 14 12 15 13 /* 3-way parallel cipher functions */ 16 - asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, 17 - const u8 *src, bool xor); 18 - asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst, 19 - const u8 *src); 14 + asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src, 15 + bool xor); 16 + asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src); 20 17 21 18 /* helpers from twofish_x86_64-3way module */ 22 - extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src); 23 - extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, 19 + extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src); 20 + extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src, 24 21 le128 *iv); 25 - extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, 22 + extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src, 26 23 le128 *iv); 27 24 28 25 #endif /* ASM_X86_TWOFISH_H */
+10 -8
crypto/cast6_generic.c
··· 154 154 EXPORT_SYMBOL_GPL(cast6_setkey); 155 155 156 156 /*forward quad round*/ 157 - static inline void Q(u32 *block, u8 *Kr, u32 *Km) 157 + static inline void Q(u32 *block, const u8 *Kr, const u32 *Km) 158 158 { 159 159 u32 I; 160 160 block[2] ^= F1(block[3], Kr[0], Km[0]); ··· 164 164 } 165 165 166 166 /*reverse quad round*/ 167 - static inline void QBAR(u32 *block, u8 *Kr, u32 *Km) 167 + static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km) 168 168 { 169 169 u32 I; 170 170 block[3] ^= F1(block[0], Kr[3], Km[3]); ··· 173 173 block[2] ^= F1(block[3], Kr[0], Km[0]); 174 174 } 175 175 176 - void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) 176 + void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) 177 177 { 178 + const struct cast6_ctx *c = ctx; 178 179 const __be32 *src = (const __be32 *)inbuf; 179 180 __be32 *dst = (__be32 *)outbuf; 180 181 u32 block[4]; 181 - u32 *Km; 182 - u8 *Kr; 182 + const u32 *Km; 183 + const u8 *Kr; 183 184 184 185 block[0] = be32_to_cpu(src[0]); 185 186 block[1] = be32_to_cpu(src[1]); ··· 212 211 __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); 213 212 } 214 213 215 - void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf) 214 + void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf) 216 215 { 216 + const struct cast6_ctx *c = ctx; 217 217 const __be32 *src = (const __be32 *)inbuf; 218 218 __be32 *dst = (__be32 *)outbuf; 219 219 u32 block[4]; 220 - u32 *Km; 221 - u8 *Kr; 220 + const u32 *Km; 221 + const u8 *Kr; 222 222 223 223 block[0] = be32_to_cpu(src[0]); 224 224 block[1] = be32_to_cpu(src[1]);
+4 -2
crypto/serpent_generic.c
··· 449 449 } 450 450 EXPORT_SYMBOL_GPL(serpent_setkey); 451 451 452 - void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) 452 + void __serpent_encrypt(const void *c, u8 *dst, const u8 *src) 453 453 { 454 + const struct serpent_ctx *ctx = c; 454 455 const u32 *k = ctx->expkey; 455 456 const __le32 *s = (const __le32 *)src; 456 457 __le32 *d = (__le32 *)dst; ··· 515 514 __serpent_encrypt(ctx, dst, src); 516 515 } 517 516 518 - void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src) 517 + void __serpent_decrypt(const void *c, u8 *dst, const u8 *src) 519 518 { 519 + const struct serpent_ctx *ctx = c; 520 520 const u32 *k = ctx->expkey; 521 521 const __le32 *s = (const __le32 *)src; 522 522 __le32 *d = (__le32 *)dst;
+2 -2
include/crypto/cast6.h
··· 19 19 unsigned int keylen, u32 *flags); 20 20 int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); 21 21 22 - void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); 23 - void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src); 22 + void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src); 23 + void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src); 24 24 25 25 #endif
+2 -2
include/crypto/serpent.h
··· 22 22 unsigned int keylen); 23 23 int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); 24 24 25 - void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); 26 - void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src); 25 + void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src); 26 + void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src); 27 27 28 28 #endif
-2
include/crypto/xts.h
··· 8 8 9 9 #define XTS_BLOCK_SIZE 16 10 10 11 - #define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x)) 12 - 13 11 static inline int xts_check_key(struct crypto_tfm *tfm, 14 12 const u8 *key, unsigned int keylen) 15 13 {