Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Merge crypto tree to pick up vmx changes.

+100 -129
+4
crypto/hmac.c
··· 157 157 158 158 parent->descsize = sizeof(struct shash_desc) + 159 159 crypto_shash_descsize(hash); 160 + if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) { 161 + crypto_free_shash(hash); 162 + return -EINVAL; 163 + } 160 164 161 165 ctx->hash = hash; 162 166 return 0;
+1 -1
crypto/jitterentropy-kcapi.c
··· 193 193 crypto_unregister_rng(&jent_alg); 194 194 } 195 195 196 - subsys_initcall(jent_mod_init); 196 + module_init(jent_mod_init); 197 197 module_exit(jent_mod_exit); 198 198 199 199 MODULE_LICENSE("Dual BSD/GPL");
+1 -1
drivers/crypto/caam/ctrl.c
··· 469 469 } 470 470 471 471 /* 472 - * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ) 472 + * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP) 473 473 * have an issue wherein AXI bus transactions may not occur in the correct 474 474 * order. This isn't a problem running single descriptors, but can be if 475 475 * running multiple concurrent descriptors. Reworking the driver to throttle
+1 -1
drivers/crypto/vmx/aesp8-ppc.pl
··· 1357 1357 addi $idx,$idx,16 1358 1358 bdnz Loop_ctr32_enc 1359 1359 1360 - vadduwm $ivec,$ivec,$one 1360 + vadduqm $ivec,$ivec,$one 1361 1361 vmr $dat,$inptail 1362 1362 lvx $inptail,0,$inp 1363 1363 addi $inp,$inp,16
+86 -125
drivers/crypto/vmx/ghash.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /** 2 3 * GHASH routines supporting VMX instructions on the Power 8 3 4 * 4 - * Copyright (C) 2015 International Business Machines Inc. 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; version 2 only. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 5 + * Copyright (C) 2015, 2019 International Business Machines Inc. 18 6 * 19 7 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com> 8 + * 9 + * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback 10 + * mechanism. The new approach is based on arm64 code, which is: 11 + * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org> 20 12 */
21 13 22 14 #include <linux/types.h> ··· 30 38 const u8 *in, size_t len); 31 39 32 40 struct p8_ghash_ctx { 41 + /* key used by vector asm */ 33 42 u128 htable[16]; 34 - struct crypto_shash *fallback; 43 + /* key used by software fallback */ 44 + be128 key; 35 45 }; 36 46 37 47 struct p8_ghash_desc_ctx { 38 48 u64 shash[2]; 39 49 u8 buffer[GHASH_DIGEST_SIZE]; 40 50 int bytes; 41 - struct shash_desc fallback_desc; 42 51 }; 43 - 44 - static int p8_ghash_init_tfm(struct crypto_tfm *tfm) 45 - { 46 - const char *alg = "ghash-generic"; 47 - struct crypto_shash *fallback; 48 - struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm); 49 - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); 50 - 51 - fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK); 52 - if (IS_ERR(fallback)) { 53 - printk(KERN_ERR 54 - "Failed to allocate transformation for '%s': %ld\n", 55 - alg, PTR_ERR(fallback)); 56 - return PTR_ERR(fallback); 57 - } 58 - 59 - crypto_shash_set_flags(fallback, 60 - crypto_shash_get_flags((struct crypto_shash 61 - *) tfm)); 62 - 63 - /* Check if the descsize defined in the algorithm is still enough. */
64 - if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx) 65 - + crypto_shash_descsize(fallback)) { 66 - printk(KERN_ERR 67 - "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n", 68 - alg, 69 - shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx), 70 - crypto_shash_descsize(fallback)); 71 - return -EINVAL; 72 - } 73 - ctx->fallback = fallback; 74 - 75 - return 0; 76 - } 77 - 78 - static void p8_ghash_exit_tfm(struct crypto_tfm *tfm) 79 - { 80 - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); 81 - 82 - if (ctx->fallback) { 83 - crypto_free_shash(ctx->fallback); 84 - ctx->fallback = NULL; 85 - } 86 - } 87 52 88 53 static int p8_ghash_init(struct shash_desc *desc) 89 54 { 90 - struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); 91 55 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); 92 56 93 57 dctx->bytes = 0; 94 58 memset(dctx->shash, 0, GHASH_DIGEST_SIZE); 95 - dctx->fallback_desc.tfm = ctx->fallback; 96 - return crypto_shash_init(&dctx->fallback_desc); 59 + return 0; 97 60 } 98 61 99 62 static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, ··· 66 119 disable_kernel_vsx(); 67 120 pagefault_enable(); 68 121 preempt_enable(); 69 - return crypto_shash_setkey(ctx->fallback, key, keylen); 122 + 123 + memcpy(&ctx->key, key, GHASH_BLOCK_SIZE); 124 + 125 + return 0; 126 + } 127 + 128 + static inline void __ghash_block(struct p8_ghash_ctx *ctx, 129 + struct p8_ghash_desc_ctx *dctx) 130 + { 131 + if (crypto_simd_usable()) { 132 + preempt_disable(); 133 + pagefault_disable(); 134 + enable_kernel_vsx(); 135 + gcm_ghash_p8(dctx->shash, ctx->htable, 136 + dctx->buffer, GHASH_DIGEST_SIZE); 137 + disable_kernel_vsx(); 138 + pagefault_enable(); 139 + preempt_enable(); 140 + } else { 141 + crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); 142 + gf128mul_lle((be128 *)dctx->shash, &ctx->key); 143 + } 144 + } 145 + 146 + static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
147 + struct p8_ghash_desc_ctx *dctx, 148 + const u8 *src, unsigned int srclen) 149 + { 150 + if (crypto_simd_usable()) { 151 + preempt_disable(); 152 + pagefault_disable(); 153 + enable_kernel_vsx(); 154 + gcm_ghash_p8(dctx->shash, ctx->htable, 155 + src, srclen); 156 + disable_kernel_vsx(); 157 + pagefault_enable(); 158 + preempt_enable(); 159 + } else { 160 + while (srclen >= GHASH_BLOCK_SIZE) { 161 + crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); 162 + gf128mul_lle((be128 *)dctx->shash, &ctx->key); 163 + srclen -= GHASH_BLOCK_SIZE; 164 + src += GHASH_BLOCK_SIZE; 165 + } 166 + } 70 167 } 71 168 72 169 static int p8_ghash_update(struct shash_desc *desc, ··· 120 129 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); 121 130 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); 122 131 123 - if (!crypto_simd_usable()) { 124 - return crypto_shash_update(&dctx->fallback_desc, src, 125 - srclen); 126 - } else { 127 - if (dctx->bytes) { 128 - if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { 129 - memcpy(dctx->buffer + dctx->bytes, src, 130 - srclen); 131 - dctx->bytes += srclen; 132 - return 0; 133 - } 132 + if (dctx->bytes) { 133 + if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { 134 134 memcpy(dctx->buffer + dctx->bytes, src, 135 - GHASH_DIGEST_SIZE - dctx->bytes); 136 - preempt_disable(); 137 - pagefault_disable(); 138 - enable_kernel_vsx(); 139 - gcm_ghash_p8(dctx->shash, ctx->htable, 140 - dctx->buffer, GHASH_DIGEST_SIZE); 141 - disable_kernel_vsx(); 142 - pagefault_enable(); 143 - preempt_enable(); 144 - src += GHASH_DIGEST_SIZE - dctx->bytes; 145 - srclen -= GHASH_DIGEST_SIZE - dctx->bytes; 146 - dctx->bytes = 0; 135 + srclen); 136 + dctx->bytes += srclen; 137 + return 0; 147 138 } 148 - len = srclen & ~(GHASH_DIGEST_SIZE - 1); 149 - if (len) { 150 - preempt_disable(); 151 - pagefault_disable(); 152 - enable_kernel_vsx(); 153 - gcm_ghash_p8(dctx->shash, ctx->htable, src, len); 154 - disable_kernel_vsx(); 155 - pagefault_enable(); 156 - preempt_enable();
157 - src += len; 158 - srclen -= len; 159 - } 160 - if (srclen) { 161 - memcpy(dctx->buffer, src, srclen); 162 - dctx->bytes = srclen; 163 - } 164 - return 0; 139 + memcpy(dctx->buffer + dctx->bytes, src, 140 + GHASH_DIGEST_SIZE - dctx->bytes); 141 + 142 + __ghash_block(ctx, dctx); 143 + 144 + src += GHASH_DIGEST_SIZE - dctx->bytes; 145 + srclen -= GHASH_DIGEST_SIZE - dctx->bytes; 146 + dctx->bytes = 0; 165 147 } 148 + len = srclen & ~(GHASH_DIGEST_SIZE - 1); 149 + if (len) { 150 + __ghash_blocks(ctx, dctx, src, len); 151 + src += len; 152 + srclen -= len; 153 + } 154 + if (srclen) { 155 + memcpy(dctx->buffer, src, srclen); 156 + dctx->bytes = srclen; 157 + } 158 + return 0; 166 159 } 167 160 168 161 static int p8_ghash_final(struct shash_desc *desc, u8 *out) ··· 155 180 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); 156 181 struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); 157 182 158 - if (!crypto_simd_usable()) { 159 - return crypto_shash_final(&dctx->fallback_desc, out); 160 - } else { 161 - if (dctx->bytes) { 162 - for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) 163 - dctx->buffer[i] = 0; 164 - preempt_disable(); 165 - pagefault_disable(); 166 - enable_kernel_vsx(); 167 - gcm_ghash_p8(dctx->shash, ctx->htable, 168 - dctx->buffer, GHASH_DIGEST_SIZE); 169 - disable_kernel_vsx(); 170 - pagefault_enable(); 171 - preempt_enable(); 172 - dctx->bytes = 0; 173 - } 174 - memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); 175 - return 0; 183 + if (dctx->bytes) { 184 + for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) 185 + dctx->buffer[i] = 0; 186 + __ghash_block(ctx, dctx); 187 + dctx->bytes = 0; 176 188 } 189 + memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); 190 + return 0; 177 191 } 178 192 179 193 struct shash_alg p8_ghash_alg = { ··· 177 213 .cra_name = "ghash", 178 214 .cra_driver_name = "p8_ghash", 179 215 .cra_priority = 1000, 180 - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, 181 216 .cra_blocksize = GHASH_BLOCK_SIZE, 182 217 .cra_ctxsize = sizeof(struct p8_ghash_ctx),
183 218 .cra_module = THIS_MODULE, 184 - .cra_init = p8_ghash_init_tfm, 185 - .cra_exit = p8_ghash_exit_tfm, 186 219 }, 187 220 };
+7 -1
include/crypto/hash.h
··· 150 150 }; 151 151 152 152 #define HASH_MAX_DIGESTSIZE 64 153 - #define HASH_MAX_DESCSIZE 360 153 + 154 + /* 155 + * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc' 156 + * containing a 'struct sha3_state'. 157 + */ 158 + #define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) 159 + 154 160 #define HASH_MAX_STATESIZE 512 155 161 156 162 #define SHASH_DESC_ON_STACK(shash, ctx) \