Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccp - Move HMAC calculation down to ccp ops file

Move the support to perform an HMAC calculation into
the CCP operations file. This eliminates the need to
perform a synchronous SHA operation used to calculate
the HMAC.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Tom Lendacky and committed by Herbert Xu
c11baa02 d81ed653

+139 -110
+25 -105
drivers/crypto/ccp/ccp-crypto-sha.c
··· 24 24 #include "ccp-crypto.h" 25 25 26 26 27 - struct ccp_sha_result { 28 - struct completion completion; 29 - int err; 30 - }; 31 - 32 - static void ccp_sync_hash_complete(struct crypto_async_request *req, int err) 33 - { 34 - struct ccp_sha_result *result = req->data; 35 - 36 - if (err == -EINPROGRESS) 37 - return; 38 - 39 - result->err = err; 40 - complete(&result->completion); 41 - } 42 - 43 - static int ccp_sync_hash(struct crypto_ahash *tfm, u8 *buf, 44 - struct scatterlist *sg, unsigned int len) 45 - { 46 - struct ccp_sha_result result; 47 - struct ahash_request *req; 48 - int ret; 49 - 50 - init_completion(&result.completion); 51 - 52 - req = ahash_request_alloc(tfm, GFP_KERNEL); 53 - if (!req) 54 - return -ENOMEM; 55 - 56 - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 57 - ccp_sync_hash_complete, &result); 58 - ahash_request_set_crypt(req, sg, buf, len); 59 - 60 - ret = crypto_ahash_digest(req); 61 - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { 62 - ret = wait_for_completion_interruptible(&result.completion); 63 - if (!ret) 64 - ret = result.err; 65 - } 66 - 67 - ahash_request_free(req); 68 - 69 - return ret; 70 - } 71 - 72 - static int ccp_sha_finish_hmac(struct crypto_async_request *async_req) 73 - { 74 - struct ahash_request *req = ahash_request_cast(async_req); 75 - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 76 - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); 77 - struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 78 - struct scatterlist sg[2]; 79 - unsigned int block_size = 80 - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 81 - unsigned int digest_size = crypto_ahash_digestsize(tfm); 82 - 83 - sg_init_table(sg, ARRAY_SIZE(sg)); 84 - sg_set_buf(&sg[0], ctx->u.sha.opad, block_size); 85 - sg_set_buf(&sg[1], rctx->ctx, digest_size); 86 - 87 - return ccp_sync_hash(ctx->u.sha.hmac_tfm, req->result, sg, 88 - block_size + digest_size); 89 - } 90 - 91 27 static int ccp_sha_complete(struct crypto_async_request *async_req, 
int ret) 92 28 { 93 29 struct ahash_request *req = ahash_request_cast(async_req); 94 30 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 95 - struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); 96 31 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 97 32 unsigned int digest_size = crypto_ahash_digestsize(tfm); 98 33 ··· 47 112 if (req->result) 48 113 memcpy(req->result, rctx->ctx, digest_size); 49 114 50 - /* If we're doing an HMAC, we need to perform that on the final op */ 51 - if (rctx->final && ctx->u.sha.key_len) 52 - ret = ccp_sha_finish_hmac(async_req); 53 - 54 115 e_free: 55 116 sg_free_table(&rctx->data_sg); 56 117 ··· 57 126 unsigned int final) 58 127 { 59 128 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 129 + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); 60 130 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 61 131 struct scatterlist *sg; 62 132 unsigned int block_size = ··· 128 196 rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); 129 197 rctx->cmd.u.sha.src = sg; 130 198 rctx->cmd.u.sha.src_len = rctx->hash_cnt; 199 + rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? 200 + &ctx->u.sha.opad_sg : NULL; 201 + rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? 
202 + ctx->u.sha.opad_count : 0; 203 + rctx->cmd.u.sha.first = rctx->first; 131 204 rctx->cmd.u.sha.final = rctx->final; 132 205 rctx->cmd.u.sha.msg_bits = rctx->msg_bits; 133 206 ··· 155 218 156 219 memset(rctx, 0, sizeof(*rctx)); 157 220 158 - memcpy(rctx->ctx, alg->init, sizeof(rctx->ctx)); 159 221 rctx->type = alg->type; 160 222 rctx->first = 1; 161 223 ··· 197 261 unsigned int key_len) 198 262 { 199 263 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 200 - struct scatterlist sg; 201 - unsigned int block_size = 202 - crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 203 - unsigned int digest_size = crypto_ahash_digestsize(tfm); 264 + struct crypto_shash *shash = ctx->u.sha.hmac_tfm; 265 + struct { 266 + struct shash_desc sdesc; 267 + char ctx[crypto_shash_descsize(shash)]; 268 + } desc; 269 + unsigned int block_size = crypto_shash_blocksize(shash); 270 + unsigned int digest_size = crypto_shash_digestsize(shash); 204 271 int i, ret; 205 272 206 273 /* Set to zero until complete */ ··· 216 277 217 278 if (key_len > block_size) { 218 279 /* Must hash the input key */ 219 - sg_init_one(&sg, key, key_len); 220 - ret = ccp_sync_hash(tfm, ctx->u.sha.key, &sg, key_len); 280 + desc.sdesc.tfm = shash; 281 + desc.sdesc.flags = crypto_ahash_get_flags(tfm) & 282 + CRYPTO_TFM_REQ_MAY_SLEEP; 283 + 284 + ret = crypto_shash_digest(&desc.sdesc, key, key_len, 285 + ctx->u.sha.key); 221 286 if (ret) { 222 287 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 223 288 return -EINVAL; ··· 235 292 ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36; 236 293 ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; 237 294 } 295 + 296 + sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); 297 + ctx->u.sha.opad_count = block_size; 238 298 239 299 ctx->u.sha.key_len = key_len; 240 300 ··· 265 319 { 266 320 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 267 321 struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); 268 - struct crypto_ahash *hmac_tfm; 322 + struct 
crypto_shash *hmac_tfm; 269 323 270 - hmac_tfm = crypto_alloc_ahash(alg->child_alg, 271 - CRYPTO_ALG_TYPE_AHASH, 0); 324 + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); 272 325 if (IS_ERR(hmac_tfm)) { 273 326 pr_warn("could not load driver %s need for HMAC support\n", 274 327 alg->child_alg); ··· 284 339 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); 285 340 286 341 if (ctx->u.sha.hmac_tfm) 287 - crypto_free_ahash(ctx->u.sha.hmac_tfm); 342 + crypto_free_shash(ctx->u.sha.hmac_tfm); 288 343 289 344 ccp_sha_cra_exit(tfm); 290 345 } 291 346 292 - static const __be32 sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { 293 - cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 294 - cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), 295 - cpu_to_be32(SHA1_H4), 0, 0, 0, 296 - }; 297 - 298 - static const __be32 sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { 299 - cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 300 - cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 301 - cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 302 - cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 303 - }; 304 - 305 - static const __be32 sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { 306 - cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 307 - cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 308 - cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 309 - cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 310 - }; 311 - 312 347 struct ccp_sha_def { 313 348 const char *name; 314 349 const char *drv_name; 315 - const __be32 *init; 316 350 enum ccp_sha_type type; 317 351 u32 digest_size; 318 352 u32 block_size; ··· 301 377 { 302 378 .name = "sha1", 303 379 .drv_name = "sha1-ccp", 304 - .init = sha1_init, 305 380 .type = CCP_SHA_TYPE_1, 306 381 .digest_size = SHA1_DIGEST_SIZE, 307 382 .block_size = SHA1_BLOCK_SIZE, ··· 308 385 { 309 386 .name = "sha224", 310 387 .drv_name = "sha224-ccp", 311 - .init = sha224_init, 312 388 .type = CCP_SHA_TYPE_224, 313 389 .digest_size = SHA224_DIGEST_SIZE, 314 390 .block_size = 
SHA224_BLOCK_SIZE, ··· 315 393 { 316 394 .name = "sha256", 317 395 .drv_name = "sha256-ccp", 318 - .init = sha256_init, 319 396 .type = CCP_SHA_TYPE_256, 320 397 .digest_size = SHA256_DIGEST_SIZE, 321 398 .block_size = SHA256_BLOCK_SIZE, ··· 381 460 382 461 INIT_LIST_HEAD(&ccp_alg->entry); 383 462 384 - ccp_alg->init = def->init; 385 463 ccp_alg->type = def->type; 386 464 387 465 alg = &ccp_alg->alg;
+4 -4
drivers/crypto/ccp/ccp-crypto.h
··· 137 137 #define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE 138 138 139 139 struct ccp_sha_ctx { 140 + struct scatterlist opad_sg; 141 + unsigned int opad_count; 142 + 140 143 unsigned int key_len; 141 144 u8 key[MAX_SHA_BLOCK_SIZE]; 142 145 u8 ipad[MAX_SHA_BLOCK_SIZE]; 143 146 u8 opad[MAX_SHA_BLOCK_SIZE]; 144 - struct crypto_ahash *hmac_tfm; 147 + struct crypto_shash *hmac_tfm; 145 148 }; 146 149 147 150 struct ccp_sha_req_ctx { ··· 169 166 struct scatterlist buf_sg; 170 167 unsigned int buf_count; 171 168 u8 buf[MAX_SHA_BLOCK_SIZE]; 172 - 173 - /* HMAC support field */ 174 - struct scatterlist pad_sg; 175 169 176 170 /* CCP driver command */ 177 171 struct ccp_cmd cmd;
+103 -1
drivers/crypto/ccp/ccp-ops.c
··· 23 23 #include <linux/ccp.h> 24 24 #include <linux/scatterlist.h> 25 25 #include <crypto/scatterwalk.h> 26 + #include <crypto/sha.h> 26 27 27 28 #include "ccp-dev.h" 28 29 ··· 131 130 struct ccp_passthru_op passthru; 132 131 struct ccp_ecc_op ecc; 133 132 } u; 133 + }; 134 + 135 + /* SHA initial context values */ 136 + static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { 137 + cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), 138 + cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), 139 + cpu_to_be32(SHA1_H4), 0, 0, 0, 140 + }; 141 + 142 + static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { 143 + cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), 144 + cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), 145 + cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), 146 + cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), 147 + }; 148 + 149 + static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = { 150 + cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), 151 + cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), 152 + cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), 153 + cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), 134 154 }; 135 155 136 156 /* The CCP cannot perform zero-length sha operations so the caller ··· 1433 1411 if (ret) 1434 1412 return ret; 1435 1413 1436 - ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); 1414 + if (sha->first) { 1415 + const __be32 *init; 1416 + 1417 + switch (sha->type) { 1418 + case CCP_SHA_TYPE_1: 1419 + init = ccp_sha1_init; 1420 + break; 1421 + case CCP_SHA_TYPE_224: 1422 + init = ccp_sha224_init; 1423 + break; 1424 + case CCP_SHA_TYPE_256: 1425 + init = ccp_sha256_init; 1426 + break; 1427 + default: 1428 + ret = -EINVAL; 1429 + goto e_ctx; 1430 + } 1431 + memcpy(ctx.address, init, CCP_SHA_CTXSIZE); 1432 + } else 1433 + ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); 1434 + 1437 1435 ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx, 1438 1436 CCP_PASSTHRU_BYTESWAP_256BIT); 1439 1437 if (ret) { 
··· 1492 1450 } 1493 1451 1494 1452 ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len); 1453 + 1454 + if (sha->final && sha->opad) { 1455 + /* HMAC operation, recursively perform final SHA */ 1456 + struct ccp_cmd hmac_cmd; 1457 + struct scatterlist sg; 1458 + u64 block_size, digest_size; 1459 + u8 *hmac_buf; 1460 + 1461 + switch (sha->type) { 1462 + case CCP_SHA_TYPE_1: 1463 + block_size = SHA1_BLOCK_SIZE; 1464 + digest_size = SHA1_DIGEST_SIZE; 1465 + break; 1466 + case CCP_SHA_TYPE_224: 1467 + block_size = SHA224_BLOCK_SIZE; 1468 + digest_size = SHA224_DIGEST_SIZE; 1469 + break; 1470 + case CCP_SHA_TYPE_256: 1471 + block_size = SHA256_BLOCK_SIZE; 1472 + digest_size = SHA256_DIGEST_SIZE; 1473 + break; 1474 + default: 1475 + ret = -EINVAL; 1476 + goto e_data; 1477 + } 1478 + 1479 + if (sha->opad_len != block_size) { 1480 + ret = -EINVAL; 1481 + goto e_data; 1482 + } 1483 + 1484 + hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); 1485 + if (!hmac_buf) { 1486 + ret = -ENOMEM; 1487 + goto e_data; 1488 + } 1489 + sg_init_one(&sg, hmac_buf, block_size + digest_size); 1490 + 1491 + scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); 1492 + memcpy(hmac_buf + block_size, ctx.address, digest_size); 1493 + 1494 + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); 1495 + hmac_cmd.engine = CCP_ENGINE_SHA; 1496 + hmac_cmd.u.sha.type = sha->type; 1497 + hmac_cmd.u.sha.ctx = sha->ctx; 1498 + hmac_cmd.u.sha.ctx_len = sha->ctx_len; 1499 + hmac_cmd.u.sha.src = &sg; 1500 + hmac_cmd.u.sha.src_len = block_size + digest_size; 1501 + hmac_cmd.u.sha.opad = NULL; 1502 + hmac_cmd.u.sha.opad_len = 0; 1503 + hmac_cmd.u.sha.first = 1; 1504 + hmac_cmd.u.sha.final = 1; 1505 + hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; 1506 + 1507 + ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); 1508 + if (ret) 1509 + cmd->engine_error = hmac_cmd.engine_error; 1510 + 1511 + kfree(hmac_buf); 1512 + } 1495 1513 1496 1514 e_data: 1497 1515 ccp_free_data(&src, cmd_q);
+7
include/linux/ccp.h
··· 232 232 * @ctx_len: length in bytes of hash value 233 233 * @src: data to be used for this operation 234 234 * @src_len: length in bytes of data used for this operation 235 + * @opad: data to be used for final HMAC operation 236 + * @opad_len: length in bytes of data used for final HMAC operation 237 + * @first: indicates first SHA operation 235 238 * @final: indicates final SHA operation 236 239 * @msg_bits: total length of the message in bits used in final SHA operation 237 240 * ··· 254 251 struct scatterlist *src; 255 252 u64 src_len; /* In bytes */ 256 253 254 + struct scatterlist *opad; 255 + u32 opad_len; /* In bytes */ 256 + 257 + u32 first; /* Indicates first sha cmd */ 257 258 u32 final; /* Indicates final sha cmd */ 258 259 u64 msg_bits; /* Message length in bits required for 259 260 * final sha cmd */