crypto: qat - add AES-XTS support for QAT GEN4 devices

Add handling of AES-XTS specific to QAT GEN4 devices.

On these devices the cipher runs on the UCS slice, which does not support
hardware key conversion. Both XTS keys are stored in the content
descriptor (only the first key is sent to the hardware, the second is
used for tweak calculation), the decryption key is derived in software by
reversing the expanded key schedule, and the initial tweak is
pre-computed with a software AES cipher.
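
For context, a minimal sketch (not part of this patch) of how xts(aes) is
exercised through the kernel skcipher API once a driver such as this one
provides it; the helper name and the key/buffer handling are illustrative
assumptions only:

  #include <crypto/skcipher.h>
  #include <linux/scatterlist.h>

  /* Hypothetical helper: encrypt one buffer in place with xts(aes) */
  static int example_xts_encrypt(const u8 *key, unsigned int keylen,
                                 u8 *iv, void *buf, unsigned int len)
  {
          struct crypto_skcipher *tfm;
          struct skcipher_request *req;
          struct scatterlist sg;
          DECLARE_CRYPTO_WAIT(wait);
          int ret;

          tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
          if (IS_ERR(tfm))
                  return PTR_ERR(tfm);

          /* XTS takes a double-length key: two AES keys concatenated */
          ret = crypto_skcipher_setkey(tfm, key, keylen);
          if (ret)
                  goto out_free_tfm;

          req = skcipher_request_alloc(tfm, GFP_KERNEL);
          if (!req) {
                  ret = -ENOMEM;
                  goto out_free_tfm;
          }

          sg_init_one(&sg, buf, len);
          skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                             CRYPTO_TFM_REQ_MAY_SLEEP,
                                        crypto_req_done, &wait);
          skcipher_request_set_crypt(req, &sg, &sg, len, iv);

          ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

          skcipher_request_free(req);
  out_free_tfm:
          crypto_free_skcipher(tfm);
          return ret;
  }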

Co-developed-by: Tomaszx Kowalik <tomaszx.kowalik@intel.com>
Signed-off-by: Tomaszx Kowalik <tomaszx.kowalik@intel.com>
Signed-off-by: Marco Chiappero <marco.chiappero@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by Marco Chiappero and committed by Herbert Xu 5106dfea 67916c95

+89 -7
drivers/crypto/qat/qat_common/qat_algs.c
···
 				     ICP_QAT_HW_CIPHER_KEY_CONVERT, \
 				     ICP_QAT_HW_CIPHER_DECRYPT)
 
+#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
+	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
+				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+				       ICP_QAT_HW_CIPHER_DECRYPT)
+
 #define HW_CAP_AES_V2(accel_dev) \
 	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
 	 ICP_ACCEL_CAPABILITIES_AES_V2)
···
 	struct icp_qat_fw_la_bulk_req dec_fw_req;
 	struct qat_crypto_instance *inst;
 	struct crypto_skcipher *ftfm;
+	struct crypto_cipher *tweak;
 	bool fallback;
 	int mode;
 };
···
 	cd_pars->u.s.content_desc_params_sz =
 				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
 
-	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+
+		/* Store both XTS keys in CD, only the first key is sent
+		 * to the HW, the second key is used for tweak calculation
+		 */
+		memcpy(cd->ucs_aes.key, key, keylen);
+		keylen = keylen / 2;
+	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
 		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
 					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
 		keylen = round_up(keylen, 16);
···
 	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
 }
 
+static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
+				    u8 *key_reverse)
+{
+	struct crypto_aes_ctx aes_expanded;
+	int nrounds;
+	u8 *key;
+
+	aes_expandkey(&aes_expanded, key_forward, keylen);
+	if (keylen == AES_KEYSIZE_128) {
+		nrounds = 10;
+		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+		memcpy(key_reverse, key, AES_BLOCK_SIZE);
+	} else {
+		/* AES_KEYSIZE_256 */
+		nrounds = 14;
+		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
+		memcpy(key_reverse, key, AES_BLOCK_SIZE);
+		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+	}
+}
+
 static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
 				      int alg, const u8 *key,
 				      unsigned int keylen, int mode)
···
 	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
 	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
 
 	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
 	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
 
-	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
+	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
+		/* Key reversing not supported, set no convert */
+		dec_cd->aes.cipher_config.val =
+				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);
+
+		/* In-place key reversal */
+		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
+					dec_cd->ucs_aes.key);
+	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
 		dec_cd->aes.cipher_config.val =
 					QAT_AES_HW_CONFIG_DEC(alg, mode);
-	else
+	} else {
 		dec_cd->aes.cipher_config.val =
 					QAT_AES_HW_CONFIG_ENC(alg, mode);
+	}
 }
 
 static int qat_alg_validate_key(int key_len, int *alg, int mode)
···
 
 	ctx->fallback = false;
 
-	return qat_alg_skcipher_setkey(tfm, key, keylen,
-				       ICP_QAT_HW_CIPHER_XTS_MODE);
+	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
+				      ICP_QAT_HW_CIPHER_XTS_MODE);
+	if (ret)
+		return ret;
+
+	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
+		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
+					   keylen / 2);
+
+	return ret;
+}
+
+static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
+	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
+	u8 *iv = qat_req->skcipher_req->iv;
+
+	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+
+	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
+		crypto_cipher_encrypt_one(ctx->tweak,
+					  (u8 *)cipher_param->u.cipher_IV_array,
+					  iv);
+	else
+		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
 }
 
 static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
···
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
+
+	qat_alg_set_req_iv(qat_req);
 
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
···
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = req->cryptlen;
 	cipher_param->cipher_offset = 0;
-	memcpy(cipher_param->u.cipher_IV_array, req->iv, AES_BLOCK_SIZE);
 
+	qat_alg_set_req_iv(qat_req);
 	qat_alg_update_iv(qat_req);
 
 	do {
···
 	if (IS_ERR(ctx->ftfm))
 		return PTR_ERR(ctx->ftfm);
 
+	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
+	if (IS_ERR(ctx->tweak)) {
+		crypto_free_skcipher(ctx->ftfm);
+		return PTR_ERR(ctx->tweak);
+	}
+
 	reqsize = max(sizeof(struct qat_crypto_request),
 		      sizeof(struct skcipher_request) +
 		      crypto_skcipher_reqsize(ctx->ftfm));
···
 
 	if (ctx->ftfm)
 		crypto_free_skcipher(ctx->ftfm);
+
+	if (ctx->tweak)
+		crypto_free_cipher(ctx->tweak);
 
 	qat_alg_skcipher_exit_tfm(tfm);
 }
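
A note on the tweak pre-computation above: in the XTS construction the
initial tweak is the IV encrypted with the second half of the XTS key,
which is what qat_alg_set_req_iv() derives in software through ctx->tweak
on AES-v2 capable devices. The sketch below restates that relationship
with the library AES interface; it is illustrative only and not part of
the patch:

  #include <crypto/aes.h>

  /* T0 = E_K2(IV): K2 is the second half of the XTS key, pre-expanded */
  static void example_initial_tweak(const struct crypto_aes_ctx *k2,
                                    const u8 iv[AES_BLOCK_SIZE],
                                    u8 tweak[AES_BLOCK_SIZE])
  {
          aes_encrypt(k2, tweak, iv);
  }

Similarly, since hardware key conversion is not available in this
configuration, qat_alg_xts_reverse_key() derives the decryption key
expected by the UCS slice from the last round key(s) of the expanded
encryption schedule, in place in the content descriptor.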