Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: caam - add Derived Key Protocol (DKP) support

Offload split key generation to the CAAM engine, using DKP.
DKP is supported starting with Era 6.

Note that the way assoclen is transmitted from the job descriptor
to the shared descriptor changes - DPOVRD register is used instead
of MATH3 (where available), since DKP protocol thrashes the MATH
registers.

The replacement of MDHA split key generation with DKP has the side
effect of the crypto engine writing the authentication key, and thus
the DMA mapping direction for the buffer holding the key has to change
from DMA_TO_DEVICE to DMA_BIDIRECTIONAL.
There are two cases:
- key is inlined in the descriptor - the descriptor buffer mapping changes
- key is referenced - the key buffer mapping changes

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Horia Geantă and committed by
Herbert Xu
7e0880b9 9fe712df

+384 -171
+79 -33
drivers/crypto/caam/caamalg.c
··· 108 108 dma_addr_t sh_desc_dec_dma; 109 109 dma_addr_t sh_desc_givenc_dma; 110 110 dma_addr_t key_dma; 111 + enum dma_data_direction dir; 111 112 struct device *jrdev; 112 113 struct alginfo adata; 113 114 struct alginfo cdata; ··· 119 118 { 120 119 struct caam_ctx *ctx = crypto_aead_ctx(aead); 121 120 struct device *jrdev = ctx->jrdev; 121 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 122 122 u32 *desc; 123 123 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - 124 124 ctx->adata.keylen_pad; ··· 138 136 139 137 /* aead_encrypt shared descriptor */ 140 138 desc = ctx->sh_desc_enc; 141 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); 139 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, 140 + ctrlpriv->era); 142 141 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 143 - desc_bytes(desc), DMA_TO_DEVICE); 142 + desc_bytes(desc), ctx->dir); 144 143 145 144 /* 146 145 * Job Descriptor and Shared Descriptors ··· 157 154 158 155 /* aead_decrypt shared descriptor */ 159 156 desc = ctx->sh_desc_dec; 160 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); 157 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, 158 + ctrlpriv->era); 161 159 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 162 - desc_bytes(desc), DMA_TO_DEVICE); 160 + desc_bytes(desc), ctx->dir); 163 161 164 162 return 0; 165 163 } ··· 172 168 unsigned int ivsize = crypto_aead_ivsize(aead); 173 169 struct caam_ctx *ctx = crypto_aead_ctx(aead); 174 170 struct device *jrdev = ctx->jrdev; 171 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 175 172 u32 ctx1_iv_off = 0; 176 173 u32 *desc, *nonce = NULL; 177 174 u32 inl_mask; ··· 239 234 desc = ctx->sh_desc_enc; 240 235 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, 241 236 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, 242 - false); 237 + false, ctrlpriv->era); 243 238 dma_sync_single_for_device(jrdev, 
ctx->sh_desc_enc_dma, 244 - desc_bytes(desc), DMA_TO_DEVICE); 239 + desc_bytes(desc), ctx->dir); 245 240 246 241 skip_enc: 247 242 /* ··· 271 266 desc = ctx->sh_desc_dec; 272 267 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, 273 268 ctx->authsize, alg->caam.geniv, is_rfc3686, 274 - nonce, ctx1_iv_off, false); 269 + nonce, ctx1_iv_off, false, ctrlpriv->era); 275 270 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 276 - desc_bytes(desc), DMA_TO_DEVICE); 271 + desc_bytes(desc), ctx->dir); 277 272 278 273 if (!alg->caam.geniv) 279 274 goto skip_givenc; ··· 305 300 desc = ctx->sh_desc_enc; 306 301 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, 307 302 ctx->authsize, is_rfc3686, nonce, 308 - ctx1_iv_off, false); 303 + ctx1_iv_off, false, ctrlpriv->era); 309 304 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 310 - desc_bytes(desc), DMA_TO_DEVICE); 305 + desc_bytes(desc), ctx->dir); 311 306 312 307 skip_givenc: 313 308 return 0; ··· 351 346 desc = ctx->sh_desc_enc; 352 347 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); 353 348 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 354 - desc_bytes(desc), DMA_TO_DEVICE); 349 + desc_bytes(desc), ctx->dir); 355 350 356 351 /* 357 352 * Job Descriptor and Shared Descriptors ··· 368 363 desc = ctx->sh_desc_dec; 369 364 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); 370 365 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 371 - desc_bytes(desc), DMA_TO_DEVICE); 366 + desc_bytes(desc), ctx->dir); 372 367 373 368 return 0; 374 369 } ··· 410 405 desc = ctx->sh_desc_enc; 411 406 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); 412 407 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 413 - desc_bytes(desc), DMA_TO_DEVICE); 408 + desc_bytes(desc), ctx->dir); 414 409 415 410 /* 416 411 * Job Descriptor and Shared Descriptors ··· 427 422 desc = ctx->sh_desc_dec; 428 423 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); 429 424 
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 430 - desc_bytes(desc), DMA_TO_DEVICE); 425 + desc_bytes(desc), ctx->dir); 431 426 432 427 return 0; 433 428 } ··· 470 465 desc = ctx->sh_desc_enc; 471 466 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); 472 467 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 473 - desc_bytes(desc), DMA_TO_DEVICE); 468 + desc_bytes(desc), ctx->dir); 474 469 475 470 /* 476 471 * Job Descriptor and Shared Descriptors ··· 487 482 desc = ctx->sh_desc_dec; 488 483 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); 489 484 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 490 - desc_bytes(desc), DMA_TO_DEVICE); 485 + desc_bytes(desc), ctx->dir); 491 486 492 487 return 0; 493 488 } ··· 508 503 { 509 504 struct caam_ctx *ctx = crypto_aead_ctx(aead); 510 505 struct device *jrdev = ctx->jrdev; 506 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 511 507 struct crypto_authenc_keys keys; 512 508 int ret = 0; 513 509 ··· 523 517 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 524 518 #endif 525 519 520 + /* 521 + * If DKP is supported, use it in the shared descriptor to generate 522 + * the split key. 
523 + */ 524 + if (ctrlpriv->era >= 6) { 525 + ctx->adata.keylen = keys.authkeylen; 526 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 527 + OP_ALG_ALGSEL_MASK); 528 + 529 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 530 + goto badkey; 531 + 532 + memcpy(ctx->key, keys.authkey, keys.authkeylen); 533 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 534 + keys.enckeylen); 535 + dma_sync_single_for_device(jrdev, ctx->key_dma, 536 + ctx->adata.keylen_pad + 537 + keys.enckeylen, ctx->dir); 538 + goto skip_split_key; 539 + } 540 + 526 541 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, 527 542 keys.authkeylen, CAAM_MAX_KEY_SIZE - 528 543 keys.enckeylen); ··· 554 527 /* postpend encryption key to auth split key */ 555 528 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 556 529 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 557 - keys.enckeylen, DMA_TO_DEVICE); 530 + keys.enckeylen, ctx->dir); 558 531 #ifdef DEBUG 559 532 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 560 533 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 561 534 ctx->adata.keylen_pad + keys.enckeylen, 1); 562 535 #endif 536 + 537 + skip_split_key: 563 538 ctx->cdata.keylen = keys.enckeylen; 564 539 return aead_set_sh_desc(aead); 565 540 badkey: ··· 581 552 #endif 582 553 583 554 memcpy(ctx->key, key, keylen); 584 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 555 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); 585 556 ctx->cdata.keylen = keylen; 586 557 587 558 return gcm_set_sh_desc(aead); ··· 609 580 */ 610 581 ctx->cdata.keylen = keylen - 4; 611 582 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 612 - DMA_TO_DEVICE); 583 + ctx->dir); 613 584 return rfc4106_set_sh_desc(aead); 614 585 } 615 586 ··· 635 606 */ 636 607 ctx->cdata.keylen = keylen - 4; 637 608 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 
638 - DMA_TO_DEVICE); 609 + ctx->dir); 639 610 return rfc4543_set_sh_desc(aead); 640 611 } 641 612 ··· 685 656 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, 686 657 ctx1_iv_off); 687 658 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 688 - desc_bytes(desc), DMA_TO_DEVICE); 659 + desc_bytes(desc), ctx->dir); 689 660 690 661 /* ablkcipher_decrypt shared descriptor */ 691 662 desc = ctx->sh_desc_dec; 692 663 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, 693 664 ctx1_iv_off); 694 665 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 695 - desc_bytes(desc), DMA_TO_DEVICE); 666 + desc_bytes(desc), ctx->dir); 696 667 697 668 /* ablkcipher_givencrypt shared descriptor */ 698 669 desc = ctx->sh_desc_givenc; 699 670 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, 700 671 ctx1_iv_off); 701 672 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, 702 - desc_bytes(desc), DMA_TO_DEVICE); 673 + desc_bytes(desc), ctx->dir); 703 674 704 675 return 0; 705 676 } ··· 726 697 desc = ctx->sh_desc_enc; 727 698 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); 728 699 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 729 - desc_bytes(desc), DMA_TO_DEVICE); 700 + desc_bytes(desc), ctx->dir); 730 701 731 702 /* xts_ablkcipher_decrypt shared descriptor */ 732 703 desc = ctx->sh_desc_dec; 733 704 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); 734 705 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 735 - desc_bytes(desc), DMA_TO_DEVICE); 706 + desc_bytes(desc), ctx->dir); 736 707 737 708 return 0; 738 709 } ··· 1004 975 append_seq_out_ptr(desc, dst_dma, 1005 976 req->assoclen + req->cryptlen - authsize, 1006 977 out_options); 1007 - 1008 - /* REG3 = assoclen */ 1009 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1010 978 } 1011 979 1012 980 static void init_gcm_job(struct aead_request *req, ··· 1018 992 unsigned int last; 1019 993 1020 994 init_aead_job(req, edesc, 
all_contig, encrypt); 995 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1021 996 1022 997 /* BUG This should not be specific to generic GCM. */ 1023 998 last = 0; ··· 1045 1018 struct caam_aead_alg, aead); 1046 1019 unsigned int ivsize = crypto_aead_ivsize(aead); 1047 1020 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1021 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 1048 1022 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 1049 1023 OP_ALG_AAI_CTR_MOD128); 1050 1024 const bool is_rfc3686 = alg->caam.rfc3686; ··· 1068 1040 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; 1069 1041 1070 1042 init_aead_job(req, edesc, all_contig, encrypt); 1043 + 1044 + /* 1045 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports 1046 + * having DPOVRD as destination. 1047 + */ 1048 + if (ctrlpriv->era < 3) 1049 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1050 + else 1051 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); 1071 1052 1072 1053 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) 1073 1054 append_load_as_imm(desc, req->iv, ivsize, ··· 3261 3224 struct caam_alg_entry caam; 3262 3225 }; 3263 3226 3264 - static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) 3227 + static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 3228 + bool uses_dkp) 3265 3229 { 3266 3230 dma_addr_t dma_addr; 3231 + struct caam_drv_private *priv; 3267 3232 3268 3233 ctx->jrdev = caam_jr_alloc(); 3269 3234 if (IS_ERR(ctx->jrdev)) { ··· 3273 3234 return PTR_ERR(ctx->jrdev); 3274 3235 } 3275 3236 3237 + priv = dev_get_drvdata(ctx->jrdev->parent); 3238 + if (priv->era >= 6 && uses_dkp) 3239 + ctx->dir = DMA_BIDIRECTIONAL; 3240 + else 3241 + ctx->dir = DMA_TO_DEVICE; 3242 + 3276 3243 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, 3277 3244 offsetof(struct caam_ctx, 3278 3245 sh_desc_enc_dma), 3279 - DMA_TO_DEVICE, 
DMA_ATTR_SKIP_CPU_SYNC); 3246 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 3280 3247 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 3281 3248 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); 3282 3249 caam_jr_free(ctx->jrdev); ··· 3310 3265 container_of(alg, struct caam_crypto_alg, crypto_alg); 3311 3266 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 3312 3267 3313 - return caam_init_common(ctx, &caam_alg->caam); 3268 + return caam_init_common(ctx, &caam_alg->caam, false); 3314 3269 } 3315 3270 3316 3271 static int caam_aead_init(struct crypto_aead *tfm) ··· 3320 3275 container_of(alg, struct caam_aead_alg, aead); 3321 3276 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 3322 3277 3323 - return caam_init_common(ctx, &caam_alg->caam); 3278 + return caam_init_common(ctx, &caam_alg->caam, 3279 + alg->setkey == aead_setkey); 3324 3280 } 3325 3281 3326 3282 static void caam_exit_common(struct caam_ctx *ctx) 3327 3283 { 3328 3284 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, 3329 3285 offsetof(struct caam_ctx, sh_desc_enc_dma), 3330 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 3286 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 3331 3287 caam_jr_free(ctx->jrdev); 3332 3288 } 3333 3289
+107 -69
drivers/crypto/caam/caamalg_desc.c
··· 45 45 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor 46 46 * (non-protocol) with no (null) encryption. 47 47 * @desc: pointer to buffer used for descriptor construction 48 - * @adata: pointer to authentication transform definitions. Note that since a 49 - * split key is to be used, the size of the split key itself is 50 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 51 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 48 + * @adata: pointer to authentication transform definitions. 49 + * A split key is required for SEC Era < 6; the size of the split key 50 + * is specified in this case. Valid algorithm values - one of 51 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 52 + * with OP_ALG_AAI_HMAC_PRECOMP. 52 53 * @icvsize: integrity check value (ICV) size (truncated or full) 53 - * 54 - * Note: Requires an MDHA split key. 54 + * @era: SEC Era 55 55 */ 56 56 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 57 - unsigned int icvsize) 57 + unsigned int icvsize, int era) 58 58 { 59 59 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; 60 60 ··· 63 63 /* Skip if already shared */ 64 64 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 65 65 JUMP_COND_SHRD); 66 - if (adata->key_inline) 67 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, 68 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | 69 - KEY_ENC); 70 - else 71 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | 72 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 66 + if (era < 6) { 67 + if (adata->key_inline) 68 + append_key_as_imm(desc, adata->key_virt, 69 + adata->keylen_pad, adata->keylen, 70 + CLASS_2 | KEY_DEST_MDHA_SPLIT | 71 + KEY_ENC); 72 + else 73 + append_key(desc, adata->key_dma, adata->keylen, 74 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); 75 + } else { 76 + append_proto_dkp(desc, adata); 77 + } 73 78 set_jump_tgt_here(desc, key_jump_cmd); 74 79 75 80 /* assoclen + 
cryptlen = seqinlen */ ··· 126 121 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor 127 122 * (non-protocol) with no (null) decryption. 128 123 * @desc: pointer to buffer used for descriptor construction 129 - * @adata: pointer to authentication transform definitions. Note that since a 130 - * split key is to be used, the size of the split key itself is 131 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 132 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 124 + * @adata: pointer to authentication transform definitions. 125 + * A split key is required for SEC Era < 6; the size of the split key 126 + * is specified in this case. Valid algorithm values - one of 127 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 128 + * with OP_ALG_AAI_HMAC_PRECOMP. 133 129 * @icvsize: integrity check value (ICV) size (truncated or full) 134 - * 135 - * Note: Requires an MDHA split key. 130 + * @era: SEC Era 136 131 */ 137 132 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, 138 - unsigned int icvsize) 133 + unsigned int icvsize, int era) 139 134 { 140 135 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; 141 136 ··· 144 139 /* Skip if already shared */ 145 140 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 146 141 JUMP_COND_SHRD); 147 - if (adata->key_inline) 148 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, 149 - adata->keylen, CLASS_2 | 150 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 151 - else 152 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | 153 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 142 + if (era < 6) { 143 + if (adata->key_inline) 144 + append_key_as_imm(desc, adata->key_virt, 145 + adata->keylen_pad, adata->keylen, 146 + CLASS_2 | KEY_DEST_MDHA_SPLIT | 147 + KEY_ENC); 148 + else 149 + append_key(desc, adata->key_dma, adata->keylen, 150 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); 151 + } else { 152 + 
append_proto_dkp(desc, adata); 153 + } 154 154 set_jump_tgt_here(desc, key_jump_cmd); 155 155 156 156 /* Class 2 operation */ ··· 214 204 static void init_sh_desc_key_aead(u32 * const desc, 215 205 struct alginfo * const cdata, 216 206 struct alginfo * const adata, 217 - const bool is_rfc3686, u32 *nonce) 207 + const bool is_rfc3686, u32 *nonce, int era) 218 208 { 219 209 u32 *key_jump_cmd; 220 210 unsigned int enckeylen = cdata->keylen; ··· 234 224 if (is_rfc3686) 235 225 enckeylen -= CTR_RFC3686_NONCE_SIZE; 236 226 237 - if (adata->key_inline) 238 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, 239 - adata->keylen, CLASS_2 | 240 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 241 - else 242 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | 243 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 227 + if (era < 6) { 228 + if (adata->key_inline) 229 + append_key_as_imm(desc, adata->key_virt, 230 + adata->keylen_pad, adata->keylen, 231 + CLASS_2 | KEY_DEST_MDHA_SPLIT | 232 + KEY_ENC); 233 + else 234 + append_key(desc, adata->key_dma, adata->keylen, 235 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); 236 + } else { 237 + append_proto_dkp(desc, adata); 238 + } 244 239 245 240 if (cdata->key_inline) 246 241 append_key_as_imm(desc, cdata->key_virt, enckeylen, ··· 276 261 * @cdata: pointer to block cipher transform definitions 277 262 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 278 263 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 279 - * @adata: pointer to authentication transform definitions. Note that since a 280 - * split key is to be used, the size of the split key itself is 281 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 282 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 264 + * @adata: pointer to authentication transform definitions. 265 + * A split key is required for SEC Era < 6; the size of the split key 266 + * is specified in this case. 
Valid algorithm values - one of 267 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 268 + * with OP_ALG_AAI_HMAC_PRECOMP. 283 269 * @ivsize: initialization vector size 284 270 * @icvsize: integrity check value (ICV) size (truncated or full) 285 271 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 286 272 * @nonce: pointer to rfc3686 nonce 287 273 * @ctx1_iv_off: IV offset in CONTEXT1 register 288 274 * @is_qi: true when called from caam/qi 289 - * 290 - * Note: Requires an MDHA split key. 275 + * @era: SEC Era 291 276 */ 292 277 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, 293 278 struct alginfo *adata, unsigned int ivsize, 294 279 unsigned int icvsize, const bool is_rfc3686, 295 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) 280 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, 281 + int era) 296 282 { 297 283 /* Note: Context registers are saved. */ 298 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 284 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 299 285 300 286 /* Class 2 operation */ 301 287 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ··· 322 306 } 323 307 324 308 /* Read and write assoclen bytes */ 325 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 326 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 309 + if (is_qi || era < 3) { 310 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 311 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 312 + } else { 313 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 314 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 315 + } 327 316 328 317 /* Skip assoc data */ 329 318 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ··· 371 350 * @cdata: pointer to block cipher transform definitions 372 351 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 373 352 * with 
OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 374 - * @adata: pointer to authentication transform definitions. Note that since a 375 - * split key is to be used, the size of the split key itself is 376 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 377 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 353 + * @adata: pointer to authentication transform definitions. 354 + * A split key is required for SEC Era < 6; the size of the split key 355 + * is specified in this case. Valid algorithm values - one of 356 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 357 + * with OP_ALG_AAI_HMAC_PRECOMP. 378 358 * @ivsize: initialization vector size 379 359 * @icvsize: integrity check value (ICV) size (truncated or full) 380 360 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 381 361 * @nonce: pointer to rfc3686 nonce 382 362 * @ctx1_iv_off: IV offset in CONTEXT1 register 383 363 * @is_qi: true when called from caam/qi 384 - * 385 - * Note: Requires an MDHA split key. 364 + * @era: SEC Era 386 365 */ 387 366 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, 388 367 struct alginfo *adata, unsigned int ivsize, 389 368 unsigned int icvsize, const bool geniv, 390 369 const bool is_rfc3686, u32 *nonce, 391 - const u32 ctx1_iv_off, const bool is_qi) 370 + const u32 ctx1_iv_off, const bool is_qi, int era) 392 371 { 393 372 /* Note: Context registers are saved. 
*/ 394 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 373 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 395 374 396 375 /* Class 2 operation */ 397 376 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ··· 418 397 } 419 398 420 399 /* Read and write assoclen bytes */ 421 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 422 - if (geniv) 423 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); 424 - else 425 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 400 + if (is_qi || era < 3) { 401 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 402 + if (geniv) 403 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, 404 + ivsize); 405 + else 406 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 407 + CAAM_CMD_SZ); 408 + } else { 409 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 410 + if (geniv) 411 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, 412 + ivsize); 413 + else 414 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, 415 + CAAM_CMD_SZ); 416 + } 426 417 427 418 /* Skip assoc data */ 428 419 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ··· 489 456 * @cdata: pointer to block cipher transform definitions 490 457 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 491 458 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 492 - * @adata: pointer to authentication transform definitions. Note that since a 493 - * split key is to be used, the size of the split key itself is 494 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 495 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 459 + * @adata: pointer to authentication transform definitions. 460 + * A split key is required for SEC Era < 6; the size of the split key 461 + * is specified in this case. 
Valid algorithm values - one of 462 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 463 + * with OP_ALG_AAI_HMAC_PRECOMP. 496 464 * @ivsize: initialization vector size 497 465 * @icvsize: integrity check value (ICV) size (truncated or full) 498 466 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 499 467 * @nonce: pointer to rfc3686 nonce 500 468 * @ctx1_iv_off: IV offset in CONTEXT1 register 501 469 * @is_qi: true when called from caam/qi 502 - * 503 - * Note: Requires an MDHA split key. 470 + * @era: SEC Era 504 471 */ 505 472 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, 506 473 struct alginfo *adata, unsigned int ivsize, 507 474 unsigned int icvsize, const bool is_rfc3686, 508 475 u32 *nonce, const u32 ctx1_iv_off, 509 - const bool is_qi) 476 + const bool is_qi, int era) 510 477 { 511 478 u32 geniv, moveiv; 512 479 513 480 /* Note: Context registers are saved. */ 514 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 481 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 515 482 516 483 if (is_qi) { 517 484 u32 *wait_load_cmd; ··· 561 528 OP_ALG_ENCRYPT); 562 529 563 530 /* Read and write assoclen bytes */ 564 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 565 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 531 + if (is_qi || era < 3) { 532 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 533 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 534 + } else { 535 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 536 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 537 + } 566 538 567 539 /* Skip assoc data */ 568 540 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+5 -5
drivers/crypto/caam/caamalg_desc.h
··· 43 43 15 * CAAM_CMD_SZ) 44 44 45 45 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 46 - unsigned int icvsize); 46 + unsigned int icvsize, int era); 47 47 48 48 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, 49 - unsigned int icvsize); 49 + unsigned int icvsize, int era); 50 50 51 51 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, 52 52 struct alginfo *adata, unsigned int ivsize, 53 53 unsigned int icvsize, const bool is_rfc3686, 54 54 u32 *nonce, const u32 ctx1_iv_off, 55 - const bool is_qi); 55 + const bool is_qi, int era); 56 56 57 57 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, 58 58 struct alginfo *adata, unsigned int ivsize, 59 59 unsigned int icvsize, const bool geniv, 60 60 const bool is_rfc3686, u32 *nonce, 61 - const u32 ctx1_iv_off, const bool is_qi); 61 + const u32 ctx1_iv_off, const bool is_qi, int era); 62 62 63 63 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, 64 64 struct alginfo *adata, unsigned int ivsize, 65 65 unsigned int icvsize, const bool is_rfc3686, 66 66 u32 *nonce, const u32 ctx1_iv_off, 67 - const bool is_qi); 67 + const bool is_qi, int era); 68 68 69 69 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, 70 70 unsigned int icvsize);
+43 -11
drivers/crypto/caam/caamalg_qi.c
··· 53 53 u32 sh_desc_givenc[DESC_MAX_USED_LEN]; 54 54 u8 key[CAAM_MAX_KEY_SIZE]; 55 55 dma_addr_t key_dma; 56 + enum dma_data_direction dir; 56 57 struct alginfo adata; 57 58 struct alginfo cdata; 58 59 unsigned int authsize; ··· 75 74 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 76 75 OP_ALG_AAI_CTR_MOD128); 77 76 const bool is_rfc3686 = alg->caam.rfc3686; 77 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 78 78 79 79 if (!ctx->cdata.keylen || !ctx->authsize) 80 80 return 0; ··· 126 124 127 125 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 128 126 ivsize, ctx->authsize, is_rfc3686, nonce, 129 - ctx1_iv_off, true); 127 + ctx1_iv_off, true, ctrlpriv->era); 130 128 131 129 skip_enc: 132 130 /* aead_decrypt shared descriptor */ ··· 151 149 152 150 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, 153 151 ivsize, ctx->authsize, alg->caam.geniv, 154 - is_rfc3686, nonce, ctx1_iv_off, true); 152 + is_rfc3686, nonce, ctx1_iv_off, true, 153 + ctrlpriv->era); 155 154 156 155 if (!alg->caam.geniv) 157 156 goto skip_givenc; ··· 179 176 180 177 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 181 178 ivsize, ctx->authsize, is_rfc3686, nonce, 182 - ctx1_iv_off, true); 179 + ctx1_iv_off, true, ctrlpriv->era); 183 180 184 181 skip_givenc: 185 182 return 0; ··· 200 197 { 201 198 struct caam_ctx *ctx = crypto_aead_ctx(aead); 202 199 struct device *jrdev = ctx->jrdev; 200 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 203 201 struct crypto_authenc_keys keys; 204 202 int ret = 0; 205 203 ··· 215 211 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 216 212 #endif 217 213 214 + /* 215 + * If DKP is supported, use it in the shared descriptor to generate 216 + * the split key. 
217 + */ 218 + if (ctrlpriv->era >= 6) { 219 + ctx->adata.keylen = keys.authkeylen; 220 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 221 + OP_ALG_ALGSEL_MASK); 222 + 223 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 224 + goto badkey; 225 + 226 + memcpy(ctx->key, keys.authkey, keys.authkeylen); 227 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 228 + keys.enckeylen); 229 + dma_sync_single_for_device(jrdev, ctx->key_dma, 230 + ctx->adata.keylen_pad + 231 + keys.enckeylen, ctx->dir); 232 + goto skip_split_key; 233 + } 234 + 218 235 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, 219 236 keys.authkeylen, CAAM_MAX_KEY_SIZE - 220 237 keys.enckeylen); ··· 245 220 /* postpend encryption key to auth split key */ 246 221 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 247 222 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 248 - keys.enckeylen, DMA_TO_DEVICE); 223 + keys.enckeylen, ctx->dir); 249 224 #ifdef DEBUG 250 225 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 251 226 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 252 227 ctx->adata.keylen_pad + keys.enckeylen, 1); 253 228 #endif 254 229 230 + skip_split_key: 255 231 ctx->cdata.keylen = keys.enckeylen; 256 232 257 233 ret = aead_set_sh_desc(aead); ··· 2145 2119 struct caam_alg_entry caam; 2146 2120 }; 2147 2121 2148 - static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) 2122 + static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2123 + bool uses_dkp) 2149 2124 { 2150 2125 struct caam_drv_private *priv; 2151 2126 ··· 2160 2133 return PTR_ERR(ctx->jrdev); 2161 2134 } 2162 2135 2136 + priv = dev_get_drvdata(ctx->jrdev->parent); 2137 + if (priv->era >= 6 && uses_dkp) 2138 + ctx->dir = DMA_BIDIRECTIONAL; 2139 + else 2140 + ctx->dir = DMA_TO_DEVICE; 2141 + 2163 2142 ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), 2164 - DMA_TO_DEVICE); 
2143 + ctx->dir); 2165 2144 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 2166 2145 dev_err(ctx->jrdev, "unable to map key\n"); 2167 2146 caam_jr_free(ctx->jrdev); ··· 2178 2145 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2179 2146 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2180 2147 2181 - priv = dev_get_drvdata(ctx->jrdev->parent); 2182 2148 ctx->qidev = priv->qidev; 2183 2149 2184 2150 spin_lock_init(&ctx->lock); ··· 2195 2163 crypto_alg); 2196 2164 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 2197 2165 2198 - return caam_init_common(ctx, &caam_alg->caam); 2166 + return caam_init_common(ctx, &caam_alg->caam, false); 2199 2167 } 2200 2168 2201 2169 static int caam_aead_init(struct crypto_aead *tfm) ··· 2205 2173 aead); 2206 2174 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 2207 2175 2208 - return caam_init_common(ctx, &caam_alg->caam); 2176 + return caam_init_common(ctx, &caam_alg->caam, 2177 + alg->setkey == aead_setkey); 2209 2178 } 2210 2179 2211 2180 static void caam_exit_common(struct caam_ctx *ctx) ··· 2215 2182 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2216 2183 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); 2217 2184 2218 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), 2219 - DMA_TO_DEVICE); 2185 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); 2220 2186 2221 2187 caam_jr_free(ctx->jrdev); 2222 2188 }
+50 -23
drivers/crypto/caam/caamhash.c
··· 107 107 dma_addr_t sh_desc_update_first_dma; 108 108 dma_addr_t sh_desc_fin_dma; 109 109 dma_addr_t sh_desc_digest_dma; 110 + enum dma_data_direction dir; 110 111 struct device *jrdev; 111 112 u8 key[CAAM_MAX_HASH_KEY_SIZE]; 112 113 int ctx_len; ··· 242 241 * read and write to seqout 243 242 */ 244 243 static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, 245 - struct caam_hash_ctx *ctx, bool import_ctx) 244 + struct caam_hash_ctx *ctx, bool import_ctx, 245 + int era) 246 246 { 247 247 u32 op = ctx->adata.algtype; 248 248 u32 *skip_key_load; ··· 256 254 skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 257 255 JUMP_COND_SHRD); 258 256 259 - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, 260 - ctx->adata.keylen, CLASS_2 | 261 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 257 + if (era < 6) 258 + append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, 259 + ctx->adata.keylen, CLASS_2 | 260 + KEY_DEST_MDHA_SPLIT | KEY_ENC); 261 + else 262 + append_proto_dkp(desc, &ctx->adata); 262 263 263 264 set_jump_tgt_here(desc, skip_key_load); 264 265 ··· 294 289 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 295 290 int digestsize = crypto_ahash_digestsize(ahash); 296 291 struct device *jrdev = ctx->jrdev; 292 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 297 293 u32 *desc; 294 + 295 + ctx->adata.key_virt = ctx->key; 298 296 299 297 /* ahash_update shared descriptor */ 300 298 desc = ctx->sh_desc_update; 301 - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); 299 + ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true, 300 + ctrlpriv->era); 302 301 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 303 - desc_bytes(desc), DMA_TO_DEVICE); 302 + desc_bytes(desc), ctx->dir); 304 303 #ifdef DEBUG 305 304 print_hex_dump(KERN_ERR, 306 305 "ahash update shdesc@"__stringify(__LINE__)": ", ··· 313 304 314 305 /* ahash_update_first shared descriptor */ 315 306 desc = 
ctx->sh_desc_update_first; 316 - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); 307 + ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false, 308 + ctrlpriv->era); 317 309 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 318 - desc_bytes(desc), DMA_TO_DEVICE); 310 + desc_bytes(desc), ctx->dir); 319 311 #ifdef DEBUG 320 312 print_hex_dump(KERN_ERR, 321 313 "ahash update first shdesc@"__stringify(__LINE__)": ", ··· 325 315 326 316 /* ahash_final shared descriptor */ 327 317 desc = ctx->sh_desc_fin; 328 - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); 318 + ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true, 319 + ctrlpriv->era); 329 320 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 330 - desc_bytes(desc), DMA_TO_DEVICE); 321 + desc_bytes(desc), ctx->dir); 331 322 #ifdef DEBUG 332 323 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 333 324 DUMP_PREFIX_ADDRESS, 16, 4, desc, ··· 337 326 338 327 /* ahash_digest shared descriptor */ 339 328 desc = ctx->sh_desc_digest; 340 - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); 329 + ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false, 330 + ctrlpriv->era); 341 331 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 342 - desc_bytes(desc), DMA_TO_DEVICE); 332 + desc_bytes(desc), ctx->dir); 343 333 #ifdef DEBUG 344 334 print_hex_dump(KERN_ERR, 345 335 "ahash digest shdesc@"__stringify(__LINE__)": ", ··· 433 421 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 434 422 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 435 423 int digestsize = crypto_ahash_digestsize(ahash); 424 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 436 425 int ret; 437 426 u8 *hashed_key = NULL; 438 427 ··· 454 441 key = hashed_key; 455 442 } 456 443 457 - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, 458 - CAAM_MAX_HASH_KEY_SIZE); 459 - 
if (ret) 460 - goto bad_free_key; 444 + /* 445 + * If DKP is supported, use it in the shared descriptor to generate 446 + * the split key. 447 + */ 448 + if (ctrlpriv->era >= 6) { 449 + ctx->adata.key_inline = true; 450 + ctx->adata.keylen = keylen; 451 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 452 + OP_ALG_ALGSEL_MASK); 461 453 462 - #ifdef DEBUG 463 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 464 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 465 - ctx->adata.keylen_pad, 1); 466 - #endif 454 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 455 + goto bad_free_key; 456 + 457 + memcpy(ctx->key, key, keylen); 458 + } else { 459 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 460 + keylen, CAAM_MAX_HASH_KEY_SIZE); 461 + if (ret) 462 + goto bad_free_key; 463 + } 467 464 468 465 kfree(hashed_key); 469 466 return ahash_set_sh_desc(ahash); ··· 1738 1715 HASH_MSG_LEN + 64, 1739 1716 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1740 1717 dma_addr_t dma_addr; 1718 + struct caam_drv_private *priv; 1741 1719 1742 1720 /* 1743 1721 * Get a Job ring from Job Ring driver to ensure in-order ··· 1750 1726 return PTR_ERR(ctx->jrdev); 1751 1727 } 1752 1728 1729 + priv = dev_get_drvdata(ctx->jrdev->parent); 1730 + ctx->dir = priv->era >= 6 ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1731 + 1753 1732 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1754 1733 offsetof(struct caam_hash_ctx, 1755 1734 sh_desc_update_dma), 1756 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 1735 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1757 1736 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1758 1737 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1759 1738 caam_jr_free(ctx->jrdev); ··· 1791 1764 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1792 1765 offsetof(struct caam_hash_ctx, 1793 1766 sh_desc_update_dma), 1794 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 1767 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1795 1768 caam_jr_free(ctx->jrdev); 1796 1769 } 1797 1770
+29
drivers/crypto/caam/desc.h
··· 444 444 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) 445 445 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) 446 446 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) 447 + #define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT) 448 + #define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT) 449 + #define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT) 450 + #define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT) 451 + #define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT) 452 + #define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT) 453 + #define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT) 454 + #define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT) 455 + #define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT) 456 + #define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT) 457 + #define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT) 458 + #define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT) 447 459 448 460 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ 449 461 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) ··· 1105 1093 /* MacSec protinfos */ 1106 1094 #define OP_PCL_MACSEC 0x0001 1107 1095 1096 + /* Derived Key Protocol (DKP) Protinfo */ 1097 + #define OP_PCL_DKP_SRC_SHIFT 14 1098 + #define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT) 1099 + #define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT) 1100 + #define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT) 1101 + #define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT) 1102 + #define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT) 1103 + #define OP_PCL_DKP_DST_SHIFT 12 1104 + #define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT) 1105 + #define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT) 1106 + #define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT) 1107 + #define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT) 1108 + #define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT) 1109 + #define OP_PCL_DKP_KEY_SHIFT 0 1110 + #define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT) 1111 + 
1108 1112 /* PKI unidirectional protocol protinfo bits */ 1109 1113 #define OP_PCL_PKPROT_TEST 0x0008 1110 1114 #define OP_PCL_PKPROT_DECRYPT 0x0004 ··· 1480 1452 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) 1481 1453 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) 1482 1454 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) 1455 + #define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT) 1483 1456 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) 1484 1457 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) 1485 1458 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+41
drivers/crypto/caam/desc_constr.h
··· 496 496 return (rem_bytes >= 0) ? 0 : -1; 497 497 } 498 498 499 + /** 500 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key 501 + * @desc: pointer to buffer used for descriptor construction 502 + * @adata: pointer to authentication transform definitions. 503 + * keylen should be the length of initial key, while keylen_pad 504 + * the length of the derived (split) key. 505 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, 506 + * SHA256, SHA384, SHA512}. 507 + */ 508 + static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) 509 + { 510 + u32 protid; 511 + 512 + /* 513 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} 514 + * to OP_PCLID_DKP_{MD5, SHA*} 515 + */ 516 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) | 517 + (0x20 << OP_ALG_ALGSEL_SHIFT); 518 + 519 + if (adata->key_inline) { 520 + int words; 521 + 522 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | 523 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | 524 + adata->keylen); 525 + append_data(desc, adata->key_virt, adata->keylen); 526 + 527 + /* Reserve space in descriptor buffer for the derived key */ 528 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - 529 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; 530 + if (words) 531 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); 532 + } else { 533 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | 534 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR | 535 + adata->keylen); 536 + append_ptr(desc, adata->key_dma); 537 + } 538 + } 539 + 499 540 #endif /* DESC_CONSTR_H */
-30
drivers/crypto/caam/key_gen.c
··· 11 11 #include "desc_constr.h" 12 12 #include "key_gen.h" 13 13 14 - /** 15 - * split_key_len - Compute MDHA split key length for a given algorithm 16 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 17 - * SHA224, SHA384, SHA512. 18 - * 19 - * Return: MDHA split key length 20 - */ 21 - static inline u32 split_key_len(u32 hash) 22 - { 23 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 24 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 25 - u32 idx; 26 - 27 - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; 28 - 29 - return (u32)(mdpadlen[idx] * 2); 30 - } 31 - 32 - /** 33 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm 34 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 35 - * SHA224, SHA384, SHA512. 36 - * 37 - * Return: MDHA split key pad length 38 - */ 39 - static inline u32 split_key_pad_len(u32 hash) 40 - { 41 - return ALIGN(split_key_len(hash), 16); 42 - } 43 - 44 14 void split_key_done(struct device *dev, u32 *desc, u32 err, 45 15 void *context) 46 16 {
+30
drivers/crypto/caam/key_gen.h
··· 6 6 * 7 7 */ 8 8 9 + /** 10 + * split_key_len - Compute MDHA split key length for a given algorithm 11 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 12 + * SHA224, SHA384, SHA512. 13 + * 14 + * Return: MDHA split key length 15 + */ 16 + static inline u32 split_key_len(u32 hash) 17 + { 18 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 19 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 20 + u32 idx; 21 + 22 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; 23 + 24 + return (u32)(mdpadlen[idx] * 2); 25 + } 26 + 27 + /** 28 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm 29 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 30 + * SHA224, SHA384, SHA512. 31 + * 32 + * Return: MDHA split key pad length 33 + */ 34 + static inline u32 split_key_pad_len(u32 hash) 35 + { 36 + return ALIGN(split_key_len(hash), 16); 37 + } 38 + 9 39 struct split_key_result { 10 40 struct completion completion; 11 41 int err;