Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: nx - don't abuse blkcipher_desc to pass iv around

The NX crypto driver is using 'struct blkcipher_desc' to pass the IV
around, even for AEADs (for which it creates the struct on the stack).
This is not appropriate since this structure is part of the "blkcipher"
API, which is deprecated and will be removed.

Just pass around the IV directly instead.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Eric Biggers and committed by Herbert Xu
7740bd51 713b2e72

+43 -57
+3 -2
drivers/crypto/nx/nx-aes-cbc.c
··· 72 72 do { 73 73 to_process = nbytes - processed; 74 74 75 - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, 76 - processed, csbcpb->cpb.aes_cbc.iv); 75 + rc = nx_build_sg_lists(nx_ctx, desc->info, dst, src, 76 + &to_process, processed, 77 + csbcpb->cpb.aes_cbc.iv); 77 78 if (rc) 78 79 goto out; 79 80
+14 -26
drivers/crypto/nx/nx-aes-ccm.c
··· 327 327 } 328 328 329 329 static int ccm_nx_decrypt(struct aead_request *req, 330 - struct blkcipher_desc *desc, 330 + u8 *iv, 331 331 unsigned int assoclen) 332 332 { 333 333 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); ··· 348 348 req->src, nbytes + req->assoclen, authsize, 349 349 SCATTERWALK_FROM_SG); 350 350 351 - rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen, 351 + rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen, 352 352 csbcpb->cpb.aes_ccm.in_pat_or_b0); 353 353 if (rc) 354 354 goto out; ··· 367 367 368 368 NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; 369 369 370 - rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, 370 + rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src, 371 371 &to_process, processed + req->assoclen, 372 372 csbcpb->cpb.aes_ccm.iv_or_ctr); 373 373 if (rc) ··· 381 381 /* for partial completion, copy following for next 382 382 * entry into loop... 383 383 */ 384 - memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); 384 + memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); 385 385 memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, 386 386 csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); 387 387 memcpy(csbcpb->cpb.aes_ccm.in_s0, ··· 405 405 } 406 406 407 407 static int ccm_nx_encrypt(struct aead_request *req, 408 - struct blkcipher_desc *desc, 408 + u8 *iv, 409 409 unsigned int assoclen) 410 410 { 411 411 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); ··· 418 418 419 419 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 420 420 421 - rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen, 421 + rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen, 422 422 csbcpb->cpb.aes_ccm.in_pat_or_b0); 423 423 if (rc) 424 424 goto out; ··· 436 436 437 437 NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; 438 438 439 - rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, 439 + rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src, 440 440 
&to_process, processed + req->assoclen, 441 441 csbcpb->cpb.aes_ccm.iv_or_ctr); 442 442 if (rc) ··· 450 450 /* for partial completion, copy following for next 451 451 * entry into loop... 452 452 */ 453 - memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); 453 + memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); 454 454 memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, 455 455 csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); 456 456 memcpy(csbcpb->cpb.aes_ccm.in_s0, ··· 481 481 { 482 482 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 483 483 struct nx_gcm_rctx *rctx = aead_request_ctx(req); 484 - struct blkcipher_desc desc; 485 484 u8 *iv = rctx->iv; 486 485 487 486 iv[0] = 3; 488 487 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); 489 488 memcpy(iv + 4, req->iv, 8); 490 489 491 - desc.info = iv; 492 - 493 - return ccm_nx_encrypt(req, &desc, req->assoclen - 8); 490 + return ccm_nx_encrypt(req, iv, req->assoclen - 8); 494 491 } 495 492 496 493 static int ccm_aes_nx_encrypt(struct aead_request *req) 497 494 { 498 - struct blkcipher_desc desc; 499 495 int rc; 500 496 501 - desc.info = req->iv; 502 - 503 - rc = crypto_ccm_check_iv(desc.info); 497 + rc = crypto_ccm_check_iv(req->iv); 504 498 if (rc) 505 499 return rc; 506 500 507 - return ccm_nx_encrypt(req, &desc, req->assoclen); 501 + return ccm_nx_encrypt(req, req->iv, req->assoclen); 508 502 } 509 503 510 504 static int ccm4309_aes_nx_decrypt(struct aead_request *req) 511 505 { 512 506 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 513 507 struct nx_gcm_rctx *rctx = aead_request_ctx(req); 514 - struct blkcipher_desc desc; 515 508 u8 *iv = rctx->iv; 516 509 517 510 iv[0] = 3; 518 511 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); 519 512 memcpy(iv + 4, req->iv, 8); 520 513 521 - desc.info = iv; 522 - 523 - return ccm_nx_decrypt(req, &desc, req->assoclen - 8); 514 + return ccm_nx_decrypt(req, iv, req->assoclen - 8); 524 515 } 525 516 526 517 static int ccm_aes_nx_decrypt(struct 
aead_request *req) 527 518 { 528 - struct blkcipher_desc desc; 529 519 int rc; 530 520 531 - desc.info = req->iv; 532 - 533 - rc = crypto_ccm_check_iv(desc.info); 521 + rc = crypto_ccm_check_iv(req->iv); 534 522 if (rc) 535 523 return rc; 536 524 537 - return ccm_nx_decrypt(req, &desc, req->assoclen); 525 + return ccm_nx_decrypt(req, req->iv, req->assoclen); 538 526 } 539 527 540 528 /* tell the block cipher walk routines that this is a stream cipher by
+3 -2
drivers/crypto/nx/nx-aes-ctr.c
··· 85 85 do { 86 86 to_process = nbytes - processed; 87 87 88 - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, 89 - processed, csbcpb->cpb.aes_ctr.iv); 88 + rc = nx_build_sg_lists(nx_ctx, desc->info, dst, src, 89 + &to_process, processed, 90 + csbcpb->cpb.aes_ctr.iv); 90 91 if (rc) 91 92 goto out; 92 93
+2 -2
drivers/crypto/nx/nx-aes-ecb.c
··· 72 72 do { 73 73 to_process = nbytes - processed; 74 74 75 - rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, 76 - processed, NULL); 75 + rc = nx_build_sg_lists(nx_ctx, NULL, dst, src, &to_process, 76 + processed, NULL); 77 77 if (rc) 78 78 goto out; 79 79
+10 -14
drivers/crypto/nx/nx-aes-gcm.c
··· 166 166 return rc; 167 167 } 168 168 169 - static int gmac(struct aead_request *req, struct blkcipher_desc *desc, 170 - unsigned int assoclen) 169 + static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen) 171 170 { 172 171 int rc; 173 172 struct nx_crypto_ctx *nx_ctx = ··· 189 190 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 190 191 191 192 /* Copy IV */ 192 - memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE); 193 + memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE); 193 194 194 195 do { 195 196 /* ··· 239 240 return rc; 240 241 } 241 242 242 - static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, 243 - int enc) 243 + static int gcm_empty(struct aead_request *req, const u8 *iv, int enc) 244 244 { 245 245 int rc; 246 246 struct nx_crypto_ctx *nx_ctx = ··· 266 268 len = AES_BLOCK_SIZE; 267 269 268 270 /* Encrypt the counter/IV */ 269 - in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info, 271 + in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv, 270 272 &len, nx_ctx->ap->sglen); 271 273 272 274 if (len != AES_BLOCK_SIZE) ··· 283 285 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 284 286 285 287 rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 286 - desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); 288 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); 287 289 if (rc) 288 290 goto out; 289 291 atomic_inc(&(nx_ctx->stats->aes_ops)); ··· 311 313 crypto_aead_ctx(crypto_aead_reqtfm(req)); 312 314 struct nx_gcm_rctx *rctx = aead_request_ctx(req); 313 315 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 314 - struct blkcipher_desc desc; 315 316 unsigned int nbytes = req->cryptlen; 316 317 unsigned int processed = 0, to_process; 317 318 unsigned long irq_flags; ··· 318 321 319 322 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 320 323 321 - desc.info = rctx->iv; 322 324 /* initialize the counter */ 323 - *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; 325 + *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1; 324 326 325 327 
if (nbytes == 0) { 326 328 if (assoclen == 0) 327 - rc = gcm_empty(req, &desc, enc); 329 + rc = gcm_empty(req, rctx->iv, enc); 328 330 else 329 - rc = gmac(req, &desc, assoclen); 331 + rc = gmac(req, rctx->iv, assoclen); 330 332 if (rc) 331 333 goto out; 332 334 else ··· 354 358 to_process = nbytes - processed; 355 359 356 360 csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; 357 - rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, 361 + rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst, 358 362 req->src, &to_process, 359 363 processed + req->assoclen, 360 364 csbcpb->cpb.aes_gcm.iv_or_cnt); ··· 373 377 if (rc) 374 378 goto out; 375 379 376 - memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); 380 + memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); 377 381 memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, 378 382 csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); 379 383 memcpy(csbcpb->cpb.aes_gcm.in_s0,
+8 -8
drivers/crypto/nx/nx.c
··· 243 243 * scatterlists based on them. 244 244 * 245 245 * @nx_ctx: NX crypto context for the lists we're building 246 - * @desc: the block cipher descriptor for the operation 246 + * @iv: iv data, if the algorithm requires it 247 247 * @dst: destination scatterlist 248 248 * @src: source scatterlist 249 249 * @nbytes: length of data described in the scatterlists 250 250 * @offset: number of bytes to fast-forward past at the beginning of 251 251 * scatterlists. 252 - * @iv: destination for the iv data, if the algorithm requires it 252 + * @oiv: destination for the iv data, if the algorithm requires it 253 253 * 254 - * This is common code shared by all the AES algorithms. It uses the block 255 - * cipher walk routines to traverse input and output scatterlists, building 254 + * This is common code shared by all the AES algorithms. It uses the crypto 255 + * scatterlist walk routines to traverse input and output scatterlists, building 256 256 * corresponding NX scatterlists 257 257 */ 258 258 int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, 259 - struct blkcipher_desc *desc, 259 + const u8 *iv, 260 260 struct scatterlist *dst, 261 261 struct scatterlist *src, 262 262 unsigned int *nbytes, 263 263 unsigned int offset, 264 - u8 *iv) 264 + u8 *oiv) 265 265 { 266 266 unsigned int delta = 0; 267 267 unsigned int total = *nbytes; ··· 274 274 max_sg_len = min_t(u64, max_sg_len, 275 275 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 276 276 277 - if (iv) 278 - memcpy(iv, desc->info, AES_BLOCK_SIZE); 277 + if (oiv) 278 + memcpy(oiv, iv, AES_BLOCK_SIZE); 279 279 280 280 *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); 281 281
+3 -3
drivers/crypto/nx/nx.h
··· 155 155 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op, 156 156 u32 may_sleep); 157 157 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32); 158 - int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *, 159 - struct scatterlist *, struct scatterlist *, unsigned int *, 160 - unsigned int, u8 *); 158 + int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv, 159 + struct scatterlist *dst, struct scatterlist *src, 160 + unsigned int *nbytes, unsigned int offset, u8 *oiv); 161 161 struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int, 162 162 struct scatterlist *, unsigned int, 163 163 unsigned int *);