Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccree - fix backlog notifications

We were doing backlog notification callbacks via a cipher/hash/aead
request structure cast to the base structure, which may or may not
work based on how the structure is laid out in memory and is not safe.

Fix it by delegating the backlog notification to the appropriate
internal callbacks which are type aware.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Gilad Ben-Yossef and committed by
Herbert Xu
a108f931 dcf6285d

+39 -14
+4
drivers/crypto/ccree/cc_aead.c
··· 220 220 struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req); 221 221 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 222 222 223 + /* BACKLOG notification */ 224 + if (err == -EINPROGRESS) 225 + goto done; 226 + 223 227 cc_unmap_aead_request(dev, areq); 224 228 225 229 /* Restore ordinary iv pointer */
+7 -3
drivers/crypto/ccree/cc_cipher.c
··· 818 818 struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req); 819 819 unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm); 820 820 821 - cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); 822 - memcpy(req->iv, req_ctx->iv, ivsize); 823 - kzfree(req_ctx->iv); 821 + if (err != -EINPROGRESS) { 822 + /* Not a BACKLOG notification */ 823 + cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); 824 + memcpy(req->iv, req_ctx->iv, ivsize); 825 + kzfree(req_ctx->iv); 826 + } 827 + 824 828 skcipher_request_complete(req, err); 825 829 } 826 830
+20 -8
drivers/crypto/ccree/cc_hash.c
··· 280 280 281 281 dev_dbg(dev, "req=%pK\n", req); 282 282 283 - cc_unmap_hash_request(dev, state, req->src, false); 284 - cc_unmap_req(dev, state, ctx); 283 + if (err != -EINPROGRESS) { 284 + /* Not a BACKLOG notification */ 285 + cc_unmap_hash_request(dev, state, req->src, false); 286 + cc_unmap_req(dev, state, ctx); 287 + } 288 + 285 289 req->base.complete(&req->base, err); 286 290 } 287 291 ··· 299 295 300 296 dev_dbg(dev, "req=%pK\n", req); 301 297 302 - cc_unmap_hash_request(dev, state, req->src, false); 303 - cc_unmap_result(dev, state, digestsize, req->result); 304 - cc_unmap_req(dev, state, ctx); 298 + if (err != -EINPROGRESS) { 299 + /* Not a BACKLOG notification */ 300 + cc_unmap_hash_request(dev, state, req->src, false); 301 + cc_unmap_result(dev, state, digestsize, req->result); 302 + cc_unmap_req(dev, state, ctx); 303 + } 304 + 305 305 req->base.complete(&req->base, err); 306 306 } 307 307 ··· 319 311 320 312 dev_dbg(dev, "req=%pK\n", req); 321 313 322 - cc_unmap_hash_request(dev, state, req->src, false); 323 - cc_unmap_result(dev, state, digestsize, req->result); 324 - cc_unmap_req(dev, state, ctx); 314 + if (err != -EINPROGRESS) { 315 + /* Not a BACKLOG notification */ 316 + cc_unmap_hash_request(dev, state, req->src, false); 317 + cc_unmap_result(dev, state, digestsize, req->result); 318 + cc_unmap_req(dev, state, ctx); 319 + } 320 + 325 321 req->base.complete(&req->base, err); 326 322 } 327 323
+8 -3
drivers/crypto/ccree/cc_request_mgr.c
··· 364 364 struct cc_bl_item *bli) 365 365 { 366 366 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; 367 + struct device *dev = drvdata_to_dev(drvdata); 367 368 368 369 spin_lock_bh(&mgr->bl_lock); 369 370 list_add_tail(&bli->list, &mgr->backlog); 370 371 ++mgr->bl_len; 372 + dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len); 371 373 spin_unlock_bh(&mgr->bl_lock); 372 374 tasklet_schedule(&mgr->comptask); 373 375 } ··· 379 377 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; 380 378 struct cc_bl_item *bli; 381 379 struct cc_crypto_req *creq; 382 - struct crypto_async_request *req; 380 + void *req; 383 381 bool ivgen; 384 382 unsigned int total_len; 385 383 struct device *dev = drvdata_to_dev(drvdata); ··· 389 387 390 388 while (mgr->bl_len) { 391 389 bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list); 390 + dev_dbg(dev, "---bl len: %d\n", mgr->bl_len); 391 + 392 392 spin_unlock(&mgr->bl_lock); 393 393 394 + 394 395 creq = &bli->creq; 395 - req = (struct crypto_async_request *)creq->user_arg; 396 + req = creq->user_arg; 396 397 397 398 /* 398 399 * Notify the request we're moving out of the backlog 399 400 * but only if we haven't done so already. 400 401 */ 401 402 if (!bli->notif) { 402 - req->complete(req, -EINPROGRESS); 403 + creq->user_cb(dev, req, -EINPROGRESS); 403 404 bli->notif = true; 404 405 } 405 406