Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: inside-secure - use one queue per hw ring

Update the inside-secure safexcel driver from using one global queue to
one queue per hw ring. This eases the request management and keeps the hw
in sync with what's done in sw.

Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Antoine Ténart and committed by
Herbert Xu
86671abb 97858434

+89 -85
+39 -47
drivers/crypto/inside-secure/safexcel.c
··· 422 422 return 0; 423 423 } 424 424 425 - void safexcel_dequeue(struct safexcel_crypto_priv *priv) 425 + void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) 426 426 { 427 427 struct crypto_async_request *req, *backlog; 428 428 struct safexcel_context *ctx; 429 429 struct safexcel_request *request; 430 - int i, ret, n = 0, nreq[EIP197_MAX_RINGS] = {0}; 431 - int cdesc[EIP197_MAX_RINGS] = {0}, rdesc[EIP197_MAX_RINGS] = {0}; 432 - int commands, results; 430 + int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; 433 431 434 432 do { 435 - spin_lock_bh(&priv->lock); 436 - req = crypto_dequeue_request(&priv->queue); 437 - backlog = crypto_get_backlog(&priv->queue); 438 - spin_unlock_bh(&priv->lock); 433 + spin_lock_bh(&priv->ring[ring].queue_lock); 434 + req = crypto_dequeue_request(&priv->ring[ring].queue); 435 + backlog = crypto_get_backlog(&priv->ring[ring].queue); 436 + spin_unlock_bh(&priv->ring[ring].queue_lock); 439 437 440 438 if (!req) 441 439 goto finalize; ··· 443 445 goto requeue; 444 446 445 447 ctx = crypto_tfm_ctx(req->tfm); 446 - ret = ctx->send(req, ctx->ring, request, &commands, &results); 448 + ret = ctx->send(req, ring, request, &commands, &results); 447 449 if (ret) { 448 450 kfree(request); 449 451 requeue: 450 - spin_lock_bh(&priv->lock); 451 - crypto_enqueue_request(&priv->queue, req); 452 - spin_unlock_bh(&priv->lock); 452 + spin_lock_bh(&priv->ring[ring].queue_lock); 453 + crypto_enqueue_request(&priv->ring[ring].queue, req); 454 + spin_unlock_bh(&priv->ring[ring].queue_lock); 453 455 454 - priv->need_dequeue = true; 456 + priv->ring[ring].need_dequeue = true; 455 457 continue; 456 458 } 457 459 458 460 if (backlog) 459 461 backlog->complete(backlog, -EINPROGRESS); 460 462 461 - spin_lock_bh(&priv->ring[ctx->ring].egress_lock); 462 - list_add_tail(&request->list, &priv->ring[ctx->ring].list); 463 - spin_unlock_bh(&priv->ring[ctx->ring].egress_lock); 463 + spin_lock_bh(&priv->ring[ring].egress_lock); 464 + list_add_tail(&request->list, &priv->ring[ring].list); 465 + spin_unlock_bh(&priv->ring[ring].egress_lock); 464 466 465 - cdesc[ctx->ring] += commands; 466 - rdesc[ctx->ring] += results; 467 - 468 - nreq[ctx->ring]++; 469 - } while (n++ < EIP197_MAX_BATCH_SZ); 467 + cdesc += commands; 468 + rdesc += results; 469 + } while (nreq++ < EIP197_MAX_BATCH_SZ); 470 470 471 471 finalize: 472 - if (n == EIP197_MAX_BATCH_SZ) 473 - priv->need_dequeue = true; 474 - else if (!n) 472 + if (nreq == EIP197_MAX_BATCH_SZ) 473 + priv->ring[ring].need_dequeue = true; 474 + else if (!nreq) 475 475 return; 476 476 477 - for (i = 0; i < priv->config.rings; i++) { 478 - if (!nreq[i]) 479 - continue; 477 + spin_lock_bh(&priv->ring[ring].lock); 480 478 481 - spin_lock_bh(&priv->ring[i].lock); 479 + /* Configure when we want an interrupt */ 480 + writel(EIP197_HIA_RDR_THRESH_PKT_MODE | 481 + EIP197_HIA_RDR_THRESH_PROC_PKT(nreq), 482 + priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH); 482 483 483 - /* Configure when we want an interrupt */ 484 - writel(EIP197_HIA_RDR_THRESH_PKT_MODE | 485 - EIP197_HIA_RDR_THRESH_PROC_PKT(nreq[i]), 486 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_THRESH); 484 + /* let the RDR know we have pending descriptors */ 485 + writel((rdesc * priv->config.rd_offset) << 2, 486 + priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT); 487 487 488 - /* let the RDR know we have pending descriptors */ 489 - writel((rdesc[i] * priv->config.rd_offset) << 2, 490 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT); 488 + /* let the CDR know we have pending descriptors */ 489 + writel((cdesc * priv->config.cd_offset) << 2, 490 + priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT); 491 491 492 - /* let the CDR know we have pending descriptors */ 493 - writel((cdesc[i] * priv->config.cd_offset) << 2, 494 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT); 495 - 496 - spin_unlock_bh(&priv->ring[i].lock); 497 - } 492 + spin_unlock_bh(&priv->ring[ring].lock); 498 493 } 499 494 500 495 void safexcel_free_context(struct safexcel_crypto_priv *priv, ··· 629 638 630 639 safexcel_handle_result_descriptor(priv, data->ring); 631 640 632 - if (priv->need_dequeue) { 633 - priv->need_dequeue = false; 634 - safexcel_dequeue(data->priv); 641 + if (priv->ring[data->ring].need_dequeue) { 642 + priv->ring[data->ring].need_dequeue = false; 643 + safexcel_dequeue(data->priv, data->ring); 635 644 } 636 645 } 637 646 ··· 855 864 goto err_clk; 856 865 } 857 866 867 + crypto_init_queue(&priv->ring[i].queue, 868 + EIP197_DEFAULT_RING_SIZE); 869 + 858 870 INIT_LIST_HEAD(&priv->ring[i].list); 859 871 spin_lock_init(&priv->ring[i].lock); 860 872 spin_lock_init(&priv->ring[i].egress_lock); 873 + spin_lock_init(&priv->ring[i].queue_lock); 861 874 } 862 875 863 876 platform_set_drvdata(pdev, priv); 864 877 atomic_set(&priv->ring_used, 0); 865 - 866 - spin_lock_init(&priv->lock); 867 - crypto_init_queue(&priv->queue, EIP197_DEFAULT_RING_SIZE); 868 878 869 879 ret = safexcel_hw_init(priv); 870 880 if (ret) {
+6 -6
drivers/crypto/inside-secure/safexcel.h
··· 469 469 struct clk *clk; 470 470 struct safexcel_config config; 471 471 472 - spinlock_t lock; 473 - struct crypto_queue queue; 474 - 475 - bool need_dequeue; 476 - 477 472 /* context DMA pool */ 478 473 struct dma_pool *context_pool; 479 474 ··· 485 490 /* command/result rings */ 486 491 struct safexcel_ring cdr; 487 492 struct safexcel_ring rdr; 493 + 494 + /* queue */ 495 + struct crypto_queue queue; 496 + spinlock_t queue_lock; 497 + bool need_dequeue; 488 498 } ring[EIP197_MAX_RINGS]; 489 499 }; 490 500 ··· 533 533 int error; 534 534 }; 535 535 536 - void safexcel_dequeue(struct safexcel_crypto_priv *priv); 536 + void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring); 537 537 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring); 538 538 void safexcel_free_context(struct safexcel_crypto_priv *priv, 539 539 struct crypto_async_request *req,
+22 -16
drivers/crypto/inside-secure/safexcel_cipher.c
··· 339 339 return ndesc; 340 340 } 341 341 342 + ring = safexcel_select_ring(priv); 343 + ctx->base.ring = ring; 342 344 ctx->base.needs_inv = false; 343 - ctx->base.ring = safexcel_select_ring(priv); 344 345 ctx->base.send = safexcel_aes_send; 345 346 346 - spin_lock_bh(&priv->lock); 347 - enq_ret = crypto_enqueue_request(&priv->queue, async); 348 - spin_unlock_bh(&priv->lock); 347 + spin_lock_bh(&priv->ring[ring].queue_lock); 348 + enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); 349 + spin_unlock_bh(&priv->ring[ring].queue_lock); 349 350 350 351 if (enq_ret != -EINPROGRESS) 351 352 *ret = enq_ret; 352 353 353 - priv->need_dequeue = true; 354 + if (!priv->ring[ring].need_dequeue) 355 + safexcel_dequeue(priv, ring); 356 + 354 357 *should_complete = false; 355 358 356 359 return ndesc; ··· 387 384 struct safexcel_crypto_priv *priv = ctx->priv; 388 385 struct skcipher_request req; 389 386 struct safexcel_inv_result result = { 0 }; 387 + int ring = ctx->base.ring; 390 388 391 389 memset(&req, 0, sizeof(struct skcipher_request)); 392 390 ··· 401 397 ctx->base.exit_inv = true; 402 398 ctx->base.send = safexcel_cipher_send_inv; 403 399 404 - spin_lock_bh(&priv->lock); 405 - crypto_enqueue_request(&priv->queue, &req.base); 406 - spin_unlock_bh(&priv->lock); 400 + spin_lock_bh(&priv->ring[ring].queue_lock); 401 + crypto_enqueue_request(&priv->ring[ring].queue, &req.base); 402 + spin_unlock_bh(&priv->ring[ring].queue_lock); 407 403 408 - if (!priv->need_dequeue) 409 - safexcel_dequeue(priv); 404 + if (!priv->ring[ring].need_dequeue) 405 + safexcel_dequeue(priv, ring); 410 406 411 407 wait_for_completion_interruptible(&result.completion); 412 408 ··· 425 421 { 426 422 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 427 423 struct safexcel_crypto_priv *priv = ctx->priv; 428 - int ret; 424 + int ret, ring; 429 425 430 426 ctx->direction = dir; 431 427 ctx->mode = mode; ··· 444 440 return -ENOMEM; 445 441 } 446 442 447 - spin_lock_bh(&priv->lock); 448 - ret = crypto_enqueue_request(&priv->queue, &req->base); 449 - spin_unlock_bh(&priv->lock); 443 + ring = ctx->base.ring; 450 444 451 - if (!priv->need_dequeue) 452 - safexcel_dequeue(priv); 445 + spin_lock_bh(&priv->ring[ring].queue_lock); 446 + ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); 447 + spin_unlock_bh(&priv->ring[ring].queue_lock); 448 + 449 + if (!priv->ring[ring].need_dequeue) 450 + safexcel_dequeue(priv, ring); 453 451 454 452 return ret; 455 453 }
+22 -16
drivers/crypto/inside-secure/safexcel_hash.c
··· 374 374 return 1; 375 375 } 376 376 377 - ctx->base.ring = safexcel_select_ring(priv); 377 + ring = safexcel_select_ring(priv); 378 + ctx->base.ring = ring; 378 379 ctx->base.needs_inv = false; 379 380 ctx->base.send = safexcel_ahash_send; 380 381 381 - spin_lock_bh(&priv->lock); 382 - enq_ret = crypto_enqueue_request(&priv->queue, async); 383 - spin_unlock_bh(&priv->lock); 382 + spin_lock_bh(&priv->ring[ring].queue_lock); 383 + enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); 384 + spin_unlock_bh(&priv->ring[ring].queue_lock); 384 385 385 386 if (enq_ret != -EINPROGRESS) 386 387 *ret = enq_ret; 387 388 388 - priv->need_dequeue = true; 389 + if (!priv->ring[ring].need_dequeue) 390 + safexcel_dequeue(priv, ring); 391 + 389 392 *should_complete = false; 390 393 391 394 return 1; ··· 420 417 struct safexcel_crypto_priv *priv = ctx->priv; 421 418 struct ahash_request req; 422 419 struct safexcel_inv_result result = { 0 }; 420 + int ring = ctx->base.ring; 423 421 424 422 memset(&req, 0, sizeof(struct ahash_request)); 425 423 ··· 434 430 ctx->base.exit_inv = true; 435 431 ctx->base.send = safexcel_ahash_send_inv; 436 432 437 - spin_lock_bh(&priv->lock); 438 - crypto_enqueue_request(&priv->queue, &req.base); 439 - spin_unlock_bh(&priv->lock); 433 + spin_lock_bh(&priv->ring[ring].queue_lock); 434 + crypto_enqueue_request(&priv->ring[ring].queue, &req.base); 435 + spin_unlock_bh(&priv->ring[ring].queue_lock); 440 436 441 437 if (!priv->need_dequeue) 442 438 safexcel_dequeue(priv); 437 + if (!priv->ring[ring].need_dequeue) 438 + safexcel_dequeue(priv, ring); 443 439 444 440 wait_for_completion_interruptible(&result.completion); 445 441 ··· 481 477 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); 482 478 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 483 479 struct safexcel_crypto_priv *priv = ctx->priv; 484 - int ret; 480 + int ret, ring; 485 481 486 482 ctx->base.send = safexcel_ahash_send; ··· 500 496 return -ENOMEM; 501 497 } 502 498 503 - spin_lock_bh(&priv->lock); 504 - ret = crypto_enqueue_request(&priv->queue, &areq->base); 505 - spin_unlock_bh(&priv->lock); 499 + ring = ctx->base.ring; 506 500 507 - if (!priv->need_dequeue) 508 - safexcel_dequeue(priv); 501 + spin_lock_bh(&priv->ring[ring].queue_lock); 502 + ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base); 503 + spin_unlock_bh(&priv->ring[ring].queue_lock); 504 + 505 + if (!priv->ring[ring].need_dequeue) 506 + safexcel_dequeue(priv, ring); 509 507 510 508 return ret; 511 509 }