Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: engine - remove request batching support

Remove request batching support from crypto_engine, as there are no
drivers using this feature and it doesn't really work that well.

Instead of doing batching based on backlog, a more optimal approach
would be for the user to handle the batching (similar to how IPsec
can hook into GSO to get 64K of data each time or how block encryption
can use unit sizes much greater than 4K).

Suggested-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Ovidiu Panait <ovidiu.panait.oss@gmail.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ovidiu Panait; committed by Herbert Xu.
c470ffa6 3d4df408

+5 -34
+1 -1
arch/s390/crypto/paes_s390.c
··· 1633 1633 /* with this pseudo devie alloc and start a crypto engine */ 1634 1634 paes_crypto_engine = 1635 1635 crypto_engine_alloc_init_and_set(paes_dev.this_device, 1636 - true, NULL, false, MAX_QLEN); 1636 + true, false, MAX_QLEN); 1637 1637 if (!paes_crypto_engine) { 1638 1638 rc = -ENOMEM; 1639 1639 goto out_err;
+1 -1
arch/s390/crypto/phmac_s390.c
··· 1006 1006 /* with this pseudo device alloc and start a crypto engine */ 1007 1007 phmac_crypto_engine = 1008 1008 crypto_engine_alloc_init_and_set(phmac_dev.this_device, 1009 - true, NULL, false, MAX_QLEN); 1009 + true, false, MAX_QLEN); 1010 1010 if (!phmac_crypto_engine) { 1011 1011 rc = -ENOMEM; 1012 1012 goto out_err;
+1 -24
crypto/crypto_engine.c
··· 195 195 out: 196 196 spin_unlock_irqrestore(&engine->queue_lock, flags); 197 197 198 - /* 199 - * Batch requests is possible only if 200 - * hardware can enqueue multiple requests 201 - */ 202 - if (engine->do_batch_requests) { 203 - ret = engine->do_batch_requests(engine); 204 - if (ret) 205 - dev_err(engine->dev, "failed to do batch requests: %d\n", 206 - ret); 207 - } 208 - 209 198 return; 210 199 } 211 200 ··· 451 462 * crypto-engine queue. 452 463 * @dev: the device attached with one hardware engine 453 464 * @retry_support: whether hardware has support for retry mechanism 454 - * @cbk_do_batch: pointer to a callback function to be invoked when executing 455 - * a batch of requests. 456 - * This has the form: 457 - * callback(struct crypto_engine *engine) 458 - * where: 459 - * engine: the crypto engine structure. 460 465 * @rt: whether this queue is set to run as a realtime task 461 466 * @qlen: maximum size of the crypto-engine queue 462 467 * ··· 459 476 */ 460 477 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, 461 478 bool retry_support, 462 - int (*cbk_do_batch)(struct crypto_engine *engine), 463 479 bool rt, int qlen) 464 480 { 465 481 struct crypto_engine *engine; ··· 477 495 engine->idling = false; 478 496 engine->retry_support = retry_support; 479 497 engine->priv_data = dev; 480 - /* 481 - * Batch requests is possible only if 482 - * hardware has support for retry mechanism. 483 - */ 484 - engine->do_batch_requests = retry_support ? cbk_do_batch : NULL; 485 498 486 499 snprintf(engine->name, sizeof(engine->name), 487 500 "%s-engine", dev_name(dev)); ··· 511 534 */ 512 535 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) 513 536 { 514 - return crypto_engine_alloc_init_and_set(dev, false, NULL, rt, 537 + return crypto_engine_alloc_init_and_set(dev, false, rt, 515 538 CRYPTO_ENGINE_MAX_QLEN); 516 539 } 517 540 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
+1 -2
drivers/crypto/caam/jr.c
··· 629 629 } 630 630 631 631 /* Initialize crypto engine */ 632 - jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL, 633 - false, 632 + jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false, 634 633 CRYPTO_ENGINE_MAX_QLEN); 635 634 if (!jrpriv->engine) { 636 635 dev_err(jrdev, "Could not init crypto-engine\n");
+1 -1
drivers/crypto/virtio/virtio_crypto_core.c
··· 139 139 spin_lock_init(&vi->data_vq[i].lock); 140 140 vi->data_vq[i].vq = vqs[i]; 141 141 /* Initialize crypto engine */ 142 - vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true, 142 + vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true, 143 143 virtqueue_get_vring_size(vqs[i])); 144 144 if (!vi->data_vq[i].engine) { 145 145 ret = -ENOMEM;
-1
include/crypto/engine.h
··· 76 76 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); 77 77 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, 78 78 bool retry_support, 79 - int (*cbk_do_batch)(struct crypto_engine *engine), 80 79 bool rt, int qlen); 81 80 void crypto_engine_exit(struct crypto_engine *engine); 82 81
-4
include/crypto/internal/engine.h
··· 37 37 * @unprepare_crypt_hardware: there are currently no more requests on the 38 38 * queue so the subsystem notifies the driver that it may relax the 39 39 * hardware by issuing this call 40 - * @do_batch_requests: execute a batch of requests. Depends on multiple 41 - * requests support. 42 40 * @kworker: kthread worker struct for request pump 43 41 * @pump_requests: work struct for scheduling work to the request pump 44 42 * @priv_data: the engine private data ··· 58 60 59 61 int (*prepare_crypt_hardware)(struct crypto_engine *engine); 60 62 int (*unprepare_crypt_hardware)(struct crypto_engine *engine); 61 - int (*do_batch_requests)(struct crypto_engine *engine); 62 - 63 63 64 64 struct kthread_worker *kworker; 65 65 struct kthread_work pump_requests;