Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: inside-secure - dynamic ring configuration allocation

The Inside Secure SafeXcel driver currently uses 4 rings, but the
eip197d engine has 8 of them. This patch updates the driver so that
rings are allocated dynamically, based on the number of rings
supported by a given engine.

Signed-off-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Ofer Heifetz and committed by
Herbert Xu
18e0e95b 53c83e91

+47 -38
+7
drivers/crypto/inside-secure/safexcel.c
··· 981 981 982 982 safexcel_configure(priv); 983 983 984 + priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring), 985 + GFP_KERNEL); 986 + if (!priv->ring) { 987 + ret = -ENOMEM; 988 + goto err_reg_clk; 989 + } 990 + 984 991 for (i = 0; i < priv->config.rings; i++) { 985 992 char irq_name[6] = {0}; /* "ringX\0" */ 986 993 char wq_name[9] = {0}; /* "wq_ringX\0" */
+35 -33
drivers/crypto/inside-secure/safexcel.h
··· 487 487 FW_NB 488 488 }; 489 489 490 - struct safexcel_ring { 490 + struct safexcel_desc_ring { 491 491 void *base; 492 492 void *base_end; 493 493 dma_addr_t base_dma; ··· 528 528 int ring; 529 529 }; 530 530 531 + struct safexcel_ring { 532 + spinlock_t lock; 533 + spinlock_t egress_lock; 534 + 535 + struct list_head list; 536 + struct workqueue_struct *workqueue; 537 + struct safexcel_work_data work_data; 538 + 539 + /* command/result rings */ 540 + struct safexcel_desc_ring cdr; 541 + struct safexcel_desc_ring rdr; 542 + 543 + /* queue */ 544 + struct crypto_queue queue; 545 + spinlock_t queue_lock; 546 + 547 + /* Number of requests in the engine. */ 548 + int requests; 549 + 550 + /* The ring is currently handling at least one request */ 551 + bool busy; 552 + 553 + /* Store for current requests when bailing out of the dequeueing 554 + * function when no enough resources are available. 555 + */ 556 + struct crypto_async_request *req; 557 + struct crypto_async_request *backlog; 558 + }; 559 + 531 560 enum safexcel_eip_version { 532 561 EIP97IES = BIT(0), 533 562 EIP197B = BIT(1), ··· 595 566 596 567 atomic_t ring_used; 597 568 598 - struct { 599 - spinlock_t lock; 600 - spinlock_t egress_lock; 601 - 602 - struct list_head list; 603 - struct workqueue_struct *workqueue; 604 - struct safexcel_work_data work_data; 605 - 606 - /* command/result rings */ 607 - struct safexcel_ring cdr; 608 - struct safexcel_ring rdr; 609 - 610 - /* queue */ 611 - struct crypto_queue queue; 612 - spinlock_t queue_lock; 613 - 614 - /* Number of requests in the engine. */ 615 - int requests; 616 - 617 - /* The ring is currently handling at least one request */ 618 - bool busy; 619 - 620 - /* Store for current requests when bailing out of the dequeueing 621 - * function when no enough resources are available. 
622 - */ 623 - struct crypto_async_request *req; 624 - struct crypto_async_request *backlog; 625 - } ring[EIP197_MAX_RINGS]; 569 + struct safexcel_ring *ring; 626 570 }; 627 571 628 572 struct safexcel_context { ··· 653 651 dma_addr_t ctxr_dma, int ring, 654 652 struct safexcel_request *request); 655 653 int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, 656 - struct safexcel_ring *cdr, 657 - struct safexcel_ring *rdr); 654 + struct safexcel_desc_ring *cdr, 655 + struct safexcel_desc_ring *rdr); 658 656 int safexcel_select_ring(struct safexcel_crypto_priv *priv); 659 657 void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, 660 - struct safexcel_ring *ring); 658 + struct safexcel_desc_ring *ring); 661 659 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, 662 - struct safexcel_ring *ring); 660 + struct safexcel_desc_ring *ring); 663 661 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv, 664 662 int ring_id, 665 663 bool first, bool last,
+5 -5
drivers/crypto/inside-secure/safexcel_ring.c
··· 14 14 #include "safexcel.h" 15 15 16 16 int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv, 17 - struct safexcel_ring *cdr, 18 - struct safexcel_ring *rdr) 17 + struct safexcel_desc_ring *cdr, 18 + struct safexcel_desc_ring *rdr) 19 19 { 20 20 cdr->offset = sizeof(u32) * priv->config.cd_offset; 21 21 cdr->base = dmam_alloc_coherent(priv->dev, ··· 46 46 } 47 47 48 48 static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv, 49 - struct safexcel_ring *ring) 49 + struct safexcel_desc_ring *ring) 50 50 { 51 51 void *ptr = ring->write; 52 52 ··· 62 62 } 63 63 64 64 void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv, 65 - struct safexcel_ring *ring) 65 + struct safexcel_desc_ring *ring) 66 66 { 67 67 void *ptr = ring->read; 68 68 ··· 78 78 } 79 79 80 80 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv, 81 - struct safexcel_ring *ring) 81 + struct safexcel_desc_ring *ring) 82 82 { 83 83 if (!ring->nr) 84 84 return;