Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccp - Support for multiple CCPs

Enable management of more than one CCP in a system. Each device will
get a unique identifier, as well as uniquely named
resources. Treat each CCP as an orthogonal unit and register
resources individually.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Gary R Hook and committed by Herbert Xu.
553d2374 3f19ce20

+114 -19 (commit total across the four files below)
+100 -13
drivers/crypto/ccp/ccp-dev.c
··· 16 16 #include <linux/sched.h> 17 17 #include <linux/interrupt.h> 18 18 #include <linux/spinlock.h> 19 + #include <linux/rwlock_types.h> 20 + #include <linux/types.h> 19 21 #include <linux/mutex.h> 20 22 #include <linux/delay.h> 21 23 #include <linux/hw_random.h> ··· 39 37 struct ccp_cmd *cmd; 40 38 }; 41 39 42 - static struct ccp_device *ccp_dev; 43 - static inline struct ccp_device *ccp_get_device(void) 40 + /* List of CCPs, CCP count, read-write access lock, and access functions 41 + * 42 + * Lock structure: get ccp_unit_lock for reading whenever we need to 43 + * examine the CCP list. While holding it for reading we can acquire 44 + * the RR lock to update the round-robin next-CCP pointer. The unit lock 45 + * must be acquired before the RR lock. 46 + * 47 + * If the unit-lock is acquired for writing, we have total control over 48 + * the list, so there's no value in getting the RR lock. 49 + */ 50 + static DEFINE_RWLOCK(ccp_unit_lock); 51 + static LIST_HEAD(ccp_units); 52 + 53 + /* Round-robin counter */ 54 + static DEFINE_RWLOCK(ccp_rr_lock); 55 + static struct ccp_device *ccp_rr; 56 + 57 + /* Ever-increasing value to produce unique unit numbers */ 58 + static atomic_t ccp_unit_ordinal; 59 + unsigned int ccp_increment_unit_ordinal(void) 44 60 { 45 - return ccp_dev; 61 + return atomic_inc_return(&ccp_unit_ordinal); 46 62 } 47 63 64 + /* 65 + * Put this CCP on the unit list, which makes it available 66 + * for use. 67 + */ 48 68 static inline void ccp_add_device(struct ccp_device *ccp) 49 69 { 50 - ccp_dev = ccp; 70 + unsigned long flags; 71 + 72 + write_lock_irqsave(&ccp_unit_lock, flags); 73 + list_add_tail(&ccp->entry, &ccp_units); 74 + if (!ccp_rr) 75 + /* We already have the list lock (we're first) so this 76 + * pointer can't change on us. Set its initial value. 77 + */ 78 + ccp_rr = ccp; 79 + write_unlock_irqrestore(&ccp_unit_lock, flags); 51 80 } 52 81 82 + /* Remove this unit from the list of devices. 
If the next device 83 + * up for use is this one, adjust the pointer. If this is the last 84 + * device, NULL the pointer. 85 + */ 53 86 static inline void ccp_del_device(struct ccp_device *ccp) 54 87 { 55 - ccp_dev = NULL; 88 + unsigned long flags; 89 + 90 + write_lock_irqsave(&ccp_unit_lock, flags); 91 + if (ccp_rr == ccp) { 92 + /* ccp_unit_lock is read/write; any read access 93 + * will be suspended while we make changes to the 94 + * list and RR pointer. 95 + */ 96 + if (list_is_last(&ccp_rr->entry, &ccp_units)) 97 + ccp_rr = list_first_entry(&ccp_units, struct ccp_device, 98 + entry); 99 + else 100 + ccp_rr = list_next_entry(ccp_rr, entry); 101 + } 102 + list_del(&ccp->entry); 103 + if (list_empty(&ccp_units)) 104 + ccp_rr = NULL; 105 + write_unlock_irqrestore(&ccp_unit_lock, flags); 106 + } 107 + 108 + static struct ccp_device *ccp_get_device(void) 109 + { 110 + unsigned long flags; 111 + struct ccp_device *dp = NULL; 112 + 113 + /* We round-robin through the unit list. 114 + * The (ccp_rr) pointer refers to the next unit to use. 115 + */ 116 + read_lock_irqsave(&ccp_unit_lock, flags); 117 + if (!list_empty(&ccp_units)) { 118 + write_lock_irqsave(&ccp_rr_lock, flags); 119 + dp = ccp_rr; 120 + if (list_is_last(&ccp_rr->entry, &ccp_units)) 121 + ccp_rr = list_first_entry(&ccp_units, struct ccp_device, 122 + entry); 123 + else 124 + ccp_rr = list_next_entry(ccp_rr, entry); 125 + write_unlock_irqrestore(&ccp_rr_lock, flags); 126 + } 127 + read_unlock_irqrestore(&ccp_unit_lock, flags); 128 + 129 + return dp; 56 130 } 57 131 58 132 /** ··· 138 60 */ 139 61 int ccp_present(void) 140 62 { 141 - if (ccp_get_device()) 142 - return 0; 63 + unsigned long flags; 64 + int ret; 143 65 144 - return -ENODEV; 66 + read_lock_irqsave(&ccp_unit_lock, flags); 67 + ret = list_empty(&ccp_units); 68 + read_unlock_irqrestore(&ccp_unit_lock, flags); 69 + 70 + return ret ? 
-ENODEV : 0; 145 71 } 146 72 EXPORT_SYMBOL_GPL(ccp_present); 147 73 ··· 391 309 ccp->ksb_count = KSB_COUNT; 392 310 ccp->ksb_start = 0; 393 311 312 + ccp->ord = ccp_increment_unit_ordinal(); 313 + snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); 314 + snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); 315 + 394 316 return ccp; 395 317 } 396 318 ··· 420 334 continue; 421 335 422 336 /* Allocate a dma pool for this queue */ 423 - snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i); 337 + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", 338 + ccp->name, i); 424 339 dma_pool = dma_pool_create(dma_pool_name, dev, 425 340 CCP_DMAPOOL_MAX_SIZE, 426 341 CCP_DMAPOOL_ALIGN, 0); ··· 503 416 cmd_q = &ccp->cmd_q[i]; 504 417 505 418 kthread = kthread_create(ccp_cmd_queue_thread, cmd_q, 506 - "ccp-q%u", cmd_q->id); 419 + "%s-q%u", ccp->name, cmd_q->id); 507 420 if (IS_ERR(kthread)) { 508 421 dev_err(dev, "error creating queue thread (%ld)\n", 509 422 PTR_ERR(kthread)); ··· 516 429 } 517 430 518 431 /* Register the RNG */ 519 - ccp->hwrng.name = "ccp-rng"; 432 + ccp->hwrng.name = ccp->rngname; 520 433 ccp->hwrng.read = ccp_trng_read; 521 434 ret = hwrng_register(&ccp->hwrng); 522 435 if (ret) { ··· 674 587 return ret; 675 588 676 589 /* Don't leave the driver loaded if init failed */ 677 - if (!ccp_get_device()) { 590 + if (ccp_present() != 0) { 678 591 ccp_pci_exit(); 679 592 return -ENODEV; 680 593 } ··· 690 603 return ret; 691 604 692 605 /* Don't leave the driver loaded if init failed */ 693 - if (!ccp_get_device()) { 606 + if (ccp_present() != 0) { 694 607 ccp_platform_exit(); 695 608 return -ENODEV; 696 609 }
+8 -1
drivers/crypto/ccp/ccp-dev.h
··· 1 1 /* 2 2 * AMD Cryptographic Coprocessor (CCP) driver 3 3 * 4 - * Copyright (C) 2013 Advanced Micro Devices, Inc. 4 + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 5 5 * 6 6 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 7 * ··· 23 23 #include <linux/hw_random.h> 24 24 #include <linux/bitops.h> 25 25 26 + #define MAX_CCP_NAME_LEN 16 26 27 #define MAX_DMAPOOL_NAME_LEN 32 27 28 28 29 #define MAX_HW_QUEUES 5 ··· 185 184 } ____cacheline_aligned; 186 185 187 186 struct ccp_device { 187 + struct list_head entry; 188 + 189 + unsigned int ord; 190 + char name[MAX_CCP_NAME_LEN]; 191 + char rngname[MAX_CCP_NAME_LEN]; 192 + 188 193 struct device *dev; 189 194 190 195 /*
+4 -3
drivers/crypto/ccp/ccp-pci.c
··· 1 1 /* 2 2 * AMD Cryptographic Coprocessor (CCP) driver 3 3 * 4 - * Copyright (C) 2013 Advanced Micro Devices, Inc. 4 + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. 5 5 * 6 6 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 7 * ··· 59 59 ccp_pci->msix_count = ret; 60 60 for (v = 0; v < ccp_pci->msix_count; v++) { 61 61 /* Set the interrupt names and request the irqs */ 62 - snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v); 62 + snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", 63 + ccp->name, v); 63 64 ccp_pci->msix[v].vector = msix_entry[v].vector; 64 65 ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler, 65 66 0, ccp_pci->msix[v].name, dev); ··· 95 94 return ret; 96 95 97 96 ccp->irq = pdev->irq; 98 - ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); 97 + ret = request_irq(ccp->irq, ccp_irq_handler, 0, ccp->name, dev); 99 98 if (ret) { 100 99 dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); 101 100 goto e_msi;
+2 -2
drivers/crypto/ccp/ccp-platform.c
··· 1 1 /* 2 2 * AMD Cryptographic Coprocessor (CCP) driver 3 3 * 4 - * Copyright (C) 2014 Advanced Micro Devices, Inc. 4 + * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. 5 5 * 6 6 * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 7 * ··· 43 43 return ret; 44 44 45 45 ccp->irq = ret; 46 - ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev); 46 + ret = request_irq(ccp->irq, ccp_irq_handler, 0, ccp->name, dev); 47 47 if (ret) { 48 48 dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); 49 49 return ret;