Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccp - Introduce the AMD Secure Processor device

The CCP device is part of the AMD Secure Processor. In order to expand
the usage of the AMD Secure Processor, create a framework that allows
functional components of the AMD Secure Processor to be initialized and
handled appropriately.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Brijesh Singh; committed by Herbert Xu.
720419f0 970e8303

+468 -169
+3 -3
drivers/crypto/Kconfig
··· 540 540 will be called atmel-ecc. 541 541 542 542 config CRYPTO_DEV_CCP 543 - bool "Support for AMD Cryptographic Coprocessor" 543 + bool "Support for AMD Secure Processor" 544 544 depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM 545 545 help 546 - The AMD Cryptographic Coprocessor provides hardware offload support 547 - for encryption, hashing and related operations. 546 + The AMD Secure Processor provides support for the Cryptographic Coprocessor 547 + (CCP) and the Platform Security Processor (PSP) devices. 548 548 549 549 if CRYPTO_DEV_CCP 550 550 source "drivers/crypto/ccp/Kconfig"
+14 -7
drivers/crypto/ccp/Kconfig
··· 1 1 config CRYPTO_DEV_CCP_DD 2 - tristate "Cryptographic Coprocessor device driver" 3 - depends on CRYPTO_DEV_CCP 2 + tristate "Secure Processor device driver" 4 3 default m 4 + help 5 + Provides AMD Secure Processor device driver. 6 + If you choose 'M' here, this module will be called ccp. 7 + 8 + config CRYPTO_DEV_SP_CCP 9 + bool "Cryptographic Coprocessor device" 10 + default y 11 + depends on CRYPTO_DEV_CCP_DD 5 12 select HW_RANDOM 6 13 select DMA_ENGINE 7 14 select DMADEVICES 8 15 select CRYPTO_SHA1 9 16 select CRYPTO_SHA256 10 17 help 11 - Provides the interface to use the AMD Cryptographic Coprocessor 12 - which can be used to offload encryption operations such as SHA, 13 - AES and more. If you choose 'M' here, this module will be called 14 - ccp. 18 + Provides the support for AMD Cryptographic Coprocessor (CCP) device 19 + which can be used to offload encryption operations such as SHA, AES 20 + and more. 15 21 16 22 config CRYPTO_DEV_CCP_CRYPTO 17 23 tristate "Encryption and hashing offload support" 18 - depends on CRYPTO_DEV_CCP_DD 19 24 default m 25 + depends on CRYPTO_DEV_CCP_DD 26 + depends on CRYPTO_DEV_SP_CCP 20 27 select CRYPTO_HASH 21 28 select CRYPTO_BLKCIPHER 22 29 select CRYPTO_AUTHENC
+2 -2
drivers/crypto/ccp/Makefile
··· 1 1 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o 2 - ccp-objs := ccp-dev.o \ 2 + ccp-objs := sp-dev.o ccp-platform.o 3 + ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ 3 4 ccp-ops.o \ 4 5 ccp-dev-v3.o \ 5 6 ccp-dev-v5.o \ 6 - ccp-platform.o \ 7 7 ccp-dmaengine.o \ 8 8 ccp-debugfs.o 9 9 ccp-$(CONFIG_PCI) += ccp-pci.o
+1 -3
drivers/crypto/ccp/ccp-dev-v3.c
··· 359 359 360 360 static irqreturn_t ccp_irq_handler(int irq, void *data) 361 361 { 362 - struct device *dev = data; 363 - struct ccp_device *ccp = dev_get_drvdata(dev); 362 + struct ccp_device *ccp = (struct ccp_device *)data; 364 363 365 364 ccp_disable_queue_interrupts(ccp); 366 365 if (ccp->use_tasklet) ··· 596 597 .version = CCP_VERSION(3, 0), 597 598 .setup = NULL, 598 599 .perform = &ccp3_actions, 599 - .bar = 2, 600 600 .offset = 0x20000, 601 601 };
+1 -4
drivers/crypto/ccp/ccp-dev-v5.c
··· 769 769 770 770 static irqreturn_t ccp5_irq_handler(int irq, void *data) 771 771 { 772 - struct device *dev = data; 773 - struct ccp_device *ccp = dev_get_drvdata(dev); 772 + struct ccp_device *ccp = (struct ccp_device *)data; 774 773 775 774 ccp5_disable_queue_interrupts(ccp); 776 775 ccp->total_interrupts++; ··· 1112 1113 .version = CCP_VERSION(5, 0), 1113 1114 .setup = ccp5_config, 1114 1115 .perform = &ccp5_actions, 1115 - .bar = 2, 1116 1116 .offset = 0x0, 1117 1117 }; 1118 1118 ··· 1120 1122 .dma_chan_attr = DMA_PRIVATE, 1121 1123 .setup = ccp5other_config, 1122 1124 .perform = &ccp5_actions, 1123 - .bar = 2, 1124 1125 .offset = 0x0, 1125 1126 };
+48 -68
drivers/crypto/ccp/ccp-dev.c
··· 111 111 static DEFINE_SPINLOCK(ccp_rr_lock); 112 112 static struct ccp_device *ccp_rr; 113 113 114 - /* Ever-increasing value to produce unique unit numbers */ 115 - static atomic_t ccp_unit_ordinal; 116 - static unsigned int ccp_increment_unit_ordinal(void) 117 - { 118 - return atomic_inc_return(&ccp_unit_ordinal); 119 - } 120 - 121 114 /** 122 115 * ccp_add_device - add a CCP device to the list 123 116 * ··· 458 465 * 459 466 * @dev: device struct of the CCP 460 467 */ 461 - struct ccp_device *ccp_alloc_struct(struct device *dev) 468 + struct ccp_device *ccp_alloc_struct(struct sp_device *sp) 462 469 { 470 + struct device *dev = sp->dev; 463 471 struct ccp_device *ccp; 464 472 465 473 ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); 466 474 if (!ccp) 467 475 return NULL; 468 476 ccp->dev = dev; 477 + ccp->sp = sp; 478 + ccp->axcache = sp->axcache; 469 479 470 480 INIT_LIST_HEAD(&ccp->cmd); 471 481 INIT_LIST_HEAD(&ccp->backlog); ··· 483 487 init_waitqueue_head(&ccp->sb_queue); 484 488 init_waitqueue_head(&ccp->suspend_queue); 485 489 486 - ccp->ord = ccp_increment_unit_ordinal(); 487 - snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord); 488 - snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord); 490 + snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord); 491 + snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord); 489 492 490 493 return ccp; 491 494 } ··· 535 540 return ccp->cmd_q_count == suspended; 536 541 } 537 542 538 - int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state) 543 + int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) 539 544 { 545 + struct ccp_device *ccp = sp->ccp_data; 540 546 unsigned long flags; 541 547 unsigned int i; 542 548 ··· 559 563 return 0; 560 564 } 561 565 562 - int ccp_dev_resume(struct ccp_device *ccp) 566 + int ccp_dev_resume(struct sp_device *sp) 563 567 { 568 + struct ccp_device *ccp = sp->ccp_data; 564 569 unsigned long flags; 565 570 unsigned int i; 566 571 ··· 581 584 
} 582 585 #endif 583 586 584 - int ccp_dev_init(struct ccp_device *ccp) 587 + int ccp_dev_init(struct sp_device *sp) 585 588 { 586 - ccp->io_regs = ccp->io_map + ccp->vdata->offset; 589 + struct device *dev = sp->dev; 590 + struct ccp_device *ccp; 591 + int ret; 587 592 593 + ret = -ENOMEM; 594 + ccp = ccp_alloc_struct(sp); 595 + if (!ccp) 596 + goto e_err; 597 + sp->ccp_data = ccp; 598 + 599 + ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; 600 + if (!ccp->vdata || !ccp->vdata->version) { 601 + ret = -ENODEV; 602 + dev_err(dev, "missing driver data\n"); 603 + goto e_err; 604 + } 605 + 606 + ccp->get_irq = sp->get_irq; 607 + ccp->free_irq = sp->free_irq; 608 + 609 + ccp->io_regs = sp->io_map + ccp->vdata->offset; 588 610 if (ccp->vdata->setup) 589 611 ccp->vdata->setup(ccp); 590 612 591 - return ccp->vdata->perform->init(ccp); 613 + ret = ccp->vdata->perform->init(ccp); 614 + if (ret) 615 + goto e_err; 616 + 617 + dev_notice(dev, "ccp enabled\n"); 618 + 619 + return 0; 620 + 621 + e_err: 622 + sp->ccp_data = NULL; 623 + 624 + dev_notice(dev, "ccp initialization failed\n"); 625 + 626 + return ret; 592 627 } 593 628 594 - void ccp_dev_destroy(struct ccp_device *ccp) 629 + void ccp_dev_destroy(struct sp_device *sp) 595 630 { 631 + struct ccp_device *ccp = sp->ccp_data; 632 + 596 633 if (!ccp) 597 634 return; 598 635 599 636 ccp->vdata->perform->destroy(ccp); 600 637 } 601 - 602 - static int __init ccp_mod_init(void) 603 - { 604 - #ifdef CONFIG_X86 605 - int ret; 606 - 607 - ret = ccp_pci_init(); 608 - if (ret) 609 - return ret; 610 - 611 - /* Don't leave the driver loaded if init failed */ 612 - if (ccp_present() != 0) { 613 - ccp_pci_exit(); 614 - return -ENODEV; 615 - } 616 - 617 - return 0; 618 - #endif 619 - 620 - #ifdef CONFIG_ARM64 621 - int ret; 622 - 623 - ret = ccp_platform_init(); 624 - if (ret) 625 - return ret; 626 - 627 - /* Don't leave the driver loaded if init failed */ 628 - if (ccp_present() != 0) { 629 - ccp_platform_exit(); 630 - return 
-ENODEV; 631 - } 632 - 633 - return 0; 634 - #endif 635 - 636 - return -ENODEV; 637 - } 638 - 639 - static void __exit ccp_mod_exit(void) 640 - { 641 - #ifdef CONFIG_X86 642 - ccp_pci_exit(); 643 - #endif 644 - 645 - #ifdef CONFIG_ARM64 646 - ccp_platform_exit(); 647 - #endif 648 - } 649 - 650 - module_init(ccp_mod_init); 651 - module_exit(ccp_mod_exit);
+4 -17
drivers/crypto/ccp/ccp-dev.h
··· 27 27 #include <linux/irqreturn.h> 28 28 #include <linux/dmaengine.h> 29 29 30 + #include "sp-dev.h" 31 + 30 32 #define MAX_CCP_NAME_LEN 16 31 33 #define MAX_DMAPOOL_NAME_LEN 32 32 34 ··· 346 344 char rngname[MAX_CCP_NAME_LEN]; 347 345 348 346 struct device *dev; 347 + struct sp_device *sp; 349 348 350 349 /* Bus specific device information 351 350 */ ··· 365 362 * them. 366 363 */ 367 364 struct mutex req_mutex ____cacheline_aligned; 368 - void __iomem *io_map; 369 365 void __iomem *io_regs; 370 366 371 367 /* Master lists that all cmds are queued on. Because there can be ··· 639 637 640 638 extern void ccp_log_error(struct ccp_device *, int); 641 639 642 - struct ccp_device *ccp_alloc_struct(struct device *dev); 640 + struct ccp_device *ccp_alloc_struct(struct sp_device *sp); 643 641 bool ccp_queues_suspended(struct ccp_device *ccp); 644 642 int ccp_cmd_queue_thread(void *data); 645 643 int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait); ··· 653 651 654 652 void ccp5_debugfs_setup(struct ccp_device *ccp); 655 653 void ccp5_debugfs_destroy(void); 656 - 657 - int ccp_dev_init(struct ccp_device *ccp); 658 - void ccp_dev_destroy(struct ccp_device *ccp); 659 - int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state); 660 - int ccp_dev_resume(struct ccp_device *ccp); 661 654 662 655 /* Structure for computation functions that are device-specific */ 663 656 struct ccp_actions { ··· 669 672 int (*init)(struct ccp_device *); 670 673 void (*destroy)(struct ccp_device *); 671 674 irqreturn_t (*irqhandler)(int, void *); 672 - }; 673 - 674 - /* Structure to hold CCP version-specific values */ 675 - struct ccp_vdata { 676 - const unsigned int version; 677 - const unsigned int dma_chan_attr; 678 - void (*setup)(struct ccp_device *); 679 - const struct ccp_actions *perform; 680 - const unsigned int bar; 681 - const unsigned int offset; 682 675 }; 683 676 684 677 extern const struct ccp_vdata ccpv3_platform;
+52 -29
drivers/crypto/ccp/ccp-pci.c
··· 40 40 41 41 static int ccp_get_msix_irqs(struct ccp_device *ccp) 42 42 { 43 - struct ccp_pci *ccp_pci = ccp->dev_specific; 43 + struct sp_device *sp = ccp->sp; 44 + struct ccp_pci *ccp_pci = sp->dev_specific; 44 45 struct device *dev = ccp->dev; 45 46 struct pci_dev *pdev = to_pci_dev(dev); 46 47 struct msix_entry msix_entry[MSIX_VECTORS]; ··· 59 58 for (v = 0; v < ccp_pci->msix_count; v++) { 60 59 /* Set the interrupt names and request the irqs */ 61 60 snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", 62 - ccp->name, v); 61 + sp->name, v); 63 62 ccp_pci->msix[v].vector = msix_entry[v].vector; 64 63 ret = request_irq(ccp_pci->msix[v].vector, 65 64 ccp->vdata->perform->irqhandler, 66 - 0, ccp_pci->msix[v].name, dev); 65 + 0, ccp_pci->msix[v].name, ccp); 67 66 if (ret) { 68 67 dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", 69 68 ret); ··· 87 86 88 87 static int ccp_get_msi_irq(struct ccp_device *ccp) 89 88 { 89 + struct sp_device *sp = ccp->sp; 90 90 struct device *dev = ccp->dev; 91 91 struct pci_dev *pdev = to_pci_dev(dev); 92 92 int ret; ··· 98 96 99 97 ccp->irq = pdev->irq; 100 98 ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, 101 - ccp->name, dev); 99 + sp->name, ccp); 102 100 if (ret) { 103 101 dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); 104 102 goto e_msi; ··· 136 134 137 135 static void ccp_free_irqs(struct ccp_device *ccp) 138 136 { 139 - struct ccp_pci *ccp_pci = ccp->dev_specific; 137 + struct sp_device *sp = ccp->sp; 138 + struct ccp_pci *ccp_pci = sp->dev_specific; 140 139 struct device *dev = ccp->dev; 141 140 struct pci_dev *pdev = to_pci_dev(dev); 142 141 143 142 if (ccp_pci->msix_count) { 144 143 while (ccp_pci->msix_count--) 145 144 free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, 146 - dev); 145 + ccp); 147 146 pci_disable_msix(pdev); 148 147 } else if (ccp->irq) { 149 - free_irq(ccp->irq, dev); 148 + free_irq(ccp->irq, ccp); 150 149 pci_disable_msi(pdev); 151 150 } 152 151 ccp->irq = 0; ··· 155 152 
156 153 static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 157 154 { 158 - struct ccp_device *ccp; 155 + struct sp_device *sp; 159 156 struct ccp_pci *ccp_pci; 160 157 struct device *dev = &pdev->dev; 161 158 void __iomem * const *iomap_table; ··· 163 160 int ret; 164 161 165 162 ret = -ENOMEM; 166 - ccp = ccp_alloc_struct(dev); 167 - if (!ccp) 163 + sp = sp_alloc_struct(dev); 164 + if (!sp) 168 165 goto e_err; 169 166 170 167 ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL); 171 168 if (!ccp_pci) 172 169 goto e_err; 173 170 174 - ccp->dev_specific = ccp_pci; 175 - ccp->vdata = (struct ccp_vdata *)id->driver_data; 176 - if (!ccp->vdata || !ccp->vdata->version) { 171 + sp->dev_specific = ccp_pci; 172 + sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data; 173 + if (!sp->dev_vdata) { 177 174 ret = -ENODEV; 178 175 dev_err(dev, "missing driver data\n"); 179 176 goto e_err; 180 177 } 181 - ccp->get_irq = ccp_get_irqs; 182 - ccp->free_irq = ccp_free_irqs; 178 + sp->get_irq = ccp_get_irqs; 179 + sp->free_irq = ccp_free_irqs; 183 180 184 181 ret = pcim_enable_device(pdev); 185 182 if (ret) { ··· 201 198 goto e_err; 202 199 } 203 200 204 - ccp->io_map = iomap_table[ccp->vdata->bar]; 205 - if (!ccp->io_map) { 201 + sp->io_map = iomap_table[sp->dev_vdata->bar]; 202 + if (!sp->io_map) { 206 203 dev_err(dev, "ioremap failed\n"); 207 204 ret = -ENOMEM; 208 205 goto e_err; ··· 220 217 } 221 218 } 222 219 223 - dev_set_drvdata(dev, ccp); 220 + dev_set_drvdata(dev, sp); 224 221 225 - ret = ccp_dev_init(ccp); 222 + ret = sp_init(sp); 226 223 if (ret) 227 224 goto e_err; 228 225 ··· 238 235 static void ccp_pci_remove(struct pci_dev *pdev) 239 236 { 240 237 struct device *dev = &pdev->dev; 241 - struct ccp_device *ccp = dev_get_drvdata(dev); 238 + struct sp_device *sp = dev_get_drvdata(dev); 242 239 243 - if (!ccp) 240 + if (!sp) 244 241 return; 245 242 246 - ccp_dev_destroy(ccp); 243 + sp_destroy(sp); 247 244 248 245 dev_notice(dev, 
"disabled\n"); 249 246 } ··· 252 249 static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state) 253 250 { 254 251 struct device *dev = &pdev->dev; 255 - struct ccp_device *ccp = dev_get_drvdata(dev); 252 + struct sp_device *sp = dev_get_drvdata(dev); 256 253 257 - return ccp_dev_suspend(ccp, state); 254 + return sp_suspend(sp, state); 258 255 } 259 256 260 257 static int ccp_pci_resume(struct pci_dev *pdev) 261 258 { 262 259 struct device *dev = &pdev->dev; 263 - struct ccp_device *ccp = dev_get_drvdata(dev); 260 + struct sp_device *sp = dev_get_drvdata(dev); 264 261 265 - return ccp_dev_resume(ccp); 262 + return sp_resume(sp); 266 263 } 267 264 #endif 268 265 266 + static const struct sp_dev_vdata dev_vdata[] = { 267 + { 268 + .bar = 2, 269 + #ifdef CONFIG_CRYPTO_DEV_SP_CCP 270 + .ccp_vdata = &ccpv3, 271 + #endif 272 + }, 273 + { 274 + .bar = 2, 275 + #ifdef CONFIG_CRYPTO_DEV_SP_CCP 276 + .ccp_vdata = &ccpv5a, 277 + #endif 278 + }, 279 + { 280 + .bar = 2, 281 + #ifdef CONFIG_CRYPTO_DEV_SP_CCP 282 + .ccp_vdata = &ccpv5b, 283 + #endif 284 + }, 285 + }; 269 286 static const struct pci_device_id ccp_pci_table[] = { 270 - { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 }, 271 - { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a }, 272 - { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b }, 287 + { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, 288 + { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, 289 + { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, 273 290 /* Last entry must be zero */ 274 291 { 0, } 275 292 };
+38 -32
drivers/crypto/ccp/ccp-platform.c
··· 35 35 static const struct acpi_device_id ccp_acpi_match[]; 36 36 static const struct of_device_id ccp_of_match[]; 37 37 38 - static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev) 38 + static struct sp_dev_vdata *ccp_get_of_version(struct platform_device *pdev) 39 39 { 40 40 #ifdef CONFIG_OF 41 41 const struct of_device_id *match; 42 42 43 43 match = of_match_node(ccp_of_match, pdev->dev.of_node); 44 44 if (match && match->data) 45 - return (struct ccp_vdata *)match->data; 45 + return (struct sp_dev_vdata *)match->data; 46 46 #endif 47 47 return NULL; 48 48 } 49 49 50 - static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev) 50 + static struct sp_dev_vdata *ccp_get_acpi_version(struct platform_device *pdev) 51 51 { 52 52 #ifdef CONFIG_ACPI 53 53 const struct acpi_device_id *match; 54 54 55 55 match = acpi_match_device(ccp_acpi_match, &pdev->dev); 56 56 if (match && match->driver_data) 57 - return (struct ccp_vdata *)match->driver_data; 57 + return (struct sp_dev_vdata *)match->driver_data; 58 58 #endif 59 59 return NULL; 60 60 } ··· 73 73 74 74 ccp->irq = ret; 75 75 ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, 76 - ccp->name, dev); 76 + ccp->name, ccp); 77 77 if (ret) { 78 78 dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); 79 79 return ret; ··· 99 99 100 100 static void ccp_free_irqs(struct ccp_device *ccp) 101 101 { 102 - struct device *dev = ccp->dev; 103 - 104 - free_irq(ccp->irq, dev); 102 + free_irq(ccp->irq, ccp); 105 103 } 106 104 107 105 static int ccp_platform_probe(struct platform_device *pdev) 108 106 { 109 - struct ccp_device *ccp; 107 + struct sp_device *sp; 110 108 struct ccp_platform *ccp_platform; 111 109 struct device *dev = &pdev->dev; 112 110 enum dev_dma_attr attr; ··· 112 114 int ret; 113 115 114 116 ret = -ENOMEM; 115 - ccp = ccp_alloc_struct(dev); 116 - if (!ccp) 117 + sp = sp_alloc_struct(dev); 118 + if (!sp) 117 119 goto e_err; 118 120 119 121 ccp_platform = 
devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL); 120 122 if (!ccp_platform) 121 123 goto e_err; 122 124 123 - ccp->dev_specific = ccp_platform; 124 - ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev) 125 + sp->dev_specific = ccp_platform; 126 + sp->dev_vdata = pdev->dev.of_node ? ccp_get_of_version(pdev) 125 127 : ccp_get_acpi_version(pdev); 126 - if (!ccp->vdata || !ccp->vdata->version) { 128 + if (!sp->dev_vdata) { 127 129 ret = -ENODEV; 128 130 dev_err(dev, "missing driver data\n"); 129 131 goto e_err; 130 132 } 131 - ccp->get_irq = ccp_get_irqs; 132 - ccp->free_irq = ccp_free_irqs; 133 + sp->get_irq = ccp_get_irqs; 134 + sp->free_irq = ccp_free_irqs; 133 135 134 136 ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); 135 - ccp->io_map = devm_ioremap_resource(dev, ior); 136 - if (IS_ERR(ccp->io_map)) { 137 - ret = PTR_ERR(ccp->io_map); 137 + sp->io_map = devm_ioremap_resource(dev, ior); 138 + if (IS_ERR(sp->io_map)) { 139 + ret = PTR_ERR(sp->io_map); 138 140 goto e_err; 139 141 } 140 - ccp->io_regs = ccp->io_map; 141 142 142 143 attr = device_get_dma_attr(dev); 143 144 if (attr == DEV_DMA_NOT_SUPPORTED) { ··· 146 149 147 150 ccp_platform->coherent = (attr == DEV_DMA_COHERENT); 148 151 if (ccp_platform->coherent) 149 - ccp->axcache = CACHE_WB_NO_ALLOC; 152 + sp->axcache = CACHE_WB_NO_ALLOC; 150 153 else 151 - ccp->axcache = CACHE_NONE; 154 + sp->axcache = CACHE_NONE; 152 155 153 156 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); 154 157 if (ret) { ··· 156 159 goto e_err; 157 160 } 158 161 159 - dev_set_drvdata(dev, ccp); 162 + dev_set_drvdata(dev, sp); 160 163 161 - ret = ccp_dev_init(ccp); 164 + ret = sp_init(sp); 162 165 if (ret) 163 166 goto e_err; 164 167 ··· 174 177 static int ccp_platform_remove(struct platform_device *pdev) 175 178 { 176 179 struct device *dev = &pdev->dev; 177 - struct ccp_device *ccp = dev_get_drvdata(dev); 180 + struct sp_device *sp = dev_get_drvdata(dev); 178 181 179 - ccp_dev_destroy(ccp); 182 + sp_destroy(sp); 
180 183 181 184 dev_notice(dev, "disabled\n"); 182 185 ··· 188 191 pm_message_t state) 189 192 { 190 193 struct device *dev = &pdev->dev; 191 - struct ccp_device *ccp = dev_get_drvdata(dev); 194 + struct sp_device *sp = dev_get_drvdata(dev); 192 195 193 - return ccp_dev_suspend(ccp, state); 196 + return sp_suspend(sp, state); 194 197 } 195 198 196 199 static int ccp_platform_resume(struct platform_device *pdev) 197 200 { 198 201 struct device *dev = &pdev->dev; 199 - struct ccp_device *ccp = dev_get_drvdata(dev); 202 + struct sp_device *sp = dev_get_drvdata(dev); 200 203 201 - return ccp_dev_resume(ccp); 204 + return sp_resume(sp); 202 205 } 203 206 #endif 204 207 208 + static const struct sp_dev_vdata dev_vdata[] = { 209 + { 210 + .bar = 0, 211 + #ifdef CONFIG_CRYPTO_DEV_SP_CCP 212 + .ccp_vdata = &ccpv3_platform, 213 + #endif 214 + }, 215 + }; 216 + 205 217 #ifdef CONFIG_ACPI 206 218 static const struct acpi_device_id ccp_acpi_match[] = { 207 - { "AMDI0C00", (kernel_ulong_t)&ccpv3 }, 219 + { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] }, 208 220 { }, 209 221 }; 210 222 MODULE_DEVICE_TABLE(acpi, ccp_acpi_match); ··· 222 216 #ifdef CONFIG_OF 223 217 static const struct of_device_id ccp_of_match[] = { 224 218 { .compatible = "amd,ccp-seattle-v1a", 225 - .data = (const void *)&ccpv3_platform }, 219 + .data = (const void *)&dev_vdata[0] }, 226 220 { }, 227 221 }; 228 222 MODULE_DEVICE_TABLE(of, ccp_of_match);
+182
drivers/crypto/ccp/sp-dev.c
··· 1 + /* 2 + * AMD Secure Processor driver 3 + * 4 + * Copyright (C) 2017 Advanced Micro Devices, Inc. 5 + * 6 + * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 + * Author: Gary R Hook <gary.hook@amd.com> 8 + * Author: Brijesh Singh <brijesh.singh@amd.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + */ 14 + 15 + #include <linux/module.h> 16 + #include <linux/kernel.h> 17 + #include <linux/kthread.h> 18 + #include <linux/sched.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/spinlock.h> 21 + #include <linux/spinlock_types.h> 22 + #include <linux/types.h> 23 + #include <linux/ccp.h> 24 + 25 + #include "ccp-dev.h" 26 + #include "sp-dev.h" 27 + 28 + MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); 29 + MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>"); 30 + MODULE_LICENSE("GPL"); 31 + MODULE_VERSION("1.1.0"); 32 + MODULE_DESCRIPTION("AMD Secure Processor driver"); 33 + 34 + /* List of SPs, SP count, read-write access lock, and access functions 35 + * 36 + * Lock structure: get sp_unit_lock for reading whenever we need to 37 + * examine the SP list. 
38 + */ 39 + static DEFINE_RWLOCK(sp_unit_lock); 40 + static LIST_HEAD(sp_units); 41 + 42 + /* Ever-increasing value to produce unique unit numbers */ 43 + static atomic_t sp_ordinal; 44 + 45 + static void sp_add_device(struct sp_device *sp) 46 + { 47 + unsigned long flags; 48 + 49 + write_lock_irqsave(&sp_unit_lock, flags); 50 + 51 + list_add_tail(&sp->entry, &sp_units); 52 + 53 + write_unlock_irqrestore(&sp_unit_lock, flags); 54 + } 55 + 56 + static void sp_del_device(struct sp_device *sp) 57 + { 58 + unsigned long flags; 59 + 60 + write_lock_irqsave(&sp_unit_lock, flags); 61 + 62 + list_del(&sp->entry); 63 + 64 + write_unlock_irqrestore(&sp_unit_lock, flags); 65 + } 66 + 67 + /** 68 + * sp_alloc_struct - allocate and initialize the sp_device struct 69 + * 70 + * @dev: device struct of the SP 71 + */ 72 + struct sp_device *sp_alloc_struct(struct device *dev) 73 + { 74 + struct sp_device *sp; 75 + 76 + sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL); 77 + if (!sp) 78 + return NULL; 79 + 80 + sp->dev = dev; 81 + sp->ord = atomic_inc_return(&sp_ordinal); 82 + snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord); 83 + 84 + return sp; 85 + } 86 + 87 + int sp_init(struct sp_device *sp) 88 + { 89 + sp_add_device(sp); 90 + 91 + if (sp->dev_vdata->ccp_vdata) 92 + ccp_dev_init(sp); 93 + 94 + return 0; 95 + } 96 + 97 + void sp_destroy(struct sp_device *sp) 98 + { 99 + if (sp->dev_vdata->ccp_vdata) 100 + ccp_dev_destroy(sp); 101 + 102 + sp_del_device(sp); 103 + } 104 + 105 + #ifdef CONFIG_PM 106 + int sp_suspend(struct sp_device *sp, pm_message_t state) 107 + { 108 + int ret; 109 + 110 + if (sp->dev_vdata->ccp_vdata) { 111 + ret = ccp_dev_suspend(sp, state); 112 + if (ret) 113 + return ret; 114 + } 115 + 116 + return 0; 117 + } 118 + 119 + int sp_resume(struct sp_device *sp) 120 + { 121 + int ret; 122 + 123 + if (sp->dev_vdata->ccp_vdata) { 124 + ret = ccp_dev_resume(sp); 125 + if (ret) 126 + return ret; 127 + } 128 + 129 + return 0; 130 + } 131 + #endif 132 + 133 + static 
int __init sp_mod_init(void) 134 + { 135 + #ifdef CONFIG_X86 136 + int ret; 137 + 138 + ret = ccp_pci_init(); 139 + if (ret) 140 + return ret; 141 + 142 + /* Don't leave the driver loaded if init failed */ 143 + if (ccp_present() != 0) { 144 + ccp_pci_exit(); 145 + return -ENODEV; 146 + } 147 + 148 + return 0; 149 + #endif 150 + 151 + #ifdef CONFIG_ARM64 152 + int ret; 153 + 154 + ret = ccp_platform_init(); 155 + if (ret) 156 + return ret; 157 + 158 + /* Don't leave the driver loaded if init failed */ 159 + if (ccp_present() != 0) { 160 + ccp_platform_exit(); 161 + return -ENODEV; 162 + } 163 + 164 + return 0; 165 + #endif 166 + 167 + return -ENODEV; 168 + } 169 + 170 + static void __exit sp_mod_exit(void) 171 + { 172 + #ifdef CONFIG_X86 173 + ccp_pci_exit(); 174 + #endif 175 + 176 + #ifdef CONFIG_ARM64 177 + ccp_platform_exit(); 178 + #endif 179 + } 180 + 181 + module_init(sp_mod_init); 182 + module_exit(sp_mod_exit);
+120
drivers/crypto/ccp/sp-dev.h
··· 1 + /* 2 + * AMD Secure Processor driver 3 + * 4 + * Copyright (C) 2017 Advanced Micro Devices, Inc. 5 + * 6 + * Author: Tom Lendacky <thomas.lendacky@amd.com> 7 + * Author: Gary R Hook <gary.hook@amd.com> 8 + * Author: Brijesh Singh <brijesh.singh@amd.com> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + */ 14 + 15 + #ifndef __SP_DEV_H__ 16 + #define __SP_DEV_H__ 17 + 18 + #include <linux/device.h> 19 + #include <linux/pci.h> 20 + #include <linux/spinlock.h> 21 + #include <linux/mutex.h> 22 + #include <linux/list.h> 23 + #include <linux/wait.h> 24 + #include <linux/dmapool.h> 25 + #include <linux/hw_random.h> 26 + #include <linux/bitops.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/irqreturn.h> 29 + 30 + #define SP_MAX_NAME_LEN 32 31 + 32 + #define CACHE_NONE 0x00 33 + #define CACHE_WB_NO_ALLOC 0xb7 34 + 35 + /* Structure to hold CCP device data */ 36 + struct ccp_device; 37 + struct ccp_vdata { 38 + const unsigned int version; 39 + const unsigned int dma_chan_attr; 40 + void (*setup)(struct ccp_device *); 41 + const struct ccp_actions *perform; 42 + const unsigned int offset; 43 + }; 44 + /* Structure to hold SP device data */ 45 + struct sp_dev_vdata { 46 + const unsigned int bar; 47 + 48 + const struct ccp_vdata *ccp_vdata; 49 + void *psp_vdata; 50 + }; 51 + 52 + struct sp_device { 53 + struct list_head entry; 54 + 55 + struct device *dev; 56 + 57 + struct sp_dev_vdata *dev_vdata; 58 + unsigned int ord; 59 + char name[SP_MAX_NAME_LEN]; 60 + 61 + /* Bus specific device information */ 62 + void *dev_specific; 63 + 64 + /* I/O area used for device communication. 
*/ 65 + void __iomem *io_map; 66 + 67 + /* DMA caching attribute support */ 68 + unsigned int axcache; 69 + 70 + bool irq_registered; 71 + 72 + int (*get_irq)(struct ccp_device *ccp); 73 + void (*free_irq)(struct ccp_device *ccp); 74 + 75 + void *ccp_data; 76 + void *psp_data; 77 + }; 78 + 79 + int sp_pci_init(void); 80 + void sp_pci_exit(void); 81 + 82 + int sp_platform_init(void); 83 + void sp_platform_exit(void); 84 + 85 + struct sp_device *sp_alloc_struct(struct device *dev); 86 + 87 + int sp_init(struct sp_device *sp); 88 + void sp_destroy(struct sp_device *sp); 89 + struct sp_device *sp_get_master(void); 90 + 91 + int sp_suspend(struct sp_device *sp, pm_message_t state); 92 + int sp_resume(struct sp_device *sp); 93 + 94 + #ifdef CONFIG_CRYPTO_DEV_SP_CCP 95 + 96 + int ccp_dev_init(struct sp_device *sp); 97 + void ccp_dev_destroy(struct sp_device *sp); 98 + 99 + int ccp_dev_suspend(struct sp_device *sp, pm_message_t state); 100 + int ccp_dev_resume(struct sp_device *sp); 101 + 102 + #else /* !CONFIG_CRYPTO_DEV_SP_CCP */ 103 + 104 + static inline int ccp_dev_init(struct sp_device *sp) 105 + { 106 + return 0; 107 + } 108 + static inline void ccp_dev_destroy(struct sp_device *sp) { } 109 + 110 + static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state) 111 + { 112 + return 0; 113 + } 114 + static inline int ccp_dev_resume(struct sp_device *sp) 115 + { 116 + return 0; 117 + } 118 + #endif /* CONFIG_CRYPTO_DEV_SP_CCP */ 119 + 120 + #endif
+3 -4
include/linux/ccp.h
··· 23 23 struct ccp_device; 24 24 struct ccp_cmd; 25 25 26 - #if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \ 27 - defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE) 26 + #if defined(CONFIG_CRYPTO_DEV_SP_CCP) 28 27 29 28 /** 30 29 * ccp_present - check if a CCP device is present ··· 69 70 */ 70 71 int ccp_enqueue_cmd(struct ccp_cmd *cmd); 71 72 72 - #else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */ 73 + #else /* CONFIG_CRYPTO_DEV_SP_CCP is not enabled */ 73 74 74 75 static inline int ccp_present(void) 75 76 { ··· 86 87 return -ENODEV; 87 88 } 88 89 89 - #endif /* CONFIG_CRYPTO_DEV_CCP_DD */ 90 + #endif /* CONFIG_CRYPTO_DEV_SP_CCP */ 90 91 91 92 92 93 /***** AES engine *****/