Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccp - Abstract interrupt registration

The CCP and PSP devices that are part of the AMD Secure Processor may share the same
interrupt. Hence we expand the SP device to register a common interrupt
handler and provide functions to CCP and PSP devices to register their
interrupt callback which will be invoked upon interrupt.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Brijesh Singh and committed by
Herbert Xu
f4d18d65 720419f0

+188 -115
+3 -3
drivers/crypto/ccp/ccp-dev-v3.c
··· 453 453 iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); 454 454 455 455 /* Request an irq */ 456 - ret = ccp->get_irq(ccp); 456 + ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp); 457 457 if (ret) { 458 458 dev_err(dev, "unable to allocate an IRQ\n"); 459 459 goto e_pool; ··· 510 510 if (ccp->cmd_q[i].kthread) 511 511 kthread_stop(ccp->cmd_q[i].kthread); 512 512 513 - ccp->free_irq(ccp); 513 + sp_free_ccp_irq(ccp->sp, ccp); 514 514 515 515 e_pool: 516 516 for (i = 0; i < ccp->cmd_q_count; i++) ··· 549 549 if (ccp->cmd_q[i].kthread) 550 550 kthread_stop(ccp->cmd_q[i].kthread); 551 551 552 - ccp->free_irq(ccp); 552 + sp_free_ccp_irq(ccp->sp, ccp); 553 553 554 554 for (i = 0; i < ccp->cmd_q_count; i++) 555 555 dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+3 -4
drivers/crypto/ccp/ccp-dev-v5.c
··· 880 880 881 881 dev_dbg(dev, "Requesting an IRQ...\n"); 882 882 /* Request an irq */ 883 - ret = ccp->get_irq(ccp); 883 + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); 884 884 if (ret) { 885 885 dev_err(dev, "unable to allocate an IRQ\n"); 886 886 goto e_pool; ··· 986 986 kthread_stop(ccp->cmd_q[i].kthread); 987 987 988 988 e_irq: 989 - ccp->free_irq(ccp); 989 + sp_free_ccp_irq(ccp->sp, ccp); 990 990 991 991 e_pool: 992 992 for (i = 0; i < ccp->cmd_q_count; i++) ··· 1036 1036 if (ccp->cmd_q[i].kthread) 1037 1037 kthread_stop(ccp->cmd_q[i].kthread); 1038 1038 1039 - ccp->free_irq(ccp); 1039 + sp_free_ccp_irq(ccp->sp, ccp); 1040 1040 1041 1041 for (i = 0; i < ccp->cmd_q_count; i++) { 1042 1042 cmd_q = &ccp->cmd_q[i]; ··· 1105 1105 .init = ccp5_init, 1106 1106 .destroy = ccp5_destroy, 1107 1107 .get_free_slots = ccp5_get_free_slots, 1108 - .irqhandler = ccp5_irq_handler, 1109 1108 }; 1110 1109 1111 1110 const struct ccp_vdata ccpv5a = {
+1 -2
drivers/crypto/ccp/ccp-dev.c
··· 600 600 goto e_err; 601 601 } 602 602 603 - ccp->get_irq = sp->get_irq; 604 - ccp->free_irq = sp->free_irq; 603 + ccp->use_tasklet = sp->use_tasklet; 605 604 606 605 ccp->io_regs = sp->io_map + ccp->vdata->offset; 607 606 if (ccp->vdata->setup)
-2
drivers/crypto/ccp/ccp-dev.h
··· 351 351 /* Bus specific device information 352 352 */ 353 353 void *dev_specific; 354 - int (*get_irq)(struct ccp_device *ccp); 355 - void (*free_irq)(struct ccp_device *ccp); 356 354 unsigned int qim; 357 355 unsigned int irq; 358 356 bool use_tasklet;
+31 -72
drivers/crypto/ccp/ccp-pci.c
··· 28 28 29 29 #define MSIX_VECTORS 2 30 30 31 - struct ccp_msix { 32 - u32 vector; 33 - char name[16]; 34 - }; 35 - 36 31 struct ccp_pci { 37 32 int msix_count; 38 - struct ccp_msix msix[MSIX_VECTORS]; 33 + struct msix_entry msix_entry[MSIX_VECTORS]; 39 34 }; 40 35 41 - static int ccp_get_msix_irqs(struct ccp_device *ccp) 36 + static int ccp_get_msix_irqs(struct sp_device *sp) 42 37 { 43 - struct sp_device *sp = ccp->sp; 44 38 struct ccp_pci *ccp_pci = sp->dev_specific; 45 - struct device *dev = ccp->dev; 39 + struct device *dev = sp->dev; 46 40 struct pci_dev *pdev = to_pci_dev(dev); 47 - struct msix_entry msix_entry[MSIX_VECTORS]; 48 - unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1; 49 41 int v, ret; 50 42 51 - for (v = 0; v < ARRAY_SIZE(msix_entry); v++) 52 - msix_entry[v].entry = v; 43 + for (v = 0; v < ARRAY_SIZE(ccp_pci->msix_entry); v++) 44 + ccp_pci->msix_entry[v].entry = v; 53 45 54 - ret = pci_enable_msix_range(pdev, msix_entry, 1, v); 46 + ret = pci_enable_msix_range(pdev, ccp_pci->msix_entry, 1, v); 55 47 if (ret < 0) 56 48 return ret; 57 49 58 50 ccp_pci->msix_count = ret; 59 - for (v = 0; v < ccp_pci->msix_count; v++) { 60 - /* Set the interrupt names and request the irqs */ 61 - snprintf(ccp_pci->msix[v].name, name_len, "%s-%u", 62 - sp->name, v); 63 - ccp_pci->msix[v].vector = msix_entry[v].vector; 64 - ret = request_irq(ccp_pci->msix[v].vector, 65 - ccp->vdata->perform->irqhandler, 66 - 0, ccp_pci->msix[v].name, ccp); 67 - if (ret) { 68 - dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n", 69 - ret); 70 - goto e_irq; 71 - } 72 - } 73 - ccp->use_tasklet = true; 51 + sp->use_tasklet = true; 74 52 53 + sp->psp_irq = ccp_pci->msix_entry[0].vector; 54 + sp->ccp_irq = (ccp_pci->msix_count > 1) ? 
ccp_pci->msix_entry[1].vector 55 + : ccp_pci->msix_entry[0].vector; 75 56 return 0; 76 - 77 - e_irq: 78 - while (v--) 79 - free_irq(ccp_pci->msix[v].vector, dev); 80 - 81 - pci_disable_msix(pdev); 82 - 83 - ccp_pci->msix_count = 0; 84 - 85 - return ret; 86 57 } 87 58 88 - static int ccp_get_msi_irq(struct ccp_device *ccp) 59 + static int ccp_get_msi_irq(struct sp_device *sp) 89 60 { 90 - struct sp_device *sp = ccp->sp; 91 - struct device *dev = ccp->dev; 61 + struct device *dev = sp->dev; 92 62 struct pci_dev *pdev = to_pci_dev(dev); 93 63 int ret; 94 64 ··· 66 96 if (ret) 67 97 return ret; 68 98 69 - ccp->irq = pdev->irq; 70 - ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, 71 - sp->name, ccp); 72 - if (ret) { 73 - dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret); 74 - goto e_msi; 75 - } 76 - ccp->use_tasklet = true; 99 + sp->ccp_irq = pdev->irq; 100 + sp->psp_irq = pdev->irq; 77 101 78 102 return 0; 79 - 80 - e_msi: 81 - pci_disable_msi(pdev); 82 - 83 - return ret; 84 103 } 85 104 86 - static int ccp_get_irqs(struct ccp_device *ccp) 105 + static int ccp_get_irqs(struct sp_device *sp) 87 106 { 88 - struct device *dev = ccp->dev; 107 + struct device *dev = sp->dev; 89 108 int ret; 90 109 91 - ret = ccp_get_msix_irqs(ccp); 110 + ret = ccp_get_msix_irqs(sp); 92 111 if (!ret) 93 112 return 0; 94 113 95 114 /* Couldn't get MSI-X vectors, try MSI */ 96 115 dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); 97 - ret = ccp_get_msi_irq(ccp); 116 + ret = ccp_get_msi_irq(sp); 98 117 if (!ret) 99 118 return 0; 100 119 ··· 93 134 return ret; 94 135 } 95 136 96 - static void ccp_free_irqs(struct ccp_device *ccp) 137 + static void ccp_free_irqs(struct sp_device *sp) 97 138 { 98 - struct sp_device *sp = ccp->sp; 99 139 struct ccp_pci *ccp_pci = sp->dev_specific; 100 - struct device *dev = ccp->dev; 140 + struct device *dev = sp->dev; 101 141 struct pci_dev *pdev = to_pci_dev(dev); 102 142 103 - if (ccp_pci->msix_count) { 104 - while 
(ccp_pci->msix_count--) 105 - free_irq(ccp_pci->msix[ccp_pci->msix_count].vector, 106 - ccp); 143 + if (ccp_pci->msix_count) 107 144 pci_disable_msix(pdev); 108 - } else if (ccp->irq) { 109 - free_irq(ccp->irq, ccp); 145 + else if (sp->psp_irq) 110 146 pci_disable_msi(pdev); 111 - } 112 - ccp->irq = 0; 147 + 148 + sp->ccp_irq = 0; 149 + sp->psp_irq = 0; 113 150 } 114 151 115 152 static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ··· 133 178 dev_err(dev, "missing driver data\n"); 134 179 goto e_err; 135 180 } 136 - sp->get_irq = ccp_get_irqs; 137 - sp->free_irq = ccp_free_irqs; 138 181 139 182 ret = pcim_enable_device(pdev); 140 183 if (ret) { ··· 160 207 ret = -ENOMEM; 161 208 goto e_err; 162 209 } 210 + 211 + ret = ccp_get_irqs(sp); 212 + if (ret) 213 + goto e_err; 163 214 164 215 pci_set_master(pdev); 165 216 ··· 201 244 return; 202 245 203 246 sp_destroy(sp); 247 + 248 + ccp_free_irqs(sp); 204 249 205 250 dev_notice(dev, "disabled\n"); 206 251 }
+29 -30
drivers/crypto/ccp/ccp-platform.c
··· 30 30 31 31 struct ccp_platform { 32 32 int coherent; 33 + unsigned int irq_count; 33 34 }; 34 35 35 36 static const struct acpi_device_id ccp_acpi_match[]; ··· 60 59 return NULL; 61 60 } 62 61 63 - static int ccp_get_irq(struct ccp_device *ccp) 62 + static int ccp_get_irqs(struct sp_device *sp) 64 63 { 65 - struct device *dev = ccp->dev; 64 + struct ccp_platform *ccp_platform = sp->dev_specific; 65 + struct device *dev = sp->dev; 66 66 struct platform_device *pdev = to_platform_device(dev); 67 + unsigned int i, count; 67 68 int ret; 69 + 70 + for (i = 0, count = 0; i < pdev->num_resources; i++) { 71 + struct resource *res = &pdev->resource[i]; 72 + 73 + if (resource_type(res) == IORESOURCE_IRQ) 74 + count++; 75 + } 76 + 77 + ccp_platform->irq_count = count; 68 78 69 79 ret = platform_get_irq(pdev, 0); 70 80 if (ret < 0) { ··· 83 71 return ret; 84 72 } 85 73 86 - ccp->irq = ret; 87 - ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0, 88 - ccp->name, ccp); 89 - if (ret) { 90 - dev_notice(dev, "unable to allocate IRQ (%d)\n", ret); 91 - return ret; 74 + sp->psp_irq = ret; 75 + if (count == 1) { 76 + sp->ccp_irq = ret; 77 + } else { 78 + ret = platform_get_irq(pdev, 1); 79 + if (ret < 0) { 80 + dev_notice(dev, "unable to get IRQ (%d)\n", ret); 81 + return ret; 82 + } 83 + 84 + sp->ccp_irq = ret; 92 85 } 93 86 94 87 return 0; 95 - } 96 - 97 - static int ccp_get_irqs(struct ccp_device *ccp) 98 - { 99 - struct device *dev = ccp->dev; 100 - int ret; 101 - 102 - ret = ccp_get_irq(ccp); 103 - if (!ret) 104 - return 0; 105 - 106 - /* Couldn't get an interrupt */ 107 - dev_notice(dev, "could not enable interrupts (%d)\n", ret); 108 - 109 - return ret; 110 - } 111 - 112 - static void ccp_free_irqs(struct ccp_device *ccp) 113 - { 114 - free_irq(ccp->irq, ccp); 115 88 } 116 89 117 90 static int ccp_platform_probe(struct platform_device *pdev) ··· 125 128 dev_err(dev, "missing driver data\n"); 126 129 goto e_err; 127 130 } 128 - sp->get_irq = ccp_get_irqs; 129 - 
sp->free_irq = ccp_free_irqs; 130 131 131 132 ior = platform_get_resource(pdev, IORESOURCE_MEM, 0); 132 133 sp->io_map = devm_ioremap_resource(dev, ior); ··· 150 155 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); 151 156 goto e_err; 152 157 } 158 + 159 + ret = ccp_get_irqs(sp); 160 + if (ret) 161 + goto e_err; 153 162 154 163 dev_set_drvdata(dev, sp); 155 164
+107
drivers/crypto/ccp/sp-dev.c
··· 64 64 write_unlock_irqrestore(&sp_unit_lock, flags); 65 65 } 66 66 67 + static irqreturn_t sp_irq_handler(int irq, void *data) 68 + { 69 + struct sp_device *sp = data; 70 + 71 + if (sp->ccp_irq_handler) 72 + sp->ccp_irq_handler(irq, sp->ccp_irq_data); 73 + 74 + if (sp->psp_irq_handler) 75 + sp->psp_irq_handler(irq, sp->psp_irq_data); 76 + 77 + return IRQ_HANDLED; 78 + } 79 + 80 + int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, 81 + const char *name, void *data) 82 + { 83 + int ret; 84 + 85 + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { 86 + /* Need a common routine to manage all interrupts */ 87 + sp->ccp_irq_data = data; 88 + sp->ccp_irq_handler = handler; 89 + 90 + if (!sp->irq_registered) { 91 + ret = request_irq(sp->ccp_irq, sp_irq_handler, 0, 92 + sp->name, sp); 93 + if (ret) 94 + return ret; 95 + 96 + sp->irq_registered = true; 97 + } 98 + } else { 99 + /* Each sub-device can manage it's own interrupt */ 100 + ret = request_irq(sp->ccp_irq, handler, 0, name, data); 101 + if (ret) 102 + return ret; 103 + } 104 + 105 + return 0; 106 + } 107 + 108 + int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, 109 + const char *name, void *data) 110 + { 111 + int ret; 112 + 113 + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { 114 + /* Need a common routine to manage all interrupts */ 115 + sp->psp_irq_data = data; 116 + sp->psp_irq_handler = handler; 117 + 118 + if (!sp->irq_registered) { 119 + ret = request_irq(sp->psp_irq, sp_irq_handler, 0, 120 + sp->name, sp); 121 + if (ret) 122 + return ret; 123 + 124 + sp->irq_registered = true; 125 + } 126 + } else { 127 + /* Each sub-device can manage it's own interrupt */ 128 + ret = request_irq(sp->psp_irq, handler, 0, name, data); 129 + if (ret) 130 + return ret; 131 + } 132 + 133 + return 0; 134 + } 135 + 136 + void sp_free_ccp_irq(struct sp_device *sp, void *data) 137 + { 138 + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { 139 + /* 
Using common routine to manage all interrupts */ 140 + if (!sp->psp_irq_handler) { 141 + /* Nothing else using it, so free it */ 142 + free_irq(sp->ccp_irq, sp); 143 + 144 + sp->irq_registered = false; 145 + } 146 + 147 + sp->ccp_irq_handler = NULL; 148 + sp->ccp_irq_data = NULL; 149 + } else { 150 + /* Each sub-device can manage it's own interrupt */ 151 + free_irq(sp->ccp_irq, data); 152 + } 153 + } 154 + 155 + void sp_free_psp_irq(struct sp_device *sp, void *data) 156 + { 157 + if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { 158 + /* Using common routine to manage all interrupts */ 159 + if (!sp->ccp_irq_handler) { 160 + /* Nothing else using it, so free it */ 161 + free_irq(sp->psp_irq, sp); 162 + 163 + sp->irq_registered = false; 164 + } 165 + 166 + sp->psp_irq_handler = NULL; 167 + sp->psp_irq_data = NULL; 168 + } else { 169 + /* Each sub-device can manage it's own interrupt */ 170 + free_irq(sp->psp_irq, data); 171 + } 172 + } 173 + 67 174 /** 68 175 * sp_alloc_struct - allocate and initialize the sp_device struct 69 176 *
+14 -2
drivers/crypto/ccp/sp-dev.h
··· 68 68 unsigned int axcache; 69 69 70 70 bool irq_registered; 71 + bool use_tasklet; 71 72 72 - int (*get_irq)(struct ccp_device *ccp); 73 - void (*free_irq)(struct ccp_device *ccp); 73 + unsigned int ccp_irq; 74 + irq_handler_t ccp_irq_handler; 75 + void *ccp_irq_data; 76 + 77 + unsigned int psp_irq; 78 + irq_handler_t psp_irq_handler; 79 + void *psp_irq_data; 74 80 75 81 void *ccp_data; 76 82 void *psp_data; ··· 96 90 97 91 int sp_suspend(struct sp_device *sp, pm_message_t state); 98 92 int sp_resume(struct sp_device *sp); 93 + int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, 94 + const char *name, void *data); 95 + void sp_free_ccp_irq(struct sp_device *sp, void *data); 96 + int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, 97 + const char *name, void *data); 98 + void sp_free_psp_irq(struct sp_device *sp, void *data); 99 99 100 100 #ifdef CONFIG_CRYPTO_DEV_SP_CCP 101 101