Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: ccp - Use devres interface to allocate PCI/iomap and cleanup

Update the PCI and platform files to use the devres interface to allocate the
PCI and iomap resources. Also add helper functions to consolidate duplicated
module init, exit and power management code.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
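[Editor's note: for readers unfamiliar with the devres (managed resource) pattern this patch adopts, here is a minimal, self-contained sketch of a devres-based PCI probe. The pcim_*/devm_* calls are the real managed-device API; the driver name, my_pci_probe and MY_BAR are hypothetical placeholders, not part of this patch.

#include <linux/pci.h>
#include <linux/ioport.h>

#define MY_BAR	2	/* hypothetical BAR holding the MMIO registers */

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap_table;
	int bar_mask;
	int ret;

	/* Managed enable: the device is disabled automatically on detach */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Request and ioremap every memory BAR in one managed call */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, "my-driver");
	if (ret)
		return ret;

	/* Table of mapped addresses, indexed by BAR number */
	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table || !iomap_table[MY_BAR])
		return -ENOMEM;

	/*
	 * No unwind labels are needed here: devres releases everything
	 * acquired above when probe fails or the driver is unbound. This
	 * is what lets the patch delete the manual pci_iounmap(),
	 * pci_disable_device() and pci_release_regions() error paths.
	 */
	return 0;
}

Because teardown is handled by devres, the matching remove callback only has to undo driver-specific state, which is exactly the simplification visible in the ccp-pci.c hunks below.]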

authored by Brijesh Singh, committed by Herbert Xu
970e8303 a0a613ab

5 files changed, 108 insertions(+), 140 deletions(-)
drivers/crypto/ccp/ccp-dev-v3.c (+7)
@@ -586,6 +586,13 @@
 	.irqhandler = ccp_irq_handler,
 };
 
+const struct ccp_vdata ccpv3_platform = {
+	.version = CCP_VERSION(3, 0),
+	.setup = NULL,
+	.perform = &ccp3_actions,
+	.offset = 0,
+};
+
 const struct ccp_vdata ccpv3 = {
 	.version = CCP_VERSION(3, 0),
 	.setup = NULL,
drivers/crypto/ccp/ccp-dev.c (+61)
@@ -539,7 +539,68 @@
 
 	return ccp->cmd_q_count == suspended;
 }
+
+int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 1;
+
+	/* Wake all the queue kthreads to prepare for suspend */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		wake_up_process(ccp->cmd_q[i].kthread);
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	/* Wait for all queue kthreads to say they're done */
+	while (!ccp_queues_suspended(ccp))
+		wait_event_interruptible(ccp->suspend_queue,
+					 ccp_queues_suspended(ccp));
+
+	return 0;
+}
+
+int ccp_dev_resume(struct ccp_device *ccp)
+{
+	unsigned long flags;
+	unsigned int i;
+
+	spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+	ccp->suspending = 0;
+
+	/* Wake up all the kthreads */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		ccp->cmd_q[i].suspended = 0;
+		wake_up_process(ccp->cmd_q[i].kthread);
+	}
+
+	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+	return 0;
+}
 #endif
+
+int ccp_dev_init(struct ccp_device *ccp)
+{
+	ccp->io_regs = ccp->io_map + ccp->vdata->offset;
+
+	if (ccp->vdata->setup)
+		ccp->vdata->setup(ccp);
+
+	return ccp->vdata->perform->init(ccp);
+}
+
+void ccp_dev_destroy(struct ccp_device *ccp)
+{
+	if (!ccp)
+		return;
+
+	ccp->vdata->perform->destroy(ccp);
+}
 
 static int __init ccp_mod_init(void)
 {
drivers/crypto/ccp/ccp-dev.h (+6)
@@ -652,6 +652,11 @@
 void ccp5_debugfs_setup(struct ccp_device *ccp);
 void ccp5_debugfs_destroy(void);
 
+int ccp_dev_init(struct ccp_device *ccp);
+void ccp_dev_destroy(struct ccp_device *ccp);
+int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state);
+int ccp_dev_resume(struct ccp_device *ccp);
+
 /* Structure for computation functions that are device-specific */
 struct ccp_actions {
 	int (*aes)(struct ccp_op *);
@@ -679,6 +684,7 @@
 	const unsigned int offset;
 };
 
+extern const struct ccp_vdata ccpv3_platform;
 extern const struct ccp_vdata ccpv3;
 extern const struct ccp_vdata ccpv5a;
 extern const struct ccp_vdata ccpv5b;
drivers/crypto/ccp/ccp-pci.c (+28 -90)
@@ -150,28 +150,13 @@
 	ccp->irq = 0;
 }
 
-static int ccp_find_mmio_area(struct ccp_device *ccp)
-{
-	struct device *dev = ccp->dev;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	resource_size_t io_len;
-	unsigned long io_flags;
-
-	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
-	io_len = pci_resource_len(pdev, ccp->vdata->bar);
-	if ((io_flags & IORESOURCE_MEM) &&
-	    (io_len >= (ccp->vdata->offset + 0x800)))
-		return ccp->vdata->bar;
-
-	return -EIO;
-}
-
 static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct ccp_device *ccp;
 	struct ccp_pci *ccp_pci;
 	struct device *dev = &pdev->dev;
-	unsigned int bar;
+	void __iomem * const *iomap_table;
+	int bar_mask;
 	int ret;
 
 	ret = -ENOMEM;
@@ -178,32 +193,34 @@
 	ccp->get_irq = ccp_get_irqs;
 	ccp->free_irq = ccp_free_irqs;
 
-	ret = pci_request_regions(pdev, "ccp");
+	ret = pcim_enable_device(pdev);
 	if (ret) {
-		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
+		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
 		goto e_err;
 	}
 
-	ret = pci_enable_device(pdev);
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
 	if (ret) {
-		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
-		goto e_regions;
+		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+		goto e_err;
+	}
+
+	iomap_table = pcim_iomap_table(pdev);
+	if (!iomap_table) {
+		dev_err(dev, "pcim_iomap_table failed\n");
+		ret = -ENOMEM;
+		goto e_err;
+	}
+
+	ccp->io_map = iomap_table[ccp->vdata->bar];
+	if (!ccp->io_map) {
+		dev_err(dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto e_err;
 	}
 
 	pci_set_master(pdev);
-
-	ret = ccp_find_mmio_area(ccp);
-	if (ret < 0)
-		goto e_device;
-	bar = ret;
-
-	ret = -EIO;
-	ccp->io_map = pci_iomap(pdev, bar, 0);
-	if (!ccp->io_map) {
-		dev_err(dev, "pci_iomap failed\n");
-		goto e_device;
-	}
-	ccp->io_regs = ccp->io_map + ccp->vdata->offset;
 
 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 	if (ret) {
@@ -213,31 +226,19 @@
 		if (ret) {
 			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
 				ret);
-			goto e_iomap;
+			goto e_err;
 		}
 	}
 
 	dev_set_drvdata(dev, ccp);
 
-	if (ccp->vdata->setup)
-		ccp->vdata->setup(ccp);
-
-	ret = ccp->vdata->perform->init(ccp);
+	ret = ccp_dev_init(ccp);
 	if (ret)
-		goto e_iomap;
+		goto e_err;
 
 	dev_notice(dev, "enabled\n");
 
 	return 0;
-
-e_iomap:
-	pci_iounmap(pdev, ccp->io_map);
-
-e_device:
-	pci_disable_device(pdev);
-
-e_regions:
-	pci_release_regions(pdev);
 
 e_err:
 	dev_notice(dev, "initialization failed\n");
@@ -240,13 +265,7 @@
 	if (!ccp)
 		return;
 
-	ccp->vdata->perform->destroy(ccp);
-
-	pci_iounmap(pdev, ccp->io_map);
-
-	pci_disable_device(pdev);
-
-	pci_release_regions(pdev);
+	ccp_dev_destroy(ccp);
 
 	dev_notice(dev, "disabled\n");
 }
@@ -250,47 +281,16 @@
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
 
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 1;
-
-	/* Wake all the queue kthreads to prepare for suspend */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		wake_up_process(ccp->cmd_q[i].kthread);
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	/* Wait for all queue kthreads to say they're done */
-	while (!ccp_queues_suspended(ccp))
-		wait_event_interruptible(ccp->suspend_queue,
-					 ccp_queues_suspended(ccp));
-
-	return 0;
+	return ccp_dev_suspend(ccp, state);
 }
 
 static int ccp_pci_resume(struct pci_dev *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
 
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 0;
-
-	/* Wake up all the kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		ccp->cmd_q[i].suspended = 0;
-		wake_up_process(ccp->cmd_q[i].kthread);
-	}
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	return 0;
+	return ccp_dev_resume(ccp);
 }
 #endif
 
drivers/crypto/ccp/ccp-platform.c (+6 -50)
@@ -104,19 +104,6 @@
 	free_irq(ccp->irq, dev);
 }
 
-static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
-{
-	struct device *dev = ccp->dev;
-	struct platform_device *pdev = to_platform_device(dev);
-	struct resource *ior;
-
-	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (ior && (resource_size(ior) >= 0x800))
-		return ior;
-
-	return NULL;
-}
-
 static int ccp_platform_probe(struct platform_device *pdev)
 {
 	struct ccp_device *ccp;
@@ -133,7 +146,7 @@
 	ccp->get_irq = ccp_get_irqs;
 	ccp->free_irq = ccp_free_irqs;
 
-	ior = ccp_find_mmio_area(ccp);
+	ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	ccp->io_map = devm_ioremap_resource(dev, ior);
 	if (IS_ERR(ccp->io_map)) {
 		ret = PTR_ERR(ccp->io_map);
@@ -161,6 +174,6 @@
 
 	dev_set_drvdata(dev, ccp);
 
-	ret = ccp->vdata->perform->init(ccp);
+	ret = ccp_dev_init(ccp);
 	if (ret)
 		goto e_err;
@@ -179,7 +192,7 @@
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
 
-	ccp->vdata->perform->destroy(ccp);
+	ccp_dev_destroy(ccp);
 
 	dev_notice(dev, "disabled\n");
 
@@ -192,47 +205,16 @@
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
 
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 1;
-
-	/* Wake all the queue kthreads to prepare for suspend */
-	for (i = 0; i < ccp->cmd_q_count; i++)
-		wake_up_process(ccp->cmd_q[i].kthread);
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	/* Wait for all queue kthreads to say they're done */
-	while (!ccp_queues_suspended(ccp))
-		wait_event_interruptible(ccp->suspend_queue,
-					 ccp_queues_suspended(ccp));
-
-	return 0;
+	return ccp_dev_suspend(ccp, state);
 }
 
 static int ccp_platform_resume(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ccp_device *ccp = dev_get_drvdata(dev);
-	unsigned long flags;
-	unsigned int i;
 
-	spin_lock_irqsave(&ccp->cmd_lock, flags);
-
-	ccp->suspending = 0;
-
-	/* Wake up all the kthreads */
-	for (i = 0; i < ccp->cmd_q_count; i++) {
-		ccp->cmd_q[i].suspended = 0;
-		wake_up_process(ccp->cmd_q[i].kthread);
-	}
-
-	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	return 0;
+	return ccp_dev_resume(ccp);
 }
 #endif
 
@@ -216,7 +260,7 @@
 #ifdef CONFIG_OF
 static const struct of_device_id ccp_of_match[] = {
 	{ .compatible = "amd,ccp-seattle-v1a",
-	  .data = (const void *)&ccpv3 },
+	  .data = (const void *)&ccpv3_platform },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ccp_of_match);