Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: cavium - switch to pci_alloc_irq_vectors

pci_enable_msix has been long deprecated, but this driver adds a new
instance. Convert it to pci_alloc_irq_vectors and greatly simplify
the code, and make sure the probe function properly unwinds.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Christoph Hellwig and committed by
Herbert Xu
15c0b9ed 613844e8

+65 -141
-3
drivers/crypto/cavium/cpt/cptvf.h
··· 107 107 void __iomem *reg_base; /* Register start address */ 108 108 void *wqe_info; /* BH worker info */ 109 109 /* MSI-X */ 110 - bool msix_enabled; 111 - struct msix_entry msix_entries[CPT_VF_MSIX_VECTORS]; 112 - bool irq_allocated[CPT_VF_MSIX_VECTORS]; 113 110 cpumask_var_t affinity_mask[CPT_VF_MSIX_VECTORS]; 114 111 /* Command and Pending queues */ 115 112 u32 qsize;
+65 -138
drivers/crypto/cavium/cpt/cptvf_main.c
··· 357 357 return ret; 358 358 } 359 359 360 - static void cptvf_disable_msix(struct cpt_vf *cptvf) 360 + static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec) 361 361 { 362 - if (cptvf->msix_enabled) { 363 - pci_disable_msix(cptvf->pdev); 364 - cptvf->msix_enabled = 0; 365 - } 366 - } 367 - 368 - static int cptvf_enable_msix(struct cpt_vf *cptvf) 369 - { 370 - int i, ret; 371 - 372 - for (i = 0; i < CPT_VF_MSIX_VECTORS; i++) 373 - cptvf->msix_entries[i].entry = i; 374 - 375 - ret = pci_enable_msix(cptvf->pdev, cptvf->msix_entries, 376 - CPT_VF_MSIX_VECTORS); 377 - if (ret) { 378 - dev_err(&cptvf->pdev->dev, "Request for #%d msix vectors failed\n", 379 - CPT_VF_MSIX_VECTORS); 380 - return ret; 381 - } 382 - 383 - cptvf->msix_enabled = 1; 384 - /* Mark MSIX enabled */ 385 - cptvf->flags |= CPT_FLAG_MSIX_ENABLED; 386 - 387 - return 0; 388 - } 389 - 390 - static void cptvf_free_all_interrupts(struct cpt_vf *cptvf) 391 - { 392 - int irq; 393 - 394 - for (irq = 0; irq < CPT_VF_MSIX_VECTORS; irq++) { 395 - if (cptvf->irq_allocated[irq]) 396 - irq_set_affinity_hint(cptvf->msix_entries[irq].vector, 397 - NULL); 398 - free_cpumask_var(cptvf->affinity_mask[irq]); 399 - free_irq(cptvf->msix_entries[irq].vector, cptvf); 400 - cptvf->irq_allocated[irq] = false; 401 - } 362 + irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL); 363 + free_cpumask_var(cptvf->affinity_mask[vec]); 402 364 } 403 365 404 366 static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val) ··· 612 650 return IRQ_HANDLED; 613 651 } 614 652 615 - static int cptvf_register_misc_intr(struct cpt_vf *cptvf) 653 + static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec) 616 654 { 617 655 struct pci_dev *pdev = cptvf->pdev; 618 - int ret; 656 + int cpu; 619 657 620 - /* Register misc interrupt handlers */ 621 - ret = request_irq(cptvf->msix_entries[CPT_VF_INT_VEC_E_MISC].vector, 622 - cptvf_misc_intr_handler, 0, "CPT VF misc intr", 623 - cptvf); 624 - if (ret) 625 - goto fail; 
626 - 627 - cptvf->irq_allocated[CPT_VF_INT_VEC_E_MISC] = true; 628 - 629 - /* Enable mailbox interrupt */ 630 - cptvf_enable_mbox_interrupts(cptvf); 631 - cptvf_enable_swerr_interrupts(cptvf); 632 - 633 - return 0; 634 - 635 - fail: 636 - dev_err(&pdev->dev, "Request misc irq failed"); 637 - cptvf_free_all_interrupts(cptvf); 638 - return ret; 639 - } 640 - 641 - static int cptvf_register_done_intr(struct cpt_vf *cptvf) 642 - { 643 - struct pci_dev *pdev = cptvf->pdev; 644 - int ret; 645 - 646 - /* Register DONE interrupt handlers */ 647 - ret = request_irq(cptvf->msix_entries[CPT_VF_INT_VEC_E_DONE].vector, 648 - cptvf_done_intr_handler, 0, "CPT VF done intr", 649 - cptvf); 650 - if (ret) 651 - goto fail; 652 - 653 - cptvf->irq_allocated[CPT_VF_INT_VEC_E_DONE] = true; 654 - 655 - /* Enable mailbox interrupt */ 656 - cptvf_enable_done_interrupts(cptvf); 657 - return 0; 658 - 659 - fail: 660 - dev_err(&pdev->dev, "Request done irq failed\n"); 661 - cptvf_free_all_interrupts(cptvf); 662 - return ret; 663 - } 664 - 665 - static void cptvf_unregister_interrupts(struct cpt_vf *cptvf) 666 - { 667 - cptvf_free_all_interrupts(cptvf); 668 - cptvf_disable_msix(cptvf); 669 - } 670 - 671 - static void cptvf_set_irq_affinity(struct cpt_vf *cptvf) 672 - { 673 - struct pci_dev *pdev = cptvf->pdev; 674 - int vec, cpu; 675 - int irqnum; 676 - 677 - for (vec = 0; vec < CPT_VF_MSIX_VECTORS; vec++) { 678 - if (!cptvf->irq_allocated[vec]) 679 - continue; 680 - 681 - if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], 682 - GFP_KERNEL)) { 683 - dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d", 684 - cptvf->vfid); 685 - return; 686 - } 687 - 688 - cpu = cptvf->vfid % num_online_cpus(); 689 - cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node), 690 - cptvf->affinity_mask[vec]); 691 - irqnum = cptvf->msix_entries[vec].vector; 692 - irq_set_affinity_hint(irqnum, cptvf->affinity_mask[vec]); 658 + if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], 659 + GFP_KERNEL)) { 
660 + dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d", 661 + cptvf->vfid); 662 + return; 693 663 } 664 + 665 + cpu = cptvf->vfid % num_online_cpus(); 666 + cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node), 667 + cptvf->affinity_mask[vec]); 668 + irq_set_affinity_hint(pci_irq_vector(pdev, vec), 669 + cptvf->affinity_mask[vec]); 694 670 } 695 671 696 672 static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val) ··· 709 809 } 710 810 711 811 cptvf->node = dev_to_node(&pdev->dev); 712 - /* Enable MSI-X */ 713 - err = cptvf_enable_msix(cptvf); 714 - if (err) { 715 - dev_err(dev, "cptvf_enable_msix() failed"); 812 + err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS, 813 + CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX); 814 + if (err < 0) { 815 + dev_err(dev, "Request for #%d msix vectors failed\n", 816 + CPT_VF_MSIX_VECTORS); 716 817 goto cptvf_err_release_regions; 717 818 } 718 819 719 - /* Register mailbox interrupts */ 720 - cptvf_register_misc_intr(cptvf); 820 + err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), 821 + cptvf_misc_intr_handler, 0, "CPT VF misc intr", 822 + cptvf); 823 + if (err) { 824 + dev_err(dev, "Request misc irq failed"); 825 + goto cptvf_free_vectors; 826 + } 827 + 828 + /* Enable mailbox interrupt */ 829 + cptvf_enable_mbox_interrupts(cptvf); 830 + cptvf_enable_swerr_interrupts(cptvf); 721 831 722 832 /* Check ready with PF */ 723 833 /* Gets chip ID / device Id from PF if ready */ 724 834 err = cptvf_check_pf_ready(cptvf); 725 835 if (err) { 726 836 dev_err(dev, "PF not responding to READY msg"); 727 - goto cptvf_err_release_regions; 837 + goto cptvf_free_misc_irq; 728 838 } 729 839 730 840 /* CPT VF software resources initialization */ ··· 742 832 err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF); 743 833 if (err) { 744 834 dev_err(dev, "cptvf_sw_init() failed"); 745 - goto cptvf_err_release_regions; 835 + goto cptvf_free_misc_irq; 746 836 } 747 837 /* Convey VQ LEN to PF */ 748 838 err = 
cptvf_send_vq_size_msg(cptvf); 749 839 if (err) { 750 840 dev_err(dev, "PF not responding to QLEN msg"); 751 - goto cptvf_err_release_regions; 841 + goto cptvf_free_misc_irq; 752 842 } 753 843 754 844 /* CPT VF device initialization */ ··· 758 848 err = cptvf_send_vf_to_grp_msg(cptvf); 759 849 if (err) { 760 850 dev_err(dev, "PF not responding to VF_GRP msg"); 761 - goto cptvf_err_release_regions; 851 + goto cptvf_free_misc_irq; 762 852 } 763 853 764 854 cptvf->priority = 1; 765 855 err = cptvf_send_vf_priority_msg(cptvf); 766 856 if (err) { 767 857 dev_err(dev, "PF not responding to VF_PRIO msg"); 768 - goto cptvf_err_release_regions; 858 + goto cptvf_free_misc_irq; 769 859 } 770 - /* Register DONE interrupts */ 771 - err = cptvf_register_done_intr(cptvf); 772 - if (err) 773 - goto cptvf_err_release_regions; 860 + 861 + err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), 862 + cptvf_done_intr_handler, 0, "CPT VF done intr", 863 + cptvf); 864 + if (err) { 865 + dev_err(dev, "Request done irq failed\n"); 866 + goto cptvf_free_misc_irq; 867 + } 868 + 869 + /* Enable mailbox interrupt */ 870 + cptvf_enable_done_interrupts(cptvf); 774 871 775 872 /* Set irq affinity masks */ 776 - cptvf_set_irq_affinity(cptvf); 777 - /* Convey UP to PF */ 873 + cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); 874 + cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); 875 + 778 876 err = cptvf_send_vf_up(cptvf); 779 877 if (err) { 780 878 dev_err(dev, "PF not responding to UP msg"); 781 - goto cptvf_up_fail; 879 + goto cptvf_free_irq_affinity; 782 880 } 783 881 err = cvm_crypto_init(cptvf); 784 882 if (err) { 785 883 dev_err(dev, "Algorithm register failed\n"); 786 - goto cptvf_up_fail; 884 + goto cptvf_free_irq_affinity; 787 885 } 788 886 return 0; 789 887 790 - cptvf_up_fail: 791 - cptvf_unregister_interrupts(cptvf); 888 + cptvf_free_irq_affinity: 889 + cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); 890 + cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); 
891 + cptvf_free_misc_irq: 892 + free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf); 893 + cptvf_free_vectors: 894 + pci_free_irq_vectors(cptvf->pdev); 792 895 cptvf_err_release_regions: 793 896 pci_release_regions(pdev); 794 897 cptvf_err_disable_device: ··· 822 899 if (cptvf_send_vf_down(cptvf)) { 823 900 dev_err(&pdev->dev, "PF not responding to DOWN msg"); 824 901 } else { 825 - cptvf_unregister_interrupts(cptvf); 902 + cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); 903 + cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); 904 + free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf); 905 + free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf); 906 + pci_free_irq_vectors(cptvf->pdev); 826 907 cptvf_sw_cleanup(cptvf); 827 908 pci_set_drvdata(pdev, NULL); 828 909 pci_release_regions(pdev);