Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/hns: Use IDA interface to manage pd index

Switch pd index allocation and release from hns own bitmap interface
to IDA interface.

Link: https://lore.kernel.org/r/1623325814-55737-6-git-send-email-liweihang@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Authored by Yangyang Li; committed by Jason Gunthorpe.
Commit: 645f0593 (parent: d38936f0)

Total diffstat: +25 −37 (4 files changed)
drivers/infiniband/hw/hns/hns_roce_alloc.c (+1 −1)

@@ -252,6 +252,6 @@
 	hns_roce_cleanup_qp_table(hr_dev);
 	hns_roce_cleanup_cq_table(hr_dev);
 	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
-	hns_roce_cleanup_pd_table(hr_dev);
+	ida_destroy(&hr_dev->pd_ida.ida);
 	hns_roce_cleanup_uar_table(hr_dev);
 }
drivers/infiniband/hw/hns/hns_roce_device.h (+2 −3)

@@ -961,7 +961,7 @@
 	void __iomem *priv_addr;

 	struct hns_roce_cmdq cmd;
-	struct hns_roce_bitmap pd_bitmap;
+	struct hns_roce_ida pd_ida;
 	struct hns_roce_bitmap xrcd_bitmap;
 	struct hns_roce_uar_table uar_table;
 	struct hns_roce_mr_table mr_table;
@@ -1143,14 +1143,13 @@
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		     dma_addr_t *pages, unsigned int page_cnt);

-int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
 int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

-void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
drivers/infiniband/hw/hns/hns_roce_main.c (+2 −8)

@@ -748,11 +748,7 @@
 		goto err_uar_table_free;
 	}

-	ret = hns_roce_init_pd_table(hr_dev);
-	if (ret) {
-		dev_err(dev, "Failed to init protected domain table.\n");
-		goto err_uar_alloc_free;
-	}
+	hns_roce_init_pd_table(hr_dev);

 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
 		ret = hns_roce_init_xrcd_table(hr_dev);
@@ -791,9 +795,7 @@
 	hns_roce_cleanup_xrcd_table(hr_dev);

 err_pd_table_free:
-	hns_roce_cleanup_pd_table(hr_dev);
-
-err_uar_alloc_free:
+	ida_destroy(&hr_dev->pd_ida.ida);
 	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

 err_uar_table_free:
drivers/infiniband/hw/hns/hns_roce_pd.c (+20 −25)

(Hunk line counts were garbled in extraction; hunks are separated with bare @@ markers.)

@@
 #include <linux/pci.h>
 #include "hns_roce_device.h"

-static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
 {
-	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
-}
+	struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;

-static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
-{
-	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
-}
-
-int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
-{
-	return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
-				    hr_dev->caps.num_pds - 1,
-				    hr_dev->caps.reserved_pds, 0);
-}
-
-void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
-{
-	hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
+	ida_init(&pd_ida->ida);
+	pd_ida->max = hr_dev->caps.num_pds - 1;
+	pd_ida->min = hr_dev->caps.reserved_pds;
 }

 int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct ib_device *ib_dev = ibpd->device;
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
 	struct hns_roce_pd *pd = to_hr_pd(ibpd);
-	int ret;
+	int ret = 0;
+	int id;

-	ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
-	if (ret) {
-		ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret);
-		return ret;
+	id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
+			     GFP_KERNEL);
+	if (id < 0) {
+		ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
+		return -ENOMEM;
 	}
+	pd->pdn = (unsigned long)id;

 	if (udata) {
 		struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};

 		ret = ib_copy_to_udata(udata, &resp,
 				       min(udata->outlen, sizeof(resp)));
 		if (ret) {
-			hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+			ida_free(&pd_ida->ida, id);
 			ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
 		}
 	}
@@
 int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
-	hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+
+	ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);
+
 	return 0;
 }