Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/hns: Use IDA interface to manage mtpt index

Switch mtpt index allocation and release from the hns driver's own bitmap
interface to the kernel's IDA interface.

Link: https://lore.kernel.org/r/1623325814-55737-5-git-send-email-liweihang@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

authored by

Yangyang Li and committed by
Jason Gunthorpe
d38936f0 38e375b7

+43 -48
+1 -1
drivers/infiniband/hw/hns/hns_roce_alloc.c
··· 251 251 hns_roce_cleanup_srq_table(hr_dev); 252 252 hns_roce_cleanup_qp_table(hr_dev); 253 253 hns_roce_cleanup_cq_table(hr_dev); 254 - hns_roce_cleanup_mr_table(hr_dev); 254 + ida_destroy(&hr_dev->mr_table.mtpt_ida.ida); 255 255 hns_roce_cleanup_pd_table(hr_dev); 256 256 hns_roce_cleanup_uar_table(hr_dev); 257 257 }
+8 -3
drivers/infiniband/hw/hns/hns_roce_device.h
··· 253 253 unsigned long *table; 254 254 }; 255 255 256 + struct hns_roce_ida { 257 + struct ida ida; 258 + u32 min; /* Lowest ID to allocate. */ 259 + u32 max; /* Highest ID to allocate. */ 260 + }; 261 + 256 262 /* For Hardware Entry Memory */ 257 263 struct hns_roce_hem_table { 258 264 /* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */ ··· 351 345 }; 352 346 353 347 struct hns_roce_mr_table { 354 - struct hns_roce_bitmap mtpt_bitmap; 348 + struct hns_roce_ida mtpt_ida; 355 349 struct hns_roce_hem_table mtpt_table; 356 350 }; 357 351 ··· 1144 1138 dma_addr_t *pages, unsigned int page_cnt); 1145 1139 1146 1140 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev); 1147 - int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev); 1141 + void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev); 1148 1142 void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev); 1149 1143 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev); 1150 1144 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev); 1151 1145 int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev); 1152 1146 1153 1147 void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev); 1154 - void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev); 1155 1148 void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev); 1156 1149 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev); 1157 1150 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
+1 -2
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
··· 1198 1198 dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", 1199 1199 mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); 1200 1200 1201 - hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, 1202 - key_to_hw_index(mr->key)); 1201 + ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key)); 1203 1202 hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); 1204 1203 kfree(mr); 1205 1204
+2 -7
drivers/infiniband/hw/hns/hns_roce_main.c
··· 763 763 } 764 764 } 765 765 766 - ret = hns_roce_init_mr_table(hr_dev); 767 - if (ret) { 768 - dev_err(dev, "Failed to init memory region table.\n"); 769 - goto err_xrcd_table_free; 770 - } 766 + hns_roce_init_mr_table(hr_dev); 771 767 772 768 hns_roce_init_cq_table(hr_dev); 773 769 ··· 789 793 790 794 err_cq_table_free: 791 795 hns_roce_cleanup_cq_table(hr_dev); 792 - hns_roce_cleanup_mr_table(hr_dev); 796 + ida_destroy(&hr_dev->mr_table.mtpt_ida.ida); 793 797 794 - err_xrcd_table_free: 795 798 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) 796 799 hns_roce_cleanup_xrcd_table(hr_dev); 797 800
+31 -35
drivers/infiniband/hw/hns/hns_roce_mr.c
··· 38 38 #include "hns_roce_cmd.h" 39 39 #include "hns_roce_hem.h" 40 40 41 - static u32 hw_index_to_key(unsigned long ind) 41 + static u32 hw_index_to_key(int ind) 42 42 { 43 - return (u32)(ind >> 24) | (ind << 8); 43 + return ((u32)ind >> 24) | ((u32)ind << 8); 44 44 } 45 45 46 46 unsigned long key_to_hw_index(u32 key) ··· 68 68 69 69 static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) 70 70 { 71 + struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida; 71 72 struct ib_device *ibdev = &hr_dev->ib_dev; 72 - unsigned long obj = 0; 73 73 int err; 74 + int id; 74 75 75 76 /* Allocate a key for mr from mr_table */ 76 - err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj); 77 - if (err) { 78 - ibdev_err(ibdev, 79 - "failed to alloc bitmap for MR key, ret = %d.\n", 80 - err); 77 + id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max, 78 + GFP_KERNEL); 79 + if (id < 0) { 80 + ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id); 81 81 return -ENOMEM; 82 82 } 83 83 84 - mr->key = hw_index_to_key(obj); /* MR key */ 84 + mr->key = hw_index_to_key(id); /* MR key */ 85 85 86 - err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj); 86 + err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, 87 + (unsigned long)id); 87 88 if (err) { 88 89 ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err); 89 90 goto err_free_bitmap; ··· 92 91 93 92 return 0; 94 93 err_free_bitmap: 95 - hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj); 94 + ida_free(&mtpt_ida->ida, id); 96 95 return err; 97 96 } 98 97 ··· 101 100 unsigned long obj = key_to_hw_index(mr->key); 102 101 103 102 hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj); 104 - hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj); 103 + ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj); 105 104 } 106 105 107 106 static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, ··· 197 196 return ret; 198 197 } 
199 198 200 - int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev) 199 + void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev) 201 200 { 202 - struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 203 - int ret; 201 + struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida; 204 202 205 - ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap, 206 - hr_dev->caps.num_mtpts, 207 - hr_dev->caps.num_mtpts - 1, 208 - hr_dev->caps.reserved_mrws, 0); 209 - return ret; 210 - } 211 - 212 - void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev) 213 - { 214 - struct hns_roce_mr_table *mr_table = &hr_dev->mr_table; 215 - 216 - hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap); 203 + ida_init(&mtpt_ida->ida); 204 + mtpt_ida->max = hr_dev->caps.num_mtpts - 1; 205 + mtpt_ida->min = hr_dev->caps.reserved_mrws; 217 206 } 218 207 219 208 struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc) ··· 494 503 key_to_hw_index(mw->rkey)); 495 504 } 496 505 497 - hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, 498 - key_to_hw_index(mw->rkey)); 506 + ida_free(&hr_dev->mr_table.mtpt_ida.ida, 507 + (int)key_to_hw_index(mw->rkey)); 499 508 } 500 509 501 510 static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, ··· 549 558 int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) 550 559 { 551 560 struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device); 561 + struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida; 562 + struct ib_device *ibdev = &hr_dev->ib_dev; 552 563 struct hns_roce_mw *mw = to_hr_mw(ibmw); 553 - unsigned long index = 0; 554 564 int ret; 565 + int id; 555 566 556 - /* Allocate a key for mw from bitmap */ 557 - ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index); 558 - if (ret) 559 - return ret; 567 + /* Allocate a key for mw from mr_table */ 568 + id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max, 569 + GFP_KERNEL); 570 + if (id < 0) { 571 + ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id); 572 + 
return -ENOMEM; 573 + } 560 574 561 - mw->rkey = hw_index_to_key(index); 575 + mw->rkey = hw_index_to_key(id); 562 576 563 577 ibmw->rkey = mw->rkey; 564 578 mw->pdn = to_hr_pd(ibmw->pd)->pdn;