
RDMA/umem: Remove hugetlb flag

The i40iw and bnxt_re drivers no longer depend on the hugetlb flag, so
remove this flag from the ib_umem structure.

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

Authored by Shiraz Saleem, committed by Jason Gunthorpe
db6c6774 d8558251
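For context, the replacement pattern looks roughly like the sketch below. It
assumes the ib_umem_find_best_pgsz() helper added earlier in this series; the
function name demo_build_mr and the example page-size bitmap are hypothetical,
not taken from this patch.

#include <linux/sizes.h>
#include <rdma/ib_umem.h>

/* Hypothetical driver MR setup: pick the largest device-supported page
 * size that fits the pinned region, instead of consulting a umem-wide
 * hugetlb flag.
 */
static int demo_build_mr(struct ib_umem *umem, u64 iova)
{
	/* Example only: device supports 4K, 64K and 2M pages. */
	unsigned long pgsz_bitmap = SZ_4K | SZ_64K | SZ_2M;
	unsigned long pgsz;

	pgsz = ib_umem_find_best_pgsz(umem, pgsz_bitmap, iova);
	if (!pgsz)
		return -EINVAL;	/* no supported page size fits */

	/* Program the device page tables with shift ilog2(pgsz)... */
	return 0;
}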

+1 -29

drivers/infiniband/core/umem.c (+1 -25)
···
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/export.h>
-#include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <rdma/ib_umem_odp.h>
···
 	struct ib_ucontext *context;
 	struct ib_umem *umem;
 	struct page **page_list;
-	struct vm_area_struct **vma_list;
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
-	int i;
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg;
 	unsigned int gup_flags = FOLL_WRITE;
···
 		return umem;
 	}
 
-	/* We assume the memory is from hugetlb until proved otherwise */
-	umem->hugetlb = 1;
-
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
 		ret = -ENOMEM;
 		goto umem_kfree;
 	}
-
-	/*
-	 * if we can't alloc the vma_list, it's not so bad;
-	 * just assume the memory is not hugetlb memory
-	 */
-	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
-	if (!vma_list)
-		umem->hugetlb = 0;
 
 	npages = ib_umem_num_pages(umem);
 	if (npages == 0 || npages > UINT_MAX) {
···
 		ret = get_user_pages_longterm(cur_base,
 				min_t(unsigned long, npages,
 				      PAGE_SIZE / sizeof (struct page *)),
-				gup_flags, page_list, vma_list);
+				gup_flags, page_list, NULL);
 		if (ret < 0) {
 			up_read(&mm->mmap_sem);
 			goto umem_release;
···
 		sg = ib_umem_add_sg_table(sg, page_list, ret,
 			dma_get_max_seg_size(context->device->dma_device),
 			&umem->sg_nents);
-
-		/* Continue to hold the mmap_sem as vma_list access
-		 * needs to be protected.
-		 */
-		for (i = 0; i < ret && umem->hugetlb; i++) {
-			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
-				umem->hugetlb = 0;
-		}
 
 		up_read(&mm->mmap_sem);
 	}
···
 vma:
 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:
-	if (vma_list)
-		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 umem_kfree:
 	if (ret) {
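With the per-VMA hugetlb walk gone, consumers that care about segment sizes
can inspect the coalesced SGL that ib_umem_add_sg_table() builds above. A
minimal sketch, assuming the sg_head and nmap fields of this kernel version;
the function demo_all_segs_2m is hypothetical:

#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <rdma/ib_umem.h>

/* Hypothetical check: true if every DMA-mapped segment of the umem is
 * at least 2M long and 2M-aligned, a stand-in for the removed per-VMA
 * hugetlb test.
 */
static bool demo_all_segs_2m(struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		if (sg_dma_len(sg) < SZ_2M ||
		    !IS_ALIGNED(sg_dma_address(sg), SZ_2M))
			return false;
	}
	return true;
}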
drivers/infiniband/core/umem_odp.c (-3)
···
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
 		up_read(&mm->mmap_sem);
-		umem->hugetlb = 1;
-	} else {
-		umem->hugetlb = 0;
 	}
 
 	mutex_init(&umem_odp->umem_mutex);
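Note that the ODP path still derives umem->page_shift from the VMA's hstate
above, so huge-page backing remains visible there without the flag. A short
sketch of how a consumer could test for it (demo_umem_is_huge is
hypothetical):

#include <rdma/ib_umem.h>

/* Hypothetical: ODP umems backed by hugetlb keep a page_shift larger
 * than the base PAGE_SHIFT after this change.
 */
static inline bool demo_umem_is_huge(struct ib_umem *umem)
{
	return umem->page_shift > PAGE_SHIFT;
}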
include/rdma/ib_umem.h (-1)
···
 	unsigned long		address;
 	int			page_shift;
 	u32 writable : 1;
-	u32 hugetlb : 1;
 	u32 is_odp : 1;
 	struct work_struct	work;
 	struct sg_table sg_head;