Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/ttm: rework on ttm_resource to use size_t type

Change the num_pages field of struct ttm_resource to a size_t size in bytes, as sketched below.
v1 -> v2: change PFN_UP(dst_mem->size) to ttm->num_pages
v1 -> v2: change bo->resource->size to bo->base.size in some places
v1 -> v2: remove the local variable
v1 -> v2: clean up cmp_size_smaller_first()
v2 -> v3: add missing PFN_UP in ttm_bo_vm_fault_reserved
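The conversion pattern is the same throughout the patch: call sites that previously scaled num_pages by PAGE_SHIFT now read size directly, and call sites that still need a page count derive one with PFN_UP(). A minimal userspace sketch of that arithmetic (the PAGE_SHIFT value and the demo size are assumptions for illustration, not taken from the patch):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                  /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* bytes -> pages, rounding up */

int main(void)
{
	size_t size = 4097;                  /* new ttm_resource::size, in bytes */
	unsigned long pages = PFN_UP(size);  /* old-style page count: 2 */

	printf("%zu bytes -> %lu pages -> %lu bytes when page-aligned\n",
	       size, pages, pages << PAGE_SHIFT);
	return 0;
}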

Signed-off-by: Somalapuram Amaranath <Amaranath.Somalapuram@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221027091237.983582-1-Amaranath.Somalapuram@amd.com
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>

Authored by Somalapuram Amaranath and committed by Christian König
e3c92eb4 e1e7bc48

+78 -80
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -144,7 +144,7 @@
 		node->base.start = node->mm_nodes[0].start;
 	} else {
 		node->mm_nodes[0].start = 0;
-		node->mm_nodes[0].size = node->base.num_pages;
+		node->mm_nodes[0].size = PFN_UP(node->base.size);
 		node->base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -542,6 +542,7 @@
 		/* GWS and OA don't need any alignment. */
 		page_align = bp->byte_align;
 		size <<= PAGE_SHIFT;
+
 	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
 		/* Both size and alignment must be a multiple of 4. */
 		page_align = ALIGN(bp->byte_align, 4);
@@ -777,7 +776,7 @@
 		return 0;
 	}
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 	if (r)
 		return r;
 
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -62,7 +62,7 @@
 	if (!res)
 		goto fallback;
 
-	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+	BUG_ON(start + size > res->size);
 
 	cur->mem_type = res->mem_type;
 
@@ -110,7 +110,7 @@
 	cur->size = size;
 	cur->remaining = size;
 	cur->node = NULL;
-	WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+	WARN_ON(res && start + size > res->size);
 	return;
 }
 
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -127,7 +127,7 @@
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.resource->num_pages;
+			   __entry->pages = PFN_UP(bo->tbo.resource->size);
 			   __entry->type = bo->tbo.resource->mem_type;
 			   __entry->prefer = bo->preferred_domains;
 			   __entry->allow = bo->allowed_domains;
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -381,7 +381,7 @@
 	dst.offset = 0;
 
 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
-				       new_mem->num_pages << PAGE_SHIFT,
+				       new_mem->size,
 				       amdgpu_bo_encrypted(abo),
 				       bo->base.resv, &fence);
 	if (r)
@@ -424,7 +424,7 @@
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 			       struct ttm_resource *mem)
 {
-	u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+	u64 mem_size = (u64)mem->size;
 	struct amdgpu_res_cursor cursor;
 	u64 end;
 
@@ -568,7 +568,7 @@
 			     struct ttm_resource *mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+	size_t bus_size = (size_t)mem->size;
 
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -439,7 +439,7 @@
 	/* Allocate blocks in desired range */
 	vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
-	remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+	remaining_size = (u64)vres->base.size;
 
 	mutex_lock(&mgr->lock);
 	while (remaining_size) {
@@ -498,7 +498,7 @@
 		LIST_HEAD(temp);
 
 		trim_list = &vres->blocks;
-		original_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+		original_size = (u64)vres->base.size;
 
 		/*
 		 * If size value is rounded up to min_block_size, trim the last
@@ -533,8 +533,8 @@
 			amdgpu_vram_mgr_block_size(block);
 		start >>= PAGE_SHIFT;
 
-		if (start > vres->base.num_pages)
-			start -= vres->base.num_pages;
+		if (start > PFN_UP(vres->base.size))
+			start -= PFN_UP(vres->base.size);
 		else
 			start = 0;
 		vres->base.start = max(vres->base.start, start);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -649,7 +649,7 @@
 	if (!i915_ttm_cpu_maps_iomem(res))
 		return true;
 
-	return bman_res->used_visible_size == bman_res->base.num_pages;
+	return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
 }
 
 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
+2 -2
drivers/gpu/drm/i915/i915_scatterlist.c
@@ -158,7 +158,7 @@
 					     u32 page_alignment)
 {
 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
-	const u64 size = res->num_pages << PAGE_SHIFT;
+	const u64 size = res->size;
 	const u32 max_segment = round_down(UINT_MAX, page_alignment);
 	struct drm_buddy *mm = bman_res->mm;
 	struct list_head *blocks = &bman_res->blocks;
@@ -177,7 +177,7 @@
 
 	i915_refct_sgt_init(rsgt, size);
 	st = &rsgt->table;
-	if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
 		i915_refct_sgt_put(rsgt);
 		return ERR_PTR(-ENOMEM);
 	}
+6 -6
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -62,8 +62,8 @@
 	if (place->fpfn || lpfn != man->size)
 		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
-	GEM_BUG_ON(!bman_res->base.num_pages);
-	size = bman_res->base.num_pages << PAGE_SHIFT;
+	GEM_BUG_ON(!bman_res->base.size);
+	size = bman_res->base.size;
 
 	min_page_size = bman->default_page_size;
 	if (bo->page_alignment)
@@ -72,7 +72,7 @@
 	GEM_BUG_ON(min_page_size < mm->chunk_size);
 	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
 
-	if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+	if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
 	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 		unsigned long pages;
 
@@ -108,7 +108,7 @@
 		goto err_free_blocks;
 
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
+		u64 original_size = (u64)bman_res->base.size;
 
 		drm_buddy_block_trim(mm,
 				     original_size,
@@ -116,7 +116,7 @@
 	}
 
 	if (lpfn <= bman->visible_size) {
-		bman_res->used_visible_size = bman_res->base.num_pages;
+		bman_res->used_visible_size = PFN_UP(bman_res->base.size);
 	} else {
 		struct drm_buddy_block *block;
 
@@ -228,7 +228,7 @@
 
 	if (!place->fpfn &&
 	    place->lpfn == i915_ttm_buddy_man_visible_size(man))
-		return bman_res->used_visible_size == res->num_pages;
+		return bman_res->used_visible_size == PFN_UP(res->size);
 
 	/* Check each drm buddy block individually */
 	list_for_each_entry(block, &bman_res->blocks, link) {
+1 -1
drivers/gpu/drm/i915/intel_region_ttm.c
@@ -244,7 +244,7 @@
 	struct ttm_resource_manager *man = mem->region_private;
 	struct ttm_buffer_object mock_bo = {};
 
-	mock_bo.base.size = res->num_pages << PAGE_SHIFT;
+	mock_bo.base.size = res->size;
 	mock_bo.bdev = &mem->i915->bdev;
 	res->bo = &mock_bo;
 
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -532,7 +532,7 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
+	ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
 
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
@@ -1236,7 +1236,7 @@
 	} else {
 		/* make sure bo is in mappable vram */
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-		    bo->resource->start + bo->resource->num_pages < mappable)
+		    bo->resource->start + PFN_UP(bo->resource->size) < mappable)
 			return 0;
 
 		for (i = 0; i < nvbo->placement.num_placement; ++i) {
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo0039.c
@@ -52,7 +52,7 @@
 	u32 src_offset = old_reg->start << PAGE_SHIFT;
 	u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
 	u32 dst_offset = new_reg->start << PAGE_SHIFT;
-	u32 page_count = new_reg->num_pages;
+	u32 page_count = PFN_UP(new_reg->size);
 	int ret;
 
 	ret = PUSH_WAIT(push, 3);
@@ -62,7 +62,7 @@
 	PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
 				SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
 
-	page_count = new_reg->num_pages;
+	page_count = PFN_UP(new_reg->size);
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
 
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo5039.c
@@ -41,7 +41,7 @@
 {
 	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	struct nvif_push *push = chan->chan.push;
-	u64 length = (new_reg->num_pages << PAGE_SHIFT);
+	u64 length = new_reg->size;
 	u64 src_offset = mem->vma[0].addr;
 	u64 dst_offset = mem->vma[1].addr;
 	int src_tiled = !!mem->kind;
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo74c1.c
@@ -44,7 +44,7 @@
 	if (ret)
 		return ret;
 
-	PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+	PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
 			0x0308, upper_32_bits(mem->vma[0].addr),
 			0x030c, lower_32_bits(mem->vma[0].addr),
 			0x0310, upper_32_bits(mem->vma[1].addr),
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo85b5.c
@@ -44,10 +44,10 @@
 	struct nvif_push *push = chan->chan.push;
 	u64 src_offset = mem->vma[0].addr;
 	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
+	u32 page_count = PFN_UP(new_reg->size);
 	int ret;
 
-	page_count = new_reg->num_pages;
+	page_count = PFN_UP(new_reg->size);
 	while (page_count) {
 		int line_count = (page_count > 8191) ? 8191 : page_count;
 
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo9039.c
@@ -42,10 +42,10 @@
 	struct nouveau_mem *mem = nouveau_mem(old_reg);
 	u64 src_offset = mem->vma[0].addr;
 	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
+	u32 page_count = PFN_UP(new_reg->size);
 	int ret;
 
-	page_count = new_reg->num_pages;
+	page_count = PFN_UP(new_reg->size);
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
 
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo90b5.c
@@ -37,10 +37,10 @@
 	struct nvif_push *push = chan->chan.push;
 	u64 src_offset = mem->vma[0].addr;
 	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
+	u32 page_count = PFN_UP(new_reg->size);
 	int ret;
 
-	page_count = new_reg->num_pages;
+	page_count = PFN_UP(new_reg->size);
 	while (page_count) {
 		int line_count = (page_count > 8191) ? 8191 : page_count;
 
+1 -1
drivers/gpu/drm/nouveau/nouveau_boa0b5.c
@@ -58,7 +58,7 @@
 				PITCH_IN, PAGE_SIZE,
 				PITCH_OUT, PAGE_SIZE,
 				LINE_LENGTH_IN, PAGE_SIZE,
-				LINE_COUNT, new_reg->num_pages);
+				LINE_COUNT, PFN_UP(new_reg->size));
 
 	PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
 		  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
+2 -3
drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -679,7 +679,7 @@
 	}
 
 	if (!nvbo->kmap.virtual) {
-		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
+		ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
 				  &nvbo->kmap);
 		if (ret) {
 			NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -868,8 +868,7 @@
 		if (unlikely(cmd != req->suffix0)) {
 			if (!nvbo->kmap.virtual) {
 				ret = ttm_bo_kmap(&nvbo->bo, 0,
-						  nvbo->bo.resource->
-						  num_pages,
+						  PFN_UP(nvbo->bo.base.size),
 						  &nvbo->kmap);
 				if (ret) {
 					WIND_RING(chan);
+2 -2
drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -115,7 +115,7 @@
 
 	mutex_lock(&drm->master.lock);
 	ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
-				 reg->num_pages << PAGE_SHIFT,
+				 reg->size,
 				 &args, sizeof(args), &mem->mem);
 	mutex_unlock(&drm->master.lock);
 	return ret;
@@ -128,7 +128,7 @@
 	struct nouveau_cli *cli = mem->cli;
 	struct nouveau_drm *drm = cli->drm;
 	struct nvif_mmu *mmu = &cli->mmu;
-	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+	u64 size = ALIGN(reg->size, 1 << page);
 	int ret;
 
 	mutex_lock(&drm->master.lock);
+1 -1
drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -139,7 +139,7 @@
 	mem = nouveau_mem(*res);
 	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->size, &mem->vma[0]);
 	if (ret) {
 		nouveau_mem_del(man, *res);
 		return ret;
+5 -2
drivers/gpu/drm/radeon/radeon_cs.c
@@ -400,8 +400,11 @@
 	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
-	return (int)la->robj->tbo.resource->num_pages -
-		(int)lb->robj->tbo.resource->num_pages;
+	if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
+		return 1;
+	if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
+		return -1;
+	return 0;
 }
 
 /**
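The radeon hunk above fixes a latent comparator bug rather than just renaming a field: once sizes are byte counts in a size_t, casting the difference to int can truncate and misorder large buffers, hence the explicit three-way comparison. A hedged sketch of the failure mode (assumes a 64-bit LP64 target; the sizes are illustrative, not from the patch):

#include <stdio.h>
#include <stddef.h>

/* Old style: subtraction comparator, truncates through the cast to int. */
static int cmp_by_subtraction(size_t a, size_t b)
{
	return (int)a - (int)b;		/* (int)(1UL << 32) == 0 on LP64 */
}

/* New style: explicit comparisons, correct for any size_t values. */
static int cmp_explicit(size_t a, size_t b)
{
	if (a > b)
		return 1;
	if (a < b)
		return -1;
	return 0;
}

int main(void)
{
	size_t a = 1UL << 32, b = 1;	/* a is larger, but the cast hides it */

	printf("subtraction says %d, explicit says %d\n",
	       cmp_by_subtraction(a, b), cmp_explicit(a, b));
	return 0;
}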
+2 -2
drivers/gpu/drm/radeon/radeon_object.c
@@ -232,7 +232,7 @@
 		}
 		return 0;
 	}
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 	if (r) {
 		return r;
 	}
@@ -737,7 +737,7 @@
 	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	size = bo->resource->num_pages << PAGE_SHIFT;
+	size = bo->resource->size;
 	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + size) <= rdev->mc.visible_vram_size)
 		return 0;
+1 -1
drivers/gpu/drm/radeon/radeon_trace.h
@@ -22,7 +22,7 @@
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.resource->num_pages;
+			   __entry->pages = PFN_UP(bo->tbo.resource->size);
 			   ),
 	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
+2 -2
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -181,7 +181,7 @@
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
-	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
@@ -268,7 +268,7 @@
 static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
-	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+	size_t bus_size = (size_t)mem->size;
 
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
-3
drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,9 +51,6 @@
 	struct ttm_resource_manager *man;
 	int i, mem_type;
 
-	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-		   bo, bo->resource->num_pages, bo->base.size >> 10,
-		   bo->base.size >> 20);
 	for (i = 0; i < placement->num_placement; i++) {
 		mem_type = placement->placement[i].mem_type;
 		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
+3 -3
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -173,7 +173,7 @@
 
 	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
 	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+		ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
 
 	if (!src_iter->ops->maps_tt)
 		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -357,9 +357,9 @@
 
 	map->virtual = NULL;
 	map->bo = bo;
-	if (num_pages > bo->resource->num_pages)
+	if (num_pages > PFN_UP(bo->resource->size))
 		return -EINVAL;
-	if ((start_page + num_pages) > bo->resource->num_pages)
+	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
 		return -EINVAL;
 
 	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
+2 -2
drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -217,7 +217,7 @@
 	page_last = vma_pages(vma) + vma->vm_pgoff -
 		drm_vma_node_start(&bo->base.vma_node);
 
-	if (unlikely(page_offset >= bo->resource->num_pages))
+	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
 		return VM_FAULT_SIGBUS;
 
 	prot = ttm_io_prot(bo, bo->resource, prot);
@@ -412,7 +412,7 @@
 			  << PAGE_SHIFT);
 	int ret;
 
-	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+	if (len < 1 || (offset + len) > bo->base.size)
 		return -EIO;
 
 	ret = ttm_bo_reserve(bo, true, false, NULL);
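Note that the second ttm_bo_vm hunk above is not a pure refactor: comparing (offset + len) against bo->base.size bounds the access at byte granularity, whereas the old page-shifted comparison rounded the upper limit up to a whole page. A small sketch of the difference (the one-page object and offsets are assumptions for illustration):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12

int main(void)
{
	size_t bo_size = 4096;			/* one-page buffer object */
	unsigned long num_pages = 1;		/* old num_pages for that object */
	size_t offset = 4090, len = 7;		/* access runs 1 byte past the end */

	/* Old check: page-rounded, lets the overrun through. */
	int old_rejects = ((offset + len) >> PAGE_SHIFT) > num_pages;
	/* New check: byte-accurate, rejects it with -EIO. */
	int new_rejects = (offset + len) > bo_size;

	printf("old check rejects: %d, new check rejects: %d\n",
	       old_rejects, new_rejects);
	return 0;
}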
+1 -1
drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,7 +83,7 @@
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  node->base.num_pages,
+					  PFN_UP(node->base.size),
 					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
+6 -8
drivers/gpu/drm/ttm/ttm_resource.c
@@ -177,7 +177,7 @@
 	struct ttm_resource_manager *man;
 
 	res->start = 0;
-	res->num_pages = PFN_UP(bo->base.size);
+	res->size = bo->base.size;
 	res->mem_type = place->mem_type;
 	res->placement = place->flags;
 	res->bus.addr = NULL;
@@ -192,7 +192,7 @@
 		list_add_tail(&res->lru, &bo->bdev->pinned);
 	else
 		list_add_tail(&res->lru, &man->lru[bo->priority]);
-	man->usage += res->num_pages << PAGE_SHIFT;
+	man->usage += res->size;
 	spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
@@ -214,7 +214,7 @@
 
 	spin_lock(&bdev->lru_lock);
 	list_del_init(&res->lru);
-	man->usage -= res->num_pages << PAGE_SHIFT;
+	man->usage -= res->size;
 	spin_unlock(&bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_fini);
@@ -665,17 +665,15 @@
 		iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
 		iter_io->needs_unmap = false;
 	} else {
-		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
 		iter_io->needs_unmap = true;
 		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
 		if (mem->bus.caching == ttm_write_combined)
 			iosys_map_set_vaddr_iomem(&iter_io->dmap,
 						  ioremap_wc(mem->bus.offset,
-							     bus_size));
+							     mem->size));
 		else if (mem->bus.caching == ttm_cached)
 			iosys_map_set_vaddr(&iter_io->dmap,
-					    memremap(mem->bus.offset, bus_size,
+					    memremap(mem->bus.offset, mem->size,
 						     MEMREMAP_WB |
 						     MEMREMAP_WT |
 						     MEMREMAP_WC));
@@ -684,7 +682,7 @@
 		if (iosys_map_is_null(&iter_io->dmap))
 			iosys_map_set_vaddr_iomem(&iter_io->dmap,
 						  ioremap(mem->bus.offset,
-							  bus_size));
+							  mem->size));
 
 		if (iosys_map_is_null(&iter_io->dmap)) {
 			ret = -ENOMEM;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -483,8 +483,8 @@
 	d.src_addr = NULL;
 	d.dst_pages = dst->ttm->pages;
 	d.src_pages = src->ttm->pages;
-	d.dst_num_pages = dst->resource->num_pages;
-	d.src_num_pages = src->resource->num_pages;
+	d.dst_num_pages = PFN_UP(dst->resource->size);
+	d.src_num_pages = PFN_UP(src->resource->size);
 	d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
 	d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
 	d.diff = diff;
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -194,7 +194,7 @@
 	int ret = 0;
 
 	place = vmw_vram_placement.placement[0];
-	place.lpfn = bo->resource->num_pages;
+	place.lpfn = PFN_UP(bo->resource->size);
 	placement.num_placement = 1;
 	placement.placement = &place;
 	placement.num_busy_placement = 1;
@@ -211,7 +211,7 @@
 	 * that situation.
 	 */
 	if (bo->resource->mem_type == TTM_PL_VRAM &&
-	    bo->resource->start < bo->resource->num_pages &&
+	    bo->resource->start < PFN_UP(bo->resource->size) &&
 	    bo->resource->start > 0 &&
 	    buf->base.pin_count == 0) {
 		ctx.interruptible = false;
@@ -352,7 +352,7 @@
 	if (virtual)
 		return virtual;
 
-	ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
+	ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
 	if (ret)
 		DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -443,7 +443,7 @@
 	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 	 * This should really be a TTM utility.
 	 */
-	for (i = 0; i < old_bo->resource->num_pages; ++i) {
+	for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
 		bool dummy;
 
 		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1047,7 +1047,7 @@
 
 	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
+		if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
 			VMW_DEBUG_USER("Query buffer too large.\n");
 			return -EINVAL;
 		}
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -71,7 +71,7 @@
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += (*res)->num_pages;
+		gman->used_gmr_pages += PFN_UP((*res)->size);
 		/*
 		 * Because the graphics memory is a soft limit we can try to
 		 * expand it instead of letting the userspace apps crash.
@@ -114,7 +114,7 @@
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= (*res)->num_pages;
+	gman->used_gmr_pages -= PFN_UP((*res)->size);
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
 	ttm_resource_fini(man, *res);
@@ -129,7 +129,7 @@
 
 	ida_free(&gman->gmr_ida, res->start);
 	spin_lock(&gman->lock);
-	gman->used_gmr_pages -= res->num_pages;
+	gman->used_gmr_pages -= PFN_UP(res->size);
 	spin_unlock(&gman->lock);
 	ttm_resource_fini(man, res);
 	kfree(res);
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -230,7 +230,7 @@
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 	struct vmw_bo_dirty *dirty = vbo->dirty;
-	pgoff_t num_pages = vbo->base.resource->num_pages;
+	pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
 	size_t size;
 	int ret;
 
@@ -395,7 +395,7 @@
 		return ret;
 
 	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-	if (unlikely(page_offset >= bo->resource->num_pages)) {
+	if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
@@ -438,7 +438,7 @@
 
 		page_offset = vmf->pgoff -
 			drm_vma_node_start(&bo->base.vma_node);
-		if (page_offset >= bo->resource->num_pages ||
+		if (page_offset >= PFN_UP(bo->resource->size) ||
 		    vmw_resources_clean(vbo, page_offset,
 					page_offset + PAGE_SIZE,
 					&allowed_prefault)) {
+2 -2
include/drm/ttm/ttm_resource.h
@@ -197,7 +197,7 @@
  * struct ttm_resource
  *
  * @start: Start of the allocation.
- * @num_pages: Actual size of resource in pages.
+ * @size: Actual size of resource in bytes.
  * @mem_type: Resource type of the allocation.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
@@ -208,7 +208,7 @@
  */
 struct ttm_resource {
 	unsigned long start;
-	unsigned long num_pages;
+	size_t size;
 	uint32_t mem_type;
 	uint32_t placement;
 	struct ttm_bus_placement bus;