Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: remove adev pointer from struct amdgpu_bo v2

It's completely pointless to have two pointers to the
device in the same structure.

v2: rename function to amdgpu_ttm_adev, fix typos

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König; committed by Alex Deucher.
a7d64de6 f3fd4512

+65 -62
+5 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 465 465 */ 466 466 struct list_head va; 467 467 /* Constant after initialization */ 468 - struct amdgpu_device *adev; 469 468 struct drm_gem_object gem_base; 470 469 struct amdgpu_bo *parent; 471 470 struct amdgpu_bo *shadow; ··· 2125 2126 struct list_head gtt_list; 2126 2127 2127 2128 }; 2129 + 2130 + static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) 2131 + { 2132 + return container_of(bdev, struct amdgpu_device, mman.bdev); 2133 + } 2128 2134 2129 2135 bool amdgpu_device_is_px(struct drm_device *dev); 2130 2136 int amdgpu_device_init(struct amdgpu_device *adev,
+6 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 355 355 static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, 356 356 struct amdgpu_bo *bo) 357 357 { 358 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 358 359 u64 initial_bytes_moved; 359 360 uint32_t domain; 360 361 int r; ··· 373 372 374 373 retry: 375 374 amdgpu_ttm_placement_from_domain(bo, domain); 376 - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); 375 + initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); 377 376 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 378 - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - 377 + p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - 379 378 initial_bytes_moved; 380 379 381 380 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { ··· 401 400 402 401 struct amdgpu_bo_list_entry *candidate = p->evictable; 403 402 struct amdgpu_bo *bo = candidate->robj; 403 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 404 404 u64 initial_bytes_moved; 405 405 uint32_t other; 406 406 ··· 422 420 423 421 /* Good we can try to move this BO somewhere else */ 424 422 amdgpu_ttm_placement_from_domain(bo, other); 425 - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); 423 + initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); 426 424 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 427 - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - 425 + p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - 428 426 initial_bytes_moved; 429 427 430 428 if (unlikely(r))
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 116 116 * Call from drm_gem_handle_create which appear in both new and open ioctl 117 117 * case. 118 118 */ 119 - int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 119 + int amdgpu_gem_object_open(struct drm_gem_object *obj, 120 + struct drm_file *file_priv) 120 121 { 121 122 struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); 122 - struct amdgpu_device *adev = abo->adev; 123 + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); 123 124 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; 124 125 struct amdgpu_vm *vm = &fpriv->vm; 125 126 struct amdgpu_bo_va *bo_va; ··· 143 142 struct drm_file *file_priv) 144 143 { 145 144 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 146 - struct amdgpu_device *adev = bo->adev; 145 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 147 146 struct amdgpu_fpriv *fpriv = file_priv->driver_priv; 148 147 struct amdgpu_vm *vm = &fpriv->vm; 149 148
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
··· 285 285 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) 286 286 { 287 287 unsigned long end = addr + amdgpu_bo_size(bo) - 1; 288 - struct amdgpu_device *adev = bo->adev; 288 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 289 289 struct amdgpu_mn *rmn; 290 290 struct amdgpu_mn_node *node = NULL; 291 291 struct list_head bos; ··· 340 340 */ 341 341 void amdgpu_mn_unregister(struct amdgpu_bo *bo) 342 342 { 343 - struct amdgpu_device *adev = bo->adev; 343 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 344 344 struct amdgpu_mn *rmn; 345 345 struct list_head *head; 346 346
+27 -23
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 88 88 89 89 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) 90 90 { 91 + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 91 92 struct amdgpu_bo *bo; 92 93 93 94 bo = container_of(tbo, struct amdgpu_bo, tbo); 94 95 95 - amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); 96 + amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); 96 97 97 98 drm_gem_object_release(&bo->gem_base); 98 99 amdgpu_bo_unref(&bo->parent); 99 100 if (!list_empty(&bo->shadow_list)) { 100 - mutex_lock(&bo->adev->shadow_list_lock); 101 + mutex_lock(&adev->shadow_list_lock); 101 102 list_del_init(&bo->shadow_list); 102 - mutex_unlock(&bo->adev->shadow_list_lock); 103 + mutex_unlock(&adev->shadow_list_lock); 103 104 } 104 105 kfree(bo->metadata); 105 106 kfree(bo); ··· 211 210 212 211 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) 213 212 { 214 - amdgpu_ttm_placement_init(abo->adev, &abo->placement, 215 - abo->placements, domain, abo->flags); 213 + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); 214 + 215 + amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements, 216 + domain, abo->flags); 216 217 } 217 218 218 219 static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, ··· 360 357 kfree(bo); 361 358 return r; 362 359 } 363 - bo->adev = adev; 364 360 INIT_LIST_HEAD(&bo->shadow_list); 365 361 INIT_LIST_HEAD(&bo->va); 366 362 bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | ··· 624 622 u64 min_offset, u64 max_offset, 625 623 u64 *gpu_addr) 626 624 { 625 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 627 626 int r, i; 628 627 unsigned fpfn, lpfn; 629 628 ··· 660 657 if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && 661 658 !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && 662 659 (!max_offset || max_offset > 663 - bo->adev->mc.visible_vram_size)) { 660 + adev->mc.visible_vram_size)) { 664 661 if (WARN_ON_ONCE(min_offset > 665 - bo->adev->mc.visible_vram_size)) 662 + 
bo->adev->mc.visible_vram_size)) + adev->mc.visible_vram_size)) 666 663 return -EINVAL; 667 664 fpfn = min_offset >> PAGE_SHIFT; 668 - lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT; 665 lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; 669 666 } else { 670 667 fpfn = min_offset >> PAGE_SHIFT; 671 668 lpfn = max_offset >> PAGE_SHIFT; ··· 680 677 681 678 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 682 679 if (unlikely(r)) { 683 - dev_err(bo->adev->dev, "%p pin failed\n", bo); 680 + dev_err(adev->dev, "%p pin failed\n", bo); 684 681 goto error; 685 682 } 686 683 r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); 687 684 if (unlikely(r)) { 688 - dev_err(bo->adev->dev, "%p bind failed\n", bo); 685 + dev_err(adev->dev, "%p bind failed\n", bo); 689 686 goto error; 690 687 } 691 688 ··· 693 690 if (gpu_addr != NULL) 694 691 *gpu_addr = amdgpu_bo_gpu_offset(bo); 695 692 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 696 - bo->adev->vram_pin_size += amdgpu_bo_size(bo); 693 + adev->vram_pin_size += amdgpu_bo_size(bo); 697 694 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 698 - bo->adev->invisible_pin_size += amdgpu_bo_size(bo); 695 + adev->invisible_pin_size += amdgpu_bo_size(bo); 699 696 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { 700 - bo->adev->gart_pin_size += amdgpu_bo_size(bo); 697 + adev->gart_pin_size += amdgpu_bo_size(bo); 701 698 } 702 699 703 700 error: ··· 711 708 712 709 int amdgpu_bo_unpin(struct amdgpu_bo *bo) 713 710 { 711 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 714 712 int r, i; 715 713 716 714 if (!bo->pin_count) { 717 - dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo); 715 + dev_warn(adev->dev, "%p unpin not necessary\n", bo); 718 716 return 0; 719 717 } 720 718 bo->pin_count--; ··· 727 723 } 728 724 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 729 725 if (unlikely(r)) { 730 - dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); 726 + dev_err(adev->dev, "%p validate failed for unpin\n", bo); 731 727 goto error; 732 728 }
733 729 734 730 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { 735 - bo->adev->vram_pin_size -= amdgpu_bo_size(bo); 731 + adev->vram_pin_size -= amdgpu_bo_size(bo); 736 732 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 737 - bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); 733 + adev->invisible_pin_size -= amdgpu_bo_size(bo); 738 734 } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { 739 - bo->adev->gart_pin_size -= amdgpu_bo_size(bo); 735 + adev->gart_pin_size -= amdgpu_bo_size(bo); 740 736 } 741 737 742 738 error: ··· 861 857 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, 862 858 struct ttm_mem_reg *new_mem) 863 859 { 860 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 864 861 struct amdgpu_bo *abo; 865 862 struct ttm_mem_reg *old_mem = &bo->mem; 866 863 ··· 869 864 return; 870 865 871 866 abo = container_of(bo, struct amdgpu_bo, tbo); 872 - amdgpu_vm_bo_invalidate(abo->adev, abo); 867 + amdgpu_vm_bo_invalidate(adev, abo); 873 868 874 869 /* update statistics */ 875 870 if (!new_mem) 876 871 return; 877 872 878 873 /* move_notify is called before move happens */ 879 - amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem); 874 + amdgpu_update_memory_usage(adev, &bo->mem, new_mem); 880 875 881 876 trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); 882 877 } 883 878 884 879 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 885 880 { 886 - struct amdgpu_device *adev; 881 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 887 882 struct amdgpu_bo *abo; 888 883 unsigned long offset, size, lpfn; 889 884 int i, r; ··· 892 887 return 0; 893 888 894 889 abo = container_of(bo, struct amdgpu_bo, tbo); 895 - adev = abo->adev; 896 890 if (bo->mem.mem_type != TTM_PL_VRAM) 897 891 return 0; 898 892
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 71 71 */ 72 72 static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) 73 73 { 74 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 74 75 int r; 75 76 76 77 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); 77 78 if (unlikely(r != 0)) { 78 79 if (r != -ERESTARTSYS) 79 - dev_err(bo->adev->dev, "%p reserve failed\n", bo); 80 + dev_err(adev->dev, "%p reserve failed\n", bo); 80 81 return r; 81 82 } 82 83 return 0;
+17 -26
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 51 51 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); 52 52 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); 53 53 54 - static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev) 55 - { 56 - struct amdgpu_mman *mman; 57 - struct amdgpu_device *adev; 58 - 59 - mman = container_of(bdev, struct amdgpu_mman, bdev); 60 - adev = container_of(mman, struct amdgpu_device, mman); 61 - return adev; 62 - } 63 - 64 54 65 55 /* 66 56 * Global memory. ··· 140 150 { 141 151 struct amdgpu_device *adev; 142 152 143 - adev = amdgpu_get_adev(bdev); 153 + adev = amdgpu_ttm_adev(bdev); 144 154 145 155 switch (type) { 146 156 case TTM_PL_SYSTEM: ··· 185 195 static void amdgpu_evict_flags(struct ttm_buffer_object *bo, 186 196 struct ttm_placement *placement) 187 197 { 198 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 188 199 struct amdgpu_bo *abo; 189 200 static struct ttm_place placements = { 190 201 .fpfn = 0, ··· 204 213 abo = container_of(bo, struct amdgpu_bo, tbo); 205 214 switch (bo->mem.mem_type) { 206 215 case TTM_PL_VRAM: 207 - if (abo->adev->mman.buffer_funcs_ring->ready == false) { 216 + if (adev->mman.buffer_funcs_ring->ready == false) { 208 217 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); 209 218 } else { 210 219 amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); ··· 220 229 * allocating address space for the BO. 
221 230 */ 222 231 abo->placements[i].lpfn = 223 - abo->adev->mc.gtt_size >> PAGE_SHIFT; 232 + adev->mc.gtt_size >> PAGE_SHIFT; 224 233 } 225 234 } 226 235 break; ··· 281 290 struct ttm_mem_reg *new_mem, 282 291 struct ttm_mem_reg *old_mem) 283 292 { 284 - struct amdgpu_device *adev = amdgpu_get_adev(bo->bdev); 293 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 285 294 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 286 295 287 296 struct drm_mm_node *old_mm, *new_mm; ··· 375 384 struct ttm_placement placement; 376 385 int r; 377 386 378 - adev = amdgpu_get_adev(bo->bdev); 387 + adev = amdgpu_ttm_adev(bo->bdev); 379 388 tmp_mem = *new_mem; 380 389 tmp_mem.mm_node = NULL; 381 390 placement.num_placement = 1; ··· 422 431 struct ttm_place placements; 423 432 int r; 424 433 425 - adev = amdgpu_get_adev(bo->bdev); 434 + adev = amdgpu_ttm_adev(bo->bdev); 426 435 tmp_mem = *new_mem; 427 436 tmp_mem.mm_node = NULL; 428 437 placement.num_placement = 1; ··· 465 474 if (WARN_ON_ONCE(abo->pin_count > 0)) 466 475 return -EINVAL; 467 476 468 - adev = amdgpu_get_adev(bo->bdev); 477 + adev = amdgpu_ttm_adev(bo->bdev); 469 478 470 479 /* remember the eviction */ 471 480 if (evict) ··· 518 527 static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 519 528 { 520 529 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 521 - struct amdgpu_device *adev = amdgpu_get_adev(bdev); 530 + struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); 522 531 523 532 mem->bus.addr = NULL; 524 533 mem->bus.offset = 0; ··· 650 659 /* prepare the sg table with the user pages */ 651 660 static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) 652 661 { 653 - struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); 662 + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); 654 663 struct amdgpu_ttm_tt *gtt = (void *)ttm; 655 664 unsigned nents; 656 665 int r; ··· 682 691 683 692 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) 684 
693 { 685 - struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); 694 + struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); 686 695 struct amdgpu_ttm_tt *gtt = (void *)ttm; 687 696 struct sg_page_iter sg_iter; 688 697 ··· 842 851 struct amdgpu_device *adev; 843 852 struct amdgpu_ttm_tt *gtt; 844 853 845 - adev = amdgpu_get_adev(bdev); 854 + adev = amdgpu_ttm_adev(bdev); 846 855 847 856 gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); 848 857 if (gtt == NULL) { ··· 886 895 return 0; 887 896 } 888 897 889 - adev = amdgpu_get_adev(ttm->bdev); 898 + adev = amdgpu_ttm_adev(ttm->bdev); 890 899 891 900 #ifdef CONFIG_SWIOTLB 892 901 if (swiotlb_nr_tbl()) { ··· 932 941 if (slave) 933 942 return; 934 943 935 - adev = amdgpu_get_adev(ttm->bdev); 944 + adev = amdgpu_ttm_adev(ttm->bdev); 936 945 937 946 #ifdef CONFIG_SWIOTLB 938 947 if (swiotlb_nr_tbl()) { ··· 1055 1064 1056 1065 static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) 1057 1066 { 1058 - struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); 1067 + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 1059 1068 unsigned i, j; 1060 1069 1061 1070 for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { ··· 1072 1081 1073 1082 static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) 1074 1083 { 1075 - struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); 1084 + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 1076 1085 unsigned log2_size = min(ilog2(tbo->num_pages), 1077 1086 AMDGPU_TTM_LRU_SIZE - 1); 1078 1087 ··· 1361 1370 struct reservation_object *resv, 1362 1371 struct fence **fence) 1363 1372 { 1364 - struct amdgpu_device *adev = bo->adev; 1373 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 1365 1374 struct amdgpu_job *job; 1366 1375 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; 1367 1376
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 931 931 if (r) 932 932 return r; 933 933 934 - if (!bo->adev->uvd.address_64_bit) { 934 + if (!ring->adev->uvd.address_64_bit) { 935 935 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); 936 936 amdgpu_uvd_force_into_uvd_segment(bo); 937 937 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1195 1195 1196 1196 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); 1197 1197 gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && 1198 - adev == bo_va->bo->adev) ? flags : 0; 1198 + adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0; 1199 1199 1200 1200 spin_lock(&vm->status_lock); 1201 1201 if (!list_empty(&bo_va->vm_status))