Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/ttm: remove pointers to globals

As the name says, the global memory and BO accounting is global. So it doesn't
make much sense to keep pointers to these global structures all around the code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Thomas Hellström <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/332879/
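
The pattern the patch removes is a per-device back pointer to a process-wide singleton. A minimal userspace sketch of the idea, with illustrative names (bo_global, device, demo_lock_lru are not the TTM API): once there is only one static instance, the device no longer needs to carry a pointer to it and callers reference the global directly.

    #include <pthread.h>
    #include <stdio.h>

    /* One process-wide accounting structure, analogous to ttm_bo_glob. */
    struct bo_global {
            pthread_mutex_t lru_lock;
            int bo_count;
    };

    static struct bo_global bo_glob = {
            .lru_lock = PTHREAD_MUTEX_INITIALIZER,
            .bo_count = 0,
    };

    /* Before the change each device kept a "struct bo_global *glob" and code
     * dereferenced dev->glob->lru_lock.  With a single static instance that
     * indirection adds nothing, so the device loses the pointer entirely. */
    struct device {
            const char *name;
            /* struct bo_global *glob;   <-- removed */
    };

    static void demo_lock_lru(struct device *dev)
    {
            pthread_mutex_lock(&bo_glob.lru_lock);   /* was dev->glob->lru_lock */
            bo_glob.bo_count++;
            pthread_mutex_unlock(&bo_glob.lru_lock);
    }

    int main(void)
    {
            struct device dev = { "dev0" };
            demo_lock_lru(&dev);
            printf("bo_count = %d\n", bo_glob.bo_count);
            return 0;
    }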

+57 -88
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
···
  */
 static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 {
-        struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+        struct page *dummy_page = ttm_bo_glob.dummy_read_page;
 
         if (adev->dummy_page_addr)
                 return 0;
+4 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
 {
-        struct ttm_bo_global *glob = adev->mman.bdev.glob;
         struct amdgpu_vm_bo_base *bo_base;
 
         if (vm->bulk_moveable) {
-                spin_lock(&glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
                 return;
         }
 
         memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         list_for_each_entry(bo_base, &vm->idle, vm_status) {
                 struct amdgpu_bo *bo = bo_base->bo;
 
···
                         ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
                                                 &vm->lru_bulk_move);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         vm->bulk_moveable = true;
 }
+2 -3
drivers/gpu/drm/drm_gem_vram_helper.c
···
         struct drm_info_node *node = (struct drm_info_node *) m->private;
         struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
         struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
-        struct ttm_bo_global *glob = vmm->bdev.glob;
         struct drm_printer p = drm_seq_file_printer(m);
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         drm_mm_print(mm, &p);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         return 0;
 }
 
+2 -5
drivers/gpu/drm/qxl/qxl_release.c
···
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
         struct ttm_buffer_object *bo;
-        struct ttm_bo_global *glob;
         struct ttm_bo_device *bdev;
         struct ttm_validate_buffer *entry;
         struct qxl_device *qdev;
···
                        release->id | 0xf0000000, release->base.seqno);
         trace_dma_fence_emit(&release->base);
 
-        glob = bdev->glob;
-
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
 
         list_for_each_entry(entry, &release->bos, head) {
                 bo = entry->bo;
···
                 ttm_bo_move_to_lru_tail(bo, NULL);
                 dma_resv_unlock(bo->base.resv);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         ww_acquire_fini(&release->ticket);
 }
 
+2 -5
drivers/gpu/drm/qxl/qxl_ttm.c
···
 {
         struct drm_info_node *node = (struct drm_info_node *)m->private;
         struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
-        struct drm_device *dev = node->minor->dev;
-        struct qxl_device *rdev = dev->dev_private;
-        struct ttm_bo_global *glob = rdev->mman.bdev.glob;
         struct drm_printer p = drm_seq_file_printer(m);
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         drm_mm_print(mm, &p);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         return 0;
 }
 #endif
+1 -1
drivers/gpu/drm/ttm/ttm_agp_backend.c
···
 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
         struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-        struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+        struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
         struct drm_mm_node *node = bo_mem->mm_node;
         struct agp_memory *mem;
         int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+29 -36
drivers/gpu/drm/ttm/ttm_bo.c
···
 DEFINE_MUTEX(ttm_global_mutex);
 unsigned ttm_bo_glob_use_count;
 struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
 
 static struct attribute ttm_bo_count = {
         .name = "bo_count",
···
 {
         struct ttm_buffer_object *bo =
                 container_of(list_kref, struct ttm_buffer_object, list_kref);
-        struct ttm_bo_device *bdev = bo->bdev;
         size_t acc_size = bo->acc_size;
 
         BUG_ON(kref_read(&bo->list_kref));
···
         BUG_ON(!list_empty(&bo->lru));
         BUG_ON(!list_empty(&bo->ddestroy));
         ttm_tt_destroy(bo->ttm);
-        atomic_dec(&bo->bdev->glob->bo_count);
+        atomic_dec(&ttm_bo_glob.bo_count);
         dma_fence_put(bo->moving);
         if (!ttm_bo_uses_embedded_gem_object(bo))
                 dma_resv_fini(&bo->base._resv);
         mutex_destroy(&bo->wu_mutex);
         bo->destroy(bo);
-        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+        ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
···
         if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
             !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
                                      TTM_PAGE_FLAG_SWAPPED))) {
-                list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+                list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
                 kref_get(&bo->list_kref);
         }
 }
···
                 dma_resv_assert_held(pos->first->base.resv);
                 dma_resv_assert_held(pos->last->base.resv);
 
-                lru = &pos->first->bdev->glob->swap_lru[i];
+                lru = &ttm_bo_glob.swap_lru[i];
                 list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
         }
 }
···
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
         struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_bo_global *glob = bdev->glob;
         int ret;
 
         ret = ttm_bo_individualize_resv(bo);
···
                  */
                 dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
                                           30 * HZ);
-                spin_lock(&glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 goto error;
         }
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
         if (!ret) {
                 if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
                         ttm_bo_del_from_lru(bo);
-                        spin_unlock(&glob->lru_lock);
+                        spin_unlock(&ttm_bo_glob.lru_lock);
                         if (bo->base.resv != &bo->base._resv)
                                 dma_resv_unlock(&bo->base._resv);
 
···
 error:
         kref_get(&bo->list_kref);
         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         schedule_delayed_work(&bdev->wq,
                               ((HZ / 100) < 1) ? 1 : HZ / 100);
···
                                bool interruptible, bool no_wait_gpu,
                                bool unlock_resv)
 {
-        struct ttm_bo_global *glob = bo->bdev->glob;
         struct dma_resv *resv;
         int ret;
 
···
 
                 if (unlock_resv)
                         dma_resv_unlock(bo->base.resv);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
 
                 lret = dma_resv_wait_timeout_rcu(resv, true,
                                                  interruptible,
···
                 else if (lret == 0)
                         return -EBUSY;
 
-                spin_lock(&glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                         /*
                          * We raced, and lost, someone else holds the reservation now,
···
                          * delayed destruction would succeed, so just return success
                          * here.
                          */
-                        spin_unlock(&glob->lru_lock);
+                        spin_unlock(&ttm_bo_glob.lru_lock);
                         return 0;
                 }
                 ret = 0;
···
         if (ret || unlikely(list_empty(&bo->ddestroy))) {
                 if (unlock_resv)
                         dma_resv_unlock(bo->base.resv);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
                 return ret;
         }
 
···
         list_del_init(&bo->ddestroy);
         kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         ttm_bo_cleanup_memtype_use(bo);
 
         if (unlock_resv)
···
  */
 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
-        struct ttm_bo_global *glob = bdev->glob;
+        struct ttm_bo_global *glob = &ttm_bo_glob;
         struct list_head removed;
         bool empty;
 
···
                                struct ww_acquire_ctx *ticket)
 {
         struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
-        struct ttm_bo_global *glob = bdev->glob;
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
         bool locked = false;
         unsigned i;
         int ret;
 
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                 list_for_each_entry(bo, &man->lru[i], lru) {
                         bool busy;
···
         if (!bo) {
                 if (busy_bo)
                         kref_get(&busy_bo->list_kref);
-                spin_unlock(&glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
                 ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                 if (busy_bo)
                         kref_put(&busy_bo->list_kref, ttm_bo_release_list);
···
                 return ret;
         }
 
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         ret = ttm_bo_evict(bo, ctx);
         if (locked)
···
         mem->mem_type = mem_type;
         mem->placement = cur_flags;
 
-        spin_lock(&bo->bdev->glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ttm_bo_del_from_lru(bo);
         ttm_bo_add_mem_to_lru(bo, mem);
-        spin_unlock(&bo->bdev->glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         return 0;
 }
···
 
 error:
         if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
-                spin_lock(&bo->bdev->glob->lru_lock);
+                spin_lock(&ttm_bo_glob.lru_lock);
                 ttm_bo_move_to_lru_tail(bo, NULL);
-                spin_unlock(&bo->bdev->glob->lru_lock);
+                spin_unlock(&ttm_bo_glob.lru_lock);
         }
 
         return ret;
···
                          struct dma_resv *resv,
                          void (*destroy) (struct ttm_buffer_object *))
 {
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         int ret = 0;
         unsigned long num_pages;
-        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
         bool locked;
 
         ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
···
                 dma_resv_init(&bo->base._resv);
                 drm_vma_node_reset(&bo->base.vma_node);
         }
-        atomic_inc(&bo->bdev->glob->bo_count);
+        atomic_inc(&ttm_bo_glob.bo_count);
 
         /*
          * For ttm_bo_type_device buffers, allocate
···
                 return ret;
         }
 
-        spin_lock(&bdev->glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ttm_bo_move_to_lru_tail(bo, NULL);
-        spin_unlock(&bdev->glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         return ret;
 }
···
                 .flags = TTM_OPT_FLAG_FORCE_ALLOC
         };
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-        struct ttm_bo_global *glob = bdev->glob;
+        struct ttm_bo_global *glob = &ttm_bo_glob;
         struct dma_fence *fence;
         int ret;
         unsigned i;
···
                 goto out;
 
         spin_lock_init(&glob->lru_lock);
-        glob->mem_glob = &ttm_mem_glob;
-        glob->mem_glob->bo_glob = glob;
         glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
         if (unlikely(glob->dummy_read_page == NULL)) {
···
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
+        struct ttm_bo_global *glob = &ttm_bo_glob;
         int ret = 0;
         unsigned i = TTM_NUM_MEM_TYPES;
         struct ttm_mem_type_manager *man;
-        struct ttm_bo_global *glob = bdev->glob;
 
         while (i--) {
                 man = &bdev->man[i];
···
         INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
         INIT_LIST_HEAD(&bdev->ddestroy);
         bdev->dev_mapping = mapping;
-        bdev->glob = glob;
         bdev->need_dma32 = need_dma32;
         mutex_lock(&ttm_global_mutex);
         list_add_tail(&bdev->device_list, &glob->device_list);
···
                 .no_wait_gpu = false
         };
 
-        while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
-                ;
+        while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
 
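
Note the new EXPORT_SYMBOL(ttm_bo_glob) at the top of ttm_bo.c: once drivers stop going through bdev->glob and reference the global directly, modular drivers need the symbol exported so their references resolve against ttm.ko. A hedged sketch of what the consumer side looks like after this patch (the extern declaration lives in the TTM headers; example_touch_lru is an illustrative helper, not a real driver function):

    /* In a driver built as a module. */
    #include <drm/ttm/ttm_bo_driver.h>   /* declares the ttm_bo_glob global */

    static void example_touch_lru(struct ttm_buffer_object *bo)
    {
            spin_lock(&ttm_bo_glob.lru_lock);    /* resolves only because the symbol is exported */
            ttm_bo_move_to_lru_tail(bo, NULL);
            spin_unlock(&ttm_bo_glob.lru_lock);
    }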
+1 -1
drivers/gpu/drm/ttm/ttm_bo_util.c
···
          * TODO: Explicit member copy would probably be better here.
          */
 
-        atomic_inc(&bo->bdev->glob->bo_count);
+        atomic_inc(&ttm_bo_glob.bo_count);
         INIT_LIST_HEAD(&fbo->base.ddestroy);
         INIT_LIST_HEAD(&fbo->base.lru);
         INIT_LIST_HEAD(&fbo->base.swap);
+2 -2
drivers/gpu/drm/ttm/ttm_bo_vm.c
···
                 }
 
                 if (bo->moving != moving) {
-                        spin_lock(&bdev->glob->lru_lock);
+                        spin_lock(&ttm_bo_glob.lru_lock);
                         ttm_bo_move_to_lru_tail(bo, NULL);
-                        spin_unlock(&bdev->glob->lru_lock);
+                        spin_unlock(&ttm_bo_glob.lru_lock);
                 }
                 dma_fence_put(moving);
         }
+6 -19
drivers/gpu/drm/ttm/ttm_execbuf_util.c
···
                                 struct list_head *list)
 {
         struct ttm_validate_buffer *entry;
-        struct ttm_bo_global *glob;
 
         if (list_empty(list))
                 return;
 
-        entry = list_first_entry(list, struct ttm_validate_buffer, head);
-        glob = entry->bo->bdev->glob;
-
-        spin_lock(&glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         list_for_each_entry(entry, list, head) {
                 struct ttm_buffer_object *bo = entry->bo;
 
                 ttm_bo_move_to_lru_tail(bo, NULL);
                 dma_resv_unlock(bo->base.resv);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
 
         if (ticket)
                 ww_acquire_fini(ticket);
···
                            struct list_head *list, bool intr,
                            struct list_head *dups)
 {
-        struct ttm_bo_global *glob;
         struct ttm_validate_buffer *entry;
         int ret;
 
         if (list_empty(list))
                 return 0;
-
-        entry = list_first_entry(list, struct ttm_validate_buffer, head);
-        glob = entry->bo->bdev->glob;
 
         if (ticket)
                 ww_acquire_init(ticket, &reservation_ww_class);
···
                                  struct dma_fence *fence)
 {
         struct ttm_validate_buffer *entry;
-        struct ttm_buffer_object *bo;
-        struct ttm_bo_global *glob;
 
         if (list_empty(list))
                 return;
 
-        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
-        glob = bo->bdev->glob;
-
-        spin_lock(&glob->lru_lock);
-
+        spin_lock(&ttm_bo_glob.lru_lock);
         list_for_each_entry(entry, list, head) {
-                bo = entry->bo;
+                struct ttm_buffer_object *bo = entry->bo;
+
                 if (entry->num_shared)
                         dma_resv_add_shared_fence(bo->base.resv, fence);
                 else
···
                 ttm_bo_move_to_lru_tail(bo, NULL);
                 dma_resv_unlock(bo->base.resv);
         }
-        spin_unlock(&glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         if (ticket)
                 ww_acquire_fini(ticket);
 }
+1 -1
drivers/gpu/drm/ttm/ttm_memory.c
···
 
         while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                 spin_unlock(&glob->lock);
-                ret = ttm_bo_swapout(glob->bo_glob, ctx);
+                ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
                 spin_lock(&glob->lock);
                 if (unlikely(ret != 0))
                         break;
+2 -2
drivers/gpu/drm/ttm/ttm_page_alloc.c
···
 static void
 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 {
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         unsigned i;
 
         if (mem_count_update == 0)
···
 
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         unsigned i;
         int ret;
 
+2 -2
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
···
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
                      struct ttm_operation_ctx *ctx)
 {
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         struct ttm_tt *ttm = &ttm_dma->ttm;
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
         unsigned long num_pages = ttm->num_pages;
         struct dma_pool *pool;
         struct dma_page *d_page;
···
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
         struct ttm_tt *ttm = &ttm_dma->ttm;
-        struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
         struct dma_pool *pool;
         struct dma_page *d_page, *next;
         enum pool_type type;
+2 -4
include/drm/ttm/ttm_bo_driver.h
···
          */
 
         struct kobject kobj;
-        struct ttm_mem_global *mem_glob;
         struct page *dummy_read_page;
         spinlock_t lru_lock;
 
···
          * Constant after bo device init / atomic.
          */
         struct list_head device_list;
-        struct ttm_bo_global *glob;
         struct ttm_bo_driver *driver;
         struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 
···
  */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-        spin_lock(&bo->bdev->glob->lru_lock);
+        spin_lock(&ttm_bo_glob.lru_lock);
         ttm_bo_move_to_lru_tail(bo, NULL);
-        spin_unlock(&bo->bdev->glob->lru_lock);
+        spin_unlock(&ttm_bo_glob.lru_lock);
         dma_resv_unlock(bo->base.resv);
 }
 
-1
include/drm/ttm/ttm_memory.h
···
 struct ttm_mem_zone;
 extern struct ttm_mem_global {
         struct kobject kobj;
-        struct ttm_bo_global *bo_glob;
         struct workqueue_struct *swap_queue;
         struct work_struct work;
         spinlock_t lock;