
drm/ttm: merge ttm_dma_tt back into ttm_tt

It makes no difference to kmalloc if the structure
is 48 or 64 bytes in size.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/396950/
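A side note on the size argument in the commit message: kmalloc() serves allocations from fixed slab size classes, so a 48-byte and a 64-byte request typically both come out of the same 64-byte bucket, and folding the DMA fields into struct ttm_tt therefore costs no extra memory per object. A throwaway sketch (not part of this patch; the module name and output are made up for illustration) that shows the rounding via ksize():

/* sizeclass_demo.c - illustration only, hypothetical module name */
#include <linux/module.h>
#include <linux/slab.h>

static int __init sizeclass_demo_init(void)
{
        void *a = kmalloc(48, GFP_KERNEL);      /* 48-byte request */
        void *b = kmalloc(64, GFP_KERNEL);      /* 64-byte request */

        if (!a || !b) {
                kfree(a);
                kfree(b);
                return -ENOMEM;
        }

        /* On common SLUB configs both report 64: the same slab bucket. */
        pr_info("ksize(48) = %zu, ksize(64) = %zu\n", ksize(a), ksize(b));

        kfree(a);
        kfree(b);
        return 0;
}

static void __exit sizeclass_demo_exit(void)
{
}

module_init(sizeclass_demo_init);
module_exit(sizeclass_demo_exit);
MODULE_LICENSE("GPL");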

+119 -156
+3 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
···
         uint64_t *addr, uint64_t *flags)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct ttm_dma_tt *ttm;
 
        switch (bo->tbo.mem.mem_type) {
        case TTM_PL_TT:
-               ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-               *addr = ttm->dma_address[0];
+               *addr = bo->tbo.ttm->dma_address[0];
                break;
        case TTM_PL_VRAM:
                *addr = amdgpu_bo_gpu_offset(bo);
···
 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-       struct ttm_dma_tt *ttm;
 
        if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
                return AMDGPU_BO_INVALID_OFFSET;
 
-       ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-       if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+       if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
                return AMDGPU_BO_INVALID_OFFSET;
 
-       return adev->gmc.agp_start + ttm->dma_address[0];
+       return adev->gmc.agp_start + bo->ttm->dma_address[0];
 }
 
 /**
+6 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
···
        cpu_addr = &job->ibs[0].ptr[num_dw];
 
        if (mem->mem_type == TTM_PL_TT) {
-               struct ttm_dma_tt *dma;
                dma_addr_t *dma_address;
 
-               dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-               dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+               dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
                r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
                                    cpu_addr);
                if (r)
···
  * TTM backend functions.
  */
 struct amdgpu_ttm_tt {
-       struct ttm_dma_tt       ttm;
+       struct ttm_tt           ttm;
        struct drm_gem_object   *gobj;
        u64                     offset;
        uint64_t                userptr;
···
        r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        if (r)
                DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-                         gtt->ttm.ttm.num_pages, gtt->offset);
+                         gtt->ttm.num_pages, gtt->offset);
        gtt->bound = false;
 }
···
        if (gtt->usertask)
                put_task_struct(gtt->usertask);
 
-       ttm_dma_tt_fini(&gtt->ttm);
+       ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
 }
···
                kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm.ttm;
+       return &gtt->ttm;
 }
 
 /**
···
        /* Return false if no part of the ttm_tt object lies within
         * the range
         */
-       size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+       size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
        if (gtt->userptr > end || gtt->userptr + size <= start)
                return false;
 
+2 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
···
                resv = vm->root.base.bo->tbo.base.resv;
        } else {
                struct drm_gem_object *obj = &bo->tbo.base;
-               struct ttm_dma_tt *ttm;
 
                resv = bo->tbo.base.resv;
                if (obj->import_attach && bo_va->is_xgmi) {
···
                }
                mem = &bo->tbo.mem;
                nodes = mem->mm_node;
-               if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-                       pages_addr = ttm->dma_address;
-               }
+               if (mem->mem_type == TTM_PL_TT)
+                       pages_addr = bo->tbo.ttm->dma_address;
        }
 
        if (bo) {
+6 -6
drivers/gpu/drm/nouveau/nouveau_bo.c
···
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-       struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+       struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
        int i;
 
        if (!ttm_dma)
···
        if (nvbo->force_coherent)
                return;
 
-       for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+       for (i = 0; i < ttm_dma->num_pages; i++)
                dma_sync_single_for_device(drm->dev->dev,
                                           ttm_dma->dma_address[i],
                                           PAGE_SIZE, DMA_TO_DEVICE);
···
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-       struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+       struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
        int i;
 
        if (!ttm_dma)
···
        if (nvbo->force_coherent)
                return;
 
-       for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+       for (i = 0; i < ttm_dma->num_pages; i++)
                dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
                                        PAGE_SIZE, DMA_FROM_DEVICE);
 }
···
 nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
                        struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct ttm_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct device *dev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
···
 nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
                          struct ttm_tt *ttm)
 {
-       struct ttm_dma_tt *ttm_dma = (void *)ttm;
+       struct ttm_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
        struct device *dev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+5 -3
drivers/gpu/drm/nouveau/nouveau_mem.c
···
 }
 
 int
-nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 {
        struct nouveau_mem *mem = nouveau_mem(reg);
        struct nouveau_cli *cli = mem->cli;
···
                mem->comp = 0;
        }
 
-       if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
-       else args.dma = tt->dma_address;
+       if (tt->sg)
+               args.sgl = tt->sg->sgl;
+       else
+               args.dma = tt->dma_address;
 
        mutex_lock(&drm->master.lock);
        cli->base.super = true;
+2 -2
drivers/gpu/drm/nouveau/nouveau_mem.h
···
 #ifndef __NOUVEAU_MEM_H__
 #define __NOUVEAU_MEM_H__
 #include <drm/ttm/ttm_bo_api.h>
-struct ttm_dma_tt;
+struct ttm_tt;
 
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
···
                    struct ttm_resource *);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
 int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
 #endif
+3 -3
drivers/gpu/drm/nouveau/nouveau_sgdma.c
···
        /* this has to be the first field so populate/unpopulated in
         * nouve_bo.c works properly, otherwise have to move them here
         */
-       struct ttm_dma_tt ttm;
+       struct ttm_tt ttm;
        struct nouveau_mem *mem;
 };
···
        if (ttm) {
                nouveau_sgdma_unbind(bdev, ttm);
                ttm_tt_destroy_common(bdev, ttm);
-               ttm_dma_tt_fini(&nvbe->ttm);
+               ttm_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
 }
···
                kfree(nvbe);
                return NULL;
        }
-       return &nvbe->ttm.ttm;
+       return &nvbe->ttm;
 }
+1 -1
drivers/gpu/drm/qxl/qxl_ttm.c
···
        ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
        if (ttm == NULL)
                return NULL;
-       if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
+       if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
                kfree(ttm);
                return NULL;
        }
+4 -4
drivers/gpu/drm/radeon/radeon_ttm.c
···
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-       struct ttm_dma_tt               ttm;
+       struct ttm_tt                   ttm;
        u64                             offset;
 
        uint64_t                        userptr;
···
        radeon_ttm_backend_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
 
-       ttm_dma_tt_fini(&gtt->ttm);
+       ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
 }
···
                kfree(gtt);
                return NULL;
        }
-       return &gtt->ttm.ttm;
+       return &gtt->ttm;
 }
 
 static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
···
 
        if (!ttm)
                return NULL;
-       return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
+       return container_of(ttm, struct radeon_ttm_tt, ttm);
 }
 
 static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
+1 -1
drivers/gpu/drm/ttm/ttm_bo.c
···
 
        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-       size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+       size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
 }
 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
+15 -15
drivers/gpu/drm/ttm/ttm_page_alloc.c
···
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
                                struct ttm_operation_ctx *ctx)
 {
        unsigned i, j;
        int r;
 
-       r = ttm_pool_populate(&tt->ttm, ctx);
+       r = ttm_pool_populate(tt, ctx);
        if (r)
                return r;
 
-       for (i = 0; i < tt->ttm.num_pages; ++i) {
-               struct page *p = tt->ttm.pages[i];
+       for (i = 0; i < tt->num_pages; ++i) {
+               struct page *p = tt->pages[i];
                size_t num_pages = 1;
 
-               for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-                       if (++p != tt->ttm.pages[j])
+               for (j = i + 1; j < tt->num_pages; ++j) {
+                       if (++p != tt->pages[j])
                                break;
 
                        ++num_pages;
                }
 
-               tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+               tt->dma_address[i] = dma_map_page(dev, tt->pages[i],
                                                  0, num_pages * PAGE_SIZE,
                                                  DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, tt->dma_address[i])) {
···
                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                                tt->dma_address[i] = 0;
                        }
-                       ttm_pool_unpopulate(&tt->ttm);
+                       ttm_pool_unpopulate(tt);
                        return -EFAULT;
                }
 
···
 }
 EXPORT_SYMBOL(ttm_populate_and_map_pages);
 
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt)
 {
        unsigned i, j;
 
-       for (i = 0; i < tt->ttm.num_pages;) {
-               struct page *p = tt->ttm.pages[i];
+       for (i = 0; i < tt->num_pages;) {
+               struct page *p = tt->pages[i];
                size_t num_pages = 1;
 
-               if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+               if (!tt->dma_address[i] || !tt->pages[i]) {
                        ++i;
                        continue;
                }
 
-               for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-                       if (++p != tt->ttm.pages[j])
+               for (j = i + 1; j < tt->num_pages; ++j) {
+                       if (++p != tt->pages[j])
                                break;
 
                        ++num_pages;
···
 
                i += num_pages;
        }
-       ttm_pool_unpopulate(&tt->ttm);
+       ttm_pool_unpopulate(tt);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
+20 -24
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
···
  * return dma_page pointer if success, otherwise NULL.
  */
 static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
-                                                 struct ttm_dma_tt *ttm_dma,
+                                                 struct ttm_tt *ttm,
                                                  unsigned index)
 {
        struct dma_page *d_page = NULL;
-       struct ttm_tt *ttm = &ttm_dma->ttm;
        unsigned long irq_flags;
        int count;
 
···
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
-               ttm_dma->dma_address[index] = d_page->dma;
-               list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+               ttm->dma_address[index] = d_page->dma;
+               list_move_tail(&d_page->page_list, &ttm->pages_list);
                pool->npages_in_use += 1;
                pool->npages_free -= 1;
        }
···
        return d_page;
 }
 
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge)
 {
-       struct ttm_tt *ttm = &ttm_dma->ttm;
        gfp_t gfp_flags;
 
        if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
···
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev,
                     struct ttm_operation_ctx *ctx)
 {
        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       struct ttm_tt *ttm = &ttm_dma->ttm;
        unsigned long num_pages = ttm->num_pages;
        struct dma_pool *pool;
        struct dma_page *d_page;
···
        if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       INIT_LIST_HEAD(&ttm->pages_list);
        i = 0;
 
        type = ttm_to_type(ttm->page_flags, ttm->caching);
···
 
        pool = ttm_dma_find_pool(dev, type | IS_HUGE);
        if (!pool) {
-               gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+               gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true);
 
                pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
                if (IS_ERR_OR_NULL(pool))
···
        while (num_pages >= HPAGE_PMD_NR) {
                unsigned j;
 
-               d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+               d_page = ttm_dma_pool_get_pages(pool, ttm, i);
                if (!d_page)
                        break;
 
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                pool->size, ctx);
                if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
+                       ttm_dma_unpopulate(ttm, dev);
                        return -ENOMEM;
                }
 
                d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
                for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
                        ttm->pages[j] = ttm->pages[j - 1] + 1;
-                       ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+                       ttm->dma_address[j] = ttm->dma_address[j - 1] +
                                PAGE_SIZE;
                }
···
 
        pool = ttm_dma_find_pool(dev, type);
        if (!pool) {
-               gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+               gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false);
 
                pool = ttm_dma_pool_init(dev, gfp_flags, type);
                if (IS_ERR_OR_NULL(pool))
···
        }
 
        while (num_pages) {
-               d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+               d_page = ttm_dma_pool_get_pages(pool, ttm, i);
                if (!d_page) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
+                       ttm_dma_unpopulate(ttm, dev);
                        return -ENOMEM;
                }
 
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
                                                pool->size, ctx);
                if (unlikely(ret != 0)) {
-                       ttm_dma_unpopulate(ttm_dma, dev);
+                       ttm_dma_unpopulate(ttm, dev);
                        return -ENOMEM;
                }
···
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 {
        struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-       struct ttm_tt *ttm = &ttm_dma->ttm;
        struct dma_pool *pool;
        struct dma_page *d_page, *next;
        enum pool_type type;
···
        pool = ttm_dma_find_pool(dev, type | IS_HUGE);
        if (pool) {
                count = 0;
-               list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+               list_for_each_entry_safe(d_page, next, &ttm->pages_list,
                                         page_list) {
                        if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
                                continue;
···
 
        /* make sure pages array match list and count number of pages */
        count = 0;
-       list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+       list_for_each_entry_safe(d_page, next, &ttm->pages_list,
                                 page_list) {
                ttm->pages[count] = d_page->p;
                count++;
···
                pool->nfrees += count;
        } else {
                pool->npages_free += count;
-               list_splice(&ttm_dma->pages_list, &pool->free_list);
+               list_splice(&ttm->pages_list, &pool->free_list);
                /*
                 * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages
                 * to free in order to minimize calls to set_memory_wb().
···
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       INIT_LIST_HEAD(&ttm->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
-               ttm_dma->dma_address[i] = 0;
+               ttm->dma_address[i] = 0;
        }
 
        /* shrink pool if necessary (only on !is_cached pools)*/
+22 -33
drivers/gpu/drm/ttm/ttm_tt.c
···
        return 0;
 }
 
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
-                                       sizeof(*ttm->ttm.pages) +
-                                       sizeof(*ttm->dma_address),
-                                       GFP_KERNEL | __GFP_ZERO);
-       if (!ttm->ttm.pages)
+       ttm->pages = kvmalloc_array(ttm->num_pages,
+                                   sizeof(*ttm->pages) +
+                                   sizeof(*ttm->dma_address),
+                                   GFP_KERNEL | __GFP_ZERO);
+       if (!ttm->pages)
                return -ENOMEM;
-       ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+       ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
        return 0;
 }
 
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-       ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+       ttm->dma_address = kvmalloc_array(ttm->num_pages,
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
···
        ttm->num_pages = bo->num_pages;
        ttm->caching = ttm_cached;
        ttm->page_flags = page_flags;
+       ttm->dma_address = NULL;
        ttm->swap_storage = NULL;
        ttm->sg = bo->sg;
+       INIT_LIST_HEAD(&ttm->pages_list);
        ttm->caching = caching;
 }
 
···
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-       kvfree(ttm->pages);
+       if (ttm->pages)
+               kvfree(ttm->pages);
+       else
+               kvfree(ttm->dma_address);
        ttm->pages = NULL;
+       ttm->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
 
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                    uint32_t page_flags, enum ttm_caching caching)
 {
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-
        ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
-       if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+       if (ttm_dma_tt_alloc_page_directory(ttm)) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
···
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                   uint32_t page_flags, enum ttm_caching caching)
 {
-       struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;
 
        ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-       INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
-               ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+               ret = ttm_sg_tt_alloc_page_directory(ttm);
        else
-               ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+               ret = ttm_dma_tt_alloc_page_directory(ttm);
        if (ret) {
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
···
        return 0;
 }
 EXPORT_SYMBOL(ttm_sg_tt_init);
-
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
-       struct ttm_tt *ttm = &ttm_dma->ttm;
-
-       if (ttm->pages)
-               kvfree(ttm->pages);
-       else
-               kvfree(ttm_dma->dma_address);
-       ttm->pages = NULL;
-       ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
 
 int ttm_tt_swapin(struct ttm_tt *ttm)
 {
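An aside on the allocation hunks above: the merged ttm_dma_tt_alloc_page_directory() keeps the old single-allocation trick, one kvmalloc_array() block sized for both the struct page * array and the dma_addr_t array with dma_address pointed at the tail, which is why ttm_tt_fini() only needs to free whichever pointer owns the block. A rough userspace sketch of that carving (illustration only; calloc() stands in for kvmalloc_array() and the names are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page;                    /* opaque here, as in driver code */
typedef uint64_t dma_addr_t;

static int alloc_page_directory(struct page ***pages,
                                dma_addr_t **dma_address, size_t num_pages)
{
        /* One zeroed block big enough for both arrays. */
        void *buf = calloc(num_pages,
                           sizeof(struct page *) + sizeof(dma_addr_t));
        if (!buf)
                return -1;

        *pages = buf;                           /* page pointers first */
        *dma_address = (dma_addr_t *)           /* DMA addresses at the tail */
                ((struct page **)buf + num_pages);
        return 0;
}

int main(void)
{
        struct page **pages;
        dma_addr_t *dma;

        if (alloc_page_directory(&pages, &dma, 16))
                return 1;

        printf("pages at %p, dma_address at %p\n", (void *)pages, (void *)dma);
        free(pages);    /* one free() releases both arrays */
        return 0;
}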
+13 -13
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
···
 };
 
 struct vmw_ttm_tt {
-       struct ttm_dma_tt dma_ttm;
+       struct ttm_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
···
                return 0;
 
        vsgt->mode = dev_priv->map_mode;
-       vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
-       vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+       vsgt->pages = vmw_tt->dma_ttm.pages;
+       vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;
···
 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
 {
        struct vmw_ttm_tt *vmw_tt =
-               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
 
        return &vmw_tt->vsgt;
 }
···
                        struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        int ret = 0;
 
        if (!bo_mem)
···
                           struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
        if (!vmw_be->bound)
                return;
···
 static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
        vmw_ttm_unbind(bdev, ttm);
        ttm_tt_destroy_common(bdev, ttm);
        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ttm_dma_tt_fini(&vmw_be->dma_ttm);
+               ttm_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);
 
···
                             struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
        struct vmw_ttm_tt *vmw_tt =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;
···
                                 struct ttm_tt *ttm)
 {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
-                                                dma_ttm.ttm);
+                                                dma_ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 
···
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                      ttm_cached);
        else
-               ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags,
+               ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
                                  ttm_cached);
        if (unlikely(ret != 0))
                goto out_no_init;
 
-       return &vmw_be->dma_ttm.ttm;
+       return &vmw_be->dma_ttm;
 out_no_init:
        kfree(vmw_be);
        return NULL;
···
        ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
-                       container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+                       container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }
 
+6 -6
include/drm/ttm/ttm_page_alloc.h
···
 /**
  * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
  */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
                               struct ttm_operation_ctx *ctx);
 
 /**
  * Unpopulates and DMA unmaps pages as part of a
  * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt);
 
 /**
  * Output the state of pools to debugfs file
···
  */
 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
                     struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
 
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
···
 {
        return 0;
 }
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
                                   struct device *dev,
                                   struct ttm_operation_ctx *ctx)
 {
        return -ENOMEM;
 }
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
                                      struct device *dev)
 {
 }
+10 -25
include/drm/ttm/ttm_tt.h
···
  * struct ttm_tt
  *
  * @pages: Array of pages backing the data.
+ * @page_flags: see TTM_PAGE_FLAG_*
  * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
+ * @sg: for SG objects via dma-buf
+ * @dma_address: The DMA (bus) addresses of the pages
  * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
+ * @pages_list: used by some page allocation backend
+ * @caching: The current caching state of the pages.
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
···
        struct page **pages;
        uint32_t page_flags;
        uint32_t num_pages;
-       struct sg_table *sg; /* for SG objects via dma-buf */
+       struct sg_table *sg;
+       dma_addr_t *dma_address;
        struct file *swap_storage;
+       struct list_head pages_list;
        enum ttm_caching caching;
 };
···
 {
        return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
 }
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
-       struct ttm_tt ttm;
-       dma_addr_t *dma_address;
-       struct list_head pages_list;
-};
 
 /**
  * ttm_tt_create
···
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
                uint32_t page_flags, enum ttm_caching caching);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
                    uint32_t page_flags, enum ttm_caching caching);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
                   uint32_t page_flags, enum ttm_caching caching);
 
 /**
···
  *
  * Free memory of ttm_tt structure
  */
 void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_destroy:
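Taken together, the driver hunks above (amdgpu, radeon, nouveau, vmwgfx) all make the same mechanical change: dma_address and pages_list now live directly in struct ttm_tt, so the extra container_of() hop through struct ttm_dma_tt disappears. A condensed stand-alone sketch of the before/after access pattern, using stand-in types rather than the real kernel headers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* After the merge: one structure carries the DMA addresses directly. */
struct ttm_tt {
        uint32_t num_pages;
        dma_addr_t *dma_address;
};

/* Driver-private wrapper, e.g. amdgpu_ttm_tt or radeon_ttm_tt. */
struct driver_gtt {
        struct ttm_tt ttm;      /* was: struct ttm_dma_tt ttm; */
        uint64_t offset;
};

static dma_addr_t first_dma_addr(const struct ttm_tt *ttm)
{
        /* was: container_of(ttm, struct ttm_dma_tt, ttm)->dma_address[0] */
        return ttm->dma_address[0];
}

int main(void)
{
        dma_addr_t addrs[1] = { 0x1000 };
        struct driver_gtt gtt = {
                .ttm = { .num_pages = 1, .dma_address = addrs },
        };

        printf("first DMA address: 0x%llx\n",
               (unsigned long long)first_dma_addr(&gtt.ttm));
        return 0;
}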