Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver (v2)

Forward the operation context to ttm_tt_populate as well; the ultimate goal is swapout enablement for reserved BOs.

v2: squash in fix for vboxvideo

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Roger He and committed by Alex Deucher
d0cef9fa 9de2fb99

+95 -64
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 990 990 return &gtt->ttm.ttm; 991 991 } 992 992 993 - static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) 993 + static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, 994 + struct ttm_operation_ctx *ctx) 994 995 { 995 996 struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); 996 997 struct amdgpu_ttm_tt *gtt = (void *)ttm; ··· 1019 1018 1020 1019 #ifdef CONFIG_SWIOTLB 1021 1020 if (swiotlb_nr_tbl()) { 1022 - return ttm_dma_populate(&gtt->ttm, adev->dev); 1021 + return ttm_dma_populate(&gtt->ttm, adev->dev, ctx); 1023 1022 } 1024 1023 #endif 1025 1024 1026 - return ttm_populate_and_map_pages(adev->dev, &gtt->ttm); 1025 + return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx); 1027 1026 } 1028 1027 1029 1028 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+3 -2
drivers/gpu/drm/ast/ast_ttm.c
··· 216 216 return tt; 217 217 } 218 218 219 - static int ast_ttm_tt_populate(struct ttm_tt *ttm) 219 + static int ast_ttm_tt_populate(struct ttm_tt *ttm, 220 + struct ttm_operation_ctx *ctx) 220 221 { 221 - return ttm_pool_populate(ttm); 222 + return ttm_pool_populate(ttm, ctx); 222 223 } 223 224 224 225 static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+3 -2
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 216 216 return tt; 217 217 } 218 218 219 - static int cirrus_ttm_tt_populate(struct ttm_tt *ttm) 219 + static int cirrus_ttm_tt_populate(struct ttm_tt *ttm, 220 + struct ttm_operation_ctx *ctx) 220 221 { 221 - return ttm_pool_populate(ttm); 222 + return ttm_pool_populate(ttm, ctx); 222 223 } 223 224 224 225 static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+3 -2
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
··· 223 223 return tt; 224 224 } 225 225 226 - static int hibmc_ttm_tt_populate(struct ttm_tt *ttm) 226 + static int hibmc_ttm_tt_populate(struct ttm_tt *ttm, 227 + struct ttm_operation_ctx *ctx) 227 228 { 228 - return ttm_pool_populate(ttm); 229 + return ttm_pool_populate(ttm, ctx); 229 230 } 230 231 231 232 static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
+3 -2
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 216 216 return tt; 217 217 } 218 218 219 - static int mgag200_ttm_tt_populate(struct ttm_tt *ttm) 219 + static int mgag200_ttm_tt_populate(struct ttm_tt *ttm, 220 + struct ttm_operation_ctx *ctx) 220 221 { 221 - return ttm_pool_populate(ttm); 222 + return ttm_pool_populate(ttm, ctx); 222 223 } 223 224 224 225 static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+4 -4
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1547 1547 } 1548 1548 1549 1549 static int 1550 - nouveau_ttm_tt_populate(struct ttm_tt *ttm) 1550 + nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) 1551 1551 { 1552 1552 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1553 1553 struct nouveau_drm *drm; ··· 1572 1572 1573 1573 #if IS_ENABLED(CONFIG_AGP) 1574 1574 if (drm->agp.bridge) { 1575 - return ttm_agp_tt_populate(ttm); 1575 + return ttm_agp_tt_populate(ttm, ctx); 1576 1576 } 1577 1577 #endif 1578 1578 1579 1579 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) 1580 1580 if (swiotlb_nr_tbl()) { 1581 - return ttm_dma_populate((void *)ttm, dev); 1581 + return ttm_dma_populate((void *)ttm, dev, ctx); 1582 1582 } 1583 1583 #endif 1584 1584 1585 - r = ttm_pool_populate(ttm); 1585 + r = ttm_pool_populate(ttm, ctx); 1586 1586 if (r) { 1587 1587 return r; 1588 1588 }
+3 -2
drivers/gpu/drm/qxl/qxl_ttm.c
··· 291 291 .destroy = &qxl_ttm_backend_destroy, 292 292 }; 293 293 294 - static int qxl_ttm_tt_populate(struct ttm_tt *ttm) 294 + static int qxl_ttm_tt_populate(struct ttm_tt *ttm, 295 + struct ttm_operation_ctx *ctx) 295 296 { 296 297 int r; 297 298 298 299 if (ttm->state != tt_unpopulated) 299 300 return 0; 300 301 301 - r = ttm_pool_populate(ttm); 302 + r = ttm_pool_populate(ttm, ctx); 302 303 if (r) 303 304 return r; 304 305
+5 -4
drivers/gpu/drm/radeon/radeon_ttm.c
··· 721 721 return (struct radeon_ttm_tt *)ttm; 722 722 } 723 723 724 - static int radeon_ttm_tt_populate(struct ttm_tt *ttm) 724 + static int radeon_ttm_tt_populate(struct ttm_tt *ttm, 725 + struct ttm_operation_ctx *ctx) 725 726 { 726 727 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); 727 728 struct radeon_device *rdev; ··· 751 750 rdev = radeon_get_rdev(ttm->bdev); 752 751 #if IS_ENABLED(CONFIG_AGP) 753 752 if (rdev->flags & RADEON_IS_AGP) { 754 - return ttm_agp_tt_populate(ttm); 753 + return ttm_agp_tt_populate(ttm, ctx); 755 754 } 756 755 #endif 757 756 758 757 #ifdef CONFIG_SWIOTLB 759 758 if (swiotlb_nr_tbl()) { 760 - return ttm_dma_populate(&gtt->ttm, rdev->dev); 759 + return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx); 761 760 } 762 761 #endif 763 762 764 - return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm); 763 + return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx); 765 764 } 766 765 767 766 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+2 -2
drivers/gpu/drm/ttm/ttm_agp_backend.c
··· 133 133 } 134 134 EXPORT_SYMBOL(ttm_agp_tt_create); 135 135 136 - int ttm_agp_tt_populate(struct ttm_tt *ttm) 136 + int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) 137 137 { 138 138 if (ttm->state != tt_unpopulated) 139 139 return 0; 140 140 141 - return ttm_pool_populate(ttm); 141 + return ttm_pool_populate(ttm, ctx); 142 142 } 143 143 EXPORT_SYMBOL(ttm_agp_tt_populate); 144 144
+8 -3
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 376 376 * TTM might be null for moves within the same region. 377 377 */ 378 378 if (ttm && ttm->state == tt_unpopulated) { 379 - ret = ttm->bdev->driver->ttm_tt_populate(ttm); 379 + ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx); 380 380 if (ret) 381 381 goto out1; 382 382 } ··· 545 545 unsigned long num_pages, 546 546 struct ttm_bo_kmap_obj *map) 547 547 { 548 - struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot; 548 + struct ttm_mem_reg *mem = &bo->mem; 549 + struct ttm_operation_ctx ctx = { 550 + .interruptible = false, 551 + .no_wait_gpu = false 552 + }; 549 553 struct ttm_tt *ttm = bo->ttm; 554 + pgprot_t prot; 550 555 int ret; 551 556 552 557 BUG_ON(!ttm); 553 558 554 559 if (ttm->state == tt_unpopulated) { 555 - ret = ttm->bdev->driver->ttm_tt_populate(ttm); 560 + ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx); 556 561 if (ret) 557 562 return ret; 558 563 }
+6 -1
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 226 226 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, 227 227 cvma.vm_page_prot); 228 228 } else { 229 + struct ttm_operation_ctx ctx = { 230 + .interruptible = false, 231 + .no_wait_gpu = false 232 + }; 233 + 229 234 ttm = bo->ttm; 230 235 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, 231 236 cvma.vm_page_prot); 232 237 233 238 /* Allocate all page at once, most common usage */ 234 - if (ttm->bdev->driver->ttm_tt_populate(ttm)) { 239 + if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) { 235 240 retval = VM_FAULT_OOM; 236 241 goto out_io_unlock; 237 242 }
+5 -8
drivers/gpu/drm/ttm/ttm_page_alloc.c
··· 1058 1058 _manager = NULL; 1059 1059 } 1060 1060 1061 - int ttm_pool_populate(struct ttm_tt *ttm) 1061 + int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) 1062 1062 { 1063 1063 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; 1064 - struct ttm_operation_ctx ctx = { 1065 - .interruptible = false, 1066 - .no_wait_gpu = false 1067 - }; 1068 1064 unsigned i; 1069 1065 int ret; 1070 1066 ··· 1076 1080 1077 1081 for (i = 0; i < ttm->num_pages; ++i) { 1078 1082 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], 1079 - PAGE_SIZE, &ctx); 1083 + PAGE_SIZE, ctx); 1080 1084 if (unlikely(ret != 0)) { 1081 1085 ttm_pool_unpopulate(ttm); 1082 1086 return -ENOMEM; ··· 1113 1117 } 1114 1118 EXPORT_SYMBOL(ttm_pool_unpopulate); 1115 1119 1116 - int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) 1120 + int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, 1121 + struct ttm_operation_ctx *ctx) 1117 1122 { 1118 1123 unsigned i, j; 1119 1124 int r; 1120 1125 1121 - r = ttm_pool_populate(&tt->ttm); 1126 + r = ttm_pool_populate(&tt->ttm, ctx); 1122 1127 if (r) 1123 1128 return r; 1124 1129
+4 -7
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
··· 923 923 * On success pages list will hold count number of correctly 924 924 * cached pages. On failure will hold the negative return value (-ENOMEM, etc). 925 925 */ 926 - int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev) 926 + int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, 927 + struct ttm_operation_ctx *ctx) 927 928 { 928 929 struct ttm_tt *ttm = &ttm_dma->ttm; 929 930 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; 930 - struct ttm_operation_ctx ctx = { 931 - .interruptible = false, 932 - .no_wait_gpu = false 933 - }; 934 931 unsigned long num_pages = ttm->num_pages; 935 932 struct dma_pool *pool; 936 933 enum pool_type type; ··· 963 966 break; 964 967 965 968 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], 966 - pool->size, &ctx); 969 + pool->size, ctx); 967 970 if (unlikely(ret != 0)) { 968 971 ttm_dma_unpopulate(ttm_dma, dev); 969 972 return -ENOMEM; ··· 999 1002 } 1000 1003 1001 1004 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], 1002 - pool->size, &ctx); 1005 + pool->size, ctx); 1003 1006 if (unlikely(ret != 0)) { 1004 1007 ttm_dma_unpopulate(ttm_dma, dev); 1005 1008 return -ENOMEM;
+5 -1
drivers/gpu/drm/ttm/ttm_tt.c
··· 263 263 264 264 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) 265 265 { 266 + struct ttm_operation_ctx ctx = { 267 + .interruptible = false, 268 + .no_wait_gpu = false 269 + }; 266 270 int ret = 0; 267 271 268 272 if (!ttm) ··· 275 271 if (ttm->state == tt_bound) 276 272 return 0; 277 273 278 - ret = ttm->bdev->driver->ttm_tt_populate(ttm); 274 + ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx); 279 275 if (ret) 280 276 return ret; 281 277
+5 -1
drivers/gpu/drm/virtio/virtgpu_object.c
··· 124 124 int ret; 125 125 struct page **pages = bo->tbo.ttm->pages; 126 126 int nr_pages = bo->tbo.num_pages; 127 + struct ttm_operation_ctx ctx = { 128 + .interruptible = false, 129 + .no_wait_gpu = false 130 + }; 127 131 128 132 /* wtf swapping */ 129 133 if (bo->pages) 130 134 return 0; 131 135 132 136 if (bo->tbo.ttm->state == tt_unpopulated) 133 - bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm); 137 + bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx); 134 138 bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 135 139 if (!bo->pages) 136 140 goto out;
+3 -2
drivers/gpu/drm/virtio/virtgpu_ttm.c
··· 324 324 .destroy = &virtio_gpu_ttm_backend_destroy, 325 325 }; 326 326 327 - static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm) 327 + static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm, 328 + struct ttm_operation_ctx *ctx) 328 329 { 329 330 if (ttm->state != tt_unpopulated) 330 331 return 0; 331 332 332 - return ttm_pool_populate(ttm); 333 + return ttm_pool_populate(ttm, ctx); 333 334 } 334 335 335 336 static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+5 -8
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 635 635 } 636 636 637 637 638 - static int vmw_ttm_populate(struct ttm_tt *ttm) 638 + static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) 639 639 { 640 640 struct vmw_ttm_tt *vmw_tt = 641 641 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); 642 642 struct vmw_private *dev_priv = vmw_tt->dev_priv; 643 643 struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); 644 - struct ttm_operation_ctx ctx = { 645 - .interruptible = true, 646 - .no_wait_gpu = false 647 - }; 648 644 int ret; 649 645 650 646 if (ttm->state != tt_unpopulated) ··· 649 653 if (dev_priv->map_mode == vmw_dma_alloc_coherent) { 650 654 size_t size = 651 655 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); 652 - ret = ttm_mem_global_alloc(glob, size, &ctx); 656 + ret = ttm_mem_global_alloc(glob, size, ctx); 653 657 if (unlikely(ret != 0)) 654 658 return ret; 655 659 656 - ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev); 660 + ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev, 661 + ctx); 657 662 if (unlikely(ret != 0)) 658 663 ttm_mem_global_free(glob, size); 659 664 } else 660 - ret = ttm_pool_populate(ttm); 665 + ret = ttm_pool_populate(ttm, ctx); 661 666 662 667 return ret; 663 668 }
+11 -2
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 240 240 unsigned long offset; 241 241 unsigned long bo_size; 242 242 struct vmw_otable *otables = batch->otables; 243 + struct ttm_operation_ctx ctx = { 244 + .interruptible = false, 245 + .no_wait_gpu = false 246 + }; 243 247 SVGAOTableType i; 244 248 int ret; 245 249 ··· 268 264 269 265 ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL); 270 266 BUG_ON(ret != 0); 271 - ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); 267 + ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx); 272 268 if (unlikely(ret != 0)) 273 269 goto out_unreserve; 274 270 ret = vmw_bo_map_dma(batch->otable_bo); ··· 434 430 struct vmw_mob *mob) 435 431 { 436 432 int ret; 433 + struct ttm_operation_ctx ctx = { 434 + .interruptible = false, 435 + .no_wait_gpu = false 436 + }; 437 + 437 438 BUG_ON(mob->pt_bo != NULL); 438 439 439 440 ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, ··· 451 442 ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL); 452 443 453 444 BUG_ON(ret != 0); 454 - ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); 445 + ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx); 455 446 if (unlikely(ret != 0)) 456 447 goto out_unreserve; 457 448 ret = vmw_bo_map_dma(mob->pt_bo);
+3 -2
drivers/staging/vboxvideo/vbox_ttm.c
··· 213 213 return tt; 214 214 } 215 215 216 - static int vbox_ttm_tt_populate(struct ttm_tt *ttm) 216 + static int vbox_ttm_tt_populate(struct ttm_tt *ttm, 217 + struct ttm_operation_ctx *ctx) 217 218 { 218 - return ttm_pool_populate(ttm); 219 + return ttm_pool_populate(ttm, ctx); 219 220 } 220 221 221 222 static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
+3 -2
include/drm/ttm/ttm_bo_driver.h
··· 352 352 * Returns: 353 353 * -ENOMEM: Out of memory. 354 354 */ 355 - int (*ttm_tt_populate)(struct ttm_tt *ttm); 355 + int (*ttm_tt_populate)(struct ttm_tt *ttm, 356 + struct ttm_operation_ctx *ctx); 356 357 357 358 /** 358 359 * ttm_tt_unpopulate ··· 1078 1077 struct agp_bridge_data *bridge, 1079 1078 unsigned long size, uint32_t page_flags, 1080 1079 struct page *dummy_read_page); 1081 - int ttm_agp_tt_populate(struct ttm_tt *ttm); 1080 + int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); 1082 1081 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); 1083 1082 #endif 1084 1083
+7 -4
include/drm/ttm/ttm_page_alloc.h
··· 47 47 * 48 48 * Add backing pages to all of @ttm 49 49 */ 50 - int ttm_pool_populate(struct ttm_tt *ttm); 50 + int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); 51 51 52 52 /** 53 53 * ttm_pool_unpopulate: ··· 61 61 /** 62 62 * Populates and DMA maps pages to fullfil a ttm_dma_populate() request 63 63 */ 64 - int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); 64 + int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, 65 + struct ttm_operation_ctx *ctx); 65 66 66 67 /** 67 68 * Unpopulates and DMA unmaps pages as part of a ··· 90 89 */ 91 90 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); 92 91 93 - int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); 92 + int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, 93 + struct ttm_operation_ctx *ctx); 94 94 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); 95 95 96 96 #else ··· 108 106 return 0; 109 107 } 110 108 static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, 111 - struct device *dev) 109 + struct device *dev, 110 + struct ttm_operation_ctx *ctx) 112 111 { 113 112 return -ENOMEM; 114 113 }