Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: add operation ctx to ttm_bo_validate v2

Give moving a BO into place an operation context to work with.

v2: rebased

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König; committed by Alex Deucher.
19be5570 750a2503

+131 -76
+8 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 343 343 struct amdgpu_bo *bo) 344 344 { 345 345 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 346 + struct ttm_operation_ctx ctx = { true, false }; 346 347 u64 initial_bytes_moved, bytes_moved; 347 348 uint32_t domain; 348 349 int r; ··· 375 374 retry: 376 375 amdgpu_ttm_placement_from_domain(bo, domain); 377 376 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); 378 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 377 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 379 378 bytes_moved = atomic64_read(&adev->num_bytes_moved) - 380 379 initial_bytes_moved; 381 380 p->bytes_moved += bytes_moved; ··· 397 396 struct amdgpu_bo *validated) 398 397 { 399 398 uint32_t domain = validated->allowed_domains; 399 + struct ttm_operation_ctx ctx = { true, false }; 400 400 int r; 401 401 402 402 if (!p->evictable) ··· 439 437 bo->tbo.mem.mem_type == TTM_PL_VRAM && 440 438 bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT; 441 439 initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); 442 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 440 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 443 441 bytes_moved = atomic64_read(&adev->num_bytes_moved) - 444 442 initial_bytes_moved; 445 443 p->bytes_moved += bytes_moved; ··· 478 476 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, 479 477 struct list_head *validated) 480 478 { 479 + struct ttm_operation_ctx ctx = { true, false }; 481 480 struct amdgpu_bo_list_entry *lobj; 482 481 int r; 483 482 ··· 496 493 lobj->user_pages) { 497 494 amdgpu_ttm_placement_from_domain(bo, 498 495 AMDGPU_GEM_DOMAIN_CPU); 499 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, 500 - false); 496 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 501 497 if (r) 502 498 return r; 503 499 amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, ··· 1577 1575 struct amdgpu_bo_va_mapping **map) 1578 1576 { 1579 1577 struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; 1578 
+ struct ttm_operation_ctx ctx = { false, false }; 1580 1579 struct amdgpu_vm *vm = &fpriv->vm; 1581 1580 struct amdgpu_bo_va_mapping *mapping; 1582 1581 int r; ··· 1598 1595 if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) { 1599 1596 (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1600 1597 amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains); 1601 - r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, 1602 - false); 1598 + r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx); 1603 1599 if (r) 1604 1600 return r; 1605 1601 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 282 282 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, 283 283 struct drm_file *filp) 284 284 { 285 + struct ttm_operation_ctx ctx = { true, false }; 285 286 struct amdgpu_device *adev = dev->dev_private; 286 287 struct drm_amdgpu_gem_userptr *args = data; 287 288 struct drm_gem_object *gobj; ··· 336 335 goto free_pages; 337 336 338 337 amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); 339 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 338 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 340 339 amdgpu_bo_unreserve(bo); 341 340 if (r) 342 341 goto free_pages;
+8 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 552 552 553 553 int amdgpu_bo_validate(struct amdgpu_bo *bo) 554 554 { 555 + struct ttm_operation_ctx ctx = { false, false }; 555 556 uint32_t domain; 556 557 int r; 557 558 ··· 563 562 564 563 retry: 565 564 amdgpu_ttm_placement_from_domain(bo, domain); 566 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 565 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 567 566 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { 568 567 domain = bo->allowed_domains; 569 568 goto retry; ··· 674 673 u64 *gpu_addr) 675 674 { 676 675 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 676 + struct ttm_operation_ctx ctx = { false, false }; 677 677 int r, i; 678 678 679 679 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) ··· 725 723 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 726 724 } 727 725 728 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 726 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 729 727 if (unlikely(r)) { 730 728 dev_err(adev->dev, "%p pin failed\n", bo); 731 729 goto error; ··· 762 760 int amdgpu_bo_unpin(struct amdgpu_bo *bo) 763 761 { 764 762 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 763 + struct ttm_operation_ctx ctx = { false, false }; 765 764 int r, i; 766 765 767 766 if (!bo->pin_count) { ··· 776 773 bo->placements[i].lpfn = 0; 777 774 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 778 775 } 779 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 776 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 780 777 if (unlikely(r)) { 781 778 dev_err(adev->dev, "%p validate failed for unpin\n", bo); 782 779 goto error; ··· 948 945 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 949 946 { 950 947 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 948 + struct ttm_operation_ctx ctx = { false, false }; 951 949 struct amdgpu_bo *abo; 952 950 unsigned long offset, size; 953 951 int r; ··· 982 978 abo->placement.num_busy_placement = 1; 983 979 
abo->placement.busy_placement = &abo->placements[1]; 984 980 985 - r = ttm_bo_validate(bo, &abo->placement, false, false); 981 + r = ttm_bo_validate(bo, &abo->placement, &ctx); 986 982 if (unlikely(r != 0)) 987 983 return r; 988 984
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 408 408 */ 409 409 static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx) 410 410 { 411 + struct ttm_operation_ctx tctx = { false, false }; 411 412 struct amdgpu_bo_va_mapping *mapping; 412 413 struct amdgpu_bo *bo; 413 414 uint32_t cmd; ··· 431 430 } 432 431 amdgpu_uvd_force_into_uvd_segment(bo); 433 432 434 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 433 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx); 435 434 } 436 435 437 436 return r; ··· 950 949 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, 951 950 bool direct, struct dma_fence **fence) 952 951 { 952 + struct ttm_operation_ctx ctx = { true, false }; 953 953 struct ttm_validate_buffer tv; 954 954 struct ww_acquire_ctx ticket; 955 955 struct list_head head; ··· 977 975 amdgpu_uvd_force_into_uvd_segment(bo); 978 976 } 979 977 980 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 978 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 981 979 if (r) 982 980 goto err; 983 981
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 558 558 int lo, int hi, unsigned size, int32_t index) 559 559 { 560 560 int64_t offset = ((uint64_t)size) * ((int64_t)index); 561 + struct ttm_operation_ctx ctx = { false, false }; 561 562 struct amdgpu_bo_va_mapping *mapping; 562 563 unsigned i, fpfn, lpfn; 563 564 struct amdgpu_bo *bo; ··· 588 587 bo->placements[i].lpfn = bo->placements[i].fpfn ? 589 588 min(bo->placements[i].fpfn, lpfn) : lpfn; 590 589 } 591 - return ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 590 + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 592 591 } 593 592 594 593
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 274 274 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, 275 275 bool direct, struct dma_fence **fence) 276 276 { 277 + struct ttm_operation_ctx ctx = { true, false }; 277 278 struct ttm_validate_buffer tv; 278 279 struct ww_acquire_ctx ticket; 279 280 struct list_head head; ··· 295 294 if (r) 296 295 return r; 297 296 298 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 297 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 299 298 if (r) 300 299 goto err; 301 300
+6 -3
drivers/gpu/drm/ast/ast_ttm.c
··· 354 354 355 355 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) 356 356 { 357 + struct ttm_operation_ctx ctx = { false, false }; 357 358 int i, ret; 358 359 359 360 if (bo->pin_count) { ··· 366 365 ast_ttm_placement(bo, pl_flag); 367 366 for (i = 0; i < bo->placement.num_placement; i++) 368 367 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 369 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 368 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 370 369 if (ret) 371 370 return ret; 372 371 ··· 378 377 379 378 int ast_bo_unpin(struct ast_bo *bo) 380 379 { 380 + struct ttm_operation_ctx ctx = { false, false }; 381 381 int i; 382 382 if (!bo->pin_count) { 383 383 DRM_ERROR("unpin bad %p\n", bo); ··· 390 388 391 389 for (i = 0; i < bo->placement.num_placement ; i++) 392 390 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 393 - return ttm_bo_validate(&bo->bo, &bo->placement, false, false); 391 + return ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 394 392 } 395 393 396 394 int ast_bo_push_sysram(struct ast_bo *bo) 397 395 { 396 + struct ttm_operation_ctx ctx = { false, false }; 398 397 int i, ret; 399 398 if (!bo->pin_count) { 400 399 DRM_ERROR("unpin bad %p\n", bo); ··· 412 409 for (i = 0; i < bo->placement.num_placement ; i++) 413 410 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 414 411 415 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 412 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 416 413 if (ret) { 417 414 DRM_ERROR("pushing to VRAM failed\n"); 418 415 return ret;
+4 -2
drivers/gpu/drm/bochs/bochs_mm.c
··· 283 283 284 284 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr) 285 285 { 286 + struct ttm_operation_ctx ctx = { false, false }; 286 287 int i, ret; 287 288 288 289 if (bo->pin_count) { ··· 296 295 bochs_ttm_placement(bo, pl_flag); 297 296 for (i = 0; i < bo->placement.num_placement; i++) 298 297 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 299 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 298 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 300 299 if (ret) 301 300 return ret; 302 301 ··· 308 307 309 308 int bochs_bo_unpin(struct bochs_bo *bo) 310 309 { 310 + struct ttm_operation_ctx ctx = { false, false }; 311 311 int i, ret; 312 312 313 313 if (!bo->pin_count) { ··· 322 320 323 321 for (i = 0; i < bo->placement.num_placement; i++) 324 322 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 325 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 323 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 326 324 if (ret) 327 325 return ret; 328 326
+4 -2
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 358 358 359 359 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr) 360 360 { 361 + struct ttm_operation_ctx ctx = { false, false }; 361 362 int i, ret; 362 363 363 364 if (bo->pin_count) { ··· 370 369 cirrus_ttm_placement(bo, pl_flag); 371 370 for (i = 0; i < bo->placement.num_placement; i++) 372 371 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 373 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 372 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 374 373 if (ret) 375 374 return ret; 376 375 ··· 382 381 383 382 int cirrus_bo_push_sysram(struct cirrus_bo *bo) 384 383 { 384 + struct ttm_operation_ctx ctx = { false, false }; 385 385 int i, ret; 386 386 if (!bo->pin_count) { 387 387 DRM_ERROR("unpin bad %p\n", bo); ··· 399 397 for (i = 0; i < bo->placement.num_placement ; i++) 400 398 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 401 399 402 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 400 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 403 401 if (ret) { 404 402 DRM_ERROR("pushing to VRAM failed\n"); 405 403 return ret;
+4 -2
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
··· 344 344 345 345 int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr) 346 346 { 347 + struct ttm_operation_ctx ctx = { false, false }; 347 348 int i, ret; 348 349 349 350 if (bo->pin_count) { ··· 357 356 hibmc_ttm_placement(bo, pl_flag); 358 357 for (i = 0; i < bo->placement.num_placement; i++) 359 358 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 360 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 359 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 361 360 if (ret) 362 361 return ret; 363 362 ··· 369 368 370 369 int hibmc_bo_unpin(struct hibmc_bo *bo) 371 370 { 371 + struct ttm_operation_ctx ctx = { false, false }; 372 372 int i, ret; 373 373 374 374 if (!bo->pin_count) { ··· 382 380 383 381 for (i = 0; i < bo->placement.num_placement ; i++) 384 382 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 385 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 383 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 386 384 if (ret) { 387 385 DRM_ERROR("validate failed for unpin: %d\n", ret); 388 386 return ret;
+6 -3
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 354 354 355 355 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) 356 356 { 357 + struct ttm_operation_ctx ctx = { false, false }; 357 358 int i, ret; 358 359 359 360 if (bo->pin_count) { ··· 367 366 mgag200_ttm_placement(bo, pl_flag); 368 367 for (i = 0; i < bo->placement.num_placement; i++) 369 368 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 370 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 369 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 371 370 if (ret) 372 371 return ret; 373 372 ··· 379 378 380 379 int mgag200_bo_unpin(struct mgag200_bo *bo) 381 380 { 381 + struct ttm_operation_ctx ctx = { false, false }; 382 382 int i; 383 383 if (!bo->pin_count) { 384 384 DRM_ERROR("unpin bad %p\n", bo); ··· 391 389 392 390 for (i = 0; i < bo->placement.num_placement ; i++) 393 391 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 394 - return ttm_bo_validate(&bo->bo, &bo->placement, false, false); 392 + return ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 395 393 } 396 394 397 395 int mgag200_bo_push_sysram(struct mgag200_bo *bo) 398 396 { 397 + struct ttm_operation_ctx ctx = { false, false }; 399 398 int i, ret; 400 399 if (!bo->pin_count) { 401 400 DRM_ERROR("unpin bad %p\n", bo); ··· 413 410 for (i = 0; i < bo->placement.num_placement ; i++) 414 411 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 415 412 416 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 413 + ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx); 417 414 if (ret) { 418 415 DRM_ERROR("pushing to VRAM failed\n"); 419 416 return ret;
+2 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 548 548 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 549 549 bool no_wait_gpu) 550 550 { 551 + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; 551 552 int ret; 552 553 553 - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, 554 - interruptible, no_wait_gpu); 554 + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); 555 555 if (ret) 556 556 return ret; 557 557
+2 -2
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 309 309 int ret; 310 310 struct drm_gem_object *gobj = NULL; 311 311 struct qxl_bo *qobj = NULL; 312 + struct ttm_operation_ctx ctx = { true, false }; 312 313 313 314 if (update_area->left >= update_area->right || 314 315 update_area->top >= update_area->bottom) ··· 327 326 328 327 if (!qobj->pin_count) { 329 328 qxl_ttm_placement_from_domain(qobj, qobj->type, false); 330 - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 331 - true, false); 329 + ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); 332 330 if (unlikely(ret)) 333 331 goto out; 334 332 }
+4 -2
drivers/gpu/drm/qxl/qxl_object.c
··· 223 223 224 224 static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 225 225 { 226 + struct ttm_operation_ctx ctx = { false, false }; 226 227 struct drm_device *ddev = bo->gem_base.dev; 227 228 int r; 228 229 ··· 234 233 return 0; 235 234 } 236 235 qxl_ttm_placement_from_domain(bo, domain, true); 237 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 236 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 238 237 if (likely(r == 0)) { 239 238 bo->pin_count = 1; 240 239 if (gpu_addr != NULL) ··· 247 246 248 247 static int __qxl_bo_unpin(struct qxl_bo *bo) 249 248 { 249 + struct ttm_operation_ctx ctx = { false, false }; 250 250 struct drm_device *ddev = bo->gem_base.dev; 251 251 int r, i; 252 252 ··· 260 258 return 0; 261 259 for (i = 0; i < bo->placement.num_placement; i++) 262 260 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 263 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 261 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 264 262 if (unlikely(r != 0)) 265 263 dev_err(ddev->dev, "%p validate failed for unpin\n", bo); 266 264 return r;
+2 -2
drivers/gpu/drm/qxl/qxl_release.c
··· 230 230 231 231 static int qxl_release_validate_bo(struct qxl_bo *bo) 232 232 { 233 + struct ttm_operation_ctx ctx = { true, false }; 233 234 int ret; 234 235 235 236 if (!bo->pin_count) { 236 237 qxl_ttm_placement_from_domain(bo, bo->type, false); 237 - ret = ttm_bo_validate(&bo->tbo, &bo->placement, 238 - true, false); 238 + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 239 239 if (ret) 240 240 return ret; 241 241 }
+2 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 285 285 int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, 286 286 struct drm_file *filp) 287 287 { 288 + struct ttm_operation_ctx ctx = { true, false }; 288 289 struct radeon_device *rdev = dev->dev_private; 289 290 struct drm_radeon_gem_userptr *args = data; 290 291 struct drm_gem_object *gobj; ··· 344 343 } 345 344 346 345 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); 347 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 346 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 348 347 radeon_bo_unreserve(bo); 349 348 up_read(&current->mm->mmap_sem); 350 349 if (r)
+2 -1
drivers/gpu/drm/radeon/radeon_mn.c
··· 124 124 unsigned long end) 125 125 { 126 126 struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); 127 + struct ttm_operation_ctx ctx = { false, false }; 127 128 struct interval_tree_node *it; 128 129 129 130 /* notification is exclusive, but interval is inclusive */ ··· 158 157 DRM_ERROR("(%ld) failed to wait for user bo\n", r); 159 158 160 159 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); 161 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 160 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 162 161 if (r) 163 162 DRM_ERROR("(%ld) failed to validate user bo\n", r); 164 163
+9 -5
drivers/gpu/drm/radeon/radeon_object.c
··· 329 329 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, 330 330 u64 *gpu_addr) 331 331 { 332 + struct ttm_operation_ctx ctx = { false, false }; 332 333 int r, i; 333 334 334 335 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) ··· 372 371 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; 373 372 } 374 373 375 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 374 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 376 375 if (likely(r == 0)) { 377 376 bo->pin_count = 1; 378 377 if (gpu_addr != NULL) ··· 394 393 395 394 int radeon_bo_unpin(struct radeon_bo *bo) 396 395 { 396 + struct ttm_operation_ctx ctx = { false, false }; 397 397 int r, i; 398 398 399 399 if (!bo->pin_count) { ··· 408 406 bo->placements[i].lpfn = 0; 409 407 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 410 408 } 411 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 409 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 412 410 if (likely(r == 0)) { 413 411 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 414 412 bo->rdev->vram_pin_size -= radeon_bo_size(bo); ··· 533 531 struct ww_acquire_ctx *ticket, 534 532 struct list_head *head, int ring) 535 533 { 534 + struct ttm_operation_ctx ctx = { true, false }; 536 535 struct radeon_bo_list *lobj; 537 536 struct list_head duplicates; 538 537 int r; ··· 575 572 radeon_uvd_force_into_uvd_segment(bo, allowed); 576 573 577 574 initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); 578 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 575 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 579 576 bytes_moved += atomic64_read(&rdev->num_bytes_moved) - 580 577 initial_bytes_moved; 581 578 ··· 795 792 796 793 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 797 794 { 795 + struct ttm_operation_ctx ctx = { false, false }; 798 796 struct radeon_device *rdev; 799 797 struct radeon_bo *rbo; 800 798 unsigned long offset, size, lpfn; ··· 827 823 (!rbo->placements[i].lpfn || 
rbo->placements[i].lpfn > lpfn)) 828 824 rbo->placements[i].lpfn = lpfn; 829 825 } 830 - r = ttm_bo_validate(bo, &rbo->placement, false, false); 826 + r = ttm_bo_validate(bo, &rbo->placement, &ctx); 831 827 if (unlikely(r == -ENOMEM)) { 832 828 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 833 - return ttm_bo_validate(bo, &rbo->placement, false, false); 829 + return ttm_bo_validate(bo, &rbo->placement, &ctx); 834 830 } else if (unlikely(r != 0)) { 835 831 return r; 836 832 }
+2 -1
drivers/gpu/drm/radeon/radeon_vm.c
··· 387 387 static int radeon_vm_clear_bo(struct radeon_device *rdev, 388 388 struct radeon_bo *bo) 389 389 { 390 + struct ttm_operation_ctx ctx = { true, false }; 390 391 struct radeon_ib ib; 391 392 unsigned entries; 392 393 uint64_t addr; ··· 397 396 if (r) 398 397 return r; 399 398 400 - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 399 + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 401 400 if (r) 402 401 goto error_unreserve; 403 402
+9 -7
drivers/gpu/drm/ttm/ttm_bo.c
··· 1091 1091 EXPORT_SYMBOL(ttm_bo_mem_compat); 1092 1092 1093 1093 int ttm_bo_validate(struct ttm_buffer_object *bo, 1094 - struct ttm_placement *placement, 1095 - bool interruptible, 1096 - bool no_wait_gpu) 1094 + struct ttm_placement *placement, 1095 + struct ttm_operation_ctx *ctx) 1097 1096 { 1098 1097 int ret; 1099 1098 uint32_t new_flags; ··· 1102 1103 * Check whether we need to move buffer. 1103 1104 */ 1104 1105 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { 1105 - ret = ttm_bo_move_buffer(bo, placement, interruptible, 1106 - no_wait_gpu); 1106 + ret = ttm_bo_move_buffer(bo, placement, ctx->interruptible, 1107 + ctx->no_wait_gpu); 1107 1108 if (ret) 1108 1109 return ret; 1109 1110 } else { ··· 1218 1219 WARN_ON(!locked); 1219 1220 } 1220 1221 1221 - if (likely(!ret)) 1222 - ret = ttm_bo_validate(bo, placement, interruptible, false); 1222 + if (likely(!ret)) { 1223 + struct ttm_operation_ctx ctx = { interruptible, false }; 1224 + 1225 + ret = ttm_bo_validate(bo, placement, &ctx); 1226 + } 1223 1227 1224 1228 if (unlikely(ret)) { 1225 1229 if (!resv)
+6 -5
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 56 56 static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, 57 57 struct list_head *head) 58 58 { 59 + struct ttm_operation_ctx ctx = { false, false }; 59 60 struct ttm_validate_buffer *buf; 60 61 struct ttm_buffer_object *bo; 61 62 struct virtio_gpu_object *qobj; ··· 69 68 list_for_each_entry(buf, head, head) { 70 69 bo = buf->bo; 71 70 qobj = container_of(bo, struct virtio_gpu_object, tbo); 72 - ret = ttm_bo_validate(bo, &qobj->placement, false, false); 71 + ret = ttm_bo_validate(bo, &qobj->placement, &ctx); 73 72 if (ret) { 74 73 ttm_eu_backoff_reservation(ticket, head); 75 74 return ret; ··· 353 352 struct virtio_gpu_device *vgdev = dev->dev_private; 354 353 struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 355 354 struct drm_virtgpu_3d_transfer_from_host *args = data; 355 + struct ttm_operation_ctx ctx = { true, false }; 356 356 struct drm_gem_object *gobj = NULL; 357 357 struct virtio_gpu_object *qobj = NULL; 358 358 struct virtio_gpu_fence *fence; ··· 374 372 if (ret) 375 373 goto out; 376 374 377 - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 378 - true, false); 375 + ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); 379 376 if (unlikely(ret)) 380 377 goto out_unres; 381 378 ··· 400 399 struct virtio_gpu_device *vgdev = dev->dev_private; 401 400 struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 402 401 struct drm_virtgpu_3d_transfer_to_host *args = data; 402 + struct ttm_operation_ctx ctx = { true, false }; 403 403 struct drm_gem_object *gobj = NULL; 404 404 struct virtio_gpu_object *qobj = NULL; 405 405 struct virtio_gpu_fence *fence; ··· 418 416 if (ret) 419 417 goto out; 420 418 421 - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 422 - true, false); 419 + ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); 423 420 if (unlikely(ret)) 424 421 goto out_unres; 425 422
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 387 387 */ 388 388 static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) 389 389 { 390 + struct ttm_operation_ctx ctx = { false, false }; 390 391 struct vmw_private *dev_priv = res->dev_priv; 391 392 struct vmw_cotable *vcotbl = vmw_cotable(res); 392 393 struct vmw_dma_buffer *buf, *old_buf = res->backup; ··· 456 455 } 457 456 458 457 /* Unpin new buffer, and switch backup buffers. */ 459 - ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false); 458 + ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx); 460 459 if (unlikely(ret != 0)) { 461 460 DRM_ERROR("Failed validating new COTable backup buffer.\n"); 462 461 goto out_wait;
+13 -8
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
··· 47 47 struct ttm_placement *placement, 48 48 bool interruptible) 49 49 { 50 + struct ttm_operation_ctx ctx = {interruptible, false }; 50 51 struct ttm_buffer_object *bo = &buf->base; 51 52 int ret; 52 53 uint32_t new_flags; ··· 66 65 ret = ttm_bo_mem_compat(placement, &bo->mem, 67 66 &new_flags) == true ? 0 : -EINVAL; 68 67 else 69 - ret = ttm_bo_validate(bo, placement, interruptible, false); 68 + ret = ttm_bo_validate(bo, placement, &ctx); 70 69 71 70 if (!ret) 72 71 vmw_bo_pin_reserved(buf, true); ··· 96 95 struct vmw_dma_buffer *buf, 97 96 bool interruptible) 98 97 { 98 + struct ttm_operation_ctx ctx = {interruptible, false }; 99 99 struct ttm_buffer_object *bo = &buf->base; 100 100 int ret; 101 101 uint32_t new_flags; ··· 117 115 goto out_unreserve; 118 116 } 119 117 120 - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible, 121 - false); 118 + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); 122 119 if (likely(ret == 0) || ret == -ERESTARTSYS) 123 120 goto out_unreserve; 124 121 125 - ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false); 122 + ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); 126 123 127 124 out_unreserve: 128 125 if (!ret) ··· 171 170 struct vmw_dma_buffer *buf, 172 171 bool interruptible) 173 172 { 173 + struct ttm_operation_ctx ctx = {interruptible, false }; 174 174 struct ttm_buffer_object *bo = &buf->base; 175 175 struct ttm_placement placement; 176 176 struct ttm_place place; ··· 202 200 if (bo->mem.mem_type == TTM_PL_VRAM && 203 201 bo->mem.start < bo->num_pages && 204 202 bo->mem.start > 0 && 205 - buf->pin_count == 0) 206 - (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false); 203 + buf->pin_count == 0) { 204 + ctx.interruptible = false; 205 + (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx); 206 + } 207 207 208 208 if (buf->pin_count > 0) 209 209 ret = ttm_bo_mem_compat(&placement, &bo->mem, 210 210 &new_flags) == true ? 
0 : -EINVAL; 211 211 else 212 - ret = ttm_bo_validate(bo, &placement, interruptible, false); 212 + ret = ttm_bo_validate(bo, &placement, &ctx); 213 213 214 214 /* For some reason we didn't end up at the start of vram */ 215 215 WARN_ON(ret == 0 && bo->offset != 0); ··· 290 286 */ 291 287 void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin) 292 288 { 289 + struct ttm_operation_ctx ctx = { false, true }; 293 290 struct ttm_place pl; 294 291 struct ttm_placement placement; 295 292 struct ttm_buffer_object *bo = &vbo->base; ··· 319 314 placement.num_placement = 1; 320 315 placement.placement = &pl; 321 316 322 - ret = ttm_bo_validate(bo, &placement, false, true); 317 + ret = ttm_bo_validate(bo, &placement, &ctx); 323 318 324 319 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); 325 320 }
+4 -5
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 3701 3701 { 3702 3702 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer, 3703 3703 base); 3704 + struct ttm_operation_ctx ctx = { interruptible, true }; 3704 3705 int ret; 3705 3706 3706 3707 if (vbo->pin_count > 0) 3707 3708 return 0; 3708 3709 3709 3710 if (validate_as_mob) 3710 - return ttm_bo_validate(bo, &vmw_mob_placement, interruptible, 3711 - false); 3711 + return ttm_bo_validate(bo, &vmw_mob_placement, &ctx); 3712 3712 3713 3713 /** 3714 3714 * Put BO in VRAM if there is space, otherwise as a GMR. ··· 3717 3717 * used as a GMR, this will return -ENOMEM. 3718 3718 */ 3719 3719 3720 - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible, 3721 - false); 3720 + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx); 3722 3721 if (likely(ret == 0 || ret == -ERESTARTSYS)) 3723 3722 return ret; 3724 3723 ··· 3726 3727 * previous contents. 3727 3728 */ 3728 3729 3729 - ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false); 3730 + ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx); 3730 3731 return ret; 3731 3732 } 3732 3733
+4 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 968 968 bool interruptible, 969 969 struct ttm_validate_buffer *val_buf) 970 970 { 971 + struct ttm_operation_ctx ctx = { true, false }; 971 972 struct list_head val_list; 972 973 bool backup_dirty = false; 973 974 int ret; ··· 993 992 backup_dirty = res->backup_dirty; 994 993 ret = ttm_bo_validate(&res->backup->base, 995 994 res->func->backup_placement, 996 - true, false); 995 + &ctx); 997 996 998 997 if (unlikely(ret != 0)) 999 998 goto out_no_validate; ··· 1447 1446 */ 1448 1447 int vmw_resource_pin(struct vmw_resource *res, bool interruptible) 1449 1448 { 1449 + struct ttm_operation_ctx ctx = { interruptible, false }; 1450 1450 struct vmw_private *dev_priv = res->dev_priv; 1451 1451 int ret; 1452 1452 ··· 1468 1466 ret = ttm_bo_validate 1469 1467 (&vbo->base, 1470 1468 res->func->backup_placement, 1471 - interruptible, false); 1469 + &ctx); 1472 1470 if (ret) { 1473 1471 ttm_bo_unreserve(&vbo->base); 1474 1472 goto out_no_validate;
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 970 970 size_t size, 971 971 struct list_head *list) 972 972 { 973 + struct ttm_operation_ctx ctx = { false, true }; 973 974 struct vmw_dma_buffer *buf; 974 975 struct ttm_bo_kmap_obj map; 975 976 bool is_iomem; ··· 1006 1005 WARN_ON(is_iomem); 1007 1006 1008 1007 ttm_bo_kunmap(&map); 1009 - ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true); 1008 + ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx); 1010 1009 WARN_ON(ret != 0); 1011 1010 ttm_bo_unreserve(&buf->base); 1012 1011
+16 -4
include/drm/ttm/ttm_bo_api.h
··· 259 259 }; 260 260 261 261 /** 262 + * struct ttm_operation_ctx 263 + * 264 + * @interruptible: Sleep interruptible if sleeping. 265 + * @no_wait_gpu: Return immediately if the GPU is busy. 266 + * 267 + * Context for TTM operations like changing buffer placement or general memory 268 + * allocation. 269 + */ 270 + struct ttm_operation_ctx { 271 + bool interruptible; 272 + bool no_wait_gpu; 273 + }; 274 + 275 + /** 262 276 * ttm_bo_reference - reference a struct ttm_buffer_object 263 277 * 264 278 * @bo: The buffer object. ··· 320 306 * 321 307 * @bo: The buffer object. 322 308 * @placement: Proposed placement for the buffer object. 323 - * @interruptible: Sleep interruptible if sleeping. 324 - * @no_wait_gpu: Return immediately if the GPU is busy. 309 + * @ctx: validation parameters. 325 310 * 326 311 * Changes placement and caching policy of the buffer object 327 312 * according proposed placement. ··· 332 319 */ 333 320 int ttm_bo_validate(struct ttm_buffer_object *bo, 334 321 struct ttm_placement *placement, 335 - bool interruptible, 336 - bool no_wait_gpu); 322 + struct ttm_operation_ctx *ctx); 337 323 338 324 /** 339 325 * ttm_bo_unref