Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: Document and optimize ttm_bo_pipeline_gutting()

If the bo is idle when calling ttm_bo_pipeline_gutting(), we unnecessarily
create a ghost object and push it out to delayed destroy.
Fix this by adding a path for idle, and document the function.

Also avoid having the bo end up in a bad state vulnerable to user-space
triggered kernel BUGs if the call to ttm_tt_create() fails.

Finally reuse ttm_bo_pipeline_gutting() in ttm_bo_evict().

Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210602083818.241793-7-thomas.hellstrom@linux.intel.com

+72 -15
+10 -10
drivers/gpu/drm/ttm/ttm_bo.c
···
503 503		bdev->funcs->evict_flags(bo, &placement);
504 504
505 505		if (!placement.num_placement && !placement.num_busy_placement) {
506     -		ttm_bo_wait(bo, false, false);
    506 +		ret = ttm_bo_wait(bo, true, false);
    507 +		if (ret)
    508 +			return ret;
507 509
508     -		ttm_bo_cleanup_memtype_use(bo);
509     -		return ttm_tt_create(bo, false);
    510 +		/*
    511 +		 * Since we've already synced, this frees backing store
    512 +		 * immediately.
    513 +		 */
    514 +		return ttm_bo_pipeline_gutting(bo);
510 515		}
511 516
512 517		ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
···
952 947		/*
953 948		 * Remove the backing store if no placement is given.
954 949		 */
955     -	if (!placement->num_placement && !placement->num_busy_placement) {
956     -		ret = ttm_bo_pipeline_gutting(bo);
957     -		if (ret)
958     -			return ret;
959     -
960     -		return ttm_tt_create(bo, false);
961     -	}
    950 +	if (!placement->num_placement && !placement->num_busy_placement)
    951 +		return ttm_bo_pipeline_gutting(bo);
962 952
963 953		/*
964 954		 * Check whether we need to move buffer.
+49 -5
drivers/gpu/drm/ttm/ttm_bo_util.c
···
566 566	}
567 567	EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
568 568
    569 +/**
    570 + * ttm_bo_pipeline_gutting - purge the contents of a bo
    571 + * @bo: The buffer object
    572 + *
    573 + * Purge the contents of a bo, async if the bo is not idle.
    574 + * After a successful call, the bo is left unpopulated in
    575 + * system placement. The function may wait uninterruptible
    576 + * for idle on OOM.
    577 + *
    578 + * Return: 0 if successful, negative error code on failure.
    579 + */
569 580	int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
570 581	{
571 582		static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
572 583		struct ttm_buffer_object *ghost;
    584 +	struct ttm_tt *ttm;
573 585		int ret;
574 586
575     -	ret = ttm_buffer_object_transfer(bo, &ghost);
    587 +	/* If already idle, no need for ghost object dance. */
    588 +	ret = ttm_bo_wait(bo, false, true);
    589 +	if (ret != -EBUSY) {
    590 +		if (!bo->ttm) {
    591 +			/* See comment below about clearing. */
    592 +			ret = ttm_tt_create(bo, true);
    593 +			if (ret)
    594 +				return ret;
    595 +		} else {
    596 +			ttm_tt_unpopulate(bo->bdev, bo->ttm);
    597 +			if (bo->type == ttm_bo_type_device)
    598 +				ttm_tt_mark_for_clear(bo->ttm);
    599 +		}
    600 +		ttm_resource_free(bo, &bo->resource);
    601 +		return ttm_resource_alloc(bo, &sys_mem, &bo->resource);
    602 +	}
    603 +
    604 +	/*
    605 +	 * We need an unpopulated ttm_tt after giving our current one,
    606 +	 * if any, to the ghost object. And we can't afford to fail
    607 +	 * creating one *after* the operation. If the bo subsequently gets
    608 +	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
    609 +	 * to avoid leaking sensitive information to user-space.
    610 +	 */
    611 +
    612 +	ttm = bo->ttm;
    613 +	bo->ttm = NULL;
    614 +	ret = ttm_tt_create(bo, true);
    615 +	swap(bo->ttm, ttm);
576 616		if (ret)
577 617			return ret;
    618 +
    619 +	ret = ttm_buffer_object_transfer(bo, &ghost);
    620 +	if (ret) {
    621 +		ttm_tt_destroy(bo->bdev, ttm);
    622 +		return ret;
    623 +	}
578 624
579 625		ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
580 626		/* Last resort, wait for the BO to be idle when we are OOM */
581 627		if (ret)
582 628			ttm_bo_wait(bo, false, false);
583 629
584     -	ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
585     -	bo->ttm = NULL;
586     -
587 630		dma_resv_unlock(&ghost->base._resv);
588 631		ttm_bo_put(ghost);
    632 +	bo->ttm = ttm;
589 633
590     -	return ret;
    634 +	return ttm_resource_alloc(bo, &sys_mem, &bo->resource);
591 635	}
+13
include/drm/ttm/ttm_tt.h
···
170 170	 */
171 171	void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
172 172
    173 +/**
    174 + * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
    175 + *
    176 + * @ttm: Pointer to the ttm_tt structure
    177 + *
    178 + * Marks pages for clearing so that the next time the page vector is
    179 + * populated, the pages will be cleared.
    180 + */
    181 +static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
    182 +{
    183 +	ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
    184 +}
    185 +
173 186	void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
174 187
175 188	struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,