Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: drop special pipeline accel cleanup function.

Once refactored, the two accel cleanup paths were mostly the same.

Just pass a bool to indicate whether the evictions are to be pipelined.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200917064132.148521-2-airlied@gmail.com

+38 -76
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 500 500 501 501 /* Always block for VM page tables before committing the new location */ 502 502 if (bo->type == ttm_bo_type_kernel) 503 - r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); 503 + r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem); 504 504 else 505 - r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); 505 + r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem); 506 506 dma_fence_put(fence); 507 507 return r; 508 508
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 824 824 if (ret == 0) { 825 825 ret = ttm_bo_move_accel_cleanup(bo, 826 826 &fence->base, 827 - evict, 827 + evict, false, 828 828 new_reg); 829 829 nouveau_fence_unref(&fence); 830 830 }
+1 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 200 200 if (IS_ERR(fence)) 201 201 return PTR_ERR(fence); 202 202 203 - r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem); 203 + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem); 204 204 radeon_fence_unref(&fence); 205 205 return r; 206 206 }
+32 -57
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 580 580 return 0; 581 581 } 582 582 583 + static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo, 584 + struct dma_fence *fence) 585 + { 586 + struct ttm_bo_device *bdev = bo->bdev; 587 + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); 588 + 589 + /** 590 + * BO doesn't have a TTM we need to bind/unbind. Just remember 591 + * this eviction and free up the allocation 592 + */ 593 + spin_lock(&from->move_lock); 594 + if (!from->move || dma_fence_is_later(fence, from->move)) { 595 + dma_fence_put(from->move); 596 + from->move = dma_fence_get(fence); 597 + } 598 + spin_unlock(&from->move_lock); 599 + 600 + ttm_bo_free_old_node(bo); 601 + 602 + dma_fence_put(bo->moving); 603 + bo->moving = dma_fence_get(fence); 604 + } 605 + 583 606 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 584 607 struct dma_fence *fence, 585 608 bool evict, 609 + bool pipeline, 586 610 struct ttm_resource *new_mem) 587 611 { 588 612 struct ttm_bo_device *bdev = bo->bdev; 613 + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); 589 614 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); 590 - int ret; 615 + int ret = 0; 591 616 592 617 dma_resv_add_excl_fence(bo->base.resv, fence); 593 - if (evict) 594 - ret = ttm_bo_wait_free_node(bo, man->use_tt); 595 - else 618 + if (!evict) 596 619 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); 620 + else if (!from->use_tt && pipeline) 621 + ttm_bo_move_pipeline_evict(bo, fence); 622 + else 623 + ret = ttm_bo_wait_free_node(bo, man->use_tt); 624 + 597 625 if (ret) 598 626 return ret; 599 627 ··· 630 602 return 0; 631 603 } 632 604 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 633 - 634 - int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, 635 - struct dma_fence *fence, bool evict, 636 - struct ttm_resource *new_mem) 637 - { 638 - struct ttm_bo_device *bdev = bo->bdev; 639 - 640 - struct ttm_resource_manager *from = ttm_manager_type(bdev, 
bo->mem.mem_type); 641 - struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type); 642 - 643 - int ret; 644 - 645 - dma_resv_add_excl_fence(bo->base.resv, fence); 646 - 647 - if (!evict) { 648 - ret = ttm_bo_move_to_ghost(bo, fence, to->use_tt); 649 - if (ret) 650 - return ret; 651 - } else if (!from->use_tt) { 652 - 653 - /** 654 - * BO doesn't have a TTM we need to bind/unbind. Just remember 655 - * this eviction and free up the allocation 656 - */ 657 - 658 - spin_lock(&from->move_lock); 659 - if (!from->move || dma_fence_is_later(fence, from->move)) { 660 - dma_fence_put(from->move); 661 - from->move = dma_fence_get(fence); 662 - } 663 - spin_unlock(&from->move_lock); 664 - 665 - ttm_bo_free_old_node(bo); 666 - 667 - dma_fence_put(bo->moving); 668 - bo->moving = dma_fence_get(fence); 669 - 670 - } else { 671 - /** 672 - * Last resort, wait for the move to be completed. 673 - * 674 - * Should never happen in pratice. 675 - */ 676 - ret = ttm_bo_wait_free_node(bo, to->use_tt); 677 - if (ret) 678 - return ret; 679 - } 680 - 681 - ttm_bo_assign_mem(bo, new_mem); 682 - 683 - return 0; 684 - } 685 - EXPORT_SYMBOL(ttm_bo_pipeline_move); 686 605 687 606 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) 688 607 {
+2 -15
include/drm/ttm/ttm_bo_driver.h
··· 642 642 * @bo: A pointer to a struct ttm_buffer_object. 643 643 * @fence: A fence object that signals when moving is complete. 644 644 * @evict: This is an evict move. Don't return until the buffer is idle. 645 + * @pipeline: evictions are to be pipelined. 645 646 * @new_mem: struct ttm_resource indicating where to move. 646 647 * 647 648 * Accelerated move function to be called when an accelerated move ··· 654 653 */ 655 654 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 656 655 struct dma_fence *fence, bool evict, 656 + bool pipeline, 657 657 struct ttm_resource *new_mem); 658 - 659 - /** 660 - * ttm_bo_pipeline_move. 661 - * 662 - * @bo: A pointer to a struct ttm_buffer_object. 663 - * @fence: A fence object that signals when moving is complete. 664 - * @evict: This is an evict move. Don't return until the buffer is idle. 665 - * @new_mem: struct ttm_resource indicating where to move. 666 - * 667 - * Function for pipelining accelerated moves. Either free the memory 668 - * immediately or hang it on a temporary buffer object. 669 - */ 670 - int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, 671 - struct dma_fence *fence, bool evict, 672 - struct ttm_resource *new_mem); 673 658 674 659 /** 675 660 * ttm_bo_pipeline_gutting.