Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: fix and cleanup amdgpu_gem_object_close v4

The problem is that we can't add the clear fence to the BO
when there is an exclusive fence on it since we can't
guarantee that the clear fence will complete after the
exclusive one.

To fix this refactor the function and also add the exclusive
fence as shared to the resv object.

v2: fix warning
v3: add excl fence as shared instead
v4: squash in fix for fence handling in amdgpu_gem_object_close

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: xinhui pan <xinhui.pan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Christian König and committed by
Alex Deucher
82c416b1 9ec420d8

+25 -18
+25 -18
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 161 161 162 162 struct amdgpu_bo_list_entry vm_pd; 163 163 struct list_head list, duplicates; 164 + struct dma_fence *fence = NULL; 164 165 struct ttm_validate_buffer tv; 165 166 struct ww_acquire_ctx ticket; 166 167 struct amdgpu_bo_va *bo_va; 167 - int r; 168 + long r; 168 169 169 170 INIT_LIST_HEAD(&list); 170 171 INIT_LIST_HEAD(&duplicates); 171 172 172 173 tv.bo = &bo->tbo; 173 - tv.num_shared = 1; 174 + tv.num_shared = 2; 174 175 list_add(&tv.head, &list); 175 176 176 177 amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); ··· 179 178 r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); 180 179 if (r) { 181 180 dev_err(adev->dev, "leaking bo va because " 182 - "we fail to reserve bo (%d)\n", r); 181 + "we fail to reserve bo (%ld)\n", r); 183 182 return; 184 183 } 185 184 bo_va = amdgpu_vm_bo_find(vm, bo); 186 - if (bo_va && --bo_va->ref_count == 0) { 187 - amdgpu_vm_bo_rmv(adev, bo_va); 185 + if (!bo_va || --bo_va->ref_count) 186 + goto out_unlock; 188 187 189 - if (amdgpu_vm_ready(vm)) { 190 - struct dma_fence *fence = NULL; 188 + amdgpu_vm_bo_rmv(adev, bo_va); 189 + if (!amdgpu_vm_ready(vm)) 190 + goto out_unlock; 191 191 192 - r = amdgpu_vm_clear_freed(adev, vm, &fence); 193 - if (unlikely(r)) { 194 - dev_err(adev->dev, "failed to clear page " 195 - "tables on GEM object close (%d)\n", r); 196 - } 197 - 198 - if (fence) { 199 - amdgpu_bo_fence(bo, fence, true); 200 - dma_fence_put(fence); 201 - } 202 - } 192 + fence = dma_resv_get_excl(bo->tbo.base.resv); 193 + if (fence) { 194 + amdgpu_bo_fence(bo, fence, true); 195 + fence = NULL; 203 196 } 197 + 198 + r = amdgpu_vm_clear_freed(adev, vm, &fence); 199 + if (r || !fence) 200 + goto out_unlock; 201 + 202 + amdgpu_bo_fence(bo, fence, true); 203 + dma_fence_put(fence); 204 + 205 + out_unlock: 206 + if (unlikely(r < 0)) 207 + dev_err(adev->dev, "failed to clear page " 208 + "tables on GEM object close (%ld)\n", r); 204 209 ttm_eu_backoff_reservation(&ticket, &list); 205 210 } 206 211