Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: fix waiting for all fences before flipping

Otherwise we might see corruption.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
1ffd2652 4127a59e

+56 -29
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 828 828 uint64_t base; 829 829 struct drm_pending_vblank_event *event; 830 830 struct amdgpu_bo *old_rbo; 831 - struct fence *fence; 831 + struct fence *excl; 832 + unsigned shared_count; 833 + struct fence **shared; 832 834 }; 833 835 834 836
+53 -28
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 35 35 #include <drm/drm_crtc_helper.h> 36 36 #include <drm/drm_edid.h> 37 37 38 + static void amdgpu_flip_wait_fence(struct amdgpu_device *adev, 39 + struct fence **f) 40 + { 41 + struct amdgpu_fence *fence; 42 + long r; 43 + 44 + if (*f == NULL) 45 + return; 46 + 47 + fence = to_amdgpu_fence(*f); 48 + if (fence) { 49 + r = fence_wait(&fence->base, false); 50 + if (r == -EDEADLK) { 51 + up_read(&adev->exclusive_lock); 52 + r = amdgpu_gpu_reset(adev); 53 + down_read(&adev->exclusive_lock); 54 + } 55 + } else 56 + r = fence_wait(*f, false); 57 + 58 + if (r) 59 + DRM_ERROR("failed to wait on page flip fence (%ld)!\n", r); 60 + 61 + /* We continue with the page flip even if we failed to wait on 62 + * the fence, otherwise the DRM core and userspace will be 63 + * confused about which BO the CRTC is scanning out 64 + */ 65 + fence_put(*f); 66 + *f = NULL; 67 + } 38 68 39 69 static void amdgpu_flip_work_func(struct work_struct *__work) 40 70 { ··· 74 44 struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; 75 45 76 46 struct drm_crtc *crtc = &amdgpuCrtc->base; 77 - struct amdgpu_fence *fence; 78 47 unsigned long flags; 79 - int r; 48 + unsigned i; 80 49 81 50 down_read(&adev->exclusive_lock); 82 - if (work->fence) { 83 - fence = to_amdgpu_fence(work->fence); 84 - if (fence) { 85 - r = fence_wait(&fence->base, false); 86 - if (r == -EDEADLK) { 87 - up_read(&adev->exclusive_lock); 88 - r = amdgpu_gpu_reset(adev); 89 - down_read(&adev->exclusive_lock); 90 - } 91 - } else 92 - r = fence_wait(work->fence, false); 93 - 94 - if (r) 95 - DRM_ERROR("failed to wait on page flip fence (%d)!\n", r); 96 - 97 - /* We continue with the page flip even if we failed to wait on 98 - * the fence, otherwise the DRM core and userspace will be 99 - * confused about which BO the CRTC is scanning out 100 - */ 101 - 102 - fence_put(work->fence); 103 - work->fence = NULL; 104 - } 51 + amdgpu_flip_wait_fence(adev, &work->excl); 52 + for (i = 0; i < work->shared_count; ++i) 53 + 
amdgpu_flip_wait_fence(adev, &work->shared[i]); 105 54 106 55 /* We borrow the event spin lock for protecting flip_status */ 107 56 spin_lock_irqsave(&crtc->dev->event_lock, flags); ··· 117 108 DRM_ERROR("failed to reserve buffer after flip\n"); 118 109 119 110 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 111 + kfree(work->shared); 120 112 kfree(work); 121 113 } 122 114 ··· 137 127 unsigned long flags; 138 128 u64 tiling_flags; 139 129 u64 base; 140 - int r; 130 + int i, r; 141 131 142 132 work = kzalloc(sizeof *work, GFP_KERNEL); 143 133 if (work == NULL) ··· 177 167 goto cleanup; 178 168 } 179 169 180 - work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); 170 + r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, 171 + &work->shared_count, 172 + &work->shared); 173 + if (unlikely(r != 0)) { 174 + amdgpu_bo_unreserve(new_rbo); 175 + DRM_ERROR("failed to get fences for buffer\n"); 176 + goto cleanup; 177 + } 178 + 179 + fence_get(work->excl); 180 + for (i = 0; i < work->shared_count; ++i) 181 + fence_get(work->shared[i]); 182 + 181 183 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); 182 184 amdgpu_bo_unreserve(new_rbo); 183 185 ··· 234 212 235 213 cleanup: 236 214 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 237 - fence_put(work->fence); 215 + fence_put(work->excl); 216 + for (i = 0; i < work->shared_count; ++i) 217 + fence_put(work->shared[i]); 218 + kfree(work->shared); 238 219 kfree(work); 239 220 240 221 return r;