Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: remove fence_cbs

This is only used for atomic commit these days. So instead just give
atomic its own work-queue where we can block on each bo in turn.
Simplifies things a whole bunch and makes the 'struct fence' conversion
easier.

Signed-off-by: Rob Clark <robdclark@gmail.com>

Rob Clark ba00c3f2 ca762a8a

+47 -107
+32 -45
drivers/gpu/drm/msm/msm_atomic.c
··· 18 18 #include "msm_drv.h" 19 19 #include "msm_kms.h" 20 20 #include "msm_gem.h" 21 - #include "msm_gpu.h" /* temporary */ 22 21 #include "msm_fence.h" 23 22 24 23 struct msm_commit { 25 24 struct drm_device *dev; 26 25 struct drm_atomic_state *state; 27 - uint32_t fence; 28 - struct msm_fence_cb fence_cb; 26 + struct work_struct work; 29 27 uint32_t crtc_mask; 30 28 }; 31 29 32 - static void fence_cb(struct msm_fence_cb *cb); 30 + static void commit_worker(struct work_struct *work); 33 31 34 32 /* block until specified crtcs are no longer pending update, and 35 33 * atomically mark them as pending update ··· 69 71 c->dev = state->dev; 70 72 c->state = state; 71 73 72 - /* TODO we might need a way to indicate to run the cb on a 73 - * different wq so wait_for_vblanks() doesn't block retiring 74 - * bo's.. 75 - */ 76 - INIT_FENCE_CB(&c->fence_cb, fence_cb); 74 + INIT_WORK(&c->work, commit_worker); 77 75 78 76 return c; 79 77 } ··· 107 113 } 108 114 } 109 115 116 + static void wait_fences(struct msm_commit *c, bool async) 117 + { 118 + int nplanes = c->dev->mode_config.num_total_plane; 119 + ktime_t timeout = ktime_add_ms(ktime_get(), 1000); 120 + int i; 121 + 122 + for (i = 0; i < nplanes; i++) { 123 + struct drm_plane *plane = c->state->planes[i]; 124 + struct drm_plane_state *new_state = c->state->plane_states[i]; 125 + 126 + if (!plane) 127 + continue; 128 + 129 + if ((plane->state->fb != new_state->fb) && new_state->fb) { 130 + struct drm_gem_object *obj = 131 + msm_framebuffer_bo(new_state->fb, 0); 132 + msm_gem_cpu_sync(obj, MSM_PREP_READ, &timeout); 133 + } 134 + } 135 + } 136 + 110 137 /* The (potentially) asynchronous part of the commit. At this point 111 138 * nothing can fail short of armageddon. 
112 139 */ 113 - static void complete_commit(struct msm_commit *c) 140 + static void complete_commit(struct msm_commit *c, bool async) 114 141 { 115 142 struct drm_atomic_state *state = c->state; 116 143 struct drm_device *dev = state->dev; 117 144 struct msm_drm_private *priv = dev->dev_private; 118 145 struct msm_kms *kms = priv->kms; 146 + 147 + wait_fences(c, async); 119 148 120 149 kms->funcs->prepare_commit(kms, state); 121 150 ··· 172 155 commit_destroy(c); 173 156 } 174 157 175 - static void fence_cb(struct msm_fence_cb *cb) 158 + static void commit_worker(struct work_struct *work) 176 159 { 177 - struct msm_commit *c = 178 - container_of(cb, struct msm_commit, fence_cb); 179 - complete_commit(c); 180 - } 181 - 182 - static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb) 183 - { 184 - struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0); 185 - c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ)); 160 + complete_commit(container_of(work, struct msm_commit, work), true); 186 161 } 187 162 188 163 int msm_atomic_check(struct drm_device *dev, ··· 213 204 struct drm_atomic_state *state, bool nonblock) 214 205 { 215 206 struct msm_drm_private *priv = dev->dev_private; 216 - int nplanes = dev->mode_config.num_total_plane; 217 207 int ncrtcs = dev->mode_config.num_crtc; 218 - ktime_t timeout; 219 208 struct msm_commit *c; 220 209 int i, ret; 221 210 ··· 235 228 if (!crtc) 236 229 continue; 237 230 c->crtc_mask |= (1 << drm_crtc_index(crtc)); 238 - } 239 - 240 - /* 241 - * Figure out what fence to wait for: 242 - */ 243 - for (i = 0; i < nplanes; i++) { 244 - struct drm_plane *plane = state->planes[i]; 245 - struct drm_plane_state *new_state = state->plane_states[i]; 246 - 247 - if (!plane) 248 - continue; 249 - 250 - if ((plane->state->fb != new_state->fb) && new_state->fb) 251 - add_fb(c, new_state->fb); 252 231 } 253 232 254 233 /* ··· 271 278 * current layout. 
272 279 */ 273 280 274 - if (nonblock && priv->gpu) { 275 - msm_queue_fence_cb(priv->gpu->fctx, &c->fence_cb, c->fence); 281 + if (nonblock) { 282 + queue_work(priv->atomic_wq, &c->work); 276 283 return 0; 277 284 } 278 285 279 - timeout = ktime_add_ms(ktime_get(), 1000); 280 - 281 - /* uninterruptible wait */ 282 - if (priv->gpu) 283 - msm_wait_fence(priv->gpu->fctx, c->fence, &timeout, false); 284 - 285 - complete_commit(c); 286 + complete_commit(c, false); 286 287 287 288 return 0; 288 289
+4
drivers/gpu/drm/msm/msm_drv.c
··· 213 213 flush_workqueue(priv->wq); 214 214 destroy_workqueue(priv->wq); 215 215 216 + flush_workqueue(priv->atomic_wq); 217 + destroy_workqueue(priv->atomic_wq); 218 + 216 219 if (kms) { 217 220 pm_runtime_disable(dev->dev); 218 221 kms->funcs->destroy(kms); ··· 342 339 dev->dev_private = priv; 343 340 344 341 priv->wq = alloc_ordered_workqueue("msm", 0); 342 + priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0); 345 343 init_waitqueue_head(&priv->pending_crtcs_event); 346 344 347 345 INIT_LIST_HEAD(&priv->inactive_list);
+3 -2
drivers/gpu/drm/msm/msm_drv.h
··· 109 109 struct list_head inactive_list; 110 110 111 111 struct workqueue_struct *wq; 112 + struct workqueue_struct *atomic_wq; 112 113 113 114 /* crtcs pending async atomic updates: */ 114 115 uint32_t pending_crtcs; ··· 193 192 void msm_gem_move_to_active(struct drm_gem_object *obj, 194 193 struct msm_gpu *gpu, bool write, uint32_t fence); 195 194 void msm_gem_move_to_inactive(struct drm_gem_object *obj); 196 - int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 197 - ktime_t *timeout); 195 + int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); 196 + int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); 198 197 int msm_gem_cpu_fini(struct drm_gem_object *obj); 199 198 void msm_gem_free_object(struct drm_gem_object *obj); 200 199 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-43
drivers/gpu/drm/msm/msm_fence.c
··· 33 33 fctx->dev = dev; 34 34 fctx->name = name; 35 35 init_waitqueue_head(&fctx->event); 36 - INIT_LIST_HEAD(&fctx->fence_cbs); 37 36 38 37 return fctx; 39 38 } ··· 85 86 return ret; 86 87 } 87 88 88 - int msm_queue_fence_cb(struct msm_fence_context *fctx, 89 - struct msm_fence_cb *cb, uint32_t fence) 90 - { 91 - struct msm_drm_private *priv = fctx->dev->dev_private; 92 - int ret = 0; 93 - 94 - mutex_lock(&fctx->dev->struct_mutex); 95 - if (!list_empty(&cb->work.entry)) { 96 - ret = -EINVAL; 97 - } else if (fence > fctx->completed_fence) { 98 - cb->fence = fence; 99 - list_add_tail(&cb->work.entry, &fctx->fence_cbs); 100 - } else { 101 - queue_work(priv->wq, &cb->work); 102 - } 103 - mutex_unlock(&fctx->dev->struct_mutex); 104 - 105 - return ret; 106 - } 107 - 108 89 /* called from workqueue */ 109 90 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) 110 91 { 111 - struct msm_drm_private *priv = fctx->dev->dev_private; 112 - 113 92 mutex_lock(&fctx->dev->struct_mutex); 114 93 fctx->completed_fence = max(fence, fctx->completed_fence); 115 - 116 - while (!list_empty(&fctx->fence_cbs)) { 117 - struct msm_fence_cb *cb; 118 - 119 - cb = list_first_entry(&fctx->fence_cbs, 120 - struct msm_fence_cb, work.entry); 121 - 122 - if (cb->fence > fctx->completed_fence) 123 - break; 124 - 125 - list_del_init(&cb->work.entry); 126 - queue_work(priv->wq, &cb->work); 127 - } 128 - 129 94 mutex_unlock(&fctx->dev->struct_mutex); 130 95 131 96 wake_up_all(&fctx->event); 132 - } 133 - 134 - void __msm_fence_worker(struct work_struct *work) 135 - { 136 - struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work); 137 - cb->func(cb); 138 97 }
-16
drivers/gpu/drm/msm/msm_fence.h
··· 27 27 uint32_t last_fence; /* last assigned fence */ 28 28 uint32_t completed_fence; /* last completed fence */ 29 29 wait_queue_head_t event; 30 - /* callbacks deferred until bo is inactive: */ 31 - struct list_head fence_cbs; 32 30 }; 33 31 34 32 struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev, 35 33 const char *name); 36 34 void msm_fence_context_free(struct msm_fence_context *fctx); 37 - 38 - /* callback from wq once fence has passed: */ 39 - struct msm_fence_cb { 40 - struct work_struct work; 41 - uint32_t fence; 42 - void (*func)(struct msm_fence_cb *cb); 43 - }; 44 - 45 - void __msm_fence_worker(struct work_struct *work); 46 - 47 - #define INIT_FENCE_CB(_cb, _func) do { \ 48 - INIT_WORK(&(_cb)->work, __msm_fence_worker); \ 49 - (_cb)->func = _func; \ 50 - } while (0) 51 35 52 36 int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, 53 37 ktime_t *timeout, bool interruptible);
+8 -1
drivers/gpu/drm/msm/msm_gem.c
··· 439 439 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 440 440 } 441 441 442 - int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 442 + int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 443 443 { 444 444 struct drm_device *dev = obj->dev; 445 445 struct msm_drm_private *priv = dev->dev_private; ··· 455 455 if (priv->gpu) 456 456 ret = msm_wait_fence(priv->gpu->fctx, fence, timeout, true); 457 457 } 458 + 459 + return ret; 460 + } 461 + 462 + int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) 463 + { 464 + int ret = msm_gem_cpu_sync(obj, op, timeout); 458 465 459 466 /* TODO cache maintenance */ 460 467