Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/msm: 'struct fence' conversion

Signed-off-by: Rob Clark <robdclark@gmail.com>

Rob Clark b6295f9a ba00c3f2

+236 -87
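In short: the driver's private uint32_t fences become kernel-wide struct fence objects on a per-GPU timeline (msm_fence.c), the per-BO read_fence/write_fence fields are replaced by fences attached to each buffer's reservation_object (msm_gem.c), the submit path gains an implicit-sync step that waits on other contexts' fences (msm_gem_submit.c), and atomic page-flip synchronization is handed off to drm_atomic_helper_wait_for_fences() (msm_atomic.c). Short notes with illustrative sketches follow each file's hunks below.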
+2 -2
drivers/gpu/drm/msm/adreno/adreno_gpu.c
···
 	OUT_PKT2(ring);
 
 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
-	OUT_RING(ring, submit->fence);
+	OUT_RING(ring, submit->fence->seqno);
 
 	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
 		/* Flush HLSQ lazy updates to make sure there is nothing
···
 	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
 	OUT_RING(ring, CACHE_FLUSH_TS);
 	OUT_RING(ring, rbmemptr(adreno_gpu, fence));
-	OUT_RING(ring, submit->fence);
+	OUT_RING(ring, submit->fence->seqno);
 
 	/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
 	OUT_PKT3(ring, CP_INTERRUPT, 1);
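The ringbuffer still carries only a 32-bit sequence number, so the submit's struct fence is dereferenced for its seqno at emit time. Completion is then a matter of comparing that seqno against the last value the GPU wrote back; fence_completed() in msm_fence.c does this with wraparound-safe signed arithmetic. A standalone sketch of that idiom (the comparison is verbatim from the driver; the test harness is illustrative):

	#include <assert.h>
	#include <stdint.h>

	/* true if seqno 'fence' is at or before 'completed' (wrap-safe) */
	static int fence_completed(uint32_t completed, uint32_t fence)
	{
		return (int32_t)(completed - fence) >= 0;
	}

	int main(void)
	{
		assert(fence_completed(100, 99));            /* already retired */
		assert(!fence_completed(100, 101));          /* still pending */
		assert(fence_completed(5, UINT32_MAX - 2));  /* correct across wrap */
		return 0;
	}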
+20 -22
drivers/gpu/drm/msm/msm_atomic.c
···
 	}
 }
 
-static void wait_fences(struct msm_commit *c, bool async)
-{
-	int nplanes = c->dev->mode_config.num_total_plane;
-	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
-	int i;
-
-	for (i = 0; i < nplanes; i++) {
-		struct drm_plane *plane = c->state->planes[i];
-		struct drm_plane_state *new_state = c->state->plane_states[i];
-
-		if (!plane)
-			continue;
-
-		if ((plane->state->fb != new_state->fb) && new_state->fb) {
-			struct drm_gem_object *obj =
-				msm_framebuffer_bo(new_state->fb, 0);
-			msm_gem_cpu_sync(obj, MSM_PREP_READ, &timeout);
-		}
-	}
-}
-
 /* The (potentially) asynchronous part of the commit.  At this point
  * nothing can fail short of armageddon.
  */
···
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 
-	wait_fences(c, async);
+	drm_atomic_helper_wait_for_fences(dev, state);
 
 	kms->funcs->prepare_commit(kms, state);
 
···
 		struct drm_atomic_state *state, bool nonblock)
 {
 	struct msm_drm_private *priv = dev->dev_private;
+	int nplanes = dev->mode_config.num_total_plane;
 	int ncrtcs = dev->mode_config.num_crtc;
 	struct msm_commit *c;
 	int i, ret;
···
 		if (!crtc)
 			continue;
 		c->crtc_mask |= (1 << drm_crtc_index(crtc));
+	}
+
+	/*
+	 * Figure out what fence to wait for:
+	 */
+	for (i = 0; i < nplanes; i++) {
+		struct drm_plane *plane = state->planes[i];
+		struct drm_plane_state *new_state = state->plane_states[i];
+
+		if (!plane)
+			continue;
+
+		if ((plane->state->fb != new_state->fb) && new_state->fb) {
+			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+			struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+		}
 	}
 
 	/*
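With fences living on the reservation object, the driver no longer needs its own wait_fences(): msm_atomic_commit() snapshots the exclusive (write) fence of each incoming framebuffer's BO into the plane state, and complete_commit() defers to the core helper. Conceptually, the helper's wait amounts to the loop below; this is a sketch of its behavior under the plane-state layout of the time, not the verbatim implementation:

	static void wait_plane_fences(struct drm_atomic_state *state, int nplanes)
	{
		int i;

		for (i = 0; i < nplanes; i++) {
			struct drm_plane_state *new_state = state->plane_states[i];

			if (!state->planes[i] || !new_state->fence)
				continue;

			/* block (uninterruptibly) until the GPU write lands */
			fence_wait(new_state->fence, false);
			fence_put(new_state->fence);
			new_state->fence = NULL;
		}
	}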
+3 -2
drivers/gpu/drm/msm/msm_drv.h
···
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
+int msm_gem_sync_object(struct drm_gem_object *obj,
+		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool write, uint32_t fence);
+		struct msm_gpu *gpu, bool exclusive, struct fence *fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
-int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
+69 -2
drivers/gpu/drm/msm/msm_fence.c
···
 
 	fctx->dev = dev;
 	fctx->name = name;
+	fctx->context = fence_context_alloc(1);
 	init_waitqueue_head(&fctx->event);
+	spin_lock_init(&fctx->spinlock);
 
 	return fctx;
 }
···
 	return (int32_t)(fctx->completed_fence - fence) >= 0;
 }
 
+/* legacy path for WAIT_FENCE ioctl: */
 int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
 		ktime_t *timeout, bool interruptible)
 {
···
 /* called from workqueue */
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 {
-	mutex_lock(&fctx->dev->struct_mutex);
+	spin_lock(&fctx->spinlock);
 	fctx->completed_fence = max(fence, fctx->completed_fence);
-	mutex_unlock(&fctx->dev->struct_mutex);
+	spin_unlock(&fctx->spinlock);
 
 	wake_up_all(&fctx->event);
+}
+
+struct msm_fence {
+	struct msm_fence_context *fctx;
+	struct fence base;
+};
+
+static inline struct msm_fence *to_msm_fence(struct fence *fence)
+{
+	return container_of(fence, struct msm_fence, base);
+}
+
+static const char *msm_fence_get_driver_name(struct fence *fence)
+{
+	return "msm";
+}
+
+static const char *msm_fence_get_timeline_name(struct fence *fence)
+{
+	struct msm_fence *f = to_msm_fence(fence);
+	return f->fctx->name;
+}
+
+static bool msm_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static bool msm_fence_signaled(struct fence *fence)
+{
+	struct msm_fence *f = to_msm_fence(fence);
+	return fence_completed(f->fctx, f->base.seqno);
+}
+
+static void msm_fence_release(struct fence *fence)
+{
+	struct msm_fence *f = to_msm_fence(fence);
+	kfree_rcu(f, base.rcu);
+}
+
+static const struct fence_ops msm_fence_ops = {
+	.get_driver_name = msm_fence_get_driver_name,
+	.get_timeline_name = msm_fence_get_timeline_name,
+	.enable_signaling = msm_fence_enable_signaling,
+	.signaled = msm_fence_signaled,
+	.wait = fence_default_wait,
+	.release = msm_fence_release,
+};
+
+struct fence *
+msm_fence_alloc(struct msm_fence_context *fctx)
+{
+	struct msm_fence *f;
+
+	f = kzalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return ERR_PTR(-ENOMEM);
+
+	f->fctx = fctx;
+
+	fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+			fctx->context, ++fctx->last_fence);
+
+	return &f->base;
 }
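This is a classic seqno timeline: fence_context_alloc(1) reserves a global context id, each msm_fence_alloc() hands out ++last_fence on that timeline, and signaling is level-triggered: msm_fence_signaled() just compares against completed_fence, so enable_signaling() can simply return true while the retire worker advances the timeline. Release goes through kfree_rcu() because reservation objects may still dereference the fence under RCU. An illustrative lifecycle, assuming a gpu whose fctx came from msm_fence_context_alloc() (names follow the patch; the surrounding call site is hypothetical):

	struct fence *f = msm_fence_alloc(gpu->fctx);
	if (IS_ERR(f))
		return PTR_ERR(f);

	/* ... emit f->seqno to the ring (see adreno_gpu.c above) ... */

	/* later, from the retire worker: */
	msm_update_fence(gpu->fctx, gpu->funcs->last_fence(gpu));

	/* now observable through the generic API: */
	WARN_ON(!fence_is_signaled(f));
	fence_put(f);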
+4
drivers/gpu/drm/msm/msm_fence.h
···
 struct msm_fence_context {
 	struct drm_device *dev;
 	const char *name;
+	unsigned context;
 	/* last_fence == completed_fence --> no pending work */
 	uint32_t last_fence;          /* last assigned fence */
 	uint32_t completed_fence;     /* last completed fence */
 	wait_queue_head_t event;
+	spinlock_t spinlock;
 };
 
 struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
···
 int msm_queue_fence_cb(struct msm_fence_context *fctx,
 		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
+
+struct fence * msm_fence_alloc(struct msm_fence_context *fctx);
 
 #endif
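The two new fields pair up with the code above: 'context' is the timeline id passed to fence_init(), which lets waiters skip fences from their own context, and 'spinlock' doubles as the struct fence lock and the protection for completed_fence, replacing the struct_mutex round-trip in msm_update_fence().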
+98 -32
drivers/gpu/drm/msm/msm_gem.c
···
 	return ret;
 }
 
+/* must be called before _move_to_active().. */
+int msm_gem_sync_object(struct drm_gem_object *obj,
+		struct msm_fence_context *fctx, bool exclusive)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	int i, ret;
+
+	if (!exclusive) {
+		/* NOTE: _reserve_shared() must happen before _add_shared_fence(),
+		 * which makes this a slightly strange place to call it.  OTOH this
+		 * is a convenient can-fail point to hook it in.  (And similar to
+		 * how etnaviv and nouveau handle this.)
+		 */
+		ret = reservation_object_reserve_shared(msm_obj->resv);
+		if (ret)
+			return ret;
+	}
+
+	fobj = reservation_object_get_list(msm_obj->resv);
+	if (!fobj || (fobj->shared_count == 0)) {
+		fence = reservation_object_get_excl(msm_obj->resv);
+		/* don't need to wait on our own fences, since ring is fifo */
+		if (fence && (fence->context != fctx->context)) {
+			ret = fence_wait(fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (!exclusive || !fobj)
+		return 0;
+
+	for (i = 0; i < fobj->shared_count; i++) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(msm_obj->resv));
+		if (fence->context != fctx->context) {
+			ret = fence_wait(fence, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 void msm_gem_move_to_active(struct drm_gem_object *obj,
-		struct msm_gpu *gpu, bool write, uint32_t fence)
+		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	msm_obj->gpu = gpu;
-	if (write)
-		msm_obj->write_fence = fence;
+	if (exclusive)
+		reservation_object_add_excl_fence(msm_obj->resv, fence);
 	else
-		msm_obj->read_fence = fence;
+		reservation_object_add_shared_fence(msm_obj->resv, fence);
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
···
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	msm_obj->gpu = NULL;
-	msm_obj->read_fence = 0;
-	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 }
 
-int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
-{
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
-
-	if (is_active(msm_obj)) {
-		uint32_t fence = msm_gem_fence(msm_obj, op);
-
-		if (op & MSM_PREP_NOSYNC)
-			timeout = NULL;
-
-		if (priv->gpu)
-			ret = msm_wait_fence(priv->gpu->fctx, fence, timeout, true);
-	}
-
-	return ret;
-}
-
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
-	int ret = msm_gem_cpu_sync(obj, op, timeout);
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	bool write = !!(op & MSM_PREP_WRITE);
+
+	if (op & MSM_PREP_NOSYNC) {
+		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
+			return -EBUSY;
+	} else {
+		int ret;
+
+		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+				true, timeout_to_jiffies(timeout));
+		if (ret <= 0)
+			return ret == 0 ? -ETIMEDOUT : ret;
+	}
 
 	/* TODO cache maintenance */
 
-	return ret;
+	return 0;
 }
 
 int msm_gem_cpu_fini(struct drm_gem_object *obj)
···
 }
 
 #ifdef CONFIG_DEBUG_FS
+static void describe_fence(struct fence *fence, const char *type,
+		struct seq_file *m)
+{
+	if (!fence_is_signaled(fence))
+		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+				fence->ops->get_driver_name(fence),
+				fence->ops->get_timeline_name(fence),
+				fence->seqno);
+}
+
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
-	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct reservation_object *robj = msm_obj->resv;
+	struct reservation_object_list *fobj;
+	struct fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
-			msm_obj->read_fence, msm_obj->write_fence,
 			obj->name, obj->refcount.refcount.counter,
 			off, msm_obj->vaddr, obj->size);
+
+	rcu_read_lock();
+	fobj = rcu_dereference(robj->fence);
+	if (fobj) {
+		unsigned int i, shared_count = fobj->shared_count;
+
+		for (i = 0; i < shared_count; i++) {
+			fence = rcu_dereference(fobj->shared[i]);
+			describe_fence(fence, "Shared", m);
+		}
+	}
+
+	fence = rcu_dereference(robj->fence_excl);
+	if (fence)
+		describe_fence(fence, "Exclusive", m);
+	rcu_read_unlock();
 }
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
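The per-object read_fence/write_fence pair (and the msm_gem_fence() helper that fed it) is gone; the BO's reservation_object is now the single source of truth. The implicit-sync contract msm_gem_sync_object() implements is the usual one: writers wait on all attached fences and are later recorded in the exclusive slot, readers wait only on the exclusive fence and are recorded as shared, and same-context fences are skipped because the ring is FIFO. CPU-side waits in msm_gem_cpu_prep() likewise move to the reservation helpers; timeout_to_jiffies() is assumed here to be the driver's existing ktime-to-jiffies conversion helper, not something this patch introduces.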
+1 -15
drivers/gpu/drm/msm/msm_gem.h
···
 	 */
 	struct list_head mm_list;
 	struct msm_gpu *gpu;     /* non-null if active */
-	uint32_t read_fence, write_fence;
 
 	/* Transiently in the process of submit ioctl, objects associated
 	 * with the submit are on submit->bo_list.. this only lasts for
···
 	return msm_obj->gpu != NULL;
 }
 
-static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
-		uint32_t op)
-{
-	uint32_t fence = 0;
-
-	if (op & MSM_PREP_READ)
-		fence = msm_obj->write_fence;
-	if (op & MSM_PREP_WRITE)
-		fence = max(fence, msm_obj->read_fence);
-
-	return fence;
-}
-
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
···
 	struct list_head node;   /* node in gpu submit_list */
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
-	uint32_t fence;
+	struct fence *fence;
 	bool valid;         /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
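submit->fence is now a refcounted pointer rather than a bare seqno, which is why retire_submit() in msm_gpu.c below gains a fence_put() and every user that still needs the raw number goes through submit->fence->seqno.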
+21 -1
drivers/gpu/drm/msm/msm_gem_submit.c
···
 	return ret;
 }
 
+static int submit_fence_sync(struct msm_gem_submit *submit)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+
+		ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
 static int submit_pin_objects(struct msm_gem_submit *submit)
 {
 	int i, ret = 0;
···
 	if (ret)
 		goto out;
 
+	ret = submit_fence_sync(submit);
+	if (ret)
+		goto out;
+
 	ret = submit_pin_objects(submit);
 	if (ret)
 		goto out;
···
 
 	ret = msm_gpu_submit(gpu, submit, ctx);
 
-	args->fence = submit->fence;
+	args->fence = submit->fence->seqno;
 
 out:
 	submit_cleanup(submit, !!ret);
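submit_fence_sync() is the new implicit-sync step: it runs after the BOs are locked and validated but before they are pinned, and maps MSM_SUBMIT_BO_WRITE onto the 'exclusive' parameter of msm_gem_sync_object(). Note the reserve-shared quirk documented over in msm_gem.c: for read buffers this is also where reservation_object_reserve_shared() gets a chance to fail cleanly, before anything irreversible has happened.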
+17 -10
drivers/gpu/drm/msm/msm_gpu.c
···
  * Hangcheck detection for locked gpu:
  */
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
+static void retire_submits(struct msm_gpu *gpu);
 
 static void recover_worker(struct work_struct *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
 	struct drm_device *dev = gpu->dev;
+	uint32_t fence = gpu->funcs->last_fence(gpu);
 
 	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+
+	msm_update_fence(gpu->fctx, fence + 1);
 
 	mutex_lock(&dev->struct_mutex);
 	if (msm_gpu_active(gpu)) {
 		struct msm_gem_submit *submit;
-		uint32_t fence = gpu->funcs->last_fence(gpu);
 
 		/* retire completed submits, plus the one that hung: */
-		retire_submits(gpu, fence + 1);
+		retire_submits(gpu);
 
 		inactive_cancel(gpu);
 		gpu->funcs->recover(gpu);
···
 		drm_gem_object_unreference(&msm_obj->base);
 	}
 
+	fence_put(submit->fence);
 	list_del(&submit->node);
 	kfree(submit);
 }
 
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
+static void retire_submits(struct msm_gpu *gpu)
 {
 	struct drm_device *dev = gpu->dev;
 
···
 		submit = list_first_entry(&gpu->submit_list,
 				struct msm_gem_submit, node);
 
-		if (submit->fence <= fence) {
+		if (fence_is_signaled(submit->fence)) {
 			retire_submit(gpu, submit);
 		} else {
 			break;
···
 	msm_update_fence(gpu->fctx, fence);
 
 	mutex_lock(&dev->struct_mutex);
-	retire_submits(gpu, fence);
+	retire_submits(gpu);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!msm_gpu_active(gpu))
···
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	submit->fence = ++gpu->fctx->last_fence;
+	submit->fence = msm_fence_alloc(gpu->fctx);
+	if (IS_ERR(submit->fence)) {
+		ret = PTR_ERR(submit->fence);
+		submit->fence = NULL;
+		return ret;
+	}
 
 	inactive_cancel(gpu);
···
 		msm_gem_get_iova_locked(&msm_obj->base,
 				submit->gpu->id, &iova);
 
-		if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
-
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
 			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
 	}
 
 	ret = gpu->funcs->submit(gpu, submit, ctx);
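Two subtleties in the GPU core: retire_submits() can no longer be handed an artificial fence value, so recovery first bumps completed_fence one past the hung submit's seqno and then lets fence_is_signaled() decide per submit; and in msm_gpu_submit(), a BO flagged both READ and WRITE now gets only the exclusive fence (hence the else-if), since an exclusive fence already orders subsequent readers behind the write.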
+1 -1
drivers/gpu/drm/msm/msm_rd.c
···
 
 	n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
 			TASK_COMM_LEN, current->comm, task_pid_nr(current),
-			submit->fence);
+			submit->fence->seqno);
 
 	rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
 