// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

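/**
 * fman_from_fence - Return the fence manager owning a fence object.
 *
 * @fence: The fence object.
 *
 * The dma_fence base object is initialized with a pointer to the fence
 * manager's @lock, so the manager can be recovered from that pointer.
 */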
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called:
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

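/**
 * vmw_fence_enable_signaling - dma_fence_ops callback to enable signaling.
 *
 * @f: The fence.
 *
 * Returns false if the device seqno has already passed this fence (no
 * signaling needed); otherwise pings the host so fence irqs are delivered.
 */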
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

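/**
 * vmw_fence_wait - dma_fence_ops callback for waiting on a vmw fence.
 *
 * @f: The fence.
 * @intr: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Sleeps with a wait callback on the fence's callback list, rechecking
 * the device seqno each time it is woken, until the fence signals, the
 * timeout expires, or, if @intr is true, a signal is pending.
 */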
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/**
 * vmw_fence_work_func - Execute signal actions on recently signaled fences.
 *
 * @work: The struct work_struct embedded in the fence manager.
 *
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

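/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Returns a pointer to the new fence manager, or NULL on allocation
 * failure.
 */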
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

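/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * @fman: Pointer to the fence manager.
 *
 * The fence and cleanup lists must be empty at this point, typically
 * ensured by a prior call to vmw_fence_fifo_down().
 */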
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

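/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the fence
 * manager's fence list.
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno the fence signals on.
 * @destroy: Destructor called when the last fence reference is dropped.
 *
 * Returns -EBUSY if the fifo is down, zero otherwise.
 */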
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

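/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of a list of
 * fence actions and queue their cleanup.
 *
 * @fman: Pointer to the fence manager.
 * @list: The list of actions to perform.
 *
 * Called with the fence manager lock held. Cleanup is deferred to the
 * fence manager worker.
 */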
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fifo_mem_write(fman->dev_priv,
					   SVGA_FIFO_FENCE_GOAL,
					   fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
			   fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

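/**
 * vmw_fences_update - Signal all fences whose seqno the device has passed.
 *
 * @fman: Pointer to the fence manager.
 *
 * Takes the fence manager lock and calls __vmw_fences_update().
 */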
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

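/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * @fence: The fence object.
 *
 * Updates the fence manager state from the device seqno before checking,
 * so fences may signal as a side effect of this call.
 */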
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

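/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence object.
 * @lazy: Unused.
 * @interruptible: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Return: 0 on success, -EBUSY if the wait timed out, or a negative error
 * code from dma_fence_wait_timeout().
 */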
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

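/**
 * vmw_fence_create - Allocate and initialize a kernel fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence signals on.
 * @p_fence: Assigned the new fence object on success.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the error
 * returned by vmw_fence_obj_init().
 */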
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

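/**
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * @file_priv: The file the handle is created on.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence signals on.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the new object handle on success.
 *
 * Like vmw_fence_create(), but also accounts the kernel memory against the
 * TTM memory global and registers the fence as a ttm base object so that
 * user-space can look it up by handle.
 */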
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_fence_fifo_down - Signal all unsignaled fence objects.
 *
 * @fman: Pointer to the fence manager.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 *
 * Return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

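/**
 * vmw_fence_obj_wait_ioctl - Wait for a fence object on behalf of
 * user-space.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_wait_arg.
 * @file_priv: The calling file.
 *
 * The timeout is converted from microseconds to jiffies and anchored to a
 * kernel cookie on the first call, so that restarted waits don't extend
 * the total wait time.
 */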
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

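/**
 * vmw_fence_obj_signaled_ioctl - Report whether a fence object has
 * signaled, together with the last passed seqno.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_signaled_arg.
 * @file_priv: The calling file.
 */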
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

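/**
 * vmw_fence_obj_unref_ioctl - Drop a user-space reference to a fence
 * object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_arg.
 * @file_priv: The calling file.
 */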
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver an event fence action's
 * drm event.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - Destroy an event fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, assigned the current time tv_sec val when the
 * fence signals.
 * @tv_usec: Must be set if @tv_sec is set; assigned the current time
 * tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

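/**
 * vmw_fence_event_ioctl - Attach a signaled event to a fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_event_arg.
 * @file_priv: The calling file.
 *
 * Looks up an existing fence object by handle, or creates a new one if no
 * handle is supplied, and posts a DRM_VMW_EVENT_FENCE_SIGNALED event to be
 * sent when that fence passes.
 */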
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}