// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_preempt_fence.h"

#include <linux/slab.h>

#include "xe_exec_queue.h"
#include "xe_gt_printk.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_vm.h"

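/*
 * Worker that completes a preempt fence: wait for the exec queue suspend kicked off in
 * preempt_fence_enable_signaling() to finish, then signal the fence or mark it with an
 * error. All preempt fences are signalled from the device's ordered preempt_fence_wq.
 */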
static void preempt_fence_work_func(struct work_struct *w)
{
	bool cookie = dma_fence_begin_signalling();
	struct xe_preempt_fence *pfence =
		container_of(w, typeof(*pfence), preempt_work);
	struct xe_exec_queue *q = pfence->q;

	if (pfence->error) {
		dma_fence_set_error(&pfence->base, pfence->error);
	} else if (!q->ops->reset_status(q)) {
		int err = q->ops->suspend_wait(q);

		if (err == -EAGAIN) {
			xe_gt_dbg(q->gt, "PREEMPT FENCE RETRY guc_id=%d",
				  q->guc->id);
			queue_work(q->vm->xe->preempt_fence_wq,
				   &pfence->preempt_work);
			dma_fence_end_signalling(cookie);
			return;
		}

		if (err)
			dma_fence_set_error(&pfence->base, err);
	} else {
		dma_fence_set_error(&pfence->base, -ENOENT);
	}

	dma_fence_signal(&pfence->base);
	/*
	 * Opt to keep everything in the fence critical section. This looks strange since we have
	 * just signalled the fence, but all preempt fences are signalled via a single global
	 * ordered wq, so anything that happens in this callback can easily block progress on the
	 * entire wq, which in turn may prevent other published preempt fences from ever
	 * signalling. Therefore try to keep everything here in the callback inside the fence
	 * critical section. For example, if something below grabs a scary lock like vm->lock,
	 * lockdep should complain since we also hold that lock whilst waiting on preempt fences
	 * to complete.
	 */
	xe_vm_queue_rebind_worker(q->vm);
	xe_exec_queue_put(q);
	dma_fence_end_signalling(cookie);
}

static const char *
preempt_fence_get_driver_name(struct dma_fence *fence)
{
	return "xe";
}

static const char *
preempt_fence_get_timeline_name(struct dma_fence *fence)
{
	return "preempt";
}

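/*
 * dma_fence_ops::enable_signaling hook: start suspending the attached exec queue and defer
 * fence completion to preempt_fence_work_func() on the preempt fence workqueue.
 */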
static bool preempt_fence_enable_signaling(struct dma_fence *fence)
{
	struct xe_preempt_fence *pfence =
		container_of(fence, typeof(*pfence), base);
	struct xe_exec_queue *q = pfence->q;

	pfence->error = q->ops->suspend(q);
	queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
	return true;
}

static const struct dma_fence_ops preempt_fence_ops = {
	.get_driver_name = preempt_fence_get_driver_name,
	.get_timeline_name = preempt_fence_get_timeline_name,
	.enable_signaling = preempt_fence_enable_signaling,
};

/**
 * xe_preempt_fence_alloc() - Allocate a preempt fence with minimal
 * initialization
 *
 * Allocate a preempt fence, and initialize its list head.
 * If the allocated preempt fence has been armed with
 * xe_preempt_fence_arm(), it must be freed using dma_fence_put(). If not,
 * it must be freed using xe_preempt_fence_free().
 *
 * Return: A struct xe_preempt_fence pointer used for calling into
 * xe_preempt_fence_arm() or xe_preempt_fence_free().
 * An error pointer on error.
 */
struct xe_preempt_fence *xe_preempt_fence_alloc(void)
{
	struct xe_preempt_fence *pfence;

	pfence = kmalloc(sizeof(*pfence), GFP_KERNEL);
	if (!pfence)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pfence->link);
	INIT_WORK(&pfence->preempt_work, preempt_fence_work_func);

	return pfence;
}
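
/*
 * Illustrative lifecycle sketch, not part of the driver: the queue, context, seqno and the
 * abort condition below are placeholders and error handling is elided. It only shows which
 * free path applies before arming (xe_preempt_fence_free()) and after arming
 * (dma_fence_put()):
 *
 *	struct xe_preempt_fence *pfence;
 *	struct dma_fence *fence;
 *
 *	pfence = xe_preempt_fence_alloc();
 *	if (IS_ERR(pfence))
 *		return PTR_ERR(pfence);
 *
 *	if (abort_before_arm) {
 *		xe_preempt_fence_free(pfence);
 *		return 0;
 *	}
 *
 *	fence = xe_preempt_fence_arm(pfence, q, context, seqno);
 *	...
 *	dma_fence_put(fence);
 */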

/**
 * xe_preempt_fence_free() - Free a preempt fence allocated using
 * xe_preempt_fence_alloc().
 * @pfence: pointer obtained from xe_preempt_fence_alloc().
 *
 * Free a preempt fence that has not yet been armed.
 */
void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
{
	list_del(&pfence->link);
	kfree(pfence);
}

/**
 * xe_preempt_fence_arm() - Arm a preempt fence allocated using
 * xe_preempt_fence_alloc().
 * @pfence: The struct xe_preempt_fence pointer returned from
 * xe_preempt_fence_alloc().
 * @q: The struct xe_exec_queue used for arming.
 * @context: The dma-fence context used for arming.
 * @seqno: The dma-fence seqno used for arming.
 *
 * Inserts the preempt fence into @context's timeline, takes @link off any
 * list, and registers @q as the exec queue to be preempted.
 *
 * Return: A pointer to a struct dma_fence embedded into the preempt fence.
 * This function doesn't error.
 */
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
		     u64 context, u32 seqno)
{
	list_del_init(&pfence->link);
	pfence->q = xe_exec_queue_get(q);
	spin_lock_init(&pfence->lock);
	dma_fence_init(&pfence->base, &preempt_fence_ops,
		       &pfence->lock, context, seqno);

	return &pfence->base;
}

/**
 * xe_preempt_fence_create() - Helper to create and arm a preempt fence.
 * @q: The struct xe_exec_queue used for arming.
 * @context: The dma-fence context used for arming.
 * @seqno: The dma-fence seqno used for arming.
 *
 * Allocates and inserts the preempt fence into @context's timeline,
 * and registers @q as the struct xe_exec_queue to be preempted.
 *
 * Return: A pointer to the resulting struct dma_fence on success. An error
 * pointer on error. In particular, if allocation fails it returns
 * ERR_PTR(-ENOMEM).
 */
struct dma_fence *
xe_preempt_fence_create(struct xe_exec_queue *q,
			u64 context, u32 seqno)
{
	struct xe_preempt_fence *pfence;

	pfence = xe_preempt_fence_alloc();
	if (IS_ERR(pfence))
		return ERR_CAST(pfence);

	return xe_preempt_fence_arm(pfence, q, context, seqno);
}
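
/*
 * Minimal usage sketch for the helper above; the caller code is hypothetical and error
 * handling beyond the allocation check is elided:
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_preempt_fence_create(q, context, seqno);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	... publish or wait on the fence ...
 *
 *	dma_fence_put(fence);
 */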

/**
 * xe_fence_is_xe_preempt() - Check whether a dma-fence is a Xe preempt fence.
 * @fence: The dma-fence to check.
 *
 * Return: True if @fence was created by this driver's preempt fence code,
 * false otherwise.
 */
bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
{
	return fence->ops == &preempt_fence_ops;
}
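
/*
 * Hypothetical example of skipping preempt fences while walking a reservation object with
 * the reservation lock held. The dma_resv usage class chosen here is an assumption of the
 * sketch, not something this file mandates:
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
 *		if (xe_fence_is_xe_preempt(fence))
 *			continue;
 *		...
 *	}
 */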