// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

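/*
 * Per-submission state, collected from the execbuffer ioctl arguments and
 * released by virtio_gpu_cleanup_submit() once the job is done or has failed.
 */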
struct virtio_gpu_submit {
	struct virtio_gpu_object_array *buflist;
	struct drm_virtgpu_execbuffer *exbuf;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_fpriv *vfpriv;
	struct virtio_gpu_device *vgdev;
	struct sync_file *sync_file;
	struct drm_file *file;
	int out_fence_fd;
	u64 fence_ctx;
	u32 ring_idx;
	void *buf;
};

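/*
 * Skip the wait when the in-fence belongs to our own fence context: fences
 * emitted to the same ring are already ordered by the ring itself.
 */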
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
				    struct dma_fence *in_fence)
{
	u64 context = submit->fence_ctx + submit->ring_idx;

	if (dma_fence_match_context(in_fence, context))
		return 0;

	return dma_fence_wait(in_fence, true);
}

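/*
 * The in-fence may be a fence container (array/chain); unwrap it and wait
 * on each component fence individually.
 */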
static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
				     struct dma_fence *fence)
{
	struct dma_fence_unwrap itr;
	struct dma_fence *f;
	int err;

	dma_fence_unwrap_for_each(f, &itr, fence) {
		err = virtio_gpu_do_fence_wait(submit, f);
		if (err)
			return err;
	}

	return 0;
}

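/*
 * Reserve a DRM event that is delivered to userspace when the out-fence
 * signals. Only done for rings userspace opted into via ring_idx_mask
 * (set through the VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK context param).
 */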
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 u32 ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		return 0;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	fence->e = e;

	return 0;
}

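/*
 * Copy the array of GEM handles from userspace and resolve it into an
 * object array; the handle array itself is only needed temporarily.
 */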
static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	u32 *bo_handles;

	if (!exbuf->num_bo_handles)
		return 0;

	bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
				    GFP_KERNEL);
	if (!bo_handles)
		return -ENOMEM;

	if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
			   exbuf->num_bo_handles * sizeof(*bo_handles))) {
		kvfree(bo_handles);
		return -EFAULT;
	}

	submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
							exbuf->num_bo_handles);
	if (!submit->buflist) {
		kvfree(bo_handles);
		return -ENOENT;
	}

	kvfree(bo_handles);

	return 0;
}

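/*
 * Release everything still owned by the submit. On the success path
 * virtio_gpu_complete_submit() has already cleared these fields, making
 * this a no-op.
 */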
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
	if (!IS_ERR(submit->buf))
		kvfree(submit->buf);

	if (submit->buflist)
		virtio_gpu_array_put_free(submit->buflist);

	if (submit->out_fence_fd >= 0)
		put_unused_fd(submit->out_fence_fd);

	if (submit->out_fence)
		dma_fence_put(&submit->out_fence->f);

	if (submit->sync_file)
		fput(submit->sync_file->file);
}

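/* Queue the command buffer on the control queue and notify the host. */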
static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
{
	virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
			      submit->vfpriv->ctx_id, submit->buflist,
			      submit->out_fence);
	virtio_gpu_notify(submit->vgdev);
}

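/*
 * Ownership of the buffers, fence and fd has been handed over by the
 * submission; clear the pointers so that cleanup doesn't release them.
 */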
static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
{
	submit->buf = NULL;
	submit->buflist = NULL;
	submit->sync_file = NULL;
	submit->out_fence = NULL;
	submit->out_fence_fd = -1;
}

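/*
 * Collect everything needed for the submission: the out-fence (plus the
 * optional DRM event and sync-file fd), the BO list and a kernel copy of
 * the command buffer.
 */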
static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
				  struct drm_virtgpu_execbuffer *exbuf,
				  struct drm_device *dev,
				  struct drm_file *file,
				  u64 fence_ctx, u32 ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fence *out_fence;
	int err;

	memset(submit, 0, sizeof(*submit));

	/*
	 * Set the fd to its sentinel before any failure return, so that
	 * virtio_gpu_cleanup_submit() never does put_unused_fd(0) on a
	 * partially initialized submit.
	 */
	submit->out_fence_fd = -1;

	out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
	if (!out_fence)
		return -ENOMEM;

	err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
	if (err) {
		dma_fence_put(&out_fence->f);
		return err;
	}

	submit->out_fence = out_fence;
	submit->fence_ctx = fence_ctx;
	submit->ring_idx = ring_idx;
	submit->vfpriv = vfpriv;
	submit->vgdev = vgdev;
	submit->exbuf = exbuf;
	submit->file = file;

	err = virtio_gpu_init_submit_buflist(submit);
	if (err)
		return err;

	submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(submit->buf))
		return PTR_ERR(submit->buf);

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		err = get_unused_fd_flags(O_CLOEXEC);
		if (err < 0)
			return err;

		submit->out_fence_fd = err;

		submit->sync_file = sync_file_create(&out_fence->f);
		if (!submit->sync_file)
			return -ENOMEM;
	}

	return 0;
}

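/* If userspace passed an in-fence fd, wait for any foreign fences in it. */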
static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
{
	int ret = 0;

	if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence =
				sync_file_get_fence(submit->exbuf->fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the
		 * fence array contains any fence from a foreign context.
		 */
		ret = virtio_gpu_dma_fence_wait(submit, in_fence);

		dma_fence_put(in_fence);
	}

	return ret;
}

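/*
 * Publish the out-fence to userspace: install the sync-file into the
 * reserved fd and report the fd back through the ioctl argument.
 */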
static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
{
	if (submit->sync_file) {
		submit->exbuf->fence_fd = submit->out_fence_fd;
		fd_install(submit->out_fence_fd, submit->sync_file->file);
	}
}

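/* Lock the reservation objects of all BOs referenced by the job, if any. */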
static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
{
	if (submit->buflist)
		return virtio_gpu_array_lock_resv(submit->buflist);

	return 0;
}

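/*
 * EXECBUFFER ioctl entry point: validate the flags, set up the submission
 * state, wait for in-fences, submit the job to the host and hand the
 * out-fence back to userspace.
 */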
int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	u64 fence_ctx = vgdev->fence_drv.context;
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_submit submit;
	u32 ring_idx = 0;
	int ret = -EINVAL;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return ret;

	if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return ret;

		if (!vfpriv->base_fence_ctx)
			return ret;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	virtio_gpu_create_context(dev, file);

	ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
				     fence_ctx, ring_idx);
	if (ret)
		goto cleanup;

	/*
	 * Await in-fences at the end of the job submission path, so that
	 * after the waits we proceed directly to the virtio submission.
	 */
	ret = virtio_gpu_wait_in_fence(&submit);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_lock_buflist(&submit);
	if (ret)
		goto cleanup;

	virtio_gpu_submit(&submit);

	/*
	 * Set up the user-visible out-fence data only after submitting
	 * the job, to keep the submission path itself fast.
	 */
	virtio_gpu_install_out_fence_fd(&submit);
	virtio_gpu_complete_submit(&submit);
cleanup:
	virtio_gpu_cleanup_submit(&submit);

	return ret;
}