Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright (C) 2015 Red Hat, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef VIRTIO_DRV_H
27#define VIRTIO_DRV_H
28
29#include <linux/dma-direction.h>
30#include <linux/virtio.h>
31#include <linux/virtio_ids.h>
32#include <linux/virtio_config.h>
33#include <linux/virtio_gpu.h>
34
35#include <drm/drm_atomic.h>
36#include <drm/drm_drv.h>
37#include <drm/drm_encoder.h>
38#include <drm/drm_fb_helper.h>
39#include <drm/drm_fourcc.h>
40#include <drm/drm_framebuffer.h>
41#include <drm/drm_gem.h>
42#include <drm/drm_gem_shmem_helper.h>
43#include <drm/drm_ioctl.h>
44#include <drm/drm_probe_helper.h>
45#include <drm/virtgpu_drm.h>
46
47#define DRIVER_NAME "virtio_gpu"
48#define DRIVER_DESC "virtio GPU"
49#define DRIVER_DATE "0"
50
51#define DRIVER_MAJOR 0
52#define DRIVER_MINOR 1
53#define DRIVER_PATCHLEVEL 0
54
55#define STATE_INITIALIZING 0
56#define STATE_OK 1
57#define STATE_ERR 2
58
59#define MAX_CAPSET_ID 63
60#define MAX_RINGS 64
61
62struct virtio_gpu_object_params {
63 unsigned long size;
64 bool dumb;
65 /* 3d */
66 bool virgl;
67 bool blob;
68
69 /* classic resources only */
70 uint32_t format;
71 uint32_t width;
72 uint32_t height;
73 uint32_t target;
74 uint32_t bind;
75 uint32_t depth;
76 uint32_t array_size;
77 uint32_t last_level;
78 uint32_t nr_samples;
79 uint32_t flags;
80
81 /* blob resources only */
82 uint32_t ctx_id;
83 uint32_t blob_mem;
84 uint32_t blob_flags;
85 uint64_t blob_id;
86};
87
88struct virtio_gpu_object {
89 struct drm_gem_shmem_object base;
90 uint32_t hw_res_handle;
91 bool dumb;
92 bool created;
93 bool host3d_blob, guest_blob;
94 uint32_t blob_mem, blob_flags;
95
96 int uuid_state;
97 uuid_t uuid;
98};
99#define gem_to_virtio_gpu_obj(gobj) \
100 container_of((gobj), struct virtio_gpu_object, base.base)
101
102struct virtio_gpu_object_shmem {
103 struct virtio_gpu_object base;
104};
105
106struct virtio_gpu_object_vram {
107 struct virtio_gpu_object base;
108 uint32_t map_state;
109 uint32_t map_info;
110 struct drm_mm_node vram_node;
111};
112
113#define to_virtio_gpu_shmem(virtio_gpu_object) \
114 container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
115
116#define to_virtio_gpu_vram(virtio_gpu_object) \
117 container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base)
118
119struct virtio_gpu_object_array {
120 struct ww_acquire_ctx ticket;
121 struct list_head next;
122 u32 nents, total;
123 struct drm_gem_object *objs[];
124};
125
struct virtio_gpu_vbuffer;
struct virtio_gpu_device;

/* Callback invoked when the host's response for a vbuffer arrives. */
typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf);

132struct virtio_gpu_fence_driver {
133 atomic64_t last_fence_id;
134 uint64_t current_fence_id;
135 uint64_t context;
136 struct list_head fences;
137 spinlock_t lock;
138};
139
140struct virtio_gpu_fence_event {
141 struct drm_pending_event base;
142 struct drm_event event;
143};
144
145struct virtio_gpu_fence {
146 struct dma_fence f;
147 uint32_t ring_idx;
148 uint64_t fence_id;
149 bool emit_fence_info;
150 struct virtio_gpu_fence_event *e;
151 struct virtio_gpu_fence_driver *drv;
152 struct list_head node;
153};
154
155struct virtio_gpu_vbuffer {
156 char *buf;
157 int size;
158
159 void *data_buf;
160 uint32_t data_size;
161
162 char *resp_buf;
163 int resp_size;
164 virtio_gpu_resp_cb resp_cb;
165 void *resp_cb_data;
166
167 struct virtio_gpu_object_array *objs;
168 struct list_head list;
169};
170
171struct virtio_gpu_output {
172 int index;
173 struct drm_crtc crtc;
174 struct drm_connector conn;
175 struct drm_encoder enc;
176 struct virtio_gpu_display_one info;
177 struct virtio_gpu_update_cursor cursor;
178 struct edid *edid;
179 int cur_x;
180 int cur_y;
181 bool needs_modeset;
182};
183#define drm_crtc_to_virtio_gpu_output(x) \
184 container_of(x, struct virtio_gpu_output, crtc)
185
186struct virtio_gpu_framebuffer {
187 struct drm_framebuffer base;
188 struct virtio_gpu_fence *fence;
189};
190#define to_virtio_gpu_framebuffer(x) \
191 container_of(x, struct virtio_gpu_framebuffer, base)
192
193struct virtio_gpu_queue {
194 struct virtqueue *vq;
195 spinlock_t qlock;
196 wait_queue_head_t ack_queue;
197 struct work_struct dequeue_work;
198};
199
200struct virtio_gpu_drv_capset {
201 uint32_t id;
202 uint32_t max_version;
203 uint32_t max_size;
204};
205
206struct virtio_gpu_drv_cap_cache {
207 struct list_head head;
208 void *caps_cache;
209 uint32_t id;
210 uint32_t version;
211 uint32_t size;
212 atomic_t is_valid;
213};
214
215struct virtio_gpu_device {
216 struct drm_device *ddev;
217
218 struct virtio_device *vdev;
219
220 struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
221 uint32_t num_scanouts;
222
223 struct virtio_gpu_queue ctrlq;
224 struct virtio_gpu_queue cursorq;
225 struct kmem_cache *vbufs;
226
227 atomic_t pending_commands;
228
229 struct ida resource_ida;
230
231 wait_queue_head_t resp_wq;
232 /* current display info */
233 spinlock_t display_info_lock;
234 bool display_info_pending;
235
236 struct virtio_gpu_fence_driver fence_drv;
237
238 struct ida ctx_id_ida;
239
240 bool has_virgl_3d;
241 bool has_edid;
242 bool has_indirect;
243 bool has_resource_assign_uuid;
244 bool has_resource_blob;
245 bool has_host_visible;
246 bool has_context_init;
247 struct virtio_shm_region host_visible_region;
248 struct drm_mm host_visible_mm;
249
250 struct work_struct config_changed_work;
251
252 struct work_struct obj_free_work;
253 spinlock_t obj_free_lock;
254 struct list_head obj_free_list;
255
256 struct virtio_gpu_drv_capset *capsets;
257 uint32_t num_capsets;
258 uint64_t capset_id_mask;
259 struct list_head cap_cache;
260
261 /* protects uuid state when exporting */
262 spinlock_t resource_export_lock;
263 /* protects map state and host_visible_mm */
264 spinlock_t host_visible_lock;
265};
266
267struct virtio_gpu_fpriv {
268 uint32_t ctx_id;
269 uint32_t context_init;
270 bool context_created;
271 uint32_t num_rings;
272 uint64_t base_fence_ctx;
273 uint64_t ring_idx_mask;
274 struct mutex context_lock;
275};
276
277/* virtgpu_ioctl.c */
278#define DRM_VIRTIO_NUM_IOCTLS 12
279extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
280void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
281
282/* virtgpu_kms.c */
283int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
284void virtio_gpu_deinit(struct drm_device *dev);
285void virtio_gpu_release(struct drm_device *dev);
286int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
287void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
288
289/* virtgpu_gem.c */
290int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
291 struct drm_file *file);
292void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
293 struct drm_file *file);
294int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
295 struct drm_device *dev,
296 struct drm_mode_create_dumb *args);
297int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
298 struct drm_device *dev,
299 uint32_t handle, uint64_t *offset_p);
300
301struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents);
302struct virtio_gpu_object_array*
303virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents);
304void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
305 struct drm_gem_object *obj);
306int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs);
307void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs);
308void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
309 struct dma_fence *fence);
310void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs);
311void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
312 struct virtio_gpu_object_array *objs);
313void virtio_gpu_array_put_free_work(struct work_struct *work);
314
315/* virtgpu_vq.c */
316int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
317void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
318void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
319 struct virtio_gpu_object *bo,
320 struct virtio_gpu_object_params *params,
321 struct virtio_gpu_object_array *objs,
322 struct virtio_gpu_fence *fence);
323void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
324 struct virtio_gpu_object *bo);
325void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
326 uint64_t offset,
327 uint32_t width, uint32_t height,
328 uint32_t x, uint32_t y,
329 struct virtio_gpu_object_array *objs,
330 struct virtio_gpu_fence *fence);
331void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
332 uint32_t resource_id,
333 uint32_t x, uint32_t y,
334 uint32_t width, uint32_t height,
335 struct virtio_gpu_object_array *objs,
336 struct virtio_gpu_fence *fence);
337void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
338 uint32_t scanout_id, uint32_t resource_id,
339 uint32_t width, uint32_t height,
340 uint32_t x, uint32_t y);
341void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
342 struct virtio_gpu_object *obj,
343 struct virtio_gpu_mem_entry *ents,
344 unsigned int nents);
345int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
346int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
347void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
348 struct virtio_gpu_output *output);
349int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
350int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
351int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
352 int idx, int version,
353 struct virtio_gpu_drv_cap_cache **cache_p);
354int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
355void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
356 uint32_t context_init, uint32_t nlen,
357 const char *name);
358void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
359 uint32_t id);
360void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
361 uint32_t ctx_id,
362 struct virtio_gpu_object_array *objs);
363void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
364 uint32_t ctx_id,
365 struct virtio_gpu_object_array *objs);
366void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
367 void *data, uint32_t data_size,
368 uint32_t ctx_id,
369 struct virtio_gpu_object_array *objs,
370 struct virtio_gpu_fence *fence);
371void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
372 uint32_t ctx_id,
373 uint64_t offset, uint32_t level,
374 uint32_t stride,
375 uint32_t layer_stride,
376 struct drm_virtgpu_3d_box *box,
377 struct virtio_gpu_object_array *objs,
378 struct virtio_gpu_fence *fence);
379void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
380 uint32_t ctx_id,
381 uint64_t offset, uint32_t level,
382 uint32_t stride,
383 uint32_t layer_stride,
384 struct drm_virtgpu_3d_box *box,
385 struct virtio_gpu_object_array *objs,
386 struct virtio_gpu_fence *fence);
387void
388virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
389 struct virtio_gpu_object *bo,
390 struct virtio_gpu_object_params *params,
391 struct virtio_gpu_object_array *objs,
392 struct virtio_gpu_fence *fence);
393void virtio_gpu_ctrl_ack(struct virtqueue *vq);
394void virtio_gpu_cursor_ack(struct virtqueue *vq);
395void virtio_gpu_fence_ack(struct virtqueue *vq);
396void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
397void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
398void virtio_gpu_dequeue_fence_func(struct work_struct *work);
399
400void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
401
402int
403virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
404 struct virtio_gpu_object_array *objs);
405
406int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
407 struct virtio_gpu_object_array *objs, uint64_t offset);
408
409void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
410 struct virtio_gpu_object *bo);
411
412void
413virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
414 struct virtio_gpu_object *bo,
415 struct virtio_gpu_object_params *params,
416 struct virtio_gpu_mem_entry *ents,
417 uint32_t nents);
418void
419virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
420 uint32_t scanout_id,
421 struct virtio_gpu_object *bo,
422 struct drm_framebuffer *fb,
423 uint32_t width, uint32_t height,
424 uint32_t x, uint32_t y);
425
426/* virtgpu_display.c */
427int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
428void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
429
430/* virtgpu_plane.c */
431uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
432struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
433 enum drm_plane_type type,
434 int index);
435
436/* virtgpu_fence.c */
437struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
438 uint64_t base_fence_ctx,
439 uint32_t ring_idx);
440void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
441 struct virtio_gpu_ctrl_hdr *cmd_hdr,
442 struct virtio_gpu_fence *fence);
443void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
444 u64 fence_id);
445
446/* virtgpu_object.c */
447void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
448struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
449 size_t size);
450int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
451 struct virtio_gpu_object_params *params,
452 struct virtio_gpu_object **bo_ptr,
453 struct virtio_gpu_fence *fence);
454
455bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
456
457int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
458 uint32_t *resid);
459/* virtgpu_prime.c */
460int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
461 struct virtio_gpu_object *bo);
462struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
463 int flags);
464struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
465 struct dma_buf *buf);
466int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj,
467 uuid_t *uuid);
468struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
469 struct drm_device *dev, struct dma_buf_attachment *attach,
470 struct sg_table *sgt);
471
472/* virtgpu_debugfs.c */
473void virtio_gpu_debugfs_init(struct drm_minor *minor);
474
475/* virtgpu_vram.c */
476bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
477int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
478 struct virtio_gpu_object_params *params,
479 struct virtio_gpu_object **bo_ptr);
480struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
481 struct device *dev,
482 enum dma_data_direction dir);
483void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
484 struct sg_table *sgt,
485 enum dma_data_direction dir);
486
487#endif