Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * Copyright 2013 Red Hat
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef VIRTGPU_DRM_H
#define VIRTGPU_DRM_H

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 *
 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
 * compatibility. Keep fields aligned to their size.
 */

#define DRM_VIRTGPU_MAP 0x01
#define DRM_VIRTGPU_EXECBUFFER 0x02
#define DRM_VIRTGPU_GETPARAM 0x03
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
#define DRM_VIRTGPU_RESOURCE_INFO 0x05
#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a

#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_EXECBUF_FLAGS (\
		VIRTGPU_EXECBUF_FENCE_FD_IN |\
		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
		0)

struct drm_virtgpu_map {
	__u64 offset; /* use for mmap system call */
	__u32 handle;
	__u32 pad;
};
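
/*
 * Usage sketch (illustrative, not part of the uapi): mapping a buffer
 * object into the guest process. The DRM fd, bo_handle and bo_size are
 * assumed to come from earlier calls; error handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct drm_virtgpu_map req = { .handle = bo_handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &req) == 0)
 *		ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, req.offset);
 */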

struct drm_virtgpu_execbuffer {
	__u32 flags;
	__u32 size;
	__u64 command; /* void* */
	__u64 bo_handles;
	__u32 num_bo_handles;
	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
};
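
/*
 * Usage sketch (illustrative, not part of the uapi): submitting a command
 * stream and requesting an out-fence. cmd_buf/cmd_len and the handles[]
 * array are caller-supplied assumptions; the command stream format itself
 * is defined by the host renderer, not by this header.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	struct drm_virtgpu_execbuffer eb = {
 *		.flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size           = cmd_len,
 *		.command        = (__u64)(uintptr_t)cmd_buf,
 *		.bo_handles     = (__u64)(uintptr_t)handles,
 *		.num_bo_handles = num_handles,
 *		.fence_fd       = -1,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) == 0)
 *		out_fence_fd = eb.fence_fd;	// close() when no longer needed
 */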

#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */

struct drm_virtgpu_getparam {
	__u64 param;
	__u64 value;
};
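
/*
 * Usage sketch (illustrative): querying a capability. Note that 'value'
 * carries a user pointer (cast to __u64 as described above) and the driver
 * writes the result through it. fd and error handling are assumed.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	int has_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (__u64)(uintptr_t)&has_3d,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && has_3d)
 *		;	// host exposes virgl 3D support
 */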

/* NO_BO flags? NO resource flag? */
/* resource flag for y_0_top */
struct drm_virtgpu_resource_create {
	__u32 target;
	__u32 format;
	__u32 bind;
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 array_size;
	__u32 last_level;
	__u32 nr_samples;
	__u32 flags;
	__u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
	__u32 res_handle; /* returned by kernel */
	__u32 size;   /* validate transfer in the host */
	__u32 stride; /* validate transfer in the host */
};
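
/*
 * Usage sketch (illustrative): creating a classic (non-blob) 3D resource.
 * The target/format/bind values follow the host renderer's enums and are
 * shown only as placeholders (tex_target, fmt, bind_flags are assumptions);
 * real code takes them from the renderer headers. On success the kernel
 * fills res_handle and bo_handle.
 *
 *	#include <sys/ioctl.h>
 *
 *	struct drm_virtgpu_resource_create rc = {
 *		.target     = tex_target,	// renderer-defined target enum
 *		.format     = fmt,		// renderer-defined format enum
 *		.bind       = bind_flags,	// renderer-defined bind flags
 *		.width      = 256,
 *		.height     = 256,
 *		.depth      = 1,
 *		.array_size = 1,
 *		.size       = 256 * 256 * 4,
 *		.stride     = 256 * 4,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc) == 0)
 *		bo = rc.bo_handle;	// GEM handle for later MAP/TRANSFER/WAIT
 */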

struct drm_virtgpu_resource_info {
	__u32 bo_handle;
	__u32 res_handle;
	__u32 size;
	__u32 blob_mem;
};
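
/*
 * Usage sketch (illustrative): looking up the kernel's view of an existing
 * buffer object, e.g. one imported through PRIME. Only bo_handle is an
 * input; the remaining fields are filled by the kernel.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	struct drm_virtgpu_resource_info info = { .bo_handle = bo_handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info) == 0)
 *		printf("res %u, %u bytes, blob_mem 0x%x\n",
 *		       info.res_handle, info.size, info.blob_mem);
 */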

struct drm_virtgpu_3d_box {
	__u32 x;
	__u32 y;
	__u32 z;
	__u32 w;
	__u32 h;
	__u32 d;
};

struct drm_virtgpu_3d_transfer_to_host {
	__u32 bo_handle;
	struct drm_virtgpu_3d_box box;
	__u32 level;
	__u32 offset;
	__u32 stride;
	__u32 layer_stride;
};

struct drm_virtgpu_3d_transfer_from_host {
	__u32 bo_handle;
	struct drm_virtgpu_3d_box box;
	__u32 level;
	__u32 offset;
	__u32 stride;
	__u32 layer_stride;
};
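
/*
 * Usage sketch (illustrative): flushing a 2D region of the guest copy to
 * the host-side resource. The box is in pixels for the given mip level;
 * offset/stride describe the guest layout. The transfer completes
 * asynchronously, so callers typically use DRM_IOCTL_VIRTGPU_WAIT (or an
 * execbuffer fence) before touching the backing pages again. The dimensions
 * and strides below are assumptions.
 *
 *	#include <sys/ioctl.h>
 *
 *	struct drm_virtgpu_3d_transfer_to_host xfer = {
 *		.bo_handle = bo_handle,
 *		.box       = { .x = 0, .y = 0, .z = 0,
 *			       .w = 256, .h = 256, .d = 1 },
 *		.level     = 0,
 *		.offset    = 0,
 *		.stride    = 256 * 4,	// bytes per row in the guest copy
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
 */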

#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
struct drm_virtgpu_3d_wait {
	__u32 handle; /* 0 is an invalid handle */
	__u32 flags;
};
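
/*
 * Usage sketch (illustrative): waiting for pending host work on a buffer
 * object. With flags == 0 the call blocks; with VIRTGPU_WAIT_NOWAIT it is
 * expected to fail with EBUSY while work is still outstanding.
 *
 *	#include <sys/ioctl.h>
 *
 *	struct drm_virtgpu_3d_wait wait = { .handle = bo_handle };
 *
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);	// blocks until idle
 */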

struct drm_virtgpu_get_caps {
	__u32 cap_set_id;
	__u32 cap_set_ver;
	__u64 addr;
	__u32 size;
	__u32 pad;
};
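
/*
 * Usage sketch (illustrative): copying a capability set into a caller-owned
 * buffer. The capset id/version and the caps structure layout come from the
 * host renderer's definitions, so caps_buf, caps_size and the id/version
 * values here are assumptions; the kernel copies at most 'size' bytes to
 * 'addr'.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	struct drm_virtgpu_get_caps gc = {
 *		.cap_set_id  = 1,	// renderer-defined capset id
 *		.cap_set_ver = 1,
 *		.addr        = (__u64)(uintptr_t)caps_buf,
 *		.size        = caps_size,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc);
 */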

struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_BLOB_MEM_GUEST             0x0001
#define VIRTGPU_BLOB_MEM_HOST3D            0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST      0x0003

#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
	/* zero is invalid blob_mem */
	__u32 blob_mem;
	__u32 blob_flags;
	__u32 bo_handle;
	__u32 res_handle;
	__u64 size;

	/*
	 * For 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
	 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
	 */
	__u32 pad;
	__u32 cmd_size;
	__u64 cmd;
	__u64 blob_id;
};
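
/*
 * Usage sketch (illustrative): creating a mappable guest blob and mapping
 * it, assuming the driver reported VIRTGPU_PARAM_RESOURCE_BLOB. Host blobs
 * (VIRTGPU_BLOB_MEM_HOST3D) would additionally pass a blob_id created
 * earlier through the renderer's command stream; that part is
 * renderer-specific and only hinted at here. The 64 KiB size is arbitrary.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct drm_virtgpu_resource_create_blob blob = {
 *		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
 *		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
 *		.size       = 65536,
 *	};
 *	struct drm_virtgpu_map map = {0};
 *	void *ptr = MAP_FAILED;
 *
 *	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob) == 0) {
 *		map.handle = blob.bo_handle;	// filled in by the kernel
 *		if (ioctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map) == 0)
 *			ptr = mmap(NULL, blob.size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, fd, map.offset);
 *	}
 */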

#define DRM_IOCTL_VIRTGPU_MAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)

#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER, \
		struct drm_virtgpu_execbuffer)

#define DRM_IOCTL_VIRTGPU_GETPARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM, \
		struct drm_virtgpu_getparam)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
		struct drm_virtgpu_resource_create)

#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
		struct drm_virtgpu_resource_info)

#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
		struct drm_virtgpu_3d_transfer_from_host)

#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
		struct drm_virtgpu_3d_transfer_to_host)

#define DRM_IOCTL_VIRTGPU_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
		struct drm_virtgpu_3d_wait)

#define DRM_IOCTL_VIRTGPU_GET_CAPS \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
		struct drm_virtgpu_get_caps)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
		struct drm_virtgpu_resource_create_blob)

#if defined(__cplusplus)
}
#endif

#endif