/*
 * Copyright 2013 Red Hat
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef VIRTGPU_DRM_H
#define VIRTGPU_DRM_H

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 *
 * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel
 * compatibility. Keep fields aligned to their size.
 */

#define DRM_VIRTGPU_MAP 0x01
#define DRM_VIRTGPU_EXECBUFFER 0x02
#define DRM_VIRTGPU_GETPARAM 0x03
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
#define DRM_VIRTGPU_RESOURCE_INFO 0x05
#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define DRM_VIRTGPU_CONTEXT_INIT 0x0b

#define VIRTGPU_EXECBUF_FENCE_FD_IN	0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT	0x02
#define VIRTGPU_EXECBUF_RING_IDX	0x04
#define VIRTGPU_EXECBUF_FLAGS (\
		VIRTGPU_EXECBUF_FENCE_FD_IN |\
		VIRTGPU_EXECBUF_FENCE_FD_OUT |\
		VIRTGPU_EXECBUF_RING_IDX |\
		0)

struct drm_virtgpu_map {
	__u64 offset; /* use for mmap system call */
	__u32 handle;
	__u32 pad;
};

/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
	__u32 flags;
	__u32 size;
	__u64 command; /* void* */
	__u64 bo_handles;
	__u32 num_bo_handles;
	__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
	__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
	__u32 pad;
};

#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */

struct drm_virtgpu_getparam {
	__u64 param;
	__u64 value;
};
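/*
 * Illustrative sketch only, not part of the UAPI: a minimal way user space
 * might probe one of the VIRTGPU_PARAM_* values above through
 * DRM_IOCTL_VIRTGPU_GETPARAM (defined near the end of this header).  It
 * assumes "fd" is an already-open virtio-gpu DRM file descriptor and that
 * the driver writes the result back through the user pointer passed in
 * "value"; error handling is reduced to the bare minimum.
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	static int virtgpu_probe_param(int fd, __u64 param)
 *	{
 *		int value = 0;
 *		struct drm_virtgpu_getparam gp = {
 *			.param = param,
 *			.value = (__u64)(uintptr_t)&value,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) < 0)
 *			return 0;
 *		return value;
 *	}
 *
 *	e.g. virtgpu_probe_param(fd, VIRTGPU_PARAM_3D_FEATURES)
 */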
/* NO_BO flags? NO resource flag? */
/* resource flag for y_0_top */
struct drm_virtgpu_resource_create {
	__u32 target;
	__u32 format;
	__u32 bind;
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 array_size;
	__u32 last_level;
	__u32 nr_samples;
	__u32 flags;
	__u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
	__u32 res_handle;  /* returned by kernel */
	__u32 size;        /* validate transfer in the host */
	__u32 stride;      /* validate transfer in the host */
};

struct drm_virtgpu_resource_info {
	__u32 bo_handle;
	__u32 res_handle;
	__u32 size;
	__u32 blob_mem;
};

struct drm_virtgpu_3d_box {
	__u32 x;
	__u32 y;
	__u32 z;
	__u32 w;
	__u32 h;
	__u32 d;
};

struct drm_virtgpu_3d_transfer_to_host {
	__u32 bo_handle;
	struct drm_virtgpu_3d_box box;
	__u32 level;
	__u32 offset;
	__u32 stride;
	__u32 layer_stride;
};

struct drm_virtgpu_3d_transfer_from_host {
	__u32 bo_handle;
	struct drm_virtgpu_3d_box box;
	__u32 level;
	__u32 offset;
	__u32 stride;
	__u32 layer_stride;
};

#define VIRTGPU_WAIT_NOWAIT 1 /* do not block; poll only */
struct drm_virtgpu_3d_wait {
	__u32 handle; /* 0 is an invalid handle */
	__u32 flags;
};

struct drm_virtgpu_get_caps {
	__u32 cap_set_id;
	__u32 cap_set_ver;
	__u64 addr;
	__u32 size;
	__u32 pad;
};

struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_BLOB_MEM_GUEST             0x0001
#define VIRTGPU_BLOB_MEM_HOST3D            0x0002
#define VIRTGPU_BLOB_MEM_HOST3D_GUEST      0x0003

#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
	/* zero is invalid blob_mem */
	__u32 blob_mem;
	__u32 blob_flags;
	__u32 bo_handle;
	__u32 res_handle;
	__u64 size;

	/*
	 * for 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
	 * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
	 */
	__u32 pad;
	__u32 cmd_size;
	__u64 cmd;
	__u64 blob_id;
};

#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID       0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS       0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
struct drm_virtgpu_context_set_param {
	__u64 param;
	__u64 value;
};

struct drm_virtgpu_context_init {
	__u32 num_params;
	__u32 pad;

	/* pointer to drm_virtgpu_context_set_param array */
	__u64 ctx_set_params;
};
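/*
 * Illustrative sketch only, not part of the UAPI: a context-init call could
 * be assembled roughly as below, selecting a capability set and a ring count
 * before the first execbuffer.  The parameter choices are examples rather
 * than requirements; "fd" is assumed to be an open virtio-gpu DRM file
 * descriptor and "capset_id" a capability set id picked by the caller (for
 * instance from the VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs bitmask).
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	static int virtgpu_init_context(int fd, __u64 capset_id)
 *	{
 *		struct drm_virtgpu_context_set_param params[] = {
 *			{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = capset_id },
 *			{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 1 },
 *		};
 *		struct drm_virtgpu_context_init init = {
 *			.num_params = 2,
 *			.ctx_set_params = (__u64)(uintptr_t)params,
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
 *	}
 */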
/*
 * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
 * effect.  The event size is sizeof(drm_event), since there is no additional
 * payload.
 */
#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000

#define DRM_IOCTL_VIRTGPU_MAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)

#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
		struct drm_virtgpu_execbuffer)

#define DRM_IOCTL_VIRTGPU_GETPARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
		struct drm_virtgpu_getparam)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
		struct drm_virtgpu_resource_create)

#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
		struct drm_virtgpu_resource_info)

#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
		struct drm_virtgpu_3d_transfer_from_host)

#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
		struct drm_virtgpu_3d_transfer_to_host)

#define DRM_IOCTL_VIRTGPU_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
		struct drm_virtgpu_3d_wait)

#define DRM_IOCTL_VIRTGPU_GET_CAPS \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
		struct drm_virtgpu_get_caps)

#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
		struct drm_virtgpu_resource_create_blob)

#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
		struct drm_virtgpu_context_init)

#if defined(__cplusplus)
}
#endif

#endif