
virtio-gpu: add 3d/virgl support

Add the bits needed for OpenGL rendering support: capability
queries, new virtio commands, and DRM ioctls.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>

+1368 -3
+2 -1
drivers/gpu/drm/virtio/Makefile
···
 virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
 	virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
-	virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o
+	virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
+	virtgpu_ioctl.o
 
 obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
+15
drivers/gpu/drm/virtio/virtgpu_drv.c
···
 };
 
 static unsigned int features[] = {
+#ifdef __LITTLE_ENDIAN
+	/*
+	 * The Gallium command stream sent by virgl is native endian.
+	 * Because of that we only support little endian guests on
+	 * little endian hosts.
+	 */
+	VIRTIO_GPU_F_VIRGL,
+#endif
 };
 static struct virtio_driver virtio_gpu_driver = {
 	.feature_table = features,
···
 	.set_busid = drm_virtio_set_busid,
 	.load = virtio_gpu_driver_load,
 	.unload = virtio_gpu_driver_unload,
+	.open = virtio_gpu_driver_open,
+	.postclose = virtio_gpu_driver_postclose,
 
 	.dumb_create = virtio_gpu_mode_dumb_create,
 	.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
···
 #endif
 
 	.gem_free_object = virtio_gpu_gem_free_object,
+	.gem_open_object = virtio_gpu_gem_object_open,
+	.gem_close_object = virtio_gpu_gem_object_close,
 	.fops = &virtio_gpu_driver_fops,
+
+	.ioctls = virtio_gpu_ioctls,
+	.num_ioctls = DRM_VIRTIO_NUM_IOCTLS,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
+60
drivers/gpu/drm/virtio/virtgpu_drv.h
···
 	struct work_struct dequeue_work;
 };
 
+struct virtio_gpu_drv_capset {
+	uint32_t id;
+	uint32_t max_version;
+	uint32_t max_size;
+};
+
+struct virtio_gpu_drv_cap_cache {
+	struct list_head head;
+	void *caps_cache;
+	uint32_t id;
+	uint32_t version;
+	uint32_t size;
+	atomic_t is_valid;
+};
+
 struct virtio_gpu_device {
 	struct device *dev;
 	struct drm_device *ddev;
···
 	struct idr ctx_id_idr;
 	spinlock_t ctx_id_idr_lock;
 
+	bool has_virgl_3d;
+
 	struct work_struct config_changed_work;
+
+	struct virtio_gpu_drv_capset *capsets;
+	uint32_t num_capsets;
+	struct list_head cap_cache;
 };
 
 struct virtio_gpu_fpriv {
···
 /* virtio_kms.c */
 int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);
 int virtio_gpu_driver_unload(struct drm_device *dev);
+int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
+void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
 
 /* virtio_gem.c */
 void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
···
 			       uint64_t size,
 			       struct drm_gem_object **obj_p,
 			       uint32_t *handle_p);
+int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+			       struct drm_file *file);
+void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
+				 struct drm_file *file);
 struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
 						  size_t size, bool kernel,
 						  bool pinned);
···
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 					   uint32_t resource_id);
+int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
+int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+			      int idx, int version,
+			      struct virtio_gpu_drv_cap_cache **cache_p);
+void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
+				   uint32_t nlen, const char *name);
+void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
+				    uint32_t id);
+void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id);
+void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id);
+void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+			   void *data, uint32_t data_size,
+			   uint32_t ctx_id, struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+					  uint32_t resource_id, uint32_t ctx_id,
+					  uint64_t offset, uint32_t level,
+					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_fence **fence);
+void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+					uint32_t resource_id, uint32_t ctx_id,
+					uint64_t offset, uint32_t level,
+					struct virtio_gpu_box *box,
+					struct virtio_gpu_fence **fence);
+void
+virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_resource_create_3d *rc_3d,
+				  struct virtio_gpu_fence **fence);
 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
 void virtio_gpu_cursor_ack(struct virtqueue *vq);
+void virtio_gpu_fence_ack(struct virtqueue *vq);
 void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
 void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
+void virtio_gpu_dequeue_fence_func(struct work_struct *work);
 
 /* virtio_gpu_display.c */
 int virtio_gpu_framebuffer_init(struct drm_device *dev,
+41
drivers/gpu/drm/virtio/virtgpu_gem.c
···
 	drm_gem_object_unreference_unlocked(gobj);
 	return 0;
 }
+
+int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
+			       struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
+	int r;
+
+	if (!vgdev->has_virgl_3d)
+		return 0;
+
+	r = virtio_gpu_object_reserve(qobj, false);
+	if (r)
+		return r;
+
+	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
+					       qobj->hw_res_handle);
+	virtio_gpu_object_unreserve(qobj);
+	return 0;
+}
+
+void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
+				 struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
+	int r;
+
+	if (!vgdev->has_virgl_3d)
+		return;
+
+	r = virtio_gpu_object_reserve(qobj, false);
+	if (r)
+		return;
+
+	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
+					       qobj->hw_res_handle);
+	virtio_gpu_object_unreserve(qobj);
+}
+573
drivers/gpu/drm/virtio/virtgpu_ioctl.c
···
+/*
+ * Copyright (C) 2015 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ *    Dave Airlie
+ *    Alon Levy
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "virtgpu_drv.h"
+#include <drm/virtgpu_drm.h>
+#include "ttm/ttm_execbuf_util.h"
+
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+			      const struct drm_virtgpu_3d_box *src)
+{
+	dst->x = cpu_to_le32(src->x);
+	dst->y = cpu_to_le32(src->y);
+	dst->z = cpu_to_le32(src->z);
+	dst->w = cpu_to_le32(src->w);
+	dst->h = cpu_to_le32(src->h);
+	dst->d = cpu_to_le32(src->d);
+}
+
+static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_map *virtio_gpu_map = data;
+
+	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+					 virtio_gpu_map->handle,
+					 &virtio_gpu_map->offset);
+}
+
+static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+					   struct list_head *head)
+{
+	struct ttm_validate_buffer *buf;
+	struct ttm_buffer_object *bo;
+	struct virtio_gpu_object *qobj;
+	int ret;
+
+	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+	if (ret != 0)
+		return ret;
+
+	list_for_each_entry(buf, head, head) {
+		bo = buf->bo;
+		qobj = container_of(bo, struct virtio_gpu_object, tbo);
+		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+		if (ret) {
+			ttm_eu_backoff_reservation(ticket, head);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void virtio_gpu_unref_list(struct list_head *head)
+{
+	struct ttm_validate_buffer *buf;
+	struct ttm_buffer_object *bo;
+	struct virtio_gpu_object *qobj;
+	list_for_each_entry(buf, head, head) {
+		bo = buf->bo;
+		qobj = container_of(bo, struct virtio_gpu_object, tbo);
+
+		drm_gem_object_unreference_unlocked(&qobj->gem_base);
+	}
+}
+
+static int virtio_gpu_execbuffer(struct drm_device *dev,
+				 struct drm_virtgpu_execbuffer *exbuf,
+				 struct drm_file *drm_file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+	struct drm_gem_object *gobj;
+	struct virtio_gpu_fence *fence;
+	struct virtio_gpu_object *qobj;
+	int ret;
+	uint32_t *bo_handles = NULL;
+	void __user *user_bo_handles = NULL;
+	struct list_head validate_list;
+	struct ttm_validate_buffer *buflist = NULL;
+	int i;
+	struct ww_acquire_ctx ticket;
+	void *buf;
+
+	if (vgdev->has_virgl_3d == false)
+		return -ENOSYS;
+
+	INIT_LIST_HEAD(&validate_list);
+	if (exbuf->num_bo_handles) {
+
+		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
+					   sizeof(uint32_t));
+		buflist = drm_calloc_large(exbuf->num_bo_handles,
+					   sizeof(struct ttm_validate_buffer));
+		if (!bo_handles || !buflist) {
+			drm_free_large(bo_handles);
+			drm_free_large(buflist);
+			return -ENOMEM;
+		}
+
+		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+		if (copy_from_user(bo_handles, user_bo_handles,
+				   exbuf->num_bo_handles * sizeof(uint32_t))) {
+			ret = -EFAULT;
+			drm_free_large(bo_handles);
+			drm_free_large(buflist);
+			return ret;
+		}
+
+		for (i = 0; i < exbuf->num_bo_handles; i++) {
+			gobj = drm_gem_object_lookup(dev,
+						     drm_file, bo_handles[i]);
+			if (!gobj) {
+				drm_free_large(bo_handles);
+				drm_free_large(buflist);
+				return -ENOENT;
+			}
+
+			qobj = gem_to_virtio_gpu_obj(gobj);
+			buflist[i].bo = &qobj->tbo;
+
+			list_add(&buflist[i].head, &validate_list);
+		}
+		drm_free_large(bo_handles);
+	}
+
+	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+	if (ret)
+		goto out_free;
+
+	buf = kmalloc(exbuf->size, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out_unresv;
+	}
+	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
+			   exbuf->size)) {
+		kfree(buf);
+		ret = -EFAULT;
+		goto out_unresv;
+	}
+	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
+			      vfpriv->ctx_id, &fence);
+
+	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+
+	/* fence the command bo */
+	virtio_gpu_unref_list(&validate_list);
+	drm_free_large(buflist);
+	fence_put(&fence->f);
+	return 0;
+
+out_unresv:
+	ttm_eu_backoff_reservation(&ticket, &validate_list);
+out_free:
+	virtio_gpu_unref_list(&validate_list);
+	drm_free_large(buflist);
+	return ret;
+}
+
+/*
+ * Usage of execbuffer:
+ * Relocations need to take into account the full VIRTIO_GPUDrawable size.
+ * However, the command as passed from user space must *not* contain the initial
+ * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
+ */
+static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
+				       struct drm_file *file_priv)
+{
+	struct drm_virtgpu_execbuffer *execbuffer = data;
+	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
+}
+
+
+static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_getparam *param = data;
+	int value;
+
+	switch (param->param) {
+	case VIRTGPU_PARAM_3D_FEATURES:
+		value = vgdev->has_virgl_3d == true ? 1 : 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (copy_to_user((void __user *)(unsigned long)param->value,
+			 &value, sizeof(int))) {
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
+					    struct drm_file *file_priv)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_resource_create *rc = data;
+	int ret;
+	uint32_t res_id;
+	struct virtio_gpu_object *qobj;
+	struct drm_gem_object *obj;
+	uint32_t handle = 0;
+	uint32_t size;
+	struct list_head validate_list;
+	struct ttm_validate_buffer mainbuf;
+	struct virtio_gpu_fence *fence = NULL;
+	struct ww_acquire_ctx ticket;
+	struct virtio_gpu_resource_create_3d rc_3d;
+
+	if (vgdev->has_virgl_3d == false) {
+		if (rc->depth > 1)
+			return -EINVAL;
+		if (rc->nr_samples > 1)
+			return -EINVAL;
+		if (rc->last_level > 1)
+			return -EINVAL;
+		if (rc->target != 2)
+			return -EINVAL;
+		if (rc->array_size > 1)
+			return -EINVAL;
+	}
+
+	INIT_LIST_HEAD(&validate_list);
+	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+	virtio_gpu_resource_id_get(vgdev, &res_id);
+
+	size = rc->size;
+
+	/* allocate a single page size object */
+	if (size == 0)
+		size = PAGE_SIZE;
+
+	qobj = virtio_gpu_alloc_object(dev, size, false, false);
+	if (IS_ERR(qobj)) {
+		ret = PTR_ERR(qobj);
+		goto fail_id;
+	}
+	obj = &qobj->gem_base;
+
+	if (!vgdev->has_virgl_3d) {
+		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+					       rc->width, rc->height);
+
+		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
+	} else {
+		/* use a gem reference since unref list undoes them */
+		drm_gem_object_reference(&qobj->gem_base);
+		mainbuf.bo = &qobj->tbo;
+		list_add(&mainbuf.head, &validate_list);
+
+		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+		if (ret) {
+			DRM_DEBUG("failed to validate\n");
+			goto fail_unref;
+		}
+
+		rc_3d.resource_id = cpu_to_le32(res_id);
+		rc_3d.target = cpu_to_le32(rc->target);
+		rc_3d.format = cpu_to_le32(rc->format);
+		rc_3d.bind = cpu_to_le32(rc->bind);
+		rc_3d.width = cpu_to_le32(rc->width);
+		rc_3d.height = cpu_to_le32(rc->height);
+		rc_3d.depth = cpu_to_le32(rc->depth);
+		rc_3d.array_size = cpu_to_le32(rc->array_size);
+		rc_3d.last_level = cpu_to_le32(rc->last_level);
+		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
+		rc_3d.flags = cpu_to_le32(rc->flags);
+
+		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
+		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
+		if (ret) {
+			ttm_eu_backoff_reservation(&ticket, &validate_list);
+			goto fail_unref;
+		}
+		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+	}
+
+	qobj->hw_res_handle = res_id;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	if (ret) {
+
+		drm_gem_object_release(obj);
+		if (vgdev->has_virgl_3d) {
+			virtio_gpu_unref_list(&validate_list);
+			fence_put(&fence->f);
+		}
+		return ret;
+	}
+	drm_gem_object_unreference_unlocked(obj);
+
+	rc->res_handle = res_id; /* similar to a VM address */
+	rc->bo_handle = handle;
+
+	if (vgdev->has_virgl_3d) {
+		virtio_gpu_unref_list(&validate_list);
+		fence_put(&fence->f);
+	}
+	return 0;
+fail_unref:
+	if (vgdev->has_virgl_3d) {
+		virtio_gpu_unref_list(&validate_list);
+		fence_put(&fence->f);
+	}
+//fail_obj:
+//	drm_gem_object_handle_unreference_unlocked(obj);
+fail_id:
+	virtio_gpu_resource_id_put(vgdev, res_id);
+	return ret;
+}
+
+static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
+					  struct drm_file *file_priv)
+{
+	struct drm_virtgpu_resource_info *ri = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+
+	gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	ri->size = qobj->gem_base.size;
+	ri->res_handle = qobj->hw_res_handle;
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
+static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
+					       void *data,
+					       struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_3d_transfer_from_host *args = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_fence *fence;
+	int ret;
+	u32 offset = args->offset;
+	struct virtio_gpu_box box;
+
+	if (vgdev->has_virgl_3d == false)
+		return -ENOSYS;
+
+	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	ret = virtio_gpu_object_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+			      true, false);
+	if (unlikely(ret))
+		goto out_unres;
+
+	convert_to_hw_box(&box, &args->box);
+	virtio_gpu_cmd_transfer_from_host_3d
+		(vgdev, qobj->hw_res_handle,
+		 vfpriv->ctx_id, offset, args->level,
+		 &box, &fence);
+	reservation_object_add_excl_fence(qobj->tbo.resv,
+					  &fence->f);
+
+	fence_put(&fence->f);
+out_unres:
+	virtio_gpu_object_unreserve(qobj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+	struct drm_virtgpu_3d_transfer_to_host *args = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	struct virtio_gpu_fence *fence;
+	struct virtio_gpu_box box;
+	int ret;
+	u32 offset = args->offset;
+
+	gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	ret = virtio_gpu_object_reserve(qobj, false);
+	if (ret)
+		goto out;
+
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
+			      true, false);
+	if (unlikely(ret))
+		goto out_unres;
+
+	convert_to_hw_box(&box, &args->box);
+	if (!vgdev->has_virgl_3d) {
+		virtio_gpu_cmd_transfer_to_host_2d
+			(vgdev, qobj->hw_res_handle, offset,
+			 box.w, box.h, box.x, box.y, NULL);
+	} else {
+		virtio_gpu_cmd_transfer_to_host_3d
+			(vgdev, qobj->hw_res_handle,
+			 vfpriv ? vfpriv->ctx_id : 0, offset,
+			 args->level, &box, &fence);
+		reservation_object_add_excl_fence(qobj->tbo.resv,
+						  &fence->f);
+		fence_put(&fence->f);
+	}
+
+out_unres:
+	virtio_gpu_object_unreserve(qobj);
+out:
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file)
+{
+	struct drm_virtgpu_3d_wait *args = data;
+	struct drm_gem_object *gobj = NULL;
+	struct virtio_gpu_object *qobj = NULL;
+	int ret;
+	bool nowait = false;
+
+	gobj = drm_gem_object_lookup(dev, file, args->handle);
+	if (gobj == NULL)
+		return -ENOENT;
+
+	qobj = gem_to_virtio_gpu_obj(gobj);
+
+	if (args->flags & VIRTGPU_WAIT_NOWAIT)
+		nowait = true;
+	ret = virtio_gpu_object_wait(qobj, nowait);
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return ret;
+}
+
+static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
+				     void *data, struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct drm_virtgpu_get_caps *args = data;
+	int size;
+	int i;
+	int found_valid = -1;
+	int ret;
+	struct virtio_gpu_drv_cap_cache *cache_ent;
+	void *ptr;
+	if (vgdev->num_capsets == 0)
+		return -ENOSYS;
+
+	spin_lock(&vgdev->display_info_lock);
+	for (i = 0; i < vgdev->num_capsets; i++) {
+		if (vgdev->capsets[i].id == args->cap_set_id) {
+			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
+				found_valid = i;
+				break;
+			}
+		}
+	}
+
+	if (found_valid == -1) {
+		spin_unlock(&vgdev->display_info_lock);
+		return -EINVAL;
+	}
+
+	size = vgdev->capsets[found_valid].max_size;
+	if (args->size > size) {
+		spin_unlock(&vgdev->display_info_lock);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
+		if (cache_ent->id == args->cap_set_id &&
+		    cache_ent->version == args->cap_set_ver) {
+			ptr = cache_ent->caps_cache;
+			spin_unlock(&vgdev->display_info_lock);
+			goto copy_exit;
+		}
+	}
+	spin_unlock(&vgdev->display_info_lock);
+
+	/* not in cache - need to talk to hw */
+	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
+				  &cache_ent);
+
+	ret = wait_event_timeout(vgdev->resp_wq,
+				 atomic_read(&cache_ent->is_valid), 5 * HZ);
+
+	ptr = cache_ent->caps_cache;
+
+copy_exit:
+	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+		return -EFAULT;
+
+	return 0;
+}
+
+struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
+	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
+			  virtio_gpu_resource_create_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	/* make transfer async to the main ring? - not sure, can we
+	   thread these in the underlying GL */
+	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
+			  virtio_gpu_transfer_from_host_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
+			  virtio_gpu_transfer_to_host_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+
+	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
+			  DRM_AUTH|DRM_UNLOCKED),
+};
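
For orientation, the following is a minimal userspace sketch (not part of this
patch) of the new GETPARAM path: it probes whether the host offers virgl 3D
before attempting any 3D ioctls. The device node path is an assumption, and
since the ioctls above are flagged DRM_AUTH, the file descriptor must belong
to the DRM master or an authenticated client.

	/* Hypothetical virgl 3D probe; device path is an assumption. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "virtgpu_drm.h"	/* uapi header added by this patch */

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		int has_3d = 0;
		struct drm_virtgpu_getparam gp = {
			.param = VIRTGPU_PARAM_3D_FEATURES,
			/* kernel copies an int to this user address */
			.value = (uint64_t)(uintptr_t)&has_3d,
		};

		if (fd < 0)
			return 1;
		if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0)
			printf("virgl 3d: %s\n",
			       has_3d ? "available" : "not available");
		close(fd);
		return 0;
	}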
+132 -1
drivers/gpu/drm/virtio/virtgpu_kms.c
···
 		      events_clear, &events_clear);
 }
 
+static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
+				  uint32_t *resid)
+{
+	int handle;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&vgdev->ctx_id_idr_lock);
+	handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
+	spin_unlock(&vgdev->ctx_id_idr_lock);
+	idr_preload_end();
+	*resid = handle;
+}
+
+static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+	spin_lock(&vgdev->ctx_id_idr_lock);
+	idr_remove(&vgdev->ctx_id_idr, id);
+	spin_unlock(&vgdev->ctx_id_idr_lock);
+}
+
+static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+				      uint32_t nlen, const char *name,
+				      uint32_t *ctx_id)
+{
+	virtio_gpu_ctx_id_get(vgdev, ctx_id);
+	virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+}
+
+static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
+				       uint32_t ctx_id)
+{
+	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+	virtio_gpu_ctx_id_put(vgdev, ctx_id);
+}
+
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
 			       void (*work_func)(struct work_struct *work))
 {
 	spin_lock_init(&vgvq->qlock);
 	init_waitqueue_head(&vgvq->ack_queue);
 	INIT_WORK(&vgvq->dequeue_work, work_func);
+}
+
+static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
+				   int num_capsets)
+{
+	int i, ret;
+
+	vgdev->capsets = kcalloc(num_capsets,
+				 sizeof(struct virtio_gpu_drv_capset),
+				 GFP_KERNEL);
+	if (!vgdev->capsets) {
+		DRM_ERROR("failed to allocate cap sets\n");
+		return;
+	}
+	for (i = 0; i < num_capsets; i++) {
+		virtio_gpu_cmd_get_capset_info(vgdev, i);
+		ret = wait_event_timeout(vgdev->resp_wq,
+					 vgdev->capsets[i].id > 0, 5 * HZ);
+		if (ret == 0) {
+			DRM_ERROR("timed out waiting for cap set %d\n", i);
+			kfree(vgdev->capsets);
+			vgdev->capsets = NULL;
+			return;
+		}
+		DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
+			 i, vgdev->capsets[i].id,
+			 vgdev->capsets[i].max_version,
+			 vgdev->capsets[i].max_size);
+	}
+	vgdev->num_capsets = num_capsets;
 }
 
 int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
···
 	struct virtio_gpu_device *vgdev;
 	/* this will expand later */
 	struct virtqueue *vqs[2];
-	u32 num_scanouts;
+	u32 num_scanouts, num_capsets;
 	int ret;
 
 	if (!virtio_has_feature(dev->virtdev, VIRTIO_F_VERSION_1))
···
 	spin_lock_init(&vgdev->fence_drv.lock);
 	INIT_LIST_HEAD(&vgdev->fence_drv.fences);
+	INIT_LIST_HEAD(&vgdev->cap_cache);
 	INIT_WORK(&vgdev->config_changed_work,
 		  virtio_gpu_config_changed_work_func);
+
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
+		vgdev->has_virgl_3d = true;
+	DRM_INFO("virgl 3d acceleration %s\n",
+		 vgdev->has_virgl_3d ? "enabled" : "not available");
 
 	ret = vgdev->vdev->config->find_vqs(vgdev->vdev, 2, vqs,
 					    callbacks, names);
···
 		ret = -EINVAL;
 		goto err_scanouts;
 	}
+	DRM_INFO("number of scanouts: %d\n", num_scanouts);
+
+	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
+		     num_capsets, &num_capsets);
+	DRM_INFO("number of cap sets: %d\n", num_capsets);
 
 	ret = virtio_gpu_modeset_init(vgdev);
 	if (ret)
···
 	virtio_device_ready(vgdev->vdev);
 	vgdev->vqs_ready = true;
 
+	if (num_capsets)
+		virtio_gpu_get_capsets(vgdev, num_capsets);
 	virtio_gpu_cmd_get_display_info(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
 			   5 * HZ);
···
 	return ret;
 }
 
+static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_drv_cap_cache *cache_ent, *tmp;
+
+	list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
+		kfree(cache_ent->caps_cache);
+		kfree(cache_ent);
+	}
+}
+
 int virtio_gpu_driver_unload(struct drm_device *dev)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
···
 	virtio_gpu_modeset_fini(vgdev);
 	virtio_gpu_ttm_fini(vgdev);
 	virtio_gpu_free_vbufs(vgdev);
+	virtio_gpu_cleanup_cap_cache(vgdev);
+	kfree(vgdev->capsets);
 	kfree(vgdev);
 	return 0;
+}
+
+int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv;
+	uint32_t id;
+	char dbgname[64], tmpname[TASK_COMM_LEN];
+
+	/* can't create contexts without 3d renderer */
+	if (!vgdev->has_virgl_3d)
+		return 0;
+
+	get_task_comm(tmpname, current);
+	snprintf(dbgname, sizeof(dbgname), "%s", tmpname);
+	dbgname[63] = 0;
+	/* allocate a virt GPU context for this opener */
+	vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL);
+	if (!vfpriv)
+		return -ENOMEM;
+
+	virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+
+	vfpriv->ctx_id = id;
+	file->driver_priv = vfpriv;
+	return 0;
+}
+
+void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_fpriv *vfpriv;
+
+	if (!vgdev->has_virgl_3d)
+		return;
+
+	vfpriv = file->driver_priv;
+
+	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+	kfree(vfpriv);
+	file->driver_priv = NULL;
 }
+1
drivers/gpu/drm/virtio/virtgpu_ttm.c
···
 #include <ttm/ttm_module.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
+#include <drm/virtgpu_drm.h>
 #include "virtgpu_drv.h"
 
 #include <linux/delay.h>
+265
drivers/gpu/drm/virtio/virtgpu_vq.c
···
 	drm_kms_helper_hotplug_event(vgdev->ddev);
 }
 
+static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
+					      struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_get_capset_info *cmd =
+		(struct virtio_gpu_get_capset_info *)vbuf->buf;
+	struct virtio_gpu_resp_capset_info *resp =
+		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
+	int i = le32_to_cpu(cmd->capset_index);
+
+	spin_lock(&vgdev->display_info_lock);
+	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
+	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
+	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
+	spin_unlock(&vgdev->display_info_lock);
+	wake_up(&vgdev->resp_wq);
+}
+
+static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
+				     struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_get_capset *cmd =
+		(struct virtio_gpu_get_capset *)vbuf->buf;
+	struct virtio_gpu_resp_capset *resp =
+		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
+	struct virtio_gpu_drv_cap_cache *cache_ent;
+
+	spin_lock(&vgdev->display_info_lock);
+	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
+		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
+		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
+			memcpy(cache_ent->caps_cache, resp->capset_data,
+			       cache_ent->size);
+			atomic_set(&cache_ent->is_valid, 1);
+			break;
+		}
+	}
+	spin_unlock(&vgdev->display_info_lock);
+	wake_up(&vgdev->resp_wq);
+}
+
+
 int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 {
 	struct virtio_gpu_ctrl_hdr *cmd_p;
···
 	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	return 0;
+}
+
+int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
+{
+	struct virtio_gpu_get_capset_info *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	void *resp_buf;
+
+	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
+			   GFP_KERNEL);
+	if (!resp_buf)
+		return -ENOMEM;
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
+		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
+		 resp_buf);
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
+	cmd_p->capset_index = cpu_to_le32(idx);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	return 0;
+}
+
+int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+			      int idx, int version,
+			      struct virtio_gpu_drv_cap_cache **cache_p)
+{
+	struct virtio_gpu_get_capset *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	int max_size = vgdev->capsets[idx].max_size;
+	struct virtio_gpu_drv_cap_cache *cache_ent;
+	void *resp_buf;
+
+	if (idx > vgdev->num_capsets)
+		return -EINVAL;
+
+	if (version > vgdev->capsets[idx].max_version)
+		return -EINVAL;
+
+	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
+	if (!cache_ent)
+		return -ENOMEM;
+
+	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
+	if (!cache_ent->caps_cache) {
+		kfree(cache_ent);
+		return -ENOMEM;
+	}
+
+	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
+			   GFP_KERNEL);
+	if (!resp_buf) {
+		kfree(cache_ent->caps_cache);
+		kfree(cache_ent);
+		return -ENOMEM;
+	}
+
+	cache_ent->version = version;
+	cache_ent->id = vgdev->capsets[idx].id;
+	atomic_set(&cache_ent->is_valid, 0);
+	cache_ent->size = max_size;
+	spin_lock(&vgdev->display_info_lock);
+	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+	spin_unlock(&vgdev->display_info_lock);
+
+	cmd_p = virtio_gpu_alloc_cmd_resp
+		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
+		 sizeof(struct virtio_gpu_resp_capset) + max_size,
+		 resp_buf);
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
+	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
+	cmd_p->capset_version = cpu_to_le32(version);
+	*cache_p = cache_ent;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+	return 0;
+}
+
+void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
+				   uint32_t nlen, const char *name)
+{
+	struct virtio_gpu_ctx_create *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
+	cmd_p->hdr.ctx_id = cpu_to_le32(id);
+	cmd_p->nlen = cpu_to_le32(nlen);
+	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name)-1);
+	cmd_p->debug_name[sizeof(cmd_p->debug_name)-1] = 0;
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
+				    uint32_t id)
+{
+	struct virtio_gpu_ctx_destroy *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
+	cmd_p->hdr.ctx_id = cpu_to_le32(id);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id)
+{
+	struct virtio_gpu_ctx_resource *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+
+}
+
+void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
+					    uint32_t ctx_id,
+					    uint32_t resource_id)
+{
+	struct virtio_gpu_ctx_resource *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
+				  struct virtio_gpu_resource_create_3d *rc_3d,
+				  struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_resource_create_3d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	*cmd_p = *rc_3d;
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
+	cmd_p->hdr.flags = 0;
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+					uint32_t resource_id, uint32_t ctx_id,
+					uint64_t offset, uint32_t level,
+					struct virtio_gpu_box *box,
+					struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_transfer_host_3d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->box = *box;
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->level = cpu_to_le32(level);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
+					  uint32_t resource_id, uint32_t ctx_id,
+					  uint64_t offset, uint32_t level,
+					  struct virtio_gpu_box *box,
+					  struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_transfer_host_3d *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->box = *box;
+	cmd_p->offset = cpu_to_le64(offset);
+	cmd_p->level = cpu_to_le32(level);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+}
+
+void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
+			   void *data, uint32_t data_size,
+			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+{
+	struct virtio_gpu_cmd_submit *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+
+	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+	memset(cmd_p, 0, sizeof(*cmd_p));
+
+	vbuf->data_buf = data;
+	vbuf->data_size = data_size;
+
+	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
+	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
+	cmd_p->size = cpu_to_le32(data_size);
+
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+1
include/uapi/drm/Kbuild
···
 header-y += via_drm.h
 header-y += vmwgfx_drm.h
 header-y += msm_drm.h
+header-y += virtgpu_drm.h
+167
include/uapi/drm/virtgpu_drm.h
···
+/*
+ * Copyright 2013 Red Hat
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef VIRTGPU_DRM_H
+#define VIRTGPU_DRM_H
+
+#include <stddef.h>
+#include "drm/drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ *
+ * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel
+ * compatibility. Keep fields aligned to their size.
+ */
+
+#define DRM_VIRTGPU_MAP 0x01
+#define DRM_VIRTGPU_EXECBUFFER 0x02
+#define DRM_VIRTGPU_GETPARAM 0x03
+#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
+#define DRM_VIRTGPU_RESOURCE_INFO 0x05
+#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
+#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
+#define DRM_VIRTGPU_WAIT 0x08
+#define DRM_VIRTGPU_GET_CAPS 0x09
+
+struct drm_virtgpu_map {
+	uint64_t offset; /* use for mmap system call */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+struct drm_virtgpu_execbuffer {
+	uint32_t flags; /* for future use */
+	uint32_t size;
+	uint64_t command; /* void* */
+	uint64_t bo_handles;
+	uint32_t num_bo_handles;
+	uint32_t pad;
+};
+
+#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+
+struct drm_virtgpu_getparam {
+	uint64_t param;
+	uint64_t value;
+};
+
+/* NO_BO flags? NO resource flag? */
+/* resource flag for y_0_top */
+struct drm_virtgpu_resource_create {
+	uint32_t target;
+	uint32_t format;
+	uint32_t bind;
+	uint32_t width;
+	uint32_t height;
+	uint32_t depth;
+	uint32_t array_size;
+	uint32_t last_level;
+	uint32_t nr_samples;
+	uint32_t flags;
+	uint32_t bo_handle; /* if this is set - recreate a new resource attached to this bo ? */
+	uint32_t res_handle;  /* returned by kernel */
+	uint32_t size;        /* validate transfer in the host */
+	uint32_t stride;      /* validate transfer in the host */
+};
+
+struct drm_virtgpu_resource_info {
+	uint32_t bo_handle;
+	uint32_t res_handle;
+	uint32_t size;
+	uint32_t stride;
+};
+
+struct drm_virtgpu_3d_box {
+	uint32_t x;
+	uint32_t y;
+	uint32_t z;
+	uint32_t w;
+	uint32_t h;
+	uint32_t d;
+};
+
+struct drm_virtgpu_3d_transfer_to_host {
+	uint32_t bo_handle;
+	struct drm_virtgpu_3d_box box;
+	uint32_t level;
+	uint32_t offset;
+};
+
+struct drm_virtgpu_3d_transfer_from_host {
+	uint32_t bo_handle;
+	struct drm_virtgpu_3d_box box;
+	uint32_t level;
+	uint32_t offset;
+};
+
+#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
+struct drm_virtgpu_3d_wait {
+	uint32_t handle; /* 0 is an invalid handle */
+	uint32_t flags;
+};
+
+struct drm_virtgpu_get_caps {
+	uint32_t cap_set_id;
+	uint32_t cap_set_ver;
+	uint64_t addr;
+	uint32_t size;
+	uint32_t pad;
+};
+
+#define DRM_IOCTL_VIRTGPU_MAP \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
+
+#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+		struct drm_virtgpu_execbuffer)
+
+#define DRM_IOCTL_VIRTGPU_GETPARAM \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
+		struct drm_virtgpu_getparam)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE			\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE,	\
+		struct drm_virtgpu_resource_create)
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
+		 struct drm_virtgpu_resource_info)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST,	\
+		struct drm_virtgpu_3d_transfer_from_host)
+
+#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST,	\
+		struct drm_virtgpu_3d_transfer_to_host)
+
+#define DRM_IOCTL_VIRTGPU_WAIT				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT,	\
+		struct drm_virtgpu_3d_wait)
+
+#define DRM_IOCTL_VIRTGPU_GET_CAPS \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
+	struct drm_virtgpu_get_caps)
+
+#endif
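
Building on the header above, a capability query might look like the sketch
below (not part of this patch). Cap set id 1 is VIRTIO_GPU_CAPSET_VIRGL from
the virtio header later in this patch; the version and buffer size are
assumptions. Note that virtio_gpu_get_caps_ioctl() as written copies the
set's max_size bytes to args->addr even when args->size is smaller, so the
destination buffer should be sized to the maximum the host reports.

	/* Hypothetical capset fetch using the new GET_CAPS ioctl. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include "virtgpu_drm.h"	/* the header above */

	static int fetch_virgl_caps(int fd, void *buf, uint32_t bufsize)
	{
		struct drm_virtgpu_get_caps caps;

		memset(&caps, 0, sizeof(caps));
		caps.cap_set_id = 1;	/* VIRTIO_GPU_CAPSET_VIRGL */
		caps.cap_set_ver = 1;	/* must not exceed host's max version */
		caps.addr = (uint64_t)(uintptr_t)buf;
		caps.size = bufsize;	/* should cover the set's max_size */
		return ioctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &caps);
	}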
+111 -1
include/uapi/linux/virtio_gpu.h
···
 
 #include <linux/types.h>
 
+#define VIRTIO_GPU_F_VIRGL 0
+
 enum virtio_gpu_ctrl_type {
 	VIRTIO_GPU_UNDEFINED = 0,
 
···
 	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
 	VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
 	VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
+	VIRTIO_GPU_CMD_GET_CAPSET_INFO,
+	VIRTIO_GPU_CMD_GET_CAPSET,
+
+	/* 3d commands */
+	VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
+	VIRTIO_GPU_CMD_CTX_DESTROY,
+	VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE,
+	VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE,
+	VIRTIO_GPU_CMD_RESOURCE_CREATE_3D,
+	VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
+	VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
+	VIRTIO_GPU_CMD_SUBMIT_3D,
 
 	/* cursor commands */
 	VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
···
 	/* success responses */
 	VIRTIO_GPU_RESP_OK_NODATA = 0x1100,
 	VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
+	VIRTIO_GPU_RESP_OK_CAPSET_INFO,
+	VIRTIO_GPU_RESP_OK_CAPSET,
 
 	/* error responses */
 	VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
···
 	} pmodes[VIRTIO_GPU_MAX_SCANOUTS];
 };
 
+/* data passed in the control vq, 3d related */
+
+struct virtio_gpu_box {
+	__le32 x, y, z;
+	__le32 w, h, d;
+};
+
+/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D, VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D */
+struct virtio_gpu_transfer_host_3d {
+	struct virtio_gpu_ctrl_hdr hdr;
+	struct virtio_gpu_box box;
+	__le64 offset;
+	__le32 resource_id;
+	__le32 level;
+	__le32 stride;
+	__le32 layer_stride;
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_3D */
+#define VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP (1 << 0)
+struct virtio_gpu_resource_create_3d {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 target;
+	__le32 format;
+	__le32 bind;
+	__le32 width;
+	__le32 height;
+	__le32 depth;
+	__le32 array_size;
+	__le32 last_level;
+	__le32 nr_samples;
+	__le32 flags;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_CTX_CREATE */
+struct virtio_gpu_ctx_create {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 nlen;
+	__le32 padding;
+	char debug_name[64];
+};
+
+/* VIRTIO_GPU_CMD_CTX_DESTROY */
+struct virtio_gpu_ctx_destroy {
+	struct virtio_gpu_ctrl_hdr hdr;
+};
+
+/* VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE, VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE */
+struct virtio_gpu_ctx_resource {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 resource_id;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_SUBMIT_3D */
+struct virtio_gpu_cmd_submit {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 size;
+	__le32 padding;
+};
+
+#define VIRTIO_GPU_CAPSET_VIRGL 1
+
+/* VIRTIO_GPU_CMD_GET_CAPSET_INFO */
+struct virtio_gpu_get_capset_info {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 capset_index;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_CAPSET_INFO */
+struct virtio_gpu_resp_capset_info {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 capset_id;
+	__le32 capset_max_version;
+	__le32 capset_max_size;
+	__le32 padding;
+};
+
+/* VIRTIO_GPU_CMD_GET_CAPSET */
+struct virtio_gpu_get_capset {
+	struct virtio_gpu_ctrl_hdr hdr;
+	__le32 capset_id;
+	__le32 capset_version;
+};
+
+/* VIRTIO_GPU_RESP_OK_CAPSET */
+struct virtio_gpu_resp_capset {
+	struct virtio_gpu_ctrl_hdr hdr;
+	uint8_t capset_data[];
+};
+
 #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
 
 struct virtio_gpu_config {
 	__u32 events_read;
 	__u32 events_clear;
 	__u32 num_scanouts;
-	__u32 reserved;
+	__u32 num_capsets;
 };
 
 /* simple formats for fbcon/X use */
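
All multi-byte fields in these wire structs are little endian regardless of
guest byte order (which is why the driver only offers VIRTIO_GPU_F_VIRGL on
little endian builds). As a minimal sketch of what that means in practice,
a host-side implementation or test harness assembling a
VIRTIO_GPU_CMD_CTX_CREATE command might look like the following; the helper
name is hypothetical and htole32() is glibc's conversion, where the in-kernel
driver uses cpu_to_le32() for the same purpose:

	/* Hypothetical command assembly (not part of this patch). */
	#include <endian.h>
	#include <stdint.h>
	#include <string.h>
	#include <linux/virtio_gpu.h>	/* the header above */

	static void fill_ctx_create(struct virtio_gpu_ctx_create *cmd,
				    uint32_t ctx_id, const char *name)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->hdr.type = htole32(VIRTIO_GPU_CMD_CTX_CREATE);
		cmd->hdr.ctx_id = htole32(ctx_id);
		cmd->nlen = htole32(strlen(name));
		/* debug_name need not be NUL-terminated past 63 chars */
		strncpy(cmd->debug_name, name, sizeof(cmd->debug_name) - 1);
	}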