/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_page_alloc.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"

#include <linux/delay.h>

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
	struct virtio_gpu_mman *mman;
	struct virtio_gpu_device *vgdev;

	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
	vgdev = container_of(mman, struct virtio_gpu_device, mman);
	return vgdev;
}

static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
{
	struct drm_global_reference *global_ref;
	int r;

	vgdev->mman.mem_global_referenced = false;
	global_ref = &vgdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &virtio_gpu_ttm_mem_global_init;
	global_ref->release = &virtio_gpu_ttm_mem_global_release;

	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	vgdev->mman.bo_global_ref.mem_glob =
		vgdev->mman.mem_global_ref.object;
	global_ref = &vgdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&vgdev->mman.mem_global_ref);
		return r;
	}

	vgdev->mman.mem_global_referenced = true;
	return 0;
}

static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
{
	if (vgdev->mman.mem_global_referenced) {
		drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&vgdev->mman.mem_global_ref);
		vgdev->mman.mem_global_referenced = false;
	}
}
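
/*
 * Note on the two drm_global_item_ref()/_unref() pairs above: the TTM
 * memory-accounting object and the TTM BO global state are singletons
 * shared by every TTM driver in the system, and drm_global_item_ref()
 * reference-counts them so that the last driver out tears them down.
 * The BO global is created on top of the memory global (mem_glob points
 * at it), which is why _fini() drops the references in reverse order.
 */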

#if 0
/*
 * Dead code, apparently a leftover debug hack: this wrapper around the
 * TTM fault handler does nothing useful of its own; presumably it once
 * printed page faults to the kernel log.
 */
static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static int virtio_gpu_ttm_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct virtio_gpu_device *vgdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;
	vgdev = virtio_gpu_get_vgdev(bo->bdev);
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}
#endif

int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct virtio_gpu_device *vgdev;
	int r;

	file_priv = filp->private_data;
	vgdev = file_priv->minor->dev->dev_private;
	if (vgdev == NULL) {
		DRM_ERROR(
			"filp->private_data->minor->dev->dev_private == NULL\n");
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
#if 0
	if (unlikely(r != 0))
		return r;
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
	}
	vma->vm_ops = &virtio_gpu_ttm_vm_ops;
	return 0;
#else
	return r;
#endif
}

static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
					uint32_t flags)
{
	return 0;
}

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	/*
	 * There is no aperture to carve up: TTM only tests mm_node
	 * against NULL, so a dummy non-NULL value marks the node as
	 * allocated.
	 */
	mem->mm_node = (void *)1;
	return 0;
}

static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	mem->mm_node = NULL;
}

static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	return 0;
}

static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
}

static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug,
};

static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				    struct ttm_mem_type_manager *man)
{
	struct virtio_gpu_device *vgdev;

	vgdev = virtio_gpu_get_vgdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &virtio_gpu_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	static struct ttm_place placements = {
		.fpfn  = 0,
		.lpfn  = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
	};

	placement->placement = &placements;
	placement->busy_placement = &placements;
	placement->num_placement = 1;
	placement->num_busy_placement = 1;
}
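
/*
 * Everything this driver places lives in ordinary system RAM, so the
 * access and I/O-memory hooks below mostly just vouch for that: any
 * file may touch any object, and io_mem_reserve() only fills in the
 * bus fields to say "not I/O memory" for the system and TT domains.
 */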

static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
				    struct file *filp)
{
	return 0;
}

static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
					 struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		/* system memory */
		return 0;
	default:
		return -EINVAL;
	}
	return 0;
}

static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
				       struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct virtio_gpu_ttm_tt {
	struct ttm_dma_tt ttm;
	struct virtio_gpu_device *vgdev;
	u64 offset;
};

static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
				       struct ttm_mem_reg *bo_mem)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages)
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);

	/* Not implemented */
	return 0;
}

static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return 0;
}

static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func virtio_gpu_backend_func = {
	.bind = &virtio_gpu_ttm_backend_bind,
	.unbind = &virtio_gpu_ttm_backend_unbind,
	.destroy = &virtio_gpu_ttm_backend_destroy,
};

static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	if (ttm->state != tt_unpopulated)
		return 0;

	return ttm_pool_populate(ttm);
}

static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_bo_device *bdev,
					       unsigned long size,
					       uint32_t page_flags,
					       struct page *dummy_read_page)
{
	struct virtio_gpu_device *vgdev;
	struct virtio_gpu_ttm_tt *gtt;

	vgdev = virtio_gpu_get_vgdev(bdev);
	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
	gtt->vgdev = vgdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
			      bool evict, bool interruptible,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	virtio_gpu_move_null(bo, new_mem);
	return 0;
}
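
/*
 * Moves never copy anything: virtio_gpu_bo_move() above simply adopts
 * the new placement.  What actually matters is move_notify() below,
 * where the host is told to attach backing pages when an object enters
 * the TT domain and to invalidate them when it drops back to plain
 * system memory (or is destroyed, i.e. new_mem == NULL).
 */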

static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
				      struct ttm_mem_reg *new_mem)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
		if (bo->hw_res_handle)
			virtio_gpu_cmd_resource_inval_backing(vgdev,
							      bo->hw_res_handle);

	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
		if (bo->hw_res_handle) {
			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
						 NULL);
		}
	}
}

static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
}

static struct ttm_bo_driver virtio_gpu_bo_driver = {
	.ttm_tt_create = &virtio_gpu_ttm_tt_create,
	.ttm_tt_populate = &virtio_gpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate,
	.invalidate_caches = &virtio_gpu_invalidate_caches,
	.init_mem_type = &virtio_gpu_init_mem_type,
	.evict_flags = &virtio_gpu_evict_flags,
	.move = &virtio_gpu_bo_move,
	.verify_access = &virtio_gpu_verify_access,
	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
	.move_notify = &virtio_gpu_bo_move_notify,
	.swap_notify = &virtio_gpu_bo_swap_notify,
};

int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
	int r;

	r = virtio_gpu_ttm_global_init(vgdev);
	if (r)
		return r;
	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&vgdev->mman.bdev,
			       vgdev->mman.bo_global_ref.ref.object,
			       &virtio_gpu_bo_driver,
			       vgdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET, 0);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		goto err_dev_init;
	}

	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		goto err_mm_init;
	}
	return 0;

err_mm_init:
	ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
	virtio_gpu_ttm_global_fini(vgdev);
	return r;
}

void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
	ttm_bo_device_release(&vgdev->mman.bdev);
	virtio_gpu_ttm_global_fini(vgdev);
	DRM_INFO("virtio_gpu: ttm finalized\n");
}
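
/*
 * Lifecycle sketch (the call sites live elsewhere in the driver, not in
 * this file): virtio_gpu_ttm_init() is expected to run once at device
 * probe time and virtio_gpu_ttm_fini() once at teardown, roughly:
 *
 *	r = virtio_gpu_ttm_init(vgdev);
 *	if (r)
 *		goto err;
 *	... allocate and use virtio_gpu_objects ...
 *	virtio_gpu_ttm_fini(vgdev);
 *
 * The TT heap is sized 0 in ttm_bo_init_mm() because the dummy manager
 * above never consumes real range space.
 */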