Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: use anon-inode instead of relying on cdevs

DRM drivers share a common address_space across all character-devices of a
single DRM device. This allows simple buffer eviction and mapping-control.
However, the DRM core currently waits for the first ->open() on any char-dev
to mark the underlying inode as the backing inode of the device. This delayed
initialization causes ugly conditional checks all over the place:
if (dev->dev_mapping)
do_sth();

To avoid delayed initialization and to stop reusing the inode of the
char-dev, we allocate an anonymous inode for each DRM device and reset
filp->f_mapping to it on ->open().

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>

+44 -54
+1 -1
drivers/gpu/drm/ast/ast_ttm.c
··· 324 324 } 325 325 326 326 astbo->bo.bdev = &ast->ttm.bdev; 327 - astbo->bo.bdev->dev_mapping = dev->dev_mapping; 327 + astbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; 328 328 329 329 ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 330 330
+1 -1
drivers/gpu/drm/bochs/bochs_mm.c
··· 359 359 } 360 360 361 361 bochsbo->bo.bdev = &bochs->ttm.bdev; 362 - bochsbo->bo.bdev->dev_mapping = dev->dev_mapping; 362 + bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; 363 363 364 364 bochs_ttm_placement(bochsbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 365 365
+1 -1
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 329 329 } 330 330 331 331 cirrusbo->bo.bdev = &cirrus->ttm.bdev; 332 - cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; 332 + cirrusbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; 333 333 334 334 cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 335 335
+3 -22
drivers/gpu/drm/drm_fops.c
··· 84 84 struct drm_minor *minor; 85 85 int retcode = 0; 86 86 int need_setup = 0; 87 - struct address_space *old_mapping; 88 - struct address_space *old_imapping; 89 87 90 88 minor = idr_find(&drm_minors_idr, minor_id); 91 89 if (!minor) ··· 97 99 98 100 if (!dev->open_count++) 99 101 need_setup = 1; 100 - mutex_lock(&dev->struct_mutex); 101 - old_imapping = inode->i_mapping; 102 - old_mapping = dev->dev_mapping; 103 - if (old_mapping == NULL) 104 - dev->dev_mapping = &inode->i_data; 105 - /* ihold ensures nobody can remove inode with our i_data */ 106 - ihold(container_of(dev->dev_mapping, struct inode, i_data)); 107 - inode->i_mapping = dev->dev_mapping; 108 - filp->f_mapping = dev->dev_mapping; 109 - mutex_unlock(&dev->struct_mutex); 102 + 103 + /* share address_space across all char-devs of a single device */ 104 + filp->f_mapping = dev->anon_inode->i_mapping; 110 105 111 106 retcode = drm_open_helper(inode, filp, dev); 112 107 if (retcode) ··· 112 121 return 0; 113 122 114 123 err_undo: 115 - mutex_lock(&dev->struct_mutex); 116 - filp->f_mapping = old_imapping; 117 - inode->i_mapping = old_imapping; 118 - iput(container_of(dev->dev_mapping, struct inode, i_data)); 119 - dev->dev_mapping = old_mapping; 120 - mutex_unlock(&dev->struct_mutex); 121 124 dev->open_count--; 122 125 return retcode; 123 126 } ··· 419 434 420 435 drm_legacy_dma_takedown(dev); 421 436 422 - dev->dev_mapping = NULL; 423 437 mutex_unlock(&dev->struct_mutex); 424 438 425 439 drm_legacy_dev_reinit(dev); ··· 532 548 drm_master_put(&file_priv->minor->master); 533 549 } 534 550 } 535 - 536 - BUG_ON(dev->dev_mapping == NULL); 537 - iput(container_of(dev->dev_mapping, struct inode, i_data)); 538 551 539 552 /* drop the reference held my the file priv */ 540 553 if (file_priv->master)
+11 -1
drivers/gpu/drm/drm_stub.c
··· 526 526 mutex_init(&dev->struct_mutex); 527 527 mutex_init(&dev->ctxlist_mutex); 528 528 529 - if (drm_ht_create(&dev->map_hash, 12)) 529 + dev->anon_inode = drm_fs_inode_new(); 530 + if (IS_ERR(dev->anon_inode)) { 531 + ret = PTR_ERR(dev->anon_inode); 532 + DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); 530 533 goto err_free; 534 + } 535 + 536 + if (drm_ht_create(&dev->map_hash, 12)) 537 + goto err_inode; 531 538 532 539 ret = drm_ctxbitmap_init(dev); 533 540 if (ret) { ··· 556 549 drm_ctxbitmap_cleanup(dev); 557 550 err_ht: 558 551 drm_ht_remove(&dev->map_hash); 552 + err_inode: 553 + drm_fs_inode_free(dev->anon_inode); 559 554 err_free: 560 555 kfree(dev); 561 556 return NULL; ··· 585 576 586 577 drm_ctxbitmap_cleanup(dev); 587 578 drm_ht_remove(&dev->map_hash); 579 + drm_fs_inode_free(dev->anon_inode); 588 580 589 581 kfree(dev->devname); 590 582 kfree(dev);
+2 -1
drivers/gpu/drm/i915/i915_gem.c
··· 1508 1508 if (!obj->fault_mappable) 1509 1509 return; 1510 1510 1511 - drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); 1511 + drm_vma_node_unmap(&obj->base.vma_node, 1512 + obj->base.dev->anon_inode->i_mapping); 1512 1513 obj->fault_mappable = false; 1513 1514 } 1514 1515
+1 -1
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 324 324 } 325 325 326 326 mgabo->bo.bdev = &mdev->ttm.bdev; 327 - mgabo->bo.bdev->dev_mapping = dev->dev_mapping; 327 + mgabo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; 328 328 329 329 mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); 330 330
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 228 228 struct nouveau_bo *nvbo = NULL; 229 229 int ret = 0; 230 230 231 - drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping; 231 + drm->ttm.bdev.dev_mapping = drm->dev->anon_inode->i_mapping; 232 232 233 233 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { 234 234 NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+17 -17
drivers/gpu/drm/omapdrm/omap_gem.c
··· 153 153 static void evict_entry(struct drm_gem_object *obj, 154 154 enum tiler_fmt fmt, struct usergart_entry *entry) 155 155 { 156 - if (obj->dev->dev_mapping) { 157 - struct omap_gem_object *omap_obj = to_omap_bo(obj); 158 - int n = usergart[fmt].height; 159 - size_t size = PAGE_SIZE * n; 160 - loff_t off = mmap_offset(obj) + 161 - (entry->obj_pgoff << PAGE_SHIFT); 162 - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); 163 - if (m > 1) { 164 - int i; 165 - /* if stride > than PAGE_SIZE then sparse mapping: */ 166 - for (i = n; i > 0; i--) { 167 - unmap_mapping_range(obj->dev->dev_mapping, 168 - off, PAGE_SIZE, 1); 169 - off += PAGE_SIZE * m; 170 - } 171 - } else { 172 - unmap_mapping_range(obj->dev->dev_mapping, off, size, 1); 156 + struct omap_gem_object *omap_obj = to_omap_bo(obj); 157 + int n = usergart[fmt].height; 158 + size_t size = PAGE_SIZE * n; 159 + loff_t off = mmap_offset(obj) + 160 + (entry->obj_pgoff << PAGE_SHIFT); 161 + const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); 162 + 163 + if (m > 1) { 164 + int i; 165 + /* if stride > than PAGE_SIZE then sparse mapping: */ 166 + for (i = n; i > 0; i--) { 167 + unmap_mapping_range(obj->dev->anon_inode->i_mapping, 168 + off, PAGE_SIZE, 1); 169 + off += PAGE_SIZE * m; 173 170 } 171 + } else { 172 + unmap_mapping_range(obj->dev->anon_inode->i_mapping, 173 + off, size, 1); 174 174 } 175 175 176 176 entry->obj = NULL;
+1 -2
drivers/gpu/drm/qxl/qxl_object.c
··· 82 82 enum ttm_bo_type type; 83 83 int r; 84 84 85 - if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) 86 - qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; 85 + qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping; 87 86 if (kernel) 88 87 type = ttm_bo_type_kernel; 89 88 else
+1 -2
drivers/gpu/drm/qxl/qxl_ttm.c
··· 518 518 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); 519 519 DRM_INFO("qxl: %uM of Surface memory size\n", 520 520 (unsigned)qdev->surfaceram_size / (1024 * 1024)); 521 - if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) 522 - qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; 521 + qdev->mman.bdev.dev_mapping = qdev->ddev->anon_inode->i_mapping; 523 522 r = qxl_ttm_debugfs_init(qdev); 524 523 if (r) { 525 524 DRM_ERROR("Failed to init debugfs\n");
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 145 145 146 146 size = ALIGN(size, PAGE_SIZE); 147 147 148 - rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; 148 + rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping; 149 149 if (kernel) { 150 150 type = ttm_bo_type_kernel; 151 151 } else if (sg) {
+1 -1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 745 745 } 746 746 DRM_INFO("radeon: %uM of GTT memory ready.\n", 747 747 (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); 748 - rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping; 748 + rdev->mman.bdev.dev_mapping = rdev->ddev->anon_inode->i_mapping; 749 749 750 750 r = radeon_ttm_debugfs_init(rdev); 751 751 if (r) {
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 969 969 goto out_no_shman; 970 970 971 971 file_priv->driver_priv = vmw_fp; 972 - dev_priv->bdev.dev_mapping = dev->dev_mapping; 972 + dev_priv->bdev.dev_mapping = dev->anon_inode->i_mapping; 973 973 974 974 return 0; 975 975
+1 -1
include/drm/drmP.h
··· 1183 1183 struct drm_sg_mem *sg; /**< Scatter gather memory */ 1184 1184 unsigned int num_crtcs; /**< Number of CRTCs on this device */ 1185 1185 void *dev_private; /**< device private data */ 1186 - struct address_space *dev_mapping; 1186 + struct inode *anon_inode; 1187 1187 struct drm_sigdata sigdata; /**< For block_all_signals */ 1188 1188 sigset_t sigmask; 1189 1189