Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/exynos: use real device for DMA-mapping operations

This patch changes device pointer provided to all calls to DMA-mapping
subsystem from the virtual exynos-drm 'device' to the real device pointer
of one of the CRTC devices (decon, fimd or mixer). This way no more hacks
will be needed to configure proper DMA-mapping address space on the common
virtual exynos-drm device. This change also removes the need for some
hacks in IOMMU related code. It also finally solves the problem of Exynos
DRM driver not working on ARM64 architecture, which provides noop-based
DMA-mapping operations for virtual platform devices.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>

Authored by Marek Szyprowski and committed by Inki Dae
f43c3596 a5fb26cd

+69 -46
+38 -4
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 130 130 exynos_atomic_commit_complete(commit); 131 131 } 132 132 133 + static struct device *exynos_drm_get_dma_device(void); 134 + 133 135 static int exynos_drm_load(struct drm_device *dev, unsigned long flags) 134 136 { 135 137 struct exynos_drm_private *private; ··· 148 146 149 147 dev_set_drvdata(dev->dev, dev); 150 148 dev->dev_private = (void *)private; 149 + 150 + /* the first real CRTC device is used for all dma mapping operations */ 151 + private->dma_dev = exynos_drm_get_dma_device(); 152 + if (!private->dma_dev) { 153 + DRM_ERROR("no device found for DMA mapping operations.\n"); 154 + ret = -ENODEV; 155 + goto err_free_private; 156 + } 157 + DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", 158 + dev_name(private->dma_dev)); 151 159 152 160 /* 153 161 * create mapping to manage iommu table and set a pointer to iommu ··· 500 488 501 489 #define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */ 502 490 #define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */ 491 + #define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */ 503 492 504 493 #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? 
&drv : NULL) 505 494 ··· 511 498 static struct exynos_drm_driver_info exynos_drm_drivers[] = { 512 499 { 513 500 DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD), 514 - DRM_COMPONENT_DRIVER 501 + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE 515 502 }, { 516 503 DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON), 517 - DRM_COMPONENT_DRIVER 504 + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE 518 505 }, { 519 506 DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON), 520 - DRM_COMPONENT_DRIVER 507 + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE 521 508 }, { 522 509 DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER), 523 - DRM_COMPONENT_DRIVER 510 + DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE 524 511 }, { 525 512 DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), 526 513 DRM_COMPONENT_DRIVER ··· 627 614 .pm = &exynos_drm_pm_ops, 628 615 }, 629 616 }; 617 + 618 + static struct device *exynos_drm_get_dma_device(void) 619 + { 620 + int i; 621 + 622 + for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) { 623 + struct exynos_drm_driver_info *info = &exynos_drm_drivers[i]; 624 + struct device *dev; 625 + 626 + if (!info->driver || !(info->flags & DRM_DMA_DEVICE)) 627 + continue; 628 + 629 + while ((dev = bus_find_device(&platform_bus_type, NULL, 630 + &info->driver->driver, 631 + (void *)platform_bus_type.match))) { 632 + put_device(dev); 633 + return dev; 634 + } 635 + } 636 + return NULL; 637 + } 630 638 631 639 static void exynos_drm_unregister_devices(void) 632 640 {
+9
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 219 219 struct drm_crtc *crtc[MAX_CRTC]; 220 220 struct drm_property *plane_zpos_property; 221 221 222 + struct device *dma_dev; 222 223 unsigned long da_start; 223 224 unsigned long da_space_size; 225 + void *mapping; 224 226 225 227 unsigned int pipe; 226 228 ··· 231 229 spinlock_t lock; 232 230 wait_queue_head_t wait; 233 231 }; 232 + 233 + static inline struct device *to_dma_dev(struct drm_device *dev) 234 + { 235 + struct exynos_drm_private *priv = dev->dev_private; 236 + 237 + return priv->dma_dev; 238 + } 234 239 235 240 /* 236 241 * Exynos drm sub driver structure.
+1 -1
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 50 50 if (vm_size > exynos_gem->size) 51 51 return -EINVAL; 52 52 53 - ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie, 53 + ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, 54 54 exynos_gem->dma_addr, exynos_gem->size, 55 55 &exynos_gem->dma_attrs); 56 56 if (ret < 0) {
+4 -3
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 259 259 init_dma_attrs(&g2d->cmdlist_dma_attrs); 260 260 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); 261 261 262 - g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev, 262 + g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), 263 263 G2D_CMDLIST_POOL_SIZE, 264 264 &g2d->cmdlist_pool, GFP_KERNEL, 265 265 &g2d->cmdlist_dma_attrs); ··· 293 293 return 0; 294 294 295 295 err: 296 - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, 296 + dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, 297 297 g2d->cmdlist_pool_virt, 298 298 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); 299 299 return ret; ··· 306 306 kfree(g2d->cmdlist_node); 307 307 308 308 if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) { 309 - dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, 309 + dma_free_attrs(to_dma_dev(subdrv->drm_dev), 310 + G2D_CMDLIST_POOL_SIZE, 310 311 g2d->cmdlist_pool_virt, 311 312 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); 312 313 }
+7 -7
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 65 65 return -ENOMEM; 66 66 } 67 67 68 - exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size, 68 + exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, 69 69 &exynos_gem->dma_addr, GFP_KERNEL, 70 70 &exynos_gem->dma_attrs); 71 71 if (!exynos_gem->cookie) { ··· 73 73 goto err_free; 74 74 } 75 75 76 - ret = dma_get_sgtable_attrs(dev->dev, &sgt, exynos_gem->cookie, 76 + ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, 77 77 exynos_gem->dma_addr, exynos_gem->size, 78 78 &exynos_gem->dma_attrs); 79 79 if (ret < 0) { ··· 98 98 err_sgt_free: 99 99 sg_free_table(&sgt); 100 100 err_dma_free: 101 - dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie, 101 + dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 102 102 exynos_gem->dma_addr, &exynos_gem->dma_attrs); 103 103 err_free: 104 104 drm_free_large(exynos_gem->pages); ··· 118 118 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 119 119 (unsigned long)exynos_gem->dma_addr, exynos_gem->size); 120 120 121 - dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie, 121 + dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 122 122 (dma_addr_t)exynos_gem->dma_addr, 123 123 &exynos_gem->dma_attrs); 124 124 ··· 335 335 if (vm_size > exynos_gem->size) 336 336 return -EINVAL; 337 337 338 - ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie, 338 + ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, 339 339 exynos_gem->dma_addr, exynos_gem->size, 340 340 &exynos_gem->dma_attrs); 341 341 if (ret < 0) { ··· 381 381 382 382 mutex_lock(&drm_dev->struct_mutex); 383 383 384 - nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 384 + nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir); 385 385 if (!nents) { 386 386 DRM_ERROR("failed to map sgl with dma.\n"); 387 387 mutex_unlock(&drm_dev->struct_mutex); ··· 396 396 struct sg_table *sgt, 397 397 enum dma_data_direction dir) 398 398 { 399 - 
dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir); 399 + dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir); 400 400 } 401 401 402 402 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
+8 -29
drivers/gpu/drm/exynos/exynos_drm_iommu.c
··· 30 30 { 31 31 struct dma_iommu_mapping *mapping = NULL; 32 32 struct exynos_drm_private *priv = drm_dev->dev_private; 33 - struct device *dev = drm_dev->dev; 34 33 35 34 if (!priv->da_start) 36 35 priv->da_start = EXYNOS_DEV_ADDR_START; ··· 42 43 if (IS_ERR(mapping)) 43 44 return PTR_ERR(mapping); 44 45 45 - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), 46 - GFP_KERNEL); 47 - if (!dev->dma_parms) 48 - goto error; 49 - 50 - dma_set_max_seg_size(dev, 0xffffffffu); 51 - dev->archdata.mapping = mapping; 46 + priv->mapping = mapping; 52 47 53 48 return 0; 54 - error: 55 - arm_iommu_release_mapping(mapping); 56 - return -ENOMEM; 57 49 } 58 50 59 51 /* ··· 57 67 */ 58 68 void drm_release_iommu_mapping(struct drm_device *drm_dev) 59 69 { 60 - struct device *dev = drm_dev->dev; 70 + struct exynos_drm_private *priv = drm_dev->dev_private; 61 71 62 - arm_iommu_release_mapping(dev->archdata.mapping); 72 + arm_iommu_release_mapping(priv->mapping); 63 73 } 64 74 65 75 /* ··· 74 84 int drm_iommu_attach_device(struct drm_device *drm_dev, 75 85 struct device *subdrv_dev) 76 86 { 77 - struct device *dev = drm_dev->dev; 87 + struct exynos_drm_private *priv = drm_dev->dev_private; 78 88 int ret; 79 89 80 - if (!dev->archdata.mapping) 90 + if (!priv->mapping) 81 91 return 0; 82 92 83 93 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, ··· 91 101 if (subdrv_dev->archdata.mapping) 92 102 arm_iommu_detach_device(subdrv_dev); 93 103 94 - ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); 104 + ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); 95 105 if (ret < 0) { 96 106 DRM_DEBUG_KMS("failed iommu attach.\n"); 97 107 return ret; 98 108 } 99 - 100 - /* 101 - * Set dma_ops to drm_device just one time. 102 - * 103 - * The dma mapping api needs device object and the api is used 104 - * to allocate physial memory and map it with iommu table. 
105 - * If iommu attach succeeded, the sub driver would have dma_ops 106 - * for iommu and also all sub drivers have same dma_ops. 107 - */ 108 - if (get_dma_ops(dev) == get_dma_ops(NULL)) 109 - set_dma_ops(dev, get_dma_ops(subdrv_dev)); 110 109 111 110 return 0; 112 111 } ··· 112 133 void drm_iommu_detach_device(struct drm_device *drm_dev, 113 134 struct device *subdrv_dev) 114 135 { 115 - struct device *dev = drm_dev->dev; 116 - struct dma_iommu_mapping *mapping = dev->archdata.mapping; 136 + struct exynos_drm_private *priv = drm_dev->dev_private; 137 + struct dma_iommu_mapping *mapping = priv->mapping; 117 138 118 139 if (!mapping || !mapping->domain) 119 140 return;
+2 -2
drivers/gpu/drm/exynos/exynos_drm_iommu.h
··· 29 29 30 30 static inline bool is_drm_iommu_supported(struct drm_device *drm_dev) 31 31 { 32 - struct device *dev = drm_dev->dev; 32 + struct exynos_drm_private *priv = drm_dev->dev_private; 33 33 34 - return dev->archdata.mapping ? true : false; 34 + return priv->mapping ? true : false; 35 35 } 36 36 37 37 #else