Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2021-12-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.17:

UAPI Changes:

Cross-subsystem Changes:

* dma-buf: Make fences mandatory in dma_resv_add_excl_fence

Core Changes:

* Move hashtable to legacy code
* Return error pointers from struct drm_driver.gem_create_object

* cma-helper: Improve public interfaces; Remove CONFIG_DRM_KMS_CMA_HELPER option
* mipi-dbi: Don't depend on CMA helpers
* ttm: Don't include DRM hashtable; Stop pruning fences after wait; Documentation

Driver Changes:

* aspeed: Select CONFIG_DRM_GEM_CMA_HELPER

* bridge/lontium-lt9611: Fix HDMI sensing
* bridge/parade-ps8640: Fixes
* bridge/sn65dsi86: Defer probe if no dsi host found

* fsl-dcu: Select CONFIG_DRM_GEM_CMA_HELPER

* i915: Remove dma_resv_prune

* omapdrm: Fix scatterlist export; Support virtual planes; Fixes

* panel: Boe-tv110c9m,Inx-hj110iz: Update init code

* qxl: Use dma-resv iterator

* rockchip: Use generic fbdev emulation

* tidss: Fixes

* vmwgfx: Fix leak on probe errors; Fail probing on broken hosts; New
placement for MOB page tables; Hide internal BOs from userspace; Cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YbHskHZc9HoAYuPZ@linux-uq9g.fritz.box

+1812 -835
+11
Documentation/gpu/todo.rst
··· 646 646 647 647 Contact: Harry Wentland, Alex Deucher 648 648 649 + vmwgfx: Replace hashtable with Linux' implementation 650 + ---------------------------------------------------- 651 + 652 + The vmwgfx driver uses its own hashtable implementation. Replace the 653 + code with Linux' implementation and update the callers. It's mostly a 654 + refactoring task, but the interfaces are different. 655 + 656 + Contact: Zack Rusin, Thomas Zimmermann <tzimmermann@suse.de> 657 + 658 + Level: Intermediate 659 + 649 660 Bootsplash 650 661 ========== 651 662
+1 -2
drivers/dma-buf/dma-resv.c
··· 305 305 if (old) 306 306 i = old->shared_count; 307 307 308 - if (fence) 309 - dma_fence_get(fence); 308 + dma_fence_get(fence); 310 309 311 310 write_seqcount_begin(&obj->seq); 312 311 /* write_seqcount_begin provides the necessary memory barrier */
-7
drivers/gpu/drm/Kconfig
··· 217 217 help 218 218 Choose this if you need the GEM CMA helper functions 219 219 220 - config DRM_KMS_CMA_HELPER 221 - bool 222 - depends on DRM 223 - select DRM_GEM_CMA_HELPER 224 - help 225 - Choose this if you need the KMS CMA helper functions 226 - 227 220 config DRM_GEM_SHMEM_HELPER 228 221 tristate 229 222 depends on DRM && MMU
+4 -4
drivers/gpu/drm/Makefile
··· 6 6 drm-y := drm_aperture.o drm_auth.o drm_cache.o \ 7 7 drm_file.o drm_gem.o drm_ioctl.o \ 8 8 drm_drv.o \ 9 - drm_sysfs.o drm_hashtab.o drm_mm.o \ 9 + drm_sysfs.o drm_mm.o \ 10 10 drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \ 11 11 drm_trace_points.o drm_prime.o \ 12 12 drm_vma_manager.o \ ··· 20 20 drm_managed.o drm_vblank_work.o 21 21 22 22 drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \ 23 - drm_irq.o drm_legacy_misc.o drm_lock.o drm_memory.o \ 24 - drm_scatter.o drm_vm.o 23 + drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \ 24 + drm_memory.o drm_scatter.o drm_vm.o 25 25 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o 26 26 drm-$(CONFIG_COMPAT) += drm_ioc32.o 27 27 drm-$(CONFIG_DRM_PANEL) += drm_panel.o ··· 36 36 obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o 37 37 38 38 drm_cma_helper-y := drm_gem_cma_helper.o 39 + drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o 39 40 obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o 40 41 41 42 drm_shmem_helper-y := drm_gem_shmem_helper.o ··· 61 60 62 61 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o 63 62 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 64 - drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 65 63 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o 66 64 drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o 67 65
-2
drivers/gpu/drm/arm/Kconfig
··· 6 6 depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) 7 7 depends on COMMON_CLK 8 8 select DRM_KMS_HELPER 9 - select DRM_KMS_CMA_HELPER 10 9 help 11 10 Choose this option if you have an ARM High Definition Colour LCD 12 11 controller. ··· 26 27 depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) 27 28 depends on COMMON_CLK 28 29 select DRM_KMS_HELPER 29 - select DRM_KMS_CMA_HELPER 30 30 select DRM_GEM_CMA_HELPER 31 31 select VIDEOMODE_HELPERS 32 32 help
-1
drivers/gpu/drm/arm/display/Kconfig
··· 4 4 depends on DRM && OF 5 5 depends on COMMON_CLK 6 6 select DRM_KMS_HELPER 7 - select DRM_KMS_CMA_HELPER 8 7 select DRM_GEM_CMA_HELPER 9 8 select VIDEOMODE_HELPERS 10 9 help
+1 -1
drivers/gpu/drm/aspeed/Kconfig
··· 5 5 depends on (COMPILE_TEST || ARCH_ASPEED) 6 6 depends on MMU 7 7 select DRM_KMS_HELPER 8 - select DRM_KMS_CMA_HELPER 8 + select DRM_GEM_CMA_HELPER 9 9 select DMA_CMA if HAVE_DMA_CONTIGUOUS 10 10 select CMA if HAVE_DMA_CONTIGUOUS 11 11 select MFD_SYSCON
-1
drivers/gpu/drm/atmel-hlcdc/Kconfig
··· 4 4 depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM 5 5 select DRM_GEM_CMA_HELPER 6 6 select DRM_KMS_HELPER 7 - select DRM_KMS_CMA_HELPER 8 7 select DRM_PANEL 9 8 help 10 9 Choose this option if you have an ATMEL SoC with an HLCDC display
+2 -2
drivers/gpu/drm/bridge/lontium-lt9611.c
··· 586 586 int connected = 0; 587 587 588 588 regmap_read(lt9611->regmap, 0x825e, &reg_val); 589 - connected = (reg_val & BIT(2)); 589 + connected = (reg_val & BIT(0)); 590 590 591 591 lt9611->status = connected ? connector_status_connected : 592 592 connector_status_disconnected; ··· 892 892 int connected; 893 893 894 894 regmap_read(lt9611->regmap, 0x825e, &reg_val); 895 - connected = reg_val & BIT(2); 895 + connected = reg_val & BIT(0); 896 896 897 897 lt9611->status = connected ? connector_status_connected : 898 898 connector_status_disconnected;
+1
drivers/gpu/drm/bridge/parade-ps8640.c
··· 449 449 if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) 450 450 return -EINVAL; 451 451 452 + ps_bridge->aux.drm_dev = bridge->dev; 452 453 ret = drm_dp_aux_register(&ps_bridge->aux); 453 454 if (ret) { 454 455 dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
+8 -16
drivers/gpu/drm/bridge/ti-sn65dsi86.c
··· 704 704 705 705 static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata) 706 706 { 707 - int ret, val; 707 + int val; 708 708 struct mipi_dsi_host *host; 709 709 struct mipi_dsi_device *dsi; 710 710 struct device *dev = pdata->dev; ··· 714 714 }; 715 715 716 716 host = of_find_mipi_dsi_host_by_node(pdata->host_node); 717 - if (!host) { 718 - DRM_ERROR("failed to find dsi host\n"); 719 - return -ENODEV; 720 - } 717 + if (!host) 718 + return -EPROBE_DEFER; 721 719 722 720 dsi = devm_mipi_dsi_device_register_full(dev, host, &info); 723 - if (IS_ERR(dsi)) { 724 - DRM_ERROR("failed to create dsi device\n"); 721 + if (IS_ERR(dsi)) 725 722 return PTR_ERR(dsi); 726 - } 727 723 728 724 /* TODO: setting to 4 MIPI lanes always for now */ 729 725 dsi->lanes = 4; ··· 735 739 736 740 pdata->dsi = dsi; 737 741 738 - ret = devm_mipi_dsi_attach(dev, dsi); 739 - if (ret < 0) { 740 - DRM_ERROR("failed to attach dsi to host\n"); 741 - return ret; 742 - } 743 - 744 - return 0; 742 + return devm_mipi_dsi_attach(dev, dsi); 745 743 } 746 744 747 745 static int ti_sn_bridge_attach(struct drm_bridge *bridge, ··· 1257 1267 drm_bridge_add(&pdata->bridge); 1258 1268 1259 1269 ret = ti_sn_attach_host(pdata); 1260 - if (ret) 1270 + if (ret) { 1271 + dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n"); 1261 1272 goto err_remove_bridge; 1273 + } 1262 1274 1263 1275 return 0; 1264 1276
+42 -48
drivers/gpu/drm/drm_gem_cma_helper.c
··· 32 32 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer 33 33 * objects that are physically contiguous in memory. This is useful for 34 34 * display drivers that are unable to map scattered buffers via an IOMMU. 35 + * 36 + * For GEM callback helpers in struct &drm_gem_object functions, see likewise 37 + * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps 38 + * drm_gem_cma_vmap()). These helpers perform the necessary type conversion. 35 39 */ 36 40 37 41 static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { 38 - .free = drm_gem_cma_free_object, 39 - .print_info = drm_gem_cma_print_info, 40 - .get_sg_table = drm_gem_cma_get_sg_table, 41 - .vmap = drm_gem_cma_vmap, 42 - .mmap = drm_gem_cma_mmap, 42 + .free = drm_gem_cma_object_free, 43 + .print_info = drm_gem_cma_object_print_info, 44 + .get_sg_table = drm_gem_cma_object_get_sg_table, 45 + .vmap = drm_gem_cma_object_vmap, 46 + .mmap = drm_gem_cma_object_mmap, 43 47 .vm_ops = &drm_gem_cma_vm_ops, 44 48 }; 45 49 ··· 67 63 struct drm_gem_object *gem_obj; 68 64 int ret = 0; 69 65 70 - if (drm->driver->gem_create_object) 66 + if (drm->driver->gem_create_object) { 71 67 gem_obj = drm->driver->gem_create_object(drm, size); 72 - else 73 - gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); 74 - if (!gem_obj) 75 - return ERR_PTR(-ENOMEM); 68 + if (IS_ERR(gem_obj)) 69 + return ERR_CAST(gem_obj); 70 + cma_obj = to_drm_gem_cma_obj(gem_obj); 71 + } else { 72 + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); 73 + if (!cma_obj) 74 + return ERR_PTR(-ENOMEM); 75 + gem_obj = &cma_obj->base; 76 + } 76 77 77 78 if (!gem_obj->funcs) 78 79 gem_obj->funcs = &drm_gem_cma_default_funcs; 79 - 80 - cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); 81 80 82 81 if (private) { 83 82 drm_gem_private_object_init(drm, gem_obj, size); ··· 199 192 } 200 193 201 194 /** 202 - * drm_gem_cma_free_object - free resources associated with a CMA GEM object 203 - * 
@gem_obj: GEM object to free 195 + * drm_gem_cma_free - free resources associated with a CMA GEM object 196 + * @cma_obj: CMA GEM object to free 204 197 * 205 198 * This function frees the backing memory of the CMA GEM object, cleans up the 206 199 * GEM object state and frees the memory used to store the object itself. 207 200 * If the buffer is imported and the virtual address is set, it is released. 208 - * Drivers using the CMA helpers should set this as their 209 - * &drm_gem_object_funcs.free callback. 210 201 */ 211 - void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) 202 + void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj) 212 203 { 213 - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj); 204 + struct drm_gem_object *gem_obj = &cma_obj->base; 214 205 struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); 215 206 216 207 if (gem_obj->import_attach) { ··· 229 224 230 225 kfree(cma_obj); 231 226 } 232 - EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); 227 + EXPORT_SYMBOL_GPL(drm_gem_cma_free); 233 228 234 229 /** 235 230 * drm_gem_cma_dumb_create_internal - create a dumb buffer object ··· 376 371 377 372 /** 378 373 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs 374 + * @cma_obj: CMA GEM object 379 375 * @p: DRM printer 380 376 * @indent: Tab indentation level 381 - * @obj: GEM object 382 377 * 383 - * This function can be used as the &drm_driver->gem_print_info callback. 384 - * It prints paddr and vaddr for use in e.g. debugfs output. 378 + * This function prints paddr and vaddr for use in e.g. debugfs output. 
385 379 */ 386 - void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, 387 - const struct drm_gem_object *obj) 380 + void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, 381 + struct drm_printer *p, unsigned int indent) 388 382 { 389 - const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 390 - 391 383 drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr); 392 384 drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr); 393 385 } ··· 393 391 /** 394 392 * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned 395 393 * pages for a CMA GEM object 396 - * @obj: GEM object 394 + * @cma_obj: CMA GEM object 397 395 * 398 - * This function exports a scatter/gather table by 399 - * calling the standard DMA mapping API. Drivers using the CMA helpers should 400 - * set this as their &drm_gem_object_funcs.get_sg_table callback. 396 + * This function exports a scatter/gather table by calling the standard 397 + * DMA mapping API. 401 398 * 402 399 * Returns: 403 400 * A pointer to the scatter/gather table of pinned pages or NULL on failure. 404 401 */ 405 - struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj) 402 + struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj) 406 403 { 407 - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 404 + struct drm_gem_object *obj = &cma_obj->base; 408 405 struct sg_table *sgt; 409 406 int ret; 410 407 ··· 469 468 /** 470 469 * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual 471 470 * address space 472 - * @obj: GEM object 471 + * @cma_obj: CMA GEM object 473 472 * @map: Returns the kernel virtual address of the CMA GEM object's backing 474 473 * store. 475 474 * 476 - * This function maps a buffer into the kernel's 477 - * virtual address space. Since the CMA buffers are already mapped into the 478 - * kernel virtual address space this simply returns the cached virtual 479 - * address. 
Drivers using the CMA helpers should set this as their DRM 480 - * driver's &drm_gem_object_funcs.vmap callback. 475 + * This function maps a buffer into the kernel's virtual address space. 476 + * Since the CMA buffers are already mapped into the kernel virtual address 477 + * space this simply returns the cached virtual address. 481 478 * 482 479 * Returns: 483 480 * 0 on success, or a negative error code otherwise. 484 481 */ 485 - int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 482 + int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map) 486 483 { 487 - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 488 - 489 484 dma_buf_map_set_vaddr(map, cma_obj->vaddr); 490 485 491 486 return 0; ··· 490 493 491 494 /** 492 495 * drm_gem_cma_mmap - memory-map an exported CMA GEM object 493 - * @obj: GEM object 496 + * @cma_obj: CMA GEM object 494 497 * @vma: VMA for the area to be mapped 495 498 * 496 499 * This function maps a buffer into a userspace process's address space. 497 500 * In addition to the usual GEM VMA setup it immediately faults in the entire 498 - * object instead of using on-demand faulting. Drivers that use the CMA 499 - * helpers should set this as their &drm_gem_object_funcs.mmap callback. 501 + * object instead of using on-demand faulting. 500 502 * 501 503 * Returns: 502 504 * 0 on success or a negative error code on failure. 
503 505 */ 504 - int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 506 + int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma) 505 507 { 506 - struct drm_gem_cma_object *cma_obj; 508 + struct drm_gem_object *obj = &cma_obj->base; 507 509 int ret; 508 510 509 511 /* ··· 512 516 */ 513 517 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); 514 518 vma->vm_flags &= ~VM_PFNMAP; 515 - 516 - cma_obj = to_drm_gem_cma_obj(obj); 517 519 518 520 if (cma_obj->map_noncoherent) { 519 521 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+10 -7
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 56 56 57 57 size = PAGE_ALIGN(size); 58 58 59 - if (dev->driver->gem_create_object) 59 + if (dev->driver->gem_create_object) { 60 60 obj = dev->driver->gem_create_object(dev, size); 61 - else 62 - obj = kzalloc(sizeof(*shmem), GFP_KERNEL); 63 - if (!obj) 64 - return ERR_PTR(-ENOMEM); 65 - 66 - shmem = to_drm_gem_shmem_obj(obj); 61 + if (IS_ERR(obj)) 62 + return ERR_CAST(obj); 63 + shmem = to_drm_gem_shmem_obj(obj); 64 + } else { 65 + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); 66 + if (!shmem) 67 + return ERR_PTR(-ENOMEM); 68 + obj = &shmem->base; 69 + } 67 70 68 71 if (!obj->funcs) 69 72 obj->funcs = &drm_gem_shmem_funcs;
+2 -2
drivers/gpu/drm/drm_gem_vram_helper.c
··· 197 197 198 198 if (dev->driver->gem_create_object) { 199 199 gem = dev->driver->gem_create_object(dev, size); 200 - if (!gem) 201 - return ERR_PTR(-ENOMEM); 200 + if (IS_ERR(gem)) 201 + return ERR_CAST(gem); 202 202 gbo = drm_gem_vram_of_gem(gem); 203 203 } else { 204 204 gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
+2 -8
drivers/gpu/drm/drm_hashtab.c
··· 32 32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com> 33 33 */ 34 34 35 - #include <linux/export.h> 36 35 #include <linux/hash.h> 37 36 #include <linux/mm.h> 38 37 #include <linux/rculist.h> 39 38 #include <linux/slab.h> 40 39 #include <linux/vmalloc.h> 41 40 42 - #include <drm/drm_hashtab.h> 43 41 #include <drm/drm_print.h> 42 + 43 + #include "drm_legacy.h" 44 44 45 45 int drm_ht_create(struct drm_open_hash *ht, unsigned int order) 46 46 { ··· 58 58 } 59 59 return 0; 60 60 } 61 - EXPORT_SYMBOL(drm_ht_create); 62 61 63 62 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) 64 63 { ··· 134 135 } 135 136 return 0; 136 137 } 137 - EXPORT_SYMBOL(drm_ht_insert_item); 138 138 139 139 /* 140 140 * Just insert an item and return any "bits" bit key that hasn't been ··· 162 164 } 163 165 return 0; 164 166 } 165 - EXPORT_SYMBOL(drm_ht_just_insert_please); 166 167 167 168 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, 168 169 struct drm_hash_item **item) ··· 175 178 *item = hlist_entry(list, struct drm_hash_item, head); 176 179 return 0; 177 180 } 178 - EXPORT_SYMBOL(drm_ht_find_item); 179 181 180 182 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) 181 183 { ··· 193 197 hlist_del_init_rcu(&item->head); 194 198 return 0; 195 199 } 196 - EXPORT_SYMBOL(drm_ht_remove_item); 197 200 198 201 void drm_ht_remove(struct drm_open_hash *ht) 199 202 { ··· 201 206 ht->table = NULL; 202 207 } 203 208 } 204 - EXPORT_SYMBOL(drm_ht_remove);
+39 -1
drivers/gpu/drm/drm_legacy.h
··· 35 35 #include <drm/drm_legacy.h> 36 36 37 37 struct agp_memory; 38 + struct drm_buf_desc; 38 39 struct drm_device; 39 40 struct drm_file; 40 - struct drm_buf_desc; 41 + struct drm_hash_item; 42 + struct drm_open_hash; 43 + 44 + /* 45 + * Hash-table Support 46 + */ 47 + 48 + #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) 49 + 50 + /* drm_hashtab.c */ 51 + #if IS_ENABLED(CONFIG_DRM_LEGACY) 52 + int drm_ht_create(struct drm_open_hash *ht, unsigned int order); 53 + int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); 54 + int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, 55 + unsigned long seed, int bits, int shift, 56 + unsigned long add); 57 + int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); 58 + 59 + void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); 60 + int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); 61 + int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); 62 + void drm_ht_remove(struct drm_open_hash *ht); 63 + #endif 64 + 65 + /* 66 + * RCU-safe interface 67 + * 68 + * The user of this API needs to make sure that two or more instances of the 69 + * hash table manipulation functions are never run simultaneously. 70 + * The lookup function drm_ht_find_item_rcu may, however, run simultaneously 71 + * with any of the manipulation functions as long as it's called from within 72 + * an RCU read-locked section. 73 + */ 74 + #define drm_ht_insert_item_rcu drm_ht_insert_item 75 + #define drm_ht_just_insert_please_rcu drm_ht_just_insert_please 76 + #define drm_ht_remove_key_rcu drm_ht_remove_key 77 + #define drm_ht_remove_item_rcu drm_ht_remove_item 78 + #define drm_ht_find_item_rcu drm_ht_find_item 41 79 42 80 /* 43 81 * Generic DRM Contexts
+25 -9
drivers/gpu/drm/drm_mipi_dbi.c
··· 15 15 #include <drm/drm_connector.h> 16 16 #include <drm/drm_damage_helper.h> 17 17 #include <drm/drm_drv.h> 18 - #include <drm/drm_gem_cma_helper.h> 18 + #include <drm/drm_file.h> 19 19 #include <drm/drm_format_helper.h> 20 20 #include <drm/drm_fourcc.h> 21 + #include <drm/drm_gem.h> 21 22 #include <drm/drm_gem_framebuffer_helper.h> 22 23 #include <drm/drm_mipi_dbi.h> 23 24 #include <drm/drm_modes.h> ··· 201 200 struct drm_rect *clip, bool swap) 202 201 { 203 202 struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); 204 - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); 205 - void *src = cma_obj->vaddr; 203 + struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; 204 + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; 205 + void *src; 206 206 int ret; 207 207 208 208 ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); 209 209 if (ret) 210 210 return ret; 211 + src = data[0].vaddr; /* TODO: Use mapping abstraction properly */ 212 + 213 + ret = drm_gem_fb_vmap(fb, map, data); 214 + if (ret) 215 + goto out_drm_gem_fb_end_cpu_access; 211 216 212 217 switch (fb->format->format) { 213 218 case DRM_FORMAT_RGB565: ··· 228 221 default: 229 222 drm_err_once(fb->dev, "Format is not supported: %p4cc\n", 230 223 &fb->format->format); 231 - return -EINVAL; 224 + ret = -EINVAL; 232 225 } 233 226 227 + drm_gem_fb_vunmap(fb, map); 228 + out_drm_gem_fb_end_cpu_access: 234 229 drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); 235 230 236 231 return ret; ··· 258 249 259 250 static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) 260 251 { 261 - struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); 262 - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); 252 + struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; 253 + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; 263 254 struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); 264 255 unsigned int height = rect->y2 - rect->y1; 265 256 unsigned int width = rect->x2 - rect->x1; ··· 275 266 
if (!drm_dev_enter(fb->dev, &idx)) 276 267 return; 277 268 269 + ret = drm_gem_fb_vmap(fb, map, data); 270 + if (ret) 271 + goto err_drm_dev_exit; 272 + 278 273 full = width == fb->width && height == fb->height; 279 274 280 275 DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); ··· 290 277 if (ret) 291 278 goto err_msg; 292 279 } else { 293 - tr = cma_obj->vaddr; 280 + tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */ 294 281 } 295 282 296 283 mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1, ··· 302 289 if (ret) 303 290 drm_err_once(fb->dev, "Failed to update display %d\n", ret); 304 291 292 + drm_gem_fb_vunmap(fb, map); 293 + 294 + err_drm_dev_exit: 305 295 drm_dev_exit(idx); 306 296 } 307 297 ··· 1133 1117 1134 1118 /* 1135 1119 * Even though it's not the SPI device that does DMA (the master does), 1136 - * the dma mask is necessary for the dma_alloc_wc() in 1137 - * drm_gem_cma_create(). The dma_addr returned will be a physical 1120 + * the dma mask is necessary for the dma_alloc_wc() in the GEM code 1121 + * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical 1138 1122 * address which might be different from the bus address, but this is 1139 1123 * not a problem since the address will not be used. 1140 1124 * The virtual address is used in the transfer and the SPI core
+1 -1
drivers/gpu/drm/fsl-dcu/Kconfig
··· 3 3 tristate "DRM Support for Freescale DCU" 4 4 depends on DRM && OF && ARM && COMMON_CLK 5 5 select BACKLIGHT_CLASS_DEVICE 6 + select DRM_GEM_CMA_HELPER 6 7 select DRM_KMS_HELPER 7 - select DRM_KMS_CMA_HELPER 8 8 select DRM_PANEL 9 9 select REGMAP_MMIO 10 10 select VIDEOMODE_HELPERS
-1
drivers/gpu/drm/hisilicon/kirin/Kconfig
··· 4 4 depends on DRM && OF && ARM64 5 5 select DRM_KMS_HELPER 6 6 select DRM_GEM_CMA_HELPER 7 - select DRM_KMS_CMA_HELPER 8 7 select DRM_MIPI_DSI 9 8 help 10 9 Choose this option if you have a hisilicon Kirin chipsets(hi6220).
-1
drivers/gpu/drm/i915/Makefile
··· 60 60 61 61 # core library code 62 62 i915-y += \ 63 - dma_resv_utils.o \ 64 63 i915_memcpy.o \ 65 64 i915_mm.o \ 66 65 i915_sw_fence.o \
-17
drivers/gpu/drm/i915/dma_resv_utils.c
··· 1 - // SPDX-License-Identifier: MIT 2 - /* 3 - * Copyright © 2020 Intel Corporation 4 - */ 5 - 6 - #include <linux/dma-resv.h> 7 - 8 - #include "dma_resv_utils.h" 9 - 10 - void dma_resv_prune(struct dma_resv *resv) 11 - { 12 - if (dma_resv_trylock(resv)) { 13 - if (dma_resv_test_signaled(resv, true)) 14 - dma_resv_add_excl_fence(resv, NULL); 15 - dma_resv_unlock(resv); 16 - } 17 - }
-13
drivers/gpu/drm/i915/dma_resv_utils.h
··· 1 - /* SPDX-License-Identifier: MIT */ 2 - /* 3 - * Copyright © 2020 Intel Corporation 4 - */ 5 - 6 - #ifndef DMA_RESV_UTILS_H 7 - #define DMA_RESV_UTILS_H 8 - 9 - struct dma_resv; 10 - 11 - void dma_resv_prune(struct dma_resv *resv); 12 - 13 - #endif /* DMA_RESV_UTILS_H */
-3
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
··· 15 15 16 16 #include "gt/intel_gt_requests.h" 17 17 18 - #include "dma_resv_utils.h" 19 18 #include "i915_trace.h" 20 19 21 20 static bool swap_available(void) ··· 227 228 if (!ww) 228 229 i915_gem_object_unlock(obj); 229 230 } 230 - 231 - dma_resv_prune(obj->base.resv); 232 231 233 232 scanned += obj->base.size >> PAGE_SHIFT; 234 233 skip:
-8
drivers/gpu/drm/i915/gem/i915_gem_wait.c
··· 10 10 11 11 #include "gt/intel_engine.h" 12 12 13 - #include "dma_resv_utils.h" 14 13 #include "i915_gem_ioctls.h" 15 14 #include "i915_gem_object.h" 16 15 ··· 50 51 timeout = ret; 51 52 } 52 53 dma_resv_iter_end(&cursor); 53 - 54 - /* 55 - * Opportunistically prune the fences iff we know they have *all* been 56 - * signaled. 57 - */ 58 - if (timeout > 0) 59 - dma_resv_prune(resv); 60 54 61 55 return ret; 62 56 }
+1 -1
drivers/gpu/drm/imx/Kconfig
··· 4 4 select DRM_KMS_HELPER 5 5 select VIDEOMODE_HELPERS 6 6 select DRM_GEM_CMA_HELPER 7 - select DRM_KMS_CMA_HELPER 7 + select DRM_KMS_HELPER 8 8 depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) 9 9 depends on IMX_IPUV3_CORE 10 10 help
+1 -1
drivers/gpu/drm/imx/dcss/Kconfig
··· 1 1 config DRM_IMX_DCSS 2 2 tristate "i.MX8MQ DCSS" 3 3 select IMX_IRQSTEER 4 - select DRM_KMS_CMA_HELPER 4 + select DRM_KMS_HELPER 5 5 select VIDEOMODE_HELPERS 6 6 depends on DRM && ARCH_MXC && ARM64 7 7 help
-1
drivers/gpu/drm/ingenic/Kconfig
··· 8 8 select DRM_BRIDGE 9 9 select DRM_PANEL_BRIDGE 10 10 select DRM_KMS_HELPER 11 - select DRM_KMS_CMA_HELPER 12 11 select DRM_GEM_CMA_HELPER 13 12 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE 14 13 help
-1
drivers/gpu/drm/kmb/Kconfig
··· 3 3 depends on DRM 4 4 depends on ARCH_KEEMBAY || COMPILE_TEST 5 5 select DRM_KMS_HELPER 6 - select DRM_KMS_CMA_HELPER 7 6 select DRM_GEM_CMA_HELPER 8 7 select DRM_MIPI_DSI 9 8 help
+1 -1
drivers/gpu/drm/lima/lima_gem.c
··· 221 221 222 222 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 223 223 if (!bo) 224 - return NULL; 224 + return ERR_PTR(-ENOMEM); 225 225 226 226 mutex_init(&bo->lock); 227 227 INIT_LIST_HEAD(&bo->va);
-1
drivers/gpu/drm/mcde/Kconfig
··· 10 10 select DRM_BRIDGE 11 11 select DRM_PANEL_BRIDGE 12 12 select DRM_KMS_HELPER 13 - select DRM_KMS_CMA_HELPER 14 13 select DRM_GEM_CMA_HELPER 15 14 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE 16 15 help
-1
drivers/gpu/drm/meson/Kconfig
··· 4 4 depends on DRM && OF && (ARM || ARM64) 5 5 depends on ARCH_MESON || COMPILE_TEST 6 6 select DRM_KMS_HELPER 7 - select DRM_KMS_CMA_HELPER 8 7 select DRM_GEM_CMA_HELPER 9 8 select DRM_DISPLAY_CONNECTOR 10 9 select VIDEOMODE_HELPERS
+1 -1
drivers/gpu/drm/mxsfb/Kconfig
··· 10 10 depends on COMMON_CLK 11 11 select DRM_MXS 12 12 select DRM_KMS_HELPER 13 - select DRM_KMS_CMA_HELPER 13 + select DRM_GEM_CMA_HELPER 14 14 select DRM_PANEL 15 15 select DRM_PANEL_BRIDGE 16 16 help
+1
drivers/gpu/drm/omapdrm/Makefile
··· 9 9 omap_debugfs.o \ 10 10 omap_crtc.o \ 11 11 omap_plane.o \ 12 + omap_overlay.o \ 12 13 omap_encoder.o \ 13 14 omap_fb.o \ 14 15 omap_gem.o \
+31 -4
drivers/gpu/drm/omapdrm/dss/dispc.c
··· 92 92 u8 mgr_height_start; 93 93 u16 mgr_width_max; 94 94 u16 mgr_height_max; 95 + u16 ovl_width_max; 96 + u16 ovl_height_max; 95 97 unsigned long max_lcd_pclk; 96 98 unsigned long max_tv_pclk; 97 99 unsigned int max_downscale; ··· 1281 1279 return dispc->feat->burst_size_unit * 8; 1282 1280 } 1283 1281 1284 - static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, 1285 - enum omap_plane_id plane, u32 fourcc) 1282 + bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, 1283 + enum omap_plane_id plane, u32 fourcc) 1286 1284 { 1287 1285 const u32 *modes; 1288 1286 unsigned int i; ··· 2489 2487 return 0; 2490 2488 } 2491 2489 2490 + enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane) 2491 + { 2492 + return dispc->feat->overlay_caps[plane]; 2493 + } 2494 + 2492 2495 #define DIV_FRAC(dividend, divisor) \ 2493 2496 ((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100)) 2494 2497 ··· 2604 2597 *x_predecim = decim_x; 2605 2598 *y_predecim = decim_y; 2606 2599 return 0; 2600 + } 2601 + 2602 + void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height) 2603 + { 2604 + *width = dispc->feat->ovl_width_max; 2605 + *height = dispc->feat->ovl_height_max; 2607 2606 } 2608 2607 2609 2608 static int dispc_ovl_setup_common(struct dispc_device *dispc, ··· 4253 4240 .mgr_height_start = 26, 4254 4241 .mgr_width_max = 2048, 4255 4242 .mgr_height_max = 2048, 4243 + .ovl_width_max = 2048, 4244 + .ovl_height_max = 2048, 4256 4245 .max_lcd_pclk = 66500000, 4257 4246 .max_downscale = 2, 4258 4247 /* ··· 4293 4278 .mgr_height_start = 26, 4294 4279 .mgr_width_max = 2048, 4295 4280 .mgr_height_max = 2048, 4281 + .ovl_width_max = 2048, 4282 + .ovl_height_max = 2048, 4296 4283 .max_lcd_pclk = 173000000, 4297 4284 .max_tv_pclk = 59000000, 4298 4285 .max_downscale = 4, ··· 4330 4313 .mgr_height_start = 26, 4331 4314 .mgr_width_max = 2048, 4332 4315 .mgr_height_max = 2048, 4316 + .ovl_width_max = 
2048, 4317 + .ovl_height_max = 2048, 4333 4318 .max_lcd_pclk = 173000000, 4334 4319 .max_tv_pclk = 59000000, 4335 4320 .max_downscale = 4, ··· 4367 4348 .mgr_height_start = 26, 4368 4349 .mgr_width_max = 2048, 4369 4350 .mgr_height_max = 2048, 4351 + .ovl_width_max = 2048, 4352 + .ovl_height_max = 2048, 4370 4353 .max_lcd_pclk = 173000000, 4371 4354 .max_tv_pclk = 59000000, 4372 4355 .max_downscale = 4, ··· 4404 4383 .mgr_height_start = 26, 4405 4384 .mgr_width_max = 2048, 4406 4385 .mgr_height_max = 2048, 4386 + .ovl_width_max = 2048, 4387 + .ovl_height_max = 2048, 4407 4388 .max_lcd_pclk = 173000000, 4408 4389 .max_tv_pclk = 59000000, 4409 4390 .max_downscale = 4, ··· 4441 4418 .mgr_height_start = 26, 4442 4419 .mgr_width_max = 2048, 4443 4420 .mgr_height_max = 2048, 4421 + .ovl_width_max = 2048, 4422 + .ovl_height_max = 2048, 4444 4423 .max_lcd_pclk = 170000000, 4445 4424 .max_tv_pclk = 185625000, 4446 4425 .max_downscale = 4, ··· 4482 4457 .mgr_height_start = 27, 4483 4458 .mgr_width_max = 4096, 4484 4459 .mgr_height_max = 4096, 4460 + .ovl_width_max = 2048, 4461 + .ovl_height_max = 4096, 4485 4462 .max_lcd_pclk = 170000000, 4486 4463 .max_tv_pclk = 192000000, 4487 4464 .max_downscale = 4, ··· 4869 4842 return 0; 4870 4843 } 4871 4844 4872 - static int dispc_runtime_suspend(struct device *dev) 4845 + static __maybe_unused int dispc_runtime_suspend(struct device *dev) 4873 4846 { 4874 4847 struct dispc_device *dispc = dev_get_drvdata(dev); 4875 4848 ··· 4884 4857 return 0; 4885 4858 } 4886 4859 4887 - static int dispc_runtime_resume(struct device *dev) 4860 + static __maybe_unused int dispc_runtime_resume(struct device *dev) 4888 4861 { 4889 4862 struct dispc_device *dispc = dev_get_drvdata(dev); 4890 4863
+2 -2
drivers/gpu/drm/omapdrm/dss/dsi.c
··· 5058 5058 return 0; 5059 5059 } 5060 5060 5061 - static int dsi_runtime_suspend(struct device *dev) 5061 + static __maybe_unused int dsi_runtime_suspend(struct device *dev) 5062 5062 { 5063 5063 struct dsi_data *dsi = dev_get_drvdata(dev); 5064 5064 ··· 5071 5071 return 0; 5072 5072 } 5073 5073 5074 - static int dsi_runtime_resume(struct device *dev) 5074 + static __maybe_unused int dsi_runtime_resume(struct device *dev) 5075 5075 { 5076 5076 struct dsi_data *dsi = dev_get_drvdata(dev); 5077 5077
+2 -2
drivers/gpu/drm/omapdrm/dss/dss.c
··· 1569 1569 DSSDBG("shutdown\n"); 1570 1570 } 1571 1571 1572 - static int dss_runtime_suspend(struct device *dev) 1572 + static __maybe_unused int dss_runtime_suspend(struct device *dev) 1573 1573 { 1574 1574 struct dss_device *dss = dev_get_drvdata(dev); 1575 1575 ··· 1581 1581 return 0; 1582 1582 } 1583 1583 1584 - static int dss_runtime_resume(struct device *dev) 1584 + static __maybe_unused int dss_runtime_resume(struct device *dev) 1585 1585 { 1586 1586 struct dss_device *dss = dev_get_drvdata(dev); 1587 1587 int r;
+5
drivers/gpu/drm/omapdrm/dss/dss.h
··· 397 397 const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc, 398 398 enum omap_plane_id plane); 399 399 400 + void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height); 401 + bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, 402 + enum omap_plane_id plane, u32 fourcc); 403 + enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane); 404 + 400 405 u32 dispc_read_irqstatus(struct dispc_device *dispc); 401 406 void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask); 402 407 void dispc_write_irqenable(struct dispc_device *dispc, u32 mask);
+2 -2
drivers/gpu/drm/omapdrm/dss/venc.c
··· 879 879 return 0; 880 880 } 881 881 882 - static int venc_runtime_suspend(struct device *dev) 882 + static __maybe_unused int venc_runtime_suspend(struct device *dev) 883 883 { 884 884 struct venc_device *venc = dev_get_drvdata(dev); 885 885 ··· 889 889 return 0; 890 890 } 891 891 892 - static int venc_runtime_resume(struct device *dev) 892 + static __maybe_unused int venc_runtime_resume(struct device *dev) 893 893 { 894 894 struct venc_device *venc = dev_get_drvdata(dev); 895 895
+192 -4
drivers/gpu/drm/omapdrm/omap_drv.c
··· 117 117 dispc_runtime_put(priv->dispc); 118 118 } 119 119 120 + static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b) 121 + { 122 + const struct drm_plane_state *sa = *(struct drm_plane_state **)a; 123 + const struct drm_plane_state *sb = *(struct drm_plane_state **)b; 124 + 125 + if (sa->normalized_zpos != sb->normalized_zpos) 126 + return sa->normalized_zpos - sb->normalized_zpos; 127 + else 128 + return sa->plane->base.id - sb->plane->base.id; 129 + } 130 + 131 + /* 132 + * This replaces the drm_atomic_normalize_zpos to handle the dual overlay case. 133 + * 134 + * Since both halves need to be 'appear' side by side the zpos is 135 + * recalculated when dealing with dual overlay cases so that the other 136 + * planes zpos is consistent. 137 + */ 138 + static int omap_atomic_update_normalize_zpos(struct drm_device *dev, 139 + struct drm_atomic_state *state) 140 + { 141 + struct drm_crtc *crtc; 142 + struct drm_crtc_state *old_state, *new_state; 143 + struct drm_plane *plane; 144 + int c, i, n, inc; 145 + int total_planes = dev->mode_config.num_total_plane; 146 + struct drm_plane_state **states; 147 + int ret = 0; 148 + 149 + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); 150 + if (!states) 151 + return -ENOMEM; 152 + 153 + for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) { 154 + if (old_state->plane_mask == new_state->plane_mask && 155 + !new_state->zpos_changed) 156 + continue; 157 + 158 + /* Reset plane increment and index value for every crtc */ 159 + n = 0; 160 + 161 + /* 162 + * Normalization process might create new states for planes 163 + * which normalized_zpos has to be recalculated. 
164 + */ 165 + drm_for_each_plane_mask(plane, dev, new_state->plane_mask) { 166 + struct drm_plane_state *plane_state = 167 + drm_atomic_get_plane_state(new_state->state, 168 + plane); 169 + if (IS_ERR(plane_state)) { 170 + ret = PTR_ERR(plane_state); 171 + goto done; 172 + } 173 + states[n++] = plane_state; 174 + } 175 + 176 + sort(states, n, sizeof(*states), 177 + drm_atomic_state_normalized_zpos_cmp, NULL); 178 + 179 + for (i = 0, inc = 0; i < n; i++) { 180 + plane = states[i]->plane; 181 + 182 + states[i]->normalized_zpos = i + inc; 183 + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n", 184 + plane->base.id, plane->name, 185 + states[i]->normalized_zpos); 186 + 187 + if (is_omap_plane_dual_overlay(states[i])) 188 + inc++; 189 + } 190 + new_state->zpos_changed = true; 191 + } 192 + 193 + done: 194 + kfree(states); 195 + return ret; 196 + } 197 + 198 + static int omap_atomic_check(struct drm_device *dev, 199 + struct drm_atomic_state *state) 200 + { 201 + int ret; 202 + 203 + ret = drm_atomic_helper_check(dev, state); 204 + if (ret) 205 + return ret; 206 + 207 + if (dev->mode_config.normalize_zpos) { 208 + ret = omap_atomic_update_normalize_zpos(dev, state); 209 + if (ret) 210 + return ret; 211 + } 212 + 213 + return 0; 214 + } 215 + 120 216 static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = { 121 217 .atomic_commit_tail = omap_atomic_commit_tail, 122 218 }; ··· 220 124 static const struct drm_mode_config_funcs omap_mode_config_funcs = { 221 125 .fb_create = omap_framebuffer_create, 222 126 .output_poll_changed = drm_fb_helper_output_poll_changed, 223 - .atomic_check = drm_atomic_helper_check, 127 + .atomic_check = omap_atomic_check, 224 128 .atomic_commit = drm_atomic_helper_commit, 225 129 }; 130 + 131 + /* Global/shared object state funcs */ 132 + 133 + /* 134 + * This is a helper that returns the private state currently in operation. 
135 + * Note that this would return the "old_state" if called in the atomic check 136 + * path, and the "new_state" after the atomic swap has been done. 137 + */ 138 + struct omap_global_state * 139 + omap_get_existing_global_state(struct omap_drm_private *priv) 140 + { 141 + return to_omap_global_state(priv->glob_obj.state); 142 + } 143 + 144 + /* 145 + * This acquires the modeset lock set aside for global state, creates 146 + * a new duplicated private object state. 147 + */ 148 + struct omap_global_state *__must_check 149 + omap_get_global_state(struct drm_atomic_state *s) 150 + { 151 + struct omap_drm_private *priv = s->dev->dev_private; 152 + struct drm_private_state *priv_state; 153 + 154 + priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj); 155 + if (IS_ERR(priv_state)) 156 + return ERR_CAST(priv_state); 157 + 158 + return to_omap_global_state(priv_state); 159 + } 160 + 161 + static struct drm_private_state * 162 + omap_global_duplicate_state(struct drm_private_obj *obj) 163 + { 164 + struct omap_global_state *state; 165 + 166 + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); 167 + if (!state) 168 + return NULL; 169 + 170 + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 171 + 172 + return &state->base; 173 + } 174 + 175 + static void omap_global_destroy_state(struct drm_private_obj *obj, 176 + struct drm_private_state *state) 177 + { 178 + struct omap_global_state *omap_state = to_omap_global_state(state); 179 + 180 + kfree(omap_state); 181 + } 182 + 183 + static const struct drm_private_state_funcs omap_global_state_funcs = { 184 + .atomic_duplicate_state = omap_global_duplicate_state, 185 + .atomic_destroy_state = omap_global_destroy_state, 186 + }; 187 + 188 + static int omap_global_obj_init(struct drm_device *dev) 189 + { 190 + struct omap_drm_private *priv = dev->dev_private; 191 + struct omap_global_state *state; 192 + 193 + state = kzalloc(sizeof(*state), GFP_KERNEL); 194 + if (!state) 195 + return -ENOMEM; 
196 + 197 + drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base, 198 + &omap_global_state_funcs); 199 + return 0; 200 + } 201 + 202 + static void omap_global_obj_fini(struct omap_drm_private *priv) 203 + { 204 + drm_atomic_private_obj_fini(&priv->glob_obj); 205 + } 226 206 227 207 static void omap_disconnect_pipelines(struct drm_device *ddev) 228 208 { ··· 402 230 403 231 if (!omapdss_stack_is_ready()) 404 232 return -EPROBE_DEFER; 405 - 406 - drm_mode_config_init(dev); 407 233 408 234 ret = omap_modeset_init_properties(dev); 409 235 if (ret < 0) ··· 753 583 754 584 omap_gem_init(ddev); 755 585 586 + drm_mode_config_init(ddev); 587 + 588 + ret = omap_global_obj_init(ddev); 589 + if (ret) 590 + goto err_gem_deinit; 591 + 592 + ret = omap_hwoverlays_init(priv); 593 + if (ret) 594 + goto err_free_priv_obj; 595 + 756 596 ret = omap_modeset_init(ddev); 757 597 if (ret) { 758 598 dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret); 759 - goto err_gem_deinit; 599 + goto err_free_overlays; 760 600 } 761 601 762 602 /* Initialize vblank handling, start with all CRTCs disabled. */ ··· 798 618 omap_fbdev_fini(ddev); 799 619 err_cleanup_modeset: 800 620 omap_modeset_fini(ddev); 621 + err_free_overlays: 622 + omap_hwoverlays_destroy(priv); 623 + err_free_priv_obj: 624 + omap_global_obj_fini(priv); 801 625 err_gem_deinit: 626 + drm_mode_config_cleanup(ddev); 802 627 omap_gem_deinit(ddev); 803 628 destroy_workqueue(priv->wq); 804 629 omap_disconnect_pipelines(ddev); ··· 827 642 drm_atomic_helper_shutdown(ddev); 828 643 829 644 omap_modeset_fini(ddev); 645 + omap_hwoverlays_destroy(priv); 646 + omap_global_obj_fini(priv); 647 + drm_mode_config_cleanup(ddev); 830 648 omap_gem_deinit(ddev); 831 649 832 650 destroy_workqueue(priv->wq);
+24
drivers/gpu/drm/omapdrm/omap_drv.h
··· 14 14 #include "dss/omapdss.h" 15 15 #include "dss/dss.h" 16 16 17 + #include <drm/drm_atomic.h> 17 18 #include <drm/drm_gem.h> 18 19 #include <drm/omap_drm.h> 19 20 ··· 25 24 #include "omap_gem.h" 26 25 #include "omap_irq.h" 27 26 #include "omap_plane.h" 27 + #include "omap_overlay.h" 28 28 29 29 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) 30 30 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */ ··· 40 38 struct drm_connector *connector; 41 39 struct omap_dss_device *output; 42 40 unsigned int alias_id; 41 + }; 42 + 43 + /* 44 + * Global private object state for tracking resources that are shared across 45 + * multiple kms objects (planes/crtcs/etc). 46 + */ 47 + #define to_omap_global_state(x) container_of(x, struct omap_global_state, base) 48 + 49 + struct omap_global_state { 50 + struct drm_private_state base; 51 + 52 + /* global atomic state of assignment between overlays and planes */ 53 + struct drm_plane *hwoverlay_to_plane[8]; 43 54 }; 44 55 45 56 struct omap_drm_private { ··· 71 56 72 57 unsigned int num_planes; 73 58 struct drm_plane *planes[8]; 59 + 60 + unsigned int num_ovls; 61 + struct omap_hw_overlay *overlays[8]; 62 + 63 + struct drm_private_obj glob_obj; 74 64 75 65 struct drm_fb_helper *fbdev; 76 66 ··· 104 84 105 85 106 86 void omap_debugfs_init(struct drm_minor *minor); 87 + 88 + struct omap_global_state * __must_check omap_get_global_state(struct drm_atomic_state *s); 89 + 90 + struct omap_global_state *omap_get_existing_global_state(struct omap_drm_private *priv); 107 91 108 92 #endif /* __OMAPDRM_DRV_H__ */
+32 -1
drivers/gpu/drm/omapdrm/omap_fb.c
··· 131 131 /* update ovl info for scanout, handles cases of multi-planar fb's, etc. 132 132 */ 133 133 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 134 - struct drm_plane_state *state, struct omap_overlay_info *info) 134 + struct drm_plane_state *state, 135 + struct omap_overlay_info *info, 136 + struct omap_overlay_info *r_info) 135 137 { 136 138 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 137 139 const struct drm_format_info *format = omap_fb->format; ··· 219 217 } 220 218 } else { 221 219 info->p_uv_addr = 0; 220 + } 221 + 222 + if (r_info) { 223 + info->width /= 2; 224 + info->out_width /= 2; 225 + 226 + *r_info = *info; 227 + 228 + if (fb->format->is_yuv) { 229 + if (info->width & 1) { 230 + info->width++; 231 + r_info->width--; 232 + } 233 + 234 + if (info->out_width & 1) { 235 + info->out_width++; 236 + r_info->out_width--; 237 + } 238 + } 239 + 240 + r_info->pos_x = info->pos_x + info->out_width; 241 + 242 + r_info->paddr = get_linear_addr(fb, format, 0, 243 + x + info->width, y); 244 + if (fb->format->format == DRM_FORMAT_NV12) { 245 + r_info->p_uv_addr = 246 + get_linear_addr(fb, format, 1, 247 + x + info->width, y); 248 + } 222 249 } 223 250 } 224 251
+3 -1
drivers/gpu/drm/omapdrm/omap_fb.h
··· 26 26 int omap_framebuffer_pin(struct drm_framebuffer *fb); 27 27 void omap_framebuffer_unpin(struct drm_framebuffer *fb); 28 28 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 29 - struct drm_plane_state *state, struct omap_overlay_info *info); 29 + struct drm_plane_state *state, 30 + struct omap_overlay_info *info, 31 + struct omap_overlay_info *r_info); 30 32 bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb); 31 33 void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); 32 34
+78 -1
drivers/gpu/drm/omapdrm/omap_gem.c
··· 789 789 if (omap_obj->flags & OMAP_BO_TILED_MASK) { 790 790 block = tiler_reserve_2d(fmt, 791 791 omap_obj->width, 792 - omap_obj->height, 0); 792 + omap_obj->height, PAGE_SIZE); 793 793 } else { 794 794 block = tiler_reserve_1d(obj->size); 795 795 } ··· 851 851 return; 852 852 853 853 if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { 854 + if (omap_obj->sgt) { 855 + sg_free_table(omap_obj->sgt); 856 + kfree(omap_obj->sgt); 857 + omap_obj->sgt = NULL; 858 + } 854 859 ret = tiler_unpin(omap_obj->block); 855 860 if (ret) { 856 861 dev_err(obj->dev->dev, ··· 966 961 * released the pages.. 967 962 */ 968 963 return 0; 964 + } 965 + 966 + struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj) 967 + { 968 + struct omap_gem_object *omap_obj = to_omap_bo(obj); 969 + dma_addr_t addr; 970 + struct sg_table *sgt; 971 + struct scatterlist *sg; 972 + unsigned int count, len, stride, i; 973 + int ret; 974 + 975 + ret = omap_gem_pin(obj, &addr); 976 + if (ret) 977 + return ERR_PTR(ret); 978 + 979 + mutex_lock(&omap_obj->lock); 980 + 981 + sgt = omap_obj->sgt; 982 + if (sgt) 983 + goto out; 984 + 985 + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 986 + if (!sgt) { 987 + ret = -ENOMEM; 988 + goto err_unpin; 989 + } 990 + 991 + if (omap_obj->flags & OMAP_BO_TILED_MASK) { 992 + enum tiler_fmt fmt = gem2fmt(omap_obj->flags); 993 + 994 + len = omap_obj->width << (int)fmt; 995 + count = omap_obj->height; 996 + stride = tiler_stride(fmt, 0); 997 + } else { 998 + len = obj->size; 999 + count = 1; 1000 + stride = 0; 1001 + } 1002 + 1003 + ret = sg_alloc_table(sgt, count, GFP_KERNEL); 1004 + if (ret) 1005 + goto err_free; 1006 + 1007 + for_each_sg(sgt->sgl, sg, count, i) { 1008 + sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr)); 1009 + sg_dma_address(sg) = addr; 1010 + sg_dma_len(sg) = len; 1011 + 1012 + addr += stride; 1013 + } 1014 + 1015 + omap_obj->sgt = sgt; 1016 + out: 1017 + mutex_unlock(&omap_obj->lock); 1018 + return sgt; 1019 + 1020 + err_free: 1021 + 
kfree(sgt); 1022 + err_unpin: 1023 + mutex_unlock(&omap_obj->lock); 1024 + omap_gem_unpin(obj); 1025 + return ERR_PTR(ret); 1026 + } 1027 + 1028 + void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt) 1029 + { 1030 + struct omap_gem_object *omap_obj = to_omap_bo(obj); 1031 + 1032 + if (WARN_ON(omap_obj->sgt != sgt)) 1033 + return; 1034 + 1035 + omap_gem_unpin(obj); 969 1036 } 970 1037 971 1038 #ifdef CONFIG_DRM_FBDEV_EMULATION
+2
drivers/gpu/drm/omapdrm/omap_gem.h
··· 82 82 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, 83 83 int x, int y, dma_addr_t *dma_addr); 84 84 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient); 85 + struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj); 86 + void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt); 85 87 86 88 #endif /* __OMAPDRM_GEM_H__ */
+5 -29
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
··· 23 23 { 24 24 struct drm_gem_object *obj = attachment->dmabuf->priv; 25 25 struct sg_table *sg; 26 - dma_addr_t dma_addr; 27 - int ret; 28 - 29 - sg = kzalloc(sizeof(*sg), GFP_KERNEL); 30 - if (!sg) 31 - return ERR_PTR(-ENOMEM); 32 - 33 - /* camera, etc, need physically contiguous.. but we need a 34 - * better way to know this.. 35 - */ 36 - ret = omap_gem_pin(obj, &dma_addr); 37 - if (ret) 38 - goto out; 39 - 40 - ret = sg_alloc_table(sg, 1, GFP_KERNEL); 41 - if (ret) 42 - goto out; 43 - 44 - sg_init_table(sg->sgl, 1); 45 - sg_dma_len(sg->sgl) = obj->size; 46 - sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0); 47 - sg_dma_address(sg->sgl) = dma_addr; 26 + sg = omap_gem_get_sg(obj); 27 + if (IS_ERR(sg)) 28 + return sg; 48 29 49 30 /* this must be after omap_gem_pin() to ensure we have pages attached */ 50 31 omap_gem_dma_sync_buffer(obj, dir); 51 32 52 33 return sg; 53 - out: 54 - kfree(sg); 55 - return ERR_PTR(ret); 56 34 } 57 35 58 36 static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, 59 37 struct sg_table *sg, enum dma_data_direction dir) 60 38 { 61 39 struct drm_gem_object *obj = attachment->dmabuf->priv; 62 - omap_gem_unpin(obj); 63 - sg_free_table(sg); 64 - kfree(sg); 40 + omap_gem_put_sg(obj, sg); 65 41 } 66 42 67 43 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, ··· 90 114 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 91 115 92 116 exp_info.ops = &omap_dmabuf_ops; 93 - exp_info.size = obj->size; 117 + exp_info.size = omap_gem_mmap_size(obj); 94 118 exp_info.flags = flags; 95 119 exp_info.priv = obj; 96 120
+212
drivers/gpu/drm/omapdrm/omap_overlay.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ 4 + * Author: Benoit Parrot <bparrot@ti.com> 5 + */ 6 + 7 + #include <drm/drm_atomic.h> 8 + #include <drm/drm_atomic_helper.h> 9 + #include <drm/drm_plane_helper.h> 10 + 11 + #include "omap_dmm_tiler.h" 12 + #include "omap_drv.h" 13 + 14 + /* 15 + * overlay funcs 16 + */ 17 + static const char * const overlay_id_to_name[] = { 18 + [OMAP_DSS_GFX] = "gfx", 19 + [OMAP_DSS_VIDEO1] = "vid1", 20 + [OMAP_DSS_VIDEO2] = "vid2", 21 + [OMAP_DSS_VIDEO3] = "vid3", 22 + }; 23 + 24 + /* 25 + * Find a free overlay with the required caps and supported fourcc 26 + */ 27 + static struct omap_hw_overlay * 28 + omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[], 29 + u32 caps, u32 fourcc) 30 + { 31 + struct omap_drm_private *priv = dev->dev_private; 32 + int i; 33 + 34 + DBG("caps: %x fourcc: %x", caps, fourcc); 35 + 36 + for (i = 0; i < priv->num_ovls; i++) { 37 + struct omap_hw_overlay *cur = priv->overlays[i]; 38 + 39 + DBG("%d: id: %d cur->caps: %x", 40 + cur->idx, cur->id, cur->caps); 41 + 42 + /* skip if already in-use */ 43 + if (hwoverlay_to_plane[cur->idx]) 44 + continue; 45 + 46 + /* skip if doesn't support some required caps: */ 47 + if (caps & ~cur->caps) 48 + continue; 49 + 50 + /* check supported format */ 51 + if (!dispc_ovl_color_mode_supported(priv->dispc, 52 + cur->id, fourcc)) 53 + continue; 54 + 55 + return cur; 56 + } 57 + 58 + DBG("no match"); 59 + return NULL; 60 + } 61 + 62 + /* 63 + * Assign a new overlay to a plane with the required caps and supported fourcc 64 + * If a plane need a new overlay, the previous one should have been released 65 + * with omap_overlay_release() 66 + * This should be called from the plane atomic_check() in order to prepare the 67 + * next global overlay_map to be enabled when atomic transaction is valid. 
68 + */ 69 + int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane, 70 + u32 caps, u32 fourcc, struct omap_hw_overlay **overlay, 71 + struct omap_hw_overlay **r_overlay) 72 + { 73 + /* Get the global state of the current atomic transaction */ 74 + struct omap_global_state *state = omap_get_global_state(s); 75 + struct drm_plane **overlay_map = state->hwoverlay_to_plane; 76 + struct omap_hw_overlay *ovl, *r_ovl; 77 + 78 + ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc); 79 + if (!ovl) 80 + return -ENOMEM; 81 + 82 + overlay_map[ovl->idx] = plane; 83 + *overlay = ovl; 84 + 85 + if (r_overlay) { 86 + r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map, 87 + caps, fourcc); 88 + if (!r_ovl) { 89 + overlay_map[r_ovl->idx] = NULL; 90 + *overlay = NULL; 91 + return -ENOMEM; 92 + } 93 + 94 + overlay_map[r_ovl->idx] = plane; 95 + *r_overlay = r_ovl; 96 + } 97 + 98 + DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps); 99 + 100 + if (r_overlay) { 101 + DBG("%s: assign to right of plane %s caps %x", 102 + r_ovl->name, plane->name, caps); 103 + } 104 + 105 + return 0; 106 + } 107 + 108 + /* 109 + * Release an overlay from a plane if the plane gets not visible or the plane 110 + * need a new overlay if overlay caps changes. 111 + * This should be called from the plane atomic_check() in order to prepare the 112 + * next global overlay_map to be enabled when atomic transaction is valid. 
113 + */ 114 + void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay) 115 + { 116 + /* Get the global state of the current atomic transaction */ 117 + struct omap_global_state *state = omap_get_global_state(s); 118 + struct drm_plane **overlay_map = state->hwoverlay_to_plane; 119 + 120 + if (!overlay) 121 + return; 122 + 123 + if (WARN_ON(!overlay_map[overlay->idx])) 124 + return; 125 + 126 + DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name); 127 + 128 + overlay_map[overlay->idx] = NULL; 129 + } 130 + 131 + /* 132 + * Update an overlay state that was attached to a plane before the current atomic state. 133 + * This should be called from the plane atomic_update() or atomic_disable(), 134 + * where an overlay association to a plane could have changed between the old and current 135 + * atomic state. 136 + */ 137 + void omap_overlay_update_state(struct omap_drm_private *priv, 138 + struct omap_hw_overlay *overlay) 139 + { 140 + struct omap_global_state *state = omap_get_existing_global_state(priv); 141 + struct drm_plane **overlay_map = state->hwoverlay_to_plane; 142 + 143 + /* Check if this overlay is not used anymore, then disable it */ 144 + if (!overlay_map[overlay->idx]) { 145 + DBG("%s: disabled", overlay->name); 146 + 147 + /* disable the overlay */ 148 + dispc_ovl_enable(priv->dispc, overlay->id, false); 149 + } 150 + } 151 + 152 + static void omap_overlay_destroy(struct omap_hw_overlay *overlay) 153 + { 154 + kfree(overlay); 155 + } 156 + 157 + static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id, 158 + enum omap_overlay_caps caps) 159 + { 160 + struct omap_hw_overlay *overlay; 161 + 162 + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 163 + if (!overlay) 164 + return ERR_PTR(-ENOMEM); 165 + 166 + overlay->name = overlay_id_to_name[overlay_id]; 167 + overlay->id = overlay_id; 168 + overlay->caps = caps; 169 + 170 + return overlay; 171 + } 172 + 173 + int 
omap_hwoverlays_init(struct omap_drm_private *priv) 174 + { 175 + static const enum omap_plane_id hw_plane_ids[] = { 176 + OMAP_DSS_GFX, OMAP_DSS_VIDEO1, 177 + OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3, 178 + }; 179 + u32 num_overlays = dispc_get_num_ovls(priv->dispc); 180 + enum omap_overlay_caps caps; 181 + int i, ret; 182 + 183 + for (i = 0; i < num_overlays; i++) { 184 + struct omap_hw_overlay *overlay; 185 + 186 + caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]); 187 + overlay = omap_overlay_init(hw_plane_ids[i], caps); 188 + if (IS_ERR(overlay)) { 189 + ret = PTR_ERR(overlay); 190 + dev_err(priv->dev, "failed to construct overlay for %s (%d)\n", 191 + overlay_id_to_name[i], ret); 192 + omap_hwoverlays_destroy(priv); 193 + return ret; 194 + } 195 + overlay->idx = priv->num_ovls; 196 + priv->overlays[priv->num_ovls++] = overlay; 197 + } 198 + 199 + return 0; 200 + } 201 + 202 + void omap_hwoverlays_destroy(struct omap_drm_private *priv) 203 + { 204 + int i; 205 + 206 + for (i = 0; i < priv->num_ovls; i++) { 207 + omap_overlay_destroy(priv->overlays[i]); 208 + priv->overlays[i] = NULL; 209 + } 210 + 211 + priv->num_ovls = 0; 212 + }
+35
drivers/gpu/drm/omapdrm/omap_overlay.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ 4 + * Author: Benoit Parrot <bparrot@ti.com> 5 + */ 6 + 7 + #ifndef __OMAPDRM_OVERLAY_H__ 8 + #define __OMAPDRM_OVERLAY_H__ 9 + 10 + #include <linux/types.h> 11 + 12 + enum drm_plane_type; 13 + 14 + struct drm_device; 15 + struct drm_mode_object; 16 + struct drm_plane; 17 + 18 + /* Used to associate a HW overlay/plane to a plane */ 19 + struct omap_hw_overlay { 20 + unsigned int idx; 21 + 22 + const char *name; 23 + enum omap_plane_id id; 24 + 25 + enum omap_overlay_caps caps; 26 + }; 27 + 28 + int omap_hwoverlays_init(struct omap_drm_private *priv); 29 + void omap_hwoverlays_destroy(struct omap_drm_private *priv); 30 + int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane, 31 + u32 caps, u32 fourcc, struct omap_hw_overlay **overlay, 32 + struct omap_hw_overlay **r_overlay); 33 + void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay); 34 + void omap_overlay_update_state(struct omap_drm_private *priv, struct omap_hw_overlay *overlay); 35 + #endif /* __OMAPDRM_OVERLAY_H__ */
+300 -53
drivers/gpu/drm/omapdrm/omap_plane.c
··· 8 8 #include <drm/drm_atomic_helper.h> 9 9 #include <drm/drm_gem_atomic_helper.h> 10 10 #include <drm/drm_plane_helper.h> 11 + #include <drm/drm_fourcc.h> 11 12 12 13 #include "omap_dmm_tiler.h" 13 14 #include "omap_drv.h" ··· 17 16 * plane funcs 18 17 */ 19 18 19 + #define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base) 20 + 21 + struct omap_plane_state { 22 + /* Must be first. */ 23 + struct drm_plane_state base; 24 + 25 + struct omap_hw_overlay *overlay; 26 + struct omap_hw_overlay *r_overlay; /* right overlay */ 27 + }; 28 + 20 29 #define to_omap_plane(x) container_of(x, struct omap_plane, base) 21 30 22 31 struct omap_plane { 23 32 struct drm_plane base; 24 33 enum omap_plane_id id; 25 - const char *name; 26 34 }; 35 + 36 + bool is_omap_plane_dual_overlay(struct drm_plane_state *state) 37 + { 38 + struct omap_plane_state *omap_state = to_omap_plane_state(state); 39 + 40 + return !!omap_state->r_overlay; 41 + } 27 42 28 43 static int omap_plane_prepare_fb(struct drm_plane *plane, 29 44 struct drm_plane_state *new_state) ··· 63 46 struct drm_atomic_state *state) 64 47 { 65 48 struct omap_drm_private *priv = plane->dev->dev_private; 66 - struct omap_plane *omap_plane = to_omap_plane(plane); 67 49 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 68 50 plane); 69 - struct omap_overlay_info info; 51 + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, 52 + plane); 53 + struct omap_plane_state *new_omap_state; 54 + struct omap_plane_state *old_omap_state; 55 + struct omap_overlay_info info, r_info; 56 + enum omap_plane_id ovl_id, r_ovl_id; 70 57 int ret; 58 + bool dual_ovl; 71 59 72 - DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc, 60 + new_omap_state = to_omap_plane_state(new_state); 61 + old_omap_state = to_omap_plane_state(old_state); 62 + 63 + dual_ovl = is_omap_plane_dual_overlay(new_state); 64 + 65 + /* Cleanup previously held overlay if needed */ 66 + if 
(old_omap_state->overlay) 67 + omap_overlay_update_state(priv, old_omap_state->overlay); 68 + if (old_omap_state->r_overlay) 69 + omap_overlay_update_state(priv, old_omap_state->r_overlay); 70 + 71 + if (!new_omap_state->overlay) { 72 + DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name); 73 + return; 74 + } 75 + 76 + ovl_id = new_omap_state->overlay->id; 77 + DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc, 73 78 new_state->fb); 74 79 75 80 memset(&info, 0, sizeof(info)); ··· 106 67 info.color_encoding = new_state->color_encoding; 107 68 info.color_range = new_state->color_range; 108 69 109 - /* update scanout: */ 110 - omap_framebuffer_update_scanout(new_state->fb, new_state, &info); 70 + r_info = info; 111 71 112 - DBG("%dx%d -> %dx%d (%d)", info.width, info.height, 113 - info.out_width, info.out_height, 114 - info.screen_width); 72 + /* update scanout: */ 73 + omap_framebuffer_update_scanout(new_state->fb, new_state, &info, 74 + dual_ovl ? &r_info : NULL); 75 + 76 + DBG("%s: %dx%d -> %dx%d (%d)", 77 + new_omap_state->overlay->name, info.width, info.height, 78 + info.out_width, info.out_height, info.screen_width); 115 79 DBG("%d,%d %pad %pad", info.pos_x, info.pos_y, 116 80 &info.paddr, &info.p_uv_addr); 117 81 82 + if (dual_ovl) { 83 + r_ovl_id = new_omap_state->r_overlay->id; 84 + /* 85 + * If the current plane uses 2 hw planes the very next 86 + * zorder is used by the r_overlay so we just use the 87 + * main overlay zorder + 1 88 + */ 89 + r_info.zorder = info.zorder + 1; 90 + 91 + DBG("%s: %dx%d -> %dx%d (%d)", 92 + new_omap_state->r_overlay->name, 93 + r_info.width, r_info.height, 94 + r_info.out_width, r_info.out_height, r_info.screen_width); 95 + DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y, 96 + &r_info.paddr, &r_info.p_uv_addr); 97 + } 98 + 118 99 /* and finally, update omapdss: */ 119 - ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info, 100 + ret = dispc_ovl_setup(priv->dispc, ovl_id, &info, 120 101 
omap_crtc_timings(new_state->crtc), false, 121 102 omap_crtc_channel(new_state->crtc)); 122 103 if (ret) { 123 104 dev_err(plane->dev->dev, "Failed to setup plane %s\n", 124 - omap_plane->name); 125 - dispc_ovl_enable(priv->dispc, omap_plane->id, false); 105 + plane->name); 106 + dispc_ovl_enable(priv->dispc, ovl_id, false); 126 107 return; 127 108 } 128 109 129 - dispc_ovl_enable(priv->dispc, omap_plane->id, true); 110 + dispc_ovl_enable(priv->dispc, ovl_id, true); 111 + 112 + if (dual_ovl) { 113 + ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info, 114 + omap_crtc_timings(new_state->crtc), false, 115 + omap_crtc_channel(new_state->crtc)); 116 + if (ret) { 117 + dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n", 118 + plane->name); 119 + dispc_ovl_enable(priv->dispc, r_ovl_id, false); 120 + dispc_ovl_enable(priv->dispc, ovl_id, false); 121 + return; 122 + } 123 + 124 + dispc_ovl_enable(priv->dispc, r_ovl_id, true); 125 + } 130 126 } 131 127 132 128 static void omap_plane_atomic_disable(struct drm_plane *plane, 133 129 struct drm_atomic_state *state) 134 130 { 135 - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 136 - plane); 137 131 struct omap_drm_private *priv = plane->dev->dev_private; 138 132 struct omap_plane *omap_plane = to_omap_plane(plane); 133 + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 134 + plane); 135 + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, 136 + plane); 137 + struct omap_plane_state *new_omap_state; 138 + struct omap_plane_state *old_omap_state; 139 + 140 + new_omap_state = to_omap_plane_state(new_state); 141 + old_omap_state = to_omap_plane_state(old_state); 142 + 143 + if (!old_omap_state->overlay) 144 + return; 139 145 140 146 new_state->rotation = DRM_MODE_ROTATE_0; 141 147 new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 
0 : omap_plane->id; 142 148 143 - dispc_ovl_enable(priv->dispc, omap_plane->id, false); 149 + omap_overlay_update_state(priv, old_omap_state->overlay); 150 + new_omap_state->overlay = NULL; 151 + 152 + if (is_omap_plane_dual_overlay(old_state)) { 153 + omap_overlay_update_state(priv, old_omap_state->r_overlay); 154 + new_omap_state->r_overlay = NULL; 155 + } 144 156 } 157 + 158 + #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) 145 159 146 160 static int omap_plane_atomic_check(struct drm_plane *plane, 147 161 struct drm_atomic_state *state) 148 162 { 149 163 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 150 164 plane); 165 + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, 166 + plane); 167 + struct omap_drm_private *priv = plane->dev->dev_private; 168 + struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state); 169 + struct omap_global_state *omap_overlay_global_state; 151 170 struct drm_crtc_state *crtc_state; 171 + bool new_r_hw_overlay = false; 172 + bool new_hw_overlay = false; 173 + u32 max_width, max_height; 174 + struct drm_crtc *crtc; 175 + u16 width, height; 176 + u32 caps = 0; 177 + u32 fourcc; 178 + int ret; 152 179 153 - if (!new_plane_state->fb) 180 + omap_overlay_global_state = omap_get_global_state(state); 181 + if (IS_ERR(omap_overlay_global_state)) 182 + return PTR_ERR(omap_overlay_global_state); 183 + 184 + dispc_ovl_get_max_size(priv->dispc, &width, &height); 185 + max_width = width << 16; 186 + max_height = height << 16; 187 + 188 + crtc = new_plane_state->crtc ? 
new_plane_state->crtc : plane->state->crtc; 189 + if (!crtc) 154 190 return 0; 155 191 156 - /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */ 157 - if (WARN_ON(!new_plane_state->crtc)) 158 - return 0; 159 - 160 - crtc_state = drm_atomic_get_existing_crtc_state(state, 161 - new_plane_state->crtc); 192 + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); 162 193 /* we should have a crtc state if the plane is attached to a crtc */ 163 194 if (WARN_ON(!crtc_state)) 164 195 return 0; 165 196 166 - if (!crtc_state->enable) 197 + /* 198 + * Note: these are just sanity checks to filter out totally bad scaling 199 + * factors. The real limits must be calculated case by case, and 200 + * unfortunately we currently do those checks only at the commit 201 + * phase in dispc. 202 + */ 203 + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 204 + FRAC_16_16(1, 8), FRAC_16_16(8, 1), 205 + true, true); 206 + if (ret) 207 + return ret; 208 + 209 + DBG("%s: visible %d -> %d", plane->name, 210 + old_plane_state->visible, new_plane_state->visible); 211 + 212 + if (!new_plane_state->visible) { 213 + omap_overlay_release(state, omap_state->overlay); 214 + omap_overlay_release(state, omap_state->r_overlay); 215 + omap_state->overlay = NULL; 216 + omap_state->r_overlay = NULL; 167 217 return 0; 218 + } 168 219 169 220 if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) 170 221 return -EINVAL; ··· 265 136 if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay) 266 137 return -EINVAL; 267 138 139 + /* Make sure dimensions are within bounds. 
*/ 140 + if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height) 141 + return -EINVAL; 142 + 143 + 144 + if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) { 145 + bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv; 146 + 147 + if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) || 148 + new_plane_state->crtc_w / 2 & 1)) { 149 + /* 150 + * When calculating the split overlay width 151 + * and it yield an odd value we will need to adjust 152 + * the indivual width +/- 1. So make sure it fits 153 + */ 154 + if (new_plane_state->src_w <= ((2 * width - 1) << 16) && 155 + new_plane_state->crtc_w <= (2 * width - 1)) 156 + new_r_hw_overlay = true; 157 + else 158 + return -EINVAL; 159 + } else { 160 + if (new_plane_state->src_w <= (2 * max_width) && 161 + new_plane_state->crtc_w <= (2 * width)) 162 + new_r_hw_overlay = true; 163 + else 164 + return -EINVAL; 165 + } 166 + } 167 + 268 168 if (new_plane_state->rotation != DRM_MODE_ROTATE_0 && 269 169 !omap_framebuffer_supports_rotation(new_plane_state->fb)) 270 170 return -EINVAL; 171 + 172 + if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w || 173 + (new_plane_state->src_h >> 16) != new_plane_state->crtc_h) 174 + caps |= OMAP_DSS_OVL_CAP_SCALE; 175 + 176 + fourcc = new_plane_state->fb->format->format; 177 + 178 + /* 179 + * (re)allocate hw overlay if we don't have one or 180 + * there is a caps mismatch 181 + */ 182 + if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) { 183 + new_hw_overlay = true; 184 + } else { 185 + /* check supported format */ 186 + if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id, 187 + fourcc)) 188 + new_hw_overlay = true; 189 + } 190 + 191 + /* 192 + * check if we need two overlays and only have 1 or 193 + * if we had 2 overlays but will only need 1 194 + */ 195 + if ((new_r_hw_overlay && !omap_state->r_overlay) || 196 + (!new_r_hw_overlay && omap_state->r_overlay)) 197 + new_hw_overlay = 
true; 198 + 199 + if (new_hw_overlay) { 200 + struct omap_hw_overlay *old_ovl = omap_state->overlay; 201 + struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay; 202 + struct omap_hw_overlay *new_ovl = NULL; 203 + struct omap_hw_overlay *new_r_ovl = NULL; 204 + 205 + omap_overlay_release(state, old_ovl); 206 + omap_overlay_release(state, old_r_ovl); 207 + 208 + ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl, 209 + new_r_hw_overlay ? &new_r_ovl : NULL); 210 + if (ret) { 211 + DBG("%s: failed to assign hw_overlay", plane->name); 212 + omap_state->overlay = NULL; 213 + omap_state->r_overlay = NULL; 214 + return ret; 215 + } 216 + 217 + omap_state->overlay = new_ovl; 218 + if (new_r_hw_overlay) 219 + omap_state->r_overlay = new_r_ovl; 220 + else 221 + omap_state->r_overlay = NULL; 222 + } 223 + 224 + DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id); 225 + 226 + if (omap_state->r_overlay) 227 + DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id); 271 228 272 229 return 0; 273 230 } ··· 370 155 { 371 156 struct omap_plane *omap_plane = to_omap_plane(plane); 372 157 373 - DBG("%s", omap_plane->name); 158 + DBG("%s", plane->name); 374 159 375 160 drm_plane_cleanup(plane); 376 161 ··· 404 189 static void omap_plane_reset(struct drm_plane *plane) 405 190 { 406 191 struct omap_plane *omap_plane = to_omap_plane(plane); 192 + struct omap_plane_state *omap_state; 407 193 408 - drm_atomic_helper_plane_reset(plane); 409 - if (!plane->state) 194 + if (plane->state) 195 + drm_atomic_helper_plane_destroy_state(plane, plane->state); 196 + 197 + omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL); 198 + if (!omap_state) 410 199 return; 200 + 201 + __drm_atomic_helper_plane_reset(plane, &omap_state->base); 411 202 412 203 /* 413 204 * Set the zpos default depending on whether we are a primary or overlay ··· 423 202 ? 
0 : omap_plane->id; 424 203 plane->state->color_encoding = DRM_COLOR_YCBCR_BT601; 425 204 plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE; 205 + } 206 + 207 + static struct drm_plane_state * 208 + omap_plane_atomic_duplicate_state(struct drm_plane *plane) 209 + { 210 + struct omap_plane_state *state, *current_state; 211 + 212 + if (WARN_ON(!plane->state)) 213 + return NULL; 214 + 215 + current_state = to_omap_plane_state(plane->state); 216 + 217 + state = kmalloc(sizeof(*state), GFP_KERNEL); 218 + if (!state) 219 + return NULL; 220 + 221 + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); 222 + 223 + state->overlay = current_state->overlay; 224 + state->r_overlay = current_state->r_overlay; 225 + 226 + return &state->base; 227 + } 228 + 229 + static void omap_plane_atomic_print_state(struct drm_printer *p, 230 + const struct drm_plane_state *state) 231 + { 232 + struct omap_plane_state *omap_state = to_omap_plane_state(state); 233 + 234 + if (omap_state->overlay) 235 + drm_printf(p, "\toverlay=%s (caps=0x%x)\n", 236 + omap_state->overlay->name, 237 + omap_state->overlay->caps); 238 + else 239 + drm_printf(p, "\toverlay=None\n"); 240 + if (omap_state->r_overlay) 241 + drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n", 242 + omap_state->r_overlay->name, 243 + omap_state->r_overlay->caps); 244 + else 245 + drm_printf(p, "\tr_overlay=None\n"); 426 246 } 427 247 428 248 static int omap_plane_atomic_set_property(struct drm_plane *plane, ··· 501 239 .disable_plane = drm_atomic_helper_disable_plane, 502 240 .reset = omap_plane_reset, 503 241 .destroy = omap_plane_destroy, 504 - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 242 + .atomic_duplicate_state = omap_plane_atomic_duplicate_state, 505 243 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 506 244 .atomic_set_property = omap_plane_atomic_set_property, 507 245 .atomic_get_property = omap_plane_atomic_get_property, 246 + .atomic_print_state = 
omap_plane_atomic_print_state, 508 247 }; 509 248 510 249 static bool omap_plane_supports_yuv(struct drm_plane *plane) ··· 524 261 return false; 525 262 } 526 263 527 - static const char *plane_id_to_name[] = { 528 - [OMAP_DSS_GFX] = "gfx", 529 - [OMAP_DSS_VIDEO1] = "vid1", 530 - [OMAP_DSS_VIDEO2] = "vid2", 531 - [OMAP_DSS_VIDEO3] = "vid3", 532 - }; 533 - 534 - static const enum omap_plane_id plane_idx_to_id[] = { 535 - OMAP_DSS_GFX, 536 - OMAP_DSS_VIDEO1, 537 - OMAP_DSS_VIDEO2, 538 - OMAP_DSS_VIDEO3, 539 - }; 540 - 541 264 /* initialize plane */ 542 265 struct drm_plane *omap_plane_init(struct drm_device *dev, 543 266 int idx, enum drm_plane_type type, ··· 533 284 unsigned int num_planes = dispc_get_num_ovls(priv->dispc); 534 285 struct drm_plane *plane; 535 286 struct omap_plane *omap_plane; 536 - enum omap_plane_id id; 537 287 int ret; 538 288 u32 nformats; 539 289 const u32 *formats; 540 290 541 - if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id))) 291 + if (WARN_ON(idx >= num_planes)) 542 292 return ERR_PTR(-EINVAL); 543 - 544 - id = plane_idx_to_id[idx]; 545 - 546 - DBG("%s: type=%d", plane_id_to_name[id], type); 547 293 548 294 omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); 549 295 if (!omap_plane) 550 296 return ERR_PTR(-ENOMEM); 551 297 552 - formats = dispc_ovl_get_color_modes(priv->dispc, id); 298 + omap_plane->id = idx; 299 + 300 + DBG("%d: type=%d", omap_plane->id, type); 301 + DBG(" crtc_mask: 0x%04x", possible_crtcs); 302 + 303 + formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id); 553 304 for (nformats = 0; formats[nformats]; ++nformats) 554 305 ; 555 - omap_plane->id = id; 556 - omap_plane->name = plane_id_to_name[id]; 557 306 558 307 plane = &omap_plane->base; 559 308 ··· 581 334 return plane; 582 335 583 336 error: 584 - dev_err(dev->dev, "%s(): could not create plane: %s\n", 585 - __func__, plane_id_to_name[id]); 337 + dev_err(dev->dev, "%s(): could not create plane: %d\n", 338 + __func__, omap_plane->id); 586 339 587 340 
kfree(omap_plane); 588 341 return NULL;
+1
drivers/gpu/drm/omapdrm/omap_plane.h
··· 22 22 u32 possible_crtcs); 23 23 void omap_plane_install_properties(struct drm_plane *plane, 24 24 struct drm_mode_object *obj); 25 + bool is_omap_plane_dual_overlay(struct drm_plane_state *state); 25 26 26 27 #endif /* __OMAPDRM_PLANE_H__ */
+1 -1
drivers/gpu/drm/panel/Kconfig
··· 152 152 tristate "Ilitek ILI9341 240x320 QVGA panels" 153 153 depends on OF && SPI 154 154 depends on DRM_KMS_HELPER 155 - depends on DRM_KMS_CMA_HELPER 155 + depends on DRM_GEM_CMA_HELPER 156 156 depends on BACKLIGHT_CLASS_DEVICE 157 157 select DRM_MIPI_DBI 158 158 help
+74 -34
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
··· 84 84 _INIT_DCS_CMD(0x0D, 0x63), 85 85 _INIT_DCS_CMD(0x0E, 0x91), 86 86 _INIT_DCS_CMD(0x0F, 0x73), 87 - _INIT_DCS_CMD(0x95, 0xEB), 88 - _INIT_DCS_CMD(0x96, 0xEB), 87 + _INIT_DCS_CMD(0x95, 0xE6), 88 + _INIT_DCS_CMD(0x96, 0xF0), 89 89 _INIT_DCS_CMD(0x30, 0x11), 90 90 _INIT_DCS_CMD(0x6D, 0x66), 91 91 _INIT_DCS_CMD(0x75, 0xA2), ··· 111 111 _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), 112 112 _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), 113 113 _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), 114 - _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), 115 114 115 + _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), 116 116 _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), 117 117 _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), 118 118 _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), 119 - _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), 119 + _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), 120 120 121 121 _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), 122 122 _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), 123 123 _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), 124 - _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), 125 124 125 + 
_INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), 126 126 _INIT_DCS_CMD(0xFF, 0x24), 127 127 _INIT_DCS_CMD(0xFB, 0x01), 128 128 ··· 225 225 _INIT_DCS_CMD(0x7F, 0x3C), 226 226 _INIT_DCS_CMD(0x82, 0x04), 227 227 _INIT_DCS_CMD(0x97, 0xC0), 228 + 228 229 _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), 229 230 _INIT_DCS_CMD(0x91, 0x44), 230 231 _INIT_DCS_CMD(0x92, 0xA9), ··· 333 332 _INIT_DCS_CMD(0x34, 0x78), 334 333 _INIT_DCS_CMD(0x35, 0x16), 335 334 _INIT_DCS_CMD(0xC8, 0x04), 336 - _INIT_DCS_CMD(0xC9, 0x80), 335 + _INIT_DCS_CMD(0xC9, 0x9E), 337 336 _INIT_DCS_CMD(0xCA, 0x4E), 338 337 _INIT_DCS_CMD(0xCB, 0x00), 339 - _INIT_DCS_CMD(0xA9, 0x4C), 340 - _INIT_DCS_CMD(0xAA, 0x47), 341 338 339 + _INIT_DCS_CMD(0xA9, 0x49), 340 + _INIT_DCS_CMD(0xAA, 0x4B), 341 + _INIT_DCS_CMD(0xAB, 0x48), 342 + _INIT_DCS_CMD(0xAC, 0x43), 343 + _INIT_DCS_CMD(0xAD, 0x40), 344 + _INIT_DCS_CMD(0xAE, 0x50), 345 + _INIT_DCS_CMD(0xAF, 0x44), 346 + _INIT_DCS_CMD(0xB0, 0x54), 347 + _INIT_DCS_CMD(0xB1, 0x4E), 348 + _INIT_DCS_CMD(0xB2, 0x4D), 349 + _INIT_DCS_CMD(0xB3, 0x4C), 350 + _INIT_DCS_CMD(0xB4, 0x41), 351 + _INIT_DCS_CMD(0xB5, 0x47), 352 + _INIT_DCS_CMD(0xB6, 0x53), 353 + _INIT_DCS_CMD(0xB7, 0x3E), 354 + _INIT_DCS_CMD(0xB8, 0x51), 355 + _INIT_DCS_CMD(0xB9, 0x3C), 356 + _INIT_DCS_CMD(0xBA, 0x3B), 357 + _INIT_DCS_CMD(0xBB, 0x46), 358 + _INIT_DCS_CMD(0xBC, 0x45), 359 + _INIT_DCS_CMD(0xBD, 0x55), 360 + _INIT_DCS_CMD(0xBE, 0x3D), 361 + _INIT_DCS_CMD(0xBF, 0x3F), 362 + _INIT_DCS_CMD(0xC0, 0x52), 363 + _INIT_DCS_CMD(0xC1, 0x4A), 364 + _INIT_DCS_CMD(0xC2, 0x39), 365 + _INIT_DCS_CMD(0xC3, 0x4F), 366 + _INIT_DCS_CMD(0xC4, 0x3A), 367 + _INIT_DCS_CMD(0xC5, 0x42), 342 368 _INIT_DCS_CMD(0xFF, 0x27), 343 369 _INIT_DCS_CMD(0xFB, 0x01), 344 370 ··· 447 419 {}, 448 420 }; 449 421 450 - static const struct panel_init_cmd inx_init_cmd[] = { 422 + static const struct panel_init_cmd inx_hj110iz_init_cmd[] = { 451 423 
_INIT_DCS_CMD(0xFF, 0x20), 452 424 _INIT_DCS_CMD(0xFB, 0x01), 453 425 _INIT_DCS_CMD(0x05, 0xD1), ··· 456 428 _INIT_DCS_CMD(0x08, 0x4B), 457 429 _INIT_DCS_CMD(0x0E, 0x91), 458 430 _INIT_DCS_CMD(0x0F, 0x69), 459 - _INIT_DCS_CMD(0x95, 0xFF), 460 - _INIT_DCS_CMD(0x96, 0xFF), 461 - _INIT_DCS_CMD(0x9D, 0x0A), 462 - _INIT_DCS_CMD(0x9E, 0x0A), 431 + _INIT_DCS_CMD(0x95, 0xF5), 432 + _INIT_DCS_CMD(0x96, 0xF5), 433 + _INIT_DCS_CMD(0x9D, 0x00), 434 + _INIT_DCS_CMD(0x9E, 0x00), 463 435 _INIT_DCS_CMD(0x69, 0x98), 464 436 _INIT_DCS_CMD(0x75, 0xA2), 465 437 _INIT_DCS_CMD(0x77, 0xB3), ··· 521 493 _INIT_DCS_CMD(0x2A, 0x03), 522 494 _INIT_DCS_CMD(0x2B, 0x03), 523 495 524 - _INIT_DCS_CMD(0x2F, 0x06), 496 + _INIT_DCS_CMD(0x2F, 0x05), 525 497 _INIT_DCS_CMD(0x30, 0x32), 526 498 _INIT_DCS_CMD(0x31, 0x43), 527 - _INIT_DCS_CMD(0x33, 0x06), 499 + _INIT_DCS_CMD(0x33, 0x05), 528 500 _INIT_DCS_CMD(0x34, 0x32), 529 501 _INIT_DCS_CMD(0x35, 0x43), 530 502 _INIT_DCS_CMD(0x37, 0x44), 531 503 _INIT_DCS_CMD(0x38, 0x40), 532 504 _INIT_DCS_CMD(0x39, 0x00), 533 - _INIT_DCS_CMD(0x3A, 0x01), 534 - _INIT_DCS_CMD(0x3B, 0x48), 505 + _INIT_DCS_CMD(0x3A, 0x18), 506 + _INIT_DCS_CMD(0x3B, 0x00), 535 507 _INIT_DCS_CMD(0x3D, 0x93), 536 508 _INIT_DCS_CMD(0xAB, 0x44), 537 509 _INIT_DCS_CMD(0xAC, 0x40), ··· 548 520 _INIT_DCS_CMD(0x56, 0x08), 549 521 _INIT_DCS_CMD(0x58, 0x21), 550 522 _INIT_DCS_CMD(0x59, 0x40), 551 - _INIT_DCS_CMD(0x5A, 0x09), 552 - _INIT_DCS_CMD(0x5B, 0x48), 523 + _INIT_DCS_CMD(0x5A, 0x00), 524 + _INIT_DCS_CMD(0x5B, 0x2C), 553 525 _INIT_DCS_CMD(0x5E, 0x00, 0x10), 554 526 _INIT_DCS_CMD(0x5F, 0x00), 555 527 ··· 586 558 _INIT_DCS_CMD(0xEF, 0x01), 587 559 _INIT_DCS_CMD(0xF0, 0x7A), 588 560 561 + _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), 589 562 _INIT_DCS_CMD(0xFF, 0x25), 590 563 _INIT_DCS_CMD(0xFB, 0x01), 591 564 592 565 _INIT_DCS_CMD(0x05, 0x00), 593 566 567 + _INIT_DCS_CMD(0x13, 0x02), 568 + _INIT_DCS_CMD(0x14, 0xDF), 594 569 _INIT_DCS_CMD(0xF1, 0x10), 
595 570 _INIT_DCS_CMD(0x1E, 0x00), 596 - _INIT_DCS_CMD(0x1F, 0x09), 597 - _INIT_DCS_CMD(0x20, 0x46), 571 + _INIT_DCS_CMD(0x1F, 0x00), 572 + _INIT_DCS_CMD(0x20, 0x2C), 598 573 _INIT_DCS_CMD(0x25, 0x00), 599 - _INIT_DCS_CMD(0x26, 0x09), 600 - _INIT_DCS_CMD(0x27, 0x46), 574 + _INIT_DCS_CMD(0x26, 0x00), 575 + _INIT_DCS_CMD(0x27, 0x2C), 601 576 _INIT_DCS_CMD(0x3F, 0x80), 602 577 _INIT_DCS_CMD(0x40, 0x00), 603 578 _INIT_DCS_CMD(0x43, 0x00), 604 579 605 - _INIT_DCS_CMD(0x44, 0x09), 606 - _INIT_DCS_CMD(0x45, 0x46), 580 + _INIT_DCS_CMD(0x44, 0x18), 581 + _INIT_DCS_CMD(0x45, 0x00), 607 582 608 - _INIT_DCS_CMD(0x48, 0x09), 609 - _INIT_DCS_CMD(0x49, 0x46), 583 + _INIT_DCS_CMD(0x48, 0x00), 584 + _INIT_DCS_CMD(0x49, 0x2C), 610 585 _INIT_DCS_CMD(0x5B, 0x80), 611 586 _INIT_DCS_CMD(0x5C, 0x00), 612 - _INIT_DCS_CMD(0x5D, 0x01), 613 - _INIT_DCS_CMD(0x5E, 0x46), 614 - _INIT_DCS_CMD(0x61, 0x01), 615 - _INIT_DCS_CMD(0x62, 0x46), 587 + _INIT_DCS_CMD(0x5D, 0x00), 588 + _INIT_DCS_CMD(0x5E, 0x00), 589 + _INIT_DCS_CMD(0x61, 0x00), 590 + _INIT_DCS_CMD(0x62, 0x2C), 616 591 _INIT_DCS_CMD(0x68, 0x10), 617 592 _INIT_DCS_CMD(0xFF, 0x26), 618 593 _INIT_DCS_CMD(0xFB, 0x01), ··· 731 700 _INIT_DCS_CMD(0xA3, 0x30), 732 701 _INIT_DCS_CMD(0xA4, 0xC0), 733 702 _INIT_DCS_CMD(0xE8, 0x00), 703 + _INIT_DCS_CMD(0x97, 0x3C), 704 + _INIT_DCS_CMD(0x98, 0x02), 705 + _INIT_DCS_CMD(0x99, 0x95), 706 + _INIT_DCS_CMD(0x9A, 0x06), 707 + _INIT_DCS_CMD(0x9B, 0x00), 708 + _INIT_DCS_CMD(0x9C, 0x0B), 709 + _INIT_DCS_CMD(0x9D, 0x0A), 710 + _INIT_DCS_CMD(0x9E, 0x90), 734 711 _INIT_DCS_CMD(0xFF, 0xF0), 735 712 _INIT_DCS_CMD(0xFB, 0x01), 736 713 _INIT_DCS_CMD(0x3A, 0x08), 737 714 _INIT_DCS_CMD(0xFF, 0xD0), 738 715 _INIT_DCS_CMD(0xFB, 0x01), 739 716 _INIT_DCS_CMD(0x00, 0x33), 740 - _INIT_DCS_CMD(0x02, 0x77), 741 717 _INIT_DCS_CMD(0x08, 0x01), 742 718 _INIT_DCS_CMD(0x09, 0xBF), 743 - _INIT_DCS_CMD(0x28, 0x30), 744 719 _INIT_DCS_CMD(0x2F, 0x33), 745 720 _INIT_DCS_CMD(0xFF, 0x23), 746 721 _INIT_DCS_CMD(0xFB, 0x01), ··· 755 718 
_INIT_DCS_CMD(0xFF, 0x20), 756 719 _INIT_DCS_CMD(0xFB, 0x01), 757 720 _INIT_DCS_CMD(0x30, 0x00), 721 + _INIT_DCS_CMD(0xFF, 0x24), 722 + _INIT_DCS_CMD(0x5C, 0x88), 723 + _INIT_DCS_CMD(0x5D, 0x08), 758 724 _INIT_DCS_CMD(0xFF, 0x10), 759 725 _INIT_DCS_CMD(0xB9, 0x01), 760 726 _INIT_DCS_CMD(0xFF, 0x20), ··· 1352 1312 | MIPI_DSI_MODE_VIDEO_HSE 1353 1313 | MIPI_DSI_CLOCK_NON_CONTINUOUS 1354 1314 | MIPI_DSI_MODE_VIDEO_BURST, 1355 - .init_cmds = inx_init_cmd, 1315 + .init_cmds = inx_hj110iz_init_cmd, 1356 1316 }; 1357 1317 1358 1318 static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
+1 -1
drivers/gpu/drm/panfrost/panfrost_gem.c
··· 223 223 224 224 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 225 225 if (!obj) 226 - return NULL; 226 + return ERR_PTR(-ENOMEM); 227 227 228 228 INIT_LIST_HEAD(&obj->mappings.list); 229 229 mutex_init(&obj->mappings.lock);
-1
drivers/gpu/drm/pl111/Kconfig
··· 6 6 depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n 7 7 depends on COMMON_CLK 8 8 select DRM_KMS_HELPER 9 - select DRM_KMS_CMA_HELPER 10 9 select DRM_GEM_CMA_HELPER 11 10 select DRM_BRIDGE 12 11 select DRM_PANEL_BRIDGE
+9 -6
drivers/gpu/drm/qxl/qxl_debugfs.c
··· 57 57 struct qxl_bo *bo; 58 58 59 59 list_for_each_entry(bo, &qdev->gem.objects, list) { 60 - struct dma_resv_list *fobj; 61 - int rel; 60 + struct dma_resv_iter cursor; 61 + struct dma_fence *fence; 62 + int rel = 0; 62 63 63 - rcu_read_lock(); 64 - fobj = dma_resv_shared_list(bo->tbo.base.resv); 65 - rel = fobj ? fobj->shared_count : 0; 66 - rcu_read_unlock(); 64 + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true); 65 + dma_resv_for_each_fence_unlocked(&cursor, fence) { 66 + if (dma_resv_iter_is_restarted(&cursor)) 67 + rel = 0; 68 + ++rel; 69 + } 67 70 68 71 seq_printf(m, "size %ld, pc %d, num releases %d\n", 69 72 (unsigned long)bo->tbo.base.size,
-1
drivers/gpu/drm/rcar-du/Kconfig
··· 5 5 depends on ARM || ARM64 6 6 depends on ARCH_RENESAS || COMPILE_TEST 7 7 select DRM_KMS_HELPER 8 - select DRM_KMS_CMA_HELPER 9 8 select DRM_GEM_CMA_HELPER 10 9 select VIDEOMODE_HELPERS 11 10 help
+5 -5
drivers/gpu/drm/rcar-du/rcar_du_kms.c
··· 327 327 */ 328 328 329 329 static const struct drm_gem_object_funcs rcar_du_gem_funcs = { 330 - .free = drm_gem_cma_free_object, 331 - .print_info = drm_gem_cma_print_info, 332 - .get_sg_table = drm_gem_cma_get_sg_table, 333 - .vmap = drm_gem_cma_vmap, 334 - .mmap = drm_gem_cma_mmap, 330 + .free = drm_gem_cma_object_free, 331 + .print_info = drm_gem_cma_object_print_info, 332 + .get_sg_table = drm_gem_cma_object_get_sg_table, 333 + .vmap = drm_gem_cma_object_vmap, 334 + .mmap = drm_gem_cma_object_mmap, 335 335 .vm_ops = &drm_gem_cma_vm_ops, 336 336 }; 337 337
-1
drivers/gpu/drm/rockchip/Makefile
··· 5 5 6 6 rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ 7 7 rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o 8 - rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o 9 8 10 9 rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o 11 10 rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
+2 -8
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 26 26 27 27 #include "rockchip_drm_drv.h" 28 28 #include "rockchip_drm_fb.h" 29 - #include "rockchip_drm_fbdev.h" 30 29 #include "rockchip_drm_gem.h" 31 30 32 31 #define DRIVER_NAME "rockchip" ··· 158 159 159 160 drm_mode_config_reset(drm_dev); 160 161 161 - ret = rockchip_drm_fbdev_init(drm_dev); 162 - if (ret) 163 - goto err_unbind_all; 164 - 165 162 /* init kms poll for handling hpd */ 166 163 drm_kms_helper_poll_init(drm_dev); 167 164 ··· 165 170 if (ret) 166 171 goto err_kms_helper_poll_fini; 167 172 173 + drm_fbdev_generic_setup(drm_dev, 0); 174 + 168 175 return 0; 169 176 err_kms_helper_poll_fini: 170 177 drm_kms_helper_poll_fini(drm_dev); 171 - rockchip_drm_fbdev_fini(drm_dev); 172 178 err_unbind_all: 173 179 component_unbind_all(dev, drm_dev); 174 180 err_iommu_cleanup: ··· 185 189 186 190 drm_dev_unregister(drm_dev); 187 191 188 - rockchip_drm_fbdev_fini(drm_dev); 189 192 drm_kms_helper_poll_fini(drm_dev); 190 193 191 194 drm_atomic_helper_shutdown(drm_dev); ··· 198 203 199 204 static const struct drm_driver rockchip_drm_driver = { 200 205 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, 201 - .lastclose = drm_fb_helper_lastclose, 202 206 .dumb_create = rockchip_gem_dumb_create, 203 207 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 204 208 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-2
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
··· 43 43 * @mm_lock: protect drm_mm on multi-threads. 44 44 */ 45 45 struct rockchip_drm_private { 46 - struct drm_fb_helper fbdev_helper; 47 - struct drm_gem_object *fbdev_bo; 48 46 struct iommu_domain *domain; 49 47 struct mutex mm_lock; 50 48 struct drm_mm mm;
-164
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 - * Author:Mark Yao <mark.yao@rock-chips.com> 5 - */ 6 - 7 - #include <drm/drm.h> 8 - #include <drm/drm_fb_helper.h> 9 - #include <drm/drm_fourcc.h> 10 - #include <drm/drm_prime.h> 11 - #include <drm/drm_probe_helper.h> 12 - 13 - #include "rockchip_drm_drv.h" 14 - #include "rockchip_drm_gem.h" 15 - #include "rockchip_drm_fb.h" 16 - #include "rockchip_drm_fbdev.h" 17 - 18 - #define PREFERRED_BPP 32 19 - #define to_drm_private(x) \ 20 - container_of(x, struct rockchip_drm_private, fbdev_helper) 21 - 22 - static int rockchip_fbdev_mmap(struct fb_info *info, 23 - struct vm_area_struct *vma) 24 - { 25 - struct drm_fb_helper *helper = info->par; 26 - struct rockchip_drm_private *private = to_drm_private(helper); 27 - 28 - return drm_gem_prime_mmap(private->fbdev_bo, vma); 29 - } 30 - 31 - static const struct fb_ops rockchip_drm_fbdev_ops = { 32 - .owner = THIS_MODULE, 33 - DRM_FB_HELPER_DEFAULT_OPS, 34 - .fb_mmap = rockchip_fbdev_mmap, 35 - .fb_fillrect = drm_fb_helper_cfb_fillrect, 36 - .fb_copyarea = drm_fb_helper_cfb_copyarea, 37 - .fb_imageblit = drm_fb_helper_cfb_imageblit, 38 - }; 39 - 40 - static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, 41 - struct drm_fb_helper_surface_size *sizes) 42 - { 43 - struct rockchip_drm_private *private = to_drm_private(helper); 44 - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 45 - struct drm_device *dev = helper->dev; 46 - struct rockchip_gem_object *rk_obj; 47 - struct drm_framebuffer *fb; 48 - unsigned int bytes_per_pixel; 49 - unsigned long offset; 50 - struct fb_info *fbi; 51 - size_t size; 52 - int ret; 53 - 54 - bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); 55 - 56 - mode_cmd.width = sizes->surface_width; 57 - mode_cmd.height = sizes->surface_height; 58 - mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; 59 - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 60 
- sizes->surface_depth); 61 - 62 - size = mode_cmd.pitches[0] * mode_cmd.height; 63 - 64 - rk_obj = rockchip_gem_create_object(dev, size, true); 65 - if (IS_ERR(rk_obj)) 66 - return -ENOMEM; 67 - 68 - private->fbdev_bo = &rk_obj->base; 69 - 70 - fbi = drm_fb_helper_alloc_fbi(helper); 71 - if (IS_ERR(fbi)) { 72 - DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n"); 73 - ret = PTR_ERR(fbi); 74 - goto out; 75 - } 76 - 77 - helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd, 78 - private->fbdev_bo); 79 - if (IS_ERR(helper->fb)) { 80 - DRM_DEV_ERROR(dev->dev, 81 - "Failed to allocate DRM framebuffer.\n"); 82 - ret = PTR_ERR(helper->fb); 83 - goto out; 84 - } 85 - 86 - fbi->fbops = &rockchip_drm_fbdev_ops; 87 - 88 - fb = helper->fb; 89 - drm_fb_helper_fill_info(fbi, helper, sizes); 90 - 91 - offset = fbi->var.xoffset * bytes_per_pixel; 92 - offset += fbi->var.yoffset * fb->pitches[0]; 93 - 94 - dev->mode_config.fb_base = 0; 95 - fbi->screen_base = rk_obj->kvaddr + offset; 96 - fbi->screen_size = rk_obj->base.size; 97 - fbi->fix.smem_len = rk_obj->base.size; 98 - 99 - DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", 100 - fb->width, fb->height, fb->format->depth, 101 - rk_obj->kvaddr, 102 - offset, size); 103 - 104 - return 0; 105 - 106 - out: 107 - rockchip_gem_free_object(&rk_obj->base); 108 - return ret; 109 - } 110 - 111 - static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = { 112 - .fb_probe = rockchip_drm_fbdev_create, 113 - }; 114 - 115 - int rockchip_drm_fbdev_init(struct drm_device *dev) 116 - { 117 - struct rockchip_drm_private *private = dev->dev_private; 118 - struct drm_fb_helper *helper; 119 - int ret; 120 - 121 - if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) 122 - return -EINVAL; 123 - 124 - helper = &private->fbdev_helper; 125 - 126 - drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs); 127 - 128 - ret = drm_fb_helper_init(dev, helper); 129 - if (ret < 0) { 130 - 
DRM_DEV_ERROR(dev->dev, 131 - "Failed to initialize drm fb helper - %d.\n", 132 - ret); 133 - return ret; 134 - } 135 - 136 - ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); 137 - if (ret < 0) { 138 - DRM_DEV_ERROR(dev->dev, 139 - "Failed to set initial hw config - %d.\n", 140 - ret); 141 - goto err_drm_fb_helper_fini; 142 - } 143 - 144 - return 0; 145 - 146 - err_drm_fb_helper_fini: 147 - drm_fb_helper_fini(helper); 148 - return ret; 149 - } 150 - 151 - void rockchip_drm_fbdev_fini(struct drm_device *dev) 152 - { 153 - struct rockchip_drm_private *private = dev->dev_private; 154 - struct drm_fb_helper *helper; 155 - 156 - helper = &private->fbdev_helper; 157 - 158 - drm_fb_helper_unregister_fbi(helper); 159 - 160 - if (helper->fb) 161 - drm_framebuffer_put(helper->fb); 162 - 163 - drm_fb_helper_fini(helper); 164 - }
-24
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 - * Author:Mark Yao <mark.yao@rock-chips.com> 5 - */ 6 - 7 - #ifndef _ROCKCHIP_DRM_FBDEV_H 8 - #define _ROCKCHIP_DRM_FBDEV_H 9 - 10 - #ifdef CONFIG_DRM_FBDEV_EMULATION 11 - int rockchip_drm_fbdev_init(struct drm_device *dev); 12 - void rockchip_drm_fbdev_fini(struct drm_device *dev); 13 - #else 14 - static inline int rockchip_drm_fbdev_init(struct drm_device *dev) 15 - { 16 - return 0; 17 - } 18 - 19 - static inline void rockchip_drm_fbdev_fini(struct drm_device *dev) 20 - { 21 - } 22 - #endif 23 - 24 - #endif /* _ROCKCHIP_DRM_FBDEV_H */
-1
drivers/gpu/drm/shmobile/Kconfig
··· 5 5 depends on ARCH_SHMOBILE || COMPILE_TEST 6 6 select BACKLIGHT_CLASS_DEVICE 7 7 select DRM_KMS_HELPER 8 - select DRM_KMS_CMA_HELPER 9 8 select DRM_GEM_CMA_HELPER 10 9 help 11 10 Choose this option if you have an SH Mobile chipset.
-1
drivers/gpu/drm/sti/Kconfig
··· 5 5 select RESET_CONTROLLER 6 6 select DRM_KMS_HELPER 7 7 select DRM_GEM_CMA_HELPER 8 - select DRM_KMS_CMA_HELPER 9 8 select DRM_PANEL 10 9 select FW_LOADER 11 10 select SND_SOC_HDMI_CODEC if SND_SOC
-1
drivers/gpu/drm/stm/Kconfig
··· 4 4 depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM) 5 5 select DRM_KMS_HELPER 6 6 select DRM_GEM_CMA_HELPER 7 - select DRM_KMS_CMA_HELPER 8 7 select DRM_PANEL_BRIDGE 9 8 select VIDEOMODE_HELPERS 10 9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
-1
drivers/gpu/drm/sun4i/Kconfig
··· 5 5 depends on ARCH_SUNXI || COMPILE_TEST 6 6 select DRM_GEM_CMA_HELPER 7 7 select DRM_KMS_HELPER 8 - select DRM_KMS_CMA_HELPER 9 8 select DRM_PANEL 10 9 select REGMAP_MMIO 11 10 select VIDEOMODE_HELPERS
-1
drivers/gpu/drm/tidss/Kconfig
··· 3 3 depends on DRM && OF 4 4 depends on ARM || ARM64 || COMPILE_TEST 5 5 select DRM_KMS_HELPER 6 - select DRM_KMS_CMA_HELPER 7 6 select DRM_GEM_CMA_HELPER 8 7 help 9 8 The TI Keystone family SoCs introduced a new generation of
+1 -1
drivers/gpu/drm/tidss/tidss_drv.c
··· 88 88 return drm_mode_config_helper_resume(&tidss->ddev); 89 89 } 90 90 91 - static const struct dev_pm_ops tidss_pm_ops = { 91 + static __maybe_unused const struct dev_pm_ops tidss_pm_ops = { 92 92 SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume) 93 93 SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL) 94 94 };
-1
drivers/gpu/drm/tilcdc/Kconfig
··· 3 3 tristate "DRM Support for TI LCDC Display Controller" 4 4 depends on DRM && OF && ARM 5 5 select DRM_KMS_HELPER 6 - select DRM_KMS_CMA_HELPER 7 6 select DRM_GEM_CMA_HELPER 8 7 select DRM_BRIDGE 9 8 select DRM_PANEL_BRIDGE
+10 -10
drivers/gpu/drm/tiny/Kconfig
··· 3 3 config DRM_ARCPGU 4 4 tristate "ARC PGU" 5 5 depends on DRM && OF 6 - select DRM_KMS_CMA_HELPER 6 + select DRM_GEM_CMA_HELPER 7 7 select DRM_KMS_HELPER 8 8 help 9 9 Choose this option if you have an ARC PGU controller. ··· 71 71 tristate "DRM support for HX8357D display panels" 72 72 depends on DRM && SPI 73 73 select DRM_KMS_HELPER 74 - select DRM_KMS_CMA_HELPER 74 + select DRM_GEM_CMA_HELPER 75 75 select DRM_MIPI_DBI 76 76 select BACKLIGHT_CLASS_DEVICE 77 77 help ··· 84 84 tristate "DRM support for ILI9163 display panels" 85 85 depends on DRM && SPI 86 86 select BACKLIGHT_CLASS_DEVICE 87 - select DRM_KMS_CMA_HELPER 87 + select DRM_GEM_CMA_HELPER 88 88 select DRM_KMS_HELPER 89 89 select DRM_MIPI_DBI 90 90 help ··· 97 97 tristate "DRM support for ILI9225 display panels" 98 98 depends on DRM && SPI 99 99 select DRM_KMS_HELPER 100 - select DRM_KMS_CMA_HELPER 100 + select DRM_GEM_CMA_HELPER 101 101 select DRM_MIPI_DBI 102 102 help 103 103 DRM driver for the following Ilitek ILI9225 panels: ··· 109 109 tristate "DRM support for ILI9341 display panels" 110 110 depends on DRM && SPI 111 111 select DRM_KMS_HELPER 112 - select DRM_KMS_CMA_HELPER 112 + select DRM_GEM_CMA_HELPER 113 113 select DRM_MIPI_DBI 114 114 select BACKLIGHT_CLASS_DEVICE 115 115 help ··· 122 122 tristate "DRM support for ILI9486 display panels" 123 123 depends on DRM && SPI 124 124 select DRM_KMS_HELPER 125 - select DRM_KMS_CMA_HELPER 125 + select DRM_GEM_CMA_HELPER 126 126 select DRM_MIPI_DBI 127 127 select BACKLIGHT_CLASS_DEVICE 128 128 help ··· 136 136 tristate "DRM support for MI0283QT" 137 137 depends on DRM && SPI 138 138 select DRM_KMS_HELPER 139 - select DRM_KMS_CMA_HELPER 139 + select DRM_GEM_CMA_HELPER 140 140 select DRM_MIPI_DBI 141 141 select BACKLIGHT_CLASS_DEVICE 142 142 help ··· 147 147 tristate "DRM support for Pervasive Displays RePaper panels (V231)" 148 148 depends on DRM && SPI 149 149 select DRM_KMS_HELPER 150 - select DRM_KMS_CMA_HELPER 150 + select DRM_GEM_CMA_HELPER 151 
151 help 152 152 DRM driver for the following Pervasive Displays panels: 153 153 1.44" TFT EPD Panel (E1144CS021) ··· 161 161 tristate "DRM support for Sitronix ST7586 display panels" 162 162 depends on DRM && SPI 163 163 select DRM_KMS_HELPER 164 - select DRM_KMS_CMA_HELPER 164 + select DRM_GEM_CMA_HELPER 165 165 select DRM_MIPI_DBI 166 166 help 167 167 DRM driver for the following Sitronix ST7586 panels: ··· 173 173 tristate "DRM support for Sitronix ST7715R/ST7735R display panels" 174 174 depends on DRM && SPI 175 175 select DRM_KMS_HELPER 176 - select DRM_KMS_CMA_HELPER 176 + select DRM_GEM_CMA_HELPER 177 177 select DRM_MIPI_DBI 178 178 select BACKLIGHT_CLASS_DEVICE 179 179 help
-1
drivers/gpu/drm/ttm/ttm_bo.c
··· 1086 1086 if (timeout == 0) 1087 1087 return -EBUSY; 1088 1088 1089 - dma_resv_add_excl_fence(bo->base.resv, NULL); 1090 1089 return 0; 1091 1090 } 1092 1091 EXPORT_SYMBOL(ttm_bo_wait);
-1
drivers/gpu/drm/tve200/Kconfig
··· 8 8 select DRM_BRIDGE 9 9 select DRM_PANEL_BRIDGE 10 10 select DRM_KMS_HELPER 11 - select DRM_KMS_CMA_HELPER 12 11 select DRM_GEM_CMA_HELPER 13 12 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE 14 13 help
+2 -2
drivers/gpu/drm/v3d/v3d_bo.c
··· 70 70 struct drm_gem_object *obj; 71 71 72 72 if (size == 0) 73 - return NULL; 73 + return ERR_PTR(-EINVAL); 74 74 75 75 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 76 76 if (!bo) 77 - return NULL; 77 + return ERR_PTR(-ENOMEM); 78 78 obj = &bo->base.base; 79 79 80 80 obj->funcs = &v3d_gem_funcs;
-1
drivers/gpu/drm/vc4/Kconfig
··· 6 6 depends on SND && SND_SOC 7 7 depends on COMMON_CLK 8 8 select DRM_KMS_HELPER 9 - select DRM_KMS_CMA_HELPER 10 9 select DRM_GEM_CMA_HELPER 11 10 select DRM_PANEL_BRIDGE 12 11 select SND_PCM
+4 -4
drivers/gpu/drm/vc4/vc4_bo.c
··· 177 177 bo->validated_shader = NULL; 178 178 } 179 179 180 - drm_gem_cma_free_object(obj); 180 + drm_gem_cma_free(&bo->base); 181 181 } 182 182 183 183 static void vc4_bo_remove_from_cache(struct vc4_bo *bo) ··· 720 720 return -EINVAL; 721 721 } 722 722 723 - return drm_gem_cma_mmap(obj, vma); 723 + return drm_gem_cma_mmap(&bo->base, vma); 724 724 } 725 725 726 726 static const struct vm_operations_struct vc4_vm_ops = { ··· 732 732 static const struct drm_gem_object_funcs vc4_gem_object_funcs = { 733 733 .free = vc4_free_object, 734 734 .export = vc4_prime_export, 735 - .get_sg_table = drm_gem_cma_get_sg_table, 736 - .vmap = drm_gem_cma_vmap, 735 + .get_sg_table = drm_gem_cma_object_get_sg_table, 736 + .vmap = drm_gem_cma_object_vmap, 737 737 .mmap = vc4_gem_object_mmap, 738 738 .vm_ops = &vc4_vm_ops, 739 739 };
+1 -1
drivers/gpu/drm/vgem/vgem_drv.c
··· 97 97 98 98 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 99 99 if (!obj) 100 - return NULL; 100 + return ERR_PTR(-ENOMEM); 101 101 102 102 /* 103 103 * vgem doesn't have any begin/end cpu access ioctls, therefore must use
+1 -1
drivers/gpu/drm/virtio/virtgpu_object.c
··· 139 139 140 140 shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); 141 141 if (!shmem) 142 - return NULL; 142 + return ERR_PTR(-ENOMEM); 143 143 144 144 dshmem = &shmem->base.base; 145 145 dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+2 -2
drivers/gpu/drm/vmwgfx/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ 2 + vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \ 3 3 vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ 4 4 vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 5 5 vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ ··· 9 9 vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ 10 10 vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ 11 11 vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ 12 - vmwgfx_devcaps.o ttm_object.o ttm_memory.o 12 + vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx_system_manager.o 13 13 14 14 vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o 15 15 vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
+1 -98
drivers/gpu/drm/vmwgfx/ttm_memory.c
··· 34 34 #include <linux/mm.h> 35 35 #include <linux/module.h> 36 36 #include <linux/slab.h> 37 - #include <linux/swap.h> 38 37 39 38 #include <drm/drm_device.h> 40 39 #include <drm/drm_file.h> ··· 172 173 .sysfs_ops = &ttm_mem_zone_ops, 173 174 .default_attrs = ttm_mem_zone_attrs, 174 175 }; 175 - 176 - static struct attribute ttm_mem_global_lower_mem_limit = { 177 - .name = "lower_mem_limit", 178 - .mode = S_IRUGO | S_IWUSR 179 - }; 180 - 181 - static ssize_t ttm_mem_global_show(struct kobject *kobj, 182 - struct attribute *attr, 183 - char *buffer) 184 - { 185 - struct ttm_mem_global *glob = 186 - container_of(kobj, struct ttm_mem_global, kobj); 187 - uint64_t val = 0; 188 - 189 - spin_lock(&glob->lock); 190 - val = glob->lower_mem_limit; 191 - spin_unlock(&glob->lock); 192 - /* convert from number of pages to KB */ 193 - val <<= (PAGE_SHIFT - 10); 194 - return snprintf(buffer, PAGE_SIZE, "%llu\n", 195 - (unsigned long long) val); 196 - } 197 - 198 - static ssize_t ttm_mem_global_store(struct kobject *kobj, 199 - struct attribute *attr, 200 - const char *buffer, 201 - size_t size) 202 - { 203 - int chars; 204 - uint64_t val64; 205 - unsigned long val; 206 - struct ttm_mem_global *glob = 207 - container_of(kobj, struct ttm_mem_global, kobj); 208 - 209 - chars = sscanf(buffer, "%lu", &val); 210 - if (chars == 0) 211 - return size; 212 - 213 - val64 = val; 214 - /* convert from KB to number of pages */ 215 - val64 >>= (PAGE_SHIFT - 10); 216 - 217 - spin_lock(&glob->lock); 218 - glob->lower_mem_limit = val64; 219 - spin_unlock(&glob->lock); 220 - 221 - return size; 222 - } 223 - 224 - static struct attribute *ttm_mem_global_attrs[] = { 225 - &ttm_mem_global_lower_mem_limit, 226 - NULL 227 - }; 228 - 229 - static const struct sysfs_ops ttm_mem_global_ops = { 230 - .show = &ttm_mem_global_show, 231 - .store = &ttm_mem_global_store, 232 - }; 233 - 234 - static struct kobj_type ttm_mem_glob_kobj_type = { 235 - .sysfs_ops = &ttm_mem_global_ops, 236 - .default_attrs = 
ttm_mem_global_attrs, 237 - }; 176 + static struct kobj_type ttm_mem_glob_kobj_type = {0}; 238 177 239 178 static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, 240 179 bool from_wq, uint64_t extra) ··· 372 435 373 436 si_meminfo(&si); 374 437 375 - spin_lock(&glob->lock); 376 - /* set it as 0 by default to keep original behavior of OOM */ 377 - glob->lower_mem_limit = 0; 378 - spin_unlock(&glob->lock); 379 - 380 438 ret = ttm_mem_init_kernel_zone(glob, &si); 381 439 if (unlikely(ret != 0)) 382 440 goto out_no_zone; ··· 457 525 return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount); 458 526 } 459 527 EXPORT_SYMBOL(ttm_mem_global_free); 460 - 461 - /* 462 - * check if the available mem is under lower memory limit 463 - * 464 - * a. if no swap disk at all or free swap space is under swap_mem_limit 465 - * but available system mem is bigger than sys_mem_limit, allow TTM 466 - * allocation; 467 - * 468 - * b. if the available system mem is less than sys_mem_limit but free 469 - * swap disk is bigger than swap_mem_limit, allow TTM allocation. 470 - */ 471 - bool 472 - ttm_check_under_lowerlimit(struct ttm_mem_global *glob, 473 - uint64_t num_pages, 474 - struct ttm_operation_ctx *ctx) 475 - { 476 - int64_t available; 477 - 478 - /* We allow over commit during suspend */ 479 - if (ctx->force_alloc) 480 - return false; 481 - 482 - available = get_nr_swap_pages() + si_mem_available(); 483 - available -= num_pages; 484 - if (available < glob->lower_mem_limit) 485 - return true; 486 - 487 - return false; 488 - } 489 528 490 529 static int ttm_mem_global_reserve(struct ttm_mem_global *glob, 491 530 struct ttm_mem_zone *single_zone,
+1 -5
drivers/gpu/drm/vmwgfx/ttm_memory.h
··· 50 50 * @work: The workqueue callback for the shrink queue. 51 51 * @lock: Lock to protect the @shrink - and the memory accounting members, 52 52 * that is, essentially the whole structure with some exceptions. 53 - * @lower_mem_limit: include lower limit of swap space and lower limit of 54 - * system memory. 55 53 * @zones: Array of pointers to accounting zones. 56 54 * @num_zones: Number of populated entries in the @zones array. 57 55 * @zone_kernel: Pointer to the kernel zone. ··· 67 69 struct workqueue_struct *swap_queue; 68 70 struct work_struct work; 69 71 spinlock_t lock; 70 - uint64_t lower_mem_limit; 71 72 struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; 72 73 unsigned int num_zones; 73 74 struct ttm_mem_zone *zone_kernel; ··· 88 91 void ttm_mem_global_free_page(struct ttm_mem_global *glob, 89 92 struct page *page, uint64_t size); 90 93 size_t ttm_round_pot(size_t size); 91 - bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, 92 - struct ttm_operation_ctx *ctx); 94 + 93 95 #endif
+26 -26
drivers/gpu/drm/vmwgfx/ttm_object.c
··· 73 73 struct ttm_object_device *tdev; 74 74 spinlock_t lock; 75 75 struct list_head ref_list; 76 - struct drm_open_hash ref_hash[TTM_REF_NUM]; 76 + struct vmwgfx_open_hash ref_hash[TTM_REF_NUM]; 77 77 struct kref refcount; 78 78 }; 79 79 ··· 91 91 92 92 struct ttm_object_device { 93 93 spinlock_t object_lock; 94 - struct drm_open_hash object_hash; 94 + struct vmwgfx_open_hash object_hash; 95 95 atomic_t object_count; 96 96 struct ttm_mem_global *mem_glob; 97 97 struct dma_buf_ops ops; ··· 123 123 124 124 struct ttm_ref_object { 125 125 struct rcu_head rcu_head; 126 - struct drm_hash_item hash; 126 + struct vmwgfx_hash_item hash; 127 127 struct list_head head; 128 128 struct kref kref; 129 129 enum ttm_ref_type ref_type; ··· 247 247 struct ttm_base_object * 248 248 ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) 249 249 { 250 - struct drm_hash_item *hash; 251 - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 250 + struct vmwgfx_hash_item *hash; 251 + struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 252 252 int ret; 253 253 254 254 rcu_read_lock(); 255 - ret = drm_ht_find_item_rcu(ht, key, &hash); 255 + ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); 256 256 if (ret) { 257 257 rcu_read_unlock(); 258 258 return NULL; ··· 267 267 uint32_t key) 268 268 { 269 269 struct ttm_base_object *base = NULL; 270 - struct drm_hash_item *hash; 271 - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 270 + struct vmwgfx_hash_item *hash; 271 + struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 272 272 int ret; 273 273 274 274 rcu_read_lock(); 275 - ret = drm_ht_find_item_rcu(ht, key, &hash); 275 + ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); 276 276 277 277 if (likely(ret == 0)) { 278 278 base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; ··· 312 312 bool ttm_ref_object_exists(struct ttm_object_file *tfile, 313 313 struct ttm_base_object *base) 314 314 { 315 - struct drm_open_hash *ht = 
&tfile->ref_hash[TTM_REF_USAGE]; 316 - struct drm_hash_item *hash; 315 + struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 316 + struct vmwgfx_hash_item *hash; 317 317 struct ttm_ref_object *ref; 318 318 319 319 rcu_read_lock(); 320 - if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0)) 320 + if (unlikely(vmwgfx_ht_find_item_rcu(ht, base->handle, &hash) != 0)) 321 321 goto out_false; 322 322 323 323 /* ··· 349 349 enum ttm_ref_type ref_type, bool *existed, 350 350 bool require_existed) 351 351 { 352 - struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; 352 + struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type]; 353 353 struct ttm_ref_object *ref; 354 - struct drm_hash_item *hash; 354 + struct vmwgfx_hash_item *hash; 355 355 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; 356 356 struct ttm_operation_ctx ctx = { 357 357 .interruptible = false, ··· 367 367 368 368 while (ret == -EINVAL) { 369 369 rcu_read_lock(); 370 - ret = drm_ht_find_item_rcu(ht, base->handle, &hash); 370 + ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash); 371 371 372 372 if (ret == 0) { 373 373 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ··· 398 398 kref_init(&ref->kref); 399 399 400 400 spin_lock(&tfile->lock); 401 - ret = drm_ht_insert_item_rcu(ht, &ref->hash); 401 + ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash); 402 402 403 403 if (likely(ret == 0)) { 404 404 list_add_tail(&ref->head, &tfile->ref_list); ··· 426 426 container_of(kref, struct ttm_ref_object, kref); 427 427 struct ttm_base_object *base = ref->obj; 428 428 struct ttm_object_file *tfile = ref->tfile; 429 - struct drm_open_hash *ht; 429 + struct vmwgfx_open_hash *ht; 430 430 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; 431 431 432 432 ht = &tfile->ref_hash[ref->ref_type]; 433 - (void)drm_ht_remove_item_rcu(ht, &ref->hash); 433 + (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); 434 434 list_del(&ref->head); 435 435 spin_unlock(&tfile->lock); 436 436 ··· 446 
446 int ttm_ref_object_base_unref(struct ttm_object_file *tfile, 447 447 unsigned long key, enum ttm_ref_type ref_type) 448 448 { 449 - struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; 449 + struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type]; 450 450 struct ttm_ref_object *ref; 451 - struct drm_hash_item *hash; 451 + struct vmwgfx_hash_item *hash; 452 452 int ret; 453 453 454 454 spin_lock(&tfile->lock); 455 - ret = drm_ht_find_item(ht, key, &hash); 455 + ret = vmwgfx_ht_find_item(ht, key, &hash); 456 456 if (unlikely(ret != 0)) { 457 457 spin_unlock(&tfile->lock); 458 458 return -EINVAL; ··· 486 486 487 487 spin_unlock(&tfile->lock); 488 488 for (i = 0; i < TTM_REF_NUM; ++i) 489 - drm_ht_remove(&tfile->ref_hash[i]); 489 + vmwgfx_ht_remove(&tfile->ref_hash[i]); 490 490 491 491 ttm_object_file_unref(&tfile); 492 492 } ··· 508 508 INIT_LIST_HEAD(&tfile->ref_list); 509 509 510 510 for (i = 0; i < TTM_REF_NUM; ++i) { 511 - ret = drm_ht_create(&tfile->ref_hash[i], hash_order); 511 + ret = vmwgfx_ht_create(&tfile->ref_hash[i], hash_order); 512 512 if (ret) { 513 513 j = i; 514 514 goto out_err; ··· 518 518 return tfile; 519 519 out_err: 520 520 for (i = 0; i < j; ++i) 521 - drm_ht_remove(&tfile->ref_hash[i]); 521 + vmwgfx_ht_remove(&tfile->ref_hash[i]); 522 522 523 523 kfree(tfile); 524 524 ··· 539 539 tdev->mem_glob = mem_glob; 540 540 spin_lock_init(&tdev->object_lock); 541 541 atomic_set(&tdev->object_count, 0); 542 - ret = drm_ht_create(&tdev->object_hash, hash_order); 542 + ret = vmwgfx_ht_create(&tdev->object_hash, hash_order); 543 543 if (ret != 0) 544 544 goto out_no_object_hash; 545 545 ··· 564 564 565 565 WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); 566 566 idr_destroy(&tdev->idr); 567 - drm_ht_remove(&tdev->object_hash); 567 + vmwgfx_ht_remove(&tdev->object_hash); 568 568 569 569 kfree(tdev); 570 570 }
+1 -2
drivers/gpu/drm/vmwgfx/ttm_object.h
··· 42 42 #include <linux/list.h> 43 43 #include <linux/rcupdate.h> 44 44 45 - #include <drm/drm_hashtab.h> 46 - 47 45 #include "ttm_memory.h" 46 + #include "vmwgfx_hashtab.h" 48 47 49 48 /** 50 49 * enum ttm_ref_type
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 494 494 drm_vma_node_reset(&bo->base.vma_node); 495 495 496 496 ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, 497 - ttm_bo_type_device, placement, 0, 497 + ttm_bo_type_kernel, placement, 0, 498 498 &ctx, NULL, NULL, NULL); 499 499 if (unlikely(ret)) 500 500 goto error_account;
+7
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
··· 145 145 (unsigned int) max, 146 146 (unsigned int) min, 147 147 (unsigned int) fifo->capabilities); 148 + 149 + if (unlikely(min >= max)) { 150 + drm_warn(&dev_priv->drm, 151 + "FIFO memory is not usable. Driver failed to initialize."); 152 + return ERR_PTR(-ENXIO); 153 + } 154 + 148 155 return fifo; 149 156 } 150 157
+12 -12
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
··· 42 42 */ 43 43 struct vmw_cmdbuf_res { 44 44 struct vmw_resource *res; 45 - struct drm_hash_item hash; 45 + struct vmwgfx_hash_item hash; 46 46 struct list_head head; 47 47 enum vmw_cmdbuf_res_state state; 48 48 struct vmw_cmdbuf_res_manager *man; ··· 59 59 * @resources and @list are protected by the cmdbuf mutex for now. 60 60 */ 61 61 struct vmw_cmdbuf_res_manager { 62 - struct drm_open_hash resources; 62 + struct vmwgfx_open_hash resources; 63 63 struct list_head list; 64 64 struct vmw_private *dev_priv; 65 65 }; ··· 81 81 enum vmw_cmdbuf_res_type res_type, 82 82 u32 user_key) 83 83 { 84 - struct drm_hash_item *hash; 84 + struct vmwgfx_hash_item *hash; 85 85 int ret; 86 86 unsigned long key = user_key | (res_type << 24); 87 87 88 - ret = drm_ht_find_item(&man->resources, key, &hash); 88 + ret = vmwgfx_ht_find_item(&man->resources, key, &hash); 89 89 if (unlikely(ret != 0)) 90 90 return ERR_PTR(ret); 91 91 ··· 105 105 struct vmw_cmdbuf_res *entry) 106 106 { 107 107 list_del(&entry->head); 108 - WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); 108 + WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash)); 109 109 vmw_resource_unreference(&entry->res); 110 110 kfree(entry); 111 111 } ··· 167 167 vmw_cmdbuf_res_free(entry->man, entry); 168 168 break; 169 169 case VMW_CMDBUF_RES_DEL: 170 - ret = drm_ht_insert_item(&entry->man->resources, &entry->hash); 170 + ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash); 171 171 BUG_ON(ret); 172 172 list_move_tail(&entry->head, &entry->man->list); 173 173 entry->state = VMW_CMDBUF_RES_COMMITTED; ··· 206 206 return -ENOMEM; 207 207 208 208 cres->hash.key = user_key | (res_type << 24); 209 - ret = drm_ht_insert_item(&man->resources, &cres->hash); 209 + ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash); 210 210 if (unlikely(ret != 0)) { 211 211 kfree(cres); 212 212 goto out_invalid_key; ··· 244 244 struct vmw_resource **res_p) 245 245 { 246 246 struct vmw_cmdbuf_res *entry; 247 - struct 
drm_hash_item *hash; 247 + struct vmwgfx_hash_item *hash; 248 248 int ret; 249 249 250 - ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24), 250 + ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24), 251 251 &hash); 252 252 if (likely(ret != 0)) 253 253 return -EINVAL; ··· 260 260 *res_p = NULL; 261 261 break; 262 262 case VMW_CMDBUF_RES_COMMITTED: 263 - (void) drm_ht_remove_item(&man->resources, &entry->hash); 263 + (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash); 264 264 list_del(&entry->head); 265 265 entry->state = VMW_CMDBUF_RES_DEL; 266 266 list_add_tail(&entry->head, list); ··· 295 295 296 296 man->dev_priv = dev_priv; 297 297 INIT_LIST_HEAD(&man->list); 298 - ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); 298 + ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); 299 299 if (ret == 0) 300 300 return man; 301 301 ··· 320 320 list_for_each_entry_safe(entry, next, &man->list, head) 321 321 vmw_cmdbuf_res_free(man, entry); 322 322 323 - drm_ht_remove(&man->resources); 323 + vmwgfx_ht_remove(&man->resources); 324 324 kfree(man); 325 325 } 326 326
+29 -13
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 1070 1070 "3D will be disabled.\n"); 1071 1071 dev_priv->has_mob = false; 1072 1072 } 1073 + if (vmw_sys_man_init(dev_priv) != 0) { 1074 + drm_info(&dev_priv->drm, 1075 + "No MOB page table memory available. " 1076 + "3D will be disabled.\n"); 1077 + dev_priv->has_mob = false; 1078 + } 1073 1079 } 1074 1080 1075 1081 if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { ··· 1126 1120 vmw_overlay_close(dev_priv); 1127 1121 vmw_kms_close(dev_priv); 1128 1122 out_no_kms: 1129 - if (dev_priv->has_mob) 1123 + if (dev_priv->has_mob) { 1130 1124 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1125 + vmw_sys_man_fini(dev_priv); 1126 + } 1131 1127 if (dev_priv->has_gmr) 1132 1128 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1133 1129 vmw_devcaps_destroy(dev_priv); ··· 1163 1155 unregister_pm_notifier(&dev_priv->pm_nb); 1164 1156 1165 1157 if (dev_priv->ctx.res_ht_initialized) 1166 - drm_ht_remove(&dev_priv->ctx.res_ht); 1158 + vmwgfx_ht_remove(&dev_priv->ctx.res_ht); 1167 1159 vfree(dev_priv->ctx.cmd_bounce); 1168 1160 if (dev_priv->enable_fb) { 1169 1161 vmw_fb_off(dev_priv); ··· 1179 1171 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1180 1172 1181 1173 vmw_release_device_early(dev_priv); 1182 - if (dev_priv->has_mob) 1174 + if (dev_priv->has_mob) { 1183 1175 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1176 + vmw_sys_man_fini(dev_priv); 1177 + } 1184 1178 vmw_devcaps_destroy(dev_priv); 1185 1179 vmw_vram_manager_fini(dev_priv); 1186 1180 ttm_device_fini(&dev_priv->bdev); ··· 1626 1616 1627 1617 ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); 1628 1618 if (ret) 1629 - return ret; 1619 + goto out_error; 1630 1620 1631 1621 ret = pcim_enable_device(pdev); 1632 1622 if (ret) 1633 - return ret; 1623 + goto out_error; 1634 1624 1635 1625 vmw = devm_drm_dev_alloc(&pdev->dev, &driver, 1636 1626 struct vmw_private, drm); 1637 - if (IS_ERR(vmw)) 1638 - return PTR_ERR(vmw); 1627 + if (IS_ERR(vmw)) { 1628 + ret = PTR_ERR(vmw); 1629 + goto out_error; 1630 + } 
1639 1631 1640 1632 pci_set_drvdata(pdev, &vmw->drm); 1641 1633 1642 1634 ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); 1643 1635 if (ret) 1644 - return ret; 1636 + goto out_error; 1645 1637 1646 1638 ret = vmw_driver_load(vmw, ent->device); 1647 1639 if (ret) 1648 - return ret; 1640 + goto out_release; 1649 1641 1650 1642 ret = drm_dev_register(&vmw->drm, 0); 1651 - if (ret) { 1652 - vmw_driver_unload(&vmw->drm); 1653 - return ret; 1654 - } 1643 + if (ret) 1644 + goto out_unload; 1655 1645 1656 1646 return 0; 1647 + out_unload: 1648 + vmw_driver_unload(&vmw->drm); 1649 + out_release: 1650 + ttm_mem_global_release(&ttm_mem_glob); 1651 + out_error: 1652 + return ret; 1657 1653 } 1658 1654 1659 1655 static int __init vmwgfx_init(void)
+12 -6
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 34 34 #include <drm/drm_auth.h> 35 35 #include <drm/drm_device.h> 36 36 #include <drm/drm_file.h> 37 - #include <drm/drm_hashtab.h> 38 37 #include <drm/drm_rect.h> 39 38 40 39 #include <drm/ttm/ttm_bo_driver.h> ··· 42 43 #include "ttm_object.h" 43 44 44 45 #include "vmwgfx_fence.h" 46 + #include "vmwgfx_hashtab.h" 45 47 #include "vmwgfx_reg.h" 46 48 #include "vmwgfx_validation.h" 47 49 ··· 82 82 VMWGFX_NUM_GB_SURFACE +\ 83 83 VMWGFX_NUM_GB_SCREEN_TARGET) 84 84 85 - #define VMW_PL_GMR (TTM_PL_PRIV + 0) 86 - #define VMW_PL_MOB (TTM_PL_PRIV + 1) 85 + #define VMW_PL_GMR (TTM_PL_PRIV + 0) 86 + #define VMW_PL_MOB (TTM_PL_PRIV + 1) 87 + #define VMW_PL_SYSTEM (TTM_PL_PRIV + 2) 87 88 88 89 #define VMW_RES_CONTEXT ttm_driver_type0 89 90 #define VMW_RES_SURFACE ttm_driver_type1 ··· 134 133 */ 135 134 struct vmw_validate_buffer { 136 135 struct ttm_validate_buffer base; 137 - struct drm_hash_item hash; 136 + struct vmwgfx_hash_item hash; 138 137 bool validate_as_mob; 139 138 }; 140 139 ··· 407 406 * @ctx: The validation context 408 407 */ 409 408 struct vmw_sw_context{ 410 - struct drm_open_hash res_ht; 409 + struct vmwgfx_open_hash res_ht; 411 410 bool res_ht_initialized; 412 411 bool kernel; 413 412 struct vmw_fpriv *fp; ··· 1040 1039 extern struct ttm_placement vmw_vram_sys_placement; 1041 1040 extern struct ttm_placement vmw_vram_gmr_placement; 1042 1041 extern struct ttm_placement vmw_sys_placement; 1043 - extern struct ttm_placement vmw_evictable_placement; 1044 1042 extern struct ttm_placement vmw_srf_placement; 1045 1043 extern struct ttm_placement vmw_mob_placement; 1046 1044 extern struct ttm_placement vmw_nonfixed_placement; ··· 1250 1250 1251 1251 int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type); 1252 1252 void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type); 1253 + 1254 + /** 1255 + * System memory manager 1256 + */ 1257 + int vmw_sys_man_init(struct vmw_private *dev_priv); 1258 + void vmw_sys_man_fini(struct vmw_private *dev_priv); 
1253 1259 1254 1260 /** 1255 1261 * Prime - vmwgfx_prime.c
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 4117 4117 vmw_binding_state_reset(sw_context->staged_bindings); 4118 4118 4119 4119 if (!sw_context->res_ht_initialized) { 4120 - ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); 4120 + ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); 4121 4121 if (unlikely(ret != 0)) 4122 4122 goto out_unlock; 4123 4123
+199
drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c
··· 1 + /* 2 + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. 3 + * All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 20 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 21 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 22 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 23 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 + */ 25 + 26 + /* 27 + * Simple open hash tab implementation. 
28 + * 29 + * Authors: 30 + * Thomas Hellström <thomas-at-tungstengraphics-dot-com> 31 + */ 32 + 33 + #include <linux/export.h> 34 + #include <linux/hash.h> 35 + #include <linux/mm.h> 36 + #include <linux/rculist.h> 37 + #include <linux/slab.h> 38 + #include <linux/vmalloc.h> 39 + 40 + #include <drm/drm_print.h> 41 + 42 + #include "vmwgfx_hashtab.h" 43 + 44 + int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order) 45 + { 46 + unsigned int size = 1 << order; 47 + 48 + ht->order = order; 49 + ht->table = NULL; 50 + if (size <= PAGE_SIZE / sizeof(*ht->table)) 51 + ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); 52 + else 53 + ht->table = vzalloc(array_size(size, sizeof(*ht->table))); 54 + if (!ht->table) { 55 + DRM_ERROR("Out of memory for hash table\n"); 56 + return -ENOMEM; 57 + } 58 + return 0; 59 + } 60 + 61 + void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key) 62 + { 63 + struct vmwgfx_hash_item *entry; 64 + struct hlist_head *h_list; 65 + unsigned int hashed_key; 66 + int count = 0; 67 + 68 + hashed_key = hash_long(key, ht->order); 69 + DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); 70 + h_list = &ht->table[hashed_key]; 71 + hlist_for_each_entry(entry, h_list, head) 72 + DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); 73 + } 74 + 75 + static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key) 76 + { 77 + struct vmwgfx_hash_item *entry; 78 + struct hlist_head *h_list; 79 + unsigned int hashed_key; 80 + 81 + hashed_key = hash_long(key, ht->order); 82 + h_list = &ht->table[hashed_key]; 83 + hlist_for_each_entry(entry, h_list, head) { 84 + if (entry->key == key) 85 + return &entry->head; 86 + if (entry->key > key) 87 + break; 88 + } 89 + return NULL; 90 + } 91 + 92 + static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key) 93 + { 94 + struct vmwgfx_hash_item *entry; 95 + struct hlist_head *h_list; 96 + unsigned 
int hashed_key; 97 + 98 + hashed_key = hash_long(key, ht->order); 99 + h_list = &ht->table[hashed_key]; 100 + hlist_for_each_entry_rcu(entry, h_list, head) { 101 + if (entry->key == key) 102 + return &entry->head; 103 + if (entry->key > key) 104 + break; 105 + } 106 + return NULL; 107 + } 108 + 109 + int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) 110 + { 111 + struct vmwgfx_hash_item *entry; 112 + struct hlist_head *h_list; 113 + struct hlist_node *parent; 114 + unsigned int hashed_key; 115 + unsigned long key = item->key; 116 + 117 + hashed_key = hash_long(key, ht->order); 118 + h_list = &ht->table[hashed_key]; 119 + parent = NULL; 120 + hlist_for_each_entry(entry, h_list, head) { 121 + if (entry->key == key) 122 + return -EINVAL; 123 + if (entry->key > key) 124 + break; 125 + parent = &entry->head; 126 + } 127 + if (parent) 128 + hlist_add_behind_rcu(&item->head, parent); 129 + else 130 + hlist_add_head_rcu(&item->head, h_list); 131 + return 0; 132 + } 133 + 134 + /* 135 + * Just insert an item and return any "bits" bit key that hasn't been 136 + * used before. 
137 + */ 138 + int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, 139 + unsigned long seed, int bits, int shift, 140 + unsigned long add) 141 + { 142 + int ret; 143 + unsigned long mask = (1UL << bits) - 1; 144 + unsigned long first, unshifted_key; 145 + 146 + unshifted_key = hash_long(seed, bits); 147 + first = unshifted_key; 148 + do { 149 + item->key = (unshifted_key << shift) + add; 150 + ret = vmwgfx_ht_insert_item(ht, item); 151 + if (ret) 152 + unshifted_key = (unshifted_key + 1) & mask; 153 + } while (ret && (unshifted_key != first)); 154 + 155 + if (ret) { 156 + DRM_ERROR("Available key bit space exhausted\n"); 157 + return -EINVAL; 158 + } 159 + return 0; 160 + } 161 + 162 + int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, 163 + struct vmwgfx_hash_item **item) 164 + { 165 + struct hlist_node *list; 166 + 167 + list = vmwgfx_ht_find_key_rcu(ht, key); 168 + if (!list) 169 + return -EINVAL; 170 + 171 + *item = hlist_entry(list, struct vmwgfx_hash_item, head); 172 + return 0; 173 + } 174 + 175 + int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key) 176 + { 177 + struct hlist_node *list; 178 + 179 + list = vmwgfx_ht_find_key(ht, key); 180 + if (list) { 181 + hlist_del_init_rcu(list); 182 + return 0; 183 + } 184 + return -EINVAL; 185 + } 186 + 187 + int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) 188 + { 189 + hlist_del_init_rcu(&item->head); 190 + return 0; 191 + } 192 + 193 + void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht) 194 + { 195 + if (ht->table) { 196 + kvfree(ht->table); 197 + ht->table = NULL; 198 + } 199 + }
+90
drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* 3 + * Copyright 2021 VMware, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person 6 + * obtaining a copy of this software and associated documentation 7 + * files (the "Software"), to deal in the Software without 8 + * restriction, including without limitation the rights to use, copy, 9 + * modify, merge, publish, distribute, sublicense, and/or sell copies 10 + * of the Software, and to permit persons to whom the Software is 11 + * furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice shall be 14 + * included in all copies or substantial portions of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 20 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 21 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 22 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 + * SOFTWARE. 
24 + * 25 + */ 26 + 27 + #include "vmwgfx_drv.h" 28 + 29 + #include <drm/ttm/ttm_bo_driver.h> 30 + #include <drm/ttm/ttm_device.h> 31 + #include <drm/ttm/ttm_placement.h> 32 + #include <drm/ttm/ttm_resource.h> 33 + #include <linux/slab.h> 34 + 35 + 36 + static int vmw_sys_man_alloc(struct ttm_resource_manager *man, 37 + struct ttm_buffer_object *bo, 38 + const struct ttm_place *place, 39 + struct ttm_resource **res) 40 + { 41 + *res = kzalloc(sizeof(**res), GFP_KERNEL); 42 + if (!*res) 43 + return -ENOMEM; 44 + 45 + ttm_resource_init(bo, place, *res); 46 + return 0; 47 + } 48 + 49 + static void vmw_sys_man_free(struct ttm_resource_manager *man, 50 + struct ttm_resource *res) 51 + { 52 + kfree(res); 53 + } 54 + 55 + static const struct ttm_resource_manager_func vmw_sys_manager_func = { 56 + .alloc = vmw_sys_man_alloc, 57 + .free = vmw_sys_man_free, 58 + }; 59 + 60 + int vmw_sys_man_init(struct vmw_private *dev_priv) 61 + { 62 + struct ttm_device *bdev = &dev_priv->bdev; 63 + struct ttm_resource_manager *man = 64 + kzalloc(sizeof(*man), GFP_KERNEL); 65 + 66 + if (!man) 67 + return -ENOMEM; 68 + 69 + man->use_tt = true; 70 + man->func = &vmw_sys_manager_func; 71 + 72 + ttm_resource_manager_init(man, 0); 73 + ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man); 74 + ttm_resource_manager_set_used(man, true); 75 + return 0; 76 + } 77 + 78 + void vmw_sys_man_fini(struct vmw_private *dev_priv) 79 + { 80 + struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, 81 + VMW_PL_SYSTEM); 82 + 83 + ttm_resource_manager_evict_all(&dev_priv->bdev, man); 84 + 85 + ttm_resource_manager_set_used(man, false); 86 + ttm_resource_manager_cleanup(man); 87 + 88 + ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL); 89 + kfree(man); 90 + }
+26 -32
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
··· 92 92 } 93 93 }; 94 94 95 + static const struct ttm_place vmw_sys_placement_flags = { 96 + .fpfn = 0, 97 + .lpfn = 0, 98 + .mem_type = VMW_PL_SYSTEM, 99 + .flags = 0 100 + }; 101 + 95 102 struct ttm_placement vmw_vram_gmr_placement = { 96 103 .num_placement = 2, 97 104 .placement = vram_gmr_placement_flags, ··· 120 113 .busy_placement = &sys_placement_flags 121 114 }; 122 115 123 - static const struct ttm_place evictable_placement_flags[] = { 124 - { 125 - .fpfn = 0, 126 - .lpfn = 0, 127 - .mem_type = TTM_PL_SYSTEM, 128 - .flags = 0 129 - }, { 130 - .fpfn = 0, 131 - .lpfn = 0, 132 - .mem_type = TTM_PL_VRAM, 133 - .flags = 0 134 - }, { 135 - .fpfn = 0, 136 - .lpfn = 0, 137 - .mem_type = VMW_PL_GMR, 138 - .flags = 0 139 - }, { 140 - .fpfn = 0, 141 - .lpfn = 0, 142 - .mem_type = VMW_PL_MOB, 143 - .flags = 0 144 - } 116 + struct ttm_placement vmw_pt_sys_placement = { 117 + .num_placement = 1, 118 + .placement = &vmw_sys_placement_flags, 119 + .num_busy_placement = 1, 120 + .busy_placement = &vmw_sys_placement_flags 145 121 }; 146 122 147 123 static const struct ttm_place nonfixed_placement_flags[] = { ··· 144 154 .mem_type = VMW_PL_MOB, 145 155 .flags = 0 146 156 } 147 - }; 148 - 149 - struct ttm_placement vmw_evictable_placement = { 150 - .num_placement = 4, 151 - .placement = evictable_placement_flags, 152 - .num_busy_placement = 1, 153 - .busy_placement = &sys_placement_flags 154 157 }; 155 158 156 159 struct ttm_placement vmw_srf_placement = { ··· 467 484 &vmw_be->vsgt, ttm->num_pages, 468 485 vmw_be->gmr_id); 469 486 break; 487 + case VMW_PL_SYSTEM: 488 + /* Nothing to be done for a system bind */ 489 + break; 470 490 default: 471 491 BUG(); 472 492 } ··· 492 506 break; 493 507 case VMW_PL_MOB: 494 508 vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); 509 + break; 510 + case VMW_PL_SYSTEM: 495 511 break; 496 512 default: 497 513 BUG(); ··· 612 624 613 625 switch (mem->mem_type) { 614 626 case TTM_PL_SYSTEM: 627 + case VMW_PL_SYSTEM: 615 628 case VMW_PL_GMR: 616 
629 case VMW_PL_MOB: 617 630 return 0; ··· 659 670 (void) ttm_bo_wait(bo, false, false); 660 671 } 661 672 673 + static bool vmw_memtype_is_system(uint32_t mem_type) 674 + { 675 + return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM; 676 + } 677 + 662 678 static int vmw_move(struct ttm_buffer_object *bo, 663 679 bool evict, 664 680 struct ttm_operation_ctx *ctx, ··· 674 680 struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); 675 681 int ret; 676 682 677 - if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) { 683 + if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) { 678 684 ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem); 679 685 if (ret) 680 686 return ret; ··· 683 689 vmw_move_notify(bo, bo->resource, new_mem); 684 690 685 691 if (old_man->use_tt && new_man->use_tt) { 686 - if (bo->resource->mem_type == TTM_PL_SYSTEM) { 692 + if (vmw_memtype_is_system(bo->resource->mem_type)) { 687 693 ttm_bo_move_null(bo, new_mem); 688 694 return 0; 689 695 } ··· 730 736 int ret; 731 737 732 738 ret = vmw_bo_create_kernel(dev_priv, bo_size, 733 - &vmw_sys_placement, 739 + &vmw_pt_sys_placement, 734 740 &bo); 735 741 if (unlikely(ret != 0)) 736 742 return ret;
+11 -11
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
··· 43 43 */ 44 44 struct vmw_validation_bo_node { 45 45 struct ttm_validate_buffer base; 46 - struct drm_hash_item hash; 46 + struct vmwgfx_hash_item hash; 47 47 unsigned int coherent_count; 48 48 u32 as_mob : 1; 49 49 u32 cpu_blit : 1; ··· 72 72 */ 73 73 struct vmw_validation_res_node { 74 74 struct list_head head; 75 - struct drm_hash_item hash; 75 + struct vmwgfx_hash_item hash; 76 76 struct vmw_resource *res; 77 77 struct vmw_buffer_object *new_backup; 78 78 unsigned long new_backup_offset; ··· 184 184 return NULL; 185 185 186 186 if (ctx->ht) { 187 - struct drm_hash_item *hash; 187 + struct vmwgfx_hash_item *hash; 188 188 189 - if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) 189 + if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash)) 190 190 bo_node = container_of(hash, typeof(*bo_node), hash); 191 191 } else { 192 192 struct vmw_validation_bo_node *entry; ··· 221 221 return NULL; 222 222 223 223 if (ctx->ht) { 224 - struct drm_hash_item *hash; 224 + struct vmwgfx_hash_item *hash; 225 225 226 - if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash)) 226 + if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash)) 227 227 res_node = container_of(hash, typeof(*res_node), hash); 228 228 } else { 229 229 struct vmw_validation_res_node *entry; ··· 280 280 281 281 if (ctx->ht) { 282 282 bo_node->hash.key = (unsigned long) vbo; 283 - ret = drm_ht_insert_item(ctx->ht, &bo_node->hash); 283 + ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash); 284 284 if (ret) { 285 285 DRM_ERROR("Failed to initialize a buffer " 286 286 "validation entry.\n"); ··· 335 335 336 336 if (ctx->ht) { 337 337 node->hash.key = (unsigned long) res; 338 - ret = drm_ht_insert_item(ctx->ht, &node->hash); 338 + ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash); 339 339 if (ret) { 340 340 DRM_ERROR("Failed to initialize a resource validation " 341 341 "entry.\n"); ··· 688 688 return; 689 689 690 690 list_for_each_entry(entry, &ctx->bo_list, base.head) 691 - (void) 
drm_ht_remove_item(ctx->ht, &entry->hash); 691 + (void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash); 692 692 693 693 list_for_each_entry(val, &ctx->resource_list, head) 694 - (void) drm_ht_remove_item(ctx->ht, &val->hash); 694 + (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash); 695 695 696 696 list_for_each_entry(val, &ctx->resource_ctx_list, head) 697 - (void) drm_ht_remove_item(ctx->ht, &val->hash); 697 + (void) vmwgfx_ht_remove_item(ctx->ht, &val->hash); 698 698 699 699 ctx->ht = NULL; 700 700 }
+4 -3
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
··· 31 31 #include <linux/list.h> 32 32 #include <linux/ww_mutex.h> 33 33 34 - #include <drm/drm_hashtab.h> 35 34 #include <drm/ttm/ttm_execbuf_util.h> 35 + 36 + #include "vmwgfx_hashtab.h" 36 37 37 38 #define VMW_RES_DIRTY_NONE 0 38 39 #define VMW_RES_DIRTY_SET BIT(0) ··· 74 73 * @total_mem: Amount of reserved memory. 75 74 */ 76 75 struct vmw_validation_context { 77 - struct drm_open_hash *ht; 76 + struct vmwgfx_open_hash *ht; 78 77 struct list_head resource_list; 79 78 struct list_head resource_ctx_list; 80 79 struct list_head bo_list; ··· 152 151 * available at validation context declaration time 153 152 */ 154 153 static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx, 155 - struct drm_open_hash *ht) 154 + struct vmwgfx_open_hash *ht) 156 155 { 157 156 ctx->ht = ht; 158 157 }
-1
drivers/gpu/drm/xlnx/Kconfig
··· 7 7 depends on XILINX_ZYNQMP_DPDMA 8 8 select DMA_ENGINE 9 9 select DRM_GEM_CMA_HELPER 10 - select DRM_KMS_CMA_HELPER 11 10 select DRM_KMS_HELPER 12 11 select GENERIC_PHY 13 12 help
+1 -4
include/drm/drm_device.h
··· 6 6 #include <linux/mutex.h> 7 7 #include <linux/idr.h> 8 8 9 - #include <drm/drm_hashtab.h> 9 + #include <drm/drm_legacy.h> 10 10 #include <drm/drm_mode_config.h> 11 11 12 12 struct drm_driver; 13 13 struct drm_minor; 14 14 struct drm_master; 15 - struct drm_device_dma; 16 15 struct drm_vblank_crtc; 17 - struct drm_sg_mem; 18 - struct drm_local_map; 19 16 struct drm_vma_offset_manager; 20 17 struct drm_vram_mm; 21 18 struct drm_fb_helper;
+3 -2
include/drm/drm_drv.h
··· 291 291 /** 292 292 * @gem_create_object: constructor for gem objects 293 293 * 294 - * Hook for allocating the GEM object struct, for use by the CMA and 295 - * SHMEM GEM helpers. 294 + * Hook for allocating the GEM object struct, for use by the CMA 295 + * and SHMEM GEM helpers. Returns a GEM object on success, or an 296 + * ERR_PTR()-encoded error code otherwise. 296 297 */ 297 298 struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, 298 299 size_t size);
+140 -51
include/drm/drm_gem_cma_helper.h
··· 32 32 #define to_drm_gem_cma_obj(gem_obj) \ 33 33 container_of(gem_obj, struct drm_gem_cma_object, base) 34 34 35 - #ifndef CONFIG_MMU 36 - #define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ 37 - .get_unmapped_area = drm_gem_cma_get_unmapped_area, 38 - #else 39 - #define DRM_GEM_CMA_UNMAPPED_AREA_FOPS 40 - #endif 35 + struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, 36 + size_t size); 37 + void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj); 38 + void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, 39 + struct drm_printer *p, unsigned int indent); 40 + struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj); 41 + int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map); 42 + int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma); 43 + 44 + extern const struct vm_operations_struct drm_gem_cma_vm_ops; 45 + 46 + /* 47 + * GEM object functions 48 + */ 41 49 42 50 /** 43 - * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers 44 - * @name: name for the generated structure 51 + * drm_gem_cma_object_free - GEM object function for drm_gem_cma_free() 52 + * @obj: GEM object to free 45 53 * 46 - * This macro autogenerates a suitable &struct file_operations for CMA based 47 - * drivers, which can be assigned to &drm_driver.fops. Note that this structure 48 - * cannot be shared between drivers, because it contains a reference to the 49 - * current module using THIS_MODULE. 50 - * 51 - * Note that the declaration is already marked as static - if you need a 52 - * non-static version of this you're probably doing it wrong and will break the 53 - * THIS_MODULE reference by accident. 54 + * This function wraps drm_gem_cma_free_object(). Drivers that employ the CMA helpers 55 + * should use it as their &drm_gem_object_funcs.free handler. 
54 56 */ 55 - #define DEFINE_DRM_GEM_CMA_FOPS(name) \ 56 - static const struct file_operations name = {\ 57 - .owner = THIS_MODULE,\ 58 - .open = drm_open,\ 59 - .release = drm_release,\ 60 - .unlocked_ioctl = drm_ioctl,\ 61 - .compat_ioctl = drm_compat_ioctl,\ 62 - .poll = drm_poll,\ 63 - .read = drm_read,\ 64 - .llseek = noop_llseek,\ 65 - .mmap = drm_gem_mmap,\ 66 - DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ 67 - } 57 + static inline void drm_gem_cma_object_free(struct drm_gem_object *obj) 58 + { 59 + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 68 60 69 - /* free GEM object */ 70 - void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); 61 + drm_gem_cma_free(cma_obj); 62 + } 63 + 64 + /** 65 + * drm_gem_cma_object_print_info() - Print &drm_gem_cma_object info for debugfs 66 + * @p: DRM printer 67 + * @indent: Tab indentation level 68 + * @obj: GEM object 69 + * 70 + * This function wraps drm_gem_cma_print_info(). Drivers that employ the CMA helpers 71 + * should use this function as their &drm_gem_object_funcs.print_info handler. 72 + */ 73 + static inline void drm_gem_cma_object_print_info(struct drm_printer *p, unsigned int indent, 74 + const struct drm_gem_object *obj) 75 + { 76 + const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 77 + 78 + drm_gem_cma_print_info(cma_obj, p, indent); 79 + } 80 + 81 + /** 82 + * drm_gem_cma_object_get_sg_table - GEM object function for drm_gem_cma_get_sg_table() 83 + * @obj: GEM object 84 + * 85 + * This function wraps drm_gem_cma_get_sg_table(). Drivers that employ the CMA helpers should 86 + * use it as their &drm_gem_object_funcs.get_sg_table handler. 87 + * 88 + * Returns: 89 + * A pointer to the scatter/gather table of pinned pages or NULL on failure. 
90 + */ 91 + static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_object *obj) 92 + { 93 + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 94 + 95 + return drm_gem_cma_get_sg_table(cma_obj); 96 + } 97 + 98 + /* 99 + * drm_gem_cma_object_vmap - GEM object function for drm_gem_cma_vmap() 100 + * @obj: GEM object 101 + * @map: Returns the kernel virtual address of the CMA GEM object's backing store. 102 + * 103 + * This function wraps drm_gem_cma_vmap(). Drivers that employ the CMA helpers should 104 + * use it as their &drm_gem_object_funcs.vmap handler. 105 + * 106 + * Returns: 107 + * 0 on success or a negative error code on failure. 108 + */ 109 + static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 110 + { 111 + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 112 + 113 + return drm_gem_cma_vmap(cma_obj, map); 114 + } 115 + 116 + /** 117 + * drm_gem_cma_object_mmap - GEM object function for drm_gem_cma_mmap() 118 + * @obj: GEM object 119 + * @vma: VMA for the area to be mapped 120 + * 121 + * This function wraps drm_gem_cma_mmap(). Drivers that employ the cma helpers should 122 + * use it as their &drm_gem_object_funcs.mmap handler. 123 + * 124 + * Returns: 125 + * 0 on success or a negative error code on failure. 
126 + */ 127 + static inline int drm_gem_cma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 128 + { 129 + struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 130 + 131 + return drm_gem_cma_mmap(cma_obj, vma); 132 + } 133 + 134 + /* 135 + * Driver ops 136 + */ 71 137 72 138 /* create memory region for DRM framebuffer */ 73 139 int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, ··· 145 79 struct drm_device *drm, 146 80 struct drm_mode_create_dumb *args); 147 81 148 - /* allocate physical memory */ 149 - struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, 150 - size_t size); 151 - 152 - extern const struct vm_operations_struct drm_gem_cma_vm_ops; 153 - 154 - #ifndef CONFIG_MMU 155 - unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, 156 - unsigned long addr, 157 - unsigned long len, 158 - unsigned long pgoff, 159 - unsigned long flags); 160 - #endif 161 - 162 - void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, 163 - const struct drm_gem_object *obj); 164 - 165 - struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj); 166 82 struct drm_gem_object * 167 83 drm_gem_cma_prime_import_sg_table(struct drm_device *dev, 168 84 struct dma_buf_attachment *attach, 169 85 struct sg_table *sgt); 170 - int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 171 - int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 172 86 173 87 /** 174 88 * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations ··· 230 184 drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, 231 185 struct dma_buf_attachment *attach, 232 186 struct sg_table *sgt); 187 + 188 + /* 189 + * File ops 190 + */ 191 + 192 + #ifndef CONFIG_MMU 193 + unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, 194 + unsigned long addr, 195 + unsigned long len, 196 + unsigned long pgoff, 197 + unsigned long flags); 198 + #define 
DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ 199 + .get_unmapped_area = drm_gem_cma_get_unmapped_area, 200 + #else 201 + #define DRM_GEM_CMA_UNMAPPED_AREA_FOPS 202 + #endif 203 + 204 + /** 205 + * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers 206 + * @name: name for the generated structure 207 + * 208 + * This macro autogenerates a suitable &struct file_operations for CMA based 209 + * drivers, which can be assigned to &drm_driver.fops. Note that this structure 210 + * cannot be shared between drivers, because it contains a reference to the 211 + * current module using THIS_MODULE. 212 + * 213 + * Note that the declaration is already marked as static - if you need a 214 + * non-static version of this you're probably doing it wrong and will break the 215 + * THIS_MODULE reference by accident. 216 + */ 217 + #define DEFINE_DRM_GEM_CMA_FOPS(name) \ 218 + static const struct file_operations name = {\ 219 + .owner = THIS_MODULE,\ 220 + .open = drm_open,\ 221 + .release = drm_release,\ 222 + .unlocked_ioctl = drm_ioctl,\ 223 + .compat_ioctl = drm_compat_ioctl,\ 224 + .poll = drm_poll,\ 225 + .read = drm_read,\ 226 + .llseek = noop_llseek,\ 227 + .mmap = drm_gem_mmap,\ 228 + DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ 229 + } 233 230 234 231 #endif /* __DRM_GEM_CMA_HELPER_H__ */
+29 -25
include/drm/drm_hashtab.h drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h
··· 1 - /************************************************************************** 2 - * 1 + /* 3 2 * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. 4 3 * All Rights Reserved. 5 4 * ··· 21 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 22 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 23 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 - * 25 - * 26 - **************************************************************************/ 25 + */ 26 + 27 27 /* 28 28 * Simple open hash tab implementation. 29 29 * ··· 30 32 * Thomas Hellström <thomas-at-tungstengraphics-dot-com> 31 33 */ 32 34 33 - #ifndef DRM_HASHTAB_H 34 - #define DRM_HASHTAB_H 35 + /* 36 + * TODO: Replace this hashtable with Linux' generic implementation 37 + * from <linux/hashtable.h>. 38 + */ 39 + 40 + #ifndef VMWGFX_HASHTAB_H 41 + #define VMWGFX_HASHTAB_H 35 42 36 43 #include <linux/list.h> 37 44 38 45 #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) 39 46 40 - struct drm_hash_item { 47 + struct vmwgfx_hash_item { 41 48 struct hlist_node head; 42 49 unsigned long key; 43 50 }; 44 51 45 - struct drm_open_hash { 52 + struct vmwgfx_open_hash { 46 53 struct hlist_head *table; 47 54 u8 order; 48 55 }; 49 56 50 - int drm_ht_create(struct drm_open_hash *ht, unsigned int order); 51 - int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); 52 - int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, 53 - unsigned long seed, int bits, int shift, 54 - unsigned long add); 55 - int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); 57 + int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order); 58 + int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); 59 + int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, 60 + unsigned long seed, int bits, int 
shift, 61 + unsigned long add); 62 + int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, 63 + struct vmwgfx_hash_item **item); 56 64 57 - void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); 58 - int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); 59 - int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); 60 - void drm_ht_remove(struct drm_open_hash *ht); 65 + void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key); 66 + int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key); 67 + int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); 68 + void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht); 61 69 62 70 /* 63 71 * RCU-safe interface 64 72 * 65 73 * The user of this API needs to make sure that two or more instances of the 66 74 * hash table manipulation functions are never run simultaneously. 67 - * The lookup function drm_ht_find_item_rcu may, however, run simultaneously 75 + * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously 68 76 * with any of the manipulation functions as long as it's called from within 69 77 * an RCU read-locked section. 70 78 */ 71 - #define drm_ht_insert_item_rcu drm_ht_insert_item 72 - #define drm_ht_just_insert_please_rcu drm_ht_just_insert_please 73 - #define drm_ht_remove_key_rcu drm_ht_remove_key 74 - #define drm_ht_remove_item_rcu drm_ht_remove_item 75 - #define drm_ht_find_item_rcu drm_ht_find_item 79 + #define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item 80 + #define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please 81 + #define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key 82 + #define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item 83 + #define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item 76 84 77 85 #endif
+14 -1
include/drm/drm_legacy.h
··· 37 37 38 38 #include <drm/drm.h> 39 39 #include <drm/drm_auth.h> 40 - #include <drm/drm_hashtab.h> 41 40 42 41 struct drm_device; 43 42 struct drm_driver; ··· 49 50 * If you add a new driver and it uses any of these functions or structures, 50 51 * you're doing it terribly wrong. 51 52 */ 53 + 54 + /* 55 + * Hash-table Support 56 + */ 57 + 58 + struct drm_hash_item { 59 + struct hlist_node head; 60 + unsigned long key; 61 + }; 62 + 63 + struct drm_open_hash { 64 + struct hlist_head *table; 65 + u8 order; 66 + }; 52 67 53 68 /** 54 69 * DMA buffer.
-1
include/drm/ttm/ttm_bo_api.h
··· 32 32 #define _TTM_BO_API_H_ 33 33 34 34 #include <drm/drm_gem.h> 35 - #include <drm/drm_hashtab.h> 36 35 #include <drm/drm_vma_manager.h> 37 36 #include <linux/kref.h> 38 37 #include <linux/list.h>
+11
include/drm/ttm/ttm_placement.h
··· 35 35 36 36 /* 37 37 * Memory regions for data placement. 38 + * 39 + * Buffers placed in TTM_PL_SYSTEM are considered under TTMs control and can 40 + * be swapped out whenever TTMs thinks it is a good idea. 41 + * In cases where drivers would like to use TTM_PL_SYSTEM as a valid 42 + * placement they need to be able to handle the issues that arise due to the 43 + * above manually. 44 + * 45 + * For BO's which reside in system memory but for which the accelerator 46 + * requires direct access (i.e. their usage needs to be synchronized 47 + * between the CPU and accelerator via fences) a new, driver private 48 + * placement that can handle such scenarios is a good idea. 38 49 */ 39 50 40 51 #define TTM_PL_SYSTEM 0